focal_method: string, lengths 13 to 60.9k
test_case: string, lengths 25 to 109k
public COSBase remove( int i ) { COSBase removedEntry = objects.remove( i ); getUpdateState().update(); return removedEntry; }
@Test void testRemove() { COSArray cosArray = COSArray .ofCOSIntegers(Arrays.asList(1, 2, 3, 4, 5, 6)); cosArray.clear(); assertEquals(0, cosArray.size()); cosArray = COSArray.ofCOSIntegers(Arrays.asList(1, 2, 3, 4, 5, 6)); assertEquals(COSInteger.get(3), cosArray.remove(2)); // 1,2,4,5,6 should be left assertEquals(5, cosArray.size()); assertEquals(1, cosArray.getInt(0)); assertEquals(4, cosArray.getInt(2)); // 1,2,4,6 should be left assertTrue(cosArray.removeObject(COSInteger.get(5))); assertEquals(4, cosArray.size()); assertEquals(1, cosArray.getInt(0)); assertEquals(4, cosArray.getInt(2)); assertEquals(6, cosArray.getInt(3)); cosArray = COSArray.ofCOSIntegers(Arrays.asList(1, 2, 3, 4, 5, 6)); cosArray.removeAll(Arrays.asList(COSInteger.get(3), COSInteger.get(4))); // 1,2,5,6 should be left assertEquals(4, cosArray.size()); assertEquals(2, cosArray.getInt(1)); assertEquals(5, cosArray.getInt(2)); cosArray = COSArray.ofCOSIntegers(Arrays.asList(1, 2, 3, 4, 5, 6)); cosArray.retainAll(Arrays.asList(COSInteger.get(3), COSInteger.get(4))); // 3,4 should be left assertEquals(2, cosArray.size()); assertEquals(3, cosArray.getInt(0)); assertEquals(4, cosArray.getInt(1)); }
@Override public void doInject(RequestResource resource, RamContext context, LoginIdentityContext result) { String accessKey = context.getAccessKey(); String secretKey = context.getSecretKey(); // STS temporary credential authentication takes priority over AK/SK authentication if (StsConfig.getInstance().isStsOn()) { StsCredential stsCredential = StsCredentialHolder.getInstance().getStsCredential(); accessKey = stsCredential.getAccessKeyId(); secretKey = stsCredential.getAccessKeySecret(); result.setParameter(IdentifyConstants.SECURITY_TOKEN_HEADER, stsCredential.getSecurityToken()); } if (StringUtils.isNotEmpty(accessKey) && StringUtils.isNotBlank(secretKey)) { result.setParameter(ACCESS_KEY_HEADER, accessKey); } String signatureKey = secretKey; if (StringUtils.isNotEmpty(context.getRegionId())) { signatureKey = CalculateV4SigningKeyUtil .finalSigningKeyStringWithDefaultInfo(secretKey, context.getRegionId()); result.setParameter(RamConstants.SIGNATURE_VERSION, RamConstants.V4); } Map<String, String> signHeaders = SpasAdapter .getSignHeaders(getResource(resource.getNamespace(), resource.getGroup()), signatureKey); result.setParameters(signHeaders); }
@Test void testDoInjectWithFullResource() throws Exception { LoginIdentityContext actual = new LoginIdentityContext(); configResourceInjector.doInject(resource, ramContext, actual); assertEquals(3, actual.getAllKey().size()); assertEquals(PropertyKeyConst.ACCESS_KEY, actual.getParameter("Spas-AccessKey")); assertTrue(actual.getAllKey().contains("Timestamp")); assertTrue(actual.getAllKey().contains("Spas-Signature")); }
@Description("bitwise AND in 2's complement arithmetic") @ScalarFunction @SqlType(StandardTypes.BIGINT) public static long bitwiseAnd(@SqlType(StandardTypes.BIGINT) long left, @SqlType(StandardTypes.BIGINT) long right) { return left & right; }
@Test public void testBitwiseAnd() { assertFunction("bitwise_and(0, -1)", BIGINT, 0L); assertFunction("bitwise_and(3, 8)", BIGINT, 3L & 8L); assertFunction("bitwise_and(-4, 12)", BIGINT, -4L & 12L); assertFunction("bitwise_and(60, 21)", BIGINT, 60L & 21L); }
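A worked check of the expression-based expectations above: 0 & -1 = 0 because 0 has no bits set; -4 in two's complement is ...11111100, so -4 & 12 (0b1100) keeps all of 12's bits and yields 12; and 60 (0b111100) & 21 (0b010101) = 0b010100 = 20.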
@Override public boolean removeIf(IntPredicate filter) { throw new UnsupportedOperationException("RangeSet is immutable"); }
@Test(expected = UnsupportedOperationException.class) public void removeIfObject() { RangeSet rs = new RangeSet(4); rs.removeIf((Integer i) -> i == 3); }
public Object resolve(final Expression expression) { return new Visitor().process(expression, null); }
@Test public void shouldParseTime() { // Given: final SqlType type = SqlTypes.TIME; final Expression exp = new StringLiteral("04:40:02"); // When: Object o = new GenericExpressionResolver(type, FIELD_NAME, registry, config, "insert value", false).resolve(exp); // Then: assertTrue(o instanceof Time); assertThat(((Time) o).getTime(), is(16802000L)); }
@Override public String getMethod() { return this.methodType; }
@Test public void test() { String uri = "http://www.ccc.com/test"; String method = "GET"; final HttpCommonRequest httpCommonRequest = new HttpCommonRequest(method, uri); Assert.assertEquals(httpCommonRequest.getMethod(), method); Assert.assertEquals(httpCommonRequest.getRequestLine().getUri(), uri); }
public void reset() { metricsContainers.forEach((key, value) -> value.reset()); unboundContainer.reset(); }
@Test public void testReset() { MetricsContainerStepMap attemptedMetrics = new MetricsContainerStepMap(); attemptedMetrics.update(STEP1, metricsContainer); attemptedMetrics.update(STEP2, metricsContainer); attemptedMetrics.update(STEP2, metricsContainer); MetricResults metricResults = asAttemptedOnlyMetricResults(attemptedMetrics); MetricQueryResults allres = metricResults.allMetrics(); assertCounter(COUNTER_NAME, allres, STEP1, VALUE, false); assertDistribution( DISTRIBUTION_NAME, allres, STEP1, DistributionResult.create(VALUE * 3, 2, VALUE, VALUE * 2), false); assertGauge(GAUGE_NAME, allres, STEP1, GaugeResult.create(VALUE, Instant.now()), false); assertStringSet( STRING_SET_NAME, allres, STEP1, StringSetResult.create(ImmutableSet.of(FIRST_STRING, SECOND_STRING)), false); assertCounter(COUNTER_NAME, allres, STEP2, VALUE * 2, false); assertDistribution( DISTRIBUTION_NAME, allres, STEP2, DistributionResult.create(VALUE * 6, 4, VALUE, VALUE * 2), false); assertGauge(GAUGE_NAME, allres, STEP2, GaugeResult.create(VALUE, Instant.now()), false); assertStringSet( STRING_SET_NAME, allres, STEP2, StringSetResult.create(ImmutableSet.of(FIRST_STRING, SECOND_STRING)), false); attemptedMetrics.reset(); metricResults = asAttemptedOnlyMetricResults(attemptedMetrics); allres = metricResults.allMetrics(); // Check that the metrics container for STEP1 is reset assertCounter(COUNTER_NAME, allres, STEP1, 0L, false); assertDistribution( DISTRIBUTION_NAME, allres, STEP1, DistributionResult.IDENTITY_ELEMENT, false); assertGauge(GAUGE_NAME, allres, STEP1, GaugeResult.empty(), false); assertStringSet(STRING_SET_NAME, allres, STEP1, StringSetResult.empty(), false); // Check that the metrics container for STEP2 is reset assertCounter(COUNTER_NAME, allres, STEP2, 0L, false); assertDistribution( DISTRIBUTION_NAME, allres, STEP2, DistributionResult.IDENTITY_ELEMENT, false); assertGauge(GAUGE_NAME, allres, STEP2, GaugeResult.empty(), false); assertStringSet(STRING_SET_NAME, allres, STEP2, StringSetResult.empty(), false); }
public boolean isAuditTopic(KafkaCluster cluster, String topic) { var writer = auditWriters.get(cluster.getName()); return writer != null && topic.equals(writer.targetTopic()) && writer.isTopicWritingEnabled(); }
@Test void isAuditTopicChecksIfAuditIsEnabledForCluster() { Map<String, AuditWriter> writers = Map.of( "c1", new AuditWriter("c1", true, "c1topic", null, null), "c2", new AuditWriter("c2", false, "c2topic", mock(KafkaProducer.class), null) ); var auditService = new AuditService(writers); assertThat(auditService.isAuditTopic(KafkaCluster.builder().name("notExist").build(), "some")) .isFalse(); assertThat(auditService.isAuditTopic(KafkaCluster.builder().name("c1").build(), "c1topic")) .isFalse(); assertThat(auditService.isAuditTopic(KafkaCluster.builder().name("c2").build(), "c2topic")) .isTrue(); }
@Override public int run(InputStream in, PrintStream out, PrintStream err, List<String> args) throws Exception { if (args.isEmpty()) { printHelp(out); return 0; } OutputStream output = out; if (args.size() > 1) { output = Util.fileOrStdout(args.get(args.size() - 1), out); args = args.subList(0, args.size() - 1); } DataFileWriter<GenericRecord> writer = new DataFileWriter<>(new GenericDatumWriter<>()); Schema schema = null; Map<String, byte[]> metadata = new TreeMap<>(); String inputCodec = null; for (String inFile : expandsInputFiles(args)) { InputStream input = Util.fileOrStdin(inFile, in); DataFileStream<GenericRecord> reader = new DataFileStream<>(input, new GenericDatumReader<>()); if (schema == null) { // this is the first file - set up the writer, and store the // Schema & metadata we'll use. schema = reader.getSchema(); for (String key : reader.getMetaKeys()) { if (!DataFileWriter.isReservedMeta(key)) { byte[] metadatum = reader.getMeta(key); metadata.put(key, metadatum); writer.setMeta(key, metadatum); } } inputCodec = reader.getMetaString(DataFileConstants.CODEC); if (inputCodec == null) { inputCodec = DataFileConstants.NULL_CODEC; } writer.setCodec(CodecFactory.fromString(inputCodec)); writer.create(schema, output); } else { // check that we're appending to the same schema & metadata. if (!schema.equals(reader.getSchema())) { err.println("input files have different schemas"); reader.close(); return 1; } for (String key : reader.getMetaKeys()) { if (!DataFileWriter.isReservedMeta(key)) { byte[] metadatum = reader.getMeta(key); byte[] writersMetadatum = metadata.get(key); if (!Arrays.equals(metadatum, writersMetadatum)) { err.println("input files have different non-reserved metadata"); reader.close(); return 2; } } } String thisCodec = reader.getMetaString(DataFileConstants.CODEC); if (thisCodec == null) { thisCodec = DataFileConstants.NULL_CODEC; } if (!inputCodec.equals(thisCodec)) { err.println("input files have different codecs"); reader.close(); return 3; } } writer.appendAllFrom(reader, /* recompress */ false); reader.close(); } writer.close(); return 0; }
@Test void helpfulMessageWhenNoArgsGiven() throws Exception { int returnCode; try (ByteArrayOutputStream buffer = new ByteArrayOutputStream(1024)) { try (PrintStream out = new PrintStream(buffer)) { returnCode = new ConcatTool().run(System.in, out, System.err, Collections.emptyList()); } assertTrue(buffer.toString().trim().length() > 200, "should have lots of help"); } assertEquals(0, returnCode); }
public static boolean areCompatible(final SqlArgument actual, final ParamType declared) { return areCompatible(actual, declared, false); }
@Test public void shouldPassCompatibleSchemasWithImplicitCasting() { assertThat(ParamTypes.areCompatible(SqlArgument.of(SqlTypes.INTEGER), ParamTypes.LONG, true), is(true)); assertThat(ParamTypes.areCompatible(SqlArgument.of(SqlTypes.INTEGER), ParamTypes.DOUBLE, true), is(true)); assertThat(ParamTypes.areCompatible(SqlArgument.of(SqlTypes.INTEGER), ParamTypes.DECIMAL, true), is(true)); assertThat(ParamTypes.areCompatible(SqlArgument.of(SqlTypes.BIGINT), ParamTypes.DOUBLE, true), is(true)); assertThat(ParamTypes.areCompatible(SqlArgument.of(SqlTypes.BIGINT), ParamTypes.DECIMAL, true), is(true)); assertThat(ParamTypes.areCompatible(SqlArgument.of(SqlTypes.decimal(2, 1)), ParamTypes.DOUBLE, true), is(true)); }
public static String resolveMethodName(Method method) { if (method == null) { throw new IllegalArgumentException("Null method"); } String methodName = methodNameMap.get(method); if (methodName == null) { synchronized (LOCK) { methodName = methodNameMap.get(method); if (methodName == null) { StringBuilder sb = new StringBuilder(); String className = method.getDeclaringClass().getName(); String name = method.getName(); Class<?>[] params = method.getParameterTypes(); sb.append(className).append(":").append(name); sb.append("("); int paramPos = 0; for (Class<?> clazz : params) { sb.append(clazz.getCanonicalName()); if (++paramPos < params.length) { sb.append(","); } } sb.append(")"); methodName = sb.toString(); methodNameMap.put(method, methodName); } } } return methodName; }
@Test(expected = IllegalArgumentException.class) public void testResolveNullMethod() { MethodUtil.resolveMethodName(null); }
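A hedged companion sketch of the non-null path, assuming MethodUtil (the class exercised above) is importable and the calling test declares throws Exception: resolveMethodName renders the declaring class name, a colon, the method name, and the canonical parameter types joined by commas.
Method substring = String.class.getMethod("substring", int.class, int.class); assertEquals("java.lang.String:substring(int,int)", MethodUtil.resolveMethodName(substring));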
public static FileRewriteCoordinator get() { return INSTANCE; }
@Test public void testSortRewrite() throws NoSuchTableException, IOException { sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName); Dataset<Row> df = newDF(1000); df.coalesce(1).writeTo(tableName).append(); df.coalesce(1).writeTo(tableName).append(); df.coalesce(1).writeTo(tableName).append(); df.coalesce(1).writeTo(tableName).append(); Table table = validationCatalog.loadTable(tableIdent); Assert.assertEquals("Should produce 4 snapshots", 4, Iterables.size(table.snapshots())); try (CloseableIterable<FileScanTask> fileScanTasks = table.newScan().planFiles()) { String fileSetID = UUID.randomUUID().toString(); ScanTaskSetManager taskSetManager = ScanTaskSetManager.get(); taskSetManager.stageTasks(table, fileSetID, Lists.newArrayList(fileScanTasks)); // read original 4 files as 4 splits Dataset<Row> scanDF = spark .read() .format("iceberg") .option(SparkReadOptions.SCAN_TASK_SET_ID, fileSetID) .option(SparkReadOptions.SPLIT_SIZE, "134217728") .option(SparkReadOptions.FILE_OPEN_COST, "134217728") .load(tableName); // make sure we disable AQE and set the number of shuffle partitions as the target num files ImmutableMap<String, String> sqlConf = ImmutableMap.of( "spark.sql.shuffle.partitions", "2", "spark.sql.adaptive.enabled", "false"); withSQLConf( sqlConf, () -> { try { // write new files with sorted records scanDF .sort("id") .writeTo(tableName) .option(SparkWriteOptions.REWRITTEN_FILE_SCAN_TASK_SET_ID, fileSetID) .append(); } catch (NoSuchTableException e) { throw new RuntimeException("Could not replace files", e); } }); // commit the rewrite FileRewriteCoordinator rewriteCoordinator = FileRewriteCoordinator.get(); Set<DataFile> rewrittenFiles = taskSetManager.fetchTasks(table, fileSetID).stream() .map(t -> t.asFileScanTask().file()) .collect(Collectors.toSet()); Set<DataFile> addedFiles = rewriteCoordinator.fetchNewFiles(table, fileSetID); table.newRewrite().rewriteFiles(rewrittenFiles, addedFiles).commit(); } table.refresh(); Map<String, String> summary = table.currentSnapshot().summary(); Assert.assertEquals("Deleted files count must match", "4", summary.get("deleted-data-files")); Assert.assertEquals("Added files count must match", "2", summary.get("added-data-files")); Object rowCount = scalarSql("SELECT count(*) FROM %s", tableName); Assert.assertEquals("Row count must match", 4000L, rowCount); }
@Override public boolean checkCredentials(String username, String password) { if (username == null || password == null) { return false; } Credentials credentials = new Credentials(username, password); if (validCredentialsCache.contains(credentials)) { return true; } else if (invalidCredentialsCache.contains(credentials)) { return false; } boolean isValid = this.username.equals(username) && this.passwordHash.equals( generatePasswordHash( algorithm, salt, iterations, keyLength, password)); if (isValid) { validCredentialsCache.add(credentials); } else { invalidCredentialsCache.add(credentials); } return isValid; }
@Test public void testPBKDF2WithHmacSHA256_withoutColon() throws Exception { String algorithm = "PBKDF2WithHmacSHA256"; int iterations = 1000; int keyLength = 128; String hash = "B6:9C:5C:8A:10:3E:41:7B:BA:18:FC:E1:F2:0C:BC:D9:65:70:D3:53:AB:97:EE:2F:3F:A8:88:AF:43:EA:E6:D7:FB" + ":70:14:23:F9:51:29:5C:3A:9F:65:C3:20:EE:09:C9:C6:8A:B7:D3:0A:E1:F3:10:2B:9B:36:3F:1F:B6:1D:52:A7" + ":9C:CB:AD:55:25:46:C5:73:09:6C:38:9C:F2:FD:82:7F:90:E5:31:EF:7E:3E:6B:B2:0C:38:77:23:EC:3A:CF:29" + ":F7:E5:4D:4E:CC:35:7A:C2:E5:CB:E3:B3:E5:09:2B:CC:B9:40:26:A4:28:E9:5F:2D:18:B2:14:41:E7:4D:5B"; hash = hash.replace(":", ""); PBKDF2Authenticator PBKDF2Authenticator = new PBKDF2Authenticator( "/", VALID_USERNAME, hash, algorithm, SALT, iterations, keyLength); for (String username : TEST_USERNAMES) { for (String password : TEST_PASSWORDS) { boolean expectedIsAuthenticated = VALID_USERNAME.equals(username) && VALID_PASSWORD.equals(password); boolean actualIsAuthenticated = PBKDF2Authenticator.checkCredentials(username, password); assertEquals(expectedIsAuthenticated, actualIsAuthenticated); } } }
@Override protected Map<String, Object> toJsonMap(ILoggingEvent event) { final MapBuilder mapBuilder = new MapBuilder(timestampFormatter, customFieldNames, additionalFields, includes.size()) .addTimestamp("timestamp", isIncluded(EventAttribute.TIMESTAMP), event.getTimeStamp()) .add("level", isIncluded(EventAttribute.LEVEL), () -> String.valueOf(event.getLevel())) .add("thread", isIncluded(EventAttribute.THREAD_NAME), event::getThreadName) .add("marker", isIncluded(EventAttribute.MARKER) && event.getMarker() != null, () -> event.getMarker().getName()) .add("logger", isIncluded(EventAttribute.LOGGER_NAME), event::getLoggerName) .add("message", isIncluded(EventAttribute.MESSAGE), event::getFormattedMessage) .add("context", isIncluded(EventAttribute.CONTEXT_NAME), () -> event.getLoggerContextVO().getName()) .add("version", jsonProtocolVersion != null, jsonProtocolVersion) .add("exception", isIncluded(EventAttribute.EXCEPTION) && event.getThrowableProxy() != null, () -> throwableProxyConverter.convert(event)); final boolean includeMdc = isIncluded(EventAttribute.MDC); if (flattenMdc) { filterMdc(event.getMDCPropertyMap()).forEach((k,v) -> mapBuilder.add(k, includeMdc, v)); } else { mapBuilder.addMap("mdc", includeMdc, () -> filterMdc(event.getMDCPropertyMap())); } final boolean includeCallerData = isIncluded(EventAttribute.CALLER_DATA); final StackTraceElement[] callerData = event.getCallerData(); if (includeCallerData && callerData.length >= 1) { final StackTraceElement stackTraceElement = callerData[0]; mapBuilder.add("caller_class_name", includeCallerData, stackTraceElement.getClassName()); mapBuilder.add("caller_method_name", includeCallerData, stackTraceElement.getMethodName()); mapBuilder.add("caller_file_name", includeCallerData, stackTraceElement.getFileName()); mapBuilder.addNumber("caller_line_number", includeCallerData, stackTraceElement.getLineNumber()); } return mapBuilder.build(); }
@Test void testAddNewField() { final Map<String, Object> additionalFields = Map.of( "serviceName", "userService", "serviceBuild", 207); Map<String, Object> map = new EventJsonLayout(jsonFormatter, timestampFormatter, throwableProxyConverter, DEFAULT_EVENT_ATTRIBUTES, Collections.emptyMap(), additionalFields, Collections.emptySet(), false) .toJsonMap(event); final HashMap<String, Object> expectedFields = new HashMap<>(defaultExpectedFields); expectedFields.put("serviceName", "userService"); expectedFields.put("serviceBuild", 207); assertThat(map).isEqualTo(expectedFields); }
@Override public Uuid clientInstanceId(Duration timeout) { if (timeout.isNegative()) { throw new IllegalArgumentException("The timeout cannot be negative."); } if (!clientTelemetryEnabled) { throw new IllegalStateException("Telemetry is not enabled. Set config `" + AdminClientConfig.ENABLE_METRICS_PUSH_CONFIG + "` to `true`."); } if (clientInstanceId != null) { return clientInstanceId; } final long now = time.milliseconds(); final KafkaFutureImpl<Uuid> future = new KafkaFutureImpl<>(); runnable.call(new Call("getTelemetrySubscriptions", calcDeadlineMs(now, (int) timeout.toMillis()), new LeastLoadedNodeProvider()) { @Override GetTelemetrySubscriptionsRequest.Builder createRequest(int timeoutMs) { return new GetTelemetrySubscriptionsRequest.Builder(new GetTelemetrySubscriptionsRequestData(), true); } @Override void handleResponse(AbstractResponse abstractResponse) { GetTelemetrySubscriptionsResponse response = (GetTelemetrySubscriptionsResponse) abstractResponse; if (response.error() != Errors.NONE) { future.completeExceptionally(response.error().exception()); } else { future.complete(response.data().clientInstanceId()); } } @Override void handleFailure(Throwable throwable) { future.completeExceptionally(throwable); } }, now); try { clientInstanceId = future.get(); } catch (Exception e) { log.error("Error occurred while fetching client instance id", e); throw new KafkaException("Error occurred while fetching client instance id", e); } return clientInstanceId; }
@Test public void testClientInstanceId() { try (AdminClientUnitTestEnv env = mockClientEnv()) { Uuid expected = Uuid.randomUuid(); GetTelemetrySubscriptionsResponseData responseData = new GetTelemetrySubscriptionsResponseData().setClientInstanceId(expected).setErrorCode(Errors.NONE.code()); env.kafkaClient().prepareResponse( request -> request instanceof GetTelemetrySubscriptionsRequest, new GetTelemetrySubscriptionsResponse(responseData)); Uuid result = env.adminClient().clientInstanceId(Duration.ofSeconds(1)); assertEquals(expected, result); } }
public <T> T createProxy(Class<T> targetClass, T target) { return PerfStatsCollector.getInstance() .measure( "createProxyInstance", () -> factories.get(targetClass).createProxy(targetClass, target)); }
@Test public void cachesProxyClass() { ProxyMaker maker = new ProxyMaker(IDENTITY_NAME); Thing thing1 = mock(Thing.class); Thing thing2 = mock(Thing.class); Thing proxy1 = maker.createProxy(Thing.class, thing1); Thing proxy2 = maker.createProxy(Thing.class, thing2); assertThat(proxy1.getClass()).isSameInstanceAs(proxy2.getClass()); }
@Override public <X> FunctionRecord.FunctionRecordBuilder<X> newOutputRecordBuilder(Schema<X> schema) { return FunctionRecord.from(this, schema); }
@Test public void testNewOutputRecordBuilder() { Map<String, String> properties = new HashMap<>(); properties.put("prop-key", "prop-value"); long now = System.currentTimeMillis(); context.setCurrentMessageContext(new Record<String>() { @Override public Optional<String> getTopicName() { return Optional.of("input-topic"); } @Override public Optional<String> getKey() { return Optional.of("input-key"); } @Override public Schema<String> getSchema() { return Schema.STRING; } @Override public String getValue() { return "input-value"; } @Override public Optional<Long> getEventTime() { return Optional.of(now); } @Override public Optional<String> getPartitionId() { return Optional.of("input-partition-id"); } @Override public Optional<Integer> getPartitionIndex() { return Optional.of(42); } @Override public Optional<Long> getRecordSequence() { return Optional.of(43L); } @Override public Map<String, String> getProperties() { return properties; } }); Record<Integer> record = context.newOutputRecordBuilder(Schema.INT32).build(); assertEquals(record.getSchema(), Schema.INT32); assertEquals(record.getTopicName().get(), "input-topic"); assertEquals(record.getKey().get(), "input-key"); assertEquals(record.getEventTime(), Optional.of(now)); assertEquals(record.getPartitionId().get(), "input-partition-id"); assertEquals(record.getPartitionIndex(), Optional.of(42)); assertEquals(record.getRecordSequence(), Optional.of(43L)); assertTrue(record.getProperties().containsKey("prop-key")); assertEquals(record.getProperties().get("prop-key"), "prop-value"); assertNull(record.getValue()); }
@Override public void exit(int count, Object... args) throws ErrorEntryFreeException { trueExit(count, args); }
@Test public void testExitNotMatchCurEntry() { String contextName = "context-rpc"; ContextUtil.enter(contextName); Context context = ContextUtil.getContext(); CtEntry entry1 = null; CtEntry entry2 = null; try { entry1 = new CtEntry(new StringResourceWrapper("res1", EntryType.IN), null, ContextUtil.getContext()); assertSame(entry1, context.getCurEntry()); entry2 = new CtEntry(new StringResourceWrapper("res2", EntryType.IN), null, ContextUtil.getContext()); assertSame(entry2, context.getCurEntry()); // Forget to exit for entry 2... // Directly exit for entry 1, then boom... entry1.exit(); } catch (ErrorEntryFreeException ex) { assertNotNull(entry1); assertNotNull(entry2); assertNull(entry1.context); assertNull(entry2.context); assertNull(context.getCurEntry()); return; } finally { ContextUtil.exit(); } fail("Mismatch entry-exit should throw an ErrorEntryFreeException"); }
public static List<Common.MessageFormatting> dbMessageFormattingToWs(@Nullable DbIssues.MessageFormattings dbFormattings) { if (dbFormattings == null) { return List.of(); } return dbMessageFormattingListToWs(dbFormattings.getMessageFormattingList()); }
@Test public void nullFormattingShouldBeEmptyList() { assertThat(MessageFormattingUtils.dbMessageFormattingToWs(null)).isEmpty(); }
@Nullable @Override public Message decode(@Nonnull RawMessage rawMessage) { final String msg = new String(rawMessage.getPayload(), charset); try (Timer.Context ignored = this.decodeTime.time()) { final ResolvableInetSocketAddress address = rawMessage.getRemoteAddress(); final InetSocketAddress remoteAddress; if (address == null) { remoteAddress = null; } else { remoteAddress = address.getInetSocketAddress(); } return parse(msg, remoteAddress == null ? null : remoteAddress.getAddress(), rawMessage.getTimestamp()); } }
@Test public void rfc3164_section5_4_messages() { // See https://tools.ietf.org/html/rfc3164#section-5.4 final Map<String, Map<String, Object>> rfc3164messages = ImmutableMap.of( "<34>Oct 11 22:14:15 mymachine su: 'su root' failed for lonvick on /dev/pts/8", ImmutableMap.of( "timestamp", new DateTime(YEAR + "-10-11T22:14:15"), "source", "mymachine", "level", 2, "facility", "security/authorization", "message", "mymachine su: 'su root' failed for lonvick on /dev/pts/8"), "<13>Feb 5 17:32:18 10.0.0.99 Use the BFG!", ImmutableMap.of( "timestamp", new DateTime(YEAR + "-02-05T17:32:18"), "source", "10.0.0.99", "level", 5, "facility", "user-level", "message", "10.0.0.99 Use the BFG!") /* FAILING "<165>Aug 24 05:34:00 CST 1987 mymachine myproc[10]: %% It's time to make the do-nuts. %% Ingredients: Mix=OK, Jelly=OK # Devices: Mixer=OK, Jelly_Injector=OK, Frier=OK # Transport: Conveyer1=OK, Conveyer2=OK # %%", ImmutableMap.of( "timestamp", new DateTime("1987-08-24T05:34:00", DateTimeZone.forID("CST6CDT")), "source", "mymachine", "level", 5, "facility", "local4" ) */ /* FAILING "<0>1990 Oct 22 10:52:01 TZ-6 scapegoat.dmz.example.org 10.1.2.3 sched[0]: That's All Folks!", ImmutableMap.of( "timestamp", new DateTime("1990-10-22T10:52:01", DateTimeZone.forID("Etc/GMT-6")), "source", "scapegoat.dmz.example.org", "level", 0, "facility", "kernel" ) */ ); for (Map.Entry<String, Map<String, Object>> entry : rfc3164messages.entrySet()) { final Message message = codec.decode(buildRawMessage(entry.getKey())); assertThat(message).isNotNull(); assertThat(message.getFields()).containsAllEntriesOf(entry.getValue()); } }
@Override public void validateQuery( final SessionConfig config, final ExecutionPlan executionPlan, final Collection<QueryMetadata> runningQueries ) { validateCacheBytesUsage( runningQueries.stream() .filter(q -> q instanceof PersistentQueryMetadata) .collect(Collectors.toList()), config, config.getConfig(false) .getLong(KsqlConfig.KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING) ); }
@Test public void shouldNotValidateSharedRuntimesWhenCreatingANewRuntimeWouldGoOverTheLimit() { // Given: final SessionConfig config = SessionConfig.of( new KsqlConfig(ImmutableMap.of(KsqlConfig.KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING, 50, KsqlConfig.KSQL_SHARED_RUNTIME_ENABLED, true)), ImmutableMap.of( StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 50, KsqlConfig.KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING, 500 ) ); // When/Then: assertThrows( KsqlException.class, () -> queryValidator.validateQuery(config, plan, queries) ); }
public Optional<HostFailurePath> worstCaseHostLossLeadingToFailure() { Map<Node, Integer> timesNodeCanBeRemoved = computeMaximalRepeatedRemovals(); return greedyHeuristicFindFailurePath(timesNodeCanBeRemoved); }
@Test public void testEdgeCaseFailurePaths() { { CapacityCheckerTester tester = new CapacityCheckerTester(); tester.createNodes(1, 1, 0, new NodeResources(1, 10, 100, 1), 10, 0, new NodeResources(1, 10, 100, 1), 10); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertFalse("Computing worst case host loss with no hosts should return an empty optional.", failurePath.isPresent()); } // Odd edge case that should never be able to occur in prod { CapacityCheckerTester tester = new CapacityCheckerTester(); tester.createNodes(1, 10, 10, new NodeResources(10, 1000, 10000, 1), 100, 1, new NodeResources(10, 1000, 10000, 1), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); assertTrue("Computing worst case host loss if all hosts have to be removed should result in a non-empty failureReason with empty nodes.", failurePath.get().failureReason.tenant.isEmpty() && failurePath.get().failureReason.host.isEmpty()); assertEquals(tester.nodeRepository.nodes().list().nodeType(NodeType.host).size(), failurePath.get().hostsCausingFailure.size()); } { CapacityCheckerTester tester = new CapacityCheckerTester(); tester.createNodes(3, 30, 10, new NodeResources(0, 0, 10000, 1), 1000, 0, new NodeResources(0, 0, 0, 0), 0); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("When there are multiple lacking resources, all failures are multipleReasonFailures", failureReasons.size(), failureReasons.multipleReasonFailures().size()); assertEquals(0, failureReasons.singularReasonFailures().size()); } else { fail(); } } }
public StepExpression createExpression(StepDefinition stepDefinition) { List<ParameterInfo> parameterInfos = stepDefinition.parameterInfos(); if (parameterInfos.isEmpty()) { return createExpression( stepDefinition.getPattern(), stepDefinitionDoesNotTakeAnyParameter(stepDefinition), false); } ParameterInfo parameterInfo = parameterInfos.get(parameterInfos.size() - 1); return createExpression( stepDefinition.getPattern(), parameterInfo.getTypeResolver()::resolve, parameterInfo.isTransposed()); }
@Test void throws_for_unknown_parameter_types() { StepDefinition stepDefinition = new StubStepDefinition("Given a {unknownParameterType}"); List<Envelope> events = new ArrayList<>(); bus.registerHandlerFor(Envelope.class, events::add); CucumberException exception = assertThrows( CucumberException.class, () -> stepExpressionFactory.createExpression(stepDefinition)); assertThat(exception.getMessage(), is("" + "Could not create a cucumber expression for 'Given a {unknownParameterType}'.\n" + "It appears you did not register a parameter type." )); assertThat(events, iterableWithSize(1)); assertNotNull(events.get(0).getUndefinedParameterType()); }
public static SchemaKStream<?> buildSource( final PlanBuildContext buildContext, final DataSource dataSource, final QueryContext.Stacker contextStacker ) { final boolean windowed = dataSource.getKsqlTopic().getKeyFormat().isWindowed(); switch (dataSource.getDataSourceType()) { case KSTREAM: return windowed ? buildWindowedStream( buildContext, dataSource, contextStacker ) : buildStream( buildContext, dataSource, contextStacker ); case KTABLE: return windowed ? buildWindowedTable( buildContext, dataSource, contextStacker ) : buildTable( buildContext, dataSource, contextStacker ); default: throw new UnsupportedOperationException("Source type:" + dataSource.getDataSourceType()); } }
@Test public void shouldCreateNonWindowedTableSourceV2WithNewPseudoColumnVersionIfNoOldQuery() { // Given: givenNonWindowedTable(); // When: final SchemaKStream<?> result = SchemaKSourceFactory.buildSource( buildContext, dataSource, contextStacker ); // Then: assertThat(((TableSource) result.getSourceStep()).getPseudoColumnVersion(), equalTo(CURRENT_PSEUDOCOLUMN_VERSION_NUMBER)); assertValidSchema(result); }
@Override public boolean test(final String functionName) { if (functionName == null || functionName.trim().isEmpty() || JAVA_RESERVED_WORDS.contains(functionName.toLowerCase()) || KSQL_RESERVED_WORDS.contains(functionName.toLowerCase())) { return false; } return isValidJavaIdentifier(functionName); }
@Test public void shouldNotAllowJavaReservedWords() { // not exhaustive.. assertFalse(validator.test("enum")); assertFalse(validator.test("static")); assertFalse(validator.test("final")); assertFalse(validator.test("do")); assertFalse(validator.test("while")); assertFalse(validator.test("double")); assertFalse(validator.test("float")); assertFalse(validator.test("private")); assertFalse(validator.test("public")); assertFalse(validator.test("goto")); assertFalse(validator.test("default")); }
@Override public boolean handleResult(int returncode, GoPublisher goPublisher) { if (returncode == HttpURLConnection.HTTP_NOT_FOUND) { deleteQuietly(checksumFile); goPublisher.taggedConsumeLineWithPrefix(GoPublisher.ERR, "[WARN] The md5checksum property file was not found on the server. Hence, Go can not verify the integrity of the artifacts."); return true; } if (returncode == HttpURLConnection.HTTP_NOT_MODIFIED) { LOG.info("[Agent Fetch Artifact] Not downloading checksum file as it has not changed"); return true; } if (returncode == HttpURLConnection.HTTP_OK) { LOG.info("[Agent Fetch Artifact] Saved checksum property file [{}]", checksumFile); return true; } return returncode < HttpURLConnection.HTTP_BAD_REQUEST; }
@Test public void shouldDeleteOldMd5ChecksumFileIfItWasNotFoundOnTheServer() throws IOException { StubGoPublisher goPublisher = new StubGoPublisher(); file.createNewFile(); boolean isSuccessful = checksumFileHandler.handleResult(HttpServletResponse.SC_NOT_FOUND, goPublisher); assertThat(isSuccessful, is(true)); assertThat(file.exists(), is(false)); }
static Coder<Message> of() { return INSTANCE; }
@Test public void testConsistentWithEquals() { // some attributes might be omitted assertThat(SqsMessageCoder.of().consistentWithEquals()).isFalse(); }
public void forceRemoveFile(@NonNull Path path) throws IOException { for (int retryAttempts = 0; ; retryAttempts++) { Optional<IOException> maybeError = tryRemoveFile(path); if (maybeError.isEmpty()) return; if (retryStrategy.shouldRetry(retryAttempts)) continue; IOException error = maybeError.get(); throw new IOException(retryStrategy.failureMessage(path, retryAttempts), error); } }
@Test public void testForceRemoveFile() throws IOException { File file = tmp.newFile(); touchWithFileName(file); PathRemover remover = PathRemover.newSimpleRemover(); remover.forceRemoveFile(file.toPath()); assertFalse("Unable to delete file: " + file, file.exists()); }
@Override public ImportResult importItem( UUID jobId, IdempotentImportExecutor idempotentImportExecutor, TokensAndUrlAuthData authData, PhotosContainerResource resource) throws Exception { KoofrClient koofrClient = koofrClientFactory.create(authData); monitor.debug( () -> String.format( "%s: Importing %s albums and %s photos before transmogrification", jobId, resource.getAlbums().size(), resource.getPhotos().size())); // Make the data Koofr compatible resource.transmogrify(transmogrificationConfig); monitor.debug( () -> String.format( "%s: Importing %s albums and %s photos after transmogrification", jobId, resource.getAlbums().size(), resource.getPhotos().size())); for (PhotoAlbum album : resource.getAlbums()) { // Create a Koofr folder and then save the id with the mapping data idempotentImportExecutor.executeAndSwallowIOExceptions( album.getId(), album.getName(), () -> createAlbumFolder(album, koofrClient)); } final LongAdder totalImportedFilesSizes = new LongAdder(); for (PhotoModel photoModel : resource.getPhotos()) { idempotentImportExecutor.importAndSwallowIOExceptions( photoModel, photo -> { ItemImportResult<String> fileImportResult = importSinglePhoto(photoModel, jobId, idempotentImportExecutor, koofrClient); if (fileImportResult != null && fileImportResult.hasBytes()) { totalImportedFilesSizes.add(fileImportResult.getBytes()); } return fileImportResult; }); } return ImportResult.OK.copyWithBytes(totalImportedFilesSizes.longValue()); }
@Test public void testImportItemFromJobStore() throws Exception { ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[] {0, 1, 2, 3, 4}); when(client.ensureRootFolder()).thenReturn("/root"); when(jobStore.getStream(any(), any())).thenReturn(new InputStreamWrapper(inputStream, 5L)); doNothing().when(jobStore).removeData(any(), anyString()); when(executor.getCachedValue(eq("id1"))).thenReturn("/root/Album 1"); UUID jobId = UUID.randomUUID(); Collection<PhotoAlbum> albums = ImmutableList.of(new PhotoAlbum("id1", "Album 1", "This is a fake album")); Collection<PhotoModel> photos = ImmutableList.of( new PhotoModel( "pic1.jpg", "http://fake.com/1.jpg", "A pic", "image/jpeg", "p1", "id1", true), new PhotoModel( "pic2.png", "https://fake.com/2.png", "fine art", "image/png", "p2", "id1", true)); PhotosContainerResource resource = spy(new PhotosContainerResource(albums, photos)); importer.importItem(jobId, executor, authData, resource); InOrder clientInOrder = Mockito.inOrder(client); verify(resource).transmogrify(any(KoofrTransmogrificationConfig.class)); clientInOrder.verify(client).ensureRootFolder(); clientInOrder.verify(client).ensureFolder("/root", "Album 1"); clientInOrder .verify(client) .uploadFile( eq("/root/Album 1"), eq("pic1.jpg"), any(), eq("image/jpeg"), isNull(), eq("A pic")); clientInOrder .verify(client) .uploadFile( eq("/root/Album 1"), eq("pic2.png"), any(), eq("image/png"), isNull(), eq("fine art")); verify(jobStore, Mockito.times(2)).removeData(any(), anyString()); }
void onFatalError(final Throwable t) { try { log.error("Fatal error occurred in TaskExecutor {}.", getAddress(), t); } catch (Throwable ignored) { } // The fatal error handler implementation should make sure that this call is non-blocking fatalErrorHandler.onFatalError(t); }
@Test @Timeout(10) void testTerminationOnFatalError() throws Throwable { try (TaskSubmissionTestEnvironment env = new Builder(jobId) .setConfiguration(configuration) .build(EXECUTOR_EXTENSION.getExecutor())) { String testExceptionMsg = "Test exception of fatal error."; env.getTaskExecutor().onFatalError(new Exception(testExceptionMsg)); Throwable exception = env.getTestingFatalErrorHandler().getErrorFuture().get(); env.getTestingFatalErrorHandler().clearError(); assertThat(exception.getMessage()).startsWith(testExceptionMsg); } }
@Override public void execute(SensorContext context) { analyse(context, Xoo.KEY, XooRulesDefinition.XOO_REPOSITORY); analyse(context, Xoo2.KEY, XooRulesDefinition.XOO2_REPOSITORY); }
@Test public void testProvideGap_before_5_5() throws IOException { DefaultInputFile inputFile = new TestInputFileBuilder("foo", "src/Foo.xoo") .setLanguage(Xoo.KEY) .initMetadata("a\nb\nc\nd\ne\nf\ng\nh\ni\n") .build(); SensorContextTester context = SensorContextTester.create(temp.newFolder()); context.fileSystem().add(inputFile); context.setSettings(new MapSettings().setProperty(OneIssuePerLineSensor.EFFORT_TO_FIX_PROPERTY, "1.2")); context.setRuntime(SonarRuntimeImpl.forSonarQube(Version.parse("5.4"), SonarQubeSide.SCANNER, SonarEdition.COMMUNITY)); sensor.execute(context); assertThat(context.allIssues()).hasSize(10); // One issue per line for (Issue issue : context.allIssues()) { assertThat(issue.gap()).isEqualTo(1.2d); } }
public Command create( final ConfiguredStatement<? extends Statement> statement, final KsqlExecutionContext context) { return create(statement, context.getServiceContext(), context); }
@Test public void shouldCreateCommandForTerminateAllQuery() { // Given: givenTerminateAll(); // When: final Command command = commandFactory.create(configuredStatement, executionContext); // Then: assertThat(command, is(Command.of(configuredStatement))); }
@Override public PageResult<DictDataDO> getDictDataPage(DictDataPageReqVO pageReqVO) { return dictDataMapper.selectPage(pageReqVO); }
@Test public void testGetDictDataPage() { // mock data DictDataDO dbDictData = randomPojo(DictDataDO.class, o -> { // the record the query below should match o.setLabel("芋艿"); o.setDictType("yunai"); o.setStatus(CommonStatusEnum.ENABLE.getStatus()); }); dictDataMapper.insert(dbDictData); // test label mismatch dictDataMapper.insert(cloneIgnoreId(dbDictData, o -> o.setLabel("艿"))); // test dictType mismatch dictDataMapper.insert(cloneIgnoreId(dbDictData, o -> o.setDictType("nai"))); // test status mismatch dictDataMapper.insert(cloneIgnoreId(dbDictData, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus()))); // prepare parameters DictDataPageReqVO reqVO = new DictDataPageReqVO(); reqVO.setLabel("芋"); reqVO.setDictType("yunai"); reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus()); // invoke PageResult<DictDataDO> pageResult = dictDataService.getDictDataPage(reqVO); // assert assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbDictData, pageResult.getList().get(0)); }
@Override public abstract IsmPrefixReaderIterator iterator() throws IOException;
@Test public void testReadKeyThatEncodesToEmptyByteArray() throws Exception { File tmpFile = tmpFolder.newFile(); IsmRecordCoder<Void> coder = IsmRecordCoder.of(1, 0, ImmutableList.<Coder<?>>of(VoidCoder.of()), VoidCoder.of()); IsmSink<Void> sink = new IsmSink<>( FileSystems.matchNewResource(tmpFile.getPath(), false), coder, BLOOM_FILTER_SIZE_LIMIT); IsmRecord<Void> element = IsmRecord.of(Arrays.asList((Void) null), (Void) null); try (SinkWriter<WindowedValue<IsmRecord<Void>>> writer = sink.writer()) { writer.add(new ValueInEmptyWindows<>(element)); } Cache< IsmShardKey, WeightedValue<NavigableMap<RandomAccessData, WindowedValue<IsmRecord<Void>>>>> cache = CacheBuilder.newBuilder() .weigher(Weighers.fixedWeightKeys(1)) .maximumWeight(10_000) .build(); IsmReader<Void> reader = new IsmReaderImpl<>( FileSystems.matchSingleFileSpec(tmpFile.getAbsolutePath()).resourceId(), coder, cache); IsmReader<Void>.IsmPrefixReaderIterator iterator = reader.iterator(); assertTrue(iterator.start()); assertEquals( coder.structuralValue(element), coder.structuralValue(iterator.getCurrent().getValue())); }
@Override public T build(ConfigurationSourceProvider provider, String path) throws IOException, ConfigurationException { try (InputStream input = provider.open(requireNonNull(path))) { final JsonNode node = mapper.readTree(createParser(input)); if (node == null) { throw ConfigurationParsingException .builder("Configuration at " + path + " must not be empty") .build(path); } return build(node, path); } catch (JsonParseException e) { throw ConfigurationParsingException .builder("Malformed " + formatName) .setCause(e) .setLocation(e.getLocation()) .setDetail(e.getMessage()) .build(path); } }
@Test void loadsValidConfigFiles() throws Exception { final Example example = factory.build(configurationSourceProvider, validFile); assertThat(example.getName()) .isEqualTo("Coda Hale"); assertThat(example.getType()) .satisfies(type -> assertThat(type).element(0).isEqualTo("coder")) .satisfies(type -> assertThat(type).element(1).isEqualTo("wizard")); assertThat(example.getProperties()) .contains(MapEntry.entry("debug", "true"), MapEntry.entry("settings.enabled", "false")); assertThat(example.getServers()) .hasSize(3) .element(0) .extracting(ExampleServer::getPort) .isEqualTo(8080); }
public static FieldScope ignoringFields(int firstFieldNumber, int... rest) { return FieldScopeImpl.createIgnoringFields(asList(firstFieldNumber, rest)); }
@Test public void testIgnoreSubMessageField() { Message message = parse("o_int: 1 o_sub_test_message: { o_int: 2 }"); Message diffMessage = parse("o_int: 2 o_sub_test_message: { o_int: 2 }"); Message eqMessage1 = parse("o_int: 1"); Message eqMessage2 = parse("o_int: 1 o_sub_test_message: {}"); Message eqMessage3 = parse("o_int: 1 o_sub_test_message: { o_int: 3 r_string: \"x\" }"); int fieldNumber = getFieldNumber("o_sub_test_message"); expectThat(diffMessage).ignoringFields(fieldNumber).isNotEqualTo(message); expectThat(eqMessage1).ignoringFields(fieldNumber).isEqualTo(message); expectThat(eqMessage2).ignoringFields(fieldNumber).isEqualTo(message); expectThat(eqMessage3).ignoringFields(fieldNumber).isEqualTo(message); expectFailureWhenTesting().that(diffMessage).ignoringFields(fieldNumber).isEqualTo(message); expectIsEqualToFailed(); expectThatFailure().hasMessageThat().contains("modified: o_int: 1 -> 2"); expectFailureWhenTesting().that(eqMessage3).ignoringFields(fieldNumber).isNotEqualTo(message); expectIsNotEqualToFailed(); expectThatFailure().hasMessageThat().contains("ignored: o_sub_test_message"); }
static String determinePackageName(Path baseDir, String basePackageName, Path classFile) { String subPackageName = determineSubpackageName(baseDir, classFile); return of(basePackageName, subPackageName) .filter(value -> !value.isEmpty()) // default package .collect(joining(PACKAGE_SEPARATOR_STRING)); }
@Test void determinePackageNameFromRootPackage() { Path baseDir = Paths.get("path", "to"); String basePackageName = ""; Path classFile = Paths.get("path", "to", "com", "example", "app", "App.class"); String packageName = ClasspathSupport.determinePackageName(baseDir, basePackageName, classFile); assertEquals("com.example.app", packageName); }
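A hedged companion case, assuming (as the root-package example above implies) that the sub-package is derived from the directories between baseDir and the class file, and that a non-empty basePackageName is joined to it with a dot:
Path baseDir = Paths.get("path", "to"); Path classFile = Paths.get("path", "to", "example", "app", "App.class"); assertEquals("com.example.app", ClasspathSupport.determinePackageName(baseDir, "com", classFile));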
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter, MetricsRecorder metricsRecorder, BufferSupplier bufferSupplier) { if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) { // check the magic value if (!records.hasMatchingMagic(toMagic)) return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder); else // Do in-place validation, offset assignment and maybe set timestamp return assignOffsetsNonCompressed(offsetCounter, metricsRecorder); } else return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier); }
@Test void testInvalidCreateTimeNonCompressedV1() { long now = System.currentTimeMillis(); MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V1, now - 1001L, Compression.NONE); assertThrows(RecordValidationException.class, () -> new LogValidator( records, topicPartition, time, CompressionType.NONE, Compression.NONE, false, RecordBatch.MAGIC_VALUE_V1, TimestampType.CREATE_TIME, 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, AppendOrigin.CLIENT, MetadataVersion.latestTesting() ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() )); }
static IndexComponentFilter findBestComponentFilter( IndexType type, List<IndexComponentCandidate> candidates, QueryDataType converterType ) { // First look for equality filters, assuming that they are more selective than ranges IndexComponentFilter equalityComponentFilter = searchForEquality(candidates, converterType); if (equalityComponentFilter != null) { return equalityComponentFilter; } // Look for ranges filters return searchForRange(type, candidates, converterType); }
@Test public void when_twoEqualitiesFilterPresentAndNoBetterChoice_then_itIsUsed() { IndexComponentFilter bestFilter = IndexComponentFilterResolver.findBestComponentFilter( indexType, WITH_TWO_EQUALITIES_AS_BEST_CANDIDATES, QUERY_DATA_TYPE ); assertEquals(TWO_EQUALITIES_CANDIDATE.getFilter(), bestFilter.getFilter()); }
public static UThrow create(UExpression expression) { return new AutoValue_UThrow(expression); }
@Test public void equality() { new EqualsTester() .addEqualityGroup( UThrow.create( UNewClass.create(UClassIdent.create("java.lang.IllegalArgumentException")))) .addEqualityGroup( UThrow.create(UNewClass.create(UClassIdent.create("java.lang.IllegalStateException")))) .testEquals(); }
@Override public MaterializedWindowedTable windowed() { return new KsqlMaterializedWindowedTable(inner.windowed()); }
@Test public void shouldPipeTransformsWindowed_fullTableScan() { // Given: final MaterializedWindowedTable table = materialization.windowed(); givenNoopProject(); when(filter.apply(any(), any(), any())).thenReturn(Optional.of(transformed)); // When: final Iterator<WindowedRow> result = table.get(partition, windowStartBounds, windowEndBounds); result.next(); result.next(); // Then: verify(project).apply( new Windowed<>(aKey, streamWindow), transformed, new PullProcessingContext(aRowtime) ); verify(project).apply( new Windowed<>(aKey2, streamWindow), transformed, new PullProcessingContext(aRowtime) ); }
@SuppressWarnings("unchecked") public <V extends FileAttributeView> @Nullable V getFileAttributeView( FileLookup lookup, Class<V> type) { AttributeProvider provider = providersByViewType.get(type); if (provider != null) { return (V) provider.view(lookup, createInheritedViews(lookup, provider)); } return null; }
@SuppressWarnings("ConstantConditions") @Test public void testGetFileAttributeView() throws IOException { final File file = createFile(); service.setInitialAttributes(file); FileLookup fileLookup = new FileLookup() { @Override public File lookup() throws IOException { return file; } }; assertThat(service.getFileAttributeView(fileLookup, TestAttributeView.class)).isNotNull(); assertThat(service.getFileAttributeView(fileLookup, BasicFileAttributeView.class)).isNotNull(); TestAttributes attrs = service.getFileAttributeView(fileLookup, TestAttributeView.class).readAttributes(); assertThat(attrs.foo()).isEqualTo("hello"); assertThat(attrs.bar()).isEqualTo(0); assertThat(attrs.baz()).isEqualTo(1); }
@Override public synchronized boolean tryReturnRecordAt(boolean isAtSplitPoint, ByteKey recordStart) { if (done) { return false; } checkState(!(position == null && !isAtSplitPoint), "The first record must be at a split point"); checkState( !(recordStart.compareTo(range.getStartKey()) < 0), "Trying to return record which is before the start key"); checkState( !(position != null && recordStart.compareTo(position) < 0), "Trying to return record which is before the last-returned record"); if (position == null) { LOG.info( "Adjusting range start from {} to {} as position of first returned record", range.getStartKey(), recordStart); range = range.withStartKey(recordStart); } position = recordStart; if (isAtSplitPoint) { if (!range.containsKey(recordStart)) { done = true; return false; } ++splitPointsSeen; } return true; }
@Test public void testTryReturnRecordAt() { ByteKeyRangeTracker tracker = ByteKeyRangeTracker.of(INITIAL_RANGE); // Should be able to emit at the same key twice, should that happen. // Should be able to emit within range (in order, but system guarantees won't try out of order). // Should not be able to emit past end of range. assertTrue(tracker.tryReturnRecordAt(true, INITIAL_START_KEY)); assertTrue(tracker.tryReturnRecordAt(true, INITIAL_START_KEY)); assertTrue(tracker.tryReturnRecordAt(true, INITIAL_MIDDLE_KEY)); assertTrue(tracker.tryReturnRecordAt(true, INITIAL_MIDDLE_KEY)); assertTrue(tracker.tryReturnRecordAt(true, BEFORE_END_KEY)); assertFalse(tracker.tryReturnRecordAt(true, END_KEY)); // after end assertFalse(tracker.tryReturnRecordAt(true, BEFORE_END_KEY)); // false because done }
public static String formatAnnotation(Annotation annotation) { String annotationName = annotation.annotationType().getName(); String annotationNameWithoutPackage = annotationName.substring(annotationName.lastIndexOf('.') + 1).replace('$', '.'); String annotationToString = annotation.toString(); String values = annotationToString.substring(annotationToString.indexOf('(')); return String.format("%s%s", annotationNameWithoutPackage, values); }
@Test public void testFormatAnnotationDefault() throws Exception { // Java 11 puts quotes in unparsed string, Java 8 does not. // It would be an improvement for our own formatter to make it have the // Java 11 behavior even when running on Java 8, but we can just // wait it out. assertThat( ReflectHelpers.formatAnnotation(Options.class.getMethod("getString").getAnnotations()[0]), anyOf( equalTo("Default.String(value=package.OuterClass$InnerClass#method())"), equalTo("Default.String(value=\"package.OuterClass$InnerClass#method()\")"))); }
@InvokeOnHeader(Web3jConstants.NET_LISTENING) void netListening(Message message) throws IOException { Request<?, NetListening> netListeningRequest = web3j.netListening(); setRequestId(message, netListeningRequest); NetListening response = netListeningRequest.send(); boolean hasError = checkForError(message, response); if (!hasError) { message.setBody(response.isListening()); } }
@Test public void netListeningTest() throws Exception { NetListening response = Mockito.mock(NetListening.class); Mockito.when(mockWeb3j.netListening()).thenReturn(request); Mockito.when(request.send()).thenReturn(response); Mockito.when(response.isListening()).thenReturn(true); Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.NET_LISTENING); template.send(exchange); Boolean body = exchange.getIn().getBody(Boolean.class); assertTrue(body); }
public static long oneTimeUnitMillisecond(TimeUnit timeUnit) { long millisecond = 0; switch (timeUnit) { case MILLISECONDS: millisecond = 1; break; case SECONDS: millisecond = 1000; break; case MINUTES: millisecond = 60000; break; case HOURS: millisecond = 3600000; break; case DAYS: millisecond = 86400000; break; } return millisecond; }
@Test public void testTimeUnitToMillisecond() { Assert.assertEquals(1000, TimeUtil.oneTimeUnitMillisecond(TimeUnit.SECONDS)); }
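The test above covers only SECONDS; hedged assertions for the remaining branches of the switch in oneTimeUnitMillisecond would read:
Assert.assertEquals(1, TimeUtil.oneTimeUnitMillisecond(TimeUnit.MILLISECONDS)); Assert.assertEquals(60000, TimeUtil.oneTimeUnitMillisecond(TimeUnit.MINUTES)); Assert.assertEquals(3600000, TimeUtil.oneTimeUnitMillisecond(TimeUnit.HOURS)); Assert.assertEquals(86400000, TimeUtil.oneTimeUnitMillisecond(TimeUnit.DAYS));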
public boolean isUserAllowed(UserGroupInformation ugi) { return isUserInList(ugi); }
@Test public void testIsUserAllowed() { AccessControlList acl; UserGroupInformation drwho = UserGroupInformation.createUserForTesting("[email protected]", new String[] { "aliens", "humanoids", "timelord" }); UserGroupInformation susan = UserGroupInformation.createUserForTesting("[email protected]", new String[] { "aliens", "humanoids", "timelord" }); UserGroupInformation barbara = UserGroupInformation.createUserForTesting("[email protected]", new String[] { "humans", "teachers" }); UserGroupInformation ian = UserGroupInformation.createUserForTesting("[email protected]", new String[] { "humans", "teachers" }); acl = new AccessControlList("drwho humanoids"); assertUserAllowed(drwho, acl); assertUserAllowed(susan, acl); assertUserNotAllowed(barbara, acl); assertUserNotAllowed(ian, acl); acl = new AccessControlList("drwho"); assertUserAllowed(drwho, acl); assertUserNotAllowed(susan, acl); assertUserNotAllowed(barbara, acl); assertUserNotAllowed(ian, acl); acl = new AccessControlList("drwho "); assertUserAllowed(drwho, acl); assertUserNotAllowed(susan, acl); assertUserNotAllowed(barbara, acl); assertUserNotAllowed(ian, acl); acl = new AccessControlList(" humanoids"); assertUserAllowed(drwho, acl); assertUserAllowed(susan, acl); assertUserNotAllowed(barbara, acl); assertUserNotAllowed(ian, acl); acl = new AccessControlList("drwho,ian aliens,teachers"); assertUserAllowed(drwho, acl); assertUserAllowed(susan, acl); assertUserAllowed(barbara, acl); assertUserAllowed(ian, acl); acl = new AccessControlList(""); UserGroupInformation spyUser = spy(drwho); acl.isUserAllowed(spyUser); verify(spyUser, never()).getGroupNames(); }
@Override public CompletableFuture<Void> deleteKvConfig(String address, DeleteKVConfigRequestHeader requestHeader, long timeoutMillis) { CompletableFuture<Void> future = new CompletableFuture<>(); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.DELETE_KV_CONFIG, requestHeader); remotingClient.invoke(address, request, timeoutMillis).thenAccept(response -> { if (response.getCode() == ResponseCode.SUCCESS) { future.complete(null); } else { log.warn("deleteKvConfig getResponseCommand failed, {} {}, header={}", response.getCode(), response.getRemark(), requestHeader); future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark())); } }); return future; }
@Test public void assertDeleteKvConfigWithSuccess() throws Exception { setResponseSuccess(null); DeleteKVConfigRequestHeader requestHeader = mock(DeleteKVConfigRequestHeader.class); CompletableFuture<Void> actual = mqClientAdminImpl.deleteKvConfig(defaultBrokerAddr, requestHeader, defaultTimeout); assertNull(actual.get()); }
@Override public void setViewID(View view, String viewID) { }
@Test public void testSetViewID() { Dialog view = new Dialog(mApplication); mSensorsAPI.setViewID(view, "R.id.login"); Object tag = view.getWindow().getDecorView().getTag(R.id.sensors_analytics_tag_view_id); Assert.assertNull(tag); }
void fetchPluginSettingsMetaData(GoPluginDescriptor pluginDescriptor) { String pluginId = pluginDescriptor.id(); List<ExtensionSettingsInfo> allMetadata = findSettingsAndViewOfAllExtensionsIn(pluginId); List<ExtensionSettingsInfo> validMetadata = allSettingsAndViewPairsWhichAreValid(allMetadata); if (validMetadata.isEmpty()) { LOGGER.warn("Failed to fetch plugin settings metadata for plugin {}. Maybe the plugin does not implement plugin settings and view?", pluginId); LOGGER.warn("Plugin: {} - Metadata load info: {}", pluginId, allMetadata); LOGGER.warn("Not all plugins are required to implement the request above. This error may be safe to ignore."); return; } if (validMetadata.size() > 1) { throw new RuntimeException(String.format("Plugin with ID: %s has more than one extension which supports plugin settings. " + "Only one extension should support it and respond to %s and %s.", pluginId, REQUEST_PLUGIN_SETTINGS_CONFIGURATION, REQUEST_PLUGIN_SETTINGS_VIEW)); } ExtensionSettingsInfo extensionSettingsInfo = validMetadata.get(0); metadataStore.addMetadataFor(pluginId, extensionSettingsInfo.extensionName, extensionSettingsInfo.configuration, extensionSettingsInfo.viewTemplate); }
@Test
public void shouldNotFailWhenAPluginWithMultipleExtensionsHasMoreThanOneExtensionRespondingWithSettings_BUT_NoneIsValid() {
    PluginSettingsConfiguration configuration = new PluginSettingsConfiguration();
    configuration.add(new PluginSettingsProperty("k1").with(Property.REQUIRED, true).with(Property.SECURE, false));
    String pluginId = "plugin-id";
    GoPluginDescriptor pluginDescriptor = GoPluginDescriptor.builder().id(pluginId).build();

    setupSettingsResponses(notificationExtension, pluginId, configuration, null);
    setupSettingsResponses(packageRepositoryExtension, pluginId, null, "view");

    metadataLoader.fetchPluginSettingsMetaData(pluginDescriptor);

    assertThat(PluginSettingsMetadataStore.getInstance().hasPlugin(pluginId)).isFalse();
}
public void setPresent(final UUID accountUuid, final byte deviceId, final DisplacedPresenceListener displacementListener) {
    setPresenceTimer.record(() -> {
        final String presenceKey = getPresenceKey(accountUuid, deviceId);

        displacePresence(presenceKey, true);

        displacementListenersByPresenceKey.put(presenceKey, displacementListener);

        presenceCluster.useCluster(connection -> {
            final RedisAdvancedClusterCommands<String, String> commands = connection.sync();
            commands.sadd(connectedClientSetKey, presenceKey);
            commands.setex(presenceKey, PRESENCE_EXPIRATION_SECONDS, managerId);
        });

        subscribeForRemotePresenceChanges(presenceKey);
    });
}
@Test
void testLocalDisplacement() {
    final UUID accountUuid = UUID.randomUUID();
    final byte deviceId = 1;

    final AtomicInteger displacementCounter = new AtomicInteger(0);
    final DisplacedPresenceListener displacementListener = connectedElsewhere -> displacementCounter.incrementAndGet();

    clientPresenceManager.setPresent(accountUuid, deviceId, displacementListener);
    assertEquals(0, displacementCounter.get());

    clientPresenceManager.setPresent(accountUuid, deviceId, displacementListener);
    assertEquals(1, displacementCounter.get());
}
public static void initRequestFromEntity(HttpRequestBase requestBase, Map<String, String> body, String charset)
    throws Exception {
    if (body == null || body.isEmpty()) {
        return;
    }
    List<NameValuePair> params = new ArrayList<>(body.size());
    for (Map.Entry<String, String> entry : body.entrySet()) {
        params.add(new BasicNameValuePair(entry.getKey(), entry.getValue()));
    }
    if (requestBase instanceof HttpEntityEnclosingRequest) {
        HttpEntityEnclosingRequest request = (HttpEntityEnclosingRequest) requestBase;
        HttpEntity entity = new UrlEncodedFormEntity(params, charset);
        request.setEntity(entity);
    }
}
@Test
void testInitRequestFromEntity4() throws Exception {
    BaseHttpMethod.HttpGetWithEntity httpRequest = new BaseHttpMethod.HttpGetWithEntity("");
    HttpUtils.initRequestFromEntity(mock(HttpRequestBase.class), Collections.emptyMap(), "UTF-8");
    // nothing changed
    assertEquals(new BaseHttpMethod.HttpGetWithEntity("").getEntity(), httpRequest.getEntity());
}
@Override
public RemotingCommand processRequest(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException {
    SendMessageContext sendMessageContext;
    switch (request.getCode()) {
        case RequestCode.CONSUMER_SEND_MSG_BACK:
            return this.consumerSendMsgBack(ctx, request);
        default:
            SendMessageRequestHeader requestHeader = parseRequestHeader(request);
            if (requestHeader == null) {
                return null;
            }
            TopicQueueMappingContext mappingContext = this.brokerController.getTopicQueueMappingManager().buildTopicQueueMappingContext(requestHeader, true);
            RemotingCommand rewriteResult = this.brokerController.getTopicQueueMappingManager().rewriteRequestForStaticTopic(requestHeader, mappingContext);
            if (rewriteResult != null) {
                return rewriteResult;
            }
            sendMessageContext = buildMsgContext(ctx, requestHeader, request);
            try {
                this.executeSendMessageHookBefore(sendMessageContext);
            } catch (AbortProcessException e) {
                final RemotingCommand errorResponse = RemotingCommand.createResponseCommand(e.getResponseCode(), e.getErrorMessage());
                errorResponse.setOpaque(request.getOpaque());
                return errorResponse;
            }
            RemotingCommand response;
            clearReservedProperties(requestHeader);
            if (requestHeader.isBatch()) {
                response = this.sendBatchMessage(ctx, request, sendMessageContext, requestHeader, mappingContext,
                    (ctx1, response1) -> executeSendMessageHookAfter(response1, ctx1));
            } else {
                response = this.sendMessage(ctx, request, sendMessageContext, requestHeader, mappingContext,
                    (ctx12, response12) -> executeSendMessageHookAfter(response12, ctx12));
            }
            return response;
    }
}
@Test
public void testProcessRequest_Transaction() throws RemotingCommandException {
    brokerController.setTransactionalMessageService(transactionMsgService);
    when(brokerController.getTransactionalMessageService().asyncPrepareMessage(any(MessageExtBrokerInner.class)))
        .thenReturn(CompletableFuture.completedFuture(new PutMessageResult(PutMessageStatus.PUT_OK,
            new AppendMessageResult(AppendMessageStatus.PUT_OK))));
    RemotingCommand request = createSendTransactionMsgCommand(RequestCode.SEND_MESSAGE);
    final RemotingCommand[] response = new RemotingCommand[1];
    doAnswer(invocation -> {
        response[0] = invocation.getArgument(0);
        return null;
    }).when(channel).writeAndFlush(any(Object.class));
    await().atMost(Duration.ofSeconds(10)).until(() -> {
        RemotingCommand responseToReturn = sendMessageProcessor.processRequest(handlerContext, request);
        if (responseToReturn != null) {
            assertThat(response[0]).isNull();
            response[0] = responseToReturn;
        }
        if (response[0] == null) {
            return false;
        }
        assertThat(response[0].getCode()).isEqualTo(ResponseCode.SUCCESS);
        assertThat(response[0].getOpaque()).isEqualTo(request.getOpaque());
        return true;
    });
}
public void setTag(String tag) {
    this.tag = tag;
}
@Test
void testSetTag() {
    String tag = "tag";
    metadataOperation.setTag(tag);
    assertEquals(metadataOperation.getTag(), tag);
}
@Override
public void execute(String commandName, BufferedReader reader, BufferedWriter writer)
    throws Py4JException, IOException {
    String targetObjectId = reader.readLine();
    String methodName = reader.readLine();
    List<Object> arguments = getArguments(reader);

    ReturnObject returnObject = invokeMethod(methodName, targetObjectId, arguments);

    String returnCommand = Protocol.getOutputCommand(returnObject);
    logger.finest("Returning command: " + returnCommand);
    writer.write(returnCommand);
    writer.flush();
}
@Test
public void testMethodWithParams() {
    String inputCommand = target + "\nmethod3\ni1\nbtrue\ne\n";
    try {
        command.execute("c", new BufferedReader(new StringReader(inputCommand)), writer);
        assertEquals("!ysHello World\n", sWriter.toString());
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
public static Map<UUID, PartitionIdSet> createPartitionMap(
    NodeEngine nodeEngine,
    @Nullable MemberVersion localMemberVersion,
    boolean failOnUnassignedPartition
) {
    Collection<Partition> parts = nodeEngine.getHazelcastInstance().getPartitionService().getPartitions();
    int partCnt = parts.size();

    Map<UUID, PartitionIdSet> partMap = new LinkedHashMap<>();
    for (Partition part : parts) {
        Member owner = part.getOwner();
        if (owner == null) {
            if (failOnUnassignedPartition) {
                throw QueryException.error(
                    SqlErrorCode.PARTITION_DISTRIBUTION,
                    "Partition is not assigned to any member: " + part.getPartitionId()
                );
            } else {
                continue;
            }
        }
        if (localMemberVersion != null) {
            if (!localMemberVersion.equals(owner.getVersion())) {
                UUID localMemberId = nodeEngine.getLocalMember().getUuid();
                throw QueryException.error("Cannot execute SQL query when members have different versions "
                    + "(make sure that all members have the same version) {localMemberId=" + localMemberId
                    + ", localMemberVersion=" + localMemberVersion + ", remoteMemberId=" + owner.getUuid()
                    + ", remoteMemberVersion=" + owner.getVersion() + "}");
            }
        }
        partMap.computeIfAbsent(owner.getUuid(), (key) -> new PartitionIdSet(partCnt)).add(part.getPartitionId());
    }
    return partMap;
}
@Test
public void testUnassignedPartition_exception() {
    HazelcastInstance member = factory.newHazelcastInstance();
    member.getCluster().changeClusterState(ClusterState.FROZEN);
    try {
        QueryUtils.createPartitionMap(Accessors.getNodeEngineImpl(member), null, true);
        fail("Must fail");
    } catch (QueryException e) {
        assertEquals(SqlErrorCode.PARTITION_DISTRIBUTION, e.getCode());
        assertTrue(e.getMessage(), e.getMessage().startsWith("Partition is not assigned to any member"));
    }
}
public boolean add(final Integer element) {
    return addInt(null == element ? nullValue : element);
}
@Test
void shouldCreateIntegerArray() {
    final int count = 20;
    final Integer[] expected = new Integer[count];
    for (int i = 0; i < count; i++) {
        list.add(i);
        expected[i] = i;
    }
    final Integer[] integers = list.toArray(new Integer[0]);
    assertEquals(expected.getClass(), integers.getClass());
    assertArrayEquals(expected, integers);
}
@Override
public int statfs(String path, Statvfs stbuf) {
    return AlluxioFuseUtils.call(LOG, () -> statfsInternal(path, stbuf), "Fuse.Statfs", "path=%s", path);
}
@Test
@DoraTestTodoItem(action = DoraTestTodoItem.Action.FIX, owner = "bowen",
    comment = "aggregated capacity and usage info is not available yet in dora")
@Ignore
public void statfs() throws Exception {
    ByteBuffer buffer = ByteBuffer.allocateDirect(4 * Constants.KB);
    buffer.clear();
    Statvfs stbuf = Statvfs.of(buffer);

    int blockSize = 16 * Constants.KB;
    int totalBlocks = 4;
    int freeBlocks = 3;
    BlockMasterClient blockMasterClient = PowerMockito.mock(BlockMasterClient.class);
    PowerMockito.mockStatic(BlockMasterClient.Factory.class);
    when(BlockMasterClient.Factory.create(any())).thenReturn(blockMasterClient);
    BlockMasterInfo blockMasterInfo = new BlockMasterInfo();
    blockMasterInfo.setCapacityBytes(totalBlocks * blockSize);
    blockMasterInfo.setFreeBytes(freeBlocks * blockSize);
    when(blockMasterClient.getBlockMasterInfo(any())).thenReturn(blockMasterInfo);
    when(mFileSystemContext.acquireBlockMasterClientResource()).thenReturn(
        new CloseableResource<BlockMasterClient>(blockMasterClient) {
            @Override
            public void closeResource() {}
        });

    assertEquals(0, mFuseFs.statfs("/", stbuf));
    assertEquals(blockSize, stbuf.f_bsize.intValue());
    assertEquals(blockSize, stbuf.f_frsize.intValue());
    assertEquals(totalBlocks, stbuf.f_blocks.longValue());
    assertEquals(freeBlocks, stbuf.f_bfree.longValue());
    assertEquals(freeBlocks, stbuf.f_bavail.longValue());
    assertEquals(AlluxioJniFuseFileSystem.UNKNOWN_INODES, stbuf.f_files.intValue());
    assertEquals(AlluxioJniFuseFileSystem.UNKNOWN_INODES, stbuf.f_ffree.intValue());
    assertEquals(AlluxioJniFuseFileSystem.UNKNOWN_INODES, stbuf.f_favail.intValue());
    assertEquals(AlluxioFuseUtils.MAX_NAME_LENGTH, stbuf.f_namemax.intValue());
}
public String search(String type, String sortBy) {
    return getQuerySummary(type, sortBy, SortOrder.ASC);
}
@Test
void testDefaultParametersMatch() {
    assertEquals(searchService.search(parameterObject), searchService.search("sneakers", SortOrder.ASC),
        "Default Parameter values do not match.");
    LOGGER.info("SortBy Default parameter value matches.");

    assertEquals(searchService.search(parameterObject), searchService.search("sneakers", "price"),
        "Default Parameter values do not match.");
    LOGGER.info("SortOrder Default parameter value matches.");

    LOGGER.info("testDefaultParametersMatch executed successfully without errors.");
}
public static Schema create(Type type) {
    switch (type) {
        case STRING:
            return new StringSchema();
        case BYTES:
            return new BytesSchema();
        case INT:
            return new IntSchema();
        case LONG:
            return new LongSchema();
        case FLOAT:
            return new FloatSchema();
        case DOUBLE:
            return new DoubleSchema();
        case BOOLEAN:
            return new BooleanSchema();
        case NULL:
            return new NullSchema();
        default:
            throw new AvroRuntimeException("Can't create a: " + type);
    }
}
@Test
void floatAsDoubleDefaultValue() {
    Schema.Field field = new Schema.Field("myField", Schema.create(Schema.Type.DOUBLE), "doc", 1.0f);
    assertTrue(field.hasDefaultValue());
    assertEquals(1.0d, field.defaultVal());
    assertEquals(1.0d, GenericData.get().getDefaultValue(field));
}
@Override
public boolean betterThan(Num criterionValue1, Num criterionValue2) {
    return lessIsBetter
        ? criterionValue1.isLessThan(criterionValue2)
        : criterionValue1.isGreaterThan(criterionValue2);
}
@Test
public void betterThanWithLessIsNotBetter() {
    AnalysisCriterion criterion = getCriterion(new ProfitLossCriterion());
    assertTrue(criterion.betterThan(numOf(5000), numOf(4500)));
    assertFalse(criterion.betterThan(numOf(4500), numOf(5000)));
}
@Override
public void registerInstance(String serviceName, String ip, int port) throws NacosException {
    registerInstance(serviceName, ip, port, Constants.DEFAULT_CLUSTER_NAME);
}
@Test
void testRegisterInstance6() throws NacosException {
    //given
    String serviceName = "service1";
    String groupName = "group1";
    Instance instance = new Instance();
    //when
    client.registerInstance(serviceName, groupName, instance);
    //then
    verify(proxy, times(1)).registerService(serviceName, groupName, instance);
}
public void initializePluggableDevicePlugins(Context context, Configuration configuration,
    Map<String, ResourcePlugin> pluginMap)
    throws YarnRuntimeException, ClassNotFoundException {
    LOG.info("The pluggable device framework enabled, "
        + "trying to load the vendor plugins");
    if (null == deviceMappingManager) {
        LOG.debug("DeviceMappingManager initialized.");
        deviceMappingManager = new DeviceMappingManager(context);
    }
    String[] pluginClassNames = configuration.getStrings(
        YarnConfiguration.NM_PLUGGABLE_DEVICE_FRAMEWORK_DEVICE_CLASSES);
    if (null == pluginClassNames) {
        throw new YarnRuntimeException("Null value found in configuration: "
            + YarnConfiguration.NM_PLUGGABLE_DEVICE_FRAMEWORK_DEVICE_CLASSES);
    }
    for (String pluginClassName : pluginClassNames) {
        Class<?> pluginClazz = Class.forName(pluginClassName);
        if (!DevicePlugin.class.isAssignableFrom(pluginClazz)) {
            throw new YarnRuntimeException("Class: " + pluginClassName
                + " not instance of " + DevicePlugin.class.getCanonicalName());
        }
        // sanity-check before initialization
        checkInterfaceCompatibility(DevicePlugin.class, pluginClazz);

        DevicePlugin dpInstance = (DevicePlugin) ReflectionUtils.newInstance(pluginClazz, configuration);

        // Try to register plugin
        // TODO: handle the plugin method timeout issue
        DeviceRegisterRequest request = null;
        try {
            request = dpInstance.getRegisterRequestInfo();
        } catch (Exception e) {
            throw new YarnRuntimeException("Exception thrown from plugin's"
                + " getRegisterRequestInfo:" + e.getMessage());
        }
        String resourceName = request.getResourceName();
        // check if someone has already registered this resource type name
        if (pluginMap.containsKey(resourceName)) {
            throw new YarnRuntimeException(resourceName
                + " already registered! Please change resource type name"
                + " or configure correct resource type name"
                + " in resource-types.xml for " + pluginClassName);
        }
        // check resource name is valid and configured in resource-types.xml
        if (!isConfiguredResourceName(resourceName)) {
            throw new YarnRuntimeException(resourceName
                + " is not configured inside "
                + YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE
                + " , please configure it first");
        }
        LOG.info("New resource type: {} registered successfully by {}",
            resourceName, pluginClassName);
        DevicePluginAdapter pluginAdapter = new DevicePluginAdapter(
            resourceName, dpInstance, deviceMappingManager);
        LOG.info("Adapter of {} created. Initializing..", pluginClassName);
        try {
            pluginAdapter.initialize(context);
        } catch (YarnException e) {
            throw new YarnRuntimeException("Adapter of " + pluginClassName + " init failed!");
        }
        LOG.info("Adapter of {} init success!", pluginClassName);
        // Store plugin as adapter instance
        pluginMap.put(request.getResourceName(), pluginAdapter);
        // If the device plugin implements DevicePluginScheduler interface
        if (dpInstance instanceof DevicePluginScheduler) {
            // check DevicePluginScheduler interface compatibility
            checkInterfaceCompatibility(DevicePluginScheduler.class, pluginClazz);
            LOG.info("{} can schedule {} devices. "
                + "Added as preferred device plugin scheduler", pluginClassName, resourceName);
            deviceMappingManager.addDevicePluginScheduler(
                resourceName, (DevicePluginScheduler) dpInstance);
        }
    } // end for
}
@Test(timeout = 30000)
public void testInitializationWithPluggableDeviceFrameworkEnabled2()
    throws ClassNotFoundException {
    ResourcePluginManager rpm = new ResourcePluginManager();
    ResourcePluginManager rpmSpy = spy(rpm);
    nm = new ResourcePluginMockNM(rpmSpy);

    boolean fail = false;
    try {
        conf.setBoolean(YarnConfiguration.NM_PLUGGABLE_DEVICE_FRAMEWORK_ENABLED, true);
        nm.init(conf);
        nm.start();
    } catch (YarnRuntimeException e) {
        fail = true;
    } catch (Exception ignored) {
        // ignore
    }
    verify(rpmSpy).initializePluggableDevicePlugins(
        any(Context.class), any(Configuration.class), anyMap());
    assertThat(fail).isTrue();
}
public static MetricRegistry getDefault() {
    if (defaultRegistryName != null) {
        return getOrCreate(defaultRegistryName);
    }
    throw new IllegalStateException("Default registry name has not been set.");
}
@Test
public void errorsWhenDefaultUnset() throws Exception {
    try {
        SharedMetricRegistries.getDefault();
    } catch (final Exception e) {
        assertThat(e).isInstanceOf(IllegalStateException.class);
        assertThat(e.getMessage()).isEqualTo("Default registry name has not been set.");
    }
}
@SuppressWarnings("IOResourceOpenedButNotSafelyClosed") public KDiag(Configuration conf, PrintWriter out, File keytab, String principal, long minKeyLength, boolean securityRequired) { super(conf); this.keytab = keytab; this.principal = principal; this.out = out; this.minKeyLength = minKeyLength; this.securityRequired = securityRequired; }
@Test
public void testShortName() throws Throwable {
    kdiag(ARG_KEYLEN, KEYLEN,
        ARG_KEYTAB, keytab.getAbsolutePath(),
        ARG_PRINCIPAL,
        ARG_VERIFYSHORTNAME,
        ARG_PRINCIPAL, "[email protected]");
}
@Override
@SuppressWarnings("checkstyle:magicnumber")
public void process(int ordinal, @Nonnull Inbox inbox) {
    try {
        switch (ordinal) {
            case 0:
                process0(inbox);
                break;
            case 1:
                process1(inbox);
                break;
            case 2:
                process2(inbox);
                break;
            case 3:
                process3(inbox);
                break;
            case 4:
                process4(inbox);
                break;
            default:
                processAny(ordinal, inbox);
        }
    } catch (Exception e) {
        throw sneakyThrow(e);
    }
}
@Test
public void when_processInbox4_then_tryProcess4Called() {
    // When
    tryProcessP.process(ORDINAL_4, inbox);

    // Then
    tryProcessP.validateReceptionOfItem(ORDINAL_4, MOCK_ITEM);
}
@Override
public ProcessingResult process(ReplicationTask task) {
    try {
        EurekaHttpResponse<?> httpResponse = task.execute();
        int statusCode = httpResponse.getStatusCode();
        Object entity = httpResponse.getEntity();
        if (logger.isDebugEnabled()) {
            logger.debug("Replication task {} completed with status {}, (includes entity {})",
                task.getTaskName(), statusCode, entity != null);
        }
        if (isSuccess(statusCode)) {
            task.handleSuccess();
        } else if (statusCode == 503) {
            logger.debug("Server busy (503) reply for task {}", task.getTaskName());
            return ProcessingResult.Congestion;
        } else {
            task.handleFailure(statusCode, entity);
            return ProcessingResult.PermanentError;
        }
    } catch (Throwable e) {
        if (maybeReadTimeOut(e)) {
            logger.error("It seems to be a socket read timeout exception, it will retry later. if it continues to happen and some eureka node occupied all the cpu time, you should set property 'eureka.server.peer-node-read-timeout-ms' to a bigger value", e);
            // read timeout exception is more Congestion than TransientError, return Congestion for longer delay
            return ProcessingResult.Congestion;
        } else if (isNetworkConnectException(e)) {
            logNetworkErrorSample(task, e);
            return ProcessingResult.TransientError;
        } else {
            logger.error("{}: {} Not re-trying this exception because it does not seem to be a network exception",
                peerId, task.getTaskName(), e);
            return ProcessingResult.PermanentError;
        }
    }
    return ProcessingResult.Success;
}
@Test
public void testNonBatchableTaskCongestionFailureHandling() throws Exception {
    TestableInstanceReplicationTask task = aReplicationTask()
        .withAction(Action.Heartbeat)
        .withReplyStatusCode(503)
        .build();

    ProcessingResult status = replicationTaskProcessor.process(task);
    assertThat(status, is(ProcessingResult.Congestion));
    assertThat(task.getProcessingState(), is(ProcessingState.Pending));
}
public String getDefaultValue() {
    return defaultValue;
}
@Test
public void getDefaultValue_ReturnsDefaultValue() {
    assertEquals("value1", enumAttribute.getDefaultValue());
}
@Override
public Committer closeForCommit() throws IOException {
    lock();
    try {
        closeAndUploadPart();
        return upload.snapshotAndGetCommitter();
    } finally {
        unlock();
    }
}
@Test
public void commitEmptyStreamShouldBeSuccessful() throws IOException {
    streamUnderTest.closeForCommit().commit();
}
@CheckForNull
@Override
public Set<Path> branchChangedFiles(String targetBranchName, Path rootBaseDir) {
    return Optional.ofNullable(branchChangedFilesWithFileMovementDetection(targetBranchName, rootBaseDir))
        .map(GitScmProvider::extractAbsoluteFilePaths)
        .orElse(null);
}
@Test
public void branchChangedFiles_should_throw_when_repo_nonexistent() {
    assertThatThrownBy(() -> newScmProvider().branchChangedFiles("main", temp.newFolder().toPath()))
        .isInstanceOf(MessageException.class)
        .hasMessageContaining("Not inside a Git work tree: ");
}
public synchronized boolean saveNamespace(long timeWindow, long txGap, FSNamesystem source)
    throws IOException {
    if (timeWindow > 0 || txGap > 0) {
        final FSImageStorageInspector inspector = storage.readAndInspectDirs(
            EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK), StartupOption.REGULAR);
        FSImageFile image = inspector.getLatestImages().get(0);
        File imageFile = image.getFile();

        final long checkpointTxId = image.getCheckpointTxId();
        final long checkpointAge = Time.now() - imageFile.lastModified();
        if (checkpointAge <= timeWindow * 1000
            && checkpointTxId >= this.getCorrectLastAppliedOrWrittenTxId() - txGap) {
            return false;
        }
    }
    saveNamespace(source, NameNodeFile.IMAGE, null);
    return true;
}
@Test
public void testHasNonEcBlockUsingStripedIDForLoadFile() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();

        String testDir = "/test_block_manager";
        String testFile = "testfile_loadfile";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser_loadfile";
        String clientMachine = "testMachine_loadfile";
        long blkId = -1;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;

        fs.mkdir(new Path(testDir), new FsPermission("755"));
        Path p = new Path(testFilePath);

        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        BlockInfoContiguous cBlk = new BlockInfoContiguous(
            new Block(blkId, blkNumBytes, timestamp), (short) 3);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(cBlk);
        TestINodeFile.toCompleteFile(file);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());

        // after nonEcBlockUsingStripedID is deleted
        // the hasNonEcBlockUsingStripedID is set to false
        fs = cluster.getFileSystem();
        fs.delete(p, false);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertFalse(fns.getBlockManager().hasNonEcBlockUsingStripedID());

        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
@Override
public Collection<TaskManagerLocation> getPreferredLocations(
    ExecutionVertexID executionVertexId, Set<ExecutionVertexID> producersToIgnore) {
    CompletableFuture<Collection<TaskManagerLocation>> preferredLocationsFuture =
        asyncPreferredLocationsRetriever.getPreferredLocations(executionVertexId, producersToIgnore);
    Preconditions.checkState(preferredLocationsFuture.isDone());
    // it is safe to do the blocking call here
    // as the underlying InputsLocationsRetriever returns only immediately available locations
    return preferredLocationsFuture.join();
}
@Test
void testAvailableInputLocationRetrieval() {
    TestingInputsLocationsRetriever originalLocationRetriever =
        new TestingInputsLocationsRetriever.Builder()
            .connectConsumerToProducers(EV21, Arrays.asList(EV11, EV12, EV13, EV14))
            .build();

    originalLocationRetriever.assignTaskManagerLocation(EV11);
    originalLocationRetriever.markScheduled(EV12);
    originalLocationRetriever.failTaskManagerLocation(EV13, new Throwable());
    originalLocationRetriever.cancelTaskManagerLocation(EV14);

    SyncPreferredLocationsRetriever locationsRetriever =
        new DefaultSyncPreferredLocationsRetriever(
            executionVertexId -> Optional.empty(), originalLocationRetriever);

    Collection<TaskManagerLocation> preferredLocations =
        locationsRetriever.getPreferredLocations(EV21, Collections.emptySet());

    TaskManagerLocation expectedLocation =
        originalLocationRetriever.getTaskManagerLocation(EV11).get().join();
    assertThat(preferredLocations).containsExactly(expectedLocation);
}
public static CDCResponse succeed(final String requestId) {
    return succeed(requestId, ResponseCase.RESPONSE_NOT_SET, null);
}
@Test
void assertSucceedWhenResponseCaseStreamDataResult() {
    Message msg = StreamDataResult.newBuilder().build();
    CDCResponse actualResponse = CDCResponseUtils.succeed("request_id_1", CDCResponse.ResponseCase.STREAM_DATA_RESULT, msg);
    assertThat(actualResponse.getStatus(), is(CDCResponse.Status.SUCCEED));
    assertThat(actualResponse.getRequestId(), is("request_id_1"));
    assertNotNull(actualResponse.getStreamDataResult());
}
public int getListFederationQueuePoliciesFailedRetrieved() {
    return numListFederationQueuePoliciesFailedRetrieved.value();
}
@Test
public void testListFederationQueuePoliciesFailedRetrieved() {
    long totalBadBefore = metrics.getListFederationQueuePoliciesFailedRetrieved();
    badSubCluster.getListFederationQueuePoliciesFailedRetrieved();
    Assert.assertEquals(totalBadBefore + 1, metrics.getListFederationQueuePoliciesFailedRetrieved());
}
public synchronized boolean maybeUpdatePushRequestTimestamp(long currentTime) {
    /*
     Immediate push request after get subscriptions fetch can be accepted outside push interval
     time as client applies a jitter to the push interval, which might result in a request being
     sent between 0.5 * pushIntervalMs and 1.5 * pushIntervalMs.
    */
    boolean canAccept = lastGetRequestTimestamp > lastPushRequestTimestamp;
    if (!canAccept) {
        long timeElapsedSinceLastMsg = currentTime - lastPushRequestTimestamp;
        canAccept = timeElapsedSinceLastMsg >= pushIntervalMs;
    }
    // Update the timestamp only if the request can be accepted.
    if (canAccept) {
        lastPushRequestTimestamp = currentTime;
    }
    return canAccept;
}
@Test
public void testMaybeUpdatePushRequestAfterElapsedTimeValid() {
    assertTrue(clientInstance.maybeUpdatePushRequestTimestamp(
        System.currentTimeMillis() - ClientMetricsConfigs.DEFAULT_INTERVAL_MS));
    // Second request should be accepted as time since last request is greater than the push interval.
    assertTrue(clientInstance.maybeUpdatePushRequestTimestamp(System.currentTimeMillis()));
}
public static String matchPattern(String regEx, String s) {
    Pattern pattern = Pattern.compile(regEx);
    Matcher matcher = pattern.matcher(s);
    if (matcher.find()) {
        return matcher.group(1);
    }
    return null;
}
@Test
public void shouldFindSimpleRegExMatch() {
    String url = "http://java.sun.com:80/docs/books/tutorial/essential/regex/test_harness.html";
    String baseUrl = StringUtil.matchPattern("^(http://[^/]*)/", url);
    assertThat(baseUrl, is("http://java.sun.com:80"));
}
public void terminateCluster(final List<String> deleteTopicPatterns) {
    terminatePersistentQueries();
    deleteSinkTopics(deleteTopicPatterns);
    deleteTopics(managedTopics);
    ksqlEngine.close();
}
@Test
public void shouldCleanUpSchemasForExplicitTopicListProtobuf() throws Exception {
    // Given:
    givenTopicsExistInKafka("K_Foo");
    givenSinkTopicsExistInMetastore(FormatFactory.PROTOBUF, "K_Foo");
    givenSchemasForTopicsExistInSchemaRegistry("K_Foo");

    // When:
    clusterTerminator.terminateCluster(ImmutableList.of("K_Foo"));

    // Then:
    verifySchemaDeletedForTopics("K_Foo");
}
public static int weekOfYear(TemporalAccessor date) {
    return TemporalAccessorUtil.get(date, WeekFields.ISO.weekOfYear());
}
@Test
public void weekOfYearTest() {
    final LocalDate date1 = LocalDate.of(2021, 12, 31);
    final int weekOfYear1 = LocalDateTimeUtil.weekOfYear(date1);
    assertEquals(52, weekOfYear1);

    final int weekOfYear2 = LocalDateTimeUtil.weekOfYear(date1.atStartOfDay());
    assertEquals(52, weekOfYear2);
}
public String build(final String cellValue) {
    switch (type) {
        case FORALL:
            return buildForAll(cellValue);
        case INDEXED:
            return buildMulti(cellValue);
        default:
            return buildSingle(cellValue);
    }
}
@Test
public void testBuildSnippet() {
    final String snippet = "something.param.getAnother().equals($param);";
    final SnippetBuilder snip = new SnippetBuilder(snippet);
    final String cellValue = "$42";
    final String result = snip.build(cellValue);
    assertThat(result).isNotNull();
    assertThat(result).isEqualTo("something.param.getAnother().equals($42);");
}
@Override
public Page<ConfigInfoAggr> findConfigInfoAggrByPage(String dataId, String group, String tenant, final int pageNo,
    final int pageSize) {
    String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
    ConfigInfoAggrMapper configInfoAggrMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
        TableConstant.CONFIG_INFO_AGGR);
    final int startRow = (pageNo - 1) * pageSize;
    String sqlCountRows = configInfoAggrMapper.select(Arrays.asList("count(*)"),
        Arrays.asList("data_id", "group_id", "tenant_id"));
    MapperContext context = new MapperContext();
    context.putWhereParameter(FieldConstant.DATA_ID, dataId);
    context.putWhereParameter(FieldConstant.GROUP_ID, group);
    context.putWhereParameter(FieldConstant.TENANT_ID, tenantTmp);
    context.setStartRow(startRow);
    context.setPageSize(pageSize);
    MapperResult mapperResult = configInfoAggrMapper.findConfigInfoAggrByPageFetchRows(context);
    String sqlFetchRows = mapperResult.getSql();
    Object[] sqlFetchArgs = mapperResult.getParamList().toArray();

    PaginationHelper<ConfigInfoAggr> helper = this.createPaginationHelper();
    try {
        return helper.fetchPageLimit(sqlCountRows, new Object[] {dataId, group, tenantTmp}, sqlFetchRows,
            sqlFetchArgs, pageNo, pageSize, CONFIG_INFO_AGGR_ROW_MAPPER);
    } catch (CannotGetJdbcConnectionException e) {
        LogUtil.FATAL_LOG.error("[db-error] " + e, e);
        throw e;
    }
}
@Test
void testFindConfigInfoAggrByPageOfException() {
    String dataId = "dataId111";
    String group = "group";
    String tenant = "tenant";

    // mock query count exception.
    when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}), eq(Integer.class)))
        .thenThrow(new CannotGetJdbcConnectionException("mock fail222"));
    try {
        int pageNo = 1;
        int pageSize = 120;
        externalConfigInfoAggrPersistService.findConfigInfoAggrByPage(dataId, group, tenant, pageNo, pageSize);
        assertTrue(false);
    } catch (Throwable throwable) {
        assertEquals("mock fail222", throwable.getMessage());
    }
}
@Override
public Object evaluate(final ProcessingDTO processingDTO) {
    Number input = (Number) getFromPossibleSources(name, processingDTO)
        .orElse(mapMissingTo);
    if (input == null) {
        throw new KiePMMLException("Failed to retrieve input number for " + name);
    }
    return evaluate(input);
}
@Test
void evaluateWithOutlierValueAsMissingValues() {
    Number missingValue = 45;
    KiePMMLNormContinuous kiePMMLNormContinuous = getKiePMMLNormContinuous(null,
        OUTLIER_TREATMENT_METHOD.AS_MISSING_VALUES, missingValue);
    Number input = 23;
    Number retrieved = kiePMMLNormContinuous.evaluate(input);
    assertThat(retrieved).isEqualTo(missingValue);

    input = 41;
    retrieved = kiePMMLNormContinuous.evaluate(input);
    assertThat(retrieved).isEqualTo(missingValue);
}
public boolean meetCompletenessRequirements(Cluster cluster, ModelCompletenessRequirements requirements) {
    int numValidWindows = _partitionMetricSampleAggregator
        .validWindows(cluster, requirements.minMonitoredPartitionsPercentage()).size();
    int requiredNumValidWindows = requirements.minRequiredNumWindows();
    return numValidWindows >= requiredNumValidWindows;
}
@Test
public void testMeetCompletenessRequirements() {
    TestContext context = prepareContext();
    LoadMonitor loadMonitor = context.loadmonitor();
    KafkaPartitionMetricSampleAggregator aggregator = context.aggregator();

    // Require at least 1 valid window with 1.0 of valid partitions ratio.
    ModelCompletenessRequirements requirements1 = new ModelCompletenessRequirements(1, 1.0, false);
    // Require at least 1 valid window with 0.5 of valid partitions ratio.
    ModelCompletenessRequirements requirements2 = new ModelCompletenessRequirements(1, 0.5, false);
    // Require at least 2 valid windows with 1.0 of valid partitions ratio.
    ModelCompletenessRequirements requirements3 = new ModelCompletenessRequirements(2, 1.0, false);
    // Require at least 2 valid windows with 0.5 of valid partitions ratio.
    ModelCompletenessRequirements requirements4 = new ModelCompletenessRequirements(2, 0.5, false);

    // Populate the metrics aggregator.
    // One stable window + one active window, enough samples for each partition except T1P1.
    CruiseControlUnitTestUtils.populateSampleAggregator(2, 4, aggregator, PE_T0P0, 0, WINDOW_MS, METRIC_DEF);
    CruiseControlUnitTestUtils.populateSampleAggregator(2, 4, aggregator, PE_T0P1, 0, WINDOW_MS, METRIC_DEF);
    CruiseControlUnitTestUtils.populateSampleAggregator(2, 4, aggregator, PE_T1P0, 0, WINDOW_MS, METRIC_DEF);

    // The load monitor has one window with 0.5 valid partitions ratio.
    MetadataClient.ClusterAndGeneration clusterAndGeneration = loadMonitor.refreshClusterAndGeneration();
    assertFalse(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements1));
    assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements2));
    assertFalse(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements3));
    assertFalse(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements4));

    // Add more samples, two stable windows + one active window, enough samples for each partition except T1P1.
    CruiseControlUnitTestUtils.populateSampleAggregator(1, 4, aggregator, PE_T0P0, 2, WINDOW_MS, METRIC_DEF);
    CruiseControlUnitTestUtils.populateSampleAggregator(1, 4, aggregator, PE_T0P1, 2, WINDOW_MS, METRIC_DEF);
    CruiseControlUnitTestUtils.populateSampleAggregator(1, 4, aggregator, PE_T1P0, 2, WINDOW_MS, METRIC_DEF);
    CruiseControlUnitTestUtils.populateSampleAggregator(1, 1, aggregator, PE_T1P1, 2, WINDOW_MS, METRIC_DEF);

    // The load monitor has two windows, both with 0.5 valid partitions ratio.
    clusterAndGeneration = loadMonitor.refreshClusterAndGeneration();
    assertFalse(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements1));
    assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements2));
    assertFalse(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements3));
    assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements4));

    // Back fill the first stable window for T1P1.
    CruiseControlUnitTestUtils.populateSampleAggregator(1, 1, aggregator, PE_T1P1, 0, WINDOW_MS, METRIC_DEF);

    // The load monitor has two windows with 1.0 and 0.5 of completeness respectively.
    clusterAndGeneration = loadMonitor.refreshClusterAndGeneration();
    assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements1));
    assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements2));
    assertFalse(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements3));
    assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements4));

    // Back fill all stable windows for T1P1.
    CruiseControlUnitTestUtils.populateSampleAggregator(1, 3, aggregator, PE_T1P1, 1, WINDOW_MS, METRIC_DEF);

    // The load monitor has two windows, both with 1.0 of completeness.
    clusterAndGeneration = loadMonitor.refreshClusterAndGeneration();
    assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements1));
    assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements2));
    assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements3));
    assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements4));
}
@Override
public Application find(final String filename) {
    final String extension = Path.getExtension(filename);
    if (!defaultApplicationCache.contains(extension)) {
        if (StringUtils.isEmpty(extension)) {
            return Application.notfound;
        }
        final String path = this.findForType(extension);
        if (StringUtils.isEmpty(path)) {
            defaultApplicationCache.put(extension, Application.notfound);
        } else {
            final NSBundle bundle = NSBundle.bundleWithPath(path);
            if (null == bundle) {
                log.error(String.format("Loading bundle %s failed", path));
                defaultApplicationCache.put(extension, Application.notfound);
            } else {
                defaultApplicationCache.put(extension, this.getDescription(bundle.bundleIdentifier()));
            }
        }
    }
    return defaultApplicationCache.get(extension);
}
@Test
public void testFindByFilename() {
    ApplicationFinder f = new LaunchServicesApplicationFinder();
    assertEquals(new Application("com.apple.Preview", "Preview"), f.find("file.png"));
    assertEquals(Application.notfound, f.find("file.txt_"));
}
void addVolume(FsVolumeReference ref) throws IOException {
    FsVolumeImpl volume = (FsVolumeImpl) ref.getVolume();
    volumes.add(volume);
    if (isSameDiskTieringApplied(volume)) {
        mountVolumeMap.addVolume(volume);
        URI uri = volume.getStorageLocation().getUri();
        if (capacityRatioMap.containsKey(uri)) {
            mountVolumeMap.setCapacityRatio(volume, capacityRatioMap.get(uri));
        }
    }
    if (blockScanner != null) {
        blockScanner.addVolumeScanner(ref);
    } else {
        // If the volume is not put into a volume scanner, it does not need to
        // hold the reference.
        IOUtils.cleanupWithLogger(null, ref);
    }
    // If the volume is used to replace a failed volume, it needs to reset the
    // volume failure info for this volume.
    removeVolumeFailureInfo(volume.getStorageLocation());
    FsDatasetImpl.LOG.info("Added new volume: " + volume.getStorageID());
}
@Test(timeout=30000)
public void testReleaseVolumeRefIfNoBlockScanner() throws IOException {
    FsVolumeList volumeList = new FsVolumeList(
        Collections.<VolumeFailureInfo>emptyList(), null, blockChooser, conf, null);
    File volDir = new File(baseDir, "volume-0");
    volDir.mkdirs();
    FsVolumeImpl volume = new FsVolumeImplBuilder()
        .setConf(conf)
        .setDataset(dataset)
        .setStorageID("storage-id")
        .setStorageDirectory(
            new StorageDirectory(StorageLocation.parse(volDir.getPath())))
        .build();
    FsVolumeReference ref = volume.obtainReference();
    volumeList.addVolume(ref);
    assertNull(ref.getVolume());
}
private void onPendingTaskFinished(SparkPendingTaskAttachment attachment) {
    writeLock();
    try {
        // check if job has been cancelled
        if (isTxnDone()) {
            LOG.warn(new LogBuilder(LogKey.LOAD_JOB, id)
                .add("state", state)
                .add("error_msg", "this task will be ignored when job is: " + state)
                .build());
            return;
        }

        if (finishedTaskIds.contains(attachment.getTaskId())) {
            LOG.warn(new LogBuilder(LogKey.LOAD_JOB, id)
                .add("task_id", attachment.getTaskId())
                .add("error_msg", "this is a duplicated callback of pending task "
                    + "when broker already has loading task")
                .build());
            return;
        }

        // add task id into finishedTaskIds
        finishedTaskIds.add(attachment.getTaskId());

        sparkLoadAppHandle = attachment.getHandle();
        appId = attachment.getAppId();
        etlOutputPath = attachment.getOutputPath();

        executeEtl();
        // log etl state
        unprotectedLogUpdateStateInfo();
    } finally {
        writeUnlock();
    }
}
@Test
public void testOnPendingTaskFinished(@Mocked GlobalStateMgr globalStateMgr, @Injectable String originStmt)
    throws MetaNotFoundException {
    ResourceDesc resourceDesc = new ResourceDesc(resourceName, Maps.newHashMap());
    SparkLoadJob job = new SparkLoadJob(dbId, label, resourceDesc, new OriginStatement(originStmt, 0));
    SparkPendingTaskAttachment attachment = new SparkPendingTaskAttachment(pendingTaskId);
    attachment.setAppId(appId);
    attachment.setOutputPath(etlOutputPath);
    job.onTaskFinished(attachment);

    // check pending task finish
    Assert.assertTrue(job.finishedTaskIds.contains(pendingTaskId));
    Assert.assertEquals(appId, Deencapsulation.getField(job, "appId"));
    Assert.assertEquals(etlOutputPath, Deencapsulation.getField(job, "etlOutputPath"));
    Assert.assertEquals(JobState.ETL, job.getState());
}
public List<String> listXAttrs(final Path path) throws IOException {
    return new ArrayList<>(retrieveHeaders(path, INVOCATION_OP_XATTR_LIST)
        .keySet());
}
@Test
public void testListXAttrKeys() throws Throwable {
    List<String> xAttrs = headerProcessing.listXAttrs(MAGIC_PATH);
    Assertions.assertThat(xAttrs)
        .describedAs("Attribute keys")
        .contains(RETRIEVED_XATTRS);
}
@Override
@NonNull
public Mono<Void> handle(@NonNull final ServerWebExchange exchange, @NonNull final Throwable throwable) {
    LOG.error("handle error: {} formatError:{} throwable:", exchange.getLogPrefix(),
        formatError(throwable, exchange.getRequest()), throwable);
    HttpStatusCode httpStatusCode;
    Object errorResult;
    String errorMsg = "";
    if (throwable instanceof IllegalArgumentException) {
        httpStatusCode = HttpStatus.BAD_REQUEST;
        errorResult = ShenyuResultWrap.error(exchange, httpStatusCode.value(), throwable.getMessage(), null);
        errorMsg = throwable.getMessage();
    } else if (throwable instanceof ResponseStatusException) {
        httpStatusCode = ((ResponseStatusException) throwable).getStatusCode();
        HttpStatus httpStatus = (HttpStatus) httpStatusCode;
        String errMsg = StringUtils.hasLength(((ResponseStatusException) throwable).getReason())
            ? ((ResponseStatusException) throwable).getReason() : httpStatus.getReasonPhrase();
        errorResult = ShenyuResultWrap.error(exchange, httpStatusCode.value(), errMsg, null);
        errorMsg = errMsg;
    } else {
        httpStatusCode = HttpStatus.INTERNAL_SERVER_ERROR;
        HttpStatus httpStatus = (HttpStatus) httpStatusCode;
        errorResult = ShenyuResultWrap.error(exchange, httpStatusCode.value(), httpStatus.getReasonPhrase(), null);
        errorMsg = httpStatus.getReasonPhrase();
    }
    exchange.getResponse().setStatusCode(httpStatusCode);
    Map<String, String> labels = new HashMap<>(8);
    labels.put("global", "error");
    labels.put("component", "gateway");
    AlarmSender.alarmMediumCritical("ShenYu-Gateway-Global-Error", errorMsg, labels);
    return WebFluxResultUtils.result(exchange, errorResult);
}
@Test
public void getErrorAttributes() {
    doNothing().when(loggerSpy).error(anyString());
    ServerWebExchange webExchange = MockServerWebExchange.from(
        MockServerHttpRequest.get("http://localhost:8080/favicon.ico"));
    NullPointerException nullPointerException = new NullPointerException("nullPointerException");
    Mono<Void> response = globalErrorHandler.handle(webExchange, nullPointerException);
    assertNotNull(response);
    assertNotNull(globalErrorHandler.handle(webExchange, new ResponseStatusException(HttpStatus.BAD_REQUEST)));
}
@Override
public Set<String> getInputMetrics() {
    return inputMetricKeys;
}
@Test
public void input_metrics_can_be_empty() {
    MeasureComputer.MeasureComputerDefinition measureComputer = new MeasureComputerDefinitionImpl.BuilderImpl()
        .setInputMetrics()
        .setOutputMetrics("comment_density_1", "comment_density_2")
        .build();

    assertThat(measureComputer.getInputMetrics()).isEmpty();
}
public List<ChangeStreamRecord> toChangeStreamRecords(
    PartitionMetadata partition,
    ChangeStreamResultSet resultSet,
    ChangeStreamResultSetMetadata resultSetMetadata) {
    if (this.isPostgres()) {
        // In PostgreSQL, change stream records are returned as JSONB.
        return Collections.singletonList(
            toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata));
    }
    // In GoogleSQL, change stream records are returned as an array of structs.
    return resultSet.getCurrentRowAsStruct().getStructList(0).stream()
        .flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata))
        .collect(Collectors.toList());
}
@Test
public void testMappingJsonRowToChildPartitionRecord() {
    final ChildPartitionsRecord childPartitionsRecord = new ChildPartitionsRecord(
        Timestamp.ofTimeSecondsAndNanos(10L, 20),
        "1",
        Arrays.asList(
            new ChildPartition("childToken1", Sets.newHashSet("parentToken1", "parentToken2")),
            new ChildPartition("childToken2", Sets.newHashSet("parentToken1", "parentToken2"))),
        null);
    final String jsonString = recordToJson(childPartitionsRecord, false, false);

    assertNotNull(jsonString);
    ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
    when(resultSet.getPgJsonb(0)).thenReturn(jsonString);
    assertEquals(
        Collections.singletonList(childPartitionsRecord),
        mapperPostgres.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
}
public static void cleanupInternalTopicSchemas(
    final String applicationId,
    final SchemaRegistryClient schemaRegistryClient) {
    getInternalSubjectNames(applicationId, schemaRegistryClient)
        .forEach(subject -> tryDeleteInternalSubject(applicationId, schemaRegistryClient, subject));
}
@Test
public void shouldNotDeleteOtherSchemas() throws Exception {
    // Given:
    when(schemaRegistryClient.getAllSubjects()).thenReturn(ImmutableList.of(
        "SOME-other-key",
        "SOME-other-value"
    ));

    // When:
    SchemaRegistryUtil.cleanupInternalTopicSchemas(APP_ID, schemaRegistryClient);

    // Then no exception, and:
    verify(schemaRegistryClient, never()).deleteSubject(any());
}
public Optional<Long> getActualRowCount() {
    if (null == rowCountSegment) {
        return Optional.empty();
    }
    return Optional.of(rowCountSegment.isBoundOpened() ? actualRowCount + 1L : actualRowCount);
}
@Test
void assertGetActualRowCountWithNumberLiteralPaginationValueSegment() {
    assertThat(new PaginationContext(getOffsetSegmentWithNumberLiteralPaginationValueSegment(),
        getRowCountSegmentWithNumberLiteralPaginationValueSegment(), getParameters()).getActualRowCount().orElse(null),
        is(20L));
}
@Nullable
public static TraceContextOrSamplingFlags parseB3SingleFormat(CharSequence b3) {
    return parseB3SingleFormat(b3, 0, b3.length());
}
@Test
void parseTraceparentFormat_padded_right() {
    assertThat(
        parseB3SingleFormat(traceIdHigh + "0000000000000000-" + spanId + "-1-" + parentId).context()
    ).isEqualToComparingFieldByField(TraceContext.newBuilder()
        .traceIdHigh(Long.parseUnsignedLong(traceIdHigh, 16))
        .parentId(Long.parseUnsignedLong(parentId, 16))
        .spanId(Long.parseUnsignedLong(spanId, 16))
        .sampled(true).build()
    );
}
@Override
public void updateService(String serviceName, String groupName, float protectThreshold) throws NacosException {
    Service service = new Service();
    service.setName(serviceName);
    service.setGroupName(groupName);
    service.setProtectThreshold(protectThreshold);
    updateService(service, new NoneSelector());
}
@Test
void testUpdateService2() throws NacosException {
    //given
    String serviceName = "service1";
    String groupName = "groupName";
    float protectThreshold = 0.1f;
    Map<String, String> meta = new HashMap<>();
    meta.put("k", "v");
    //when
    nacosNamingMaintainService.updateService(serviceName, groupName, protectThreshold, meta);
    //then
    verify(serverProxy, times(1)).updateService(argThat(new ArgumentMatcher<Service>() {
        @Override
        public boolean matches(Service service) {
            return service.getName().equals(serviceName) && service.getGroupName().equals(groupName)
                && Math.abs(service.getProtectThreshold() - protectThreshold) < 0.1f
                && service.getMetadata().size() == 1;
        }
    }), argThat(o -> o instanceof NoneSelector));
}
@Override
public SchemaKTable<GenericKey> aggregate(
    final List<ColumnName> nonAggregateColumns,
    final List<FunctionCall> aggregations,
    final Optional<WindowExpression> windowExpression,
    final FormatInfo valueFormat,
    final Stacker contextStacker
) {
    if (windowExpression.isPresent()) {
        throw new KsqlException("Windowing not supported for table aggregations.");
    }

    final List<String> unsupportedFunctionNames = aggregations.stream()
        .map(call -> UdafUtil.resolveAggregateFunction(functionRegistry, call, schema, ksqlConfig))
        .filter(function -> !(function instanceof TableAggregationFunction))
        .map(KsqlAggregateFunction::name)
        .map(FunctionName::text)
        .distinct()
        .collect(Collectors.toList());
    if (!unsupportedFunctionNames.isEmpty()) {
        final String postfix = unsupportedFunctionNames.size() == 1 ? "" : "s";
        throw new KsqlException("The aggregation function" + postfix + " "
            + GrammaticalJoiner.and().join(unsupportedFunctionNames)
            + " cannot be applied to a table source, only to a stream source.");
    }

    final TableAggregate step = ExecutionStepFactory.tableAggregate(
        contextStacker,
        sourceTableStep,
        InternalFormats.of(keyFormat, valueFormat),
        nonAggregateColumns,
        aggregations
    );
    return new SchemaKTable<>(
        step,
        resolveSchema(step),
        keyFormat,
        ksqlConfig,
        functionRegistry
    );
}
@Test
public void shouldFailWindowedTableAggregation() {
    // Given:
    final WindowExpression windowExp = mock(WindowExpression.class);
    final SchemaKGroupedTable groupedTable = buildSchemaKGroupedTable();

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> groupedTable.aggregate(
            NON_AGG_COLUMNS,
            ImmutableList.of(SUM, COUNT),
            Optional.of(windowExp),
            valueFormat.getFormatInfo(),
            queryContext
        )
    );

    // Then:
    assertThat(e.getMessage(), containsString("Windowing not supported for table aggregations."));
}
@Override
public PostgreSQLPacketPayload createPacketPayload(final ByteBuf message, final Charset charset) {
    return new PostgreSQLPacketPayload(message, charset);
}
@Test
void assertCreatePacketPayload() {
    assertThat(new OpenGaussPacketCodecEngine().createPacketPayload(byteBuf, StandardCharsets.UTF_8).getByteBuf(), is(byteBuf));
}
public PDOutlineItem getFirstChild() {
    return getOutlineItem(COSName.FIRST);
}
@Test
void nullFirstChild() {
    assertNull(root.getFirstChild());
}
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    return api.send(request);
}
@Test
public void underlineStrikethroughMarkdown() {
    String cap = "__under1__ ~strike1~ __~nested~__";
    ParseMode parseMode = ParseMode.MarkdownV2;
    SendAudio sendAudio = new SendAudio(chatId, audioFileId).caption(cap).parseMode(parseMode);

    Message message = bot.execute(sendAudio).message();
    MessageTest.checkMessage(message);

    String htmlCaption = cap.replace("__", "").replace("~", "");
    assertEquals(htmlCaption, message.caption());
    assertEquals(4, message.captionEntities().length);

    MessageEntity captionEntity = message.captionEntities()[0];
    assertEquals(MessageEntity.Type.underline, captionEntity.type());
    assertEquals((Integer) 0, captionEntity.offset());
    assertEquals((Integer) 6, captionEntity.length());

    captionEntity = message.captionEntities()[1];
    assertEquals(MessageEntity.Type.strikethrough, captionEntity.type());
    assertEquals((Integer) 7, captionEntity.offset());
    assertEquals((Integer) 7, captionEntity.length());

    captionEntity = message.captionEntities()[2];
    assertEquals(MessageEntity.Type.underline, captionEntity.type());
    assertEquals((Integer) 15, captionEntity.offset());
    assertEquals((Integer) 6, captionEntity.length());

    captionEntity = message.captionEntities()[3];
    assertEquals(MessageEntity.Type.strikethrough, captionEntity.type());
    assertEquals((Integer) 15, captionEntity.offset());
    assertEquals((Integer) 6, captionEntity.length());
}
public static Builder forCurrentMagic(ProduceRequestData data) {
    return forMagic(RecordBatch.CURRENT_MAGIC_VALUE, data);
}
@Test
public void testV3AndAboveCannotHaveNoRecordBatches() {
    ProduceRequest.Builder requestBuilder = ProduceRequest.forCurrentMagic(new ProduceRequestData()
        .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList(
            new ProduceRequestData.TopicProduceData()
                .setName("test")
                .setPartitionData(Collections.singletonList(
                    new ProduceRequestData.PartitionProduceData()
                        .setIndex(0)
                        .setRecords(MemoryRecords.EMPTY)))).iterator()))
        .setAcks((short) 1)
        .setTimeoutMs(5000));
    assertThrowsForAllVersions(requestBuilder, InvalidRecordException.class);
}
public <V> Iterable<V> getAll(TupleTag<V> tag) {
    int index = schema.getIndex(tag);
    if (index < 0) {
        throw new IllegalArgumentException("TupleTag " + tag + " is not in the schema");
    }
    @SuppressWarnings("unchecked")
    Iterable<V> unions = (Iterable<V>) valueMap.get(index);
    return unions;
}
@Test
public void testLazyResults() {
    TestUnionValues values = new TestUnionValues(0, 0, 1, 1, 0, 1, 1);
    CoGbkResult result = new CoGbkResult(createSchema(5), values, 0, 2);

    // Nothing is read until we try to iterate.
    assertThat(values.maxPos(), equalTo(0));
    Iterable<?> tag0iterable = result.getAll("tag0");
    assertThat(values.maxPos(), equalTo(0));
    Iterator<?> ignored = tag0iterable.iterator();
    assertThat(values.maxPos(), equalTo(0));

    // Iterating reads (nearly) the minimal number of values.
    Iterator<?> tag0 = tag0iterable.iterator();
    tag0.next();
    assertThat(values.maxPos(), lessThanOrEqualTo(2));
    tag0.next();
    assertThat(values.maxPos(), equalTo(2));
    // Note that we're skipping over tag 1.
    tag0.next();
    assertThat(values.maxPos(), equalTo(5));

    // Iterating again does not cause more reads.
    Iterator<?> tag0iterAgain = tag0iterable.iterator();
    tag0iterAgain.next();
    tag0iterAgain.next();
    tag0iterAgain.next();
    assertThat(values.maxPos(), equalTo(5));

    // Iterating over other tags does not cause more reads for values we have seen.
    Iterator<?> tag1 = result.getAll("tag1").iterator();
    tag1.next();
    tag1.next();
    assertThat(values.maxPos(), equalTo(5));

    // However, finding the next tag1 value does require more reads.
    tag1.next();
    assertThat(values.maxPos(), equalTo(6));
}
@VisibleForTesting
ResourceGroupNamespaceConfigListener getRgNamespaceConfigListener() {
    return rgNamespaceConfigListener;
}
@Test
public void testNewResourceGroupNamespaceConfigListener() {
    PulsarService pulsarService = mock(PulsarService.class);
    PulsarResources pulsarResources = mock(PulsarResources.class);
    doReturn(pulsarResources).when(pulsarService).getPulsarResources();

    ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor();
    doReturn(scheduledExecutorService).when(pulsarService).getExecutor();

    ResourceGroupService resourceGroupService = mock(ResourceGroupService.class);
    ResourceGroupResources resourceGroupResources = mock(ResourceGroupResources.class);
    RuntimeException exception = new RuntimeException("listResourceGroupsAsync error");
    doReturn(CompletableFuture.failedFuture(exception))
        .when(resourceGroupResources).listResourceGroupsAsync();
    doReturn(mock(MetadataStore.class))
        .when(resourceGroupResources).getStore();
    doReturn(resourceGroupResources).when(pulsarResources).getResourcegroupResources();

    ServiceConfiguration serviceConfiguration = new ServiceConfiguration();
    doReturn(serviceConfiguration).when(pulsarService).getConfiguration();

    ResourceGroupConfigListener resourceGroupConfigListener =
        new ResourceGroupConfigListener(resourceGroupService, pulsarService);

    // getResourcegroupResources() returns an error, so the ResourceGroupNamespaceConfigListener is not created.
    Awaitility.await().pollDelay(3, TimeUnit.SECONDS).untilAsserted(() -> {
        assertNull(resourceGroupConfigListener.getRgNamespaceConfigListener());
    });

    // Now the ResourceGroupNamespaceConfigListener will be created, using the real pulsar resources.
    doReturn(CompletableFuture.completedFuture(new ArrayList<String>()))
        .when(resourceGroupResources).listResourceGroupsAsync();
    doReturn(pulsar.getPulsarResources()).when(pulsarService).getPulsarResources();
    Awaitility.await().untilAsserted(() -> {
        assertNotNull(resourceGroupConfigListener.getRgNamespaceConfigListener());
    });
}