focal_method | test_case
protected static List<LastOpenedDTO> filterForExistingIdAndCapAtMaximum(final LastOpenedForUserDTO loi, final GRN grn, final long max) {
return loi.items().stream().filter(i -> !i.grn().equals(grn)).limit(max - 1).toList();
}
|
@Test
public void testCapAtMaximumMinusOneSoYouCanAddANewElement() {
var list = List.of(
new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "1"), DateTime.now(DateTimeZone.UTC)),
new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "2"), DateTime.now(DateTimeZone.UTC)),
new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "3"), DateTime.now(DateTimeZone.UTC)),
new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "4"), DateTime.now(DateTimeZone.UTC)),
new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "5"), DateTime.now(DateTimeZone.UTC)),
new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "6"), DateTime.now(DateTimeZone.UTC)),
new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "7"), DateTime.now(DateTimeZone.UTC)),
new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "8"), DateTime.now(DateTimeZone.UTC)),
new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "9"), DateTime.now(DateTimeZone.UTC)),
new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "10"), DateTime.now(DateTimeZone.UTC))
);
LastOpenedForUserDTO dto = new LastOpenedForUserDTO("userId", list);
var result = StartPageService.filterForExistingIdAndCapAtMaximum(dto, grnRegistry.newGRN(GRNTypes.DASHBOARD, "11"), MAX);
assertThat(result.size()).isEqualTo(9);
}
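A minimal standalone sketch of the same filter-then-cap pattern, using plain strings instead of LastOpenedDTO/GRN (the class and names below are illustrative, not from the source): dropping any entry equal to the incoming element and keeping at most max - 1 items leaves room to add the new element without exceeding max.
import java.util.List;

public final class CapListDemo {
    // Drop any entry equal to the new element, then keep at most max - 1
    // entries so the new element can be added without exceeding max.
    static List<String> filterAndCap(List<String> items, String newItem, long max) {
        return items.stream()
                .filter(i -> !i.equals(newItem))
                .limit(max - 1)
                .toList();
    }

    public static void main(String[] args) {
        List<String> items = List.of("a", "b", "c", "d", "e");
        // Size 5 capped to 4: room for the new element under a maximum of 5.
        System.out.println(filterAndCap(items, "f", 5)); // [a, b, c, d]
    }
}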
|
private Set<Long> getUnavailableBeIdsInGroup(SystemInfoService infoService, ColocateTableIndex colocateIndex,
GroupId groupId) {
Set<Long> backends = colocateIndex.getBackendsByGroup(groupId);
Set<Long> unavailableBeIds = Sets.newHashSet();
for (Long backendId : backends) {
if (!checkBackendAvailable(backendId, infoService)) {
unavailableBeIds.add(backendId);
}
}
return unavailableBeIds;
}
|
@Test
public void testGetUnavailableBeIdsInGroup() {
GroupId groupId = new GroupId(10000, 10001);
List<Long> allBackendsInGroup = Lists.newArrayList(1L, 2L, 3L, 4L, 5L);
List<List<Long>> backendsPerBucketSeq = new ArrayList<>();
backendsPerBucketSeq.add(allBackendsInGroup);
ColocateTableIndex colocateTableIndex = new ColocateTableIndex();
SystemInfoService infoService = new SystemInfoService();
colocateTableIndex.addBackendsPerBucketSeq(groupId, backendsPerBucketSeq);
Backend be2 = new Backend(2L, "", 2002);
be2.setAlive(true);
infoService.replayAddBackend(be2);
Backend be3 = new Backend(3L, "", 3003);
be3.setAlive(false);
be3.setLastUpdateMs(System.currentTimeMillis() - Config.tablet_sched_colocate_be_down_tolerate_time_s * 1000 * 2);
infoService.replayAddBackend(be3);
Backend be4 = new Backend(4L, "", 4004);
be4.setAlive(false);
be4.setLastUpdateMs(System.currentTimeMillis());
infoService.replayAddBackend(be4);
Backend be5 = new Backend(5L, "", 5005);
be5.setDecommissioned(true);
infoService.replayAddBackend(be5);
GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getIdToBackend();
Set<Long> unavailableBeIds = Deencapsulation
.invoke(balancer, "getUnavailableBeIdsInGroup", infoService, colocateTableIndex, groupId);
Assert.assertArrayEquals(new long[] {1L, 3L, 5L},
unavailableBeIds.stream().mapToLong(i -> i).sorted().toArray());
}
|
public <T> HttpResponse<T> httpRequest(String url, String method, HttpHeaders headers, Object requestBodyData,
TypeReference<T> responseFormat) {
return httpRequest(url, method, headers, requestBodyData, responseFormat, null, null);
}
|
@Test
public void testNonEmptyResponseWithVoidResponseType() throws Exception {
int statusCode = Response.Status.OK.getStatusCode();
Request req = mock(Request.class);
ContentResponse resp = mock(ContentResponse.class);
when(resp.getContentAsString()).thenReturn(toJsonString(TEST_DTO));
setupHttpClient(statusCode, req, resp);
TypeReference<Void> voidResponse = new TypeReference<Void>() { };
RestClient.HttpResponse<Void> httpResp = httpRequest(
httpClient, MOCK_URL, TEST_METHOD, voidResponse, TEST_SIGNATURE_ALGORITHM
);
assertEquals(statusCode, httpResp.status());
assertNull(httpResp.body());
}
|
@VisibleForTesting
void validateParentMenu(Long parentId, Long childId) {
if (parentId == null || ID_ROOT.equals(parentId)) {
return;
}
// A menu cannot be its own parent
if (parentId.equals(childId)) {
throw exception(MENU_PARENT_ERROR);
}
MenuDO menu = menuMapper.selectById(parentId);
// Parent menu does not exist
if (menu == null) {
throw exception(MENU_PARENT_NOT_EXISTS);
}
// The parent menu must be of directory or menu type
if (!MenuTypeEnum.DIR.getType().equals(menu.getType())
&& !MenuTypeEnum.MENU.getType().equals(menu.getType())) {
throw exception(MENU_PARENT_NOT_DIR_OR_MENU);
}
}
|
@Test
public void testValidateParentMenu_parentNotExist() {
// Invoke and assert the exception
assertServiceException(() -> menuService.validateParentMenu(randomLongId(), null),
MENU_PARENT_NOT_EXISTS);
}
|
public static <T> Key<T> newKey(String name) {
return new Key<>(name);
}
|
@Test
void newKeyFailsOnNull() {
assertThrows(NullPointerException.class, () -> SessionContext.newKey(null));
}
|
static ParseResult parse(final int javaMajorVersion, final BufferedReader br) throws IOException {
final ParseResult result = new ParseResult();
int lineNumber = 0;
while (true) {
final String line = br.readLine();
lineNumber++;
if (line == null) {
break;
}
try {
jvmOptionFromLine(javaMajorVersion, line).ifPresent(result::appendOption);
} catch (IllegalArgumentException e) {
result.appendError(lineNumber, line);
}
}
return result;
}
|
@Test
public void testParseOptionVersionRange() throws IOException {
JvmOptionsParser.ParseResult res = JvmOptionsParser.parse(11, asReader("10-11:-XX:+UseConcMarkSweepGC"));
verifyOptions("Option must be present for Java 11", "-XX:+UseConcMarkSweepGC", res);
res = JvmOptionsParser.parse(14, asReader("10-11:-XX:+UseConcMarkSweepGC"));
assertTrue("No option match outside the range [10-11]", res.getJvmOptions().isEmpty());
}
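jvmOptionFromLine is not shown above; the following is a simplified sketch of the version-range matching the test exercises (the real Elasticsearch parser supports more forms, such as open-ended ranges, so this is an assumption-laden illustration only):
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class VersionRangeDemo {
    // Matches lines such as "10-11:-XX:+UseConcMarkSweepGC" and returns the
    // option only when the running major version falls inside the range.
    private static final Pattern RANGE = Pattern.compile("(\\d+)-(\\d+):(.*)");

    static Optional<String> optionForVersion(int javaMajorVersion, String line) {
        Matcher m = RANGE.matcher(line);
        if (!m.matches()) {
            return Optional.empty();
        }
        int lower = Integer.parseInt(m.group(1));
        int upper = Integer.parseInt(m.group(2));
        return javaMajorVersion >= lower && javaMajorVersion <= upper
                ? Optional.of(m.group(3))
                : Optional.empty();
    }

    public static void main(String[] args) {
        System.out.println(optionForVersion(11, "10-11:-XX:+UseConcMarkSweepGC")); // Optional[-XX:+UseConcMarkSweepGC]
        System.out.println(optionForVersion(14, "10-11:-XX:+UseConcMarkSweepGC")); // Optional.empty
    }
}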
|
public FEELFnResult<Boolean> invoke(@ParameterName( "point" ) Comparable point, @ParameterName( "range" ) Range range) {
if ( point == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null"));
}
if ( range == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null"));
}
try {
boolean result = ( range.getLowBoundary() == Range.RangeBoundary.CLOSED && point.compareTo( range.getLowEndPoint() ) == 0 );
return FEELFnResult.ofResult( result );
} catch( Exception e ) {
// points are not comparable
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range"));
}
}
|
@Test
void invokeParamIsNull() {
FunctionTestUtil.assertResultError(startsFunction.invoke((Comparable) null, new RangeImpl()), InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(startsFunction.invoke("a", null), InvalidParametersEvent.class);
}
|
public static EndpointResponse mapException(final Throwable exception) {
if (exception instanceof KsqlRestException) {
final KsqlRestException restException = (KsqlRestException) exception;
return restException.getResponse();
}
return EndpointResponse.create()
.status(INTERNAL_SERVER_ERROR.code())
.type("application/json")
.entity(new KsqlErrorMessage(Errors.ERROR_CODE_SERVER_ERROR, exception))
.build();
}
|
@Test
public void shouldReturnCorrectResponseForUnspecificException() {
final EndpointResponse response = OldApiUtils
.mapException(new Exception("error msg"));
assertThat(response.getEntity(), instanceOf(KsqlErrorMessage.class));
final KsqlErrorMessage errorMessage = (KsqlErrorMessage) response.getEntity();
assertThat(errorMessage.getMessage(), equalTo("error msg"));
assertThat(errorMessage.getErrorCode(), equalTo(Errors.ERROR_CODE_SERVER_ERROR));
assertThat(response.getStatus(), equalTo(INTERNAL_SERVER_ERROR.code()));
}
|
static boolean isTableUsingInstancePoolAndReplicaGroup(@Nonnull TableConfig tableConfig) {
boolean status = true;
Map<String, InstanceAssignmentConfig> instanceAssignmentConfigMap = tableConfig.getInstanceAssignmentConfigMap();
if (instanceAssignmentConfigMap != null) {
for (InstanceAssignmentConfig instanceAssignmentConfig : instanceAssignmentConfigMap.values()) {
if (instanceAssignmentConfig != null) {
status &= (instanceAssignmentConfig.getTagPoolConfig().isPoolBased()
&& instanceAssignmentConfig.getReplicaGroupPartitionConfig().isReplicaGroupBased());
} else {
status = false;
}
}
} else {
status = false;
}
return status;
}
|
@Test
public void testNoIACOfflineTable() {
TableConfig tableConfig =
new TableConfig("table", TableType.OFFLINE.name(), new SegmentsValidationAndRetentionConfig(),
new TenantConfig("DefaultTenant", "DefaultTenant", null), new IndexingConfig(), new TableCustomConfig(null),
null, null, null, null, null, null, null, null, null, null, null, false, null, null, null);
Assert.assertFalse(TableConfigUtils.isTableUsingInstancePoolAndReplicaGroup(tableConfig));
}
|
String prependInstruction(String text, Context context) {
if (prependQuery != null && !prependQuery.isEmpty() && context.getDestination().startsWith("query")) {
return prependQuery + " " + text;
}
if (prependDocument != null && !prependDocument.isEmpty()) {
return prependDocument + " " + text;
}
return text;
}
|
@Test
public void testPrepend() {
var context = new Embedder.Context("schema.indexing");
String input = "This is a test";
var embedder = getNormalizePrefixdEmbedder();
var result = embedder.prependInstruction(input, context);
assertEquals("This is a document: This is a test", result);
var queryContext = new Embedder.Context("query.qt");
var queryResult = embedder.prependInstruction(input, queryContext);
assertEquals("Represent this text: This is a test", queryResult);
}
|
Config targetConfig(Config sourceConfig, boolean incremental) {
// If using incrementalAlterConfigs API, sync the default property with either SET or DELETE action determined by ConfigPropertyFilter::shouldReplicateSourceDefault later.
// If not using incrementalAlterConfigs API, sync the default property only if ConfigPropertyFilter::shouldReplicateSourceDefault returns true.
// If ConfigPropertyFilter::shouldReplicateConfigProperty returns false, do not sync the property at all.
List<ConfigEntry> entries = sourceConfig.entries().stream()
.filter(x -> incremental || (x.isDefault() && shouldReplicateSourceDefault(x.name())) || !x.isDefault())
.filter(x -> !x.isReadOnly() && !x.isSensitive())
.filter(x -> x.source() != ConfigEntry.ConfigSource.STATIC_BROKER_CONFIG)
.filter(x -> shouldReplicateTopicConfigurationProperty(x.name()))
.collect(Collectors.toList());
return new Config(entries);
}
|
@Test
@Deprecated
public void testConfigPropertyFilteringWithAlterConfigs() {
MirrorSourceConnector connector = new MirrorSourceConnector(new SourceAndTarget("source", "target"),
new DefaultReplicationPolicy(), x -> true, new DefaultConfigPropertyFilter());
List<ConfigEntry> entries = new ArrayList<>();
entries.add(new ConfigEntry("name-1", "value-1"));
// When "use.defaults.from" set to "target" by default, the config with default value should be excluded
entries.add(new ConfigEntry("name-2", "value-2", ConfigEntry.ConfigSource.DEFAULT_CONFIG, false, false, Collections.emptyList(), ConfigEntry.ConfigType.STRING, ""));
entries.add(new ConfigEntry("min.insync.replicas", "2"));
Config config = new Config(entries);
Config targetConfig = connector.targetConfig(config, false);
assertTrue(targetConfig.entries().stream()
.anyMatch(x -> x.name().equals("name-1")), "should replicate properties");
assertFalse(targetConfig.entries().stream()
.anyMatch(x -> x.name().equals("name-2")), "should not replicate default properties");
assertFalse(targetConfig.entries().stream()
.anyMatch(x -> x.name().equals("min.insync.replicas")), "should not replicate excluded properties");
}
|
@Override
public Endpoint<Http2RemoteFlowController> remote() {
return remoteEndpoint;
}
|
@Test
public void clientRemoteIncrementAndGetStreamShouldRespectOverflow() throws Http2Exception {
incrementAndGetStreamShouldRespectOverflow(client.remote(), MAX_VALUE - 1);
}
|
@Override
public ExportResult<MediaContainerResource> export(UUID jobId, AD authData,
Optional<ExportInformation> exportInfo) throws Exception {
ExportResult<PhotosContainerResource> per = exportPhotos(jobId, authData, exportInfo);
if (per.getThrowable().isPresent()) {
return new ExportResult<>(per.getThrowable().get());
}
ExportResult<VideosContainerResource> ver = exportVideos(jobId, authData, exportInfo);
if (ver.getThrowable().isPresent()) {
return new ExportResult<>(ver.getThrowable().get());
}
return mergeResults(per, ver);
}
|
@Test
public void shouldHandleOnlyPhotos() throws Exception {
MediaContainerResource mcr = new MediaContainerResource(albums, photos, null);
ExportResult<MediaContainerResource> exp = new ExportResult<>(ResultType.END, mcr);
Optional<ExportInformation> ei = Optional.of(new ExportInformation(null, mcr));
ExportResult<MediaContainerResource> res = mediaExporter.export(null, null, ei);
assertEquals(exp, res);
}
|
public static CreateSourceAsProperties from(final Map<String, Literal> literals) {
try {
return new CreateSourceAsProperties(literals, false);
} catch (final ConfigException e) {
final String message = e.getMessage().replace(
"configuration",
"property"
);
throw new KsqlException(message, e);
}
}
|
@Test
public void shouldThrowOnInvalidTimestampFormat() {
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> from(
of(TIMESTAMP_FORMAT_PROPERTY, new StringLiteral("invalid")))
);
// Then:
assertThat(e.getMessage(), containsString("Invalid datetime format for config:TIMESTAMP_FORMAT, reason:Unknown pattern letter: i"));
}
|
@Override
public Dictionary buildDictionary(VirtualColumnContext context) {
FieldSpec fieldSpec = context.getFieldSpec();
switch (fieldSpec.getDataType().getStoredType()) {
case INT:
return new ConstantValueIntDictionary((int) fieldSpec.getDefaultNullValue());
case LONG:
return new ConstantValueLongDictionary((long) fieldSpec.getDefaultNullValue());
case FLOAT:
return new ConstantValueFloatDictionary((float) fieldSpec.getDefaultNullValue());
case DOUBLE:
return new ConstantValueDoubleDictionary((double) fieldSpec.getDefaultNullValue());
case BIG_DECIMAL:
return new ConstantValueBigDecimalDictionary((BigDecimal) fieldSpec.getDefaultNullValue());
case STRING:
return new ConstantValueStringDictionary((String) fieldSpec.getDefaultNullValue());
case BYTES:
return new ConstantValueBytesDictionary((byte[]) fieldSpec.getDefaultNullValue());
default:
throw new IllegalStateException();
}
}
|
@Test
public void testBuildDictionary() {
VirtualColumnContext virtualColumnContext = new VirtualColumnContext(SV_INT, 1);
Dictionary dictionary = new DefaultNullValueVirtualColumnProvider().buildDictionary(virtualColumnContext);
assertEquals(dictionary.getClass(), ConstantValueIntDictionary.class);
assertEquals(dictionary.getIntValue(0), Integer.MIN_VALUE);
virtualColumnContext = new VirtualColumnContext(SV_LONG, 1);
dictionary = new DefaultNullValueVirtualColumnProvider().buildDictionary(virtualColumnContext);
assertEquals(dictionary.getClass(), ConstantValueLongDictionary.class);
assertEquals(dictionary.getLongValue(0), Long.MIN_VALUE);
virtualColumnContext = new VirtualColumnContext(SV_FLOAT, 1);
dictionary = new DefaultNullValueVirtualColumnProvider().buildDictionary(virtualColumnContext);
assertEquals(dictionary.getClass(), ConstantValueFloatDictionary.class);
assertEquals(dictionary.getFloatValue(0), Float.NEGATIVE_INFINITY);
virtualColumnContext = new VirtualColumnContext(SV_DOUBLE, 1);
dictionary = new DefaultNullValueVirtualColumnProvider().buildDictionary(virtualColumnContext);
assertEquals(dictionary.getClass(), ConstantValueDoubleDictionary.class);
assertEquals(dictionary.getDoubleValue(0), Double.NEGATIVE_INFINITY);
virtualColumnContext = new VirtualColumnContext(SV_STRING, 1);
dictionary = new DefaultNullValueVirtualColumnProvider().buildDictionary(virtualColumnContext);
assertEquals(dictionary.getClass(), ConstantValueStringDictionary.class);
assertEquals(dictionary.getStringValue(0), "null");
virtualColumnContext = new VirtualColumnContext(SV_BYTES, 1);
dictionary = new DefaultNullValueVirtualColumnProvider().buildDictionary(virtualColumnContext);
assertEquals(dictionary.getClass(), ConstantValueBytesDictionary.class);
assertEquals(dictionary.getBytesValue(0), new byte[0]);
virtualColumnContext = new VirtualColumnContext(MV_INT, 1);
dictionary = new DefaultNullValueVirtualColumnProvider().buildDictionary(virtualColumnContext);
assertEquals(dictionary.getClass(), ConstantValueIntDictionary.class);
assertEquals(dictionary.getIntValue(0), Integer.MIN_VALUE);
virtualColumnContext = new VirtualColumnContext(MV_LONG, 1);
dictionary = new DefaultNullValueVirtualColumnProvider().buildDictionary(virtualColumnContext);
assertEquals(dictionary.getClass(), ConstantValueLongDictionary.class);
assertEquals(dictionary.getLongValue(0), Long.MIN_VALUE);
virtualColumnContext = new VirtualColumnContext(MV_FLOAT, 1);
dictionary = new DefaultNullValueVirtualColumnProvider().buildDictionary(virtualColumnContext);
assertEquals(dictionary.getClass(), ConstantValueFloatDictionary.class);
assertEquals(dictionary.getFloatValue(0), Float.NEGATIVE_INFINITY);
virtualColumnContext = new VirtualColumnContext(MV_DOUBLE, 1);
dictionary = new DefaultNullValueVirtualColumnProvider().buildDictionary(virtualColumnContext);
assertEquals(dictionary.getClass(), ConstantValueDoubleDictionary.class);
assertEquals(dictionary.getDoubleValue(0), Double.NEGATIVE_INFINITY);
virtualColumnContext = new VirtualColumnContext(MV_STRING, 1);
dictionary = new DefaultNullValueVirtualColumnProvider().buildDictionary(virtualColumnContext);
assertEquals(dictionary.getClass(), ConstantValueStringDictionary.class);
assertEquals(dictionary.getStringValue(0), "null");
}
|
@Udf(description = "Returns the cosine of an INT value")
public Double cos(
@UdfParameter(
value = "value",
description = "The value in radians to get the cosine of."
) final Integer value
) {
return cos(value == null ? null : value.doubleValue());
}
|
@Test
public void shouldHandleNull() {
assertThat(udf.cos((Integer) null), is(nullValue()));
assertThat(udf.cos((Long) null), is(nullValue()));
assertThat(udf.cos((Double) null), is(nullValue()));
}
|
public static boolean isPowerOfTwo(long n) {
return (n > 0) && ((n & (n - 1)) == 0);
}
|
@Test
public void isPowerOfTwoTest() {
assertFalse(NumberUtil.isPowerOfTwo(-1));
assertTrue(NumberUtil.isPowerOfTwo(16));
assertTrue(NumberUtil.isPowerOfTwo(65536));
assertTrue(NumberUtil.isPowerOfTwo(1));
assertFalse(NumberUtil.isPowerOfTwo(17));
}
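The check relies on a bit identity: for n > 0, n & (n - 1) clears the lowest set bit, and a power of two has exactly one set bit, so the expression is zero only for powers of two. A self-contained sketch (the class name is illustrative):
public final class PowerOfTwoDemo {
    // n & (n - 1) drops the lowest set bit; a power of two has exactly
    // one set bit, so the expression is 0 only for powers of two.
    static boolean isPowerOfTwo(long n) {
        return (n > 0) && ((n & (n - 1)) == 0);
    }

    public static void main(String[] args) {
        System.out.println(isPowerOfTwo(1));   // true  (2^0, binary 1)
        System.out.println(isPowerOfTwo(16));  // true  (binary 10000)
        System.out.println(isPowerOfTwo(17));  // false (binary 10001, two set bits)
        System.out.println(isPowerOfTwo(-1));  // false (guarded by n > 0)
    }
}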
|
@Override
public TsunamiConfig loadConfig() {
Yaml yaml = new Yaml(new SafeConstructor(new LoaderOptions()));
Map<String, Object> rawYamlData = yaml.load(configFileReader());
return TsunamiConfig.fromYamlData(rawYamlData);
}
|
@Test
public void loadConfig_whenYamlFileNotFound_usesEmptyConfig() {
TsunamiConfig tsunamiConfig = new YamlConfigLoader().loadConfig();
assertThat(tsunamiConfig.getRawConfigData()).isEmpty();
}
|
public static Optional<Object[]> coerceParams(Class<?> currentIdxActualParameterType, Class<?> expectedParameterType, Object[] actualParams, int i) {
Object actualObject = actualParams[i];
Optional<Object> coercedObject = coerceParam(currentIdxActualParameterType, expectedParameterType,
actualObject);
return coercedObject.map(o -> actualCoerceParams(actualParams, o, i));
}
|
@Test
void coerceParamsCollectionToArrayConverted() {
Object item = "TESTED_OBJECT";
Object value = Collections.singleton(item);
Object[] actualParams1 = {value, "NOT_DATE"};
Optional<Object[]> retrieved = CoerceUtil.coerceParams(Set.class, String.class, actualParams1, 0);
assertNotNull(retrieved);
assertTrue(retrieved.isPresent());
Object[] retrievedObjects = retrieved.get();
assertEquals(item, retrievedObjects[0]);
assertEquals(actualParams1[1], retrievedObjects[1]);
item = LocalDate.now();
value = Collections.singleton(item);
Object[] actualParams2 = {value, "NOT_DATE"};
retrieved = CoerceUtil.coerceParams(Set.class, ZonedDateTime.class, actualParams2, 0);
assertNotNull(retrieved);
assertTrue(retrieved.isPresent());
retrievedObjects = retrieved.get();
assertTrue(retrievedObjects[0] instanceof ZonedDateTime);
ZonedDateTime zdtRetrieved = (ZonedDateTime) retrievedObjects[0];
assertEquals(item, zdtRetrieved.toLocalDate());
assertEquals(ZoneOffset.UTC, zdtRetrieved.getOffset());
assertEquals(0, zdtRetrieved.getHour());
assertEquals(0, zdtRetrieved.getMinute());
assertEquals(0, zdtRetrieved.getSecond());
assertEquals(actualParams2[1], retrievedObjects[1]);
}
|
@Override
public SelJodaDateTimeZone field(SelString field) {
String fieldName = field.getInternalVal();
if ("UTC".equals(fieldName)) {
return new SelJodaDateTimeZone(DateTimeZone.UTC);
}
throw new UnsupportedOperationException(type() + " DO NOT support accessing field: " + field);
}
|
@Test
public void testField() {
SelType res = one.field(SelString.of("UTC"));
assertEquals("DATETIME_ZONE: UTC", res.type() + ": " + res);
}
|
public String getRealMaximumTimeout() {
return Const.trim( environmentSubstitute( getMaximumTimeout() ) );
}
|
@Test
public void testGetRealMaximumTimeout() {
JobEntryDelay entry = new JobEntryDelay();
assertTrue( Utils.isEmpty( entry.getRealMaximumTimeout() ) );
entry.setMaximumTimeout( " 1" );
assertEquals( "1", entry.getRealMaximumTimeout() );
entry.setVariable( "testValue", " 20" );
entry.setMaximumTimeout( "${testValue}" );
assertEquals( "20", entry.getRealMaximumTimeout() );
}
|
public static String formatSql(final AstNode root) {
final StringBuilder builder = new StringBuilder();
new Formatter(builder).process(root, 0);
return StringUtils.stripEnd(builder.toString(), "\n");
}
|
@Test
public void shouldFormatExplainQuery() {
final String statementString = "EXPLAIN foo;";
final Statement statement = parseSingle(statementString);
final String result = SqlFormatter.formatSql(statement);
assertThat(result, is("EXPLAIN \nFOO"));
}
|
long importPhotos(
Collection<PhotoModel> photos,
GPhotosUpload gPhotosUpload)
throws Exception {
return gPhotosUpload.uploadItemsViaBatching(
photos,
this::importPhotoBatch);
}
|
@Test
public void importPhotoInTempStoreFailure() throws Exception {
PhotoModel photoModel =
new PhotoModel(
PHOTO_TITLE,
IMG_URI,
PHOTO_DESCRIPTION,
JPEG_MEDIA_TYPE,
"oldPhotoID1",
OLD_ALBUM_ID,
true);
Mockito.when(googlePhotosInterface.uploadMediaContent(any(), eq(null)))
.thenThrow(new IOException("Unit Testing"));
PhotosLibraryClient photosLibraryClient = mock(PhotosLibraryClient.class);
JobStore jobStore = mock(LocalJobStore.class);
Mockito.when(jobStore.getStream(any(), any()))
.thenReturn(
new TemporaryPerJobDataStore.InputStreamWrapper(
new ByteArrayInputStream("TestingBytes".getBytes())));
Mockito.doNothing().when(jobStore).removeData(any(), anyString());
ConnectionProvider connectionProvider = new ConnectionProvider(jobStore);
GoogleMediaImporter googleMediaImporter =
new GoogleMediaImporter(
null, /*credentialFactory*/
jobStore,
null, /*jsonFactory*/
new HashMap<>(), /*photosInterfacesMap*/
new HashMap<>(), /*photosLibraryClientMap*/
appCredentials,
googlePhotosInterface,
connectionProvider,
monitor,
1.0 /*writesPerSecond*/);
BatchMediaItemResponse batchMediaItemResponse =
new BatchMediaItemResponse(
new NewMediaItemResult[] {buildMediaItemResult("token1", Code.OK_VALUE)});
Mockito.when(googlePhotosInterface.createPhotos(any(NewMediaItemUpload.class)))
.thenReturn(batchMediaItemResponse);
UUID jobId = UUID.randomUUID();
googleMediaImporter.importPhotos(Lists.newArrayList(photoModel), new GPhotosUpload(jobId, executor,
mock(TokensAndUrlAuthData.class)));
Mockito.verify(jobStore, Mockito.times(0)).removeData(any(), anyString());
Mockito.verify(jobStore, Mockito.times(1)).getStream(any(), anyString());
}
|
public static Builder route() {
return new RouterFunctionBuilder();
}
|
@Test
void andRoute() {
RouterFunction<ServerResponse> routerFunction1 = request -> Optional.empty();
RequestPredicate requestPredicate = request -> true;
RouterFunction<ServerResponse> result = routerFunction1.andRoute(requestPredicate, this::handlerMethod);
assertThat(result).isNotNull();
Optional<? extends HandlerFunction<?>> resultHandlerFunction = result.route(request);
assertThat(resultHandlerFunction).isPresent();
}
|
public void validateLoginName() throws ValidationException {
validate(Validator.presenceValidator("Login name field must be non-blank."), getName());
}
|
@Test
void shouldValidateWhenLoginNameExists() throws Exception {
user = new User("bob", new String[]{"Jez,Pavan"}, "[email protected]", true);
user.validateLoginName();
}
|
public static AztecCode encode(String data) {
return encode(data.getBytes(StandardCharsets.ISO_8859_1));
}
|
@Test
public void testAztecWriter() throws Exception {
testWriter("Espa\u00F1ol", null, 25, true, 1); // Without ECI (implicit ISO-8859-1)
testWriter("Espa\u00F1ol", ISO_8859_1, 25, true, 1); // Explicit ISO-8859-1
testWriter("\u20AC 1 sample data.", WINDOWS_1252, 25, true, 2); // ISO-8859-1 can't encode Euro; Windows-1252 can
testWriter("\u20AC 1 sample data.", ISO_8859_15, 25, true, 2);
testWriter("\u20AC 1 sample data.", UTF_8, 25, true, 2);
testWriter("\u20AC 1 sample data.", UTF_8, 100, true, 3);
testWriter("\u20AC 1 sample data.", UTF_8, 300, true, 4);
testWriter("\u20AC 1 sample data.", UTF_8, 500, false, 5);
testWriter("The capital of Japan is named \u6771\u4EAC.", SHIFT_JIS, 25, true, 3);
// Test AztecWriter defaults
String data = "In ut magna vel mauris malesuada";
AztecWriter writer = new AztecWriter();
BitMatrix matrix = writer.encode(data, BarcodeFormat.AZTEC, 0, 0);
AztecCode aztec = Encoder.encode(data,
Encoder.DEFAULT_EC_PERCENT, Encoder.DEFAULT_AZTEC_LAYERS);
BitMatrix expectedMatrix = aztec.getMatrix();
assertEquals(matrix, expectedMatrix);
}
|
public void addReceiptHandle(ProxyContext context, Channel channel, String group, String msgID, MessageReceiptHandle messageReceiptHandle) {
ConcurrentHashMapUtils.computeIfAbsent(this.receiptHandleGroupMap, new ReceiptHandleGroupKey(channel, group),
k -> new ReceiptHandleGroup()).put(msgID, messageReceiptHandle);
}
|
@Test
public void testClientOffline() {
ArgumentCaptor<ConsumerIdsChangeListener> listenerArgumentCaptor = ArgumentCaptor.forClass(ConsumerIdsChangeListener.class);
Mockito.verify(consumerManager, Mockito.times(1)).appendConsumerIdsChangeListener(listenerArgumentCaptor.capture());
Channel channel = PROXY_CONTEXT.getVal(ContextVariable.CHANNEL);
receiptHandleManager.addReceiptHandle(PROXY_CONTEXT, channel, GROUP, MSG_ID, messageReceiptHandle);
listenerArgumentCaptor.getValue().handle(ConsumerGroupEvent.CLIENT_UNREGISTER, GROUP, new ClientChannelInfo(channel, "", LanguageCode.JAVA, 0));
assertTrue(receiptHandleManager.receiptHandleGroupMap.isEmpty());
}
|
@Override
public List<HivePartitionName> refreshTable(String hiveDbName, String hiveTblName,
boolean onlyCachedPartitions) {
DatabaseTableName databaseTableName = DatabaseTableName.of(hiveDbName, hiveTblName);
tableNameLockMap.putIfAbsent(databaseTableName, hiveDbName + "_" + hiveTblName + "_lock");
String lockStr = tableNameLockMap.get(databaseTableName);
synchronized (lockStr) {
return refreshTableWithoutSync(hiveDbName, hiveTblName, databaseTableName, onlyCachedPartitions);
}
}
|
@Test
public void testRefreshTable() {
new Expectations(metastore) {
{
metastore.getTable(anyString, "notExistTbl");
minTimes = 0;
Throwable targetException = new NoSuchObjectException("no such obj");
Throwable e = new InvocationTargetException(targetException);
result = new StarRocksConnectorException("table not exist", e);
}
};
CachingHiveMetastore cachingHiveMetastore = new CachingHiveMetastore(
metastore, executor, expireAfterWriteSec, refreshAfterWriteSec, 1000, false);
try {
cachingHiveMetastore.refreshTable("db1", "notExistTbl", true);
Assert.fail();
} catch (Exception e) {
Assert.assertTrue(e instanceof StarRocksConnectorException);
Assert.assertTrue(e.getMessage().contains("invalidated cache"));
}
try {
cachingHiveMetastore.refreshTable("db1", "tbl1", true);
} catch (Exception e) {
Assert.fail();
}
}
|
public List<VespaService> getMonitoringServices(String service) {
if (service.equalsIgnoreCase(ALL_SERVICES))
return services;
List<VespaService> myServices = new ArrayList<>();
for (VespaService s : services) {
log.log(FINE, () -> "getMonitoringServices. service=" + service + ", checking against " + s + ", which has monitoring name " + s.getMonitoringName());
if (s.getMonitoringName().id.equalsIgnoreCase(service)) {
myServices.add(s);
}
}
return myServices;
}
|
@Test
public void services_can_be_retrieved_from_monitoring_name() {
List<VespaService> dummyServices = List.of(
new DummyService(0, "dummy/id/0"),
new DummyService(1, "dummy/id/1"));
VespaServices services = new VespaServices(dummyServices);
assertEquals(2, services.getMonitoringServices("vespa.dummy").size());
}
|
public void handleAssignment(final Map<TaskId, Set<TopicPartition>> activeTasks,
final Map<TaskId, Set<TopicPartition>> standbyTasks) {
log.info("Handle new assignment with:\n" +
"\tNew active tasks: {}\n" +
"\tNew standby tasks: {}\n" +
"\tExisting active tasks: {}\n" +
"\tExisting standby tasks: {}",
activeTasks.keySet(), standbyTasks.keySet(), activeTaskIds(), standbyTaskIds());
topologyMetadata.addSubscribedTopicsFromAssignment(
activeTasks.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
logPrefix
);
final Map<TaskId, Set<TopicPartition>> activeTasksToCreate = new HashMap<>(activeTasks);
final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate = new HashMap<>(standbyTasks);
final Map<Task, Set<TopicPartition>> tasksToRecycle = new HashMap<>();
final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id));
final Set<TaskId> tasksToLock =
tasks.allTaskIds().stream()
.filter(x -> activeTasksToCreate.containsKey(x) || standbyTasksToCreate.containsKey(x))
.collect(Collectors.toSet());
maybeLockTasks(tasksToLock);
// first put aside those unrecognized tasks because of unknown named-topologies
tasks.clearPendingTasksToCreate();
tasks.addPendingActiveTasksToCreate(pendingTasksToCreate(activeTasksToCreate));
tasks.addPendingStandbyTasksToCreate(pendingTasksToCreate(standbyTasksToCreate));
// first rectify all existing tasks:
// 1. for tasks that are already owned, just update input partitions / resume and skip re-creating them
// 2. for tasks that have changed active/standby status, just recycle and skip re-creating them
// 3. otherwise, close them since they are no longer owned
final Map<TaskId, RuntimeException> failedTasks = new LinkedHashMap<>();
if (stateUpdater == null) {
handleTasksWithoutStateUpdater(activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean);
} else {
handleTasksWithStateUpdater(
activeTasksToCreate,
standbyTasksToCreate,
tasksToRecycle,
tasksToCloseClean,
failedTasks
);
failedTasks.putAll(collectExceptionsAndFailedTasksFromStateUpdater());
}
final Map<TaskId, RuntimeException> taskCloseExceptions = closeAndRecycleTasks(tasksToRecycle, tasksToCloseClean);
maybeUnlockTasks(tasksToLock);
failedTasks.putAll(taskCloseExceptions);
maybeThrowTaskExceptions(failedTasks);
createNewTasks(activeTasksToCreate, standbyTasksToCreate);
}
|
@Test
public void shouldRemoveUnusedFailedStandbyTaskFromStateUpdaterAndCloseDirty() {
final StandbyTask standbyTaskToClose = standbyTask(taskId02, taskId02ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId02Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true);
when(stateUpdater.getTasks()).thenReturn(mkSet(standbyTaskToClose));
final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
when(stateUpdater.remove(standbyTaskToClose.id())).thenReturn(future);
future.complete(new StateUpdater.RemovedTaskResult(standbyTaskToClose, new RuntimeException("KABOOM!")));
taskManager.handleAssignment(Collections.emptyMap(), Collections.emptyMap());
verify(standbyTaskToClose).prepareCommit();
verify(standbyTaskToClose).suspend();
verify(standbyTaskToClose).closeDirty();
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
|
@Override
public Object get(PropertyKey key) {
return get(key, ConfigurationValueOptions.defaults());
}
|
@Test
public void initConfWithExtenstionProperty() throws Exception {
try (Closeable p = new SystemPropertyRule("alluxio.master.journal.ufs.option.a.b.c",
"foo").toResource()) {
resetConf();
assertEquals("foo",
mConfiguration.get(Template.MASTER_JOURNAL_UFS_OPTION_PROPERTY
.format("a.b.c")));
}
}
|
@Override
public double dot(SGDVector other) {
if (other.size() != size) {
throw new IllegalArgumentException("Can't dot two vectors of different lengths, this = " + size + ", other = " + other.size());
} else if (other instanceof SparseVector) {
double score = 0.0;
// If there are elements, calculate the dot product.
if ((other.numActiveElements() != 0) && (indices.length != 0)) {
Iterator<VectorTuple> itr = iterator();
Iterator<VectorTuple> otherItr = other.iterator();
VectorTuple tuple = itr.next();
VectorTuple otherTuple = otherItr.next();
while (itr.hasNext() && otherItr.hasNext()) {
if (tuple.index == otherTuple.index) {
score += tuple.value * otherTuple.value;
tuple = itr.next();
otherTuple = otherItr.next();
} else if (tuple.index < otherTuple.index) {
tuple = itr.next();
} else {
otherTuple = otherItr.next();
}
}
while (itr.hasNext()) {
if (tuple.index == otherTuple.index) {
score += tuple.value * otherTuple.value;
}
tuple = itr.next();
}
while (otherItr.hasNext()) {
if (tuple.index == otherTuple.index) {
score += tuple.value * otherTuple.value;
}
otherTuple = otherItr.next();
}
if (tuple.index == otherTuple.index) {
score += tuple.value * otherTuple.value;
}
}
return score;
} else if (other instanceof DenseVector) {
double score = 0.0;
for (int i = 0; i < indices.length; i++) {
score += other.get(indices[i]) * values[i];
}
return score;
} else {
throw new IllegalArgumentException("Unknown vector subclass " + other.getClass().getCanonicalName() + " for input");
}
}
|
@Test
public void overlappingDot() {
SparseVector a = generateVectorA();
SparseVector b = generateVectorB();
assertEquals(a.dot(b), b.dot(a), 1e-10);
assertEquals(-15.0, a.dot(b), 1e-10);
}
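The sparse-sparse branch above is a two-pointer merge over index-sorted iterators. A minimal sketch of the same merge on parallel (sorted index, value) arrays, with illustrative names:
public final class SparseDotDemo {
    // Dot product of two sparse vectors stored as parallel sorted-index/value
    // arrays: advance whichever pointer has the smaller index, multiply on matches.
    static double dot(int[] aIdx, double[] aVal, int[] bIdx, double[] bVal) {
        double score = 0.0;
        int i = 0;
        int j = 0;
        while (i < aIdx.length && j < bIdx.length) {
            if (aIdx[i] == bIdx[j]) {
                score += aVal[i] * bVal[j];
                i++;
                j++;
            } else if (aIdx[i] < bIdx[j]) {
                i++;
            } else {
                j++;
            }
        }
        return score;
    }

    public static void main(String[] args) {
        // Overlap only at indices 1 and 3: 2*4 + 3*(-5) = -7.
        System.out.println(dot(new int[]{0, 1, 3}, new double[]{1, 2, 3},
                               new int[]{1, 2, 3}, new double[]{4, 6, -5})); // -7.0
    }
}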
|
public String convert(Object o) {
StringBuilder buf = new StringBuilder();
Converter<Object> p = headTokenConverter;
while (p != null) {
buf.append(p.convert(o));
p = p.getNext();
}
return buf.toString();
}
|
@Test
public void convertMultipleDates() {
Calendar cal = Calendar.getInstance();
cal.set(2003, 4, 20, 17, 55);
FileNamePattern fnp = new FileNamePattern("foo-%d{yyyy.MM, aux}/%d{yyyy.MM.dd}.txt", context);
assertEquals("foo-2003.05/2003.05.20.txt", fnp.convert(cal.getTime()));
}
|
public static <T> T loadWithSecrets(Map<String, Object> map, Class<T> clazz, SourceContext sourceContext) {
return loadWithSecrets(map, clazz, secretName -> sourceContext.getSecret(secretName));
}
|
@Test
public void testDefaultValue() {
// test required field.
Assert.expectThrows(IllegalArgumentException.class,
() -> IOConfigUtils.loadWithSecrets(new HashMap<>(), TestDefaultConfig.class, new TestSinkContext()));
// test all default value.
Map<String, Object> configMap = new HashMap<>();
configMap.put("testRequired", "test");
TestDefaultConfig testDefaultConfig =
IOConfigUtils.loadWithSecrets(configMap, TestDefaultConfig.class, new TestSinkContext());
// if there is default value for a required field and no value provided when load config,
// it should not throw exception but use the default value.
Assert.assertEquals(testDefaultConfig.getTestDefaultRequired(), "defaultRequired");
Assert.assertEquals(testDefaultConfig.getDefaultStr(), "defaultStr");
Assert.assertEquals(testDefaultConfig.isDefaultBool(), true);
Assert.assertEquals(testDefaultConfig.getDefaultInt(), 100);
Assert.assertEquals(testDefaultConfig.getDefaultLong(), 100);
Assert.assertEquals(testDefaultConfig.getDefaultDouble(), 100.12, 0.00001);
Assert.assertEquals(testDefaultConfig.getDefaultFloat(), 100.10, 0.00001);
Assert.assertEquals(testDefaultConfig.getNoDefault(), 0);
}
|
public static long readInt64(ByteBuffer buf) throws BufferUnderflowException {
return buf.order(ByteOrder.LITTLE_ENDIAN).getLong();
}
|
@Test
public void testReadInt64() {
assertEquals(258L, ByteUtils.readInt64(new byte[]{2, 1, 0, 0, 0, 0, 0, 0}, 0));
assertEquals(258L, ByteUtils.readInt64(new byte[]{2, 1, 0, 0, 0, 0, 0, 0, 3, 4}, 0));
assertEquals(772L, ByteUtils.readInt64(new byte[]{1, 2, 4, 3, 0, 0, 0, 0, 0, 0}, 2));
assertEquals(-1L, ByteUtils.readInt64(new byte[]{-1, -1, -1, -1, -1, -1, -1, -1}, 0));
}
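The ByteBuffer overload decodes eight bytes in little-endian order, so the least significant byte comes first. A JDK-only sketch reproducing the first test case:
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public final class LittleEndianDemo {
    public static void main(String[] args) {
        // Bytes {2, 1, 0, 0, 0, 0, 0, 0} in little-endian order: 2 + 1 * 256 = 258.
        ByteBuffer buf = ByteBuffer.wrap(new byte[]{2, 1, 0, 0, 0, 0, 0, 0});
        long value = buf.order(ByteOrder.LITTLE_ENDIAN).getLong();
        System.out.println(value); // 258
    }
}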
|
static public boolean notMarkedWithNoAutoStart(Object o) {
if (o == null) {
return false;
}
Class<?> clazz = o.getClass();
NoAutoStart a = clazz.getAnnotation(NoAutoStart.class);
return a == null;
}
|
@Test
public void markedWithNoAutoStart() {
DoNotAutoStart o = new DoNotAutoStart();
assertFalse(NoAutoStartUtil.notMarkedWithNoAutoStart(o));
}
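The utility treats the absence of the annotation as permission to auto-start, hence the negated name. A self-contained sketch with a locally declared annotation (the real NoAutoStart lives in logback; everything below is illustrative):
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

public final class NoAutoStartDemo {
    @Retention(RetentionPolicy.RUNTIME) // must be RUNTIME for reflection to see it
    @Target(ElementType.TYPE)
    @interface NoAutoStart { }

    @NoAutoStart
    static class DoNotAutoStart { }

    // True only when the object's class is NOT annotated with @NoAutoStart.
    static boolean notMarkedWithNoAutoStart(Object o) {
        return o != null && o.getClass().getAnnotation(NoAutoStart.class) == null;
    }

    public static void main(String[] args) {
        System.out.println(notMarkedWithNoAutoStart(new DoNotAutoStart())); // false
        System.out.println(notMarkedWithNoAutoStart("plain string"));       // true
    }
}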
|
public Collection<Task> createTasks(final Consumer<byte[], byte[]> consumer,
final Map<TaskId, Set<TopicPartition>> tasksToBeCreated) {
final List<Task> createdTasks = new ArrayList<>();
for (final Map.Entry<TaskId, Set<TopicPartition>> newTaskAndPartitions : tasksToBeCreated.entrySet()) {
final TaskId taskId = newTaskAndPartitions.getKey();
final LogContext logContext = getLogContext(taskId);
final Set<TopicPartition> partitions = newTaskAndPartitions.getValue();
final ProcessorTopology topology = topologyMetadata.buildSubtopology(taskId);
final ProcessorStateManager stateManager = new ProcessorStateManager(
taskId,
Task.TaskType.ACTIVE,
eosEnabled(applicationConfig),
logContext,
stateDirectory,
storeChangelogReader,
topology.storeToChangelogTopic(),
partitions,
stateUpdaterEnabled);
final InternalProcessorContext<Object, Object> context = new ProcessorContextImpl(
taskId,
applicationConfig,
stateManager,
streamsMetrics,
cache
);
createdTasks.add(
createActiveTask(
taskId,
partitions,
consumer,
logContext,
topology,
stateManager,
context
)
);
}
return createdTasks;
}
|
@Test
public void shouldThrowStreamsExceptionOnErrorCloseThreadProducerIfEosV2Enabled() {
properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);
mockClientSupplier.setApplicationIdForProducer("appId");
createTasks();
mockClientSupplier.producers.get(0).closeException = new RuntimeException("KABOOM!");
final StreamsException thrown = assertThrows(
StreamsException.class,
activeTaskCreator::closeThreadProducerIfNeeded
);
assertThat(thrown.getMessage(), is("Thread producer encounter error trying to close."));
assertThat(thrown.getCause().getMessage(), is("KABOOM!"));
}
|
public static String loadClassWithBackwardCompatibleCheck(String className) {
return PLUGINS_BACKWARD_COMPATIBLE_CLASS_NAME_MAP.getOrDefault(className, className);
}
|
@Test
public void testBackwardCompatible() {
Assert.assertEquals(PluginManager
.loadClassWithBackwardCompatibleCheck("org.apache.pinot.core.realtime.stream.SimpleAvroMessageDecoder"),
"org.apache.pinot.plugin.inputformat.avro.SimpleAvroMessageDecoder");
Assert.assertEquals(PluginManager
.loadClassWithBackwardCompatibleCheck("org.apache.pinot.core.realtime.impl.kafka.KafkaAvroMessageDecoder"),
"org.apache.pinot.plugin.inputformat.avro.KafkaAvroMessageDecoder");
Assert.assertEquals(PluginManager
.loadClassWithBackwardCompatibleCheck("org.apache.pinot.core.realtime.impl.kafka.KafkaJSONMessageDecoder"),
"org.apache.pinot.plugin.stream.kafka.KafkaJSONMessageDecoder");
// RecordReader
Assert.assertEquals(
PluginManager.loadClassWithBackwardCompatibleCheck("org.apache.pinot.core.data.readers.AvroRecordReader"),
"org.apache.pinot.plugin.inputformat.avro.AvroRecordReader");
Assert.assertEquals(
PluginManager.loadClassWithBackwardCompatibleCheck("org.apache.pinot.core.data.readers.CSVRecordReader"),
"org.apache.pinot.plugin.inputformat.csv.CSVRecordReader");
Assert.assertEquals(
PluginManager.loadClassWithBackwardCompatibleCheck("org.apache.pinot.core.data.readers.JSONRecordReader"),
"org.apache.pinot.plugin.inputformat.json.JSONRecordReader");
Assert.assertEquals(
PluginManager.loadClassWithBackwardCompatibleCheck("org.apache.pinot.orc.data.readers.ORCRecordReader"),
"org.apache.pinot.plugin.inputformat.orc.ORCRecordReader");
Assert.assertEquals(
PluginManager.loadClassWithBackwardCompatibleCheck("org.apache.pinot.parquet.data.readers.ParquetRecordReader"),
"org.apache.pinot.plugin.inputformat.parquet.ParquetRecordReader");
Assert.assertEquals(
PluginManager.loadClassWithBackwardCompatibleCheck("org.apache.pinot.core.data.readers.ThriftRecordReader"),
"org.apache.pinot.plugin.inputformat.thrift.ThriftRecordReader");
// PinotFS
Assert.assertEquals(PluginManager.loadClassWithBackwardCompatibleCheck("org.apache.pinot.filesystem.AzurePinotFS"),
"org.apache.pinot.plugin.filesystem.AzurePinotFS");
Assert.assertEquals(PluginManager.loadClassWithBackwardCompatibleCheck("org.apache.pinot.filesystem.HadoopPinotFS"),
"org.apache.pinot.plugin.filesystem.HadoopPinotFS");
Assert.assertEquals(PluginManager.loadClassWithBackwardCompatibleCheck("org.apache.pinot.filesystem.LocalPinotFS"),
"org.apache.pinot.spi.filesystem.LocalPinotFS");
// StreamConsumerFactory
Assert.assertEquals(PluginManager
.loadClassWithBackwardCompatibleCheck("org.apache.pinot.core.realtime.impl.kafka.KafkaConsumerFactory"),
"org.apache.pinot.plugin.stream.kafka09.KafkaConsumerFactory");
Assert.assertEquals(PluginManager
.loadClassWithBackwardCompatibleCheck("org.apache.pinot.core.realtime.impl.kafka2.KafkaConsumerFactory"),
"org.apache.pinot.plugin.stream.kafka20.KafkaConsumerFactory");
}
|
@Override
protected boolean hasEntityUuidPermission(String permission, String entityUuid) {
return false;
}
|
@Test
public void hasProjectUuidPermission() {
assertThat(githubWebhookUserSession.hasEntityUuidPermission("perm", "project")).isFalse();
}
|
public static String getFullGcsPath(String... pathParts) {
checkArgument(pathParts.length != 0, "Must provide at least one path part");
checkArgument(
stream(pathParts).noneMatch(Strings::isNullOrEmpty), "No path part can be null or empty");
return String.format("gs://%s", String.join("/", pathParts));
}
|
@Test
public void testGetFullGcsPathOneEmptyValue() {
assertThrows(
IllegalArgumentException.class,
() -> ArtifactUtils.getFullGcsPath("bucket", "", "dir2", "file"));
}
|
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new);
String columnName = shardingValue.getColumnName();
ShardingSpherePreconditions.checkState(algorithmExpression.contains(columnName), () -> new MismatchedInlineShardingAlgorithmExpressionAndColumnException(algorithmExpression, columnName));
try {
return InlineExpressionParserFactory.newInstance(algorithmExpression).evaluateWithArgs(Collections.singletonMap(columnName, shardingValue.getValue()));
} catch (final MissingMethodException ignored) {
throw new MismatchedInlineShardingAlgorithmExpressionAndColumnException(algorithmExpression, columnName);
}
}
|
@SuppressWarnings({"unchecked", "rawtypes"})
@Test
void assertDoShardingWithRangeShardingConditionValue() {
List<String> availableTargetNames = Arrays.asList("t_order_0", "t_order_1", "t_order_2", "t_order_3");
Collection<String> actual = inlineShardingAlgorithm.doSharding(availableTargetNames, new RangeShardingValue<>("t_order", "order_id", DATA_NODE_INFO, mock(Range.class)));
assertTrue(actual.containsAll(availableTargetNames));
}
|
@Override
public NullsOrderType getDefaultNullsOrderType() {
return NullsOrderType.FIRST;
}
|
@Test
void assertGetDefaultNullsOrderType() {
assertThat(dialectDatabaseMetaData.getDefaultNullsOrderType(), is(NullsOrderType.FIRST));
}
|
@Override
public void executeSystemTask(WorkflowSystemTask systemTask, String taskId, int callbackTime) {
try {
Task task = executionDAOFacade.getTaskById(taskId);
if (task == null) {
LOG.error("TaskId: {} could not be found while executing SystemTask", taskId);
return;
}
LOG.debug("Task: {} fetched from execution DAO for taskId: {}", task, taskId);
String queueName = QueueUtils.getQueueName(task);
if (task.getStatus().isTerminal()) {
// Tune the SystemTaskWorkerCoordinator's queues - if the queue size is very big this can
// happen!
LOG.info("Task {}/{} was already completed.", task.getTaskType(), task.getTaskId());
queueDAO.remove(queueName, task.getTaskId());
return;
}
String workflowId = task.getWorkflowInstanceId();
Workflow workflow = executionDAOFacade.getWorkflowById(workflowId, true);
if (task.getStartTime() == 0) {
task.setStartTime(System.currentTimeMillis());
executionDAOFacade.updateTask(task);
Monitors.recordQueueWaitTime(task.getTaskDefName(), task.getQueueWaitTime());
}
if (workflow.getStatus().isTerminal()) {
LOG.info(
"Workflow {} has been completed for {}/{}",
workflow.getWorkflowId(),
systemTask.getName(),
task.getTaskId());
if (!task.getStatus().isTerminal()) {
task.setStatus(CANCELED);
}
executionDAOFacade.updateTask(task);
queueDAO.remove(queueName, task.getTaskId());
return;
}
LOG.debug("Executing {}/{}-{}", task.getTaskType(), task.getTaskId(), task.getStatus());
if (task.getStatus() == SCHEDULED || !systemTask.isAsyncComplete(task)) {
task.setPollCount(task.getPollCount() + 1);
// removed poll count DB update here
}
deciderService.populateTaskData(task);
// Stop polling for asyncComplete system tasks that are not in SCHEDULED state
if (systemTask.isAsyncComplete(task) && task.getStatus() != SCHEDULED) {
queueDAO.remove(QueueUtils.getQueueName(task), task.getTaskId());
return;
}
taskRunner.runMaestroTask(this, workflow, task, systemTask);
if (!task.getStatus().isTerminal()) {
task.setCallbackAfterSeconds(callbackTime);
try {
configureCallbackInterval(task); // overwrite if needed
} catch (Exception e) {
LOG.error(
"Error configuring callback interval for task [{}]. Please investigate it",
task.getTaskId(),
e);
}
}
updateTask(new TaskResult(task));
LOG.debug(
"Done Executing {}/{}-{} output={}",
task.getTaskType(),
task.getTaskId(),
task.getStatus(),
task.getOutputData());
} catch (Exception e) {
Monitors.error("MaestroWorkflowExecutor", "executeSystemTask");
LOG.error("Error executing system task - {}, with id: {}", systemTask, taskId, e);
}
}
|
@Test
public void testExecuteSystemTaskThrowUnexpectedException() {
String workflowId = "workflow-id";
String taskId = "task-id-1";
Task maestroTask = new Task();
maestroTask.setTaskType(Constants.MAESTRO_TASK_NAME);
maestroTask.setReferenceTaskName("maestroTask");
maestroTask.setWorkflowInstanceId(workflowId);
maestroTask.setScheduledTime(System.currentTimeMillis());
maestroTask.setTaskId(taskId);
maestroTask.setStatus(Task.Status.IN_PROGRESS);
maestroTask.setStartTime(123);
maestroTask.setCallbackAfterSeconds(0);
Workflow workflow = new Workflow();
workflow.setWorkflowId(workflowId);
workflow.setStatus(Workflow.WorkflowStatus.RUNNING);
when(executionDAOFacade.getTaskById(anyString())).thenReturn(maestroTask);
when(executionDAOFacade.getWorkflowById(anyString(), anyBoolean())).thenReturn(workflow);
task2.setShouldThrow(true);
maestroWorkflowExecutor.executeSystemTask(task2, taskId, 30);
assertEquals(Task.Status.IN_PROGRESS, maestroTask.getStatus());
assertEquals(1, maestroTask.getPollCount());
verify(executionDAOFacade, times(0)).updateTask(any());
assertEquals(0, maestroTask.getCallbackAfterSeconds());
}
|
public Collection<String> getUsedConversionClasses(Schema schema) {
Collection<String> result = new HashSet<>();
for (Conversion<?> conversion : getUsedConversions(schema)) {
result.add(conversion.getClass().getCanonicalName());
}
return result;
}
|
@Test
void getUsedConversionClassesForNullableLogicalTypesInArray() throws Exception {
SpecificCompiler compiler = createCompiler();
final Schema schema = new Schema.Parser().parse(
"{\"type\":\"record\",\"name\":\"NullableLogicalTypesArray\",\"namespace\":\"org.apache.avro.codegentest.testdata\",\"doc\":\"Test nested types with logical types in generated Java classes\",\"fields\":[{\"name\":\"arrayOfLogicalType\",\"type\":{\"type\":\"array\",\"items\":[\"null\",{\"type\":\"int\",\"logicalType\":\"date\"}]}}]}");
final Collection<String> usedConversionClasses = compiler.getUsedConversionClasses(schema);
assertEquals(1, usedConversionClasses.size());
assertEquals("org.apache.avro.data.TimeConversions.DateConversion", usedConversionClasses.iterator().next());
}
|
public static void displayWelcomeMessage(
final int consoleWidth,
final PrintWriter writer
) {
final String[] lines = {
"",
"===========================================",
"= _ _ ____ ____ =",
"= | | _____ __ _| | _ \\| __ ) =",
"= | |/ / __|/ _` | | | | | _ \\ =",
"= | <\\__ \\ (_| | | |_| | |_) | =",
"= |_|\\_\\___/\\__, |_|____/|____/ =",
"= |_| =",
"= The Database purpose-built =",
"= for stream processing apps =",
"==========================================="
};
final String copyrightMsg = "Copyright 2017-2022 Confluent Inc.";
final Integer logoWidth = Arrays.stream(lines)
.map(String::length)
.reduce(0, Math::max);
// Don't want to display the logo if it'll just end up getting wrapped and looking hideous
if (consoleWidth < logoWidth) {
writer.println("ksqlDB, " + copyrightMsg);
} else {
final int paddingChars = (consoleWidth - logoWidth) / 2;
final String leftPadding = IntStream.range(0, paddingChars)
.mapToObj(idx -> " ")
.collect(Collectors.joining());
Arrays.stream(lines)
.forEach(line -> writer.println(leftPadding + line));
writer.println();
writer.println(copyrightMsg);
}
writer.println();
writer.flush();
}
|
@Test
public void shouldFlushWriterWhenOutputtingLongMessage() {
// When:
WelcomeMsgUtils.displayWelcomeMessage(80, mockPrintWriter);
// Then:
Mockito.verify(mockPrintWriter).flush();
}
|
public static Expression generateFilterExpression(SearchArgument sarg) {
return translate(sarg.getExpression(), sarg.getLeaves());
}
|
@Test
public void testUnsupportedBetweenOperandEmptyLeaves() {
SearchArgument.Builder builder = SearchArgumentFactory.newBuilder();
final SearchArgument arg =
new MockSearchArgument(
builder
.startAnd()
.between("salary", PredicateLeaf.Type.LONG, 9000L, 15000L)
.end()
.build());
assertThatThrownBy(() -> HiveIcebergFilterFactory.generateFilterExpression(arg))
.isInstanceOf(UnsupportedOperationException.class)
.hasMessage("Missing leaf literals: Leaf[empty]");
}
|
@SafeVarargs
public static Optional<Predicate<Throwable>> createExceptionsPredicate(
Predicate<Throwable> exceptionPredicate,
Class<? extends Throwable>... exceptions) {
return PredicateCreator.createExceptionsPredicate(exceptions)
.map(predicate -> exceptionPredicate == null ? predicate : predicate.or(exceptionPredicate))
.or(() -> Optional.ofNullable(exceptionPredicate));
}
|
@Test
public void buildComplexRecordExceptionsPredicate() {
Predicate<Throwable> exceptionPredicate = t -> t instanceof IOException;
Predicate<Throwable> predicate = PredicateCreator
.createExceptionsPredicate(exceptionPredicate, RuntimeException.class)
.orElseThrow();
then(predicate.test(new RuntimeException())).isTrue();
then(predicate.test(new IllegalArgumentException())).isTrue();
then(predicate.test(new Throwable())).isFalse();
then(predicate.test(new Exception())).isFalse();
then(predicate.test(new IOException())).isTrue();
}
|
@Override
public void updateArticle(ArticleUpdateReqVO updateReqVO) {
// Validate that the article exists
validateArticleExists(updateReqVO.getId());
// Validate that the category exists
validateArticleCategoryExists(updateReqVO.getCategoryId());
// Update
ArticleDO updateObj = ArticleConvert.INSTANCE.convert(updateReqVO);
articleMapper.updateById(updateObj);
}
|
@Test
public void testUpdateArticle_notExists() {
// Prepare the parameters
ArticleUpdateReqVO reqVO = randomPojo(ArticleUpdateReqVO.class);
// Invoke and assert the exception
assertServiceException(() -> articleService.updateArticle(reqVO), ARTICLE_NOT_EXISTS);
}
|
public static SchemaAndValue parseString(String value) {
if (value == null) {
return NULL_SCHEMA_AND_VALUE;
}
if (value.isEmpty()) {
return new SchemaAndValue(Schema.STRING_SCHEMA, value);
}
ValueParser parser = new ValueParser(new Parser(value));
return parser.parse(false);
}
|
@Test
public void shouldNotParseAsArrayWithoutCommas() {
SchemaAndValue schemaAndValue = Values.parseString("[0 1 2]");
assertEquals(Type.STRING, schemaAndValue.schema().type());
assertEquals("[0 1 2]", schemaAndValue.value());
}
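Values.parseString infers a schema from the text and falls back to a plain STRING when nothing more specific applies; with commas the same bracketed text parses as an array. A usage sketch (requires connect-api on the classpath; the exact element schema of the parsed array is version-dependent):
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.data.Values;

public final class ParseStringDemo {
    public static void main(String[] args) {
        // "[0 1 2]" lacks commas, so it is not recognized as an array and
        // comes back as a STRING holding the original text.
        SchemaAndValue parsed = Values.parseString("[0 1 2]");
        System.out.println(parsed.schema().type()); // STRING
        System.out.println(parsed.value());         // [0 1 2]

        // With commas the same bracketed text parses as an ARRAY.
        SchemaAndValue array = Values.parseString("[0, 1, 2]");
        System.out.println(array.schema().type()); // ARRAY
    }
}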
|
public ServiceInfo processServiceInfo(String json) {
ServiceInfo serviceInfo = JacksonUtils.toObj(json, ServiceInfo.class);
serviceInfo.setJsonFromServer(json);
return processServiceInfo(serviceInfo);
}
|
@Test
void testProcessServiceInfoForOlder() {
ServiceInfo info = new ServiceInfo("a@@b@@c");
Instance instance1 = createInstance("1.1.1.1", 1);
Instance instance2 = createInstance("1.1.1.2", 2);
List<Instance> hosts = new ArrayList<>();
hosts.add(instance1);
hosts.add(instance2);
info.setHosts(hosts);
info.setLastRefTime(System.currentTimeMillis());
holder.processServiceInfo(info);
ServiceInfo olderInfo = new ServiceInfo("a@@b@@c");
olderInfo.setLastRefTime(0L);
final ServiceInfo actual = holder.processServiceInfo(olderInfo);
assertEquals(olderInfo, actual);
}
|
@VisibleForTesting
ImmutableList<AggregationKeyResult> extractValues(PivotResult pivotResult) throws EventProcessorException {
final ImmutableList.Builder<AggregationKeyResult> results = ImmutableList.builder();
// Example PivotResult structures. The row value "key" is composed of: "metric/<function>/<field>/<series-id>"
// The row "key" always contains the date range bucket value as first element.
//
// With group-by:
// {
// "rows": [
// {
// "key": ["2020-03-27T16:23:12Z", "php", "box2"],
// "values": [
// {
// "key": ["metric/count/source/abc123"],
// "value": 86,
// "rollup": true,
// "source": "row-leaf"
// },
// {
// "key": ["metric/card/source/abc123"],
// "value": 1,
// "rollup": true,
// "source": "row-leaf"
// }
// ],
// "source": "leaf"
// },
// {
// "key": ["2020-03-27T16:23:12Z", "php"],
// "values": [
// {
// "key": ["metric/count/source/abc123"],
// "value": 86,
// "rollup": true,
// "source": "row-inner"
// },
// {
// "key": ["metric/card/source/abc123"],
// "value": 1,
// "rollup": true,
// "source": "row-inner"
// }
// ],
// "source": "non-leaf"
// },
// {
// "key": ["2020-03-27T16:23:12Z", "sshd","box2"],
// "values": [
// {
// "key": ["metric/count/source/abc123"],
// "value": 5,
// "rollup": true,
// "source": "row-leaf"
// },
// {
// "key": ["metric/card/source/abc123"],
// "value": 1,
// "rollup": true,
// "source": "row-leaf"
// }
// ],
// "source": "leaf"
// }
// ]
//}
//
// Without group-by:
// {
// "rows": [
// {
// "key": ["2020-03-27T16:23:12Z"],
// "values": [
// {
// "key": ["metric/count/source/abc123"],
// "value": 18341,
// "rollup": true,
// "source": "row-leaf"
// },
// {
// "key": ["metric/card/source/abc123"],
// "value": 1,
// "rollup": true,
// "source": "row-leaf"
// }
// ],
// "source": "leaf"
// }
// ]
//}
for (final PivotResult.Row row : pivotResult.rows()) {
if (!"leaf".equals(row.source())) {
// "non-leaf" values can show up when the "rollup" feature is enabled in the pivot search type
continue;
}
// Safety guard against programming errors
if (row.key().size() == 0 || isNullOrEmpty(row.key().get(0))) {
throw new EventProcessorException("Invalid row key! Expected at least the date range timestamp value: " + row.key().toString(), true, eventDefinition);
}
// We always wrap aggregations in date range buckets so we can run aggregations for multiple ranges at once.
// The timestamp value of the date range bucket will be part of the result.
final String timeKey = row.key().get(0);
final ImmutableList<String> groupKey;
if (row.key().size() > 1) {
// The date range bucket value must not be exposed to consumers as part of the key so they
// don't have to unwrap the key all the time.
groupKey = row.key().subList(1, row.key().size());
} else {
groupKey = ImmutableList.of();
}
final ImmutableList.Builder<AggregationSeriesValue> values = ImmutableList.builder();
for (final PivotResult.Value value : row.values()) {
if (!"row-leaf".equals(value.source())) {
// "row-inner" values can show up when the "rollup" feature is enabled in the pivot search type
continue;
}
for (var series : config.series()) {
if (!value.key().isEmpty() && value.key().get(0).equals(metricName(series))) {
// Some Elasticsearch aggregations can return a "null" value. (e.g. avg on a non-existent field)
// We are using NaN in that case to make sure our conditions will work.
final Object maybeNumberValue = firstNonNull(value.value(), Double.NaN);
if (maybeNumberValue instanceof Number) {
final double numberValue = ((Number) maybeNumberValue).doubleValue();
final AggregationSeriesValue seriesValue = AggregationSeriesValue.builder()
.key(groupKey)
.value(numberValue)
.series(series)
.build();
values.add(seriesValue);
} else {
// Should not happen
throw new IllegalStateException("Got unexpected non-number value for " + series.toString() + " " + row.toString() + " " + value.toString());
}
}
}
}
DateTime resultTimestamp;
try {
resultTimestamp = DateTime.parse(timeKey).withZone(DateTimeZone.UTC);
} catch (IllegalArgumentException e) {
throw new IllegalStateException("Failed to create event for: " + eventDefinition.title() + " (possibly due to non-existing grouping fields)", e);
}
results.add(AggregationKeyResult.builder()
.key(groupKey)
.timestamp(resultTimestamp)
.seriesValues(values.build())
.build());
}
return results.build();
}
|
@Test
public void testExtractValuesWithoutGroupBy() throws Exception {
final long WINDOW_LENGTH = 30000;
final AbsoluteRange timerange = AbsoluteRange.create(DateTime.now(DateTimeZone.UTC).minusSeconds(3600), DateTime.now(DateTimeZone.UTC));
final SeriesSpec seriesCount = Count.builder().id("abc123").field("source").build();
final SeriesSpec seriesCountNoField = Count.builder().id("abc123").build();
final SeriesSpec seriesCard = Cardinality.builder().id("abc123").field("source").build();
final AggregationEventProcessorConfig config = AggregationEventProcessorConfig.builder()
.query("")
.streams(Collections.emptySet())
.groupBy(Collections.emptyList())
.series(ImmutableList.of(seriesCount, seriesCountNoField, seriesCard))
.conditions(null)
                .searchWithinMs(WINDOW_LENGTH)
                .executeEveryMs(WINDOW_LENGTH)
.build();
final AggregationEventProcessorParameters parameters = AggregationEventProcessorParameters.builder()
.streams(Collections.emptySet())
.timerange(timerange)
.batchSize(500)
.build();
final PivotAggregationSearch pivotAggregationSearch = new PivotAggregationSearch(
config,
parameters,
new AggregationSearch.User("test", DateTimeZone.UTC),
eventDefinition,
Collections.emptyList(),
searchJobService,
queryEngine,
EventsConfigurationTestProvider.create(),
moreSearch,
permittedStreams,
notificationService,
new QueryStringDecorators(Optional.empty())
);
final PivotResult pivotResult = PivotResult.builder()
.id("test")
.effectiveTimerange(timerange)
.total(1)
.addRow(PivotResult.Row.builder()
.key(ImmutableList.of(timerange.getTo().toString()))
.addValue(PivotResult.Value.create(ImmutableList.of("metric/count(source)"), 42, true, "row-leaf"))
.addValue(PivotResult.Value.create(ImmutableList.of("metric/count()"), 23, true, "row-leaf"))
.addValue(PivotResult.Value.create(ImmutableList.of("metric/card(source)"), 1, true, "row-leaf"))
.source("leaf")
.build())
.build();
final ImmutableList<AggregationKeyResult> results = pivotAggregationSearch.extractValues(pivotResult);
assertThat(results.size()).isEqualTo(1);
assertThat(results.get(0)).isEqualTo(AggregationKeyResult.builder()
.key(ImmutableList.of())
.timestamp(timerange.getTo())
.seriesValues(ImmutableList.of(
AggregationSeriesValue.builder()
.key(ImmutableList.of())
.value(42.0)
.series(seriesCount)
.build(),
AggregationSeriesValue.builder()
.key(ImmutableList.of())
.value(23.0)
.series(seriesCountNoField)
.build(),
AggregationSeriesValue.builder()
.key(ImmutableList.of())
.value(1.0)
.series(seriesCard)
.build()
))
.build());
}
|
public FEELFnResult<BigDecimal> invoke(@ParameterName( "list" ) List list) {
if ( list == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
}
return FEELFnResult.ofResult( BigDecimal.valueOf( list.size() ) );
}
|
@Test
void invokeParamListNonEmpty() {
FunctionTestUtil.assertResult(countFunction.invoke(Arrays.asList(1, 2, "test")), BigDecimal.valueOf(3));
}
|
@Override
public Object decrypt(final Object cipherValue, final AlgorithmSQLContext algorithmSQLContext) {
return cryptographicAlgorithm.decrypt(cipherValue);
}
|
@Test
void assertDecryptNullValue() {
assertNull(encryptAlgorithm.decrypt(null, mock(AlgorithmSQLContext.class)));
}
|
public static Matches matches(String regex) {
return matches(regex, 0);
}
|
@Test
@Category(NeedsRunner.class)
public void testMatchesName() {
PCollection<String> output =
p.apply(Create.of("a", "x xxx", "x yyy", "x zzz"))
.apply(Regex.matches("x (?<namedgroup>[xyz]*)", "namedgroup"));
PAssert.that(output).containsInAnyOrder("xxx", "yyy", "zzz");
p.run();
}
|
static ResourceMethodConfigElement parse(RestLiMethodConfig.ConfigType configType, String key, Object value)
throws ResourceMethodConfigParsingException
{
ParsingErrorListener errorListener = new ParsingErrorListener();
ANTLRInputStream input = new ANTLRInputStream(key);
ResourceMethodKeyLexer lexer = new ResourceMethodKeyLexer(input);
lexer.removeErrorListeners();
lexer.addErrorListener(errorListener);
CommonTokenStream tokens = new CommonTokenStream(lexer);
ResourceMethodKeyParser parser = new ResourceMethodKeyParser(tokens);
parser.removeErrorListeners();
parser.addErrorListener(errorListener);
ResourceMethodKeyParser.KeyContext keyTree = parser.key();
if (!errorListener.hasErrors())
{
Optional<String> resourceName = handlingWildcard(keyTree.restResource());
Optional<ResourceMethod> opType = getOpType(keyTree.operation());
Optional<String> opName = opType.flatMap(method -> getOpName(method, keyTree.operation()));
return new ResourceMethodConfigElement(key, coerceValue(configType, value), configType, resourceName, opType, opName);
}
else
{
throw new ResourceMethodConfigParsingException(
"Error" + ((errorListener.errorsSize() > 1) ? "s" : "") + " parsing key: " + key + "\n" + errorListener);
}
}
|
@Test(dataProvider = "invalidConfigs", expectedExceptions = {ResourceMethodConfigParsingException.class})
public void testInvalidTimeoutConfigParsing(RestLiMethodConfig.ConfigType configType,
String configKey,
Object configValue) throws ResourceMethodConfigParsingException
{
ResourceMethodConfigElement.parse(configType, configKey, configValue);
}
|
public List<JobVertex> getVerticesSortedTopologicallyFromSources()
throws InvalidProgramException {
// early out on empty lists
if (this.taskVertices.isEmpty()) {
return Collections.emptyList();
}
List<JobVertex> sorted = new ArrayList<JobVertex>(this.taskVertices.size());
Set<JobVertex> remaining = new LinkedHashSet<JobVertex>(this.taskVertices.values());
// start by finding the vertices with no input edges
// and the ones with disconnected inputs (that refer to some standalone data set)
{
Iterator<JobVertex> iter = remaining.iterator();
while (iter.hasNext()) {
JobVertex vertex = iter.next();
if (vertex.hasNoConnectedInputs()) {
sorted.add(vertex);
iter.remove();
}
}
}
int startNodePos = 0;
// traverse from the nodes that were added until we found all elements
while (!remaining.isEmpty()) {
// first check if we have more candidates to start traversing from. if not, then the
// graph is cyclic, which is not permitted
if (startNodePos >= sorted.size()) {
throw new InvalidProgramException("The job graph is cyclic.");
}
JobVertex current = sorted.get(startNodePos++);
addNodesThatHaveNoNewPredecessors(current, sorted, remaining);
}
return sorted;
}
|
@Test
public void testTopologicalSort2() {
try {
JobVertex source1 = new JobVertex("source1");
JobVertex source2 = new JobVertex("source2");
JobVertex root = new JobVertex("root");
JobVertex l11 = new JobVertex("layer 1 - 1");
JobVertex l12 = new JobVertex("layer 1 - 2");
JobVertex l13 = new JobVertex("layer 1 - 3");
JobVertex l2 = new JobVertex("layer 2");
root.connectNewDataSetAsInput(
l13, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
root.connectNewDataSetAsInput(
source2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
root.connectNewDataSetAsInput(
l2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
l2.connectNewDataSetAsInput(
l11, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
l2.connectNewDataSetAsInput(
l12, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
l11.connectNewDataSetAsInput(
source1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
l12.connectNewDataSetAsInput(
source1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
l12.connectNewDataSetAsInput(
source2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
l13.connectNewDataSetAsInput(
source2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
JobGraph graph =
JobGraphTestUtils.streamingJobGraph(source1, source2, root, l11, l13, l12, l2);
List<JobVertex> sorted = graph.getVerticesSortedTopologicallyFromSources();
assertEquals(7, sorted.size());
assertBefore(source1, root, sorted);
assertBefore(source2, root, sorted);
assertBefore(l11, root, sorted);
assertBefore(l12, root, sorted);
assertBefore(l13, root, sorted);
assertBefore(l2, root, sorted);
assertBefore(l11, l2, sorted);
assertBefore(l12, l2, sorted);
assertBefore(l2, root, sorted);
assertBefore(source1, l2, sorted);
assertBefore(source2, l2, sorted);
assertBefore(source2, l13, sorted);
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
|
public static Date parseDate2(String datetimeStr) {
if (StringUtils.isEmpty(datetimeStr)) {
return null;
}
try {
datetimeStr = datetimeStr.trim();
int len = datetimeStr.length();
if (datetimeStr.contains("-") && datetimeStr.contains(":") && datetimeStr.contains(".")) {
                // contains date + time + milliseconds
                // determine the number of millisecond digits
int msLen = len - datetimeStr.indexOf(".") - 1;
StringBuilder ms = new StringBuilder();
for (int i = 0; i < msLen; i++) {
ms.append("S");
}
String formatter = "yyyy-MM-dd HH:mm:ss." + ms;
DateTimeFormatter dateTimeFormatter = dateFormatterCache.get(formatter);
LocalDateTime dateTime = LocalDateTime.parse(datetimeStr, dateTimeFormatter);
return Date.from(dateTime.atZone(ZoneId.systemDefault()).toInstant());
} else if (datetimeStr.contains("-") && datetimeStr.contains(":")) {
                // contains date + time
                // check whether a seconds component is present
int i = datetimeStr.indexOf(":");
i = datetimeStr.indexOf(":", i + 1);
String formatter;
if (i > -1) {
formatter = "yyyy-MM-dd HH:mm:ss";
} else {
formatter = "yyyy-MM-dd HH:mm";
}
DateTimeFormatter dateTimeFormatter = dateFormatterCache.get(formatter);
LocalDateTime dateTime = LocalDateTime.parse(datetimeStr, dateTimeFormatter);
return Date.from(dateTime.atZone(ZoneId.systemDefault()).toInstant());
} else if (datetimeStr.contains("-")) {
                // date only
String formatter = "yyyy-MM-dd";
DateTimeFormatter dateTimeFormatter = dateFormatterCache.get(formatter);
LocalDate localDate = LocalDate.parse(datetimeStr, dateTimeFormatter);
return Date.from(localDate.atStartOfDay().atZone(ZoneId.systemDefault()).toInstant());
} else if (datetimeStr.contains(":")) {
                // time only
String formatter;
if (datetimeStr.contains(".")) {
                    // contains milliseconds
int msLen = len - datetimeStr.indexOf(".") - 1;
StringBuilder ms = new StringBuilder();
for (int i = 0; i < msLen; i++) {
ms.append("S");
}
formatter = "HH:mm:ss." + ms;
} else {
                    // check whether a seconds component is present
int i = datetimeStr.indexOf(":");
i = datetimeStr.indexOf(":", i + 1);
if (i > -1) {
formatter = "HH:mm:ss";
} else {
formatter = "HH:mm";
}
}
DateTimeFormatter dateTimeFormatter = dateFormatterCache.get(formatter);
LocalTime localTime = LocalTime.parse(datetimeStr, dateTimeFormatter);
LocalDate localDate = LocalDate.of(1970, Month.JANUARY, 1);
LocalDateTime localDateTime = LocalDateTime.of(localDate, localTime);
return Date.from(localDateTime.atZone(ZoneId.systemDefault()).toInstant());
}
} catch (Throwable e) {
logger.error(e.getMessage(), e);
}
return null;
}
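// Hedged usage sketch (illustration only, not part of the original source; "Util" is the
// class exercised by the test below, and each format follows one branch of parseDate2 above):
static void parseDate2Examples() {
    Date d1 = Util.parseDate2("2023-01-15 10:20:30.123"); // date + time + milliseconds
    Date d2 = Util.parseDate2("2023-01-15 10:20");        // date + time, no seconds
    Date d3 = Util.parseDate2("2023-01-15");              // date only
    Date d4 = Util.parseDate2("10:20:30");                // time only, anchored to 1970-01-01
    Date d5 = Util.parseDate2("1a 2b 3c");                // unparseable -> logged, returns null
}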
|
@PrepareForTest(StringUtils.class)
@Test
public void parseDate2InputNotNullOutputNull2() throws Exception {
// Setup mocks
PowerMockito.mockStatic(StringUtils.class);
// Arrange
final String datetimeStr = "1a 2b 3c";
final Method isEmptyMethod =
DTUMemberMatcher.method(StringUtils.class, "isEmpty", String.class);
PowerMockito.doReturn(false)
.when(StringUtils.class, isEmptyMethod)
.withArguments(or(isA(String.class), isNull(String.class)));
// Act
final Date actual = Util.parseDate2(datetimeStr);
// Assert result
Assert.assertNull(actual);
}
|
@Override
public Object toConnectRow(final Object ksqlData) {
if (!(ksqlData instanceof Struct)) {
return ksqlData;
}
final Schema schema = getSchema();
final Struct struct = new Struct(schema);
Struct originalData = (Struct) ksqlData;
Schema originalSchema = originalData.schema();
if (originalSchema.name() == null && schema.name() != null) {
originalSchema = AvroSchemas.getAvroCompatibleConnectSchema(
originalSchema, schema.name()
);
originalData = ConnectSchemas.withCompatibleRowSchema(originalData, originalSchema);
}
validate(originalSchema, schema);
copyStruct(originalData, originalSchema, struct, schema);
return struct;
}
|
@Test
public void shouldTransformStructWithDefaultValue() {
// Given:
final Schema schema = SchemaBuilder.struct()
.field("f1", SchemaBuilder.OPTIONAL_STRING_SCHEMA)
.field("f2", SchemaBuilder.OPTIONAL_INT32_SCHEMA)
.field("f3", SchemaBuilder.int64().defaultValue(123L))
.build();
final Struct struct = new Struct(ORIGINAL_SCHEMA)
.put("f1", "abc")
.put("f2", 12);
// When:
final Object object = new AvroSRSchemaDataTranslator(schema).toConnectRow(struct);
// Then:
assertThat(object, instanceOf(Struct.class));
assertThat(((Struct) object).schema(), sameInstance(schema));
assertThat(((Struct) object).get("f3"), is(123L));
}
|
public CompletableFuture<Void> handlePullQuery(
final ServiceContext serviceContext,
final PullPhysicalPlan pullPhysicalPlan,
final ConfiguredStatement<Query> statement,
final RoutingOptions routingOptions,
final PullQueryWriteStream pullQueryQueue,
final CompletableFuture<Void> shouldCancelRequests
) {
final List<KsqlPartitionLocation> allLocations = pullPhysicalPlan.getMaterialization().locator()
.locate(
pullPhysicalPlan.getKeys(),
routingOptions,
routingFilterFactory,
pullPhysicalPlan.getPlanType() == PullPhysicalPlanType.RANGE_SCAN
);
final Map<Integer, List<Host>> emptyPartitions = allLocations.stream()
.filter(loc -> loc.getNodes().stream().noneMatch(node -> node.getHost().isSelected()))
.collect(Collectors.toMap(
KsqlPartitionLocation::getPartition,
loc -> loc.getNodes().stream().map(KsqlNode::getHost).collect(Collectors.toList())));
if (!emptyPartitions.isEmpty()) {
final MaterializationException materializationException = new MaterializationException(
"Unable to execute pull query. "
+ emptyPartitions.entrySet()
.stream()
.map(kv -> String.format(
"Partition %s failed to find valid host. Hosts scanned: %s",
kv.getKey(), kv.getValue()))
.collect(Collectors.joining(", ", "[", "]")));
LOG.debug(materializationException.getMessage());
throw materializationException;
}
// at this point we should filter out the hosts that we should not route to
final List<KsqlPartitionLocation> locations = allLocations
.stream()
.map(KsqlPartitionLocation::removeFilteredHosts)
.collect(Collectors.toList());
final CompletableFuture<Void> completableFuture = new CompletableFuture<>();
coordinatorExecutorService.submit(() -> {
try {
executeRounds(serviceContext, pullPhysicalPlan, statement, routingOptions,
locations, pullQueryQueue, shouldCancelRequests);
completableFuture.complete(null);
} catch (Throwable t) {
completableFuture.completeExceptionally(t);
}
});
return completableFuture;
}
|
@Test
public void forwardingError_throwsError() {
// Given:
locate(location5);
when(ksqlClient.makeQueryRequest(eq(node2.location()), any(), any(), any(), any(), any(), any()))
.thenThrow(new RuntimeException("Network Error"));
// When:
CompletableFuture<Void> future = haRouting.handlePullQuery(
serviceContext, pullPhysicalPlan, statement, routingOptions,
pullQueryQueue, disconnect);
final Exception e = assertThrows(
ExecutionException.class,
future::get
);
// Then:
assertThat(pullQueryQueue.size(), is(0));
assertThat(Throwables.getRootCause(e).getMessage(),
containsString("Exhausted standby hosts to try."));
}
|
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
|
@Test
public void testGoodStateParameterSuperclassStateType() throws Exception {
DoFnSignatures.getSignature(
new DoFn<KV<String, Integer>, Long>() {
@StateId("my-id")
private final StateSpec<CombiningState<Integer, int[], Integer>> state =
StateSpecs.combining(Sum.ofIntegers());
@ProcessElement
public void myProcessElement(
ProcessContext context,
@StateId("my-id") GroupingState<Integer, Integer> groupingState) {}
}.getClass());
}
|
public Collection<PluginUpdateAggregate> aggregate(@Nullable Collection<PluginUpdate> pluginUpdates) {
if (pluginUpdates == null || pluginUpdates.isEmpty()) {
return Collections.emptyList();
}
Map<Plugin, PluginUpdateAggregateBuilder> builders = new HashMap<>();
for (PluginUpdate pluginUpdate : pluginUpdates) {
Plugin plugin = pluginUpdate.getPlugin();
PluginUpdateAggregateBuilder builder = builders.get(plugin);
if (builder == null) {
builder = PluginUpdateAggregateBuilder.builderFor(plugin);
builders.put(plugin, builder);
}
builder.add(pluginUpdate);
}
return Lists.newArrayList(transform(builders.values(), PluginUpdateAggregateBuilder::build));
}
|
@Test
public void aggregates_groups_pluginUpdate_per_plugin_key() {
Collection<PluginUpdateAggregator.PluginUpdateAggregate> aggregates = underTest.aggregate(ImmutableList.of(
createPluginUpdate("key1"),
createPluginUpdate("key1"),
createPluginUpdate("key0"),
createPluginUpdate("key2"),
createPluginUpdate("key0")));
assertThat(aggregates).hasSize(3);
assertThat(aggregates).extracting("plugin.key").containsOnlyOnce("key1", "key0", "key2");
}
|
public DataTable subTable(int fromRow, int fromColumn) {
return subTable(fromRow, fromColumn, height(), width());
}
|
@Test
void subTable_throws_for_invalid_from_to_row() {
DataTable table = createSimpleTable();
assertThrows(IllegalArgumentException.class, () -> table.subTable(2, 0, 1, 1));
}
|
public Image getIcon(String pluginId) {
return getVersionedSecretsExtension(pluginId).getIcon(pluginId);
}
|
@Test
void getIcon_shouldDelegateToVersionedExtension() {
SecretsExtensionV1 secretsExtensionV1 = mock(SecretsExtensionV1.class);
Map<String, VersionedSecretsExtension> secretsExtensionMap = Map.of("1.0", secretsExtensionV1);
extension = new SecretsExtension(pluginManager, extensionsRegistry, secretsExtensionMap);
when(pluginManager.resolveExtensionVersion(PLUGIN_ID, SECRETS_EXTENSION, SUPPORTED_VERSIONS)).thenReturn(SecretsExtensionV1.VERSION);
this.extension.getIcon(PLUGIN_ID);
verify(secretsExtensionV1).getIcon(PLUGIN_ID);
}
|
@Override
public List<DictDataDO> getDictDataList(Integer status, String dictType) {
List<DictDataDO> list = dictDataMapper.selectListByStatusAndDictType(status, dictType);
list.sort(COMPARATOR_TYPE_AND_SORT);
return list;
}
|
@Test
public void testGetDictDataList() {
        // mock data
DictDataDO dictDataDO01 = randomDictDataDO().setDictType("yunai").setSort(2)
.setStatus(CommonStatusEnum.ENABLE.getStatus());
dictDataMapper.insert(dictDataDO01);
DictDataDO dictDataDO02 = randomDictDataDO().setDictType("yunai").setSort(1)
.setStatus(CommonStatusEnum.ENABLE.getStatus());
dictDataMapper.insert(dictDataDO02);
DictDataDO dictDataDO03 = randomDictDataDO().setDictType("yunai").setSort(3)
.setStatus(CommonStatusEnum.DISABLE.getStatus());
dictDataMapper.insert(dictDataDO03);
DictDataDO dictDataDO04 = randomDictDataDO().setDictType("yunai2").setSort(3)
.setStatus(CommonStatusEnum.DISABLE.getStatus());
dictDataMapper.insert(dictDataDO04);
        // prepare parameters
Integer status = CommonStatusEnum.ENABLE.getStatus();
String dictType = "yunai";
        // invoke
List<DictDataDO> dictDataDOList = dictDataService.getDictDataList(status, dictType);
        // assert
assertEquals(2, dictDataDOList.size());
assertPojoEquals(dictDataDO02, dictDataDOList.get(0));
assertPojoEquals(dictDataDO01, dictDataDOList.get(1));
}
|
public static void validate(
FederationPolicyInitializationContext policyContext, String myType)
throws FederationPolicyInitializationException {
if (myType == null) {
throw new FederationPolicyInitializationException(
"The myType parameter" + " should not be null.");
}
if (policyContext == null) {
throw new FederationPolicyInitializationException(
"The FederationPolicyInitializationContext provided is null. Cannot"
+ " reinitialize " + "successfully.");
}
if (policyContext.getFederationStateStoreFacade() == null) {
throw new FederationPolicyInitializationException(
"The FederationStateStoreFacade provided is null. Cannot"
+ " reinitialize successfully.");
}
if (policyContext.getFederationSubclusterResolver() == null) {
throw new FederationPolicyInitializationException(
"The FederationSubclusterResolver provided is null. Cannot"
+ " reinitialize successfully.");
}
if (policyContext.getSubClusterPolicyConfiguration() == null) {
throw new FederationPolicyInitializationException(
"The SubClusterPolicyConfiguration provided is null. Cannot "
+ "reinitialize successfully.");
}
String intendedType =
policyContext.getSubClusterPolicyConfiguration().getType();
if (!myType.equals(intendedType)) {
throw new FederationPolicyInitializationException(
"The FederationPolicyConfiguration carries a type (" + intendedType
+ ") different then mine (" + myType
+ "). Cannot reinitialize successfully.");
}
}
|
@Test
  public void correctInit() throws Exception {
FederationPolicyInitializationContextValidator.validate(context,
MockPolicyManager.class.getCanonicalName());
}
|
@Override
public void onProjectsDeleted(Set<DeletedProject> projects) {
checkNotNull(projects, "projects can't be null");
if (projects.isEmpty()) {
return;
}
Arrays.stream(listeners)
.forEach(safelyCallListener(listener -> listener.onProjectsDeleted(projects)));
}
|
@Test
public void onProjectsDeleted_throws_NPE_if_set_is_null_even_if_no_listeners() {
assertThatThrownBy(() -> underTestNoListeners.onProjectsDeleted(null))
.isInstanceOf(NullPointerException.class)
.hasMessage("projects can't be null");
}
|
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
boolean isFirstLine = true;
for (Item entry : entries.values()) {
if (isFirstLine) {
isFirstLine = false;
} else {
builder.append("\n");
}
builder.append(entry);
}
return builder.toString();
}
|
@Test
public void testPathToString() {
assertEquals("root string", "[]", DisplayData.Path.root().toString());
assertEquals("single component", "[a]", DisplayData.Path.absolute("a").toString());
assertEquals("hierarchy", "[a/b/c]", DisplayData.Path.absolute("a", "b", "c").toString());
}
|
@Override
public String getDisplayName()
{
return ARRAY + "(" + elementType.getDisplayName() + ")";
}
|
@Test
public void testDisplayName()
{
ArrayType type = new ArrayType(BOOLEAN);
assertEquals(type.getDisplayName(), "array(boolean)");
}
|
public static boolean areKerberosCredentialsValid(
UserGroupInformation ugi, boolean useTicketCache) {
Preconditions.checkState(isKerberosSecurityEnabled(ugi));
// note: UGI::hasKerberosCredentials inaccurately reports false
// for logins based on a keytab (fixed in Hadoop 2.6.1, see HADOOP-10786),
// so we check only in ticket cache scenario.
if (useTicketCache && !ugi.hasKerberosCredentials()) {
if (hasHDFSDelegationToken(ugi)) {
LOG.warn(
"Hadoop security is enabled but current login user does not have Kerberos credentials, "
+ "use delegation token instead. Flink application will terminate after token expires.");
return true;
} else {
LOG.error(
"Hadoop security is enabled, but current login user has neither Kerberos credentials "
+ "nor delegation tokens!");
return false;
}
}
return true;
}
|
@Test
public void testShouldReturnTrueIfTicketCacheIsNotUsed() {
UserGroupInformation.setConfiguration(
getHadoopConfigWithAuthMethod(AuthenticationMethod.KERBEROS));
UserGroupInformation user = createTestUser(AuthenticationMethod.KERBEROS);
boolean result = HadoopUtils.areKerberosCredentialsValid(user, false);
assertTrue(result);
}
|
public String getEcosystem(DefCveItem cve) {
final List<Reference> references = Optional.ofNullable(cve)
.map(DefCveItem::getCve)
.map(CveItem::getReferences)
.orElse(null);
if (Objects.nonNull(references)) {
for (Reference r : references) {
final Hit<String> ecosystem = search.findFirst(r.getUrl());
if (ecosystem != null) {
return ecosystem.value;
}
}
}
return null;
}
|
@Test
public void testGetEcosystemMustHandleNullCve() {
// Given
UrlEcosystemMapper mapper = new UrlEcosystemMapper();
DefCveItem cveItem = new DefCveItem();
// When
String output = mapper.getEcosystem(cveItem);
// Then
assertNull(output);
}
|
public K getKeyInternal() {
return key;
}
|
@Test
public void testGetKeyInternal() {
assertEquals(0, replicatedRecord.getHits());
assertEquals("key", replicatedRecord.getKeyInternal());
assertEquals(0, replicatedRecord.getHits());
}
|
@POST
@Path("/select-idp")
@Consumes(MediaType.APPLICATION_FORM_URLENCODED)
public Response postSelectIdp(
@CookieParam("session_id") String sessionId,
@FormParam("identityProvider") String identityProvider) {
var redirect =
authService.selectedIdentityProvider(new SelectedIdpRequest(sessionId, identityProvider));
return Response.seeOther(redirect).build();
}
|
@Test
void selectIdp_passParams() {
var sessionId = IdGenerator.generateID();
var selectedIdpIssuer = "https://aok-testfalen.example.com";
var idpRedirect = URI.create(selectedIdpIssuer).resolve("/auth/login");
var authService = mock(AuthService.class);
when(authService.selectedIdentityProvider(any())).thenReturn(idpRedirect);
var sut = new AuthEndpoint(authService);
// when
try (var res = sut.postSelectIdp(sessionId, selectedIdpIssuer)) {
// then
var captor = ArgumentCaptor.forClass(SelectedIdpRequest.class);
verify(authService).selectedIdentityProvider(captor.capture());
var req = captor.getValue();
assertEquals(selectedIdpIssuer, req.selectedIdentityProvider());
assertEquals(sessionId, req.sessionId());
}
}
|
@ExecuteOn(TaskExecutors.IO)
@Post(uri = "/replay/by-ids")
@Operation(tags = {"Executions"}, summary = "Create new executions from old ones. Keep the flow revision")
@ApiResponse(responseCode = "200", description = "On success", content = {@Content(schema = @Schema(implementation = BulkResponse.class))})
@ApiResponse(responseCode = "422", description = "Replayed with errors", content = {@Content(schema = @Schema(implementation = BulkErrorResponse.class))})
public MutableHttpResponse<?> replayByIds(
@Parameter(description = "The execution id") @Body List<String> executionsId
) throws Exception {
List<Execution> executions = new ArrayList<>();
Set<ManualConstraintViolation<String>> invalids = new HashSet<>();
for (String executionId : executionsId) {
Optional<Execution> execution = executionRepository.findById(tenantService.resolveTenant(), executionId);
if (execution.isEmpty()) {
invalids.add(ManualConstraintViolation.of(
"execution not found",
executionId,
String.class,
"execution",
executionId
));
} else {
executions.add(execution.get());
}
}
if (!invalids.isEmpty()) {
return HttpResponse.badRequest(BulkErrorResponse
.builder()
.message("invalid bulk replay")
.invalids(invalids)
.build()
);
}
for (Execution execution : executions) {
Execution replay = executionService.replay(execution, null, null);
executionQueue.emit(replay);
eventPublisher.publishEvent(new CrudEvent<>(replay, execution, CrudEventType.CREATE));
}
return HttpResponse.ok(BulkResponse.builder().count(executions.size()).build());
}
|
@Test
void replayByIds() throws TimeoutException {
Execution execution1 = runnerUtils.runOne(null, "io.kestra.tests", "each-sequential-nested");
Execution execution2 = runnerUtils.runOne(null, "io.kestra.tests", "each-sequential-nested");
assertThat(execution1.getState().isTerminated(), is(true));
assertThat(execution2.getState().isTerminated(), is(true));
PagedResults<?> executions = client.toBlocking().retrieve(
GET("/api/v1/executions/search"), PagedResults.class
);
assertThat(executions.getTotal(), is(2L));
// replay executions
BulkResponse replayResponse = client.toBlocking().retrieve(
HttpRequest.POST(
"/api/v1/executions/replay/by-ids",
List.of(execution1.getId(), execution2.getId())
),
BulkResponse.class
);
assertThat(replayResponse.getCount(), is(2));
executions = client.toBlocking().retrieve(
GET("/api/v1/executions/search"), PagedResults.class
);
assertThat(executions.getTotal(), is(4L));
}
|
@SuppressWarnings("unchecked")
@VisibleForTesting
Schema<T> initializeSchema() throws ClassNotFoundException {
if (StringUtils.isEmpty(this.pulsarSinkConfig.getTypeClassName())) {
return (Schema<T>) Schema.BYTES;
}
Class<?> typeArg = Reflections.loadClass(this.pulsarSinkConfig.getTypeClassName(), functionClassLoader);
if (Void.class.equals(typeArg)) {
// return type is 'void', so there's no schema to check
return null;
}
ConsumerConfig consumerConfig = new ConsumerConfig();
consumerConfig.setSchemaProperties(pulsarSinkConfig.getSchemaProperties());
if (!StringUtils.isEmpty(pulsarSinkConfig.getSchemaType())) {
if (GenericRecord.class.isAssignableFrom(typeArg)) {
consumerConfig.setSchemaType(SchemaType.AUTO_CONSUME.toString());
SchemaType configuredSchemaType = SchemaType.valueOf(pulsarSinkConfig.getSchemaType());
if (SchemaType.AUTO_CONSUME != configuredSchemaType) {
log.info("The configured schema type {} is not able to write GenericRecords."
+ " So overwrite the schema type to be {}", configuredSchemaType, SchemaType.AUTO_CONSUME);
}
} else {
consumerConfig.setSchemaType(pulsarSinkConfig.getSchemaType());
}
return (Schema<T>) topicSchema.getSchema(pulsarSinkConfig.getTopic(), typeArg,
consumerConfig, false);
} else {
consumerConfig.setSchemaType(pulsarSinkConfig.getSerdeClassName());
return (Schema<T>) topicSchema.getSchema(pulsarSinkConfig.getTopic(), typeArg,
consumerConfig, false, functionClassLoader);
}
}
|
@Test
public void testInitializeSchema() throws Exception {
PulsarClient pulsarClient = getPulsarClient();
// generic record type (no serde and no schema type)
PulsarSinkConfig pulsarSinkConfig = getPulsarConfigs();
pulsarSinkConfig.setSerdeClassName(null);
pulsarSinkConfig.setTypeClassName(GenericRecord.class.getName());
PulsarSink sink = new PulsarSink(
pulsarClient, pulsarSinkConfig, new HashMap<>(), mock(ComponentStatsManager.class),
Thread.currentThread().getContextClassLoader(), producerCache);
Schema<?> schema = sink.initializeSchema();
assertTrue(schema instanceof AutoConsumeSchema);
// generic record type (default serde and no schema type)
pulsarSinkConfig = getPulsarConfigs();
pulsarSinkConfig.setTypeClassName(GenericRecord.class.getName());
sink = new PulsarSink(
pulsarClient, pulsarSinkConfig, new HashMap<>(), mock(ComponentStatsManager.class),
Thread.currentThread().getContextClassLoader(), producerCache);
schema = sink.initializeSchema();
assertTrue(schema instanceof AutoConsumeSchema);
// generic record type (no serde and wrong schema type)
pulsarSinkConfig = getPulsarConfigs();
pulsarSinkConfig.setSerdeClassName(null);
pulsarSinkConfig.setSchemaType(SchemaType.AVRO.toString());
pulsarSinkConfig.setTypeClassName(GenericRecord.class.getName());
sink = new PulsarSink(
pulsarClient, pulsarSinkConfig, new HashMap<>(), mock(ComponentStatsManager.class),
Thread.currentThread().getContextClassLoader(), producerCache);
schema = sink.initializeSchema();
assertTrue(schema instanceof AutoConsumeSchema);
// generic record type (no serde and AUTO_CONSUME schema type)
pulsarSinkConfig = getPulsarConfigs();
pulsarSinkConfig.setSerdeClassName(null);
pulsarSinkConfig.setSchemaType(SchemaType.AUTO_CONSUME.toString());
pulsarSinkConfig.setTypeClassName(GenericRecord.class.getName());
sink = new PulsarSink(
pulsarClient, pulsarSinkConfig, new HashMap<>(), mock(ComponentStatsManager.class),
Thread.currentThread().getContextClassLoader(), producerCache);
schema = sink.initializeSchema();
assertTrue(schema instanceof AutoConsumeSchema);
// generic record type (default serde and AUTO_CONSUME schema type)
pulsarSinkConfig = getPulsarConfigs();
pulsarSinkConfig.setSchemaType(SchemaType.AUTO_CONSUME.toString());
pulsarSinkConfig.setTypeClassName(GenericRecord.class.getName());
sink = new PulsarSink(
pulsarClient, pulsarSinkConfig, new HashMap<>(), mock(ComponentStatsManager.class),
Thread.currentThread().getContextClassLoader(), producerCache);
schema = sink.initializeSchema();
assertTrue(schema instanceof AutoConsumeSchema);
}
|
public static ReplaceFirst replaceFirst(String regex, String replacement) {
return replaceFirst(Pattern.compile(regex), replacement);
}
|
@Test
@Category(NeedsRunner.class)
public void testReplaceFirst() {
PCollection<String> output =
p.apply(Create.of("xjx", "yjy", "zjz")).apply(Regex.replaceFirst("[xyz]", "new"));
PAssert.that(output).containsInAnyOrder("newjx", "newjy", "newjz");
p.run();
}
|
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
return getMapReturnTypes(mapInterface, inType, null, false);
}
|
@Test
void testAbstractAndInterfaceTypes() {
// interface
RichMapFunction<String, ?> function =
new RichMapFunction<String, Testable>() {
private static final long serialVersionUID = 1L;
@Override
public Testable map(String value) throws Exception {
return null;
}
};
TypeInformation<?> ti =
TypeExtractor.getMapReturnTypes(function, BasicTypeInfo.STRING_TYPE_INFO);
assertThat(ti).isInstanceOf(GenericTypeInfo.class);
// abstract class with out class member
RichMapFunction<String, ?> function2 =
new RichMapFunction<String, AbstractClassWithoutMember>() {
private static final long serialVersionUID = 1L;
@Override
public AbstractClassWithoutMember map(String value) throws Exception {
return null;
}
};
ti = TypeExtractor.getMapReturnTypes(function2, BasicTypeInfo.STRING_TYPE_INFO);
assertThat(ti).isInstanceOf(GenericTypeInfo.class);
// abstract class with class member
RichMapFunction<String, ?> function3 =
new RichMapFunction<String, AbstractClassWithMember>() {
private static final long serialVersionUID = 1L;
@Override
public AbstractClassWithMember map(String value) throws Exception {
return null;
}
};
ti = TypeExtractor.getMapReturnTypes(function3, BasicTypeInfo.STRING_TYPE_INFO);
assertThat(ti).isInstanceOf(PojoTypeInfo.class);
}
|
@Udtf
public <T> List<List<T>> cube(final List<T> columns) {
if (columns == null) {
return Collections.emptyList();
}
return createAllCombinations(columns);
}
|
@Test
public void shouldHandleOneNull() {
// Given:
final Object[] oneNull = {1, null};
// When:
final List<List<Object>> result = cubeUdtf.cube(Arrays.asList(oneNull));
// Then:
assertThat(result.size(), is(2));
assertThat(result.get(0), is(Arrays.asList(null, null)));
assertThat(result.get(1), is(Arrays.asList(1, null)));
}
|
public static File generate(String content, int width, int height, File targetFile) {
String extName = FileUtil.extName(targetFile);
switch (extName) {
case QR_TYPE_SVG:
String svg = generateAsSvg(content, new QrConfig(width, height));
FileUtil.writeString(svg, targetFile, StandardCharsets.UTF_8);
break;
case QR_TYPE_TXT:
String txt = generateAsAsciiArt(content, new QrConfig(width, height));
FileUtil.writeString(txt, targetFile, StandardCharsets.UTF_8);
break;
default:
final BufferedImage image = generate(content, width, height);
ImgUtil.write(image, targetFile);
break;
}
return targetFile;
}
|
@Test
@Disabled
public void generateToFileTest() {
final QrConfig qrConfig = QrConfig.create()
.setForeColor(Color.BLUE)
.setBackColor(new Color(0,200,255))
.setWidth(0)
.setHeight(0).setMargin(1);
final File qrFile = QrCodeUtil.generate("https://hutool.cn/", qrConfig, FileUtil.touch("d:/test/ascii_art_qr_code.txt"));
//final BufferedReader reader = FileUtil.getReader(qrFile, StandardCharsets.UTF_8);
//reader.lines().forEach(Console::log);
}
|
@Override
public AuthorizationPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
Capabilities capabilities = capabilities(descriptor.id());
PluggableInstanceSettings authConfigSettings = authConfigSettings(descriptor.id());
PluggableInstanceSettings roleSettings = roleSettings(descriptor.id(), capabilities);
Image image = image(descriptor.id());
return new AuthorizationPluginInfo(descriptor, authConfigSettings, roleSettings, image, capabilities);
}
|
@Test
public void shouldBuildPluginInfoWithAuthSettings() {
GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build();
List<PluginConfiguration> pluginConfigurations = List.of(
new PluginConfiguration("username", new Metadata(true, false)),
new PluginConfiguration("password", new Metadata(true, true))
);
when(extension.getAuthConfigMetadata(descriptor.id())).thenReturn(pluginConfigurations);
when(extension.getAuthConfigView(descriptor.id())).thenReturn("auth_config");
AuthorizationPluginInfo pluginInfo = new AuthorizationPluginInfoBuilder(extension).pluginInfoFor(descriptor);
assertThat(pluginInfo.getAuthConfigSettings(), is(new PluggableInstanceSettings(pluginConfigurations, new PluginView("auth_config"))));
}
|
public FilePath createTempDir(final String prefix, final String suffix) throws IOException, InterruptedException {
try {
String[] s;
if (suffix == null || suffix.isBlank()) {
s = new String[]{prefix, "tmp"}; // see File.createTempFile - tmp is used if suffix is null
} else {
s = new String[]{prefix, suffix};
}
String name = String.join(".", s);
return new FilePath(this, act(new CreateTempDir(name)));
} catch (IOException e) {
throw new IOException("Failed to create a temp directory on " + remote, e);
}
}
|
@Issue("JENKINS-48227")
@Test
public void testCreateTempDir() throws IOException, InterruptedException {
final File srcFolder = temp.newFolder("src");
final FilePath filePath = new FilePath(srcFolder);
FilePath x = filePath.createTempDir("jdk", "dmg");
FilePath y = filePath.createTempDir("jdk", "pkg");
FilePath z = filePath.createTempDir("jdk", null);
assertNotNull("FilePath x should not be null", x);
assertNotNull("FilePath y should not be null", y);
assertNotNull("FilePath z should not be null", z);
assertTrue(x.getName().contains("jdk.dmg"));
assertTrue(y.getName().contains("jdk.pkg"));
assertTrue(z.getName().contains("jdk.tmp"));
}
|
public static String calculateMemoryWithDefaultOverhead(String memory) {
long memoryMB = convertToBytes(memory) / M;
long memoryOverheadMB = Math.max((long) (memoryMB * 0.1f), MINIMUM_OVERHEAD);
return (memoryMB + memoryOverheadMB) + "Mi";
}
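// Hedged worked example: assuming MINIMUM_OVERHEAD is 384 (MiB), which is not shown in
// this excerpt, the overhead is max(10% of the request, that minimum):
//   calculateMemoryWithDefaultOverhead("4Gi") -> 4096 + max(409, 384) = "4505Mi"
//   calculateMemoryWithDefaultOverhead("1Gi") -> 1024 + max(102, 384) = "1408Mi"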
|
@Test
void testExceptionMaxLong() {
assertThrows(NumberFormatException.class, () -> {
K8sUtils.calculateMemoryWithDefaultOverhead("10000000Tb");
});
}
|
public boolean containsPK(List<String> cols) {
if (cols == null) {
return false;
}
List<String> pk = getPrimaryKeyOnlyName();
if (pk.isEmpty()) {
return false;
}
        // cols must contain every primary-key column (exact match first, then case-insensitive)
if (cols.containsAll(pk)) {
return true;
} else {
return CollectionUtils.toUpperList(cols).containsAll(CollectionUtils.toUpperList(pk));
}
}
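// Hedged usage sketch: assuming the table's primary key is the single column "id",
// the upper-cased fallback makes the check case-insensitive:
//   tableMeta.containsPK(Arrays.asList("id", "name")) -> true (exact match)
//   tableMeta.containsPK(Arrays.asList("ID", "NAME")) -> true (upper-cased fallback)
//   tableMeta.containsPK(Collections.singletonList("name")) -> false (pk column missing)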
|
@Test
public void testContainsPKWithNoMatch() {
List<String> cols = Collections.singletonList("other");
assertFalse(tableMeta.containsPK(cols));
}
|
@Override
public ConsumerBuilder<T> maxAcknowledgmentGroupSize(int messageNum) {
checkArgument(messageNum > 0, "acknowledgementsGroupSize needs to be > 0");
conf.setMaxAcknowledgmentGroupSize(messageNum);
return this;
}
|
@Test
public void testMaxAcknowledgmentGroupSizeInvalid() {
try {
consumerBuilderImpl.maxAcknowledgmentGroupSize(0);
fail("Should throw exception");
} catch (IllegalArgumentException e) {
// expect exception
assertEquals(e.getMessage(), "acknowledgementsGroupSize needs to be > 0");
}
}
|
@Override
public void run() {
if (!ignoreListenShutdownHook && destroyed.compareAndSet(false, true)) {
if (logger.isInfoEnabled()) {
logger.info("Run shutdown hook now.");
}
doDestroy();
}
}
|
@Test
    public void testDestroyNoModuleManagedExternally() {
boolean hasModuleManagedExternally = false;
for (ModuleModel moduleModel : applicationModel.getModuleModels()) {
if (moduleModel.isLifeCycleManagedExternally()) {
hasModuleManagedExternally = true;
break;
}
}
Assertions.assertFalse(hasModuleManagedExternally);
dubboShutdownHook.run();
Assertions.assertTrue(applicationModel.isDestroyed());
}
|
public static HazelcastSqlOperatorTable instance() {
return INSTANCE;
}
|
@Test
public void testNoOverride() {
Map<BiTuple<String, SqlSyntax>, SqlOperator> map = new HashMap<>();
for (SqlOperator operator : HazelcastSqlOperatorTable.instance().getOperatorList()) {
BiTuple<String, SqlSyntax> key = BiTuple.of(operator.getName(), operator.getSyntax());
SqlOperator oldOperator = map.put(key, operator);
assertNull("Duplicate operator \"" + operator.getName(), oldOperator);
}
}
|
public List<String> targetedUrls(final UUID aci) {
final DynamicTurnConfiguration turnConfig = dynamicConfigurationManager.getConfiguration().getTurnConfiguration();
final Optional<TurnUriConfiguration> enrolled = turnConfig.getUriConfigs().stream()
.filter(config -> config.getEnrolledAcis().contains(aci))
.findFirst();
return enrolled
.map(turnUriConfiguration -> turnUriConfiguration.getUris().stream().toList())
.orElse(Collections.emptyList());
}
|
@Test
public void testExplicitEnrollment() throws JsonProcessingException {
final String configString = """
captcha:
scoreFloor: 1.0
turn:
secret: bloop
uriConfigs:
- uris:
- enrolled.org
weight: 0
enrolledAcis:
- 732506d7-d04f-43a4-b1d7-8a3a91ebe8a6
- uris:
- unenrolled.org
weight: 1
""";
DynamicConfiguration config = DynamicConfigurationManager
.parseConfiguration(configString, DynamicConfiguration.class)
.orElseThrow();
@SuppressWarnings("unchecked")
DynamicConfigurationManager<DynamicConfiguration> mockDynamicConfigManager = mock(
DynamicConfigurationManager.class);
when(mockDynamicConfigManager.getConfiguration()).thenReturn(config);
final DynamicConfigTurnRouter configTurnRouter = new DynamicConfigTurnRouter(mockDynamicConfigManager);
List<String> urls = configTurnRouter.targetedUrls(UUID.fromString("732506d7-d04f-43a4-b1d7-8a3a91ebe8a6"));
assertThat(urls.getFirst()).isEqualTo("enrolled.org");
urls = configTurnRouter.targetedUrls(UUID.randomUUID());
assertTrue(urls.isEmpty());
}
|
private static String randomString(Random random, char[] alphabet, int numRandomChars) {
        // The buffer must hold the requested number of random chars plus the chunk separators ('-').
int bufferSize = numRandomChars + ((numRandomChars - 1) / RANDOM_STRING_CHUNK_SIZE);
CharBuffer charBuffer = CharBuffer.allocate(bufferSize);
try {
randomString(charBuffer, random, alphabet, numRandomChars);
} catch (IOException e) {
            // This should never happen if we calculate the buffer size correctly.
throw new AssertionError(e);
}
return charBuffer.flip().toString();
}
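// Hedged arithmetic note: assuming RANDOM_STRING_CHUNK_SIZE is 4 (the constant is not
// shown in this excerpt), numRandomChars = 10 needs 10 + (10 - 1) / 4 = 12 buffer chars,
// yielding a result shaped like "abcd-efgh-ij".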
|
@Test(expected = NegativeArraySizeException.class)
public void testNegativeArraySizeException() {
// Boundary test
StringUtils.randomString(-1);
}
|
public static Ip4Address valueOf(int value) {
byte[] bytes =
ByteBuffer.allocate(INET_BYTE_LENGTH).putInt(value).array();
return new Ip4Address(bytes);
}
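// Minimal usage sketch: the int is written big-endian, so the most significant
// byte becomes the first octet.
static void ip4ValueOfExample() {
    Ip4Address a = Ip4Address.valueOf(0x01020304); // -> "1.2.3.4"
    Ip4Address b = Ip4Address.valueOf(0xFFFFFFFF); // -> "255.255.255.255"
}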
|
@Test
public void testValueOfStringIPv4() {
Ip4Address ipAddress;
ipAddress = Ip4Address.valueOf("1.2.3.4");
assertThat(ipAddress.toString(), is("1.2.3.4"));
ipAddress = Ip4Address.valueOf("0.0.0.0");
assertThat(ipAddress.toString(), is("0.0.0.0"));
ipAddress = Ip4Address.valueOf("255.255.255.255");
assertThat(ipAddress.toString(), is("255.255.255.255"));
}
|
public Future<KafkaVersionChange> reconcile() {
return getVersionFromController()
.compose(i -> getPods())
.compose(this::detectToAndFromVersions)
.compose(i -> prepareVersionChange());
}
|
@Test
public void testDowngradeFailsWithNewProtocolVersions(VertxTestContext context) {
String oldKafkaVersion = KafkaVersionTestUtils.LATEST_KAFKA_VERSION;
String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.LATEST_PROTOCOL_VERSION;
String oldLogMessageFormatVersion = KafkaVersionTestUtils.LATEST_FORMAT_VERSION;
String kafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION;
String interBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION;
String logMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION;
VersionChangeCreator vcc = mockVersionChangeCreator(
mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion),
mockNewCluster(
null,
mockSps(oldKafkaVersion),
mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion)
)
);
Checkpoint async = context.checkpoint();
vcc.reconcile().onComplete(context.failing(c -> context.verify(() -> {
assertThat(c.getClass(), is(KafkaUpgradeException.class));
assertThat(c.getMessage(), is("log.message.format.version (" + oldInterBrokerProtocolVersion + ") and inter.broker.protocol.version (" + oldLogMessageFormatVersion + ") used by the brokers have to be set and be lower or equal to the Kafka broker version we downgrade to (" + KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION + ")"));
async.flag();
})));
}
|
public static Value convertBoxedJavaType(Object boxed) {
if (boxed == null) {
return null;
}
final Class<?> clazz = boxed.getClass();
if (clazz == String.class) {
return new StringValue((String) boxed);
} else if (clazz == Integer.class) {
return new IntValue((Integer) boxed);
} else if (clazz == Long.class) {
return new LongValue((Long) boxed);
} else if (clazz == Double.class) {
return new DoubleValue((Double) boxed);
} else if (clazz == Float.class) {
return new FloatValue((Float) boxed);
} else if (clazz == Boolean.class) {
return new BooleanValue((Boolean) boxed);
} else if (clazz == Byte.class) {
return new ByteValue((Byte) boxed);
} else if (clazz == Short.class) {
return new ShortValue((Short) boxed);
} else if (clazz == Character.class) {
return new CharValue((Character) boxed);
} else {
throw new IllegalArgumentException("Object is no primitive Java type.");
}
}
|
@Test
void testJavaToValueConversion() {
assertThat(JavaToValueConverter.convertBoxedJavaType(null)).isNull();
assertThat(JavaToValueConverter.convertBoxedJavaType("123Test"))
.isEqualTo(new StringValue("123Test"));
assertThat(JavaToValueConverter.convertBoxedJavaType((byte) 44))
.isEqualTo(new ByteValue((byte) 44));
assertThat(JavaToValueConverter.convertBoxedJavaType((short) 10000))
.isEqualTo(new ShortValue((short) 10000));
assertThat(JavaToValueConverter.convertBoxedJavaType(3567564))
.isEqualTo(new IntValue(3567564));
assertThat(JavaToValueConverter.convertBoxedJavaType(767692734L))
.isEqualTo(new LongValue(767692734));
assertThat(JavaToValueConverter.convertBoxedJavaType(17.5f))
.isEqualTo(new FloatValue(17.5f));
assertThat(JavaToValueConverter.convertBoxedJavaType(3.1415926))
.isEqualTo(new DoubleValue(3.1415926));
assertThat(JavaToValueConverter.convertBoxedJavaType(true))
.isEqualTo(new BooleanValue(true));
assertThat(JavaToValueConverter.convertBoxedJavaType('@')).isEqualTo(new CharValue('@'));
assertThatThrownBy(() -> JavaToValueConverter.convertBoxedJavaType(new ArrayList<>()))
.isInstanceOf(IllegalArgumentException.class);
}
|
@Override
public WindowStoreIterator<V> backwardFetch(final K key,
final Instant timeFrom,
final Instant timeTo) throws IllegalArgumentException {
Objects.requireNonNull(key, "key can't be null");
final List<ReadOnlyWindowStore<K, V>> stores = provider.stores(storeName, windowStoreType);
for (final ReadOnlyWindowStore<K, V> windowStore : stores) {
try {
final WindowStoreIterator<V> result = windowStore.backwardFetch(key, timeFrom, timeTo);
if (!result.hasNext()) {
result.close();
} else {
return result;
}
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException(
"State store is not available anymore and may have been migrated to another instance; " +
"please re-discover its location from the state metadata.");
}
}
return KeyValueIterators.emptyWindowStoreIterator();
}
|
@Test
public void shouldFindValueForKeyWhenMultiStoresBackwards() {
final ReadOnlyWindowStoreStub<String, String> secondUnderlying = new
ReadOnlyWindowStoreStub<>(WINDOW_SIZE);
stubProviderTwo.addStore(storeName, secondUnderlying);
underlyingWindowStore.put("key-one", "value-one", 0L);
secondUnderlying.put("key-two", "value-two", 10L);
final List<KeyValue<Long, String>> keyOneResults =
StreamsTestUtils.toList(windowStore.backwardFetch("key-one", ofEpochMilli(0L), ofEpochMilli(1L)));
final List<KeyValue<Long, String>> keyTwoResults =
StreamsTestUtils.toList(windowStore.backwardFetch("key-two", ofEpochMilli(10L), ofEpochMilli(11L)));
assertEquals(Collections.singletonList(KeyValue.pair(0L, "value-one")), keyOneResults);
assertEquals(Collections.singletonList(KeyValue.pair(10L, "value-two")), keyTwoResults);
}
|
@Override
public void failover(NamedNode master) {
connection.sync(RedisCommands.SENTINEL_FAILOVER, master.getName());
}
|
@Test
public void testFailover() throws InterruptedException {
Collection<RedisServer> masters = connection.masters();
connection.failover(masters.iterator().next());
Thread.sleep(10000);
RedisServer newMaster = connection.masters().iterator().next();
assertThat(masters.iterator().next().getPort()).isNotEqualTo(newMaster.getPort());
}
|
@Override
public List<Object> handle(String targetName, List<Object> instances, RequestData requestData) {
if (requestData == null) {
return super.handle(targetName, instances, null);
}
if (!shouldHandle(instances)) {
return instances;
}
List<Object> result = routerConfig.isUseRequestRouter()
? getTargetInstancesByRequest(targetName, instances, requestData.getTag())
: getTargetInstancesByRules(targetName, instances, requestData.getPath(), requestData.getTag());
return super.handle(targetName, result, requestData);
}
|
@Test
public void testGetTargetInstancesWithOneInstance() {
List<Object> instances = new ArrayList<>();
ServiceInstance instance1 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.0");
instances.add(instance1);
Map<String, List<String>> header = new HashMap<>();
header.put("bar", Collections.singletonList("bar1"));
List<Object> targetInstances = flowRouteHandler.handle("foo", instances, new RequestData(header, null, null));
Assert.assertEquals(1, targetInstances.size());
Assert.assertEquals(instances, targetInstances);
}
|
public CredentialRetriever googleApplicationDefaultCredentials() {
return () -> {
try {
if (imageReference.getRegistry().endsWith("gcr.io")
|| imageReference.getRegistry().endsWith("docker.pkg.dev")) {
GoogleCredentials googleCredentials = googleCredentialsProvider.get();
logger.accept(LogEvent.info("Google ADC found"));
if (googleCredentials.createScopedRequired()) { // not scoped if service account
// The short-lived OAuth2 access token to be generated from the service account with
// refreshIfExpired() below will have one-hour expiry (as of Aug 2019). Instead of using
// an access token, it is technically possible to use the service account private key to
// auth with GCR, but it does not worth writing complex code to achieve that.
logger.accept(LogEvent.info("ADC is a service account. Setting GCS read-write scope"));
List<String> scope = Collections.singletonList(OAUTH_SCOPE_STORAGE_READ_WRITE);
googleCredentials = googleCredentials.createScoped(scope);
}
googleCredentials.refreshIfExpired();
logGotCredentialsFrom("Google Application Default Credentials");
AccessToken accessToken = googleCredentials.getAccessToken();
// https://cloud.google.com/container-registry/docs/advanced-authentication#access_token
return Optional.of(Credential.from("oauth2accesstoken", accessToken.getTokenValue()));
}
} catch (IOException ex) { // Includes the case where ADC is simply not available.
logger.accept(
LogEvent.info("ADC not present or error fetching access token: " + ex.getMessage()));
}
return Optional.empty();
};
}
|
@Test
public void testGoogleApplicationDefaultCredentials_adcNotPresent()
throws CredentialRetrievalException {
CredentialRetrieverFactory credentialRetrieverFactory =
new CredentialRetrieverFactory(
ImageReference.of("awesome.gcr.io", "repository", null),
mockLogger,
mockDockerCredentialHelperFactory,
() -> {
throw new IOException("ADC not present");
},
Collections.emptyMap());
Assert.assertFalse(
credentialRetrieverFactory.googleApplicationDefaultCredentials().retrieve().isPresent());
Mockito.verify(mockLogger)
.accept(LogEvent.info("ADC not present or error fetching access token: ADC not present"));
}
|
public FEELFnResult<TemporalAccessor> invoke(@ParameterName("from") String val) {
if ( val == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null"));
}
try {
TemporalAccessor parsed = FEEL_TIME.parse(val);
if (parsed.query(TemporalQueries.offset()) != null) {
            // it has a zone offset, so it is certainly an OffsetTime
OffsetTime asOffSetTime = parsed.query(OffsetTime::from);
return FEELFnResult.ofResult(asOffSetTime);
} else if (parsed.query(TemporalQueries.zone()) == null) {
            // no zone information at all, so it is certainly a local time
LocalTime asLocalTime = parsed.query(LocalTime::from);
return FEELFnResult.ofResult(asLocalTime);
} else if (parsed.query(TemporalQueries.zone()) != null) {
boolean hasSeconds = timeStringWithSeconds(val);
LocalTime asLocalTime = parsed.query(LocalTime::from);
ZoneId zoneId = parsed.query(TemporalQueries.zone());
ZoneTime zoneTime = ZoneTime.of(asLocalTime, zoneId, hasSeconds);
return FEELFnResult.ofResult(zoneTime);
}
return FEELFnResult.ofResult(parsed);
} catch (DateTimeException e) {
return manageDateTimeException(e, val);
}
}
|
@Test
void invokeStringParamNoOffset() {
FunctionTestUtil.assertResult(timeFunction.invoke("10:15:06"), LocalTime.of(10, 15, 6));
}
|
public NearCachePreloaderConfig setStoreInitialDelaySeconds(int storeInitialDelaySeconds) {
this.storeInitialDelaySeconds = checkPositive("storeInitialDelaySeconds",
storeInitialDelaySeconds);
return this;
}
|
@Test(expected = IllegalArgumentException.class)
public void setStoreInitialDelaySeconds_withNegative() {
config.setStoreInitialDelaySeconds(-1);
}
|
public boolean isValid(String value) {
if (value == null) {
return false;
}
URI uri; // ensure value is a valid URI
try {
uri = new URI(value);
} catch (URISyntaxException e) {
return false;
}
        // OK, perform additional validation
String scheme = uri.getScheme();
if (!isValidScheme(scheme)) {
return false;
}
String authority = uri.getRawAuthority();
if ("file".equals(scheme) && (authority == null || "".equals(authority))) { // Special case - file: allows an empty authority
return true; // this is a local file - nothing more to do here
} else if ("file".equals(scheme) && authority != null && authority.contains(":")) {
return false;
} else {
// Validate the authority
if (!isValidAuthority(authority)) {
return false;
}
}
if (!isValidPath(uri.getRawPath())) {
return false;
}
if (!isValidQuery(uri.getRawQuery())) {
return false;
}
if (!isValidFragment(uri.getRawFragment())) {
return false;
}
return true;
}
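// Hedged examples of the "file" special-casing above:
//   new UrlValidator().isValid("file:///etc/hosts")  -> true  (empty authority allowed)
//   new UrlValidator().isValid("file://host:8080/f") -> false (':' in a file authority)
//   new UrlValidator().isValid("not a uri")          -> false (fails URI parsing)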
|
@Test
public void testValidator411() {
UrlValidator urlValidator = new UrlValidator();
assertTrue(urlValidator.isValid("http://example.rocks:/"));
assertTrue(urlValidator.isValid("http://example.rocks:0/"));
assertTrue(urlValidator.isValid("http://example.rocks:65535/"));
assertFalse(urlValidator.isValid("http://example.rocks:65536/"));
assertFalse(urlValidator.isValid("http://example.rocks:100000/"));
}
|
public void timePasses() {
if (state.getClass().equals(PeacefulState.class)) {
changeStateTo(new AngryState(this));
} else {
changeStateTo(new PeacefulState(this));
}
}
|
@Test
void testTimePasses() {
final var mammoth = new Mammoth();
mammoth.observe();
assertEquals("The mammoth is calm and peaceful.", appender.getLastMessage());
assertEquals(1, appender.getLogSize());
mammoth.timePasses();
assertEquals("The mammoth gets angry!", appender.getLastMessage());
assertEquals(2, appender.getLogSize());
mammoth.observe();
assertEquals("The mammoth is furious!", appender.getLastMessage());
assertEquals(3, appender.getLogSize());
mammoth.timePasses();
assertEquals("The mammoth calms down.", appender.getLastMessage());
assertEquals(4, appender.getLogSize());
mammoth.observe();
assertEquals("The mammoth is calm and peaceful.", appender.getLastMessage());
assertEquals(5, appender.getLogSize());
}
|
synchronized boolean tryToMoveTo(State to) {
boolean res = false;
State currentState = state;
if (TRANSITIONS.get(currentState).contains(to)) {
this.state = to;
res = true;
}
LOG.debug("{} tryToMoveTo from {} to {} => {}", Thread.currentThread().getName(), currentState, to, res);
return res;
}
|
@Test
public void STOPPED_is_not_allowed_from_RESTARTING() {
assertThat(newNodeLifecycle(RESTARTING).tryToMoveTo(STOPPED)).isFalse();
}
|
public Connector newConnector(String connectorClassOrAlias) {
Class<? extends Connector> klass = connectorClass(connectorClassOrAlias);
return newPlugin(klass);
}
|
@Test
public void shouldThrowIfNoDefaultConstructor() {
assertThrows(ConnectException.class, () -> plugins.newConnector(
TestPlugin.BAD_PACKAGING_NO_DEFAULT_CONSTRUCTOR_CONNECTOR.className()
));
}
|