focal_method (string, lengths 13–60.9k) | test_case (string, lengths 25–109k) |
---|---|
@Override
public void refreshAdminAcls() throws IOException {
UserGroupInformation user = checkAcls("refreshAdminAcls");
Configuration conf = createConf();
adminAcl = new AccessControlList(conf.get(JHAdminConfig.JHS_ADMIN_ACL,
JHAdminConfig.DEFAULT_JHS_ADMIN_ACL));
HSAuditLogger.logSuccess(user.getShortUserName(), "refreshAdminAcls",
HISTORY_ADMIN_SERVER);
}
|
@Test
public void testRefreshAdminAcls() throws Exception {
// Setting current user to admin acl
conf.set(JHAdminConfig.JHS_ADMIN_ACL, UserGroupInformation.getCurrentUser()
.getUserName());
String[] args = new String[1];
args[0] = "-refreshAdminAcls";
hsAdminClient.run(args);
// Now I should be able to run any hsadmin command without any exception
// being thrown
args[0] = "-refreshSuperUserGroupsConfiguration";
hsAdminClient.run(args);
// Let's remove the current user from the admin acl
conf.set(JHAdminConfig.JHS_ADMIN_ACL, "notCurrentUser");
args[0] = "-refreshAdminAcls";
hsAdminClient.run(args);
// Now I should get an exception if I run any hsadmin command
Throwable th = null;
args[0] = "-refreshSuperUserGroupsConfiguration";
try {
hsAdminClient.run(args);
} catch (Exception e) {
th = e;
}
assertTrue(th instanceof RemoteException);
}
|
public static AvroGenericCoder of(Schema schema) {
return AvroGenericCoder.of(schema);
}
|
@Test
public void testDeterministicUnorderedMap() {
assertNonDeterministic(
AvroCoder.of(UnorderedMapClass.class),
reasonField(
UnorderedMapClass.class,
"mapField",
"java.util.Map<java.lang.String, java.lang.String> "
+ "may not be deterministically ordered"));
}
|
List<Condition> run(boolean useKRaft) {
List<Condition> warnings = new ArrayList<>();
checkKafkaReplicationConfig(warnings);
checkKafkaBrokersStorage(warnings);
if (useKRaft) {
// Additional checks done for KRaft clusters
checkKRaftControllerStorage(warnings);
checkKRaftControllerCount(warnings);
checkKafkaMetadataVersion(warnings);
checkInterBrokerProtocolVersionInKRaft(warnings);
checkLogMessageFormatVersionInKRaft(warnings);
} else {
// Additional checks done for ZooKeeper-based clusters
checkKafkaLogMessageFormatVersion(warnings);
checkKafkaInterBrokerProtocolVersion(warnings);
checkKRaftMetadataStorageConfiguredForZooBasedCLuster(warnings);
}
return warnings;
}
|
@Test
public void testMetadataVersionMatchesKafkaVersionWithDefaultKafkaVersion() {
Kafka kafka = new KafkaBuilder(KAFKA)
.editSpec()
.editKafka()
.withMetadataVersion(KafkaVersionTestUtils.LATEST_METADATA_VERSION)
.endKafka()
.endSpec()
.build();
KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, POOL_A), new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), null, null, KafkaVersionTestUtils.LATEST_METADATA_VERSION));
List<Condition> warnings = checker.run(true);
assertThat(warnings, hasSize(0));
}
|
@Override
public void pluginLoaded(GoPluginDescriptor pluginDescriptor) {
if (notificationExtension.canHandlePlugin(pluginDescriptor.id())) {
try {
notificationPluginRegistry.registerPlugin(pluginDescriptor.id());
List<String> notificationsInterestedIn = notificationExtension.getNotificationsOfInterestFor(pluginDescriptor.id());
if (notificationsInterestedIn != null && !notificationsInterestedIn.isEmpty()) {
checkNotificationTypes(pluginDescriptor, notificationsInterestedIn);
notificationPluginRegistry.registerPluginInterests(pluginDescriptor.id(), notificationsInterestedIn);
}
} catch (Exception e) {
LOGGER.warn("Error occurred during plugin notification interest registration.", e);
}
}
}
|
@Test
public void shouldRegisterPluginInterestsOnPluginLoad() {
NotificationPluginRegistrar notificationPluginRegistrar = new NotificationPluginRegistrar(pluginManager, notificationExtension, notificationPluginRegistry);
notificationPluginRegistrar.pluginLoaded(GoPluginDescriptor.builder().id(PLUGIN_ID_1).isBundledPlugin(true).build());
verify(notificationPluginRegistry).registerPluginInterests(PLUGIN_ID_1, List.of(PIPELINE_STATUS, STAGE_STATUS, JOB_STATUS));
notificationPluginRegistrar.pluginLoaded(GoPluginDescriptor.builder().id(PLUGIN_ID_2).isBundledPlugin(true).build());
verify(notificationPluginRegistry).registerPluginInterests(PLUGIN_ID_2, List.of(PIPELINE_STATUS));
notificationPluginRegistrar.pluginLoaded(GoPluginDescriptor.builder().id(PLUGIN_ID_3).isBundledPlugin(true).build());
verify(notificationPluginRegistry).registerPluginInterests(PLUGIN_ID_3, List.of(STAGE_STATUS));
}
|
public static Field[] getAllFields(final Class<?> targetClazz) {
if (targetClazz == Object.class || targetClazz.isInterface()) {
return EMPTY_FIELD_ARRAY;
}
// get from the cache
Field[] fields = CLASS_FIELDS_CACHE.get(targetClazz);
if (fields != null) {
return fields;
}
// load current class declared fields
fields = targetClazz.getDeclaredFields();
final LinkedList<Field> fieldList = new LinkedList<>(Arrays.asList(fields));
// remove the static or synthetic fields
fieldList.removeIf(f -> Modifier.isStatic(f.getModifiers()) || f.isSynthetic());
// load all fields of the super class and add them to the field list
Field[] superFields = getAllFields(targetClazz.getSuperclass());
if (CollectionUtils.isNotEmpty(superFields)) {
fieldList.addAll(Arrays.asList(superFields));
}
// list to array
Field[] resultFields;
if (!fieldList.isEmpty()) {
resultFields = fieldList.toArray(new Field[0]);
} else {
// reuse the EMPTY_FIELD_ARRAY
resultFields = EMPTY_FIELD_ARRAY;
}
// set cache
CLASS_FIELDS_CACHE.put(targetClazz, resultFields);
return resultFields;
}
|
@Test
public void testGetAllFields() {
// TestClass
this.testGetAllFieldsInternal(TestClass.class, "f1", "f2");
// TestSuperClass
this.testGetAllFieldsInternal(TestSuperClass.class, "f2");
// EmptyClass
this.testGetAllFieldsInternal(EmptyClass.class);
// TestInterface
this.testGetAllFieldsInternal(TestInterface.class);
// Object
this.testGetAllFieldsInternal(Object.class);
// case: the field array of EmptyClass is the shared `EMPTY_FIELD_ARRAY`
Assertions.assertSame(ReflectionUtil.EMPTY_FIELD_ARRAY, ReflectionUtil.getAllFields(EmptyClass.class));
// case: the field array of TestInterface is the shared `EMPTY_FIELD_ARRAY`
Assertions.assertSame(ReflectionUtil.EMPTY_FIELD_ARRAY, ReflectionUtil.getAllFields(TestInterface.class));
// case: the field array of Object is the shared `EMPTY_FIELD_ARRAY`
Assertions.assertSame(ReflectionUtil.EMPTY_FIELD_ARRAY, ReflectionUtil.getAllFields(Object.class));
}
|
public byte[] remove() throws Exception {
byte[] bytes = internalElement(true, null);
if (bytes == null) {
throw new NoSuchElementException();
}
return bytes;
}
|
@Test
public void testRemove1() throws Exception {
CuratorFramework clients[] = null;
try {
String dir = "/testRemove1";
final int num_clients = 1;
clients = new CuratorFramework[num_clients];
SimpleDistributedQueue queueHandles[] = new SimpleDistributedQueue[num_clients];
for (int i = 0; i < clients.length; i++) {
clients[i] = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
clients[i].start();
queueHandles[i] = new SimpleDistributedQueue(clients[i], dir);
}
try {
queueHandles[0].remove();
} catch (NoSuchElementException e) {
return;
}
assertTrue(false);
} finally {
closeAll(clients);
}
}
|
public List<String> toPrefix(String in) {
List<String> tokens = buildTokens(alignINClause(in));
List<String> output = new ArrayList<>();
List<String> stack = new ArrayList<>();
for (String token : tokens) {
if (isOperand(token)) {
if (token.equals(")")) {
while (openParanthesesFound(stack)) {
output.add(stack.remove(stack.size() - 1));
}
if (!stack.isEmpty()) {
// temporary fix for issue #189
stack.remove(stack.size() - 1);
}
} else {
while (openParanthesesFound(stack) && !hasHigherPrecedence(token, stack.get(stack.size() - 1))) {
output.add(stack.remove(stack.size() - 1));
}
stack.add(token);
}
} else {
output.add(token);
}
}
while (!stack.isEmpty()) {
output.add(stack.remove(stack.size() - 1));
}
return output;
}
|
@Test
public void parseAEqB() {
String query = "a = b";
List<String> list = parser.toPrefix(query);
assertEquals(Arrays.asList("a", "b", "="), list);
}
|
static double atan2(double y, double x) {
// kludge to prevent 0/0 condition
double absY = Math.abs(y) + 1e-10;
double r, angle;
if (x < 0.0) {
r = (x + absY) / (absY - x);
angle = PI3_4;
} else {
r = (x - absY) / (x + absY);
angle = PI_4;
}
angle += (0.1963 * r * r - 0.9817) * r;
if (y < 0.0)
// negate if in quad III or IV
return -angle;
return angle;
}
|
@Test
public void testAtan2() {
// assertEquals(0, AngleCalc.atan2(0, 0), 1e-4);
// assertEquals(0, AngleCalc.atan2(-0.002, 0), 1e-4);
assertEquals(45, AngleCalc.atan2(5, 5) * 180 / Math.PI, 1e-2);
assertEquals(-45, AngleCalc.atan2(-5, 5) * 180 / Math.PI, 1e-2);
assertEquals(11.14, AngleCalc.atan2(1, 5) * 180 / Math.PI, 1);
assertEquals(180, AngleCalc.atan2(0, -5) * 180 / Math.PI, 1e-2);
assertEquals(-90, AngleCalc.atan2(-5, 0) * 180 / Math.PI, 1e-2);
assertEquals(90, Math.atan2(1, 0) * 180 / Math.PI, 1e-2);
assertEquals(90, AngleCalc.atan2(1, 0) * 180 / Math.PI, 1e-2);
}
|
@Override
public void run(Runnable task) {
long startNanos = System.nanoTime();
boolean publishCurrentTask = publishCurrentTask();
if (publishCurrentTask) {
currentTask = task;
}
try {
task.run();
} finally {
if (publishCurrentTask) {
currentTask = null;
}
record(task, startNanos);
}
}
|
@Test
public void runTask() {
final AtomicLong counter = new AtomicLong();
operationRunner.run(() -> counter.incrementAndGet());
assertEquals(1, counter.get());
}
|
public Resource getMinimumCapacityInInterval(ReservationInterval interval) {
Resource minCapacity =
Resource.newInstance(Integer.MAX_VALUE, Integer.MAX_VALUE);
long start = interval.getStartTime();
long end = interval.getEndTime();
NavigableMap<Long, Resource> capacityRange =
getRangeOverlapping(start, end).getCumulative();
if (!capacityRange.isEmpty()) {
for (Map.Entry<Long, Resource> entry : capacityRange.entrySet()) {
if (entry.getValue() != null) {
minCapacity =
Resources.componentwiseMin(minCapacity, entry.getValue());
}
}
}
return minCapacity;
}
|
@Test
public void testGetMinimumCapacityInInterval() {
long[] timeSteps = { 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L };
int[] alloc = { 2, 5, 7, 10, 3, 4, 0, 8 };
RLESparseResourceAllocation rleSparseVector = ReservationSystemTestUtil
.generateRLESparseResourceAllocation(alloc, timeSteps);
LOG.info(rleSparseVector.toString());
Assert.assertEquals(rleSparseVector.getMinimumCapacityInInterval(
new ReservationInterval(1L, 3L)), Resource.newInstance(5, 5));
Assert.assertEquals(rleSparseVector.getMinimumCapacityInInterval(
new ReservationInterval(2L, 5L)), Resource.newInstance(3, 3));
Assert.assertEquals(rleSparseVector.getMinimumCapacityInInterval(
new ReservationInterval(1L, 7L)), Resource.newInstance(0, 0));
}
|
ObjectFactory loadObjectFactory() {
Class<? extends ObjectFactory> objectFactoryClass = options.getObjectFactoryClass();
ClassLoader classLoader = classLoaderSupplier.get();
ServiceLoader<ObjectFactory> loader = ServiceLoader.load(ObjectFactory.class, classLoader);
if (objectFactoryClass == null) {
return loadSingleObjectFactoryOrDefault(loader);
}
return loadSelectedObjectFactory(loader, objectFactoryClass);
}
|
@Test
void test_case_4() {
io.cucumber.core.backend.Options options = () -> null;
ObjectFactoryServiceLoader loader = new ObjectFactoryServiceLoader(
() -> new ServiceLoaderTestClassLoader(ObjectFactory.class,
DefaultObjectFactory.class,
OtherFactory.class),
options);
assertThat(loader.loadObjectFactory(), instanceOf(OtherFactory.class));
}
|
@Override
public void write(T record) {
recordConsumer.startMessage();
try {
messageWriter.writeTopLevelMessage(record);
} catch (RuntimeException e) {
Message m = (record instanceof Message.Builder) ? ((Message.Builder) record).build() : (Message) record;
LOG.error("Cannot write message {}: {}", e.getMessage(), m);
throw e;
}
recordConsumer.endMessage();
}
|
@Test
public void testProto3WrappedMessageUnwrappedRoundTrip() throws Exception {
TestProto3.WrappedMessage.Builder msg = TestProto3.WrappedMessage.newBuilder();
msg.setWrappedDouble(DoubleValue.of(0.577));
msg.setWrappedFloat(FloatValue.of(3.1415f));
msg.setWrappedInt64(Int64Value.of(1_000_000_000L * 4));
msg.setWrappedUInt64(UInt64Value.of(1_000_000_000L * 9));
msg.setWrappedInt32(Int32Value.of(1_000_000 * 3));
msg.setWrappedUInt32(UInt32Value.of(1_000_000 * 8));
msg.setWrappedBool(BoolValue.of(true));
msg.setWrappedString(StringValue.of("Good Will Hunting"));
msg.setWrappedBytes(BytesValue.of(ByteString.copyFrom("someText", "UTF-8")));
// Write them out and read them back
Path tmpFilePath = TestUtils.someTemporaryFilePath();
ParquetWriter<MessageOrBuilder> writer = ProtoParquetWriter.<MessageOrBuilder>builder(tmpFilePath)
.withMessage(TestProto3.WrappedMessage.class)
.config(ProtoWriteSupport.PB_UNWRAP_PROTO_WRAPPERS, "true")
.build();
writer.write(msg);
writer.close();
List<TestProto3.WrappedMessage> gotBack = TestUtils.readMessages(tmpFilePath, TestProto3.WrappedMessage.class);
TestProto3.WrappedMessage gotBackFirst = gotBack.get(0);
assertEquals(0.577, gotBackFirst.getWrappedDouble().getValue(), 1e-5);
assertEquals(3.1415f, gotBackFirst.getWrappedFloat().getValue(), 1e-5f);
assertEquals(1_000_000_000L * 4, gotBackFirst.getWrappedInt64().getValue());
assertEquals(1_000_000_000L * 9, gotBackFirst.getWrappedUInt64().getValue());
assertEquals(1_000_000 * 3, gotBackFirst.getWrappedInt32().getValue());
assertEquals(1_000_000 * 8, gotBackFirst.getWrappedUInt32().getValue());
assertEquals(BoolValue.of(true), gotBackFirst.getWrappedBool());
assertEquals("Good Will Hunting", gotBackFirst.getWrappedString().getValue());
assertEquals(
ByteString.copyFrom("someText", "UTF-8"),
gotBackFirst.getWrappedBytes().getValue());
}
|
public ChannelFuture handshake(Channel channel, FullHttpRequest req) {
return handshake(channel, req, null, channel.newPromise());
}
|
@Test
public void testWebSocketServerHandshakeException() {
WebSocketServerHandshaker serverHandshaker = newHandshaker("ws://example.com/chat",
"chat", WebSocketDecoderConfig.DEFAULT);
FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET,
"ws://example.com/chat");
request.headers().set("x-client-header", "value");
try {
serverHandshaker.handshake(null, request, null, null);
} catch (WebSocketServerHandshakeException exception) {
assertNotNull(exception.getMessage());
assertEquals(request.headers(), exception.request().headers());
assertEquals(HttpMethod.GET, exception.request().method());
} finally {
request.release();
}
}
|
public static BuildInfo getBuildInfo() {
if (Overrides.isEnabled()) {
// never use cache when override is enabled -> we need to re-parse everything
Overrides overrides = Overrides.fromProperties();
return getBuildInfoInternalVersion(overrides);
}
return BUILD_INFO_CACHE;
}
|
@Test
public void testOverrideBuildVersion() {
System.setProperty(HAZELCAST_INTERNAL_OVERRIDE_VERSION, "99.99.99");
BuildInfo buildInfo = BuildInfoProvider.getBuildInfo();
assertEquals("99.99.99", buildInfo.getVersion());
System.clearProperty(HAZELCAST_INTERNAL_OVERRIDE_VERSION);
}
|
@Override
public ByteBuf slice() {
return newSharedLeakAwareByteBuf(super.slice());
}
|
@Test
public void testWrapSlice2() {
assertWrapped(newBuffer(8).slice(0, 1));
}
|
@Override
public double p(int k) {
if (k <= 0) {
return 0.0;
} else {
return Math.pow(1 - p, k - 1) * p;
}
}
|
@Test
public void testP() {
System.out.println("p");
ShiftedGeometricDistribution instance = new ShiftedGeometricDistribution(0.3);
instance.rand();
assertEquals(0.3, instance.p(1), 1E-6);
assertEquals(0.21, instance.p(2), 1E-6);
assertEquals(0.147, instance.p(3), 1E-6);
assertEquals(0.1029, instance.p(4), 1E-6);
assertEquals(0.07203, instance.p(5), 1E-6);
assertEquals(0.008474257, instance.p(11), 1E-6);
assertEquals(0.0002393768, instance.p(21), 1E-6);
}
|
public String process(final Expression expression) {
return formatExpression(expression);
}
|
@Test
public void shouldThrowOnTimestampTimeLEQ() {
// Given:
final ComparisonExpression compExp = new ComparisonExpression(
Type.LESS_THAN_OR_EQUAL,
TIMESTAMPCOL,
TIMECOL
);
// Then:
final Exception e = assertThrows(KsqlException.class, () -> sqlToJavaVisitor.process(compExp));
assertThat(e.getMessage(), is("Unexpected comparison to TIME: TIMESTAMP"));
}
|
public static Builder builder() {
return new Builder();
}
|
@Test
public void testRoundTripSerdeWithV2TableMetadata() throws Exception {
String tableMetadataJson = readTableMetadataInputFile("TableMetadataV2Valid.json");
TableMetadata v2Metadata =
TableMetadataParser.fromJson(TEST_METADATA_LOCATION, tableMetadataJson);
// Convert the TableMetadata JSON from the file to an object and then back to JSON so that
// missing fields
// are filled in with their default values.
String json =
String.format(
"{\"metadata-location\":\"%s\",\"metadata\":%s,\"config\":{\"foo\":\"bar\"}}",
TEST_METADATA_LOCATION, TableMetadataParser.toJson(v2Metadata));
LoadTableResponse resp =
LoadTableResponse.builder().withTableMetadata(v2Metadata).addAllConfig(CONFIG).build();
assertRoundTripSerializesEquallyFrom(json, resp);
}
|
@Override
public RestResponse<KsqlEntityList> makeKsqlRequest(
final URI serverEndPoint,
final String sql,
final Map<String, ?> requestProperties) {
final KsqlTarget target = sharedClient
.target(serverEndPoint);
return getTarget(target)
.postKsqlRequest(sql, requestProperties, Optional.empty());
}
|
@Test
public void shouldSetAuthHeaderOnTarget() {
// When:
client.makeKsqlRequest(SERVER_ENDPOINT, "Sql", ImmutableMap.of());
// Then:
verify(target).authorizationHeader(AUTH_HEADER);
}
|
void handleLine(final String line) {
final String trimmedLine = Optional.ofNullable(line).orElse("").trim();
if (trimmedLine.isEmpty()) {
return;
}
handleStatements(trimmedLine);
}
|
@Test
public void shouldDescribeVariadicObjectAggregateFunction() {
final String expectedSummary =
"Name : OBJ_COL_ARG\n"
+ "Author : Confluent\n"
+ "Overview : Returns an array of rows where all the given columns are non-null.\n"
+ "Type : AGGREGATE\n"
+ "Jar : internal\n"
+ "Variations : \n";
final String expectedVariant =
"\tVariation : OBJ_COL_ARG(val1 INT, val2 ANY[])\n"
+ "\tReturns : ARRAY<INT>\n"
+ "\tDescription : Testing factory";
localCli.handleLine("describe function obj_col_arg;");
final String output = terminal.getOutputString();
assertThat(output, containsString(expectedSummary));
assertThat(output, containsString(expectedVariant));
}
|
static void checkValidCollectionName(String databaseName, String collectionName) {
String fullCollectionName = databaseName + "." + collectionName;
if (collectionName.length() < MIN_COLLECTION_NAME_LENGTH) {
throw new IllegalArgumentException("Collection name cannot be empty.");
}
if (fullCollectionName.length() > MAX_COLLECTION_NAME_LENGTH) {
throw new IllegalArgumentException(
"Collection name "
+ fullCollectionName
+ " cannot be longer than "
+ MAX_COLLECTION_NAME_LENGTH
+ " characters, including the database name and dot.");
}
if (ILLEGAL_COLLECTION_CHARS.matcher(collectionName).find()) {
throw new IllegalArgumentException(
"Collection name "
+ collectionName
+ " is not a valid name. Only letters, numbers, hyphens, underscores and exclamation points are allowed.");
}
if (collectionName.charAt(0) != '_' && !Character.isLetter(collectionName.charAt(0))) {
throw new IllegalArgumentException(
"Collection name " + collectionName + " must start with a letter or an underscore.");
}
String illegalKeyword = "system.";
if (collectionName.startsWith(illegalKeyword)) {
throw new IllegalArgumentException(
"Collection name "
+ collectionName
+ " cannot start with the prefix \""
+ illegalKeyword
+ "\".");
}
}
|
@Test
public void testCheckValidCollectionNameThrowsErrorWhenNameIsTooLong() {
assertThrows(
IllegalArgumentException.class,
() -> checkValidCollectionName(StringUtils.repeat("a", 1), StringUtils.repeat("b", 100)));
assertThrows(
IllegalArgumentException.class,
() -> checkValidCollectionName(StringUtils.repeat("a", 50), StringUtils.repeat("b", 50)));
assertThrows(
IllegalArgumentException.class,
() -> checkValidCollectionName(StringUtils.repeat("a", 100), StringUtils.repeat("b", 1)));
}
|
@Override
public void build(T instance) {
super.build(instance);
if (!StringUtils.isEmpty(version)) {
instance.setVersion(version);
}
if (!StringUtils.isEmpty(group)) {
instance.setGroup(group);
}
if (deprecated != null) {
instance.setDeprecated(deprecated);
}
if (delay != null) {
instance.setDelay(delay);
}
if (export != null) {
instance.setExport(export);
}
if (weight != null) {
instance.setWeight(weight);
}
if (!StringUtils.isEmpty(document)) {
instance.setDocument(document);
}
if (dynamic != null) {
instance.setDynamic(dynamic);
}
if (!StringUtils.isEmpty(token)) {
instance.setToken(token);
}
if (!StringUtils.isEmpty(accesslog)) {
instance.setAccesslog(accesslog);
}
if (protocols != null) {
instance.setProtocols(protocols);
}
if (!StringUtils.isEmpty(protocolIds)) {
instance.setProtocolIds(protocolIds);
}
if (executes != null) {
instance.setExecutes(executes);
}
if (register != null) {
instance.setRegister(register);
}
if (warmup != null) {
instance.setWarmup(warmup);
}
if (!StringUtils.isEmpty(serialization)) {
instance.setSerialization(serialization);
}
if (executor != null) {
instance.setExecutor(executor);
}
if (StringUtils.isNotBlank(preferSerialization)) {
instance.setPreferSerialization(preferSerialization);
}
}
|
@Test
void build() {
ProtocolConfig protocol = new ProtocolConfig();
ServiceBuilder builder = new ServiceBuilder();
builder.version("version")
.group("group")
.deprecated(true)
.delay(1000)
.export(false)
.weight(1)
.document("document")
.dynamic(true)
.token("token")
.accesslog("accesslog")
.addProtocol(protocol)
.protocolIds("protocolIds")
.tag("tag")
.executes(100)
.register(false)
.warmup(200)
.serialization("serialization")
.id("id");
ServiceConfig config = builder.build();
ServiceConfig config2 = builder.build();
Assertions.assertEquals("id", config.getId());
Assertions.assertEquals("version", config.getVersion());
Assertions.assertEquals("group", config.getGroup());
Assertions.assertEquals("document", config.getDocument());
Assertions.assertEquals("token", config.getToken());
Assertions.assertEquals("accesslog", config.getAccesslog());
Assertions.assertEquals("protocolIds", config.getProtocolIds());
Assertions.assertEquals("tag", config.getTag());
Assertions.assertEquals("serialization", config.getSerialization());
Assertions.assertTrue(config.isDeprecated());
Assertions.assertFalse(config.getExport());
Assertions.assertTrue(config.isDynamic());
Assertions.assertFalse(config.isRegister());
Assertions.assertEquals(1000, config.getDelay());
Assertions.assertEquals(1, config.getWeight());
Assertions.assertEquals(100, config.getExecutes());
Assertions.assertEquals(200, config.getWarmup());
Assertions.assertNotSame(config, config2);
}
|
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
try {
return serializer.serialize(topic, value == null ? null : value.toString());
} catch (SerializationException e) {
throw new DataException("Failed to serialize to a string: ", e);
}
}
|
@Test
public void testNullToBytes() {
assertNull(converter.fromConnectData(TOPIC, Schema.OPTIONAL_STRING_SCHEMA, null));
}
|
public static Builder builder(Credentials credentials) {
return new Builder(credentials);
}
|
@Test
public void testCreateWithCredentials() {
Credentials credentials = mock(Credentials.class);
ClassicTemplateClient.builder(credentials).build();
// Lack of exception is all we really can test
}
|
@Override
public BackgroundException map(final GenericException e) {
final StringBuilder buffer = new StringBuilder();
this.append(buffer, e.getMessage());
final StatusLine status = e.getHttpStatusLine();
if(null != status) {
this.append(buffer, String.format("%d %s", status.getStatusCode(), status.getReasonPhrase()));
}
switch(e.getHttpStatusCode()) {
case HttpStatus.SC_BAD_REQUEST:
return new LoginFailureException(buffer.toString(), e);
}
return new DefaultHttpResponseExceptionMappingService().map(new HttpResponseException(e.getHttpStatusCode(), buffer.toString()));
}
|
@Test
public void testMap() {
assertEquals("Message. 500 reason. Please contact your web hosting service provider for assistance.", new SwiftExceptionMappingService().map(
new GenericException("message", null, new StatusLine() {
@Override
public ProtocolVersion getProtocolVersion() {
throw new UnsupportedOperationException();
}
@Override
public int getStatusCode() {
return 500;
}
@Override
public String getReasonPhrase() {
return "reason";
}
})).getDetail());
}
|
@Override
public <T> T clone(T object) {
if (object instanceof String) {
return object;
} else if (object instanceof Collection) {
Object firstElement = findFirstNonNullElement((Collection) object);
if (firstElement != null && !(firstElement instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
} else if (object instanceof Map) {
Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
if (firstEntry != null) {
Object key = firstEntry.getKey();
Object value = firstEntry.getValue();
if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
}
} else if (object instanceof JsonNode) {
return (T) ((JsonNode) object).deepCopy();
}
if (object instanceof Serializable) {
try {
return (T) SerializationHelper.clone((Serializable) object);
} catch (SerializationException e) {
//it is possible that object itself implements java.io.Serializable, but underlying structure does not
//in this case we switch to the other JSON marshaling strategy which doesn't use the Java serialization
}
}
return jsonClone(object);
}
|
@Test
public void should_clone_empty_collection() {
List<?> original = new ArrayList<>();
Object cloned = serializer.clone(original);
assertEquals(original, cloned);
assertNotSame(original, cloned);
}
|
public double[] shap(DataFrame data) {
// Binds the formula to the data frame's schema in case that
// it is different from that of training data.
formula.bind(data.schema());
return shap(data.stream().parallel());
}
|
@Test
public void testShap() {
MathEx.setSeed(19650218); // to get repeatable results.
GradientTreeBoost model = GradientTreeBoost.fit(Iris.formula, Iris.data, 100, 20, 6, 5, 0.05, 0.7);
String[] fields = model.schema().names();
double[] importance = model.importance();
double[] shap = model.shap(Iris.data);
System.out.println("----- importance -----");
for (int i = 0; i < importance.length; i++) {
System.out.format("%-15s %.4f%n", fields[i], importance[i]);
}
System.out.println("----- SHAP -----");
for (int i = 0; i < fields.length; i++) {
System.out.format("%-15s %.4f %.4f %.4f%n", fields[i], shap[2*i], shap[2*i+1], shap[2*i+2]);
}
}
|
@Override
public boolean equals(Object obj) {
if ( this == obj ) {
return true;
}
if ( obj == null ) {
return false;
}
if ( getClass() != obj.getClass() ) {
return false;
}
final SelectionParameters other = (SelectionParameters) obj;
if ( !equals( this.qualifiers, other.qualifiers ) ) {
return false;
}
if ( !Objects.equals( this.qualifyingNames, other.qualifyingNames ) ) {
return false;
}
if ( !Objects.equals( this.conditionQualifiers, other.conditionQualifiers ) ) {
return false;
}
if ( !Objects.equals( this.conditionQualifyingNames, other.conditionQualifyingNames ) ) {
return false;
}
if ( !Objects.equals( this.sourceRHS, other.sourceRHS ) ) {
return false;
}
return equals( this.resultType, other.resultType );
}
|
@Test
public void testEqualsSameInstance() {
List<String> qualifyingNames = Arrays.asList( "language", "german" );
TypeMirror resultType = new TestTypeMirror( "resultType" );
List<TypeMirror> qualifiers = new ArrayList<>();
qualifiers.add( new TestTypeMirror( "org.mapstruct.test.SomeType" ) );
qualifiers.add( new TestTypeMirror( "org.mapstruct.test.SomeOtherType" ) );
SelectionParameters params = new SelectionParameters( qualifiers, qualifyingNames, resultType, typeUtils );
assertThat( params.equals( params ) ).as( "Self equals" ).isTrue();
}
|
public int size() {
return versionWindow.length;
}
|
@Test
public void testSize() {
VersionTally instance = new VersionTally(TESTNET);
assertEquals(TESTNET.getMajorityWindow(), instance.size());
}
|
@Override
protected double maintain() {
List<Node> provisionedSnapshot;
try {
NodeList nodes;
// Host and child nodes are written in separate transactions, but both are written while holding the
// unallocated lock. Hold the unallocated lock while reading nodes to ensure we get all the children
// of newly provisioned hosts.
try (Mutex ignored = nodeRepository().nodes().lockUnallocated()) {
nodes = nodeRepository().nodes().list();
}
provisionedSnapshot = provision(nodes);
} catch (NodeAllocationException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to allocate preprovisioned capacity and/or find excess hosts: " + e.getMessage());
return 0; // avoid removing excess hosts
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to allocate preprovisioned capacity and/or find excess hosts", e);
return 0; // avoid removing excess hosts
}
return markForRemoval(provisionedSnapshot);
}
|
@Test
public void respects_exclusive_allocation() {
tester = new DynamicProvisioningTester(Cloud.builder().name(CloudName.AWS).dynamicProvisioning(true).allowHostSharing(false).build(), new MockNameResolver());
NodeResources resources1 = new NodeResources(24, 64, 100, 10);
setPreprovisionCapacityFlag(tester,
new ClusterCapacity(1, resources1.vcpu(), resources1.memoryGiB(), resources1.diskGb(),
resources1.bandwidthGbps(), resources1.diskSpeed().name(),
resources1.storageType().name(), resources1.architecture().name(),
"container"),
new ClusterCapacity(1, resources1.vcpu(), resources1.memoryGiB(), resources1.diskGb(),
resources1.bandwidthGbps(), resources1.diskSpeed().name(),
resources1.storageType().name(), resources1.architecture().name(),
null));
tester.maintain();
// Hosts are provisioned
assertEquals(2, tester.provisionedHostsMatching(resources1));
assertEquals(0, tester.hostProvisioner.deprovisionedHosts());
assertEquals(Optional.empty(), tester.nodeRepository.nodes().node("host100").flatMap(Node::exclusiveToApplicationId));
assertEquals(Optional.empty(), tester.nodeRepository.nodes().node("host101").flatMap(Node::exclusiveToApplicationId));
// Next maintenance run does nothing
tester.assertNodesUnchanged();
// One host is allocated exclusively to some other application
tester.nodeRepository.nodes().write(tester.nodeRepository.nodes().node("host100").get()
.withExclusiveToApplicationId(ApplicationId.from("t", "a", "i")),
() -> { });
tester.maintain();
// New hosts are provisioned, and the empty exclusive host is deallocated
assertEquals(2, tester.provisionedHostsMatching(resources1));
assertEquals(1, tester.hostProvisioner.deprovisionedHosts());
// Next maintenance run does nothing
tester.assertNodesUnchanged();
}
|
@Override
protected void init() throws ServiceException {
LOG.info("Using FileSystemAccess JARs version [{}]", VersionInfo.getVersion());
String security = getServiceConfig().get(AUTHENTICATION_TYPE, "simple").trim();
if (security.equals("kerberos")) {
String defaultName = getServer().getName();
String keytab = System.getProperty("user.home") + "/" + defaultName + ".keytab";
keytab = getServiceConfig().get(KERBEROS_KEYTAB, keytab).trim();
if (keytab.length() == 0) {
throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_KEYTAB);
}
String principal = defaultName + "/localhost@LOCALHOST";
principal = getServiceConfig().get(KERBEROS_PRINCIPAL, principal).trim();
if (principal.length() == 0) {
throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_PRINCIPAL);
}
Configuration conf = new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
try {
UserGroupInformation.loginUserFromKeytab(principal, keytab);
} catch (IOException ex) {
throw new ServiceException(FileSystemAccessException.ERROR.H02, ex.getMessage(), ex);
}
LOG.info("Using FileSystemAccess Kerberos authentication, principal [{}] keytab [{}]", principal, keytab);
} else if (security.equals("simple")) {
Configuration conf = new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
UserGroupInformation.setConfiguration(conf);
LOG.info("Using FileSystemAccess simple/pseudo authentication, principal [{}]", System.getProperty("user.name"));
} else {
throw new ServiceException(FileSystemAccessException.ERROR.H09, security);
}
String hadoopConfDirProp = getServiceConfig().get(HADOOP_CONF_DIR, getServer().getConfigDir());
File hadoopConfDir = new File(hadoopConfDirProp).getAbsoluteFile();
if (!hadoopConfDir.exists()) {
hadoopConfDir = new File(getServer().getConfigDir()).getAbsoluteFile();
}
if (!hadoopConfDir.exists()) {
throw new ServiceException(FileSystemAccessException.ERROR.H10, hadoopConfDir);
}
try {
serviceHadoopConf = loadHadoopConf(hadoopConfDir);
fileSystemConf = getNewFileSystemConfiguration();
} catch (IOException ex) {
throw new ServiceException(FileSystemAccessException.ERROR.H11, ex.toString(), ex);
}
if (LOG.isDebugEnabled()) {
LOG.debug("FileSystemAccess FileSystem configuration:");
for (Map.Entry entry : serviceHadoopConf) {
LOG.debug(" {} = {}", entry.getKey(), entry.getValue());
}
}
setRequiredServiceHadoopConf(serviceHadoopConf);
nameNodeWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(NAME_NODE_WHITELIST));
}
|
@Test
@TestDir
public void serviceHadoopConfCustomDir() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String hadoopConfDir = new File(dir, "confx").getAbsolutePath();
new File(hadoopConfDir).mkdirs();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.config.dir", hadoopConfDir);
File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
OutputStream os = new FileOutputStream(hdfsSite);
Configuration hadoopConf = new Configuration(false);
hadoopConf.set("foo", "BAR");
hadoopConf.writeXml(os);
os.close();
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "BAR");
server.destroy();
}
|
@ExecuteOn(TaskExecutors.IO)
@Get
@Operation(tags = {"KV"}, summary = "List all keys for a namespace")
public List<KVEntry> list(
@Parameter(description = "The namespace id") @PathVariable String namespace
) throws IOException, URISyntaxException {
return kvStore(namespace).list();
}
|
@SuppressWarnings("unchecked")
@Test
void list() throws IOException {
Instant before = Instant.now().minusMillis(100);
Instant myKeyExpirationDate = Instant.now().plus(Duration.ofMinutes(5)).truncatedTo(ChronoUnit.MILLIS);
Instant mySecondKeyExpirationDate = Instant.now().plus(Duration.ofMinutes(10)).truncatedTo(ChronoUnit.MILLIS);
storageInterface.put(null, toKVUri(NAMESPACE, "my-key"), new StorageObject(Map.of("expirationDate", myKeyExpirationDate.toString()), new ByteArrayInputStream("my-value".getBytes())));
storageInterface.put(null, toKVUri(NAMESPACE, "my-second-key"), new StorageObject(Map.of("expirationDate", mySecondKeyExpirationDate.toString()), new ByteArrayInputStream("my-second-value".getBytes())));
Instant after = Instant.now().plusMillis(100);
List<KVEntry> res = client.toBlocking().retrieve(HttpRequest.GET("/api/v1/namespaces/" + NAMESPACE + "/kv"), Argument.of(List.class, KVEntry.class));
res.stream().forEach(entry -> {
assertThat(entry.creationDate().isAfter(before) && entry.creationDate().isBefore(after), is(true));
assertThat(entry.updateDate().isAfter(before) && entry.updateDate().isBefore(after), is(true));
});
assertThat(res.stream().filter(entry -> entry.key().equals("my-key")).findFirst().get().expirationDate(), is(myKeyExpirationDate));
assertThat(res.stream().filter(entry -> entry.key().equals("my-second-key")).findFirst().get().expirationDate(), is(mySecondKeyExpirationDate));
}
|
public static ErrorProneOptions processArgs(Iterable<String> args) {
Preconditions.checkNotNull(args);
ImmutableList.Builder<String> remainingArgs = ImmutableList.builder();
/* By default, we throw an error when an unknown option is passed in, if for example you
* try to disable a check that doesn't match any of the known checks. This catches typos from
* the command line.
*
* You can pass the IGNORE_UNKNOWN_CHECKS_FLAG to opt-out of that checking. This allows you to
* use command lines from different versions of error-prone interchangeably.
*/
boolean patchLocationSet = false;
boolean patchCheckSet = false;
Builder builder = new Builder();
for (String arg : args) {
switch (arg) {
case IGNORE_SUPPRESSION_ANNOTATIONS:
builder.setIgnoreSuppressionAnnotations(true);
break;
case IGNORE_UNKNOWN_CHECKS_FLAG:
builder.setIgnoreUnknownChecks(true);
break;
case DISABLE_WARNINGS_IN_GENERATED_CODE_FLAG:
builder.setDisableWarningsInGeneratedCode(true);
break;
case ERRORS_AS_WARNINGS_FLAG:
builder.setDropErrorsToWarnings(true);
break;
case SUGGESTIONS_AS_WARNINGS_FLAG:
builder.setSuggestionsAsWarnings(true);
break;
case ENABLE_ALL_CHECKS:
builder.setEnableAllChecksAsWarnings(true);
break;
case DISABLE_ALL_CHECKS:
builder.setDisableAllChecks(true);
break;
case COMPILING_TEST_ONLY_CODE:
builder.setTestOnlyTarget(true);
break;
case COMPILING_PUBLICLY_VISIBLE_CODE:
builder.setPubliclyVisibleTarget(true);
break;
case DISABLE_ALL_WARNINGS:
builder.setDisableAllWarnings(true);
break;
default:
if (arg.startsWith(SEVERITY_PREFIX)) {
builder.parseSeverity(arg);
} else if (arg.startsWith(ErrorProneFlags.PREFIX)) {
builder.parseFlag(arg);
} else if (arg.startsWith(PATCH_OUTPUT_LOCATION)) {
patchLocationSet = true;
String remaining = arg.substring(PATCH_OUTPUT_LOCATION.length());
if (remaining.equals("IN_PLACE")) {
builder.patchingOptionsBuilder().inPlace(true);
} else {
if (remaining.isEmpty()) {
throw new InvalidCommandLineOptionException("invalid flag: " + arg);
}
builder.patchingOptionsBuilder().baseDirectory(remaining);
}
} else if (arg.startsWith(PATCH_CHECKS_PREFIX)) {
patchCheckSet = true;
String remaining = arg.substring(PATCH_CHECKS_PREFIX.length());
if (remaining.startsWith("refaster:")) {
// Refaster rule, load from InputStream at file
builder
.patchingOptionsBuilder()
.customRefactorer(
() -> {
String path = remaining.substring("refaster:".length());
try (InputStream in =
Files.newInputStream(FileSystems.getDefault().getPath(path));
ObjectInputStream ois = new ObjectInputStream(in)) {
return (CodeTransformer) ois.readObject();
} catch (IOException | ClassNotFoundException e) {
throw new RuntimeException("Can't load Refaster rule from " + path, e);
}
});
} else {
Iterable<String> checks =
Splitter.on(',').trimResults().omitEmptyStrings().split(remaining);
builder.patchingOptionsBuilder().namedCheckers(ImmutableSet.copyOf(checks));
}
} else if (arg.startsWith(PATCH_IMPORT_ORDER_PREFIX)) {
String remaining = arg.substring(PATCH_IMPORT_ORDER_PREFIX.length());
ImportOrganizer importOrganizer = ImportOrderParser.getImportOrganizer(remaining);
builder.patchingOptionsBuilder().importOrganizer(importOrganizer);
} else if (arg.startsWith(EXCLUDED_PATHS_PREFIX)) {
String pathRegex = arg.substring(EXCLUDED_PATHS_PREFIX.length());
builder.setExcludedPattern(Pattern.compile(pathRegex));
} else {
if (arg.startsWith(PREFIX)) {
throw new InvalidCommandLineOptionException("invalid flag: " + arg);
}
remainingArgs.add(arg);
}
}
}
if (patchCheckSet && !patchLocationSet) {
throw new InvalidCommandLineOptionException(
"-XepPatchLocation must be specified when -XepPatchChecks is");
}
return builder.build(remainingArgs.build());
}
|
@Test
public void malformedOptionThrowsProperException() {
List<String> badArgs =
Arrays.asList(
"-Xep:Foo:WARN:jfkdlsdf", // too many parts
"-Xep:", // no check name
"-Xep:Foo:FJDKFJSD"); // nonexistent severity level
badArgs.forEach(
arg -> {
InvalidCommandLineOptionException expected =
assertThrows(
InvalidCommandLineOptionException.class,
() -> ErrorProneOptions.processArgs(Arrays.asList(arg)));
assertThat(expected).hasMessageThat().contains("invalid flag");
});
}
|
public final StringSubject hasMessageThat() {
StandardSubjectBuilder check = check("getMessage()");
if (actual instanceof ErrorWithFacts && ((ErrorWithFacts) actual).facts().size() > 1) {
check =
check.withMessage(
"(Note from Truth: When possible, instead of asserting on the full message, assert"
+ " about individual facts by using ExpectFailure.assertThat.)");
}
return check.that(checkNotNull(actual).getMessage());
}
|
@Test
public void hasMessageThat_null() {
assertThat(new NullPointerException()).hasMessageThat().isNull();
assertThat(new NullPointerException(null)).hasMessageThat().isNull();
}
|
@Override
public String arguments() {
ArrayList<String> args = new ArrayList<>();
if (buildFile != null) {
args.add("-f \"" + FilenameUtils.separatorsToUnix(buildFile) + "\"");
}
if (target != null) {
args.add(target);
}
return StringUtils.join(args, " ");
}
|
@Test
public void shouldNotSetTargetOnBuilderWhenNotSet() {
assertThat(antTask.arguments(), is(""));
}
|
@Override
public DeserializationHandlerResponse handle(
final ProcessorContext context,
final ConsumerRecord<byte[], byte[]> record,
final Exception exception
) {
log.debug(
String.format("Exception caught during Deserialization, "
+ "taskId: %s, topic: %s, partition: %d, offset: %d",
context.taskId(), record.topic(), record.partition(), record.offset()),
exception
);
streamsErrorCollector.recordError(record.topic());
if (isCausedByAuthorizationError(exception)) {
log.info(
String.format(
"Authorization error when attempting to access the schema during deserialization. "
+ "taskId: %s, topic: %s, partition: %d, offset: %d",
context.taskId(), record.topic(), record.partition(), record.offset()));
return DeserializationHandlerResponse.FAIL;
}
return DeserializationHandlerResponse.CONTINUE;
}
|
@Test
public void shouldReturnContinueForRegularExceptions() {
assertThat(exceptionHandler.handle(context, record, mock(Exception.class)),
equalTo(DeserializationHandlerResponse.CONTINUE));
}
|
@Override
public void validate() throws TelegramApiValidationException {
if (inlineQueryId.isEmpty()) {
throw new TelegramApiValidationException("InlineQueryId can't be empty", this);
}
for (InlineQueryResult result : results) {
result.validate();
}
if (button != null) {
button.validate();
}
}
|
@Test
void testResultsMustBePresent() {
answerInlineQuery.setInlineQueryId("RANDOMEID");
try {
answerInlineQuery.validate();
} catch (TelegramApiValidationException e) {
assertEquals("Results array can't be null", e.getMessage());
}
}
|
public static <K, V extends OrderedSPI<?>> Map<K, V> getServices(final Class<V> serviceInterface, final Collection<K> types) {
return getServices(serviceInterface, types, Comparator.naturalOrder());
}
|
@Test
void assertGetServicesFromCache() {
OrderedInterfaceFixture key = new OrderedInterfaceFixtureImpl();
assertThat(OrderedSPILoader.getServices(OrderedSPIFixture.class, Collections.singleton(key)),
is(OrderedSPILoader.getServices(OrderedSPIFixture.class, Collections.singleton(key))));
}
|
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return PathAttributes.EMPTY;
}
if(new DefaultPathContainerService().isContainer(file)) {
return PathAttributes.EMPTY;
}
final Path query;
if(file.isPlaceholder()) {
query = new Path(file.getParent(), FilenameUtils.removeExtension(file.getName()), file.getType(), file.attributes());
}
else {
query = file;
}
final AttributedList<Path> list;
if(new SimplePathPredicate(DriveHomeFinderService.SHARED_DRIVES_NAME).test(file.getParent())) {
list = new DriveTeamDrivesListService(session, fileid).list(file.getParent(), listener);
}
else {
list = new FileidDriveListService(session, fileid, query).list(file.getParent(), listener);
}
final Path found = list.find(new ListFilteringFeature.ListFilteringPredicate(session.getCaseSensitivity(), file));
if(null == found) {
throw new NotfoundException(file.getAbsolute());
}
return found.attributes();
}
|
@Test
public void testFindMyDrive() throws Exception {
final DriveAttributesFinderFeature f = new DriveAttributesFinderFeature(session, new DriveFileIdProvider(session));
assertEquals(PathAttributes.EMPTY, f.find(DriveHomeFinderService.MYDRIVE_FOLDER));
}
|
@Override
public GetApplicationAttemptReportResponse getApplicationAttemptReport(
GetApplicationAttemptReportRequest request)
throws YarnException, IOException {
if (request == null || request.getApplicationAttemptId() == null
|| request.getApplicationAttemptId().getApplicationId() == null) {
routerMetrics.incrAppAttemptReportFailedRetrieved();
String msg = "Missing getApplicationAttemptReport request or applicationId " +
"or applicationAttemptId information.";
RouterAuditLogger.logFailure(user.getShortUserName(), GET_APPLICATION_ATTEMPT_REPORT, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg);
RouterServerUtil.logAndThrowException(msg, null);
}
long startTime = clock.getTime();
SubClusterId subClusterId = null;
ApplicationId applicationId = request.getApplicationAttemptId().getApplicationId();
try {
subClusterId = getApplicationHomeSubCluster(applicationId);
} catch (YarnException e) {
routerMetrics.incrAppAttemptReportFailedRetrieved();
String msgFormat = "ApplicationAttempt %s belongs to " +
"Application %s does not exist in FederationStateStore.";
ApplicationAttemptId applicationAttemptId = request.getApplicationAttemptId();
RouterAuditLogger.logFailure(user.getShortUserName(), GET_APPLICATION_ATTEMPT_REPORT, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msgFormat, applicationAttemptId, applicationId);
RouterServerUtil.logAndThrowException(e, msgFormat, applicationAttemptId, applicationId);
}
ApplicationClientProtocol clientRMProxy =
getClientRMProxyForSubCluster(subClusterId);
GetApplicationAttemptReportResponse response = null;
try {
response = clientRMProxy.getApplicationAttemptReport(request);
} catch (Exception e) {
routerMetrics.incrAppAttemptReportFailedRetrieved();
String msg = String.format(
"Unable to get the applicationAttempt report for %s to SubCluster %s.",
request.getApplicationAttemptId(), subClusterId.getId());
RouterAuditLogger.logFailure(user.getShortUserName(), GET_APPLICATION_ATTEMPT_REPORT, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg);
RouterServerUtil.logAndThrowException(msg, e);
}
if (response == null) {
LOG.error("No response when attempting to retrieve the report of "
+ "the applicationAttempt {} to SubCluster {}.",
request.getApplicationAttemptId(), subClusterId.getId());
}
long stopTime = clock.getTime();
routerMetrics.succeededAppAttemptReportRetrieved(stopTime - startTime);
RouterAuditLogger.logSuccess(user.getShortUserName(), GET_APPLICATION_ATTEMPT_REPORT,
TARGET_CLIENT_RM_SERVICE);
return response;
}
|
@Test
public void testGetApplicationAttemptEmptyRequest()
throws Exception {
LOG.info("Test FederationClientInterceptor: Get ApplicationAttempt Report - Empty.");
// null request1
LambdaTestUtils.intercept(YarnException.class,
"Missing getApplicationAttemptReport request or applicationId " +
"or applicationAttemptId information.",
() -> interceptor.getApplicationAttemptReport(null));
// null request2
LambdaTestUtils.intercept(YarnException.class,
"Missing getApplicationAttemptReport request or applicationId " +
"or applicationAttemptId information.",
() -> interceptor.getApplicationAttemptReport(
GetApplicationAttemptReportRequest.newInstance(null)));
// null request3
LambdaTestUtils.intercept(YarnException.class,
"Missing getApplicationAttemptReport request or applicationId " +
"or applicationAttemptId information.",
() -> interceptor.getApplicationAttemptReport(
GetApplicationAttemptReportRequest.newInstance(
ApplicationAttemptId.newInstance(null, 1))));
}
|
public CompletableFuture<Result> getTerminationFuture() {
return terminationFuture;
}
|
@Test
void testShouldShutdownIfRegistrationWithJobManagerFails() throws Exception {
Configuration configuration = createConfiguration();
configuration.set(
TaskManagerOptions.REGISTRATION_TIMEOUT, TimeUtils.parseDuration("10 ms"));
taskManagerRunner = createTaskManagerRunner(configuration);
assertThatFuture(taskManagerRunner.getTerminationFuture())
.eventuallySucceeds()
.isEqualTo(TaskManagerRunner.Result.FAILURE);
}
|
public String getJsonPath() {
return getPropertyAsString(JSONPATH);
}
|
@Test
void testGetJsonPath() {
JSONPathAssertion instance = new JSONPathAssertion();
String expResult = "";
String result = instance.getJsonPath();
assertEquals(expResult, result);
}
|
T getFunction(final List<SqlArgument> arguments) {
// first try to get the candidates without any implicit casting
Optional<T> candidate = findMatchingCandidate(arguments, false);
if (candidate.isPresent()) {
return candidate.get();
} else if (!supportsImplicitCasts) {
throw createNoMatchingFunctionException(arguments);
}
// if none were found (candidate isn't present) try again with implicit casting
candidate = findMatchingCandidate(arguments, true);
if (candidate.isPresent()) {
return candidate.get();
}
throw createNoMatchingFunctionException(arguments);
}
|
@Test
public void shouldChooseSpecificOverVarArgsInMiddle() {
// Given:
givenFunctions(
function(EXPECTED, -1, INT, INT, STRING, STRING, STRING, STRING, INT),
function(OTHER, 2, INT, INT, STRING_VARARGS, STRING, STRING, STRING, INT)
);
// When:
final KsqlScalarFunction fun = udfIndex.getFunction(ImmutableList.of(
SqlArgument.of(SqlTypes.INTEGER), SqlArgument.of(SqlTypes.INTEGER),
SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING),
SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING),
SqlArgument.of(SqlTypes.INTEGER)
));
// Then:
assertThat(fun.name(), equalTo(EXPECTED));
}
|
@Override
public String toString() {
return partitionMaps.entrySet().stream()
.flatMap(this::toStrings)
.collect(Collectors.joining(", ", "{", "}"));
}
|
@Test
public void testToString() {
PartitionMap<String> map = PartitionMap.create(SPECS);
// empty map
assertThat(map.toString()).isEqualTo("{}");
// single entry
map.put(BY_DATA_SPEC.specId(), Row.of("aaa"), "v1");
assertThat(map.toString()).isEqualTo("{data=aaa -> v1}");
// multiple entries
map.put(BY_DATA_SPEC.specId(), CustomRow.of("bbb"), "v2");
map.put(BY_DATA_CATEGORY_BUCKET_SPEC.specId(), Row.of("ccc", 2), "v3");
assertThat(map.toString())
.contains("data=aaa -> v1")
.contains("data=bbb -> v2")
.contains("data=ccc/category_bucket=2 -> v3");
}
|
@VisibleForTesting
List<Image> getCachedBaseImages()
throws IOException, CacheCorruptedException, BadContainerConfigurationFormatException,
LayerCountMismatchException, UnlistedPlatformInManifestListException,
PlatformNotFoundInBaseImageException {
ImageReference baseImage = buildContext.getBaseImageConfiguration().getImage();
Optional<ImageMetadataTemplate> metadata =
buildContext.getBaseImageLayersCache().retrieveMetadata(baseImage);
if (!metadata.isPresent()) {
return Collections.emptyList();
}
ManifestTemplate manifestList = metadata.get().getManifestList();
List<ManifestAndConfigTemplate> manifestsAndConfigs = metadata.get().getManifestsAndConfigs();
if (manifestList == null) {
Verify.verify(manifestsAndConfigs.size() == 1);
ManifestAndConfigTemplate manifestAndConfig = manifestsAndConfigs.get(0);
Optional<Image> cachedImage = getBaseImageIfAllLayersCached(manifestAndConfig, true);
if (!cachedImage.isPresent()) {
return Collections.emptyList();
}
return Collections.singletonList(cachedImage.get());
}
// Manifest list cached. Identify matching platforms and check if all of them are cached.
ImmutableList.Builder<Image> images = ImmutableList.builder();
for (Platform platform : buildContext.getContainerConfiguration().getPlatforms()) {
String manifestDigest =
lookUpPlatformSpecificImageManifest((ManifestListTemplate) manifestList, platform);
Optional<ManifestAndConfigTemplate> manifestAndConfigFound =
manifestsAndConfigs.stream()
.filter(entry -> manifestDigest.equals(entry.getManifestDigest()))
.findFirst();
if (!manifestAndConfigFound.isPresent()) {
return Collections.emptyList();
}
Optional<Image> cachedImage =
getBaseImageIfAllLayersCached(manifestAndConfigFound.get(), false);
if (!cachedImage.isPresent()) {
return Collections.emptyList();
}
images.add(cachedImage.get());
}
return images.build();
}
|
@Test
public void testGetCachedBaseImages_manifestListCached_partialMatches()
throws InvalidImageReferenceException, IOException, CacheCorruptedException,
UnlistedPlatformInManifestListException, BadContainerConfigurationFormatException,
LayerCountMismatchException, PlatformNotFoundInBaseImageException {
ImageReference imageReference = ImageReference.parse("cat");
Mockito.when(buildContext.getBaseImageConfiguration())
.thenReturn(ImageConfiguration.builder(imageReference).build());
ManifestListTemplate manifestList = Mockito.mock(ManifestListTemplate.class);
Mockito.when(manifestList.getDigestsForPlatform("arch1", "os1"))
.thenReturn(Arrays.asList("sha256:digest1"));
Mockito.when(manifestList.getDigestsForPlatform("arch2", "os2"))
.thenReturn(Arrays.asList("sha256:digest2"));
ImageMetadataTemplate imageMetadata =
new ImageMetadataTemplate(
manifestList,
Arrays.asList(
new ManifestAndConfigTemplate(
Mockito.mock(BuildableManifestTemplate.class),
new ContainerConfigurationTemplate(),
"sha256:digest1")));
Mockito.when(cache.retrieveMetadata(imageReference)).thenReturn(Optional.of(imageMetadata));
Mockito.when(
cache.areAllLayersCached(imageMetadata.getManifestsAndConfigs().get(0).getManifest()))
.thenReturn(true);
Mockito.when(containerConfig.getPlatforms())
.thenReturn(ImmutableSet.of(new Platform("arch1", "os1"), new Platform("arch2", "os2")));
Assert.assertEquals(Arrays.asList(), pullBaseImageStep.getCachedBaseImages());
}
|
@Override
public int compare(String version1, String version2) {
if(ObjectUtil.equal(version1, version2)) {
return 0;
}
if (version1 == null && version2 == null) {
return 0;
} else if (version1 == null) { // null or "" is treated as the lowest version and sorts first
return -1;
} else if (version2 == null) {
return 1;
}
return CompareUtil.compare(Version.of(version1), Version.of(version2));
}
|
@Test
public void versionComparatorTest4() {
int compare = VersionComparator.INSTANCE.compare("1.13.0", "1.12.1c");
assertTrue(compare > 0);
// reversed-order check: swapping the arguments should give the opposite sign
compare = VersionComparator.INSTANCE.compare("1.12.1c", "1.13.0");
assertTrue(compare < 0);
}
|
public static Metric metric(String name) {
return MetricsImpl.metric(name, Unit.COUNT);
}
|
@Test
public void unusedMetrics() {
pipeline.readFrom(TestSources.items(0L, 1L, 2L, 3L, 4L))
.filter(l -> {
boolean pass = l % 2 == 0;
if (!pass) {
Metrics.metric("dropped"); //retrieve "dropped" counter, but never use it
}
// don't even retrieve the "total" counter
return pass;
})
.writeTo(Sinks.noop());
Job job = runPipeline(pipeline.toDag());
JobMetricsChecker checker = new JobMetricsChecker(job);
checker.assertSummedMetricValue("dropped", 0);
checker.assertNoMetricValues("total");
}
|
public static <T> SVM<T> fit(T[] x, MercerKernel<T> kernel) {
return fit(x, kernel, 0.5, 1E-3);
}
|
@Test
public void testSixClusters() throws Exception {
System.out.println("Six clusters");
CSVFormat format = CSVFormat.Builder.create().setDelimiter(' ').build();
double[][] data = Read.csv(Paths.getTestData("clustering/rem.txt"), format).toArray();
SVM<double[]> model = SVM.fit(data, new GaussianKernel(1.0), 0.2, 1E-3);
double[] x = new double[201];
double[] y = new double[201];
for (int i = 0; i < x.length; i++) {
x[i] = -5 + i * 0.1;
y[i] = -5 + i * 0.1;
}
double[][] grid = new double[201][201];
for (int i = 0; i < grid.length; i++) {
for (int j = 0; j < grid[i].length; j++) {
double[] point = {-5 + i * 0.1, -5 + j * 0.1};
grid[j][i] = model.score(point);
}
}
// ScatterPlot.of(data).canvas().window();
// Heatmap.of(x, y, grid).canvas().window();
java.nio.file.Path temp = Write.object(model);
Read.object(temp);
}
|
public void addForwardedField(int input, int sourceField, int targetField) {
Map<Integer, FieldSet> fieldMapping;
if (input != 0 && input != 1) {
throw new IndexOutOfBoundsException();
} else if (input == 0) {
fieldMapping = this.fieldMapping1;
} else {
fieldMapping = this.fieldMapping2;
}
if (isTargetFieldPresent(targetField, fieldMapping)) {
throw new InvalidSemanticAnnotationException(
"Target field " + targetField + " was added twice to input " + input);
}
FieldSet targetFields = fieldMapping.get(sourceField);
if (targetFields != null) {
fieldMapping.put(sourceField, targetFields.addField(targetField));
} else {
fieldMapping.put(sourceField, new FieldSet(targetField));
}
}
|
@Test
void testAddForwardedFieldsTargetTwice1() {
assertThatThrownBy(
() -> {
DualInputSemanticProperties sp = new DualInputSemanticProperties();
sp.addForwardedField(0, 0, 2);
sp.addForwardedField(0, 1, 2);
})
.isInstanceOf(SemanticProperties.InvalidSemanticAnnotationException.class);
}
|
public static GenericRecord rewriteRecord(GenericRecord oldRecord, Schema newSchema) {
GenericRecord newRecord = new GenericData.Record(newSchema);
boolean isSpecificRecord = oldRecord instanceof SpecificRecordBase;
for (Schema.Field f : newSchema.getFields()) {
if (!(isSpecificRecord && isMetadataField(f.name()))) {
copyOldValueOrSetDefault(oldRecord, newRecord, f);
}
}
return newRecord;
}
|
@Test
public void testMetadataField() {
GenericRecord rec = new GenericData.Record(new Schema.Parser().parse(EXAMPLE_SCHEMA));
rec.put("_row_key", "key1");
rec.put("non_pii_col", "val1");
rec.put("pii_col", "val2");
rec.put("timestamp", 3.5);
GenericRecord rec1 = HoodieAvroUtils.rewriteRecord(rec, new Schema.Parser().parse(SCHEMA_WITH_METADATA_FIELD));
assertNull(rec1.get("_hoodie_commit_time"));
assertNull(rec1.get("nullable_field"));
assertNull(rec1.get("nullable_field_wo_default"));
}
|
public static String cleanInvalid(String fileName) {
return StrUtil.isBlank(fileName) ? fileName : ReUtil.delAll(FILE_NAME_INVALID_PATTERN_WIN, fileName);
}
|
@Test
public void cleanInvalidTest(){
String name = FileNameUtil.cleanInvalid("1\n2\n");
assertEquals("12", name);
name = FileNameUtil.cleanInvalid("\r1\r\n2\n");
assertEquals("12", name);
}
|
@Override
public String toString() {
return "Route{" +
"customId='" + customId + '\'' +
", exchangesTotal=" + exchangesTotal +
", id='" + id + '\'' +
", totalProcessingTime=" + totalProcessingTime +
", components=" + components +
", componentsMap=" + componentsMap +
'}';
}
|
@Test
public void testToString() {
String toString = getInstance().toString();
assertNotNull(toString);
assertTrue(toString.contains("Route"));
}
|
@Override
public MastershipRole getRole(DeviceId deviceId) {
checkNotNull(deviceId, DEVICE_NULL);
// TODO hard coded to master for now.
return MastershipRole.MASTER;
}
|
@Test(expected = NullPointerException.class)
public void testGetRoleByNullId() {
manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
DeviceService deviceService = manager.get(virtualNetwork.id(), DeviceService.class);
// test the getRole() method using a null device identifier
deviceService.getRole(null);
}
|
public static String stripTrailingSlash(String path) {
Preconditions.checkArgument(!Strings.isNullOrEmpty(path), "path must not be null or empty");
String result = path;
while (!result.endsWith("://") && result.endsWith("/")) {
result = result.substring(0, result.length() - 1);
}
return result;
}
|
@Test
void testStripTrailingSlashForRootPathWithTrailingSlash() {
String rootPath = "blobstore://";
String rootPathWithTrailingSlash = rootPath + "/";
assertThat(LocationUtil.stripTrailingSlash(rootPathWithTrailingSlash))
.as("Should be root path")
.isEqualTo(rootPath);
}
|
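// Hedged usage sketch for stripTrailingSlash above: trailing slashes are removed repeatedly,
// while a bare scheme root such as "blobstore://" is left untouched (no assumptions beyond the
// method as defined above).
public void stripTrailingSlashSketch() {
    String cleaned = LocationUtil.stripTrailingSlash("s3://bucket/path///"); // "s3://bucket/path"
    String root = LocationUtil.stripTrailingSlash("blobstore://");           // unchanged
}
|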
@Description("Returns the Geometry value that represents the point set difference of two geometries")
@ScalarFunction("ST_Difference")
@SqlType(GEOMETRY_TYPE_NAME)
public static Slice stDifference(@SqlType(GEOMETRY_TYPE_NAME) Slice left, @SqlType(GEOMETRY_TYPE_NAME) Slice right)
{
OGCGeometry leftGeometry = EsriGeometrySerde.deserialize(left);
OGCGeometry rightGeometry = EsriGeometrySerde.deserialize(right);
verifySameSpatialReference(leftGeometry, rightGeometry);
return EsriGeometrySerde.serialize(leftGeometry.difference(rightGeometry));
}
|
@Test
public void testSTDifference()
{
assertFunction("ST_AsText(ST_Difference(ST_GeometryFromText('POINT (50 100)'), ST_GeometryFromText('POINT (150 150)')))", VARCHAR, "POINT (50 100)");
assertFunction("ST_AsText(ST_Difference(ST_GeometryFromText('MULTIPOINT (50 100, 50 200)'), ST_GeometryFromText('POINT (50 100)')))", VARCHAR, "POINT (50 200)");
assertFunction("ST_AsText(ST_Difference(ST_GeometryFromText('LINESTRING (50 100, 50 200)'), ST_GeometryFromText('LINESTRING (50 50, 50 150)')))", VARCHAR, "LINESTRING (50 150, 50 200)");
assertFunction("ST_AsText(ST_Difference(ST_GeometryFromText('MULTILINESTRING ((1 1, 5 1), (2 4, 4 4))'), ST_GeometryFromText('MULTILINESTRING ((2 1, 4 1), (3 3, 7 3))')))", VARCHAR, "MULTILINESTRING ((1 1, 2 1), (4 1, 5 1), (2 4, 4 4))");
assertFunction("ST_AsText(ST_Difference(ST_GeometryFromText('POLYGON ((1 1, 1 4, 4 4, 4 1, 1 1))'), ST_GeometryFromText('POLYGON ((2 2, 2 5, 5 5, 5 2, 2 2))')))", VARCHAR, "POLYGON ((1 1, 1 4, 2 4, 2 2, 4 2, 4 1, 1 1))");
assertFunction("ST_AsText(ST_Difference(ST_GeometryFromText('MULTIPOLYGON (((1 1, 1 3, 3 3, 3 1, 1 1)), ((0 0, 0 2, 2 2, 2 0, 0 0)))'), ST_GeometryFromText('POLYGON ((0 1, 3 1, 3 3, 0 3, 0 1))')))", VARCHAR, "POLYGON ((1 1, 2 1, 2 0, 0 0, 0 1, 1 1))");
}
|
@Override
public void onMsg(TbContext ctx, TbMsg msg) throws ExecutionException, InterruptedException, TbNodeException {
var msgDataAsJsonNode = TbMsgSource.DATA.equals(fetchTo) ? getMsgDataAsObjectNode(msg) : null;
processFieldsData(ctx, msg, msg.getOriginator(), msgDataAsJsonNode, config.isIgnoreNullStrings());
}
|
@Test
public void givenUnsupportedEntityType_whenOnMsg_thenShouldTellFailureWithSameMsg() throws TbNodeException, ExecutionException, InterruptedException {
// GIVEN
config.setDataMapping(Map.of(
"name", "originatorName",
"type", "originatorType",
"label", "originatorLabel"));
config.setIgnoreNullStrings(false);
config.setFetchTo(TbMsgSource.METADATA);
node.config = config;
node.fetchTo = TbMsgSource.METADATA;
var msgMetaData = new TbMsgMetaData(Map.of(
"testKey1", "testValue1",
"testKey2", "123"));
var msgData = "[\"value1\",\"value2\"]";
msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, new DashboardId(UUID.randomUUID()), msgMetaData, msgData);
when(ctxMock.getDbCallbackExecutor()).thenReturn(DB_EXECUTOR);
// WHEN
node.onMsg(ctxMock, msg);
// THEN
var actualMessageCaptor = ArgumentCaptor.forClass(TbMsg.class);
verify(ctxMock, times(1)).tellFailure(actualMessageCaptor.capture(), any());
verify(ctxMock, never()).tellSuccess(any());
assertThat(actualMessageCaptor.getValue().getData()).isEqualTo(msgData);
assertThat(actualMessageCaptor.getValue().getMetaData()).isEqualTo(msgMetaData);
}
|
@Override
public CloseableIterator<T> iterator() {
ParallelIterator<T> iter =
new ParallelIterator<>(iterables, workerPool, approximateMaxQueueSize);
addCloseable(iter);
return iter;
}
|
@Test
public void closeMoreDataParallelIteratorWithoutCompleteIteration() {
ExecutorService executor = Executors.newFixedThreadPool(1);
Iterator<Integer> integerIterator =
new Iterator<Integer>() {
private int number = 1;
@Override
public boolean hasNext() {
if (number > 1000) {
return false;
}
number++;
return true;
}
@Override
public Integer next() {
try {
// sleep to control the number generation rate
Thread.sleep(10);
} catch (InterruptedException e) {
// Sleep interrupted, we ignore it!
}
return number;
}
};
Iterable<CloseableIterable<Integer>> transform =
Iterables.transform(
Lists.newArrayList(1),
item ->
new CloseableIterable<Integer>() {
@Override
public void close() {}
@Override
public CloseableIterator<Integer> iterator() {
return CloseableIterator.withClose(integerIterator);
}
});
ParallelIterable<Integer> parallelIterable = new ParallelIterable<>(transform, executor);
ParallelIterator<Integer> iterator = (ParallelIterator<Integer>) parallelIterable.iterator();
assertThat(iterator.hasNext()).isTrue();
assertThat(iterator.next()).isNotNull();
Awaitility.await("Queue is populated")
.atMost(5, TimeUnit.SECONDS)
.untilAsserted(() -> queueHasElements(iterator));
iterator.close();
Awaitility.await("Queue is cleared")
.atMost(5, TimeUnit.SECONDS)
.untilAsserted(
() ->
assertThat(iterator.queueSize())
.as("Queue is not empty after cleaning")
.isEqualTo(0));
}
|
@Nullable
public PasswordAlgorithm forPassword(String hashedPassword) {
for (PasswordAlgorithm passwordAlgorithm : passwordAlgorithms.values()) {
if (passwordAlgorithm.supports(hashedPassword))
return passwordAlgorithm;
}
return null;
}
|
@Test
public void testForPasswordShouldReturnFirstAlgorithm() throws Exception {
when(passwordAlgorithm1.supports(anyString())).thenReturn(true);
final PasswordAlgorithmFactory passwordAlgorithmFactory = new PasswordAlgorithmFactory(passwordAlgorithms, passwordAlgorithm2);
assertThat(passwordAlgorithmFactory.forPassword("foobar")).isEqualTo(passwordAlgorithm1);
}
|
@Override
public void isEqualTo(@Nullable Object expected) {
super.isEqualTo(expected);
}
|
@Test
public void isEqualTo_WithoutToleranceParameter_Fail_NotEqual() {
expectFailureWhenTestingThat(array(2.2d)).isEqualTo(array(OVER_2POINT2));
assertFailureValue("expected", "[2.2000000000000006]");
assertFailureValue("but was", "[2.2]");
assertFailureValue("differs at index", "[0]");
}
|
public String getDashboardUrl() {
verifyInitialized();
return dashboardUrl;
}
|
@Test
public void getDashboardUrl_should_fail_if_not_initialized() {
assertThatThrownBy(() -> underTest.getDashboardUrl())
.isInstanceOf(IllegalStateException.class);
}
|
public static String delAll(String regex, CharSequence content) {
if (StrUtil.hasEmpty(regex, content)) {
return StrUtil.str(content);
}
final Pattern pattern = PatternPool.get(regex, Pattern.DOTALL);
return delAll(pattern, content);
}
|
@Test
public void issueI6GIMTTest(){
assertEquals(StrUtil.EMPTY, ReUtil.delAll("[\\s]*", " "));
}
|
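// Hedged usage sketch for delAll above (assumes Hutool's ReUtil as in the method): every match of
// the regex is removed from the content, and DOTALL lets '.' also match line breaks.
public void delAllSketch() {
    String stripped = ReUtil.delAll("\\d+", "a1b22c"); // "abc"
}
|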
public static FeeFilterMessage read(ByteBuffer payload) throws BufferUnderflowException, ProtocolException {
Coin feeRate = Coin.read(payload);
check(feeRate.signum() >= 0, () -> new ProtocolException("fee rate out of range: " + feeRate));
return new FeeFilterMessage(feeRate);
}
|
@Test(expected = ProtocolException.class)
@Parameters(method = "invalidFeeRates")
public void invalid(Coin feeRate) {
byte[] buf = ByteBuffer.allocate(Long.BYTES).order(ByteOrder.LITTLE_ENDIAN).putLong(feeRate.getValue()).array();
FeeFilterMessage ffm = FeeFilterMessage.read(ByteBuffer.wrap(buf));
}
|
public TolerantIntegerComparison isNotWithin(int tolerance) {
return new TolerantIntegerComparison() {
@Override
public void of(int expected) {
Integer actual = IntegerSubject.this.actual;
checkNotNull(
actual, "actual value cannot be null. tolerance=%s expected=%s", tolerance, expected);
checkTolerance(tolerance);
if (equalWithinTolerance(actual, expected, tolerance)) {
failWithoutActual(
fact("expected not to be", Integer.toString(expected)),
butWas(),
fact("within tolerance", Integer.toString(tolerance)));
}
}
};
}
|
@Test
public void isNotWithinOf() {
assertThatIsNotWithinFails(20000, 0, 20000);
assertThatIsNotWithinFails(20000, 1, 20000);
assertThatIsNotWithinFails(20000, 10000, 20000);
assertThatIsNotWithinFails(20000, 10000, 30000);
assertThatIsNotWithinFails(Integer.MIN_VALUE, 1, Integer.MIN_VALUE + 1);
assertThatIsNotWithinFails(Integer.MAX_VALUE, 1, Integer.MAX_VALUE - 1);
assertThatIsNotWithinFails(Integer.MAX_VALUE / 2, Integer.MAX_VALUE, -Integer.MAX_VALUE / 2);
assertThatIsNotWithinFails(-Integer.MAX_VALUE / 2, Integer.MAX_VALUE, Integer.MAX_VALUE / 2);
assertThat(20000).isNotWithin(9999).of(30000);
assertThat(20000).isNotWithin(10000).of(30001);
assertThat(Integer.MIN_VALUE).isNotWithin(0).of(Integer.MAX_VALUE);
assertThat(Integer.MAX_VALUE).isNotWithin(0).of(Integer.MIN_VALUE);
assertThat(Integer.MIN_VALUE).isNotWithin(1).of(Integer.MIN_VALUE + 2);
assertThat(Integer.MAX_VALUE).isNotWithin(1).of(Integer.MAX_VALUE - 2);
// Don't fall for rollover
assertThat(Integer.MIN_VALUE).isNotWithin(1).of(Integer.MAX_VALUE);
assertThat(Integer.MAX_VALUE).isNotWithin(1).of(Integer.MIN_VALUE);
}
|
@Override
public void destroy() {
if (evictionScheduler != null) {
evictionScheduler.remove(getRawName());
}
removeListeners();
}
|
@Test
public void testDestroy() {
RSetCache<String> cache = redisson.getSetCache("test");
EvictionScheduler evictionScheduler = ((Redisson)redisson).getEvictionScheduler();
Map<?, ?> map = Reflect.on(evictionScheduler).get("tasks");
assertThat(map.isEmpty()).isFalse();
cache.destroy();
assertThat(map.isEmpty()).isTrue();
}
|
public static boolean isKafkaInvokeBySermant(StackTraceElement[] stackTrace) {
return isInvokeBySermant(KAFKA_CONSUMER_CLASS_NAME, KAFKA_CONSUMER_CONTROLLER_CLASS_NAME, stackTrace);
}
|
@Test
public void testNotInvokeBySermantWithNestedInvoke() {
StackTraceElement[] stackTrace = new StackTraceElement[5];
stackTrace[0] = new StackTraceElement("testClass0", "testMethod0", "testFileName0", 0);
stackTrace[1] = new StackTraceElement("testClass1", "testMethod1", "testFileName1", 1);
stackTrace[2] = new StackTraceElement("org.apache.kafka.clients.consumer.KafkaConsumer", "unsubscribe",
"testFileName2", 2);
stackTrace[3] = new StackTraceElement("org.apache.kafka.clients.consumer.KafkaConsumer", "subscribe",
"testFileName3", 3);
stackTrace[4] = new StackTraceElement("testClass4", "testMethod4", "testFileName4", 4);
Assert.assertFalse(InvokeUtils.isKafkaInvokeBySermant(stackTrace));
}
|
private String format(double score) {
int NUM_DECIMAL_PLACES = 3;
return String.format("%." + NUM_DECIMAL_PLACES + "f", score);
}
|
@Test
public void durationFormatOnShortTimesWorks() {
Duration oneMinfiveSec = Duration.ofMillis(5_123).plusMinutes(1);
assertThat(format(oneMinfiveSec), equalTo("1m 5.123 sec"));
}
|
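// Hedged sketch of the score formatter above: it is plain String.format with three decimal places,
// so 1.23456 becomes "1.235" (half-up rounding; the decimal separator follows the default locale).
public void scoreFormatSketch() {
    String formatted = String.format("%.3f", 1.23456); // "1.235" in an English locale
}
|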
public static <T> Inner<T> fields(String... fields) {
return fields(FieldAccessDescriptor.withFieldNames(fields));
}
|
@Test
@Category(NeedsRunner.class)
public void testDropNestedField() {
Schema expectedSchema =
Schema.builder().addStringField("string").addStringField("field2").build();
PCollection<Row> result =
pipeline
.apply(
Create.of(
nestedRow(simpleRow(1, "one")),
nestedRow(simpleRow(2, "two")),
nestedRow(simpleRow(3, "three")))
.withRowSchema(NESTED_SCHEMA))
.apply(DropFields.fields("nested.field1"));
assertEquals(expectedSchema, result.getSchema());
List<Row> expectedRows =
Lists.newArrayList(
Row.withSchema(expectedSchema).addValues("foo", "one").build(),
Row.withSchema(expectedSchema).addValues("foo", "two").build(),
Row.withSchema(expectedSchema).addValues("foo", "three").build());
PAssert.that(result).containsInAnyOrder(expectedRows);
pipeline.run();
}
|
Future<Boolean> canRollController(int nodeId) {
LOGGER.debugCr(reconciliation, "Determining whether controller pod {} can be rolled", nodeId);
return describeMetadataQuorum().map(info -> {
boolean canRoll = isQuorumHealthyWithoutNode(nodeId, info);
if (!canRoll) {
LOGGER.debugCr(reconciliation, "Not restarting controller pod {}. Restart would affect the quorum health", nodeId);
}
return canRoll;
}).recover(error -> {
LOGGER.warnCr(reconciliation, "Error determining whether it is safe to restart controller pod {}", nodeId, error);
return Future.failedFuture(error);
});
}
|
@Test
public void shouldHandlePartiallyIncompleteQuorumData(VertxTestContext context) {
Map<Integer, OptionalLong> controllers = new HashMap<>();
controllers.put(1, OptionalLong.of(10000L));
controllers.put(2, OptionalLong.empty()); // Simulating incomplete data
controllers.put(3, OptionalLong.of(9500L));
Admin admin = setUpMocks(1, controllers);
KafkaQuorumCheck quorumCheck = new KafkaQuorumCheck(Reconciliation.DUMMY_RECONCILIATION, admin, vertx, CONTROLLER_QUORUM_FETCH_TIMEOUT_MS);
quorumCheck.canRollController(1).onComplete(context.succeeding(result -> {
context.verify(() -> assertFalse(result));
context.completeNow();
}));
}
|
public SalesforceInsertMeta() {
super(); // allocate BaseStepMeta
}
|
@Test
public void testSalesforceInsertMeta() throws KettleException {
List<String> attributes = new ArrayList<String>();
attributes.addAll( SalesforceMetaTest.getDefaultAttributes() );
attributes.addAll( Arrays.asList( "batchSize", "salesforceIDFieldName", "updateLookup", "updateStream",
"useExternalId", "rollbackAllChangesOnError" ) );
Map<String, String> getterMap = new HashMap<String, String>();
Map<String, String> setterMap = new HashMap<String, String>();
Map<String, FieldLoadSaveValidator<?>> fieldLoadSaveValidators = new HashMap<String, FieldLoadSaveValidator<?>>();
fieldLoadSaveValidators.put( "updateLookup",
new ArrayLoadSaveValidator<String>( new StringLoadSaveValidator(), 50 ) );
fieldLoadSaveValidators.put( "updateStream",
new ArrayLoadSaveValidator<String>( new StringLoadSaveValidator(), 50 ) );
fieldLoadSaveValidators.put( "useExternalId",
new ArrayLoadSaveValidator<Boolean>( new BooleanLoadSaveValidator(), 50 ) );
LoadSaveTester loadSaveTester =
new LoadSaveTester( SalesforceInsertMeta.class, attributes, getterMap, setterMap,
fieldLoadSaveValidators, new HashMap<String, FieldLoadSaveValidator<?>>() );
loadSaveTester.testRepoRoundTrip();
loadSaveTester.testXmlRoundTrip();
}
|
@Override
public BasicTypeDefine reconvert(Column column) {
BasicTypeDefine.BasicTypeDefineBuilder builder =
BasicTypeDefine.builder()
.name(column.getName())
.nullable(column.isNullable())
.comment(column.getComment())
.defaultValue(column.getDefaultValue());
switch (column.getDataType().getSqlType()) {
case BOOLEAN:
builder.columnType(REDSHIFT_BOOLEAN);
builder.dataType(REDSHIFT_BOOLEAN);
break;
case TINYINT:
case SMALLINT:
builder.columnType(REDSHIFT_SMALLINT);
builder.dataType(REDSHIFT_SMALLINT);
break;
case INT:
builder.columnType(REDSHIFT_INTEGER);
builder.dataType(REDSHIFT_INTEGER);
break;
case BIGINT:
builder.columnType(REDSHIFT_BIGINT);
builder.dataType(REDSHIFT_BIGINT);
break;
case FLOAT:
builder.columnType(REDSHIFT_REAL);
builder.dataType(REDSHIFT_REAL);
break;
case DOUBLE:
builder.columnType(REDSHIFT_DOUBLE_PRECISION);
builder.dataType(REDSHIFT_DOUBLE_PRECISION);
break;
case DECIMAL:
DecimalType decimalType = (DecimalType) column.getDataType();
long precision = decimalType.getPrecision();
int scale = decimalType.getScale();
if (precision <= 0) {
precision = DEFAULT_PRECISION;
scale = DEFAULT_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is precision less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (precision > MAX_PRECISION) {
scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
precision = MAX_PRECISION;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum precision of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_PRECISION,
precision,
scale);
}
if (scale < 0) {
scale = 0;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is scale less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (scale > MAX_SCALE) {
scale = MAX_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_SCALE,
precision,
scale);
}
builder.columnType(String.format("%s(%d,%d)", REDSHIFT_NUMERIC, precision, scale));
builder.dataType(REDSHIFT_NUMERIC);
builder.precision(precision);
builder.scale(scale);
break;
case STRING:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(
String.format(
"%s(%d)",
REDSHIFT_CHARACTER_VARYING, MAX_CHARACTER_VARYING_LENGTH));
builder.dataType(REDSHIFT_CHARACTER_VARYING);
builder.length((long) MAX_CHARACTER_VARYING_LENGTH);
} else if (column.getColumnLength() <= MAX_CHARACTER_VARYING_LENGTH) {
builder.columnType(
String.format(
"%s(%d)",
REDSHIFT_CHARACTER_VARYING, column.getColumnLength()));
builder.dataType(REDSHIFT_CHARACTER_VARYING);
builder.length(column.getColumnLength());
} else {
                    log.warn(
                            "The length of string column {} is {}, which exceeds the maximum varchar length of {}, "
                                    + "it will be converted to {}",
                            column.getName(),
                            column.getColumnLength(),
                            MAX_CHARACTER_VARYING_LENGTH,
                            REDSHIFT_SUPER);
builder.columnType(REDSHIFT_SUPER);
builder.dataType(REDSHIFT_SUPER);
}
break;
case BYTES:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(
String.format(
"%s(%d)", REDSHIFT_BINARY_VARYING, MAX_BINARY_VARYING_LENGTH));
builder.dataType(REDSHIFT_BINARY_VARYING);
} else if (column.getColumnLength() <= MAX_BINARY_VARYING_LENGTH) {
builder.columnType(
String.format(
"%s(%d)", REDSHIFT_BINARY_VARYING, column.getColumnLength()));
builder.dataType(REDSHIFT_BINARY_VARYING);
builder.length(column.getColumnLength());
} else {
builder.columnType(
String.format(
"%s(%d)", REDSHIFT_BINARY_VARYING, MAX_BINARY_VARYING_LENGTH));
builder.dataType(REDSHIFT_BINARY_VARYING);
log.warn(
"The length of binary column {} is {}, which exceeds the maximum length of {}, "
+ "the length will be set to {}",
column.getName(),
column.getColumnLength(),
MAX_BINARY_VARYING_LENGTH,
MAX_BINARY_VARYING_LENGTH);
}
break;
case TIME:
Integer timeScale = column.getScale();
if (timeScale != null && timeScale > MAX_TIME_SCALE) {
timeScale = MAX_TIME_SCALE;
log.warn(
"The time column {} type time({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to time({})",
column.getName(),
column.getScale(),
                            MAX_TIME_SCALE,
timeScale);
}
builder.columnType(REDSHIFT_TIME);
builder.dataType(REDSHIFT_TIME);
builder.scale(timeScale);
break;
case TIMESTAMP:
Integer timestampScale = column.getScale();
if (timestampScale != null && timestampScale > MAX_TIMESTAMP_SCALE) {
timestampScale = MAX_TIMESTAMP_SCALE;
log.warn(
"The timestamp column {} type timestamp({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to timestamp({})",
column.getName(),
column.getScale(),
MAX_TIMESTAMP_SCALE,
timestampScale);
}
builder.columnType(REDSHIFT_TIMESTAMP);
builder.dataType(REDSHIFT_TIMESTAMP);
builder.scale(timestampScale);
break;
case MAP:
case ARRAY:
case ROW:
builder.columnType(REDSHIFT_SUPER);
builder.dataType(REDSHIFT_SUPER);
break;
default:
try {
return super.reconvert(column);
} catch (SeaTunnelRuntimeException e) {
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.REDSHIFT,
column.getDataType().getSqlType().name(),
column.getName());
}
}
return builder.build();
}
|
@Test
public void testReconvertDate() {
Column column =
PhysicalColumn.builder()
.name("test")
.dataType(LocalTimeType.LOCAL_DATE_TYPE)
.build();
BasicTypeDefine typeDefine = RedshiftTypeConverter.INSTANCE.reconvert(column);
Assertions.assertEquals(column.getName(), typeDefine.getName());
Assertions.assertEquals(RedshiftTypeConverter.PG_DATE, typeDefine.getColumnType());
Assertions.assertEquals(RedshiftTypeConverter.PG_DATE, typeDefine.getDataType());
}
|
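// Hedged sketch of the decimal clamping rule used in reconvert above: when the requested precision
// exceeds the maximum, the scale is reduced by the same overshoot (never below 0) and the precision
// is capped. The constant values below are illustrative assumptions, not the converter's real ones.
public void decimalClampSketch() {
    int maxPrecision = 38; // hypothetical MAX_PRECISION
    long precision = 45;
    int scale = 10;
    if (precision > maxPrecision) {
        scale = (int) Math.max(0, scale - (precision - maxPrecision)); // 10 - 7 = 3
        precision = maxPrecision;                                      // 38
    }
    // result: decimal(38,3)
}
|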
@ExecuteOn(TaskExecutors.IO)
@Post(uri = "/export/by-ids", produces = MediaType.APPLICATION_OCTET_STREAM)
@Operation(
tags = {"Templates"},
summary = "Export templates as a ZIP archive of yaml sources."
)
public HttpResponse<byte[]> exportByIds(
@Parameter(description = "A list of tuple flow ID and namespace as template identifiers") @Body List<IdWithNamespace> ids
) throws IOException {
var templates = ids.stream()
.map(id -> templateRepository.findById(tenantService.resolveTenant(), id.getNamespace(), id.getId()).orElseThrow())
.toList();
var bytes = zipTemplates(templates);
return HttpResponse.ok(bytes).header("Content-Disposition", "attachment; filename=\"templates.zip\"");
}
|
@Test
void exportByIds() throws IOException {
// create 3 templates, so we can retrieve them by id
var template1 = client.toBlocking().retrieve(POST("/api/v1/templates", createTemplate()), Template.class);
var template2 = client.toBlocking().retrieve(POST("/api/v1/templates", createTemplate()), Template.class);
var template3 = client.toBlocking().retrieve(POST("/api/v1/templates", createTemplate()), Template.class);
List<IdWithNamespace> ids = List.of(
new IdWithNamespace("kestra.test", template1.getId()),
new IdWithNamespace("kestra.test", template2.getId()),
new IdWithNamespace("kestra.test", template3.getId()));
byte[] zip = client.toBlocking().retrieve(HttpRequest.POST("/api/v1/templates/export/by-ids?namespace=kestra.test", ids),
Argument.of(byte[].class));
File file = File.createTempFile("templates", ".zip");
Files.write(file.toPath(), zip);
try(ZipFile zipFile = new ZipFile(file)) {
assertThat(zipFile.stream().count(), is(3L));
}
file.delete();
}
|
public Future<CaReconciliationResult> reconcile(Clock clock) {
return reconcileCas(clock)
.compose(i -> verifyClusterCaFullyTrustedAndUsed())
.compose(i -> reconcileClusterOperatorSecret(clock))
.compose(i -> rollingUpdateForNewCaKey())
.compose(i -> maybeRemoveOldClusterCaCertificates())
.map(i -> new CaReconciliationResult(clusterCa, clientsCa));
}
|
@Test
public void testRollingReasonsWithClusterCAKeyNotTrusted(Vertx vertx, VertxTestContext context) {
Kafka kafka = new KafkaBuilder(KAFKA)
.editSpec()
.withNewEntityOperator()
.endEntityOperator()
.withNewCruiseControl()
.endCruiseControl()
.withNewKafkaExporter()
.endKafkaExporter()
.endSpec()
.build();
Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME);
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
SecretOperator secretOps = supplier.secretOperations;
ArgumentCaptor<Secret> clusterCaCert = ArgumentCaptor.forClass(Secret.class);
ArgumentCaptor<Secret> clusterCaKey = ArgumentCaptor.forClass(Secret.class);
ArgumentCaptor<Secret> clientsCaCert = ArgumentCaptor.forClass(Secret.class);
ArgumentCaptor<Secret> clientsCaKey = ArgumentCaptor.forClass(Secret.class);
when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaCertSecretName(NAME)), clusterCaCert.capture())).thenAnswer(i -> {
Secret s = clusterCaCert.getValue();
s.getMetadata().setAnnotations(Map.of(Ca.ANNO_STRIMZI_IO_CA_CERT_GENERATION, "1"));
return Future.succeededFuture(ReconcileResult.created(s));
});
when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaKeySecretName(NAME)), clusterCaKey.capture())).thenAnswer(i -> {
Secret s = clusterCaKey.getValue();
s.getMetadata().setAnnotations(Map.of(Ca.ANNO_STRIMZI_IO_CA_KEY_GENERATION, "1"));
return Future.succeededFuture(ReconcileResult.created(s));
});
when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.clientsCaCertificateSecretName(NAME)), clientsCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0))));
when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.clientsCaKeySecretName(NAME)), clientsCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0))));
when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.clusterOperatorCertsSecretName(NAME)), any())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0))));
when(secretOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(List.of()));
Map<String, String> generationAnnotations =
Map.of(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, "0", Ca.ANNO_STRIMZI_IO_CLUSTER_CA_KEY_GENERATION, "0");
StrimziPodSetOperator spsOps = supplier.strimziPodSetOperator;
when(spsOps.getAsync(eq(NAMESPACE), eq(KafkaResources.zookeeperComponentName(NAME)))).thenReturn(Future.succeededFuture());
PodOperator mockPodOps = supplier.podOperations;
when(mockPodOps.listAsync(any(), any(Labels.class))).thenAnswer(i -> {
List<Pod> pods = new ArrayList<>();
// adding a terminating Cruise Control pod to test that it's skipped during the key generation check
Pod ccPod = podWithNameAndAnnotations("my-cluster-cruise-control", false, false, generationAnnotations);
ccPod.getMetadata().setDeletionTimestamp("2023-06-08T16:23:18Z");
pods.add(ccPod);
// adding Kafka pods with old CA cert and key generation
pods.add(podWithNameAndAnnotations("my-cluster-controllers-3", false, true, generationAnnotations));
pods.add(podWithNameAndAnnotations("my-cluster-controllers-4", false, true, generationAnnotations));
pods.add(podWithNameAndAnnotations("my-cluster-controllers-5", false, true, generationAnnotations));
pods.add(podWithNameAndAnnotations("my-cluster-brokers-0", true, false, generationAnnotations));
pods.add(podWithNameAndAnnotations("my-cluster-brokers-1", true, false, generationAnnotations));
pods.add(podWithNameAndAnnotations("my-cluster-brokers-2", true, false, generationAnnotations));
return Future.succeededFuture(pods);
});
Map<String, Deployment> deps = new HashMap<>();
deps.put("my-cluster-entity-operator", deploymentWithName("my-cluster-entity-operator"));
deps.put("my-cluster-cruise-control", deploymentWithName("my-cluster-cruise-control"));
deps.put("my-cluster-kafka-exporter", deploymentWithName("my-cluster-kafka-exporter"));
DeploymentOperator depsOperator = supplier.deploymentOperations;
when(depsOperator.getAsync(any(), any())).thenAnswer(i -> Future.succeededFuture(deps.get(i.getArgument(1))));
Checkpoint async = context.checkpoint();
MockCaReconciler mockCaReconciler = new MockCaReconciler(reconciliation, kafka, new ClusterOperatorConfig.ClusterOperatorConfigBuilder(ResourceUtils.dummyClusterOperatorConfig(), KafkaVersionTestUtils.getKafkaVersionLookup()).with(ClusterOperatorConfig.OPERATION_TIMEOUT_MS.key(), "1").build(),
supplier, vertx, CERT_MANAGER, PASSWORD_GENERATOR);
mockCaReconciler
.reconcile(Clock.systemUTC())
.onComplete(context.succeeding(c -> context.verify(() -> {
assertThat(mockCaReconciler.isClusterCaNeedFullTrust, is(true));
assertThat(mockCaReconciler.kPodRollReasons.contains(RestartReason.CLUSTER_CA_CERT_KEY_REPLACED), is(true));
                    assertThat(mockCaReconciler.deploymentRollReason.size(), is(3));
for (String reason: mockCaReconciler.deploymentRollReason) {
assertThat(reason.equals(RestartReason.CLUSTER_CA_CERT_KEY_REPLACED.getDefaultNote()), is(true));
}
async.flag();
})));
}
|
private static void convertToTelemetry(JsonElement jsonElement, long systemTs, Map<Long, List<KvEntry>> result, PostTelemetryMsg.Builder builder) {
if (jsonElement.isJsonObject()) {
parseObject(systemTs, result, builder, jsonElement.getAsJsonObject());
} else if (jsonElement.isJsonArray()) {
jsonElement.getAsJsonArray().forEach(je -> {
if (je.isJsonObject()) {
parseObject(systemTs, result, builder, je.getAsJsonObject());
} else {
throw new JsonSyntaxException(CAN_T_PARSE_VALUE + je);
}
});
} else {
throw new JsonSyntaxException(CAN_T_PARSE_VALUE + jsonElement);
}
}
|
@Test
public void testParseAsDouble() {
var result = JsonConverter.convertToTelemetry(JsonParser.parseString("{\"meterReadingDelta\": 1.1}"), 0L);
Assertions.assertEquals(1.1, result.get(0L).get(0).getDoubleValue().get(), 0.0);
}
|
public static <T> T copyProperties(Object source, Class<T> tClass, String... ignoreProperties) {
if (null == source) {
return null;
}
T target = ReflectUtil.newInstanceIfPossible(tClass);
copyProperties(source, target, CopyOptions.create().setIgnoreProperties(ignoreProperties));
return target;
}
|
@Test
public void copyBeanPropertiesFilterTest() {
final Food info = new Food();
info.setBookID("0");
info.setCode("");
final Food newFood = new Food();
final CopyOptions copyOptions = CopyOptions.create().setPropertiesFilter((f, v) -> !(v instanceof CharSequence) || StrUtil.isNotBlank(v.toString()));
BeanUtil.copyProperties(info, newFood, copyOptions);
assertEquals(info.getBookID(), newFood.getBookID());
assertNull(newFood.getCode());
}
|
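// Hedged usage sketch for copyProperties(Object, Class, String...) above (Hutool's BeanUtil):
// a fresh target instance is created and readable properties are copied except the ignored ones.
// The Food bean with bookID/code is reused from the test above.
public void copyPropertiesIgnoreSketch() {
    Food source = new Food();
    source.setBookID("42");
    source.setCode("abc");
    Food copy = BeanUtil.copyProperties(source, Food.class, "code"); // bookID copied, code stays null
}
|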
@Override
public Object remove(String key) {
return threadLocal.get().remove(key);
}
|
@Test
public void testRemove() {
// Test putting and removing a value
contextCore.put("key", "value");
assertEquals("value", contextCore.remove("key"));
assertNull(contextCore.get("key"));
}
|
@Override
public Set<MaintenanceEvent> readEvents(Duration timeout) throws SamplingException {
LOG.debug("Reading maintenance events.");
long eventReadPeriodEndMs = _kafkaCruiseControl.timeMs();
if (refreshPartitionAssignment()) {
_lastEventReadPeriodEndTimeMs = eventReadPeriodEndMs;
return Collections.emptySet();
}
long timeoutEndMs = eventReadPeriodEndMs + timeout.toMillis();
Set<MaintenanceEvent> maintenanceEvents = new HashSet<>();
try {
Map<TopicPartition, Long> endOffsets = seekToRelevantOffsets();
LOG.debug("Started to consume from maintenance event topic partitions {}.", _currentPartitionAssignment);
_consumer.resume(_consumer.paused());
Set<TopicPartition> partitionsToPause = new HashSet<>();
do {
ConsumerRecords<String, MaintenancePlan> records = _consumer.poll(timeout);
for (ConsumerRecord<String, MaintenancePlan> record : records) {
if (record == null) {
// This means that the record cannot be parsed because the maintenance plan version is not supported. It might
// happen when existing maintenance plans have been updated and the current code is still old. We simply ignore
// that plan in this case (see MaintenancePlanSerde.MaintenancePlanTypeAdapter#verifyTypeAndVersion(String, byte).
LOG.warn("Cannot parse record, please update your Cruise Control version.");
continue;
}
long planGenerationTimeMs = record.value().timeMs();
if (planGenerationTimeMs + _maintenancePlanExpirationMs < eventReadPeriodEndMs) {
LOG.warn("Discarding the expired plan {}. (Expired: {} Evaluated: {}).", record.value(),
planGenerationTimeMs + _maintenancePlanExpirationMs, eventReadPeriodEndMs);
} else if (planGenerationTimeMs >= eventReadPeriodEndMs) {
TopicPartition tp = new TopicPartition(record.topic(), record.partition());
LOG.debug("Saw plan {} generated after the end time of event read period {}. Pausing {} at offset {}.",
record.value(), eventReadPeriodEndMs, tp, record.offset());
partitionsToPause.add(tp);
} else {
addMaintenancePlan(record.value(), maintenanceEvents);
}
}
if (!partitionsToPause.isEmpty()) {
_consumer.pause(partitionsToPause);
partitionsToPause.clear();
}
} while (!consumptionDone(_consumer, endOffsets) && _kafkaCruiseControl.timeMs() < timeoutEndMs);
if (maintenanceEvents.size() > 0) {
LOG.info("Retrieved {} maintenance plans from partitions {} (range [{},{}]).", maintenanceEvents.size(),
_currentPartitionAssignment, _lastEventReadPeriodEndTimeMs, eventReadPeriodEndMs);
}
} finally {
_lastEventReadPeriodEndTimeMs = eventReadPeriodEndMs;
}
return maintenanceEvents;
}
|
@Test
public void testMaintenanceEventTopicCreationUpdateAndRead()
throws ExecutionException, InterruptedException, SamplingException {
// Verify that the maintenance event topic has been created with the desired properties.
verify(TEST_TOPIC_PARTITION_COUNT, TEST_TOPIC_REPLICATION_FACTOR, TEST_TOPIC_RETENTION_TIME_MS, false);
// Verify that the maintenance event topic properties can be updated if the topic already exists with different properties.
String newPartitionCount = String.valueOf(Integer.parseInt(TEST_TOPIC_PARTITION_COUNT) * 2);
String newRF = String.valueOf(Short.parseShort(TEST_TOPIC_REPLICATION_FACTOR) + 1);
String newRetentionMs = String.valueOf(Long.MAX_VALUE);
KafkaCruiseControl mockKafkaCruiseControl = EasyMock.mock(KafkaCruiseControl.class);
Map<String, Object> parameterConfigOverrides = Map.of(KAFKA_CRUISE_CONTROL_OBJECT_CONFIG, mockKafkaCruiseControl,
MAINTENANCE_EVENT_TOPIC_REPLICATION_FACTOR_CONFIG, newRF,
MAINTENANCE_EVENT_TOPIC_PARTITION_COUNT_CONFIG, newPartitionCount,
MAINTENANCE_EVENT_TOPIC_RETENTION_MS_CONFIG, newRetentionMs);
// The current time is expected to cause (1) a valid rebalance plan creation, but (2) an expired demote broker plan.
long currentMockTime = TEST_REBALANCE_PLAN_TIME + DEFAULT_MAINTENANCE_PLAN_EXPIRATION_MS;
EasyMock.expect(mockKafkaCruiseControl.timeMs()).andReturn(currentMockTime).anyTimes();
EasyMock.expect(mockKafkaCruiseControl.adminClient())
.andReturn(createAdminClient(KafkaCruiseControlUtils.parseAdminClientConfigs(_config))).anyTimes();
EasyMock.expect(mockKafkaCruiseControl.config()).andReturn(_config).anyTimes();
EasyMock.replay(mockKafkaCruiseControl);
MaintenanceEventReader maintenanceEventReader = _config.getConfiguredInstance(AnomalyDetectorConfig.MAINTENANCE_EVENT_READER_CLASS_CONFIG,
MaintenanceEventTopicReader.class,
parameterConfigOverrides);
assertNotNull(maintenanceEventReader);
verify(newPartitionCount, newRF, newRetentionMs, true);
Set<MaintenanceEvent> events = maintenanceEventReader.readEvents(TEST_TIMEOUT);
EasyMock.verify(mockKafkaCruiseControl);
assertEquals(1, events.size());
MaintenanceEvent maintenanceEvent = events.iterator().next();
assertEquals(MAINTENANCE_EVENT, maintenanceEvent.anomalyType());
assertEquals(currentMockTime, maintenanceEvent.detectionTimeMs());
assertEquals(REBALANCE, maintenanceEvent.maintenanceEventType());
}
|
public String abbreviate(String fqClassName) {
StringBuilder buf = new StringBuilder(targetLength);
if (fqClassName == null) {
throw new IllegalArgumentException("Class name may not be null");
}
int inLen = fqClassName.length();
if (inLen < targetLength) {
return fqClassName;
}
int[] dotIndexesArray = new int[ClassicConstants.MAX_DOTS];
// a.b.c contains 2 dots but 2+1 parts.
// see also http://jira.qos.ch/browse/LBCLASSIC-110
int[] lengthArray = new int[ClassicConstants.MAX_DOTS + 1];
int dotCount = computeDotIndexes(fqClassName, dotIndexesArray);
// System.out.println();
// System.out.println("Dot count for [" + className + "] is " + dotCount);
        // if there are no dots then abbreviation is not possible
if (dotCount == 0) {
return fqClassName;
}
// printArray("dotArray: ", dotArray);
computeLengthArray(fqClassName, dotIndexesArray, lengthArray, dotCount);
// printArray("lengthArray: ", lengthArray);
for (int i = 0; i <= dotCount; i++) {
if (i == 0) {
buf.append(fqClassName.substring(0, lengthArray[i] - 1));
} else {
buf.append(fqClassName.substring(dotIndexesArray[i - 1],
dotIndexesArray[i - 1] + lengthArray[i]));
}
// System.out.println("i=" + i + ", buf=" + buf);
}
return buf.toString();
}
|
@Test
public void testNoDot() {
TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(1);
String name = "hello";
assertEquals(name, abbreviator.abbreviate(name));
}
|
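// Hedged sketch for the abbreviator above (logback's TargetLengthBasedClassNameAbbreviator):
// package segments are shortened towards one character each until the target length fits, while
// the class name itself is preserved.
public void abbreviateSketch() {
    TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(5);
    String abbreviated = abbreviator.abbreviate("mainPackage.sub.sample.Bar");
    // expected to be roughly "m.s.s.Bar" for such a small target length
}
|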
@Override
public WxMaPhoneNumberInfo getWxMaPhoneNumberInfo(Integer userType, String phoneCode) {
WxMaService service = getWxMaService(userType);
try {
return service.getUserService().getPhoneNoInfo(phoneCode);
} catch (WxErrorException e) {
log.error("[getPhoneNoInfo][userType({}) phoneCode({}) 获得手机号失败]", userType, phoneCode, e);
throw exception(SOCIAL_CLIENT_WEIXIN_MINI_APP_PHONE_CODE_ERROR);
}
}
|
@Test
public void testGetWxMaPhoneNumberInfo_success() throws WxErrorException {
        // prepare parameters
Integer userType = randomPojo(UserTypeEnum.class).getValue();
String phoneCode = randomString();
        // mock methods
WxMaUserService userService = mock(WxMaUserService.class);
when(wxMaService.getUserService()).thenReturn(userService);
WxMaPhoneNumberInfo phoneNumber = randomPojo(WxMaPhoneNumberInfo.class);
when(userService.getPhoneNoInfo(eq(phoneCode))).thenReturn(phoneNumber);
        // call
WxMaPhoneNumberInfo result = socialClientService.getWxMaPhoneNumberInfo(userType, phoneCode);
        // assert
assertSame(phoneNumber, result);
}
|
public static ByteArrayClassLoader compile(
ClassLoader parentClassLoader, CompileUnit... compileUnits) {
final Map<String, byte[]> classes = toBytecode(parentClassLoader, compileUnits);
    // Set up a class loader that finds and defines the generated classes.
return new ByteArrayClassLoader(classes, parentClassLoader);
}
|
@Test
public void compile() throws Exception {
CompileUnit unit1 =
new CompileUnit(
"demo.pkg1",
"A",
(""
+ "package demo.pkg1;\n"
+ "import demo.pkg2.*;\n"
+ "public class A {\n"
+ " public static String main() { return B.hello(); }\n"
+ " public static String hello() { return \"HELLO\"; }\n"
+ "}"));
CompileUnit unit2 =
new CompileUnit(
"demo.pkg2",
"B",
(""
+ "package demo.pkg2;\n"
+ "import demo.pkg1.*;\n"
+ "public class B {\n"
+ " public static String hello() { return A.hello(); }\n"
+ "}"));
ClassLoader classLoader =
JaninoUtils.compile(Thread.currentThread().getContextClassLoader(), unit1, unit2);
Assert.assertEquals(
"HELLO", classLoader.loadClass("demo.pkg1.A").getMethod("main").invoke(null));
}
|
public static void close(AutoCloseable... closeables) {
if (CollectionUtils.isNotEmpty(closeables)) {
for (AutoCloseable closeable : closeables) {
close(closeable);
}
}
}
|
@Test
public void testIgnoreExceptionOnClose() {
FakeResource resource = new FakeResource() {
@Override
public void close() throws Exception {
super.close();
throw new Exception("Ops!");
}
};
IOUtil.close(resource);
Assertions.assertTrue(resource.isClose());
}
|
@GET
@Path("version")
@Produces({MediaType.APPLICATION_JSON})
public Map<String, Object> version() {
return SUPPORTED_VERSIONS;
}
|
@Test
public void testVersions() {
assertEquals(server.version().get("version"), "v1");
}
|
public static boolean matchInterface(String address, String interfaceMask) {
final AddressMatcher mask;
try {
mask = getAddressMatcher(interfaceMask);
} catch (Exception e) {
return false;
}
return mask.match(address);
}
|
@Test
public void testMatchInterface_whenInvalidInterface_thenReturnFalse() {
assertFalse(AddressUtil.matchInterface("10.235.194.23", "bar"));
}
|
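// Hedged usage sketch for matchInterface above (assumes Hazelcast-style wildcard syntax in the
// interface mask; an unparsable mask, as in the test above, simply yields false).
public void matchInterfaceSketch() {
    boolean sameSubnet = AddressUtil.matchInterface("10.235.194.23", "10.235.*.*"); // expected true
    boolean otherSubnet = AddressUtil.matchInterface("192.168.1.1", "10.235.*.*");  // expected false
}
|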
@Override public HashSlotCursor16byteKey cursor() {
return new CursorLongKey2();
}
|
@Test
@RequireAssertEnabled
public void testCursor_advance_afterAdvanceReturnsFalse() {
insert(randomKey(), randomKey());
HashSlotCursor16byteKey cursor = hsa.cursor();
cursor.advance();
cursor.advance();
assertThrows(AssertionError.class, cursor::advance);
}
|
public static void init() {
initHandlers();
initChains();
initPaths();
initDefaultHandlers();
ModuleRegistry.registerModule(HandlerConfig.CONFIG_NAME, Handler.class.getName(), Config.getNoneDecryptedInstance().getJsonMapConfigNoCache(HandlerConfig.CONFIG_NAME), null);
}
|
@Test
public void validConfig_init_handlersCreated() {
Handler.init();
Map<String, List<HttpHandler>> handlers = Handler.handlerListById;
Assert.assertEquals(1, handlers.get("third").size());
Assert.assertEquals(2, handlers.get("secondBeforeFirst").size());
}
|
@Override
public synchronized void editSchedule() {
updateConfigIfNeeded();
long startTs = clock.getTime();
CSQueue root = scheduler.getRootQueue();
Resource clusterResources = Resources.clone(scheduler.getClusterResource());
containerBasedPreemptOrKill(root, clusterResources);
if (LOG.isDebugEnabled()) {
LOG.debug("Total time used=" + (clock.getTime() - startTs) + " ms.");
}
}
|
@Test
public void testPerQueueDisablePreemption() {
int[][] qData = new int[][]{
// / A B C
{ 100, 55, 25, 20 }, // abs
{ 100, 100, 100, 100 }, // maxCap
{ 100, 0, 54, 46 }, // used
{ 10, 10, 0, 0 }, // pending
{ 0, 0, 0, 0 }, // reserved
// appA appB appC
{ 3, 1, 1, 1 }, // apps
{ -1, 1, 1, 1 }, // req granularity
{ 3, 0, 0, 0 }, // subqueues
};
conf.setPreemptionDisabled(QUEUE_B, true);
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// Since queueB is not preemptable, get resources from queueC
verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appC)));
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appB)));
// Since queueB is preemptable, resources will be preempted
// from both queueB and queueC. Test must be reset so that the mDisp
// event handler will count only events from the following test and not the
// previous one.
setup();
conf.setPreemptionDisabled(QUEUE_B, false);
ProportionalCapacityPreemptionPolicy policy2 = buildPolicy(qData);
policy2.editSchedule();
verify(mDisp, times(4)).handle(argThat(new IsPreemptionRequestFor(appB)));
verify(mDisp, times(6)).handle(argThat(new IsPreemptionRequestFor(appC)));
}
|
public static HttpAction buildUnauthenticatedAction(final WebContext context) {
val hasHeader = context.getResponseHeader(HttpConstants.AUTHENTICATE_HEADER).isPresent();
if (alwaysUse401ForUnauthenticated) {
// add the WWW-Authenticate header to be compliant with the HTTP spec if it does not already exist
if (!hasHeader) {
context.setResponseHeader(HttpConstants.AUTHENTICATE_HEADER, HttpConstants.BEARER_HEADER_PREFIX + "realm=\"pac4j\"");
}
return new UnauthorizedAction();
} else {
if (hasHeader) {
return new UnauthorizedAction();
} else {
return new ForbiddenAction();
}
}
}
|
@Test
public void testBuildUnauthenticated401WithHeader() {
final WebContext context = MockWebContext.create();
context.setResponseHeader(HttpConstants.AUTHENTICATE_HEADER, VALUE);
val action = HttpActionHelper.buildUnauthenticatedAction(context);
assertTrue(action instanceof UnauthorizedAction);
assertEquals(VALUE, context.getResponseHeader(HttpConstants.AUTHENTICATE_HEADER).get());
}
|
@Override
public Settings getSettings() {
return new Settings() {
@Override
@CheckForNull
public String getString(String key) {
return config.getConfiguration().get(key).orElse(null);
}
@Override
public String[] getStringArray(String key) {
return config.getConfiguration().getStringArray(key);
}
};
}
|
@Test
public void get_string_settings() {
MapSettings serverSettings = new MapSettings();
serverSettings.setProperty("prop", "value");
when(settingsRepository.getConfiguration()).thenReturn(serverSettings.asConfig());
MeasureComputerContextImpl underTest = newContext(FILE_1_REF);
assertThat(underTest.getSettings().getString("prop")).isEqualTo("value");
assertThat(underTest.getSettings().getString("unknown")).isNull();
}
|
@PublicEvolving
public static <IN1, IN2, OUT> TypeInformation<OUT> getJoinReturnTypes(
JoinFunction<IN1, IN2, OUT> joinInterface,
TypeInformation<IN1> in1Type,
TypeInformation<IN2> in2Type) {
return getJoinReturnTypes(joinInterface, in1Type, in2Type, null, false);
}
|
@Test
void testInputInferenceWithCustomTupleAndRichFunction() {
JoinFunction<
CustomTuple2WithArray<Long>,
CustomTuple2WithArray<Long>,
CustomTuple2WithArray<Long>>
function = new JoinWithCustomTuple2WithArray<>();
TypeInformation<?> ti =
TypeExtractor.getJoinReturnTypes(
function,
new TypeHint<CustomTuple2WithArray<Long>>() {}.getTypeInfo(),
new TypeHint<CustomTuple2WithArray<Long>>() {}.getTypeInfo());
assertThat(ti.isTupleType()).isTrue();
TupleTypeInfo<?> tti = (TupleTypeInfo<?>) ti;
assertThat(tti.getTypeAt(1)).isEqualTo(BasicTypeInfo.LONG_TYPE_INFO);
assertThat(tti.getTypeAt(0) instanceof ObjectArrayTypeInfo<?, ?>).isTrue();
ObjectArrayTypeInfo<?, ?> oati = (ObjectArrayTypeInfo<?, ?>) tti.getTypeAt(0);
assertThat(oati.getComponentInfo()).isEqualTo(BasicTypeInfo.LONG_TYPE_INFO);
}
|
SObjectNode addNode(final SObjectNode node) {
final String givenObjectType = node.getObjectType();
if (objectType != null && !objectType.equals(givenObjectType)) {
throw new IllegalArgumentException(
"SObjectTree can hold only records of the same type, previously given: " + objectType
+ ", and now trying to add: "
+ givenObjectType);
}
objectType = givenObjectType;
records.add(node);
return node;
}
|
@Test
public void shouldSerializeToJson() throws JsonProcessingException {
final ObjectMapper mapper = JsonUtils.createObjectMapper();
mapper.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
final ObjectWriter writer = mapper.writerFor(SObjectTree.class);
final SObjectTree tree = new SObjectTree();
final SObjectNode account1 = new SObjectNode(tree, simpleAccount);
account1.addChild("Contacts", smith);
account1.addChild("Contacts", evans);
tree.addNode(account1);
final SObjectNode account2 = new SObjectNode(tree, simpleAccount2);
tree.addNode(account2);
final String json = writer.writeValueAsString(tree);
final String expected = "{" + "\"records\":[" + "{" + "\"Industry\":\"Banking\"," + "\"Name\":\"SampleAccount\","
+ "\"NumberOfEmployees\":100,"
+ "\"Phone\":\"1234567890\"," + "\"Website\":\"www.salesforce.com\"," + "\"attributes\":{"
+ "\"referenceId\":\"ref1\"," + "\"type\":\"Account\","
+ "\"url\":null" + "}," + "\"Contacts\":{" + "\"records\":[" + "{"
+ "\"Email\":\"[email protected]\"," + "\"LastName\":\"Smith\","
+ "\"Title\":\"President\"," + "\"attributes\":{" + "\"referenceId\":\"ref2\","
+ "\"type\":\"Contact\"," + "\"url\":null" + "}" + "}," + "{"
+ "\"Email\":\"[email protected]\"," + "\"LastName\":\"Evans\","
+ "\"Title\":\"Vice President\"," + "\"attributes\":{"
+ "\"referenceId\":\"ref3\"," + "\"type\":\"Contact\"," + "\"url\":null" + "}" + "}" + "]" + "}"
+ "}," + "{" + "\"Industry\":\"Banking\","
+ "\"Name\":\"SampleAccount2\"," + "\"NumberOfEmployees\":100," + "\"Phone\":\"1234567890\","
+ "\"Website\":\"www.salesforce2.com\","
+ "\"attributes\":{" + "\"referenceId\":\"ref4\"," + "\"type\":\"Account\"," + "\"url\":null"
+ "}" + "}" + "]" + "}";
assertEquals(expected, json, "Should serialize to JSON as in Salesforce example");
}
|
@VisibleForTesting
static void validateUpsertAndDedupConfig(TableConfig tableConfig, Schema schema) {
if (tableConfig.getUpsertMode() == UpsertConfig.Mode.NONE && (tableConfig.getDedupConfig() == null
|| !tableConfig.getDedupConfig().isDedupEnabled())) {
return;
}
boolean isUpsertEnabled = tableConfig.getUpsertMode() != UpsertConfig.Mode.NONE;
boolean isDedupEnabled = tableConfig.getDedupConfig() != null && tableConfig.getDedupConfig().isDedupEnabled();
// check both upsert and dedup are not enabled simultaneously
Preconditions.checkState(!(isUpsertEnabled && isDedupEnabled),
"A table can have either Upsert or Dedup enabled, but not both");
// check table type is realtime
Preconditions.checkState(tableConfig.getTableType() == TableType.REALTIME,
"Upsert/Dedup table is for realtime table only.");
// primary key exists
Preconditions.checkState(CollectionUtils.isNotEmpty(schema.getPrimaryKeyColumns()),
"Upsert/Dedup table must have primary key columns in the schema");
// replica group is configured for routing
Preconditions.checkState(
tableConfig.getRoutingConfig() != null && isRoutingStrategyAllowedForUpsert(tableConfig.getRoutingConfig()),
"Upsert/Dedup table must use strict replica-group (i.e. strictReplicaGroup) based routing");
Preconditions.checkState(tableConfig.getTenantConfig().getTagOverrideConfig() == null || (
tableConfig.getTenantConfig().getTagOverrideConfig().getRealtimeConsuming() == null
&& tableConfig.getTenantConfig().getTagOverrideConfig().getRealtimeCompleted() == null),
"Invalid tenant tag override used for Upsert/Dedup table");
// specifically for upsert
UpsertConfig upsertConfig = tableConfig.getUpsertConfig();
if (upsertConfig != null) {
// no startree index
Preconditions.checkState(CollectionUtils.isEmpty(tableConfig.getIndexingConfig().getStarTreeIndexConfigs())
&& !tableConfig.getIndexingConfig().isEnableDefaultStarTree(),
"The upsert table cannot have star-tree index.");
// comparison column exists
List<String> comparisonColumns = upsertConfig.getComparisonColumns();
if (comparisonColumns != null) {
for (String column : comparisonColumns) {
Preconditions.checkState(schema.hasColumn(column), "The comparison column does not exist on schema");
}
}
// Delete record column exist and is a BOOLEAN field
String deleteRecordColumn = upsertConfig.getDeleteRecordColumn();
if (deleteRecordColumn != null) {
FieldSpec fieldSpec = schema.getFieldSpecFor(deleteRecordColumn);
Preconditions.checkState(fieldSpec != null,
String.format("Column %s specified in deleteRecordColumn does not exist", deleteRecordColumn));
Preconditions.checkState(fieldSpec.isSingleValueField(),
String.format("The deleteRecordColumn - %s must be a single-valued column", deleteRecordColumn));
DataType dataType = fieldSpec.getDataType();
Preconditions.checkState(dataType == DataType.BOOLEAN || dataType == DataType.STRING || dataType.isNumeric(),
String.format("The deleteRecordColumn - %s must be of type: String / Boolean / Numeric",
deleteRecordColumn));
}
String outOfOrderRecordColumn = upsertConfig.getOutOfOrderRecordColumn();
Preconditions.checkState(outOfOrderRecordColumn == null || !upsertConfig.isDropOutOfOrderRecord(),
"outOfOrderRecordColumn and dropOutOfOrderRecord shouldn't exist together for upsert table");
if (outOfOrderRecordColumn != null) {
FieldSpec fieldSpec = schema.getFieldSpecFor(outOfOrderRecordColumn);
Preconditions.checkState(
fieldSpec != null && fieldSpec.isSingleValueField() && fieldSpec.getDataType() == DataType.BOOLEAN,
"The outOfOrderRecordColumn must be a single-valued BOOLEAN column");
}
}
if (upsertConfig != null && upsertConfig.isEnableDeletedKeysCompactionConsistency()) {
// enableDeletedKeysCompactionConsistency shouldn't exist with metadataTTL
Preconditions.checkState(upsertConfig.getMetadataTTL() == 0,
"enableDeletedKeysCompactionConsistency and metadataTTL shouldn't exist together for upsert table");
// enableDeletedKeysCompactionConsistency shouldn't exist with enablePreload
Preconditions.checkState(!upsertConfig.isEnablePreload(),
"enableDeletedKeysCompactionConsistency and enablePreload shouldn't exist together for upsert table");
// enableDeletedKeysCompactionConsistency should exist with deletedKeysTTL
Preconditions.checkState(upsertConfig.getDeletedKeysTTL() > 0,
"enableDeletedKeysCompactionConsistency should exist with deletedKeysTTL for upsert table");
// enableDeletedKeysCompactionConsistency should exist with enableSnapshot
Preconditions.checkState(upsertConfig.isEnableSnapshot(),
"enableDeletedKeysCompactionConsistency should exist with enableSnapshot for upsert table");
// enableDeletedKeysCompactionConsistency should exist with UpsertCompactionTask
TableTaskConfig taskConfig = tableConfig.getTaskConfig();
Preconditions.checkState(taskConfig != null
&& taskConfig.getTaskTypeConfigsMap().containsKey(UPSERT_COMPACTION_TASK_TYPE),
"enableDeletedKeysCompactionConsistency should exist with UpsertCompactionTask for upsert table");
}
Preconditions.checkState(
tableConfig.getInstanceAssignmentConfigMap() == null || !tableConfig.getInstanceAssignmentConfigMap()
.containsKey(InstancePartitionsType.COMPLETED),
"InstanceAssignmentConfig for COMPLETED is not allowed for upsert tables");
validateAggregateMetricsForUpsertConfig(tableConfig);
validateTTLForUpsertConfig(tableConfig, schema);
}
|
@Test
public void testValidateUpsertConfig() {
Schema schema =
new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addSingleValueDimension("myCol", FieldSpec.DataType.STRING)
.build();
UpsertConfig upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
TableConfig tableConfig =
new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setUpsertConfig(upsertConfig).build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
Assert.fail();
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(), "Upsert/Dedup table is for realtime table only.");
}
tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setUpsertConfig(upsertConfig).build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
Assert.fail();
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(), "Upsert/Dedup table must have primary key columns in the schema");
}
schema =
new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addSingleValueDimension("myCol", FieldSpec.DataType.STRING)
.setPrimaryKeyColumns(Lists.newArrayList("myCol")).build();
Map<String, String> streamConfigs = getStreamConfigs();
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setUpsertConfig(upsertConfig)
.setStreamConfigs(streamConfigs).build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
Assert.fail();
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(),
"Upsert/Dedup table must use strict replica-group (i.e. strictReplicaGroup) based routing");
}
// invalid tag override with upsert
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN)
.setUpsertConfig(new UpsertConfig(UpsertConfig.Mode.FULL)).setStreamConfigs(getStreamConfigs())
.setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.setTagOverrideConfig(new TagOverrideConfig("T1_REALTIME", "T2_REALTIME")).build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
Assert.fail("Tag override must not be allowed with upsert");
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(), "Invalid tenant tag override used for Upsert/Dedup table");
}
// tag override even with same tag for CONSUMING and COMPLETED with upsert should fail
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN)
.setUpsertConfig(new UpsertConfig(UpsertConfig.Mode.FULL)).setStreamConfigs(getStreamConfigs())
.setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.setTagOverrideConfig(new TagOverrideConfig("T1_REALTIME", "T1_REALTIME")).build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
Assert.fail("Tag override must not be allowed with upsert");
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(), "Invalid tenant tag override used for Upsert/Dedup table");
}
// empty tag override with upsert should pass
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN)
.setUpsertConfig(new UpsertConfig(UpsertConfig.Mode.FULL)).setStreamConfigs(getStreamConfigs())
.setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.setTagOverrideConfig(new TagOverrideConfig(null, null)).build();
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setUpsertConfig(upsertConfig)
.setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.setStreamConfigs(streamConfigs).build();
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
StarTreeIndexConfig starTreeIndexConfig = new StarTreeIndexConfig(Lists.newArrayList("myCol"), null,
Collections.singletonList(
new AggregationFunctionColumnPair(AggregationFunctionType.COUNT, "myCol").toColumnName()), null, 10);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setUpsertConfig(upsertConfig)
.setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.setStarTreeIndexConfigs(Lists.newArrayList(starTreeIndexConfig)).setStreamConfigs(streamConfigs).build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
Assert.fail();
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(), "The upsert table cannot have star-tree index.");
}
    // With Aggregate Metrics
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setUpsertConfig(upsertConfig)
.setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.setStreamConfigs(streamConfigs).setAggregateMetrics(true).build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
Assert.fail();
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(), "Metrics aggregation and upsert cannot be enabled together");
}
    // With aggregation Configs in Ingestion Config
IngestionConfig ingestionConfig = new IngestionConfig();
ingestionConfig.setAggregationConfigs(Collections.singletonList(new AggregationConfig("twiceSum", "SUM(twice)")));
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setUpsertConfig(upsertConfig)
.setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.setStreamConfigs(streamConfigs).setIngestionConfig(ingestionConfig).build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
Assert.fail();
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(), "Metrics aggregation and upsert cannot be enabled together");
}
    // With aggregation Configs in Ingestion Config and IndexingConfig at the same time
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setUpsertConfig(upsertConfig)
.setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.setStreamConfigs(streamConfigs).setAggregateMetrics(true).setIngestionConfig(ingestionConfig).build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
Assert.fail();
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(),
"Metrics aggregation cannot be enabled in the Indexing Config and Ingestion Config at the same time");
}
// Table upsert with delete column
String stringTypeDelCol = "stringTypeDelCol";
String delCol = "myDelCol";
String mvCol = "mvCol";
String timestampCol = "timestampCol";
String invalidCol = "invalidCol";
schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).setPrimaryKeyColumns(Lists.newArrayList("myPkCol"))
.addSingleValueDimension("myCol", FieldSpec.DataType.STRING)
.addSingleValueDimension(stringTypeDelCol, FieldSpec.DataType.STRING)
.addSingleValueDimension(delCol, FieldSpec.DataType.BOOLEAN)
.addSingleValueDimension(timestampCol, FieldSpec.DataType.TIMESTAMP)
.addMultiValueDimension(mvCol, FieldSpec.DataType.STRING).build();
streamConfigs = getStreamConfigs();
streamConfigs.put("stream.kafka.consumer.type", "simple");
upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
upsertConfig.setDeleteRecordColumn(stringTypeDelCol);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.fail("Shouldn't fail table creation when delete column type is single-valued.");
}
upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
upsertConfig.setDeleteRecordColumn(delCol);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.fail("Shouldn't fail table creation when delete column type is single-valued.");
}
upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
upsertConfig.setDeleteRecordColumn(timestampCol);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false)).build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
Assert.fail("Should have failed table creation when delete column type is timestamp.");
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(),
"The deleteRecordColumn - timestampCol must be of type: String / Boolean / Numeric");
}
upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
upsertConfig.setDeleteRecordColumn(invalidCol);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
Assert.fail("Should have failed table creation when invalid delete column entered.");
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(), "Column invalidCol specified in deleteRecordColumn does not exist");
}
upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
upsertConfig.setDeleteRecordColumn(mvCol);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
Assert.fail("Should have failed table creation when delete column type is multi-valued.");
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(), "The deleteRecordColumn - mvCol must be a single-valued column");
}
// upsert deleted-keys-ttl configs with no deleted column
schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).setPrimaryKeyColumns(Lists.newArrayList("myPkCol"))
.addSingleValueDimension("myCol", FieldSpec.DataType.STRING)
.addSingleValueDimension(delCol, FieldSpec.DataType.BOOLEAN).build();
upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
upsertConfig.setDeletedKeysTTL(3600);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false)).build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(), "Deleted Keys TTL can only be enabled with deleteRecordColumn set.");
}
upsertConfig.setDeleteRecordColumn(delCol);
// multiple comparison columns set for deleted-keys-ttl
schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).setPrimaryKeyColumns(Lists.newArrayList("myPkCol"))
.addSingleValueDimension("myCol", FieldSpec.DataType.STRING)
.addDateTime(TIME_COLUMN, FieldSpec.DataType.LONG, "1:MILLISECONDS:EPOCH", "1:MILLISECONDS")
.addSingleValueDimension(delCol, FieldSpec.DataType.BOOLEAN).build();
upsertConfig.setComparisonColumns(Lists.newArrayList(TIME_COLUMN, "myCol"));
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false)).build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(),
"MetadataTTL / DeletedKeysTTL does not work with multiple comparison columns");
}
// comparison column with non-numeric type
upsertConfig.setComparisonColumns(Lists.newArrayList("myCol"));
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false)).build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(),
"MetadataTTL / DeletedKeysTTL must have comparison column: myCol in numeric type, found: STRING");
}
// time column as comparison column
upsertConfig.setComparisonColumns(Lists.newArrayList(TIME_COLUMN));
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false)).build();
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
// upsert out-of-order configs
String outOfOrderRecordColumn = "outOfOrderRecordColumn";
boolean dropOutOfOrderRecord = true;
schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).setPrimaryKeyColumns(Lists.newArrayList("myPkCol"))
.addSingleValueDimension("myCol", FieldSpec.DataType.STRING)
.addSingleValueDimension(outOfOrderRecordColumn, FieldSpec.DataType.BOOLEAN).build();
streamConfigs = getStreamConfigs();
streamConfigs.put("stream.kafka.consumer.type", "simple");
upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
upsertConfig.setDropOutOfOrderRecord(dropOutOfOrderRecord);
upsertConfig.setOutOfOrderRecordColumn(outOfOrderRecordColumn);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(),
"outOfOrderRecordColumn and dropOutOfOrderRecord shouldn't exist together for upsert table");
}
// outOfOrderRecordColumn not of type BOOLEAN
schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).setPrimaryKeyColumns(Lists.newArrayList("myPkCol"))
.addSingleValueDimension("myCol", FieldSpec.DataType.STRING)
.addSingleValueDimension(outOfOrderRecordColumn, FieldSpec.DataType.STRING).build();
streamConfigs = getStreamConfigs();
streamConfigs.put("stream.kafka.consumer.type", "simple");
upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
upsertConfig.setOutOfOrderRecordColumn(outOfOrderRecordColumn);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(), "The outOfOrderRecordColumn must be a single-valued BOOLEAN column");
}
// test enableDeletedKeysCompactionConsistency shouldn't exist with metadataTTL
upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
upsertConfig.setEnableDeletedKeysCompactionConsistency(true);
upsertConfig.setMetadataTTL(1.0);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(),
"enableDeletedKeysCompactionConsistency and metadataTTL shouldn't exist together for upsert table");
}
// test enableDeletedKeysCompactionConsistency shouldn't exist with enablePreload
upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
upsertConfig.setEnableDeletedKeysCompactionConsistency(true);
upsertConfig.setEnablePreload(true);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(),
"enableDeletedKeysCompactionConsistency and enablePreload shouldn't exist together for upsert table");
}
// test enableDeletedKeysCompactionConsistency should exist with deletedKeysTTL
upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
upsertConfig.setEnableDeletedKeysCompactionConsistency(true);
upsertConfig.setDeletedKeysTTL(0);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(),
"enableDeletedKeysCompactionConsistency should exist with deletedKeysTTL for upsert table");
}
// test enableDeletedKeysCompactionConsistency should exist with enableSnapshot
upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
upsertConfig.setEnableDeletedKeysCompactionConsistency(true);
upsertConfig.setDeletedKeysTTL(100);
upsertConfig.setEnableSnapshot(false);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(),
"enableDeletedKeysCompactionConsistency should exist with enableSnapshot for upsert table");
}
// test enableDeletedKeysCompactionConsistency should exist with UpsertCompactionTask
upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
upsertConfig.setEnableDeletedKeysCompactionConsistency(true);
upsertConfig.setDeletedKeysTTL(100);
upsertConfig.setEnableSnapshot(true);
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setStreamConfigs(streamConfigs)
.setUpsertConfig(upsertConfig).setRoutingConfig(
new RoutingConfig(null, null, RoutingConfig.STRICT_REPLICA_GROUP_INSTANCE_SELECTOR_TYPE, false))
.build();
try {
TableConfigUtils.validateUpsertAndDedupConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.assertEquals(e.getMessage(),
"enableDeletedKeysCompactionConsistency should exist with UpsertCompactionTask for upsert table");
}
}
|
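// Loads the stored secret key through mSecretKeyManager. Returns an empty string when no
// SAEncryptListener can handle the loaded key, the key's string form on success, and null
// if reading the key fails with a JSONException.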
@Override
public String loadSecretKey() {
try {
SecreteKey secreteKey = mSecretKeyManager.loadSecretKey();
SAEncryptListener mEncryptListener = mSecretKeyManager.getEncryptListener(secreteKey);
if (mEncryptListener == null) {
return "";
}
return secreteKey.toString();
} catch (JSONException e) {
SALog.printStackTrace(e);
}
return null;
}
|
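// Smoke test: initializes the SDK via SAHelper and calls loadSecretKey() through
// SAEncryptAPIImpl, verifying that the call completes without throwing.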
@Test
public void loadSecretKey() {
SAHelper.initSensors(mApplication);
SAEncryptAPIImpl encryptAPIImpl = new SAEncryptAPIImpl(SensorsDataAPI.sharedInstance(mApplication).getSAContextManager());
encryptAPIImpl.loadSecretKey();
}
|
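// Builds the response data for a batch-create request. When the resource returns a
// BatchCreateKVResult and the caller requested the entities back, each created entity is
// projected and wrapped in a CreateIdEntityStatus; otherwise only CreateIdStatus entries
// (status, id, location) are produced. A null result list or a null element in it is
// treated as a developer error and surfaces as a 500 RestLiServiceException.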
@Override
public RestLiResponseData<BatchCreateResponseEnvelope> buildRestLiResponseData(Request request,
RoutingResult routingResult,
Object result,
Map<String, String> headers,
List<HttpCookie> cookies)
{
Object altKey = null;
if (routingResult.getContext().hasParameter(RestConstants.ALT_KEY_PARAM))
{
altKey = routingResult.getContext().getParameter(RestConstants.ALT_KEY_PARAM);
}
final ProtocolVersion protocolVersion = ProtocolVersionUtil.extractProtocolVersion(headers);
final ResourceContext resourceContext = routingResult.getContext();
if (result instanceof BatchCreateKVResult && resourceContext.isReturnEntityRequested())
{
BatchCreateKVResult<?, ?> list = (BatchCreateKVResult<?, ?>) result;
if (list.getResults() == null)
{
throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
"Unexpected null encountered. Null List inside of a BatchCreateKVResult returned by the resource method: " + routingResult
.getResourceMethod());
}
List<BatchCreateResponseEnvelope.CollectionCreateResponseItem> collectionCreateList = new ArrayList<>(list.getResults().size());
TimingContextUtil.beginTiming(routingResult.getContext().getRawRequestContext(),
FrameworkTimingKeys.SERVER_RESPONSE_RESTLI_PROJECTION_APPLY.key());
for (CreateKVResponse<?, ?> createKVResponse : list.getResults())
{
if (createKVResponse == null)
{
throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
"Unexpected null encountered. Null element inside of List inside of a BatchCreateKVResult returned by the resource method: "
+ routingResult.getResourceMethod());
}
else
{
Object id = ResponseUtils.translateCanonicalKeyToAlternativeKeyIfNeeded(createKVResponse.getId(), routingResult);
if (createKVResponse.getError() == null)
{
DataMap entityData = createKVResponse.getEntity() != null ? createKVResponse.getEntity().data() : null;
final DataMap data = RestUtils.projectFields(entityData, resourceContext);
CreateIdEntityStatus<Object, RecordTemplate> entry = new CreateIdEntityStatus<>(
createKVResponse.getStatus().getCode(),
id,
new AnyRecord(data),
getLocationUri(request, id, altKey, protocolVersion), // location uri
null,
protocolVersion);
collectionCreateList.add(new BatchCreateResponseEnvelope.CollectionCreateResponseItem(entry));
}
else
{
collectionCreateList.add(new BatchCreateResponseEnvelope.CollectionCreateResponseItem(createKVResponse.getError()));
}
}
}
TimingContextUtil.endTiming(routingResult.getContext().getRawRequestContext(),
FrameworkTimingKeys.SERVER_RESPONSE_RESTLI_PROJECTION_APPLY.key());
return new RestLiResponseDataImpl<>(new BatchCreateResponseEnvelope(HttpStatus.S_200_OK, collectionCreateList, true), headers, cookies);
}
else
{
List<? extends CreateResponse> createResponses = extractCreateResponseList(result);
//Verify that a null list was not passed into the BatchCreateResult. If so, this is a developer error.
if (createResponses == null)
{
throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
"Unexpected null encountered. Null List inside of a BatchCreateResult returned by the resource method: " + routingResult
.getResourceMethod());
}
List<BatchCreateResponseEnvelope.CollectionCreateResponseItem> collectionCreateList = new ArrayList<>(createResponses.size());
for (CreateResponse createResponse : createResponses)
{
//Verify that a null element was not passed into the BatchCreateResult list. If so, this is a developer error.
if (createResponse == null)
{
throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
"Unexpected null encountered. Null element inside of List inside of a BatchCreateResult returned by the resource method: "
+ routingResult.getResourceMethod());
}
else
{
Object id = ResponseUtils.translateCanonicalKeyToAlternativeKeyIfNeeded(createResponse.getId(), routingResult);
if (createResponse.getError() == null)
{
CreateIdStatus<Object> entry = new CreateIdStatus<>(
createResponse.getStatus().getCode(),
id,
getLocationUri(request, id, altKey, protocolVersion), // location uri
null,
protocolVersion);
collectionCreateList.add(new BatchCreateResponseEnvelope.CollectionCreateResponseItem(entry));
}
else
{
collectionCreateList.add(new BatchCreateResponseEnvelope.CollectionCreateResponseItem(createResponse.getError()));
}
}
}
return new RestLiResponseDataImpl<>(new BatchCreateResponseEnvelope(HttpStatus.S_200_OK, collectionCreateList, false), headers, cookies);
}
}
|
@Test(dataProvider = "exceptionTestData")
public void testBuilderExceptions(Object result, String expectedErrorMessage) throws URISyntaxException
{
Map<String, String> headers = ResponseBuilderUtil.getHeaders();
ResourceMethodDescriptor mockDescriptor = getMockResourceMethodDescriptor(null);
ServerResourceContext mockContext = getMockResourceContext(null);
RoutingResult routingResult = new RoutingResult(mockContext, mockDescriptor);
BatchCreateResponseBuilder responseBuilder = new BatchCreateResponseBuilder(null);
RestRequest request = new RestRequestBuilder(new URI("/foo")).build();
try
{
responseBuilder.buildRestLiResponseData(request, routingResult, result, headers, Collections.emptyList());
Assert.fail("buildRestLiResponseData should have thrown an exception because of null elements");
}
catch (RestLiServiceException e)
{
Assert.assertTrue(e.getMessage().contains(expectedErrorMessage));
}
}
|
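// Resolves the credential helper for a registry from the parsed Docker config: entries in
// "credHelpers" are matched first using the registry matchers, falling back to the global
// "credsStore". The helper binary name is derived by prefixing "docker-credential-";
// returns null when neither is configured for the registry.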
@Nullable
DockerCredentialHelper getCredentialHelperFor(String registry) {
List<Predicate<String>> registryMatchers = getRegistryMatchersFor(registry);
Map.Entry<String, String> firstCredHelperMatch =
findFirstInMapByKey(dockerConfigTemplate.getCredHelpers(), registryMatchers);
if (firstCredHelperMatch != null) {
return new DockerCredentialHelper(
firstCredHelperMatch.getKey(),
Paths.get("docker-credential-" + firstCredHelperMatch.getValue()));
}
if (dockerConfigTemplate.getCredsStore() != null) {
return new DockerCredentialHelper(
registry, Paths.get("docker-credential-" + dockerConfigTemplate.getCredsStore()));
}
return null;
}
|
@Test
public void testGetCredentialHelperFor_withHttps() throws URISyntaxException, IOException {
Path json = Paths.get(Resources.getResource("core/json/dockerconfig.json").toURI());
DockerConfig dockerConfig =
new DockerConfig(JsonTemplateMapper.readJsonFromFile(json, DockerConfigTemplate.class));
Assert.assertEquals(
Paths.get("docker-credential-credHelper for https__with.protocol.in.helpers"),
dockerConfig.getCredentialHelperFor("with.protocol.in.helpers").getCredentialHelper());
}
|
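// Converts HTTP/1.x headers to HTTP/2 pseudo-headers: requests get :path, :scheme,
// :authority and :method (with :authority taken from the Host header or, for absolute
// request-targets, from the request URI's authority), responses get :status. The remaining
// headers are copied by the overloaded toHttp2Headers(inHeaders, out).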
public static Http2Headers toHttp2Headers(HttpMessage in, boolean validateHeaders) {
HttpHeaders inHeaders = in.headers();
final Http2Headers out = new DefaultHttp2Headers(validateHeaders, inHeaders.size());
if (in instanceof HttpRequest) {
HttpRequest request = (HttpRequest) in;
String host = inHeaders.getAsString(HttpHeaderNames.HOST);
if (isOriginForm(request.uri()) || isAsteriskForm(request.uri())) {
out.path(new AsciiString(request.uri()));
setHttp2Scheme(inHeaders, out);
} else {
URI requestTargetUri = URI.create(request.uri());
out.path(toHttp2Path(requestTargetUri));
// Take from the request-line if HOST header was empty
host = isNullOrEmpty(host) ? requestTargetUri.getAuthority() : host;
setHttp2Scheme(inHeaders, requestTargetUri, out);
}
setHttp2Authority(host, out);
out.method(request.method().asciiName());
} else if (in instanceof HttpResponse) {
HttpResponse response = (HttpResponse) in;
out.status(response.status().codeAsText());
}
// Add the HTTP headers which have not been consumed above
toHttp2Headers(inHeaders, out);
return out;
}
|
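// Because the request-target begins with '/', it is handled as origin-form: the raw value
// "//path/to/something" becomes :path, :scheme comes from the x-http2-scheme extension
// header, and :authority comes from the Host header.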
@Test
public void handlesRequestWithDoubleSlashPath() throws Exception {
boolean validateHeaders = true;
HttpRequest msg = new DefaultHttpRequest(
HttpVersion.HTTP_1_1, HttpMethod.GET, "//path/to/something", validateHeaders);
HttpHeaders inHeaders = msg.headers();
inHeaders.add(CONNECTION, "foo, bar");
inHeaders.add(HOST, "example.com");
inHeaders.add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http");
inHeaders.add("hello", "world");
Http2Headers out = HttpConversionUtil.toHttp2Headers(msg, validateHeaders);
assertEquals(new AsciiString("//path/to/something"), out.path());
assertEquals(new AsciiString("http"), out.scheme());
assertEquals(new AsciiString("example.com"), out.authority());
assertEquals(HttpMethod.GET.asciiName(), out.method());
}
|
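// Compares two logical schemas for compatibility: key columns are checked first and any
// difference is reported as a key-column error; otherwise the full column lists are
// compared. Returns a description of the mismatch, or Optional.empty() when the schemas
// are compatible.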
@VisibleForTesting
static Optional<String> checkSchemas(
final LogicalSchema schema,
final LogicalSchema other
) {
final Optional<String> keyError = checkSchemas(schema.key(), other.key(), "key ")
.map(msg -> "Key columns must be identical. " + msg);
if (keyError.isPresent()) {
return keyError;
}
return checkSchemas(schema.columns(), other.columns(), "");
}
|
@Test
public void shouldEnforceNoRemovedKeyColumns() {
// Given:
final LogicalSchema someSchema = LogicalSchema.builder()
.keyColumn(ColumnName.of("k0"), SqlTypes.INTEGER)
.keyColumn(ColumnName.of("k1"), SqlTypes.INTEGER)
.valueColumn(ColumnName.of("f0"), SqlTypes.BIGINT)
.build();
final LogicalSchema otherSchema = LogicalSchema.builder()
.keyColumn(ColumnName.of("k0"), SqlTypes.INTEGER)
.valueColumn(ColumnName.of("f0"), SqlTypes.BIGINT)
.build();
// When:
final Optional<String> s = StructuredDataSource.checkSchemas(someSchema, otherSchema);
// Then:
assertThat(s.isPresent(), is(true));
assertThat(s.get(), containsString("The following key columns are changed, missing or reordered: [`k1` INTEGER KEY]"));
}
|
public RuntimeOptionsBuilder parse(String... args) {
return parse(Arrays.asList(args));
}
|
@Test
void testParseWithUuidGeneratorArgument() {
RuntimeOptionsBuilder optionsBuilder = parser.parse("--uuid-generator",
IncrementingUuidGenerator.class.getName());
assertNotNull(optionsBuilder);
RuntimeOptions options = optionsBuilder.build();
assertNotNull(options);
assertThat(options.getUuidGeneratorClass(), Is.is(equalTo(IncrementingUuidGenerator.class)));
}
|
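// Asserts that the actual value is strictly less than the given value; fails when
// actual.compareTo(other) >= 0. Both the subject and the argument must be non-null.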
@SuppressWarnings("unchecked")
public final void isLessThan(@Nullable T other) {
if (checkNotNull((Comparable<Object>) actual).compareTo(checkNotNull(other)) >= 0) {
failWithActual("expected to be less than", other);
}
}
|
@Test
public void rawComparableType() {
assertThat(new RawComparableType(3)).isLessThan(new RawComparableType(4));
}
|
public Optional<Path> transactionIndex() {
return transactionIndex;
}
|
@Test
public void testOptionalTransactionIndex() {
File dir = TestUtils.tempDirectory();
LogSegmentData logSegmentDataWithTransactionIndex = new LogSegmentData(
new File(dir, "log-segment").toPath(),
new File(dir, "offset-index").toPath(),
new File(dir, "time-index").toPath(),
Optional.of(new File(dir, "transaction-index").toPath()),
new File(dir, "producer-snapshot").toPath(),
ByteBuffer.allocate(1)
);
Assertions.assertTrue(logSegmentDataWithTransactionIndex.transactionIndex().isPresent());
LogSegmentData logSegmentDataWithNoTransactionIndex = new LogSegmentData(
new File(dir, "log-segment").toPath(),
new File(dir, "offset-index").toPath(),
new File(dir, "time-index").toPath(),
Optional.empty(),
new File(dir, "producer-snapshot").toPath(),
ByteBuffer.allocate(1)
);
assertFalse(logSegmentDataWithNoTransactionIndex.transactionIndex().isPresent());
}
|
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
return getMapReturnTypes(mapInterface, inType, null, false);
}
|
@Test
void testFunctionDependingOnInputWithFunctionHierarchy2() {
IdentityMapper5<String> function = new IdentityMapper5<String>();
@SuppressWarnings({"rawtypes", "unchecked"})
TypeInformation<?> ti =
TypeExtractor.getMapReturnTypes(
function,
new TupleTypeInfo(
BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO));
assertThat(ti.isTupleType()).isTrue();
TupleTypeInfo<?> tti = (TupleTypeInfo<?>) ti;
assertThat(tti.getTypeAt(0)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
assertThat(tti.getTypeAt(1)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
}
|
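// Derives a safe file name from an arbitrary string: accents are stripped, leading and
// repeated whitespace is collapsed, and only characters from validChars are kept. Falls
// back to a random 8-character name when nothing survives, and names at or above
// MAX_FILENAME_LENGTH are truncated and suffixed with an MD5 hash of the original name so
// that distinct long names stay distinguishable.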
public static String generateFileName(String string) {
string = StringUtils.stripAccents(string);
StringBuilder buf = new StringBuilder();
for (int i = 0; i < string.length(); i++) {
char c = string.charAt(i);
if (Character.isSpaceChar(c)
&& (buf.length() == 0 || Character.isSpaceChar(buf.charAt(buf.length() - 1)))) {
continue;
}
if (ArrayUtils.contains(validChars, c)) {
buf.append(c);
}
}
String filename = buf.toString().trim();
if (TextUtils.isEmpty(filename)) {
return randomString(8);
} else if (filename.length() >= MAX_FILENAME_LENGTH) {
return filename.substring(0, MAX_FILENAME_LENGTH - MD5_HEX_LENGTH - 1) + "_" + md5(filename);
} else {
return filename;
}
}
|
@Test
public void testInvalidInput() {
String result = FileNameGenerator.generateFileName("???");
assertFalse(TextUtils.isEmpty(result));
}
|