focal_method (stringlengths 13–60.9k) | test_case (stringlengths 25–109k)
---|---|
String getProviderURL(LdapName baseDN) throws NamingException
{
StringBuffer ldapURL = new StringBuffer();
try
{
for ( String host : hosts )
{
// Create a correctly-encoded ldap URL for the PROVIDER_URL
final URI uri = new URI(sslEnabled ? "ldaps" : "ldap", null, host, port, "/" + baseDN.toString(), null, null);
ldapURL.append(uri.toASCIIString());
ldapURL.append(" ");
}
return ldapURL.toString().trim();
}
catch ( Exception e )
{
Log.error( "Unable to generate provider URL for baseDN: '{}'.", baseDN, e );
throw new NamingException( "Unable to generate provider URL for baseDN: '"+baseDN+"': " + e.getMessage() );
}
}
|
@Test
public void testGetProviderURLWithSpaces() throws Exception
{
// Setup fixture.
final Map<String, String> properties = new HashMap<>();
properties.put("ldap.host", "localhost");
properties.put("ldap.port", "389");
properties.put("ldap.sslEnabled", "false");
final LdapManager manager = new LdapManager( properties );
final LdapName name = new LdapName("ou=people,dc=example with spaces,dc=org");
// Execute system under test.
final String result = manager.getProviderURL( name );
// Verify result.
assertEquals("ldap://localhost:389/ou=people,dc=example%20with%20spaces,dc=org", result);
}
|
@Override
public void convertWeightsForChildQueues(FSQueue queue,
CapacitySchedulerConfiguration csConfig) {
List<FSQueue> children = queue.getChildQueues();
if (queue instanceof FSParentQueue || !children.isEmpty()) {
QueuePath queuePath = new QueuePath(queue.getName());
if (queue.getName().equals(ROOT_QUEUE)) {
csConfig.setNonLabeledQueueWeight(queuePath, queue.getWeight());
}
children.forEach(fsQueue -> csConfig.setNonLabeledQueueWeight(
new QueuePath(fsQueue.getName()), fsQueue.getWeight()));
csConfig.setAutoQueueCreationV2Enabled(queuePath, true);
}
}
|
@Test
public void testNoChildQueueConversion() {
FSQueue root = createFSQueues();
converter.convertWeightsForChildQueues(root, csConfig);
assertEquals("root weight", 1.0f,
csConfig.getNonLabeledQueueWeight(ROOT), 0.0f);
assertEquals("Converted items", 21,
csConfig.getPropsWithPrefix(PREFIX).size());
}
|
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
}
|
@Test
public void testShowTablePattern() throws AnalysisException, DdlException {
ShowTableStmt stmt = new ShowTableStmt("testDb", false, "empty%");
ShowResultSet resultSet = ShowExecutor.execute(stmt, ctx);
Assert.assertFalse(resultSet.next());
}
|
public Set<MessageQueueAssignment> queryAssignment(final String topic, final String consumerGroup,
final String strategyName, final MessageModel messageModel, int timeout)
throws RemotingException, InterruptedException, MQBrokerException {
String brokerAddr = this.findBrokerAddrByTopic(topic);
if (null == brokerAddr) {
this.updateTopicRouteInfoFromNameServer(topic);
brokerAddr = this.findBrokerAddrByTopic(topic);
}
if (null != brokerAddr) {
return this.mQClientAPIImpl.queryAssignment(brokerAddr, topic, consumerGroup, clientId, strategyName,
messageModel, timeout);
}
return null;
}
|
@Test
public void testQueryAssignment() throws MQBrokerException, RemotingException, InterruptedException {
topicRouteTable.put(topic, createTopicRouteData());
brokerAddrTable.put(defaultBroker, createBrokerAddrMap());
consumerTable.put(group, createMQConsumerInner());
Set<MessageQueueAssignment> actual = mqClientInstance.queryAssignment(topic, group, "", MessageModel.CLUSTERING, 1000);
assertNotNull(actual);
assertEquals(0, actual.size());
}
|
@GetMapping(value = "/{id}")
public Mono<Post> get(@PathVariable(value = "id") Long id) {
return this.posts.findById(id);
}
|
@Test
public void getPostById() throws Exception {
this.rest
.get()
.uri("/posts/1")
.accept(APPLICATION_JSON)
.exchange()
.expectBody()
.jsonPath("$.title")
.isEqualTo("post one");
this.rest
.get()
.uri("/posts/2")
.accept(APPLICATION_JSON)
.exchange()
.expectBody()
.jsonPath("$.title")
.isEqualTo("post two");
}
|
protected boolean isNodeEmpty(JsonNode json) {
if (json.isArray()) {
return isListEmpty((ArrayNode) json);
} else if (json.isObject()) {
return isObjectEmpty((ObjectNode) json);
} else {
return isEmptyText(json);
}
}
|
@Test
public void isNodeEmpty_objectNode() {
ObjectNode objectNode = new ObjectNode(factory);
assertThat(expressionEvaluator.isNodeEmpty(objectNode)).isTrue();
}
|
public static Write ingestMessages(String hl7v2Store) {
return write(hl7v2Store).setWriteMethod(Write.WriteMethod.INGEST).build();
}
|
@Test
public void testHL7v2IOFailedWrites() {
Message msg = new Message().setData("");
List<HL7v2Message> emptyMessages = Collections.singletonList(HL7v2Message.fromModel(msg));
PCollection<HL7v2Message> messages =
pipeline.apply(Create.of(emptyMessages).withCoder(new HL7v2MessageCoder()));
HL7v2IO.Write.Result writeResult =
messages.apply(
HL7v2IO.ingestMessages(
"projects/foo/locations/us-central1/datasets/bar/hl7V2Stores/baz"));
PCollection<HealthcareIOError<HL7v2Message>> failedInserts =
writeResult.getFailedInsertsWithErr();
PCollection<Long> failedMsgs = failedInserts.apply(Count.globally());
PAssert.thatSingleton(failedMsgs).isEqualTo(1L);
pipeline.run();
}
|
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
if (data.size() < 2) {
onInvalidDataReceived(device, data);
return;
}
// Read the Op Code
final int opCode = data.getIntValue(Data.FORMAT_UINT8, 0);
// Estimate the expected operand size based on the Op Code
int expectedOperandSize;
switch (opCode) {
case OP_CODE_COMMUNICATION_INTERVAL_RESPONSE ->
// UINT8
expectedOperandSize = 1;
case OP_CODE_CALIBRATION_VALUE_RESPONSE ->
// Calibration Value
expectedOperandSize = 10;
case OP_CODE_PATIENT_HIGH_ALERT_LEVEL_RESPONSE,
OP_CODE_PATIENT_LOW_ALERT_LEVEL_RESPONSE,
OP_CODE_HYPO_ALERT_LEVEL_RESPONSE,
OP_CODE_HYPER_ALERT_LEVEL_RESPONSE,
OP_CODE_RATE_OF_DECREASE_ALERT_LEVEL_RESPONSE,
OP_CODE_RATE_OF_INCREASE_ALERT_LEVEL_RESPONSE ->
// SFLOAT
expectedOperandSize = 2;
case OP_CODE_RESPONSE_CODE ->
// Request Op Code (UINT8), Response Code Value (UINT8)
expectedOperandSize = 2;
default -> {
onInvalidDataReceived(device, data);
return;
}
}
// Verify packet length
if (data.size() != 1 + expectedOperandSize && data.size() != 1 + expectedOperandSize + 2) {
onInvalidDataReceived(device, data);
return;
}
// Verify CRC if present
final boolean crcPresent = data.size() == 1 + expectedOperandSize + 2; // opCode + expected operand + CRC
if (crcPresent) {
final int expectedCrc = data.getIntValue(Data.FORMAT_UINT16_LE, 1 + expectedOperandSize);
final int actualCrc = CRC16.MCRF4XX(data.getValue(), 0, 1 + expectedOperandSize);
if (expectedCrc != actualCrc) {
onCGMSpecificOpsResponseReceivedWithCrcError(device, data);
return;
}
}
switch (opCode) {
case OP_CODE_COMMUNICATION_INTERVAL_RESPONSE -> {
final int interval = data.getIntValue(Data.FORMAT_UINT8, 1);
onContinuousGlucoseCommunicationIntervalReceived(device, interval, crcPresent);
return;
}
case OP_CODE_CALIBRATION_VALUE_RESPONSE -> {
final float glucoseConcentrationOfCalibration = data.getFloatValue(Data.FORMAT_SFLOAT, 1);
final int calibrationTime = data.getIntValue(Data.FORMAT_UINT16_LE, 3);
final int calibrationTypeAndSampleLocation = data.getIntValue(Data.FORMAT_UINT8, 5);
@SuppressLint("WrongConstant") final int calibrationType = calibrationTypeAndSampleLocation & 0x0F;
final int calibrationSampleLocation = calibrationTypeAndSampleLocation >> 4;
final int nextCalibrationTime = data.getIntValue(Data.FORMAT_UINT16_LE, 6);
final int calibrationDataRecordNumber = data.getIntValue(Data.FORMAT_UINT16_LE, 8);
final int calibrationStatus = data.getIntValue(Data.FORMAT_UINT8, 10);
onContinuousGlucoseCalibrationValueReceived(device, glucoseConcentrationOfCalibration,
calibrationTime, nextCalibrationTime, calibrationType, calibrationSampleLocation,
calibrationDataRecordNumber, new CGMCalibrationStatus(calibrationStatus), crcPresent);
return;
}
case OP_CODE_RESPONSE_CODE -> {
final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 1); // ignore
final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 2);
if (responseCode == CGM_RESPONSE_SUCCESS) {
onCGMSpecificOpsOperationCompleted(device, requestCode, crcPresent);
} else {
onCGMSpecificOpsOperationError(device, requestCode, responseCode, crcPresent);
}
return;
}
}
// Read SFLOAT value
final float value = data.getFloatValue(Data.FORMAT_SFLOAT, 1);
switch (opCode) {
case OP_CODE_PATIENT_HIGH_ALERT_LEVEL_RESPONSE ->
onContinuousGlucosePatientHighAlertReceived(device, value, crcPresent);
case OP_CODE_PATIENT_LOW_ALERT_LEVEL_RESPONSE ->
onContinuousGlucosePatientLowAlertReceived(device, value, crcPresent);
case OP_CODE_HYPO_ALERT_LEVEL_RESPONSE ->
onContinuousGlucoseHypoAlertReceived(device, value, crcPresent);
case OP_CODE_HYPER_ALERT_LEVEL_RESPONSE ->
onContinuousGlucoseHyperAlertReceived(device, value, crcPresent);
case OP_CODE_RATE_OF_DECREASE_ALERT_LEVEL_RESPONSE ->
onContinuousGlucoseRateOfDecreaseAlertReceived(device, value, crcPresent);
case OP_CODE_RATE_OF_INCREASE_ALERT_LEVEL_RESPONSE ->
onContinuousGlucoseRateOfIncreaseAlertReceived(device, value, crcPresent);
}
}
|
@Test
public void onContinuousGlucoseCalibrationValueReceived() {
final MutableData data = new MutableData(new byte[11]);
data.setValue(6, Data.FORMAT_UINT8, 0); // Op Code: Calibration Value response
data.setValue(1, 2, Data.FORMAT_SFLOAT, 1); // glucose concentration of calibration (SFLOAT)
data.setValue(10, Data.FORMAT_UINT16_LE, 3); // calibration time
data.setValue(0x32, Data.FORMAT_UINT8, 5); // calibration type (2) and sample location (3)
data.setValue(20, Data.FORMAT_UINT16_LE, 6); // next calibration time
data.setValue(1, Data.FORMAT_UINT16_LE, 8); // calibration data record number
data.setValue(0b100, Data.FORMAT_UINT8, 10); // calibration status
callback.onDataReceived(null, data);
assertTrue(valueReceived);
assertFalse(secured);
}
|
@Override
public ProcResult fetchResult() throws AnalysisException {
Preconditions.checkNotNull(globalStateMgr);
BaseProcResult result = new BaseProcResult();
result.setNames(TITLE_NAMES);
List<String> dbNames = globalStateMgr.getLocalMetastore().listDbNames();
if (dbNames == null || dbNames.isEmpty()) {
// empty
return result;
}
// get info
List<List<Comparable>> dbInfos = new ArrayList<List<Comparable>>();
for (String dbName : dbNames) {
Database db = globalStateMgr.getDb(dbName);
if (db == null) {
continue;
}
List<Comparable> dbInfo = new ArrayList<Comparable>();
Locker locker = new Locker();
locker.lockDatabase(db, LockType.READ);
try {
int tableNum = db.getTables().size();
dbInfo.add(db.getId());
dbInfo.add(dbName);
dbInfo.add(tableNum);
long dataQuota = db.getDataQuota();
Pair<Double, String> quotaUnitPair = DebugUtil.getByteUint(dataQuota);
String readableQuota = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(quotaUnitPair.first) + " "
+ quotaUnitPair.second;
dbInfo.add(readableQuota);
dbInfo.add(TimeUtils.longToTimeString(db.getLastCheckTime()));
long replicaQuota = db.getReplicaQuota();
dbInfo.add(replicaQuota);
} finally {
locker.unLockDatabase(db, LockType.READ);
}
dbInfos.add(dbInfo);
}
// order by dbId, asc
ListComparator<List<Comparable>> comparator = new ListComparator<List<Comparable>>(0);
Collections.sort(dbInfos, comparator);
// set result
for (List<Comparable> info : dbInfos) {
List<String> row = new ArrayList<String>(info.size());
for (Comparable comparable : info) {
row.add(comparable.toString());
}
result.addRow(row);
}
return result;
}
|
@Test
public void testFetchResultInvalid() throws AnalysisException {
new Expectations(globalStateMgr) {
{
globalStateMgr.getLocalMetastore().listDbNames();
minTimes = 0;
result = null;
}
};
DbsProcDir dir;
ProcResult result;
dir = new DbsProcDir(null);
try {
result = dir.fetchResult();
} catch (NullPointerException e) {
e.printStackTrace();
}
dir = new DbsProcDir(globalStateMgr);
result = dir.fetchResult();
Assert.assertEquals(
Lists.newArrayList("DbId", "DbName", "TableNum", "Quota", "LastConsistencyCheckTime", "ReplicaQuota"),
result.getColumnNames());
List<List<String>> rows = Lists.newArrayList();
Assert.assertEquals(rows, result.getRows());
}
|
@SuppressWarnings({
"nullness" // TODO(https://github.com/apache/beam/issues/20497)
})
public static Row structToBeamRow(Struct struct, Schema schema) {
Map<String, @Nullable Object> structValues =
schema.getFields().stream()
.collect(
HashMap::new,
(map, field) -> map.put(field.getName(), getStructValue(struct, field)),
Map::putAll);
return Row.withSchema(schema).withFieldValues(structValues).build();
}
|
@Test
public void testStructToBeamRowFailsColumnsDontMatch() {
Schema schema = Schema.builder().addInt64Field("f_int64").build();
Struct struct = Struct.newBuilder().set("f_different_field").to(5L).build();
Exception exception =
assertThrows(
IllegalArgumentException.class, () -> StructUtils.structToBeamRow(struct, schema));
checkMessage("Field not found: f_int64", exception.getMessage());
}
|
public int[] findMatchingLines(List<String> left, List<String> right) {
int[] index = new int[right.size()];
int dbLine = left.size();
int reportLine = right.size();
try {
PathNode node = new MyersDiff<String>().buildPath(left, right);
while (node.prev != null) {
PathNode prevNode = node.prev;
if (!node.isSnake()) {
// additions
reportLine -= (node.j - prevNode.j);
// removals
dbLine -= (node.i - prevNode.i);
} else {
// matches
for (int i = node.i; i > prevNode.i; i--) {
index[reportLine - 1] = dbLine;
reportLine--;
dbLine--;
}
}
node = prevNode;
}
} catch (DifferentiationFailedException e) {
LOG.error("Error finding matching lines", e);
return index;
}
return index;
}
|
@Test
public void shouldDetectWhenEndingWithModifiedLines() {
List<String> database = new ArrayList<>();
database.add("line - 0");
database.add("line - 1");
database.add("line - 2");
database.add("line - 3");
List<String> report = new ArrayList<>();
report.add("line - 0");
report.add("line - 1");
report.add("line - 2 - modified");
report.add("line - 3 - modified");
int[] diff = new SourceLinesDiffFinder().findMatchingLines(database, report);
assertThat(diff).containsExactly(1, 2, 0, 0); // first two report lines match db lines 1 and 2; the modified lines have no match (0)
}
|
public Command create(
final ConfiguredStatement<? extends Statement> statement,
final KsqlExecutionContext context) {
return create(statement, context.getServiceContext(), context);
}
|
@Test
public void shouldValidateTerminateAllQuery() {
// Given:
givenTerminateAll();
// When:
commandFactory.create(configuredStatement, executionContext);
// Then:
verify(query1).close();
verify(query2).close();
}
|
@Override
public boolean match(Message msg, StreamRule rule) {
Double msgVal = getDouble(msg.getField(rule.getField()));
if (msgVal == null) {
return false;
}
Double ruleVal = getDouble(rule.getValue());
if (ruleVal == null) {
return false;
}
return rule.getInverted() ^ (msgVal > ruleVal);
}
|
@Test
public void testMissedInvertedMatch() {
StreamRule rule = getSampleRule();
rule.setValue("25");
rule.setInverted(true);
Message msg = getSampleMessage();
msg.addField("something", "30");
StreamRuleMatcher matcher = getMatcher(rule);
assertFalse(matcher.match(msg, rule));
}
|
ControllerResult<Map<String, ApiError>> updateFeatures(
Map<String, Short> updates,
Map<String, FeatureUpdate.UpgradeType> upgradeTypes,
boolean validateOnly
) {
TreeMap<String, ApiError> results = new TreeMap<>();
List<ApiMessageAndVersion> records =
BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP);
for (Entry<String, Short> entry : updates.entrySet()) {
results.put(entry.getKey(), updateFeature(entry.getKey(), entry.getValue(),
upgradeTypes.getOrDefault(entry.getKey(), FeatureUpdate.UpgradeType.UPGRADE), records));
}
if (validateOnly) {
return ControllerResult.of(Collections.emptyList(), results);
} else {
return ControllerResult.atomicOf(records, results);
}
}
|
@Test
public void testUnsafeDowngradeIsTemporarilyDisabled() {
FeatureControlManager manager = TEST_MANAGER_BUILDER1.build();
assertEquals(ControllerResult.of(Collections.emptyList(),
singletonMap(MetadataVersion.FEATURE_NAME, new ApiError(Errors.INVALID_UPDATE_VERSION,
"Invalid metadata.version 4. Unsafe metadata downgrade is not supported in this version."))),
manager.updateFeatures(
singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_3_IV0.featureLevel()),
singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE),
true));
}
|
public static AuditManagerS3A stubAuditManager() {
return new NoopAuditManagerS3A();
}
|
@Test
public void testNoopAuditManager() throws Throwable {
AuditManagerS3A manager = AuditIntegration.stubAuditManager();
assertThat(manager.createTransferListener())
.describedAs("transfer listener")
.isNotNull();
}
|
public void acquireWriteLock(String key) {
getLock(key).writeLock().lock();
}
|
@Test
public void shouldEnforceMutualExclusionOfWriteLockForGivenName() throws InterruptedException {
readWriteLock.acquireWriteLock("foo");
new Thread(() -> {
readWriteLock.acquireWriteLock("foo");
numberOfLocks++;
}).start();
Thread.sleep(1000);
assertThat(numberOfLocks, is(0));
}
|
public final T apply(Schema left, Schema right) {
return visit(this, Context.EMPTY, FieldType.row(left), FieldType.row(right));
}
|
@Test
public void testListCommonFields() {
assertThat(
new ListCommonFields().apply(LEFT, RIGHT),
containsInAnyOrder("f0", "f1", "f2", "f3", "f3.f0", "f3.f1"));
}
|
Object getCellValue(Cell cell, Schema.FieldType type) {
ByteString cellValue = cell.getValue();
int valueSize = cellValue.size();
switch (type.getTypeName()) {
case BOOLEAN:
checkArgument(valueSize == 1, message("Boolean", 1));
return cellValue.toByteArray()[0] != 0;
case BYTE:
checkArgument(valueSize == 1, message("Byte", 1));
return cellValue.toByteArray()[0];
case INT16:
checkArgument(valueSize == 2, message("Int16", 2));
return Shorts.fromByteArray(cellValue.toByteArray());
case INT32:
checkArgument(valueSize == 4, message("Int32", 4));
return Ints.fromByteArray(cellValue.toByteArray());
case INT64:
checkArgument(valueSize == 8, message("Int64", 8));
return Longs.fromByteArray(cellValue.toByteArray());
case FLOAT:
checkArgument(valueSize == 4, message("Float", 4));
return Float.intBitsToFloat(Ints.fromByteArray(cellValue.toByteArray()));
case DOUBLE:
checkArgument(valueSize == 8, message("Double", 8));
return Double.longBitsToDouble(Longs.fromByteArray(cellValue.toByteArray()));
case DATETIME:
return DateTime.parse(cellValue.toStringUtf8());
case STRING:
return cellValue.toStringUtf8();
case BYTES:
return cellValue.toByteArray();
case LOGICAL_TYPE:
String identifier = checkArgumentNotNull(type.getLogicalType()).getIdentifier();
throw new IllegalStateException("Unsupported logical type: " + identifier);
default:
throw new IllegalArgumentException(
String.format("Unsupported cell value type '%s'.", type.getTypeName()));
}
}
|
@Test
public void shouldParseByteType() {
byte[] value = new byte[] {2};
assertEquals((byte) 2, PARSER.getCellValue(cell(value), BYTE));
}
|
@Override
public void cleanHistoryConfig() {
Timestamp startTime = getBeforeStamp(TimeUtils.getCurrentTime(), 24 * getRetentionDays());
int pageSize = 1000;
LOGGER.warn("clearConfigHistory, getBeforeStamp:{}, pageSize:{}", startTime, pageSize);
getHistoryConfigInfoPersistService().removeConfigHistory(startTime, pageSize);
}
|
@Test
public void testCleanHistoryConfig() throws Exception {
defaultHistoryConfigCleaner.cleanHistoryConfig();
Mockito.verify(historyConfigInfoPersistService, Mockito.times(1))
.removeConfigHistory(any(Timestamp.class), anyInt());
}
|
@Override
public V put(K key, V value, Duration ttl) {
return get(putAsync(key, value, ttl));
}
|
@Test
public void testExpire() throws InterruptedException {
RMapCacheNative<String, String> cache = redisson.getMapCacheNative("simple");
cache.put("0", "8", Duration.ofSeconds(1));
cache.expire(Duration.ofMillis(100));
Thread.sleep(500);
Assertions.assertEquals(0, cache.size());
cache.destroy();
}
|
public static Comparator<StructLike> forType(Types.StructType struct) {
return new StructLikeComparator(struct);
}
|
@Test
public void testUuid() {
assertComparesCorrectly(
Comparators.forType(Types.UUIDType.get()),
UUID.fromString("81873e7d-1374-4493-8e1d-9095eff7046c"),
UUID.fromString("fd02441d-1423-4a3f-8785-c7dd5647e26b"));
}
|
public static Date getDate(Object date) {
return getDate(date, Calendar.getInstance().getTime());
}
|
@Test
@SuppressWarnings({ "UndefinedEquals", "JavaUtilDate" })
public void testGetDateObjectDateWithTimeAndNullDefault() {
Date time = new Date();
assertEquals(time, Converter.getDate(time, null));
}
|
@Override
protected void doProcess(Exchange exchange, MetricsEndpoint endpoint, MetricRegistry registry, String metricsName)
throws Exception {
Message in = exchange.getIn();
Meter meter = registry.meter(metricsName);
Long mark = endpoint.getMark();
Long finalMark = getLongHeader(in, HEADER_METER_MARK, mark);
if (finalMark == null) {
meter.mark();
} else {
meter.mark(finalMark);
}
}
|
@Test
public void testProcessMarkSetOverrideByHeaderValue() throws Exception {
when(endpoint.getMark()).thenReturn(MARK);
when(in.getHeader(HEADER_METER_MARK, MARK, Long.class)).thenReturn(MARK + 101);
producer.doProcess(exchange, endpoint, registry, METRICS_NAME);
inOrder.verify(exchange, times(1)).getIn();
inOrder.verify(registry, times(1)).meter(METRICS_NAME);
inOrder.verify(endpoint, times(1)).getMark();
inOrder.verify(in, times(1)).getHeader(HEADER_METER_MARK, MARK, Long.class);
inOrder.verify(meter, times(1)).mark(MARK + 101);
inOrder.verifyNoMoreInteractions();
}
|
static JavaType constructType(Type type) {
try {
return constructTypeInner(type);
} catch (Exception e) {
throw new InvalidDataTableTypeException(type, e);
}
}
|
@Test
<T> void type_variables_are_not_allowed() {
Type typeVariable = new TypeReference<List<List<T>>>() {
}.getType();
InvalidDataTableTypeException exception = assertThrows(
InvalidDataTableTypeException.class,
() -> TypeFactory.constructType(typeVariable));
assertThat(exception.getMessage(), is("" +
"Can't create a data table type for type java.util.List<java.util.List<T>>. " +
"Type contained a type variable T. Types must explicit."));
}
|
@Override
public Path copy(final Path source, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
if(proxy.isSupported(source, target)) {
return proxy.copy(source, target, status, callback, listener);
}
// Copy between encrypted and unencrypted data room
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(target))) {
// File key must be set for new upload
status.setFilekey(SDSTripleCryptEncryptorFeature.generateFileKey());
}
final Path result = copy.copy(source, target, status, callback, listener);
nodeid.cache(target, null);
return result.withAttributes(new SDSAttributesFinderFeature(session, nodeid).find(result));
}
|
@Test
public void testCopyFileWithRenameBetweenEncryptedDataRooms() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room1 = new SDSDirectoryFeature(session, nodeid).createRoom(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), true);
room1.attributes().getAcl().addAll(new Acl.EmailUser(System.getProperties().getProperty("dracoon.user")), SDSPermissionsFeature.DELETE_ROLE);
final Path room2 = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final EncryptRoomRequest encrypt = new EncryptRoomRequest().isEncrypted(true);
new NodesApi(session.getClient()).encryptRoom(encrypt, Long.parseLong(new SDSNodeIdProvider(session).getVersionId(room2)), StringUtils.EMPTY, null);
room2.attributes().withCustom(KEY_ENCRYPTED, String.valueOf(true));
final byte[] content = RandomUtils.nextBytes(32769);
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
final Path test = new Path(room1, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final SDSEncryptionBulkFeature bulk = new SDSEncryptionBulkFeature(session, nodeid);
bulk.pre(Transfer.Type.upload, Collections.singletonMap(new TransferItem(test), status), new DisabledConnectionCallback());
final TripleCryptWriteFeature writer = new TripleCryptWriteFeature(session, nodeid, new SDSDirectS3MultipartWriteFeature(session, nodeid));
final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
final Path target = new Path(room2, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final SDSCopyFeature proxy = new SDSCopyFeature(session, nodeid);
final SDSDelegatingCopyFeature feature = new SDSDelegatingCopyFeature(session, nodeid, proxy);
assertNotNull(feature.copy(test, target, new TransferStatus().withLength(content.length), new DisabledConnectionCallback() {
@Override
public void warn(final Host bookmark, final String title, final String message, final String defaultButton, final String cancelButton, final String preference) {
//
}
@Override
public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
return new VaultCredentials("eth[oh8uv4Eesij");
}
}, new DisabledStreamListener()).attributes().getVersionId());
assertFalse(proxy.isSupported(test, target));
assertTrue(feature.isSupported(test, target));
assertTrue(new SDSFindFeature(session, nodeid).find(test));
assertTrue(new SDSFindFeature(session, nodeid).find(target));
final byte[] compare = new byte[content.length];
final InputStream stream = new TripleCryptReadFeature(session, nodeid, new SDSReadFeature(session, nodeid)).read(target, new TransferStatus().withLength(content.length), new DisabledConnectionCallback() {
@Override
public void warn(final Host bookmark, final String title, final String message, final String defaultButton, final String cancelButton, final String preference) {
//
}
@Override
public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
return new VaultCredentials("eth[oh8uv4Eesij");
}
});
IOUtils.readFully(stream, compare);
stream.close();
assertArrayEquals(content, compare);
new SDSDeleteFeature(session, nodeid).delete(Arrays.asList(room1, room2), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public static Object parse(String element) throws PathSegment.PathSegmentSyntaxException
{
Queue<Token> tokens = tokenizeElement(element);
Object result = parseElement(tokens);
if (!tokens.isEmpty())
{
throw new PathSegment.PathSegmentSyntaxException("tokens left over after parsing; first excess token: " + tokens.peek().toErrorString() );
}
return result;
}
|
@Test(dataProvider = "unicode")
public void testUnicode(String decodable, Object expectedObj) throws PathSegment.PathSegmentSyntaxException
{
Object actualObj = URIElementParser.parse(decodable);
Assert.assertEquals(actualObj, expectedObj);
}
|
public ProtocolBuilder server(String server) {
this.server = server;
return getThis();
}
|
@Test
void server() {
ProtocolBuilder builder = new ProtocolBuilder();
builder.server("server");
Assertions.assertEquals("server", builder.build().getServer());
}
|
@NotNull @Override
public Optional<? extends Algorithm> parse(
@Nullable String str, @NotNull DetectionLocation detectionLocation) {
if (str == null) {
return Optional.empty();
}
return switch (str) {
case "DHE", "DH" -> Optional.of(new DH(KeyAgreement.class, detectionLocation));
case "SRP" -> Optional.of(new SRP(detectionLocation));
case "RSA" -> Optional.of(new RSA(KeyAgreement.class, detectionLocation));
case "ECDH", "ECDHE" -> Optional.of(new ECDH(detectionLocation));
case "ECCPWD" -> Optional.of(new ECCPWD(detectionLocation));
case "PSK" -> Optional.of(new PSK(detectionLocation));
case "KRB5" -> Optional.of(new Kerberos(5, detectionLocation));
case "GOSTR341112" -> Optional.of(new GOSTR341112(detectionLocation));
case "GOSTR341112 256" -> Optional.of(new GOSTR341112(256, detectionLocation));
default -> Optional.empty();
};
}
|
@Test
public void test() {
final DetectionLocation testDetectionLocation =
new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");
final KeyExchangeAlgorithmMapper mapper = new KeyExchangeAlgorithmMapper();
final Collection<String> kexCollection =
JsonCipherSuites.CIPHER_SUITES.values().stream()
.map(JsonCipherSuite::getKexAlgorithm)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.toSet());
for (String kex : kexCollection) {
if (Objects.equals(kex, "NULL")) {
continue;
}
try {
assertThat(mapper.parse(kex, testDetectionLocation)).isPresent();
} catch (AssertionError e) {
System.out.println("Can't map '" + kex + "'");
throw e;
}
}
}
|
public static synchronized HostPasswordStore get() {
try {
return new PasswordStoreFactory().create();
}
catch(FactoryException e) {
return new DisabledPasswordStore();
}
}
|
@Test
public void testGet() {
assertNotSame(PasswordStoreFactory.get(), PasswordStoreFactory.get());
}
|
@PostMapping
@Secured(resource = AuthConstants.CONSOLE_RESOURCE_NAME_PREFIX
+ "namespaces", action = ActionTypes.WRITE, signType = SignType.CONSOLE)
public Result<Boolean> createNamespace(NamespaceForm namespaceForm) throws NacosException {
namespaceForm.validate();
String namespaceId = namespaceForm.getNamespaceId();
String namespaceName = namespaceForm.getNamespaceName();
String namespaceDesc = namespaceForm.getNamespaceDesc();
if (StringUtils.isBlank(namespaceId)) {
namespaceId = UUID.randomUUID().toString();
} else {
namespaceId = namespaceId.trim();
if (!namespaceIdCheckPattern.matcher(namespaceId).matches()) {
throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE,
"namespaceId [" + namespaceId + "] mismatch the pattern");
}
if (namespaceId.length() > NAMESPACE_ID_MAX_LENGTH) {
throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE,
"too long namespaceId, over " + NAMESPACE_ID_MAX_LENGTH);
}
// check unique
if (namespacePersistService.tenantInfoCountByTenantId(namespaceId) > 0) {
throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE,
"the namespaceId is existed, namespaceId: " + namespaceForm.getNamespaceId());
}
}
// contains illegal chars
if (!namespaceNameCheckPattern.matcher(namespaceName).matches()) {
throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE,
"namespaceName [" + namespaceName + "] contains illegal char");
}
return Result.success(namespaceOperationService.createNamespace(namespaceId, namespaceName, namespaceDesc));
}
|
@Test
void testEditNamespaceWithIllegalName() {
NamespaceForm form = new NamespaceForm();
form.setNamespaceId("test-id");
form.setNamespaceDesc("testDesc");
form.setNamespaceName("test@Name");
assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form));
form.setNamespaceName("test#Name");
assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form));
form.setNamespaceName("test$Name");
assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form));
form.setNamespaceName("test%Name");
assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form));
form.setNamespaceName("test^Name");
assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form));
form.setNamespaceName("test&Name");
assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form));
form.setNamespaceName("test*Name");
assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form));
}
|
@Override
public ValidationResult responseMessageForIsPackageConfigurationValid(String responseBody) {
return jsonResultMessageHandler.toValidationResult(responseBody);
}
|
@Test
public void shouldBuildValidationResultForCheckRepositoryConfigurationValidResponse() throws Exception {
String responseBody = "[{\"key\":\"key-one\",\"message\":\"incorrect value\"},{\"message\":\"general error\"}]";
ValidationResult validationResult = messageHandler.responseMessageForIsPackageConfigurationValid(responseBody);
assertValidationError(validationResult.getErrors().get(0), "key-one", "incorrect value");
assertValidationError(validationResult.getErrors().get(1), "", "general error");
}
|
public static VirtualSlotRef read(DataInput in) throws IOException {
VirtualSlotRef virtualSlotRef = new VirtualSlotRef(null, Type.BIGINT, null, new ArrayList<>());
virtualSlotRef.readFields(in);
return virtualSlotRef;
}
|
@Test
public void read() throws IOException {
virtualSlot.write(dos);
virtualSlot.setRealSlots(slots);
VirtualSlotRef v = VirtualSlotRef.read(dis);
Assert.assertEquals(3, v.getRealSlots().size());
}
|
@Override
public <VO, VR> KStream<K, VR> outerJoin(final KStream<K, VO> otherStream,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final JoinWindows windows) {
return outerJoin(otherStream, toValueJoinerWithKey(joiner), windows);
}
|
@Test
public void shouldNotAllowNullJoinWindowsOnOuterJoinWithStreamJoined() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.outerJoin(
testStream,
MockValueJoiner.TOSTRING_JOINER,
null,
StreamJoined.as("name")));
assertThat(exception.getMessage(), equalTo("windows can't be null"));
}
|
@Override
public void error(String code, String cause, String extendedInformation, String msg) {
if (getDisabled()) {
return;
}
try {
getLogger().error(appendContextMessageWithInstructions(code, cause, extendedInformation, msg));
} catch (Throwable t) {
// ignored.
}
}
|
@Test
void testGetLogger() {
Assertions.assertThrows(RuntimeException.class, () -> {
Logger failLogger = mock(Logger.class);
FailsafeErrorTypeAwareLogger failsafeLogger = new FailsafeErrorTypeAwareLogger(failLogger);
doThrow(new RuntimeException()).when(failLogger).error(anyString());
failsafeLogger.getLogger().error("should get error");
});
}
|
@Override
public void run() {
try { // make sure we call afterRun() even on crashes
// and operate countdown latches, else we may hang the parallel runner
if (steps == null) {
beforeRun();
}
if (skipped) {
return;
}
int count = steps.size();
int index = 0;
while ((index = nextStepIndex()) < count) {
currentStep = steps.get(index);
execute(currentStep);
if (currentStepResult != null) { // can be null if debug step-back or hook skip
result.addStepResult(currentStepResult);
}
}
} catch (Exception e) {
if (currentStepResult != null) {
result.addStepResult(currentStepResult);
}
logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
} finally {
if (!skipped) {
afterRun();
if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
featureRuntime.suite.abort();
}
}
if (caller.isNone()) {
logAppender.close(); // reclaim memory
}
}
}
|
@Test
void testCallOnce() {
run(
"def uuid = function(){ return java.util.UUID.randomUUID() + '' }",
"def first = callonce uuid",
"def second = callonce uuid"
);
matchVar("first", get("second"));
}
|
@Override
public V get(K name) {
return null;
}
|
@Test
public void testGet() {
assertNull(HEADERS.get("name1"));
}
|
public static void main(String[] args) throws Exception {
main(System::getenv);
}
|
@Test
@SuppressWarnings("FutureReturnValueIgnored") // failure will cause test to timeout.
public void testLaunchFnHarnessAndTeardownCleanly() throws Exception {
Function<String, String> environmentVariableMock = mock(Function.class);
PipelineOptions options = PipelineOptionsFactory.create();
when(environmentVariableMock.apply("HARNESS_ID")).thenReturn("id");
when(environmentVariableMock.apply("PIPELINE_OPTIONS"))
.thenReturn(PipelineOptionsTranslation.toJson(options));
List<BeamFnApi.LogEntry> logEntries = new ArrayList<>();
List<BeamFnApi.InstructionResponse> instructionResponses = mock(List.class);
BeamFnLoggingGrpc.BeamFnLoggingImplBase loggingService =
new BeamFnLoggingGrpc.BeamFnLoggingImplBase() {
@Override
public StreamObserver<BeamFnApi.LogEntry.List> logging(
StreamObserver<LogControl> responseObserver) {
return TestStreams.withOnNext(
(BeamFnApi.LogEntry.List entries) ->
logEntries.addAll(entries.getLogEntriesList()))
.withOnCompleted(responseObserver::onCompleted)
.build();
}
};
BeamFnControlGrpc.BeamFnControlImplBase controlService =
new BeamFnControlGrpc.BeamFnControlImplBase() {
@Override
public StreamObserver<InstructionResponse> control(
StreamObserver<InstructionRequest> responseObserver) {
CountDownLatch waitForResponses =
new CountDownLatch(1 /* number of responses expected */);
options
.as(ExecutorOptions.class)
.getScheduledExecutorService()
.submit(
() -> {
responseObserver.onNext(INSTRUCTION_REQUEST);
Uninterruptibles.awaitUninterruptibly(waitForResponses);
responseObserver.onCompleted();
});
return TestStreams.withOnNext(
(InstructionResponse t) -> {
instructionResponses.add(t);
waitForResponses.countDown();
})
.withOnCompleted(waitForResponses::countDown)
.build();
}
};
Server loggingServer = ServerBuilder.forPort(0).addService(loggingService).build();
loggingServer.start();
try {
Server controlServer = ServerBuilder.forPort(0).addService(controlService).build();
controlServer.start();
try {
Endpoints.ApiServiceDescriptor loggingDescriptor =
Endpoints.ApiServiceDescriptor.newBuilder()
.setUrl("localhost:" + loggingServer.getPort())
.build();
Endpoints.ApiServiceDescriptor controlDescriptor =
Endpoints.ApiServiceDescriptor.newBuilder()
.setUrl("localhost:" + controlServer.getPort())
.build();
when(environmentVariableMock.apply("LOGGING_API_SERVICE_DESCRIPTOR"))
.thenReturn(TextFormat.printer().printToString(loggingDescriptor));
when(environmentVariableMock.apply("CONTROL_API_SERVICE_DESCRIPTOR"))
.thenReturn(TextFormat.printer().printToString(controlDescriptor));
FnHarness.main(environmentVariableMock);
} finally {
controlServer.shutdownNow();
}
} finally {
loggingServer.shutdownNow();
}
// Verify that we first run onStartup functions before even reading the environment, and that
// we then call beforeProcessing functions before executing instructions.
InOrder inOrder =
inOrder(onStartupMock, beforeProcessingMock, environmentVariableMock, instructionResponses);
inOrder.verify(onStartupMock).run();
inOrder.verify(environmentVariableMock, atLeastOnce()).apply(any());
inOrder.verify(beforeProcessingMock).accept(any());
inOrder.verify(instructionResponses).add(INSTRUCTION_RESPONSE);
}
|
public Map<String, String> getHeaders()
{
return _headers;
}
|
@Test
public void testHeadersCaseInsensitiveAdd()
{
final long id = 42l;
GetRequestBuilder<Long, TestRecord> builder = generateDummyRequestBuilder();
Request<TestRecord> request = builder
.id(id)
.addHeader("header", "value1")
.addHeader("HEADER", "value2")
.build();
Assert.assertEquals(request.getHeaders().get("HEADER"), "value1,value2");
}
|
@Override
public void run() {
ContainerId containerId = container.getContainerId();
String containerIdStr = containerId.toString();
LOG.info("Cleaning up container " + containerIdStr);
try {
context.getNMStateStore().storeContainerKilled(containerId);
} catch (IOException e) {
LOG.error("Unable to mark container " + containerId
+ " killed in store", e);
}
// launch flag will be set to true if process already launched,
// in process of launching, or failed to launch.
boolean alreadyLaunched = !launch.markLaunched() ||
launch.isLaunchCompleted();
if (!alreadyLaunched) {
LOG.info("Container " + containerIdStr + " not launched."
+ " No cleanup needed to be done");
return;
}
LOG.debug("Marking container {} as inactive", containerIdStr);
// this should ensure that if the container process has not launched
// by this time, it will never be launched
exec.deactivateContainer(containerId);
Path pidFilePath = launch.getPidFilePath();
LOG.debug("Getting pid for container {} to kill"
+ " from pid file {}", containerIdStr, pidFilePath != null ?
pidFilePath : "null");
// however the container process may have already started
try {
// get process id from pid file if available
// else if shell is still active, get it from the shell
String processId = launch.getContainerPid();
// kill process
String user = container.getUser();
if (processId != null) {
signalProcess(processId, user, containerIdStr);
} else {
// Normally this means that the process was notified about
// deactivateContainer above and did not start.
// Since we already set the state to RUNNING or REINITIALIZING
// we have to send a killed event to continue.
if (!launch.isLaunchCompleted()) {
LOG.warn("Container clean up before pid file created "
+ containerIdStr);
dispatcher.getEventHandler().handle(
new ContainerExitEvent(container.getContainerId(),
ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
Shell.WINDOWS ?
ContainerExecutor.ExitCode.FORCE_KILLED.getExitCode() :
ContainerExecutor.ExitCode.TERMINATED.getExitCode(),
"Container terminated before pid file created."));
// There is a possibility that the launch grabbed the file name before
// the deactivateContainer above but it was slow enough to avoid
// getContainerPid.
// Increasing YarnConfiguration.NM_PROCESS_KILL_WAIT_MS
// reduces the likelihood of this race condition and process leak.
}
}
// rm container in docker
if (DockerLinuxContainerRuntime.isDockerContainerRequested(conf,
container.getLaunchContext().getEnvironment())) {
rmDockerContainerDelayed();
}
} catch (Exception e) {
String message =
"Exception when trying to cleanup container " + containerIdStr
+ ": " + StringUtils.stringifyException(e);
LOG.warn(message);
dispatcher.getEventHandler().handle(
new ContainerDiagnosticsUpdateEvent(containerId, message));
} finally {
// cleanup pid file if present
if (pidFilePath != null) {
try {
FileContext lfs = FileContext.getLocalFSFileContext();
lfs.delete(pidFilePath, false);
lfs.delete(pidFilePath.suffix(EXIT_CODE_FILE_SUFFIX), false);
} catch (IOException ioe) {
LOG.warn("{} exception trying to delete pid file {}. Ignoring.",
containerId, pidFilePath, ioe);
}
}
}
try {
// Reap the container
launch.reapContainer();
} catch (IOException ioe) {
LOG.warn("{} exception trying to reap container. Ignoring.", containerId,
ioe);
}
}
|
@Test
public void testNoCleanupWhenContainerNotLaunched() throws IOException {
cleanup.run();
verify(launch, Mockito.times(0)).signalContainer(
Mockito.any(SignalContainerCommand.class));
}
|
@Override
public final long readLong() throws EOFException {
final long l = readLong(pos);
pos += LONG_SIZE_IN_BYTES;
return l;
}
|
@Test
public void testReadLongByteOrder() throws Exception {
long readLong = in.readLong(LITTLE_ENDIAN);
long longB = Bits.readLongL(INIT_DATA, 0);
assertEquals(longB, readLong);
}
|
public static String sanitizeUri(String uri) {
// use xxxxx as replacement as that works well with JMX also
String sanitized = uri;
if (uri != null) {
sanitized = ALL_SECRETS.matcher(sanitized).replaceAll("$1=xxxxxx");
sanitized = USERINFO_PASSWORD.matcher(sanitized).replaceFirst("$1xxxxxx$3");
}
return sanitized;
}
|
@Test
public void testSanitizeUriWithUserInfoAndColonPassword() {
String uri = "sftp://USERNAME:HARRISON:[email protected]";
String expected = "sftp://USERNAME:[email protected]";
assertEquals(expected, URISupport.sanitizeUri(uri));
}
|
public static Builder newTimestampColumnDefBuilder() {
return new Builder();
}
|
@Test
public void build_column_def_with_only_required_attributes() {
TimestampColumnDef def = newTimestampColumnDefBuilder()
.setColumnName("created_at")
.build();
assertThat(def.getName()).isEqualTo("created_at");
assertThat(def.isNullable()).isTrue();
assertThat(def.getDefaultValue()).isNull();
}
|
public static String normalize(String path) {
if (path == null) {
return null;
}
// Compatible with Windows UNC share paths (if the original path starts with \\, keep it as-is)
if (path.startsWith("\\\\")) {
return path;
}
// Support Spring-style classpath paths: strip the prefix, case-insensitive
String pathToUse = StrUtil.removePrefixIgnoreCase(path, URLUtil.CLASSPATH_URL_PREFIX);
// Strip the file: prefix
pathToUse = StrUtil.removePrefixIgnoreCase(pathToUse, URLUtil.FILE_URL_PREFIX);
// Recognize the home directory form (~) and expand it to an absolute path
if (StrUtil.startWith(pathToUse, '~')) {
pathToUse = getUserHomePath() + pathToUse.substring(1);
}
// Use forward slashes uniformly
pathToUse = pathToUse.replaceAll("[/\\\\]+", StrUtil.SLASH);
// Trim leading whitespace; trailing whitespace is legal and is kept
pathToUse = StrUtil.trimStart(pathToUse);
// issue#IAB65V strip trailing line breaks
pathToUse = StrUtil.trim(pathToUse, 1, (c)->c == '\n' || c == '\r');
String prefix = StrUtil.EMPTY;
int prefixIndex = pathToUse.indexOf(StrUtil.COLON);
if (prefixIndex > -1) {
// Possibly a Windows-style path
prefix = pathToUse.substring(0, prefixIndex + 1);
if (StrUtil.startWith(prefix, StrUtil.C_SLASH)) {
// Strip the leading slash from paths like /C:
prefix = prefix.substring(1);
}
if (false == prefix.contains(StrUtil.SLASH)) {
pathToUse = pathToUse.substring(prefixIndex + 1);
} else {
// If the prefix contains /, this is not a Windows-style path
prefix = StrUtil.EMPTY;
}
}
if (pathToUse.startsWith(StrUtil.SLASH)) {
prefix += StrUtil.SLASH;
pathToUse = pathToUse.substring(1);
}
List<String> pathList = StrUtil.split(pathToUse, StrUtil.C_SLASH);
List<String> pathElements = new LinkedList<>();
int tops = 0;
String element;
for (int i = pathList.size() - 1; i >= 0; i--) {
element = pathList.get(i);
// Only process entries other than ".", i.e. skip the current-directory marker
if (false == StrUtil.DOT.equals(element)) {
if (StrUtil.DOUBLE_DOT.equals(element)) {
tops++;
} else {
if (tops > 0) {
// A pending parent-directory marker (..) consumes this element
tops--;
} else {
// Normal path element found.
pathElements.add(0, element);
}
}
}
}
// issue#1703@Github
if (tops > 0 && StrUtil.isEmpty(prefix)) {
// Only relative paths get the leading ".." re-added; absolute paths ignore them
while (tops-- > 0) {
// After traversing all elements there are still parent markers left (one or more leading ".."), so re-add them
pathElements.add(0, StrUtil.DOUBLE_DOT);
}
}
return prefix + CollUtil.join(pathElements, StrUtil.SLASH);
}
|
@Test
public void normalizeBlankTest() {
assertEquals("C:/aaa ", FileUtil.normalize("C:\\aaa "));
}
|
@Override
public int hashCode() {
return Objects.hash(from, to);
}
|
@Test
public void testHashCode() {
assertThat(new LineRange(12, 15)).hasSameHashCodeAs(new LineRange(12, 15));
}
|
@Override
public void encode(DataSchema schema) throws IOException
{
// Initialize a new builder for the preferred encoding style
_builder = _encodingStyle.newBuilderInstance(_writer);
// Set and write root namespace/package
if (schema instanceof NamedDataSchema)
{
NamedDataSchema namedSchema = (NamedDataSchema) schema;
boolean hasNamespace = StringUtils.isNotBlank(namedSchema.getNamespace());
boolean hasPackage = StringUtils.isNotBlank(namedSchema.getPackage());
if (hasNamespace || hasPackage)
{
if (hasNamespace)
{
markSchemaElementStartLocation();
_builder.write("namespace")
.writeSpace()
.writeIdentifier(namedSchema.getNamespace())
.newline();
recordSchemaElementLocation(namedSchema.getNamespace());
_namespace = namedSchema.getNamespace();
}
if (hasPackage)
{
_builder.write("package")
.writeSpace()
.writeIdentifier(namedSchema.getPackage())
.newline();
_package = namedSchema.getPackage();
}
_builder.newline();
}
}
// Compute imports
if (_typeReferenceFormat != TypeReferenceFormat.DENORMALIZE)
{
_importsByLocalName = computeImports(schema, _namespace);
}
else
{
_importsByLocalName = Collections.emptyMap();
}
// Write imports sorted by fully qualified name
if (_importsByLocalName.size() > 0)
{
for (Name importName : new TreeSet<>(_importsByLocalName.values()))
{
_builder.write("import")
.writeSpace()
.writeIdentifier(importName.getFullName())
.newline();
}
_builder.newline();
}
// Write the schema
writeInlineSchema(schema);
}
|
@Test
public void testEncodeRecordWithEmptyDataMapInProperty() throws IOException
{
RecordDataSchema source =
new RecordDataSchema(new Name("com.linkedin.test.RecordDataSchema"), RecordDataSchema.RecordType.RECORD);
Map<String, Object> properties = new HashMap<>();
properties.put("empty", new DataMap());
source.setProperties(properties);
// schema to pdl
StringWriter writer = new StringWriter();
SchemaToPdlEncoder encoder = new SchemaToPdlEncoder(writer);
encoder.setTypeReferenceFormat(SchemaToPdlEncoder.TypeReferenceFormat.PRESERVE);
encoder.encode(source);
DataSchema encoded = TestUtil.dataSchemaFromPdlString(writer.toString());
assertTrue(encoded instanceof RecordDataSchema);
assertEquals(source.getProperties(), encoded.getProperties());
assertEquals(source, encoded);
}
|
@Override
public BasicTypeDefine reconvert(Column column) {
BasicTypeDefine.BasicTypeDefineBuilder builder =
BasicTypeDefine.builder()
.name(column.getName())
.nullable(column.isNullable())
.comment(column.getComment())
.defaultValue(column.getDefaultValue());
switch (column.getDataType().getSqlType()) {
case BOOLEAN:
builder.columnType(XUGU_BOOLEAN);
builder.dataType(XUGU_BOOLEAN);
break;
case TINYINT:
builder.columnType(XUGU_TINYINT);
builder.dataType(XUGU_TINYINT);
break;
case SMALLINT:
builder.columnType(XUGU_SMALLINT);
builder.dataType(XUGU_SMALLINT);
break;
case INT:
builder.columnType(XUGU_INTEGER);
builder.dataType(XUGU_INTEGER);
break;
case BIGINT:
builder.columnType(XUGU_BIGINT);
builder.dataType(XUGU_BIGINT);
break;
case FLOAT:
builder.columnType(XUGU_FLOAT);
builder.dataType(XUGU_FLOAT);
break;
case DOUBLE:
builder.columnType(XUGU_DOUBLE);
builder.dataType(XUGU_DOUBLE);
break;
case DECIMAL:
DecimalType decimalType = (DecimalType) column.getDataType();
long precision = decimalType.getPrecision();
int scale = decimalType.getScale();
if (precision <= 0) {
precision = DEFAULT_PRECISION;
scale = DEFAULT_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is precision less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (precision > MAX_PRECISION) {
scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
precision = MAX_PRECISION;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum precision of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_PRECISION,
precision,
scale);
}
if (scale < 0) {
scale = 0;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is scale less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (scale > MAX_SCALE) {
scale = MAX_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_SCALE,
precision,
scale);
}
builder.columnType(String.format("%s(%s,%s)", XUGU_NUMERIC, precision, scale));
builder.dataType(XUGU_NUMERIC);
builder.precision(precision);
builder.scale(scale);
break;
case BYTES:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(XUGU_BLOB);
builder.dataType(XUGU_BLOB);
} else if (column.getColumnLength() <= MAX_BINARY_LENGTH) {
builder.columnType(XUGU_BINARY);
builder.dataType(XUGU_BINARY);
} else {
builder.columnType(XUGU_BLOB);
builder.dataType(XUGU_BLOB);
}
break;
case STRING:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(String.format("%s(%s)", XUGU_VARCHAR, MAX_VARCHAR_LENGTH));
builder.dataType(XUGU_VARCHAR);
} else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) {
builder.columnType(
String.format("%s(%s)", XUGU_VARCHAR, column.getColumnLength()));
builder.dataType(XUGU_VARCHAR);
} else {
builder.columnType(XUGU_CLOB);
builder.dataType(XUGU_CLOB);
}
break;
case DATE:
builder.columnType(XUGU_DATE);
builder.dataType(XUGU_DATE);
break;
case TIME:
builder.dataType(XUGU_TIME);
if (column.getScale() != null && column.getScale() > 0) {
Integer timeScale = column.getScale();
if (timeScale > MAX_TIME_SCALE) {
timeScale = MAX_TIME_SCALE;
log.warn(
"The time column {} type time({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to time({})",
column.getName(),
column.getScale(),
MAX_TIME_SCALE,
timeScale);
}
builder.columnType(String.format("%s(%s)", XUGU_TIME, timeScale));
builder.scale(timeScale);
} else {
builder.columnType(XUGU_TIME);
}
break;
case TIMESTAMP:
if (column.getScale() == null || column.getScale() <= 0) {
builder.columnType(XUGU_TIMESTAMP);
} else {
int timestampScale = column.getScale();
if (column.getScale() > MAX_TIMESTAMP_SCALE) {
timestampScale = MAX_TIMESTAMP_SCALE;
log.warn(
"The timestamp column {} type timestamp({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to timestamp({})",
column.getName(),
column.getScale(),
MAX_TIMESTAMP_SCALE,
timestampScale);
}
builder.columnType(String.format("TIMESTAMP(%s)", timestampScale));
builder.scale(timestampScale);
}
builder.dataType(XUGU_TIMESTAMP);
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.XUGU,
column.getDataType().getSqlType().name(),
column.getName());
}
return builder.build();
}
|
@Test
public void testReconvertByte() {
Column column = PhysicalColumn.builder().name("test").dataType(BasicType.BYTE_TYPE).build();
BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column);
Assertions.assertEquals(column.getName(), typeDefine.getName());
Assertions.assertEquals(XuguTypeConverter.XUGU_TINYINT, typeDefine.getColumnType());
Assertions.assertEquals(XuguTypeConverter.XUGU_TINYINT, typeDefine.getDataType());
}
|
public ProviderConfig build() {
ProviderConfig provider = new ProviderConfig();
super.build(provider);
provider.setHost(host);
provider.setPort(port);
provider.setContextpath(contextpath);
provider.setThreadpool(threadpool);
provider.setThreadname(threadname);
provider.setThreads(threads);
provider.setIothreads(iothreads);
provider.setAlive(alive);
provider.setQueues(queues);
provider.setAccepts(accepts);
provider.setCodec(codec);
provider.setPayload(payload);
provider.setCharset(charset);
provider.setBuffer(buffer);
provider.setTransporter(transporter);
provider.setExchanger(exchanger);
provider.setDispatcher(dispatcher);
provider.setNetworker(networker);
provider.setServer(server);
provider.setClient(client);
provider.setTelnet(telnet);
provider.setPrompt(prompt);
provider.setStatus(status);
provider.setWait(wait);
provider.setDefault(isDefault);
return provider;
}
|
@Test
void build() {
ProviderBuilder builder = ProviderBuilder.newBuilder();
builder.host("host")
.port(8080)
.contextPath("contextpath")
.threadPool("mockthreadpool")
.threads(2)
.ioThreads(3)
.queues(4)
.accepts(5)
.codec("mockcodec")
.charset("utf-8")
.payload(6)
.buffer(1024)
.transporter("mocktransporter")
.exchanger("mockexchanger")
.dispatcher("mockdispatcher")
.networker("networker")
.server("server")
.client("client")
.telnet("mocktelnethandler")
.prompt("prompt")
.status("mockstatuschecker")
.wait(Integer.valueOf(1000))
.isDefault(true)
.id("id");
ProviderConfig config = builder.build();
ProviderConfig config2 = builder.build();
Assertions.assertEquals(8080, config.getPort());
Assertions.assertEquals(2, config.getThreads());
Assertions.assertEquals(3, config.getIothreads());
Assertions.assertEquals(4, config.getQueues());
Assertions.assertEquals(5, config.getAccepts());
Assertions.assertEquals(6, config.getPayload());
Assertions.assertEquals(1024, config.getBuffer());
Assertions.assertEquals(1000, config.getWait());
Assertions.assertEquals("host", config.getHost());
Assertions.assertEquals("contextpath", config.getContextpath());
Assertions.assertEquals("mockthreadpool", config.getThreadpool());
Assertions.assertEquals("mockcodec", config.getCodec());
Assertions.assertEquals("utf-8", config.getCharset());
Assertions.assertEquals("mocktransporter", config.getTransporter());
Assertions.assertEquals("mockexchanger", config.getExchanger());
Assertions.assertEquals("mockdispatcher", config.getDispatcher());
Assertions.assertEquals("networker", config.getNetworker());
Assertions.assertEquals("server", config.getServer());
Assertions.assertEquals("client", config.getClient());
Assertions.assertEquals("mocktelnethandler", config.getTelnet());
Assertions.assertEquals("prompt", config.getPrompt());
Assertions.assertEquals("mockstatuschecker", config.getStatus());
Assertions.assertTrue(config.isDefault());
Assertions.assertEquals("id", config.getId());
Assertions.assertNotSame(config, config2);
}
|
public void add(String property, JsonElement value) {
members.put(property, value == null ? JsonNull.INSTANCE : value);
}
|
@Test
public void testPropertyWithQuotes() {
JsonObject jsonObj = new JsonObject();
jsonObj.add("a\"b", new JsonPrimitive("c\"d"));
String json = new Gson().toJson(jsonObj);
assertThat(json).isEqualTo("{\"a\\\"b\":\"c\\\"d\"}");
}
|
public ProtocolBuilder transporter(String transporter) {
this.transporter = transporter;
return getThis();
}
|
@Test
void transporter() {
ProtocolBuilder builder = new ProtocolBuilder();
builder.transporter("mocktransporter");
Assertions.assertEquals("mocktransporter", builder.build().getTransporter());
}
|
@Override
public void exportData(JsonWriter writer) throws IOException {
// version tag at the root
writer.name(THIS_VERSION);
writer.beginObject();
// clients list
writer.name(CLIENTS);
writer.beginArray();
writeClients(writer);
writer.endArray();
writer.name(GRANTS);
writer.beginArray();
writeGrants(writer);
writer.endArray();
writer.name(WHITELISTEDSITES);
writer.beginArray();
writeWhitelistedSites(writer);
writer.endArray();
writer.name(BLACKLISTEDSITES);
writer.beginArray();
writeBlacklistedSites(writer);
writer.endArray();
writer.name(AUTHENTICATIONHOLDERS);
writer.beginArray();
writeAuthenticationHolders(writer);
writer.endArray();
writer.name(ACCESSTOKENS);
writer.beginArray();
writeAccessTokens(writer);
writer.endArray();
writer.name(REFRESHTOKENS);
writer.beginArray();
writeRefreshTokens(writer);
writer.endArray();
writer.name(SYSTEMSCOPES);
writer.beginArray();
writeSystemScopes(writer);
writer.endArray();
for (MITREidDataServiceExtension extension : extensions) {
if (extension.supportsVersion(THIS_VERSION)) {
extension.exportExtensionData(writer);
break;
}
}
writer.endObject(); // end mitreid-connect-1.3
}
|
@Test
public void testExportRefreshTokens() throws IOException, ParseException {
String expiration1 = "2014-09-10T22:49:44.090+00:00";
Date expirationDate1 = formatter.parse(expiration1, Locale.ENGLISH);
ClientDetailsEntity mockedClient1 = mock(ClientDetailsEntity.class);
when(mockedClient1.getClientId()).thenReturn("mocked_client_1");
AuthenticationHolderEntity mockedAuthHolder1 = mock(AuthenticationHolderEntity.class);
when(mockedAuthHolder1.getId()).thenReturn(1L);
OAuth2RefreshTokenEntity token1 = new OAuth2RefreshTokenEntity();
token1.setId(1L);
token1.setClient(mockedClient1);
token1.setExpiration(expirationDate1);
token1.setJwt(JWTParser.parse("eyJhbGciOiJub25lIn0.eyJqdGkiOiJmOTg4OWQyOS0xMTk1LTQ4ODEtODgwZC1lZjVlYzAwY2Y4NDIifQ."));
token1.setAuthenticationHolder(mockedAuthHolder1);
String expiration2 = "2015-01-07T18:31:50.079+00:00";
Date expirationDate2 = formatter.parse(expiration2, Locale.ENGLISH);
ClientDetailsEntity mockedClient2 = mock(ClientDetailsEntity.class);
when(mockedClient2.getClientId()).thenReturn("mocked_client_2");
AuthenticationHolderEntity mockedAuthHolder2 = mock(AuthenticationHolderEntity.class);
when(mockedAuthHolder2.getId()).thenReturn(2L);
OAuth2RefreshTokenEntity token2 = new OAuth2RefreshTokenEntity();
token2.setId(2L);
token2.setClient(mockedClient2);
token2.setExpiration(expirationDate2);
token2.setJwt(JWTParser.parse("eyJhbGciOiJub25lIn0.eyJqdGkiOiJlYmEyYjc3My0xNjAzLTRmNDAtOWQ3MS1hMGIxZDg1OWE2MDAifQ."));
token2.setAuthenticationHolder(mockedAuthHolder2);
Set<OAuth2RefreshTokenEntity> allRefreshTokens = ImmutableSet.of(token1, token2);
Mockito.when(clientRepository.getAllClients()).thenReturn(new HashSet<ClientDetailsEntity>());
Mockito.when(approvedSiteRepository.getAll()).thenReturn(new HashSet<ApprovedSite>());
Mockito.when(wlSiteRepository.getAll()).thenReturn(new HashSet<WhitelistedSite>());
Mockito.when(blSiteRepository.getAll()).thenReturn(new HashSet<BlacklistedSite>());
Mockito.when(authHolderRepository.getAll()).thenReturn(new ArrayList<AuthenticationHolderEntity>());
Mockito.when(tokenRepository.getAllAccessTokens()).thenReturn(new HashSet<OAuth2AccessTokenEntity>());
Mockito.when(tokenRepository.getAllRefreshTokens()).thenReturn(allRefreshTokens);
Mockito.when(sysScopeRepository.getAll()).thenReturn(new HashSet<SystemScope>());
// do the data export
StringWriter stringWriter = new StringWriter();
JsonWriter writer = new JsonWriter(stringWriter);
writer.beginObject();
dataService.exportData(writer);
writer.endObject();
writer.close();
// parse the output as a JSON object for testing
JsonElement elem = new JsonParser().parse(stringWriter.toString());
JsonObject root = elem.getAsJsonObject();
// make sure the root is there
assertThat(root.has(MITREidDataService.MITREID_CONNECT_1_3), is(true));
JsonObject config = root.get(MITREidDataService.MITREID_CONNECT_1_3).getAsJsonObject();
// make sure all the root elements are there
assertThat(config.has(MITREidDataService.CLIENTS), is(true));
assertThat(config.has(MITREidDataService.GRANTS), is(true));
assertThat(config.has(MITREidDataService.WHITELISTEDSITES), is(true));
assertThat(config.has(MITREidDataService.BLACKLISTEDSITES), is(true));
assertThat(config.has(MITREidDataService.REFRESHTOKENS), is(true));
assertThat(config.has(MITREidDataService.ACCESSTOKENS), is(true));
assertThat(config.has(MITREidDataService.SYSTEMSCOPES), is(true));
assertThat(config.has(MITREidDataService.AUTHENTICATIONHOLDERS), is(true));
// make sure the root elements are all arrays
assertThat(config.get(MITREidDataService.CLIENTS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.GRANTS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.WHITELISTEDSITES).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.BLACKLISTEDSITES).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.REFRESHTOKENS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.ACCESSTOKENS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.SYSTEMSCOPES).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.AUTHENTICATIONHOLDERS).isJsonArray(), is(true));
// check our refresh token list (this test)
JsonArray refreshTokens = config.get(MITREidDataService.REFRESHTOKENS).getAsJsonArray();
assertThat(refreshTokens.size(), is(2));
// check for both of our refresh tokens in turn
Set<OAuth2RefreshTokenEntity> checked = new HashSet<>();
for (JsonElement e : refreshTokens) {
assertThat(e.isJsonObject(), is(true));
JsonObject token = e.getAsJsonObject();
OAuth2RefreshTokenEntity compare = null;
if (token.get("id").getAsLong() == token1.getId()) {
compare = token1;
} else if (token.get("id").getAsLong() == token2.getId()) {
compare = token2;
}
if (compare == null) {
fail("Could not find matching id: " + token.get("id").getAsString());
} else {
assertThat(token.get("id").getAsLong(), equalTo(compare.getId()));
assertThat(token.get("clientId").getAsString(), equalTo(compare.getClient().getClientId()));
assertThat(token.get("expiration").getAsString(), equalTo(formatter.print(compare.getExpiration(), Locale.ENGLISH)));
assertThat(token.get("value").getAsString(), equalTo(compare.getValue()));
assertThat(token.get("authenticationHolderId").getAsLong(), equalTo(compare.getAuthenticationHolder().getId()));
checked.add(compare);
}
}
// make sure all of our refresh tokens were found
assertThat(checked.containsAll(allRefreshTokens), is(true));
}
|
@Override
public CreateTopicsResult createTopics(final Collection<NewTopic> newTopics,
final CreateTopicsOptions options) {
final Map<String, KafkaFutureImpl<TopicMetadataAndConfig>> topicFutures = new HashMap<>(newTopics.size());
final CreatableTopicCollection topics = new CreatableTopicCollection();
for (NewTopic newTopic : newTopics) {
if (topicNameIsUnrepresentable(newTopic.name())) {
KafkaFutureImpl<TopicMetadataAndConfig> future = new KafkaFutureImpl<>();
future.completeExceptionally(new InvalidTopicException("The given topic name '" +
newTopic.name() + "' cannot be represented in a request."));
topicFutures.put(newTopic.name(), future);
} else if (!topicFutures.containsKey(newTopic.name())) {
topicFutures.put(newTopic.name(), new KafkaFutureImpl<>());
topics.add(newTopic.convertToCreatableTopic());
}
}
if (!topics.isEmpty()) {
final long now = time.milliseconds();
final long deadline = calcDeadlineMs(now, options.timeoutMs());
final Call call = getCreateTopicsCall(options, topicFutures, topics,
Collections.emptyMap(), now, deadline);
runnable.call(call, now);
}
return new CreateTopicsResult(new HashMap<>(topicFutures));
}
|
@Test
public void testCreateTopics() throws Exception {
try (AdminClientUnitTestEnv env = mockClientEnv()) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(
expectCreateTopicsRequestWithTopics("myTopic"),
prepareCreateTopicsResponse("myTopic", Errors.NONE));
KafkaFuture<Void> future = env.adminClient().createTopics(
singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
new CreateTopicsOptions().timeoutMs(10000)).all();
future.get();
}
}
|
public OffsetFetchResponseData.OffsetFetchResponseGroup fetchAllOffsets(
OffsetFetchRequestData.OffsetFetchRequestGroup request,
long lastCommittedOffset
) throws ApiException {
final boolean requireStable = lastCommittedOffset == Long.MAX_VALUE;
try {
validateOffsetFetch(request, lastCommittedOffset);
} catch (GroupIdNotFoundException ex) {
return new OffsetFetchResponseData.OffsetFetchResponseGroup()
.setGroupId(request.groupId())
.setTopics(Collections.emptyList());
}
final List<OffsetFetchResponseData.OffsetFetchResponseTopics> topicResponses = new ArrayList<>();
final TimelineHashMap<String, TimelineHashMap<Integer, OffsetAndMetadata>> groupOffsets =
offsets.offsetsByGroup.get(request.groupId(), lastCommittedOffset);
if (groupOffsets != null) {
groupOffsets.entrySet(lastCommittedOffset).forEach(topicEntry -> {
final String topic = topicEntry.getKey();
final TimelineHashMap<Integer, OffsetAndMetadata> topicOffsets = topicEntry.getValue();
final OffsetFetchResponseData.OffsetFetchResponseTopics topicResponse =
new OffsetFetchResponseData.OffsetFetchResponseTopics().setName(topic);
topicResponses.add(topicResponse);
topicOffsets.entrySet(lastCommittedOffset).forEach(partitionEntry -> {
final int partition = partitionEntry.getKey();
final OffsetAndMetadata offsetAndMetadata = partitionEntry.getValue();
if (requireStable && hasPendingTransactionalOffsets(request.groupId(), topic, partition)) {
topicResponse.partitions().add(new OffsetFetchResponseData.OffsetFetchResponsePartitions()
.setPartitionIndex(partition)
.setErrorCode(Errors.UNSTABLE_OFFSET_COMMIT.code())
.setCommittedOffset(INVALID_OFFSET)
.setCommittedLeaderEpoch(-1)
.setMetadata(""));
} else {
topicResponse.partitions().add(new OffsetFetchResponseData.OffsetFetchResponsePartitions()
.setPartitionIndex(partition)
.setCommittedOffset(offsetAndMetadata.committedOffset)
.setCommittedLeaderEpoch(offsetAndMetadata.leaderEpoch.orElse(-1))
.setMetadata(offsetAndMetadata.metadata));
}
});
});
}
return new OffsetFetchResponseData.OffsetFetchResponseGroup()
.setGroupId(request.groupId())
.setTopics(topicResponses);
}
|
@Test
public void testFetchAllOffsetsWithUnknownGroup() {
OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
assertEquals(Collections.emptyList(), context.fetchAllOffsets("group", Long.MAX_VALUE));
}
|
public FEELFnResult<Object> invoke(@ParameterName("list") List list) {
if ( list == null || list.isEmpty() ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null or empty"));
} else {
try {
return FEELFnResult.ofResult(Collections.min(list, new InterceptNotComparableComparator()));
} catch (ClassCastException e) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "contains items that are not comparable"));
}
}
}
|
@Test
void invokeArrayOfChronoPeriods() {
final ChronoPeriod p1Period = Period.parse("P1Y");
final ChronoPeriod p1Comparable = ComparablePeriod.parse("P1Y");
final ChronoPeriod p2Period = Period.parse("P1M");
final ChronoPeriod p2Comparable = ComparablePeriod.parse("P1M");
Predicate<ChronoPeriod> assertion = i -> i.get(ChronoUnit.YEARS) == 0 && i.get(ChronoUnit.MONTHS) == 1;
FunctionTestUtil.assertPredicateOnResult(minFunction.invoke(new Object[]{p2Period}), ChronoPeriod.class,
assertion);
FunctionTestUtil.assertPredicateOnResult(minFunction.invoke(new Object[]{p2Comparable}), ChronoPeriod.class,
assertion);
FunctionTestUtil.assertPredicateOnResult(minFunction.invoke(new Object[]{p1Period, p2Period}),
ChronoPeriod.class, assertion);
FunctionTestUtil.assertPredicateOnResult(minFunction.invoke(new Object[]{p1Comparable, p2Period}),
ChronoPeriod.class, assertion);
FunctionTestUtil.assertPredicateOnResult(minFunction.invoke(new Object[]{p1Period, p2Comparable}),
ChronoPeriod.class, assertion);
FunctionTestUtil.assertPredicateOnResult(minFunction.invoke(new Object[]{p1Comparable, p2Comparable}),
ChronoPeriod.class, assertion);
}
|
public long scan(
final UnsafeBuffer termBuffer,
final long rebuildPosition,
final long hwmPosition,
final long nowNs,
final int termLengthMask,
final int positionBitsToShift,
final int initialTermId)
{
boolean lossFound = false;
int rebuildOffset = (int)rebuildPosition & termLengthMask;
if (rebuildPosition < hwmPosition)
{
final int rebuildTermCount = (int)(rebuildPosition >>> positionBitsToShift);
final int hwmTermCount = (int)(hwmPosition >>> positionBitsToShift);
final int rebuildTermId = initialTermId + rebuildTermCount;
final int hwmTermOffset = (int)hwmPosition & termLengthMask;
final int limitOffset = rebuildTermCount == hwmTermCount ? hwmTermOffset : termLengthMask + 1;
rebuildOffset = scanForGap(termBuffer, rebuildTermId, rebuildOffset, limitOffset, this);
if (rebuildOffset < limitOffset)
{
if (scannedTermOffset != activeTermOffset || scannedTermId != activeTermId)
{
activateGap(nowNs);
lossFound = true;
}
checkTimerExpiry(nowNs);
}
}
return pack(rebuildOffset, lossFound);
}
|
@Test
void shouldNakMissingData()
{
final long rebuildPosition = ACTIVE_TERM_POSITION;
final long hwmPosition = ACTIVE_TERM_POSITION + (ALIGNED_FRAME_LENGTH * 3L);
insertDataFrame(offsetOfMessage(0));
insertDataFrame(offsetOfMessage(2));
lossDetector.scan(termBuffer, rebuildPosition, hwmPosition, currentTime, MASK, POSITION_BITS_TO_SHIFT, TERM_ID);
currentTime = TimeUnit.MILLISECONDS.toNanos(40);
lossDetector.scan(termBuffer, rebuildPosition, hwmPosition, currentTime, MASK, POSITION_BITS_TO_SHIFT, TERM_ID);
verify(lossHandler).onGapDetected(TERM_ID, offsetOfMessage(1), gapLength());
}
|
public KVConfigManager getKvConfigManager() {
return kvConfigManager;
}
|
@Test
public void getKvConfigManager() {
KVConfigManager manager = namesrvController.getKvConfigManager();
Assert.assertNotNull(manager);
}
|
@Override
public ExecuteContext before(ExecuteContext context) {
if (shouldHandle(context)) {
ThreadLocalUtils.removeRequestTag();
ServerWebExchange exchange = (ServerWebExchange) context.getArguments()[0];
ServerHttpRequest request = exchange.getRequest();
HttpHeaders headers = request.getHeaders();
String path = request.getURI().getPath();
String methodName = request.getMethod().name();
Map<String, List<String>> queryParams = request.getQueryParams();
handlers.forEach(handler -> ThreadLocalUtils
.addRequestTag(handler.getRequestTag(path, methodName, headers, queryParams)));
}
return context;
}
|
@Test
public void testBefore() {
// Test the before method
interceptor.before(context);
RequestTag requestTag = ThreadLocalUtils.getRequestTag();
Map<String, List<String>> header = requestTag.getTag();
Assert.assertNotNull(header);
Assert.assertEquals(2, header.size());
Assert.assertEquals("bar1", header.get("bar").get(0));
Assert.assertEquals("foo1", header.get("foo").get(0));
}
|
public String serializeToString() {
switch (bitmapType) {
case EMPTY:
break;
case SINGLE_VALUE:
return String.format("%s", singleValue);
case BITMAP_VALUE:
return this.bitmap.serializeToString();
case SET_VALUE:
return setToString();
}
return "";
}
|
@Test
public void testSerializeToString() {
Assert.assertEquals(emptyBitmap.serializeToString(), "");
Assert.assertEquals(singleBitmap.serializeToString(), "1");
Assert.assertEquals(mediumBitmap.serializeToString(), "0,1,2,3,4,5,6,7,8,9");
Assert.assertEquals(largeBitmap.serializeToString(), "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20," +
"21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39");
BitmapValue bitmap = new BitmapValue();
bitmap.add(1);
bitmap.add(2);
bitmap.add(3);
bitmap.add(100);
bitmap.add(5);
bitmap.add(102);
Assert.assertEquals(bitmap.setToString(), "1,2,3,5,100,102");
}
|
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response createVirtualNetwork(InputStream stream) {
try {
final TenantId tid = TenantId.tenantId(getFromJsonStream(stream, "id").asText());
VirtualNetwork newVnet = vnetAdminService.createVirtualNetwork(tid);
UriBuilder locationBuilder = uriInfo.getBaseUriBuilder()
.path("vnets")
.path(newVnet.id().toString());
return Response
.created(locationBuilder.build())
.build();
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
|
@Test
public void testPostVirtualNetwork() {
expect(mockVnetAdminService.createVirtualNetwork(tenantId2)).andReturn(vnet1);
expectLastCall();
replay(mockVnetAdminService);
WebTarget wt = target();
InputStream jsonStream = TenantWebResourceTest.class
.getResourceAsStream("post-tenant.json");
Response response = wt.path("vnets").request(MediaType.APPLICATION_JSON_TYPE)
.post(Entity.json(jsonStream));
assertThat(response.getStatus(), is(HttpURLConnection.HTTP_CREATED));
String location = response.getLocation().getPath();
assertThat(location, Matchers.startsWith("/vnets/" + vnet1.id().toString()));
verify(mockVnetAdminService);
}
|
@Override
public int run(String[] args) throws Exception {
if (args.length != 2) {
return usage(args);
}
String action = args[0];
String name = args[1];
int result;
if (A_LOAD.equals(action)) {
result = loadClass(name);
} else if (A_CREATE.equals(action)) {
//first load to separate load errors from create
result = loadClass(name);
if (result == SUCCESS) {
//class loads, so instantiate it
result = createClassInstance(name);
}
} else if (A_RESOURCE.equals(action)) {
result = loadResource(name);
} else if (A_PRINTRESOURCE.equals(action)) {
result = dumpResource(name);
} else {
result = usage(args);
}
return result;
}
|
@Test
public void testCreateFailsPrivateClass() throws Throwable {
run(FindClass.E_CREATE_FAILED,
FindClass.A_CREATE,
"org.apache.hadoop.util.TestFindClass$PrivateClass");
}
|
public static void readJobRep( Object object, Repository rep, ObjectId id_step, List<DatabaseMeta> databases ) throws KettleException {
try {
String jobXML = rep.getJobEntryAttributeString( id_step, "job-xml" );
ByteArrayInputStream bais = new ByteArrayInputStream( jobXML.getBytes() );
Document doc = XMLParserFactoryProducer.createSecureDocBuilderFactory().newDocumentBuilder().parse( bais );
read( object, doc.getDocumentElement() );
} catch ( ParserConfigurationException ex ) {
throw new KettleException( ex.getMessage(), ex );
} catch ( SAXException ex ) {
throw new KettleException( ex.getMessage(), ex );
} catch ( IOException ex ) {
throw new KettleException( ex.getMessage(), ex );
}
}
|
@Test( expected = NullPointerException.class )
public void readingJobRepoThrowsExceptionWhenParsingXmlWithBigAmountOfExternalEntities() throws Exception {
SerializationHelper.readJobRep( null, repo, null, new ArrayList<>() );
}
|
public InetAddress resolve(final String name, final String uriParamName, final boolean isReResolution)
{
InetAddress resolvedAddress = null;
try
{
resolvedAddress = InetAddress.getByName(name);
}
catch (final UnknownHostException ignore)
{
}
return resolvedAddress;
}
|
@Test
void resolveReturnsNullForUnknownHost()
{
final String hostName = UUID.randomUUID().toString();
final String uriParamName = "endpoint";
final boolean isReResolution = false;
assertNull(nameResolver.resolve(hostName, uriParamName, isReResolution));
}
|
@Override
public void init() throws LoadException {
createEtlJobConf();
}
|
@Test
public void testRangePartitionHashDistribution(@Injectable SparkLoadJob sparkLoadJob,
@Injectable SparkResource resource,
@Injectable BrokerDesc brokerDesc,
@Mocked GlobalStateMgr globalStateMgr,
@Injectable Database database,
@Injectable OlapTable table)
throws LoadException, DdlException, AnalysisException {
long dbId = 0L;
long tableId = 1L;
// c1 is partition column, c2 is distribution column
List<Column> columns = Lists.newArrayList();
columns.add(new Column("c1", Type.INT, true, null, false, null, ""));
columns.add(new Column("c2", ScalarType.createVarchar(10), true, null, false, null, ""));
columns.add(new Column("c3", Type.INT, false, AggregateType.SUM, false, null, ""));
Map<ColumnId, Column> idToColumn = Maps.newTreeMap(ColumnId.CASE_INSENSITIVE_ORDER);
for (Column column : columns) {
idToColumn.put(column.getColumnId(), column);
}
// indexes
Map<Long, List<Column>> indexIdToSchema = Maps.newHashMap();
long index1Id = 3L;
indexIdToSchema.put(index1Id, columns);
long index2Id = 4L;
indexIdToSchema.put(index2Id, Lists.newArrayList(columns.get(0), columns.get(2)));
// partition and distribution info
long partition1Id = 2L;
long partition2Id = 5L;
// partition3 is a temporary partition
long partition3Id = 6L;
int distributionColumnIndex = 1;
DistributionInfo distributionInfo =
new HashDistributionInfo(3, Lists.newArrayList(columns.get(distributionColumnIndex)));
Partition partition1 = new Partition(partition1Id, "p1", null,
distributionInfo);
Partition partition2 = new Partition(partition2Id, "p2", null,
new HashDistributionInfo(4, Lists.newArrayList(columns.get(distributionColumnIndex))));
Partition partition3 = new Partition(partition3Id, "tp3", null,
distributionInfo);
int partitionColumnIndex = 0;
List<Partition> partitions = Lists.newArrayList(partition1, partition2);
RangePartitionInfo partitionInfo =
new RangePartitionInfo(Lists.newArrayList(columns.get(partitionColumnIndex)));
PartitionKeyDesc partitionKeyDesc1 = new PartitionKeyDesc(Lists.newArrayList(new PartitionValue("10")));
SingleRangePartitionDesc partitionDesc1 = new SingleRangePartitionDesc(false, "p1", partitionKeyDesc1, null);
partitionDesc1.analyze(1, null);
partitionInfo.handleNewSinglePartitionDesc(MetaUtils.buildIdToColumn(columns),
partitionDesc1, partition1Id, false);
PartitionKeyDesc partitionKeyDesc2 = new PartitionKeyDesc(Lists.newArrayList(new PartitionValue("20")));
SingleRangePartitionDesc partitionDesc2 = new SingleRangePartitionDesc(false, "p2", partitionKeyDesc2, null);
partitionDesc2.analyze(1, null);
partitionInfo.handleNewSinglePartitionDesc(MetaUtils.buildIdToColumn(columns),
partitionDesc2, partition2Id, false);
PartitionKeyDesc partitionKeyDesc3 = new PartitionKeyDesc(Lists.newArrayList(new PartitionValue("10")));
SingleRangePartitionDesc partitionDesc3 = new SingleRangePartitionDesc(false, "tp3", partitionKeyDesc3, null);
partitionDesc3.analyze(1, null);
partitionInfo.handleNewSinglePartitionDesc(MetaUtils.buildIdToColumn(columns),
partitionDesc3, partition3Id, true);
new Expectations() {
{
globalStateMgr.getDb(dbId);
result = database;
database.getTable(tableId);
result = table;
table.getPartitions();
result = partitions;
table.getIndexIdToSchema();
result = indexIdToSchema;
table.getDefaultDistributionInfo();
result = distributionInfo;
table.getSchemaHashByIndexId(index1Id);
result = 123;
table.getSchemaHashByIndexId(index2Id);
result = 234;
table.getPartitionInfo();
result = partitionInfo;
table.getPartition(partition1Id);
result = partition1;
table.getPartition(partition2Id);
result = partition2;
table.getPartition(partition3Id);
result = partition3;
table.getKeysTypeByIndexId(index1Id);
result = KeysType.AGG_KEYS;
table.getKeysTypeByIndexId(index2Id);
result = KeysType.AGG_KEYS;
table.getBaseIndexId();
result = index1Id;
table.getIdToColumn();
result = idToColumn;
}
};
// case 0: partition is null in load stmt
// file group
Map<BrokerFileGroupAggInfo.FileGroupAggKey, List<BrokerFileGroup>> aggKeyToFileGroups = Maps.newHashMap();
List<BrokerFileGroup> brokerFileGroups = Lists.newArrayList();
DataDescription desc = new DataDescription("testTable", null, Lists.newArrayList("abc.txt"),
null, null, null, null, false, null);
BrokerFileGroup brokerFileGroup = new BrokerFileGroup(desc);
brokerFileGroups.add(brokerFileGroup);
BrokerFileGroupAggInfo.FileGroupAggKey aggKey = new BrokerFileGroupAggInfo.FileGroupAggKey(tableId, null);
aggKeyToFileGroups.put(aggKey, brokerFileGroups);
// create pending task
SparkLoadPendingTask task = new SparkLoadPendingTask(sparkLoadJob, aggKeyToFileGroups, resource, brokerDesc);
EtlJobConfig etlJobConfig = Deencapsulation.getField(task, "etlJobConfig");
Assert.assertEquals(null, etlJobConfig);
task.init();
etlJobConfig = Deencapsulation.getField(task, "etlJobConfig");
Assert.assertTrue(etlJobConfig != null);
// check table id
Map<Long, EtlTable> idToEtlTable = etlJobConfig.tables;
Assert.assertEquals(1, idToEtlTable.size());
Assert.assertTrue(idToEtlTable.containsKey(tableId));
// check indexes
EtlTable etlTable = idToEtlTable.get(tableId);
List<EtlIndex> etlIndexes = etlTable.indexes;
Assert.assertEquals(2, etlIndexes.size());
Assert.assertEquals(index1Id, etlIndexes.get(0).indexId);
Assert.assertEquals(index2Id, etlIndexes.get(1).indexId);
// check base index columns
EtlIndex baseIndex = etlIndexes.get(0);
Assert.assertTrue(baseIndex.isBaseIndex);
Assert.assertEquals(3, baseIndex.columns.size());
for (int i = 0; i < columns.size(); i++) {
Assert.assertEquals(columns.get(i).getName(), baseIndex.columns.get(i).columnName);
}
Assert.assertEquals("AGGREGATE", baseIndex.indexType);
// check partitions
EtlPartitionInfo etlPartitionInfo = etlTable.partitionInfo;
Assert.assertEquals("RANGE", etlPartitionInfo.partitionType);
List<String> partitionColumns = etlPartitionInfo.partitionColumnRefs;
Assert.assertEquals(1, partitionColumns.size());
Assert.assertEquals(columns.get(partitionColumnIndex).getName(), partitionColumns.get(0));
List<String> distributionColumns = etlPartitionInfo.distributionColumnRefs;
Assert.assertEquals(1, distributionColumns.size());
Assert.assertEquals(columns.get(distributionColumnIndex).getName(), distributionColumns.get(0));
List<EtlPartition> etlPartitions = etlPartitionInfo.partitions;
Assert.assertEquals(2, etlPartitions.size());
Assert.assertEquals(partition1Id, etlPartitions.get(0).partitionId);
Assert.assertEquals(partition2Id, etlPartitions.get(1).partitionId);
// check file group
List<EtlFileGroup> etlFileGroups = etlTable.fileGroups;
Assert.assertEquals(1, etlFileGroups.size());
// case 1: temporary partition in load stmt
// file group
aggKeyToFileGroups = Maps.newHashMap();
brokerFileGroups = Lists.newArrayList();
PartitionNames partitionNames = new PartitionNames(true, Lists.newArrayList("tp3"));
desc = new DataDescription("testTable", partitionNames, Lists.newArrayList("abc.txt"),
null, null, null, null, false, null);
brokerFileGroup = new BrokerFileGroup(desc);
brokerFileGroups.add(brokerFileGroup);
aggKey = new BrokerFileGroupAggInfo.FileGroupAggKey(tableId, Lists.newArrayList(partition3Id));
aggKeyToFileGroups.put(aggKey, brokerFileGroups);
// create pending task
task = new SparkLoadPendingTask(sparkLoadJob, aggKeyToFileGroups, resource, brokerDesc);
task.init();
etlJobConfig = Deencapsulation.getField(task, "etlJobConfig");
Assert.assertTrue(etlJobConfig != null);
idToEtlTable = etlJobConfig.tables;
etlTable = idToEtlTable.get(tableId);
// check partitions
etlPartitionInfo = etlTable.partitionInfo;
Assert.assertEquals("RANGE", etlPartitionInfo.partitionType);
partitionColumns = etlPartitionInfo.partitionColumnRefs;
Assert.assertEquals(1, partitionColumns.size());
Assert.assertEquals(columns.get(partitionColumnIndex).getName(), partitionColumns.get(0));
distributionColumns = etlPartitionInfo.distributionColumnRefs;
Assert.assertEquals(1, distributionColumns.size());
Assert.assertEquals(columns.get(distributionColumnIndex).getName(), distributionColumns.get(0));
etlPartitions = etlPartitionInfo.partitions;
Assert.assertEquals(1, etlPartitions.size());
Assert.assertEquals(partition3Id, etlPartitions.get(0).partitionId);
}
|
@Override
public void judgeContinueToExecute(final SQLStatement statement) throws SQLException {
ShardingSpherePreconditions.checkState(statement instanceof CommitStatement || statement instanceof RollbackStatement,
() -> new SQLFeatureNotSupportedException("Current transaction is aborted, commands ignored until end of transaction block."));
}
|
@Test
void assertJudgeContinueToExecuteWithCommitStatement() {
assertDoesNotThrow(() -> allowedSQLStatementHandler.judgeContinueToExecute(mock(CommitStatement.class)));
}
|
@Override
public void registerInstance(String namespaceId, String serviceName, Instance instance) throws NacosException {
NamingUtils.checkInstanceIsLegal(instance);
boolean ephemeral = instance.isEphemeral();
String clientId = IpPortBasedClient.getClientId(instance.toInetAddr(), ephemeral);
createIpPortClientIfAbsent(clientId);
Service service = getService(namespaceId, serviceName, ephemeral);
clientOperationService.registerInstance(service, instance, clientId);
}
|
@Test
void testRegisterInstanceWithInvalidClusterName() throws NacosException {
Throwable exception = assertThrows(NacosException.class, () -> {
Instance instance = new Instance();
instance.setEphemeral(true);
instance.setClusterName("cluster1,cluster2");
new InstanceOperatorClientImpl(null, null, null, null, null, null, null).registerInstance("ns-01", "serviceName01", instance);
});
assertTrue(exception.getMessage()
.contains("Instance 'clusterName' should be characters with only 0-9a-zA-Z-. (current: cluster1,cluster2)"));
}
|
@Override
public List<?> deserialize(final String topic, final byte[] bytes) {
if (bytes == null) {
return null;
}
try {
final String recordCsvString = new String(bytes, StandardCharsets.UTF_8);
final List<CSVRecord> csvRecords = CSVParser.parse(recordCsvString, csvFormat)
.getRecords();
if (csvRecords.isEmpty()) {
throw new SerializationException("No fields in record");
}
final CSVRecord csvRecord = csvRecords.get(0);
if (csvRecord == null || csvRecord.size() == 0) {
throw new SerializationException("No fields in record.");
}
SerdeUtils.throwOnColumnCountMismatch(parsers.size(), csvRecord.size(), false, topic);
final List<Object> values = new ArrayList<>(parsers.size());
final Iterator<Parser> pIt = parsers.iterator();
for (int i = 0; i < csvRecord.size(); i++) {
final String value = csvRecord.get(i);
final Parser parser = pIt.next();
final Object parsed = value == null || value.isEmpty()
? null
: parser.parse(value);
values.add(parsed);
}
return values;
} catch (final Exception e) {
throw new SerializationException("Error deserializing delimited", e);
}
}
|
@Test
public void shouldDeserializeDelimitedCorrectly() {
// Given:
final byte[] bytes = "1511897796092,1,item_1,10.0,10.10,100,10,100,ew==\r\n".getBytes(StandardCharsets.UTF_8);
// When:
final List<?> result = deserializer.deserialize("", bytes);
// Then:
assertThat(result, contains(1511897796092L, 1L, "item_1", 10.0, new BigDecimal("10.10"), new Time(100), new Date(864000000), new Timestamp(100),
ByteBuffer.wrap(new byte[] {123})));
}
|
public static List<AclEntry> replaceAclEntries(List<AclEntry> existingAcl,
List<AclEntry> inAclSpec) throws AclException {
ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
// Replacement is done separately for each scope: access and default.
EnumMap<AclEntryScope, AclEntry> providedMask =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
for (AclEntry aclSpecEntry: aclSpec) {
scopeDirty.add(aclSpecEntry.getScope());
if (aclSpecEntry.getType() == MASK) {
providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
maskDirty.add(aclSpecEntry.getScope());
} else {
aclBuilder.add(aclSpecEntry);
}
}
// Copy existing entries if the scope was not replaced.
for (AclEntry existingEntry: existingAcl) {
if (!scopeDirty.contains(existingEntry.getScope())) {
if (existingEntry.getType() == MASK) {
providedMask.put(existingEntry.getScope(), existingEntry);
} else {
aclBuilder.add(existingEntry);
}
}
}
copyDefaultsIfNeeded(aclBuilder);
calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
return buildAndValidateAcl(aclBuilder);
}
|
@Test(expected=AclException.class)
public void testReplaceAclEntriesResultTooLarge() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayListWithCapacity(32);
aclSpec.add(aclEntry(ACCESS, USER, ALL));
for (int i = 1; i <= 29; ++i) {
aclSpec.add(aclEntry(ACCESS, USER, "user" + i, READ));
}
aclSpec.add(aclEntry(ACCESS, GROUP, READ));
aclSpec.add(aclEntry(ACCESS, OTHER, NONE));
// The ACL spec now has 32 entries. Automatic mask calculation will push it
// over the limit to 33.
replaceAclEntries(existing, aclSpec);
}
|
int asyncBackups(int requestedSyncBackups, int requestedAsyncBackups, boolean syncForced) {
if (syncForced || requestedAsyncBackups == 0) {
// if syncForced, then there will never be any async backups (they are forced to become sync)
// if there are no asyncBackups then we are also done.
return 0;
}
InternalPartitionService partitionService = node.getPartitionService();
int maxBackupCount = partitionService.getMaxAllowedBackupCount();
return min(maxBackupCount - requestedSyncBackups, requestedAsyncBackups);
}
|
@Test
public void asyncBackups_whenForceSyncEnabled() {
setup(true);
// when forceSync is enabled, then async should always be 0
assertEquals(0, backupHandler.asyncBackups(0, 0, FORCE_SYNC_ENABLED));
assertEquals(0, backupHandler.asyncBackups(0, 1, FORCE_SYNC_ENABLED));
assertEquals(0, backupHandler.asyncBackups(2, 0, FORCE_SYNC_ENABLED));
assertEquals(0, backupHandler.asyncBackups(2, 1, FORCE_SYNC_ENABLED));
// see what happens when we reach maximum number of backups
assertEquals(0, backupHandler.asyncBackups(0, BACKUPS + 1, FORCE_SYNC_ENABLED));
}
|
Future<Boolean> canRollController(int nodeId) {
LOGGER.debugCr(reconciliation, "Determining whether controller pod {} can be rolled", nodeId);
return describeMetadataQuorum().map(info -> {
boolean canRoll = isQuorumHealthyWithoutNode(nodeId, info);
if (!canRoll) {
LOGGER.debugCr(reconciliation, "Not restarting controller pod {}. Restart would affect the quorum health", nodeId);
}
return canRoll;
}).recover(error -> {
LOGGER.warnCr(reconciliation, "Error determining whether it is safe to restart controller pod {}", nodeId, error);
return Future.failedFuture(error);
});
}
|
@Test
public void canRollActiveControllerEvenSizedCluster(VertxTestContext context) {
Map<Integer, OptionalLong> controllers = new HashMap<>();
controllers.put(1, OptionalLong.of(10000L));
controllers.put(2, OptionalLong.of(9500L));
controllers.put(3, OptionalLong.of(9700L));
controllers.put(4, OptionalLong.of(9600L));
Admin admin = setUpMocks(1, controllers);
KafkaQuorumCheck quorumCheck = new KafkaQuorumCheck(Reconciliation.DUMMY_RECONCILIATION, admin, vertx, CONTROLLER_QUORUM_FETCH_TIMEOUT_MS);
quorumCheck.canRollController(1).onComplete(context.succeeding(result -> {
context.verify(() -> assertTrue(result));
context.completeNow();
}));
}
|
public static String builderData(final String paramType, final String paramName, final ServerWebExchange exchange) {
return newInstance(paramType).builder(paramName, exchange);
}
|
@Test
public void testBuildHostData() {
ConfigurableApplicationContext context = mock(ConfigurableApplicationContext.class);
SpringBeanUtils.getInstance().setApplicationContext(context);
RemoteAddressResolver remoteAddressResolver = new RemoteAddressResolver() {
};
ServerWebExchange exchange = MockServerWebExchange.from(MockServerHttpRequest.get("/http")
.remoteAddress(new InetSocketAddress("localhost", 8085))
.build());
when(context.getBean(RemoteAddressResolver.class)).thenReturn(remoteAddressResolver);
assertEquals("localhost", ParameterDataFactory.builderData("host", null, exchange));
}
|
public static String keyToString(Object key,
URLEscaper.Escaping escaping,
UriComponent.Type componentType,
boolean full,
ProtocolVersion version)
{
if (version.compareTo(AllProtocolVersions.RESTLI_PROTOCOL_2_0_0.getProtocolVersion()) >= 0)
{
return keyToStringV2(key, escaping, componentType, full);
}
else
{
return keyToStringV1(key, escaping, full);
}
}
|
@Test(dataProvider = TestConstants.RESTLI_PROTOCOL_1_2_PREFIX + "complexKey")
public void testComplexKeyToString(ProtocolVersion version, String full, String notFull)
{
MyComplexKey myComplexKey1 = new MyComplexKey();
myComplexKey1.setA("stringVal");
myComplexKey1.setB(3);
MyComplexKey myComplexKey2 = new MyComplexKey();
myComplexKey2.setA("anotherStringVal");
myComplexKey2.setB(4);
ComplexResourceKey<MyComplexKey, MyComplexKey> complexKey =
new ComplexResourceKey<>(myComplexKey1, myComplexKey2);
String complexKeyString = URIParamUtils.keyToString(complexKey, NO_ESCAPING, null, true, version);
Assert.assertEquals(complexKeyString, full);
// not full
String complexKeyStringNotFull = URIParamUtils.keyToString(complexKey, NO_ESCAPING, null, false, version);
Assert.assertEquals(complexKeyStringNotFull, notFull);
}
|
@Override
public WatchKey register(final Watchable folder, final WatchEvent.Kind<?>[] events,
final WatchEvent.Modifier... modifiers) throws IOException {
if(null == monitor) {
monitor = FileSystems.getDefault().newWatchService();
}
final WatchKey key = folder.register(monitor, events, modifiers);
if(log.isInfoEnabled()) {
log.info(String.format("Registered for events for %s", key));
}
return key;
}
|
@Test(expected = IOException.class)
public void testNotfound() throws Exception {
final FileWatcher watcher = new FileWatcher(new NIOEventWatchService());
final Local file = new Local(System.getProperty("java.io.tmpdir") + "/notfound", UUID.randomUUID().toString());
assertFalse(file.exists());
watcher.register(file.getParent(), new FileWatcher.DefaultFileFilter(file), new DisabledFileWatcherListener());
}
|
@Override
@Nullable
public double[] readDoubleArray(@Nonnull String fieldName) throws IOException {
return readIncompatibleField(fieldName, DOUBLE_ARRAY, super::readDoubleArray);
}
|
@Test
public void testReadDoubleArray() throws Exception {
assertNull(reader.readDoubleArray("NO SUCH FIELD"));
}
|
@Override
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
try {
String partitionColumn = job.get(Constants.JDBC_PARTITION_COLUMN);
int numPartitions = job.getInt(Constants.JDBC_NUM_PARTITIONS, -1);
String lowerBound = job.get(Constants.JDBC_LOW_BOUND);
String upperBound = job.get(Constants.JDBC_UPPER_BOUND);
InputSplit[] splits;
if (!job.getBoolean(Constants.JDBC_SPLIT_QUERY, true) || numPartitions <= 1) {
// We will not split this query if:
// 1. hive.sql.query.split is set to false (either manually or automatically by calcite)
// 2. numPartitions == 1
splits = new InputSplit[1];
splits[0] = new JdbcInputSplit(FileInputFormat.getInputPaths(job)[0]);
LOGGER.info("Creating 1 input split " + splits[0]);
return splits;
}
dbAccessor = DatabaseAccessorFactory.getAccessor(job);
Path[] tablePaths = FileInputFormat.getInputPaths(job);
// We will split this query into n splits
LOGGER.debug("Creating {} input splits", numPartitions);
if (partitionColumn != null) {
List<String> columnNames = dbAccessor.getColumnNames(job);
if (!columnNames.contains(partitionColumn)) {
throw new IOException("Cannot find partitionColumn:" + partitionColumn + " in " + columnNames);
}
List<TypeInfo> hiveColumnTypesList = dbAccessor.getColumnTypes(job);
TypeInfo typeInfo = hiveColumnTypesList.get(columnNames.indexOf(partitionColumn));
if (!(typeInfo instanceof PrimitiveTypeInfo)) {
throw new IOException(partitionColumn + " is a complex type, only primitive type can be a partition column");
}
if (lowerBound == null || upperBound == null) {
Pair<String, String> boundary = dbAccessor.getBounds(job, partitionColumn, lowerBound == null,
upperBound == null);
if (lowerBound == null) {
lowerBound = boundary.getLeft();
}
if (upperBound == null) {
upperBound = boundary.getRight();
}
}
if (lowerBound == null) {
throw new IOException("lowerBound of " + partitionColumn + " cannot be null");
}
if (upperBound == null) {
throw new IOException("upperBound of " + partitionColumn + " cannot be null");
}
IntervalSplitter intervalSplitter = IntervalSplitterFactory.newIntervalSpitter(typeInfo);
List<MutablePair<String, String>> intervals = intervalSplitter.getIntervals(lowerBound, upperBound, numPartitions,
typeInfo);
if (intervals.size()<=1) {
LOGGER.debug("Creating 1 input splits");
splits = new InputSplit[1];
splits[0] = new JdbcInputSplit(FileInputFormat.getInputPaths(job)[0]);
return splits;
}
intervals.get(0).setLeft(null);
intervals.get(intervals.size()-1).setRight(null);
splits = new InputSplit[intervals.size()];
for (int i = 0; i < intervals.size(); i++) {
splits[i] = new JdbcInputSplit(partitionColumn, intervals.get(i).getLeft(), intervals.get(i).getRight(), tablePaths[0]);
}
} else {
int numRecords = dbAccessor.getTotalNumberOfRecords(job);
if (numRecords < numPartitions) {
numPartitions = numRecords;
}
int numRecordsPerSplit = numRecords / numPartitions;
int numSplitsWithExtraRecords = numRecords % numPartitions;
LOGGER.debug("Num records = {}", numRecords);
splits = new InputSplit[numPartitions];
int offset = 0;
for (int i = 0; i < numPartitions; i++) {
int numRecordsInThisSplit = numRecordsPerSplit;
if (i < numSplitsWithExtraRecords) {
numRecordsInThisSplit++;
}
splits[i] = new JdbcInputSplit(numRecordsInThisSplit, offset, tablePaths[0]);
offset += numRecordsInThisSplit;
}
}
dbAccessor = null;
LOGGER.info("Num input splits created {}", splits.length);
for (InputSplit split : splits) {
LOGGER.info("split:" + split.toString());
}
return splits;
}
catch (Exception e) {
LOGGER.error("Error while splitting input data.", e);
throw new IOException(e);
}
}
|
@Test
public void testIntervalSplit_Timestamp() throws HiveJdbcDatabaseAccessException, IOException {
JdbcInputFormat f = new JdbcInputFormat();
when(mockDatabaseAccessor.getColumnNames(any(Configuration.class))).thenReturn(Lists.newArrayList("a"));
when(mockDatabaseAccessor.getBounds(any(Configuration.class), any(String.class), anyBoolean(), anyBoolean()))
.thenReturn(new ImmutablePair<String, String>("2010-01-01 00:00:00.000000000", "2018-01-01 " +
"12:00:00.000000000"));
List<TypeInfo> columnTypes = Collections.singletonList(TypeInfoFactory.timestampTypeInfo);
when(mockDatabaseAccessor.getColumnTypes(any(Configuration.class))).thenReturn(columnTypes);
JobConf conf = new JobConf();
conf.set("mapred.input.dir", "/temp");
conf.set("hive.sql.partitionColumn", "a");
conf.set("hive.sql.numPartitions", "2");
InputSplit[] splits = f.getSplits(conf, -1);
assertThat(splits, is(notNullValue()));
assertThat(splits.length, is(2));
assertNull(((JdbcInputSplit)splits[0]).getLowerBound());
assertEquals(((JdbcInputSplit)splits[0]).getUpperBound(), "2014-01-01 06:00:00.0");
assertEquals(((JdbcInputSplit)splits[1]).getLowerBound(), "2014-01-01 06:00:00.0");
assertNull(((JdbcInputSplit)splits[1]).getUpperBound());
}
|
protected Authorization parseAuthLine(String line) throws ParseException {
String[] tokens = line.split("\\s+");
String keyword = tokens[0].toLowerCase();
switch (keyword) {
case "topic":
return createAuthorization(line, tokens);
case "user":
m_parsingUsersSpecificSection = true;
m_currentUser = tokens[1];
m_parsingPatternSpecificSection = false;
return null;
case "pattern":
m_parsingUsersSpecificSection = false;
m_currentUser = "";
m_parsingPatternSpecificSection = true;
return createAuthorization(line, tokens);
default:
throw new ParseException(String.format("invalid line definition found %s", line), 1);
}
}
|
@Test
public void testParseAuthLineValid_read() throws ParseException {
Authorization authorization = authorizator.parseAuthLine("topic read /weather/italy/anemometer");
// Verify
assertEquals(R_ANEMOMETER, authorization);
}
|
@Override
public Set<GPUInfo> retrieveResourceInfo(long gpuAmount) throws Exception {
Preconditions.checkArgument(
gpuAmount > 0,
"The gpuAmount should be positive when retrieving the GPU resource information.");
final Set<GPUInfo> gpuResources = new HashSet<>();
String output = executeDiscoveryScript(discoveryScriptFile, gpuAmount, args);
if (!output.isEmpty()) {
String[] indexes = output.split(",");
for (String index : indexes) {
if (!StringUtils.isNullOrWhitespaceOnly(index)) {
gpuResources.add(new GPUInfo(index.trim()));
}
}
}
LOG.info("Discover GPU resources: {}.", gpuResources);
return Collections.unmodifiableSet(gpuResources);
}
|
@Test
void testGPUDriverWithTestScript() throws Exception {
final int gpuAmount = 2;
final Configuration config = new Configuration();
config.set(GPUDriverOptions.DISCOVERY_SCRIPT_PATH, TESTING_DISCOVERY_SCRIPT_PATH);
final GPUDriver gpuDriver = new GPUDriver(config);
final Set<GPUInfo> gpuResource = gpuDriver.retrieveResourceInfo(gpuAmount);
assertThat(gpuResource).hasSize(gpuAmount);
}
|
public static <T> Deserializer<T> deserializer(final Class<T> clazz) {
return new InternalTopicDeserializer<>(clazz);
}
|
@Test
public void shouldUsePlanMapperForDeserialize() {
// When:
final Expression deserialized = InternalTopicSerdes.deserializer(Expression.class).deserialize(
"",
"\"(123 + 456)\"".getBytes(Charset.defaultCharset())
);
// Then:
assertThat(deserialized, equalTo(EXPRESSION));
}
|
@Override
public List<String> getTenantIdList(int page, int pageSize) {
PaginationHelper<Map<String, Object>> helper = createPaginationHelper();
ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO);
int from = (page - 1) * pageSize;
MapperResult mapperResult = configInfoMapper.getTenantIdList(new MapperContext(from, pageSize));
Page<Map<String, Object>> pageList = helper.fetchPageLimit(mapperResult.getSql(),
mapperResult.getParamList().toArray(), page, pageSize, MAP_ROW_MAPPER);
return pageList.getPageItems().stream().map(map -> String.valueOf(map.get("TENANT_ID")))
.collect(Collectors.toList());
}
|
@Test
void testGetTenantIdList() {
// mock the tenant id query result
List<String> tenantStrings = Arrays.asList("tenant1", "tenant2", "tenant3");
Map<String, Object> g1 = new HashMap<>();
g1.put("TENANT_ID", tenantStrings.get(0));
Map<String, Object> g2 = new HashMap<>();
g2.put("TENANT_ID", tenantStrings.get(1));
Map<String, Object> g3 = new HashMap<>();
g3.put("TENANT_ID", tenantStrings.get(2));
List<Map<String, Object>> params = new ArrayList<>();
params.addAll(Arrays.asList(g1, g2, g3));
when(databaseOperate.queryMany(anyString(), eq(new Object[] {}), eq(MAP_ROW_MAPPER))).thenReturn(params);
int page = 10;
int pageSize = 100;
// execute and get the mocked result
List<String> returnTenants = embeddedConfigInfoPersistService.getTenantIdList(page, pageSize);
// verify the result
assertEquals(tenantStrings, returnTenants);
}
|
@Override
public Map<Errors, Integer> errorCounts() {
if (data.errorCode() != Errors.NONE.code())
// Minor optimization since the top-level error applies to all partitions
return Collections.singletonMap(error(), data.partitionErrors().size() + 1);
Map<Errors, Integer> errors = errorCounts(data.partitionErrors().stream().map(p -> Errors.forCode(p.errorCode())));
updateErrorCounts(errors, Errors.forCode(data.errorCode())); // top level error
return errors;
}
|
@Test
public void testErrorCountsWithTopLevelError() {
List<StopReplicaPartitionError> errors = new ArrayList<>();
errors.add(new StopReplicaPartitionError().setTopicName("foo").setPartitionIndex(0));
errors.add(new StopReplicaPartitionError().setTopicName("foo").setPartitionIndex(1)
.setErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code()));
StopReplicaResponse response = new StopReplicaResponse(new StopReplicaResponseData()
.setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code())
.setPartitionErrors(errors));
assertEquals(Collections.singletonMap(Errors.UNKNOWN_SERVER_ERROR, 3), response.errorCounts());
}
|
@Override
public File selectAllocationBaseDirectory(int idx) {
return allocationBaseDirs[idx];
}
|
@Test
void selectAllocationBaseDir() {
for (int i = 0; i < allocBaseFolders.length; ++i) {
assertThat(directoryProvider.selectAllocationBaseDirectory(i))
.isEqualTo(allocBaseFolders[i]);
}
}
|
public static boolean isEmpty(String str) {
return str == null || str.length() == 0;
}
|
@Test
public void testIsEmpty() {
Assert.assertFalse(StringUtil.isEmpty("bar"));
Assert.assertTrue(StringUtil.isEmpty(""));
}
|
boolean needsMigration() {
File mappingFile = UserIdMapper.getConfigFile(usersDirectory);
if (mappingFile.exists() && mappingFile.isFile()) {
LOGGER.finest("User mapping file already exists. No migration needed.");
return false;
}
File[] userDirectories = listUserDirectories();
return userDirectories != null && userDirectories.length > 0;
}
|
@Test
public void needsMigrationNoneExisting() throws IOException {
UserIdMigrator migrator = createUserIdMigrator();
assertThat(migrator.needsMigration(), is(false));
}
|
public static String processingLogTopic(
final ProcessingLogConfig config,
final KsqlConfig ksqlConfig
) {
final String topicNameConfig = config.getString(ProcessingLogConfig.TOPIC_NAME);
if (topicNameConfig.equals(ProcessingLogConfig.TOPIC_NAME_NOT_SET)) {
return String.format(
"%s%s",
ksqlConfig.getString(KsqlConfig.KSQL_SERVICE_ID_CONFIG),
ProcessingLogConfig.TOPIC_NAME_DEFAULT_SUFFIX
);
} else {
return topicNameConfig;
}
}
|
@Test
public void shouldReturnProcessingLogTopic() {
// Given/When
final ProcessingLogConfig processingLogConfig = new ProcessingLogConfig(ImmutableMap.of());
final String processingLogTopic = ReservedInternalTopics.processingLogTopic(
processingLogConfig, ksqlConfig);
// Then
assertThat(processingLogTopic, is("default_ksql_processing_log"));
}
|
@Override
public Flux<RawMetric> retrieve(KafkaCluster c, Node node) {
log.debug("Retrieving metrics from prometheus exporter: {}:{}", node.host(), c.getMetricsConfig().getPort());
MetricsConfig metricsConfig = c.getMetricsConfig();
var webClient = new WebClientConfigurator()
.configureBufferSize(DataSize.ofMegabytes(20))
.configureBasicAuth(metricsConfig.getUsername(), metricsConfig.getPassword())
.configureSsl(
c.getOriginalProperties().getSsl(),
new ClustersProperties.KeystoreConfig(
metricsConfig.getKeystoreLocation(),
metricsConfig.getKeystorePassword()))
.build();
return retrieve(webClient, node.host(), c.getMetricsConfig());
}
|
@Test
void callsSecureMetricsEndpointAndConvertsResponseToRawMetric() {
var url = mockWebServer.url("/metrics");
mockWebServer.enqueue(prepareResponse());
MetricsConfig metricsConfig = prepareMetricsConfig(url.port(), "username", "password");
StepVerifier.create(retriever.retrieve(WebClient.create(), url.host(), metricsConfig))
.expectNextSequence(expectedRawMetrics())
// third metric should not be present, since it has "NaN" value
.verifyComplete();
}
|
public static long makeTemplate(TemplateMakerConfig templateMakerConfig) {
Meta meta = templateMakerConfig.getMeta();
String originProjectPath = templateMakerConfig.getOriginProjectPath();
TemplateMakerFileConfig templateMakerFileConfig = templateMakerConfig.getFileConfig();
TemplateMakerModelConfig templateMakerModelConfig = templateMakerConfig.getModelConfig();
Long id = templateMakerConfig.getId();
TemplateMakerOutputConfig templateMakerOutputConfig = templateMakerConfig.getOutputConfig();
return makeTemplate(meta, originProjectPath, templateMakerFileConfig, templateMakerModelConfig, templateMakerOutputConfig, id);
}
|
@Test
public void makeTemplateBug2() {
System.out.println("------------------- 测试Spring boot init 项目 makeTemplateBug2 ------------------");
Meta meta = new Meta();
// 基本信息
meta.setName("spring boot init ");
meta.setDescription("spring boot 初始化项目");
String projectPath = System.getProperty("user.dir");
String originProjectPath = FileUtil.normalize(new File(projectPath).getParent() + File.separator + "code-generator-demo-projects" + File.separator + "springboot-init");
// String fileInputPath2 = "src/main/java/com/enndfp/springbootinit/common";
String fileInputPath2 = "./";
TemplateMakerModelConfig templateMakerModelConfig = new TemplateMakerModelConfig();
ArrayList<TemplateMakerModelConfig.ModelInfoConfig> modelInfoConfigList = new ArrayList<>();
TemplateMakerModelConfig.ModelInfoConfig modelInfoConfig1 = new TemplateMakerModelConfig.ModelInfoConfig();
modelInfoConfig1.setFieldName("className");
modelInfoConfig1.setType("String");
modelInfoConfig1.setDescription("class name replacement");
modelInfoConfig1.setReplaceText("BaseResponse");
modelInfoConfigList.add(modelInfoConfig1);
templateMakerModelConfig.setModels(modelInfoConfigList);
TemplateMakerFileConfig makerFileConfig = new TemplateMakerFileConfig();
TemplateMakerFileConfig.FileInfoConfig fileInfoConfig = new TemplateMakerFileConfig.FileInfoConfig();
fileInfoConfig.setPath(fileInputPath2);
makerFileConfig.setFiles(Collections.singletonList(fileInfoConfig));
long id = TemplateMaker.makeTemplate(meta, originProjectPath, makerFileConfig, templateMakerModelConfig, null, 1753283378534051840L);
System.out.println("id:" + id);
System.out.println("--------------------- 测试完成Spring boot init 项目 makeTemplateBug2 -----------------------");
}
|
@Override
public ObjectNode encode(Host host, CodecContext context) {
checkNotNull(host, NULL_OBJECT_MSG);
final JsonCodec<HostLocation> locationCodec =
context.codec(HostLocation.class);
// keep fields in string for compatibility
final ObjectNode result = context.mapper().createObjectNode()
.put(HOST_ID, host.id().toString())
.put(MAC, host.mac().toString())
.put(VLAN, host.vlan().toString())
.put(INNER_VLAN, host.innerVlan().toString())
// use a 4-digit hex string when encoding the ethernet type
.put(OUTER_TPID, String.format("0x%04x", host.tpid().toShort()))
.put(IS_CONFIGURED, host.configured())
.put(IS_SUSPENDED, host.suspended());
final ArrayNode jsonIpAddresses = result.putArray(IP_ADDRESSES);
for (final IpAddress ipAddress : host.ipAddresses()) {
jsonIpAddresses.add(ipAddress.toString());
}
result.set(IP_ADDRESSES, jsonIpAddresses);
final ArrayNode jsonLocations = result.putArray(HOST_LOCATIONS);
for (final HostLocation location : host.locations()) {
jsonLocations.add(locationCodec.encode(location, context));
}
result.set(HOST_LOCATIONS, jsonLocations);
if (host.auxLocations() != null) {
final ArrayNode jsonAuxLocations = result.putArray(AUX_LOCATIONS);
for (final HostLocation auxLocation : host.auxLocations()) {
jsonAuxLocations.add(locationCodec.encode(auxLocation, context));
}
result.set(AUX_LOCATIONS, jsonAuxLocations);
}
return annotate(result, host, context);
}
|
@Test
public void testEncode() throws IOException {
InputStream jsonStream = HostCodec.class.getResourceAsStream(JSON_FILE);
JsonNode jsonString = context.mapper().readTree(jsonStream);
ObjectNode expected = hostCodec.encode(HOST, context);
// Host ID is not a field in Host but rather derived from MAC + VLAN.
// Derived information should not be part of the JSON really.
// However, we keep it as is for backward compatibility.
expected.remove(HostCodec.HOST_ID);
assertEquals(expected, jsonString);
}
|
@Override
@Nullable
public char[] readCharArray() throws EOFException {
int len = readInt();
if (len == NULL_ARRAY_LENGTH) {
return null;
}
if (len > 0) {
char[] values = new char[len];
for (int i = 0; i < len; i++) {
values[i] = readChar();
}
return values;
}
return new char[0];
}
|
@Test
public void testReadCharArray() throws Exception {
byte[] bytesBE = {0, 0, 0, 0, 0, 0, 0, 1, 0, 1, -1, -1, -1, -1};
byte[] bytesLE = {0, 0, 0, 0, 1, 0, 0, 0, 1, 0, -1, -1, -1, -1};
in.init((byteOrder == BIG_ENDIAN ? bytesBE : bytesLE), 0);
in.position(10);
char[] theNullArray = in.readCharArray();
in.position(0);
char[] theZeroLengthArray = in.readCharArray();
in.position(4);
char[] charArray = in.readCharArray();
assertNull(theNullArray);
assertArrayEquals(new char[0], theZeroLengthArray);
assertArrayEquals(new char[]{1}, charArray);
}
|
public void listenToService(String serviceName)
{
_watchedServiceResources.computeIfAbsent(serviceName, k ->
{
XdsClient.NodeResourceWatcher watcher = getServiceResourceWatcher(serviceName);
_xdsClient.watchXdsResource(D2_SERVICE_NODE_PREFIX + serviceName, watcher);
return watcher;
});
}
|
@Test(dataProvider = "provideTransportClientProperties")
public void testListenToService(Map<String, Object> clientOverride, Map<String, Object> original,
Map<String, Object> overridden)
{
XdsToD2PropertiesAdaptorFixture fixture = new XdsToD2PropertiesAdaptorFixture();
String serviceName = "FooService";
fixture.getSpiedAdaptor(Collections.singletonMap(serviceName, clientOverride))
.listenToService(serviceName);
verify(fixture._xdsClient).watchXdsResource(eq("/d2/services/" + serviceName), anyNodeWatcher());
NodeResourceWatcher symlinkNodeWatcher = fixture._nodeWatcher;
symlinkNodeWatcher.onChanged(new XdsClient.NodeUpdate(XdsD2.Node.newBuilder()
.setData(
ByteString.copyFrom(
new ServicePropertiesJsonSerializer().toBytes(
new ServiceProperties(
serviceName,
PRIMARY_CLUSTER_NAME,
"",
Collections.singletonList("relative"),
Collections.emptyMap(),
original,
Collections.emptyMap(), Collections.emptyList(), Collections.emptySet()
)
)
)
)
.setStat(XdsD2.Stat.newBuilder().setMzxid(1L).build())
.build())
);
verify(fixture._serviceEventBus).publishInitialize(serviceName,
new ServiceStoreProperties(serviceName, PRIMARY_CLUSTER_NAME, "",
Collections.singletonList("relative"),
Collections.emptyMap(),
overridden,
Collections.<String, String>emptyMap(), Collections.emptyList(), Collections.emptySet())
);
}
|
public static int readUint16(ByteBuffer buf) throws BufferUnderflowException {
return Short.toUnsignedInt(buf.order(ByteOrder.LITTLE_ENDIAN).getShort());
}
|
@Test(expected = ArrayIndexOutOfBoundsException.class)
public void testReadUint16ThrowsException1() {
ByteUtils.readUint16(new byte[]{1}, 2);
}
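
The focal method reads a little-endian unsigned 16-bit value from a ByteBuffer, while the test above exercises an array-based overload with an out-of-bounds offset. As a hedged illustration (plain Java, not the library's ByteUtils API), the same value can be computed by hand from the low and high bytes:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

final class Uint16Sketch {
    // Little-endian: low byte first, then high byte shifted into the upper 8 bits.
    static int readUint16(byte[] bytes, int offset) {
        return (bytes[offset] & 0xFF) | ((bytes[offset + 1] & 0xFF) << 8);
    }

    public static void main(String[] args) {
        byte[] data = {(byte) 0x34, (byte) 0x12};
        // Both computations yield 0x1234 (4660).
        System.out.println(readUint16(data, 0));
        System.out.println(Short.toUnsignedInt(
                ByteBuffer.wrap(data).order(ByteOrder.LITTLE_ENDIAN).getShort()));
    }
}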
|
protected synchronized void addSoftwareIdentifiers(Set<Identifier> identifiers) {
this.softwareIdentifiers.addAll(identifiers);
}
|
@Test
public void testAddSoftwareIdentifiers() {
Set<Identifier> identifiers = new HashSet<>();
Dependency instance = new Dependency();
instance.addSoftwareIdentifiers(identifiers);
assertNotNull(instance.getSoftwareIdentifiers());
}
|
public static SchemaKStream<?> buildSource(
final PlanBuildContext buildContext,
final DataSource dataSource,
final QueryContext.Stacker contextStacker
) {
final boolean windowed = dataSource.getKsqlTopic().getKeyFormat().isWindowed();
switch (dataSource.getDataSourceType()) {
case KSTREAM:
return windowed
? buildWindowedStream(
buildContext,
dataSource,
contextStacker
) : buildStream(
buildContext,
dataSource,
contextStacker
);
case KTABLE:
return windowed
? buildWindowedTable(
buildContext,
dataSource,
contextStacker
) : buildTable(
buildContext,
dataSource,
contextStacker
);
default:
throw new UnsupportedOperationException("Source type:" + dataSource.getDataSourceType());
}
}
|
@Test
public void shouldBuildWindowedStream() {
// Given:
givenWindowedStream();
// When:
final SchemaKStream<?> result = SchemaKSourceFactory.buildSource(
buildContext,
dataSource,
contextStacker
);
// Then:
assertThat(result, not(instanceOf(SchemaKTable.class)));
assertThat(result.getSourceStep(), instanceOf(WindowedStreamSource.class));
assertValidSchema(result);
assertThat(result.getSourceStep().getSources(), is(empty()));
}
|
public Map<String, Object> valuesWithPrefixAllOrNothing(String prefix) {
Map<String, Object> withPrefix = originalsWithPrefix(prefix, true);
if (withPrefix.isEmpty()) {
return new RecordingMap<>(values(), "", true);
} else {
Map<String, Object> result = new RecordingMap<>(prefix, true);
for (Map.Entry<String, ?> entry : withPrefix.entrySet()) {
ConfigDef.ConfigKey configKey = definition.configKeys().get(entry.getKey());
if (configKey != null)
result.put(entry.getKey(), definition.parseValue(configKey, entry.getValue(), true));
}
return result;
}
}
|
@Test
public void testValuesWithPrefixAllOrNothing() {
String prefix1 = "prefix1.";
String prefix2 = "prefix2.";
Properties props = new Properties();
props.put("sasl.mechanism", "PLAIN");
props.put("prefix1.sasl.mechanism", "GSSAPI");
props.put("prefix1.sasl.kerberos.kinit.cmd", "/usr/bin/kinit2");
props.put("prefix1.ssl.truststore.location", "my location");
props.put("sasl.kerberos.service.name", "service name");
props.put("ssl.keymanager.algorithm", "algorithm");
TestSecurityConfig config = new TestSecurityConfig(props);
Map<String, Object> valuesWithPrefixAllOrNothing1 = config.valuesWithPrefixAllOrNothing(prefix1);
// All prefixed values are there
assertEquals("GSSAPI", valuesWithPrefixAllOrNothing1.get("sasl.mechanism"));
assertEquals("/usr/bin/kinit2", valuesWithPrefixAllOrNothing1.get("sasl.kerberos.kinit.cmd"));
assertEquals("my location", valuesWithPrefixAllOrNothing1.get("ssl.truststore.location"));
// Non-prefixed values are missing
assertFalse(valuesWithPrefixAllOrNothing1.containsKey("sasl.kerberos.service.name"));
assertFalse(valuesWithPrefixAllOrNothing1.containsKey("ssl.keymanager.algorithm"));
Map<String, Object> valuesWithPrefixAllOrNothing2 = config.valuesWithPrefixAllOrNothing(prefix2);
assertTrue(valuesWithPrefixAllOrNothing2.containsKey("sasl.kerberos.service.name"));
assertTrue(valuesWithPrefixAllOrNothing2.containsKey("ssl.keymanager.algorithm"));
}
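
The behaviour verified above is the all-or-nothing fallback: if any configuration keys carry the prefix, only those values (with the prefix stripped) are used; if none do, all values are returned unchanged. A simplified sketch of that rule with plain maps (it skips the ConfigDef parsing and RecordingMap bookkeeping done by the real implementation):

import java.util.HashMap;
import java.util.Map;

final class PrefixAllOrNothingSketch {
    static Map<String, Object> valuesWithPrefixAllOrNothing(Map<String, Object> values, String prefix) {
        Map<String, Object> withPrefix = new HashMap<>();
        for (Map.Entry<String, Object> entry : values.entrySet()) {
            if (entry.getKey().startsWith(prefix) && entry.getKey().length() > prefix.length()) {
                withPrefix.put(entry.getKey().substring(prefix.length()), entry.getValue());
            }
        }
        return withPrefix.isEmpty() ? values : withPrefix;
    }

    public static void main(String[] args) {
        Map<String, Object> values = new HashMap<>();
        values.put("sasl.mechanism", "PLAIN");
        values.put("prefix1.sasl.mechanism", "GSSAPI");
        // "prefix1." has an override, so only the stripped override is returned.
        System.out.println(valuesWithPrefixAllOrNothing(values, "prefix1."));
        // "prefix2." has no overrides, so the full original map is returned.
        System.out.println(valuesWithPrefixAllOrNothing(values, "prefix2."));
    }
}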
|
@Override
public void execute(final ConnectionSession connectionSession) throws SQLException {
MetaDataContexts metaDataContexts = ProxyContext.getInstance().getContextManager().getMetaDataContexts();
JDBCExecutor jdbcExecutor = new JDBCExecutor(BackendExecutorContext.getInstance().getExecutorEngine(), connectionSession.getConnectionContext());
try (SQLFederationEngine sqlFederationEngine = new SQLFederationEngine(databaseName, PG_CATALOG, metaDataContexts.getMetaData(), metaDataContexts.getStatistics(), jdbcExecutor)) {
DriverExecutionPrepareEngine<JDBCExecutionUnit, Connection> prepareEngine = createDriverExecutionPrepareEngine(metaDataContexts, connectionSession);
SQLFederationContext context = new SQLFederationContext(false,
new QueryContext(sqlStatementContext, sql, parameters, SQLHintUtils.extractHint(sql), connectionSession.getConnectionContext(), metaDataContexts.getMetaData()),
metaDataContexts.getMetaData(), connectionSession.getProcessId());
ShardingSphereDatabase database = metaDataContexts.getMetaData().getDatabase(databaseName);
ResultSet resultSet = sqlFederationEngine.executeQuery(prepareEngine,
createOpenGaussSystemCatalogAdminQueryCallback(database.getProtocolType(), database.getResourceMetaData(), sqlStatementContext.getSqlStatement()), context);
queryResultMetaData = new JDBCQueryResultMetaData(resultSet.getMetaData());
mergedResult = new IteratorStreamMergedResult(Collections.singletonList(new JDBCMemoryQueryResult(resultSet, connectionSession.getProtocolType())));
}
}
|
@Test
void assertExecuteSelectVersion() throws SQLException {
when(ProxyContext.getInstance()).thenReturn(mock(ProxyContext.class, RETURNS_DEEP_STUBS));
RuleMetaData ruleMetaData = mock(RuleMetaData.class);
when(ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData().getGlobalRuleMetaData()).thenReturn(ruleMetaData);
ConfigurationProperties props = new ConfigurationProperties(new Properties());
when(ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData().getProps()).thenReturn(props);
Map<String, ShardingSphereDatabase> databases = createShardingSphereDatabaseMap();
SQLFederationRule sqlFederationRule = new SQLFederationRule(new SQLFederationRuleConfiguration(false, false, new CacheOption(1, 1L)), databases);
OpenGaussSelectStatement sqlStatement = createSelectStatementForVersion();
ShardingSphereMetaData metaData =
new ShardingSphereMetaData(databases, mock(ResourceMetaData.class, RETURNS_DEEP_STUBS), new RuleMetaData(Collections.singletonList(sqlFederationRule)), props);
when(ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData()).thenReturn(metaData);
SelectStatementContext sqlStatementContext = new SelectStatementContext(metaData, Collections.emptyList(), sqlStatement, "sharding_db", Collections.emptyList());
OpenGaussSystemCatalogAdminQueryExecutor executor =
new OpenGaussSystemCatalogAdminQueryExecutor(sqlStatementContext, "select VERSION()", "sharding_db", Collections.emptyList());
ConnectionSession connectionSession = mock(ConnectionSession.class);
when(connectionSession.getProtocolType()).thenReturn(TypedSPILoader.getService(DatabaseType.class, "openGauss"));
ConnectionContext connectionContext = mockConnectionContext();
when(connectionSession.getConnectionContext()).thenReturn(connectionContext);
executor.execute(connectionSession);
QueryResultMetaData actualMetaData = executor.getQueryResultMetaData();
assertThat(actualMetaData.getColumnCount(), is(1));
assertThat(actualMetaData.getColumnType(1), is(Types.VARCHAR));
MergedResult actualResult = executor.getMergedResult();
assertTrue(actualResult.next());
assertThat((String) actualResult.getValue(1, String.class), containsString("ShardingSphere-Proxy"));
}
|
private HostId(MacAddress mac, VlanId vlanId) {
this.mac = mac;
this.vlanId = vlanId;
}
|
@Test
public void basics() {
new EqualsTester()
.addEqualityGroup(hostId(MAC1, VLAN1), hostId(MAC1, VLAN1))
.addEqualityGroup(hostId(MAC2, VLAN2), hostId(MAC2, VLAN2))
.testEquals();
}
|
@Override
public Stream<MappingField> resolveAndValidateFields(
boolean isKey,
List<MappingField> userFields,
Map<String, String> options,
InternalSerializationService serializationService
) {
Map<QueryPath, MappingField> fieldsByPath = extractFields(userFields, isKey);
PortableId portableId = getPortableId(fieldsByPath, options, isKey);
ClassDefinition classDefinition = serializationService.getPortableContext()
.lookupClassDefinition(portableId);
        // Fallback for the case when the portable objects have not been de/serialized yet
        // and the user did not provide fields explicitly. In this case we try to
        // manually create a Portable instance and register its ClassDefinition.
if (userFields.isEmpty() && classDefinition == null) {
SerializationServiceV1 ss = (SerializationServiceV1) serializationService;
            // Try to create a Portable instance with the default constructor,
            // register its ClassDefinition, and throw the object away.
var tempPortableObj = ss.getPortableSerializer()
.createNewPortableInstance(portableId.getFactoryId(), portableId.getClassId());
if (tempPortableObj != null) {
try {
ss.getPortableContext().lookupOrRegisterClassDefinition(tempPortableObj);
} catch (Exception e) {
                    // If the default constructor doesn't initialize the Portable fields to non-null values, we're done:
                    // we can't register the class, so we abort with an exception.
throw QueryException.error("Cannot create mapping for Portable type. "
+ "Please, provide the explicit definition for all columns.");
}
classDefinition = serializationService.getPortableContext().lookupClassDefinition(portableId);
}
}
return userFields.isEmpty()
? resolveFields(isKey, classDefinition)
: resolveAndValidateFields(isKey, fieldsByPath, classDefinition);
}
|
@Test
@Parameters({
"true, __key",
"false, this"
})
public void when_fieldIsObjectAndClassDefinitionDoesNotExist_then_throws(boolean key, String prefix) {
InternalSerializationService ss = new DefaultSerializationServiceBuilder().build();
Map<String, String> options = ImmutableMap.of(
(key ? OPTION_KEY_FACTORY_ID : OPTION_VALUE_FACTORY_ID), "1",
(key ? OPTION_KEY_CLASS_ID : OPTION_VALUE_CLASS_ID), "2",
(key ? OPTION_KEY_CLASS_VERSION : OPTION_VALUE_CLASS_VERSION), "3"
);
List<MappingField> fields = singletonList(
field("object", QueryDataType.OBJECT, prefix + ".object")
);
// TODO: fix portable nested types support?
//noinspection ResultOfMethodCallIgnored
assertThatThrownBy(() -> INSTANCE.resolveAndValidateFields(key, fields, options, ss).collect(toList()))
.isInstanceOf(QueryException.class)
.hasMessageContaining("Cannot derive Portable type for '" + QueryDataTypeFamily.OBJECT + "'");
}
|
@Override
public Column convert(BasicTypeDefine typeDefine) {
Long typeDefineLength = typeDefine.getLength();
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.sourceType(typeDefine.getColumnType())
.columnLength(typeDefineLength)
.scale(typeDefine.getScale())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String irisDataType = typeDefine.getDataType().toUpperCase();
long charOrBinaryLength =
Objects.nonNull(typeDefineLength) && typeDefineLength > 0 ? typeDefineLength : 1;
switch (irisDataType) {
case IRIS_NULL:
builder.dataType(BasicType.VOID_TYPE);
break;
case IRIS_BIT:
builder.dataType(BasicType.BOOLEAN_TYPE);
break;
case IRIS_NUMERIC:
case IRIS_MONEY:
case IRIS_SMALLMONEY:
case IRIS_NUMBER:
case IRIS_DEC:
case IRIS_DECIMAL:
DecimalType decimalType;
if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) {
decimalType =
new DecimalType(
typeDefine.getPrecision().intValue(), typeDefine.getScale());
} else {
decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
}
builder.dataType(decimalType);
builder.columnLength(Long.valueOf(decimalType.getPrecision()));
builder.scale(decimalType.getScale());
break;
case IRIS_INT:
case IRIS_INTEGER:
case IRIS_MEDIUMINT:
builder.dataType(BasicType.INT_TYPE);
break;
case IRIS_ROWVERSION:
case IRIS_BIGINT:
case IRIS_SERIAL:
builder.dataType(BasicType.LONG_TYPE);
break;
case IRIS_TINYINT:
builder.dataType(BasicType.BYTE_TYPE);
break;
case IRIS_SMALLINT:
builder.dataType(BasicType.SHORT_TYPE);
break;
case IRIS_FLOAT:
builder.dataType(BasicType.FLOAT_TYPE);
break;
case IRIS_DOUBLE:
case IRIS_REAL:
case IRIS_DOUBLE_PRECISION:
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case IRIS_CHAR:
case IRIS_CHAR_VARYING:
case IRIS_CHARACTER_VARYING:
case IRIS_NATIONAL_CHAR:
case IRIS_NATIONAL_CHAR_VARYING:
case IRIS_NATIONAL_CHARACTER:
case IRIS_NATIONAL_CHARACTER_VARYING:
case IRIS_NATIONAL_VARCHAR:
case IRIS_NCHAR:
case IRIS_SYSNAME:
case IRIS_VARCHAR2:
case IRIS_VARCHAR:
case IRIS_NVARCHAR:
case IRIS_UNIQUEIDENTIFIER:
case IRIS_GUID:
case IRIS_CHARACTER:
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(charOrBinaryLength);
break;
case IRIS_NTEXT:
case IRIS_CLOB:
case IRIS_LONG_VARCHAR:
case IRIS_LONG:
case IRIS_LONGTEXT:
case IRIS_MEDIUMTEXT:
case IRIS_TEXT:
case IRIS_LONGVARCHAR:
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(Long.valueOf(Integer.MAX_VALUE));
break;
case IRIS_DATE:
builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
break;
case IRIS_TIME:
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
break;
case IRIS_DATETIME:
case IRIS_DATETIME2:
case IRIS_SMALLDATETIME:
case IRIS_TIMESTAMP:
case IRIS_TIMESTAMP2:
case IRIS_POSIXTIME:
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
break;
case IRIS_BINARY:
case IRIS_BINARY_VARYING:
case IRIS_RAW:
case IRIS_VARBINARY:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(charOrBinaryLength);
break;
case IRIS_LONGVARBINARY:
case IRIS_BLOB:
case IRIS_IMAGE:
case IRIS_LONG_BINARY:
case IRIS_LONG_RAW:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(Long.valueOf(Integer.MAX_VALUE));
break;
default:
throw CommonError.convertToSeaTunnelTypeError(
DatabaseIdentifier.IRIS, irisDataType, typeDefine.getName());
}
return builder.build();
}
|
@Test
public void testConvertBigint() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("bigint")
.dataType("bigint")
.build();
Column column = IrisTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(BasicType.LONG_TYPE, column.getDataType());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
}
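
A hedged companion check in the same style as the test above, exercising the decimal branch: when no precision is defined, the converter falls back to DEFAULT_PRECISION/DEFAULT_SCALE and reports a DecimalType. It only uses the builder methods already shown and assumes the IRIS_NUMERIC constant matches the SQL name "numeric":

    @Test
    public void testConvertNumericWithoutPrecision() {
        BasicTypeDefine<Object> typeDefine =
                BasicTypeDefine.builder()
                        .name("test")
                        .columnType("numeric")
                        .dataType("numeric")
                        .build();
        Column column = IrisTypeConverter.INSTANCE.convert(typeDefine);
        Assertions.assertEquals(typeDefine.getName(), column.getName());
        Assertions.assertTrue(column.getDataType() instanceof DecimalType);
        Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
    }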
|
@Override
public String toString() {
return reference;
}
|
@Test
public void testUnique() {
final Path t_noregion = new Path("/", EnumSet.of(Path.Type.directory));
assertEquals("[directory]-/", new DefaultPathPredicate(t_noregion).toString());
final Path t_region = new Path("/", EnumSet.of(Path.Type.directory));
t_region.attributes().setRegion("r");
assertEquals("[directory]-/", new DefaultPathPredicate(t_region).toString());
assertEquals(new DefaultPathPredicate(t_noregion), new DefaultPathPredicate(t_region));
}
|
@Override
public ExecuteContext after(ExecuteContext context) {
ThreadLocalUtils.removeRequestData();
ThreadLocalUtils.removeRequestTag();
return context;
}
|
@Test
public void testAfter() {
ThreadLocalUtils.setRequestTag(new RequestTag(null));
ThreadLocalUtils.setRequestData(new RequestData(null, null, null));
interceptor.after(context);
Assert.assertNull(ThreadLocalUtils.getRequestTag());
Assert.assertNull(ThreadLocalUtils.getRequestData());
}
|
@Override
public PageResult<NotifyMessageDO> getMyMyNotifyMessagePage(NotifyMessageMyPageReqVO pageReqVO, Long userId, Integer userType) {
return notifyMessageMapper.selectPage(pageReqVO, userId, userType);
}
|
@Test
public void testGetMyNotifyMessagePage() {
        // mock data
        NotifyMessageDO dbNotifyMessage = randomPojo(NotifyMessageDO.class, o -> { // expected to be returned by the query below
o.setUserId(1L);
o.setUserType(UserTypeEnum.ADMIN.getValue());
o.setReadStatus(true);
o.setCreateTime(buildTime(2022, 1, 2));
o.setTemplateParams(randomTemplateParams());
});
notifyMessageMapper.insert(dbNotifyMessage);
        // mismatching userId
notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserId(2L)));
        // mismatching userType
notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserType(UserTypeEnum.MEMBER.getValue())));
        // mismatching readStatus
notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setReadStatus(false)));
        // mismatching createTime
notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setCreateTime(buildTime(2022, 2, 1))));
        // prepare parameters
Long userId = 1L;
Integer userType = UserTypeEnum.ADMIN.getValue();
NotifyMessageMyPageReqVO reqVO = new NotifyMessageMyPageReqVO();
reqVO.setReadStatus(true);
reqVO.setCreateTime(buildBetweenTime(2022, 1, 1, 2022, 1, 10));
        // invoke
PageResult<NotifyMessageDO> pageResult = notifyMessageService.getMyMyNotifyMessagePage(reqVO, userId, userType);
        // assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbNotifyMessage, pageResult.getList().get(0));
}
|
@Override public boolean isNoop() {
return true;
}
|
@Test void isNoop() {
assertThat(span.isNoop()).isTrue();
}
|
public DrlxParseResult drlxParse(Class<?> patternType, String bindingId, String expression) {
return drlxParse(patternType, bindingId, expression, false);
}
|
@Test
public void testNullSafeExpressionsWithNotIn() {
SingleDrlxParseSuccess result = (SingleDrlxParseSuccess) parser.drlxParse(Person.class, "$p", "address!.city not in (\"Milan\", \"Tokyo\")");
List<Expression> nullSafeExpressions = result.getNullSafeExpressions();
assertThat(nullSafeExpressions).hasSize(1);
assertThat(nullSafeExpressions.get(0).toString()).isEqualTo("_this.getAddress() != null");
// null check is done after the first constraint
assertThat(result.getExpr().toString()).isEqualTo("!D.eval(org.drools.model.operators.InOperator.INSTANCE, _this.getAddress().getCity(), \"Milan\", \"Tokyo\")");
}
|