focal_method (string, 13 – 60.9k chars) | test_case (string, 25 – 109k chars) |
---|---|
public List<StorageObjectOrIOException> getObjects(List<GcsPath> gcsPaths) throws IOException {
if (gcsPaths.isEmpty()) {
return ImmutableList.of();
} else if (gcsPaths.size() == 1) {
GcsPath path = gcsPaths.get(0);
try {
StorageObject object = getObject(path);
return ImmutableList.of(StorageObjectOrIOException.create(object));
} catch (IOException e) {
return ImmutableList.of(StorageObjectOrIOException.create(e));
} catch (Exception e) {
IOException ioException =
new IOException(String.format("Error trying to get %s: %s", path, e));
return ImmutableList.of(StorageObjectOrIOException.create(ioException));
}
}
List<StorageObjectOrIOException[]> results = new ArrayList<>();
executeBatches(makeGetBatches(gcsPaths, results));
ImmutableList.Builder<StorageObjectOrIOException> ret = ImmutableList.builder();
for (StorageObjectOrIOException[] result : results) {
ret.add(result[0]);
}
return ret.build();
}
|
@Test
public void testGetObjects() throws IOException {
GcsUtil gcsUtil = gcsOptionsWithTestCredential().getGcsUtil();
Storage mockStorage = Mockito.mock(Storage.class);
gcsUtil.setStorageClient(mockStorage);
gcsUtil.setBatchRequestSupplier(FakeBatcher::new);
Storage.Objects mockStorageObjects = Mockito.mock(Storage.Objects.class);
Storage.Objects.Get mockGetRequest = Mockito.mock(Storage.Objects.Get.class);
StorageObject object = new StorageObject();
when(mockGetRequest.execute()).thenReturn(object);
when(mockStorageObjects.get(any(), any())).thenReturn(mockGetRequest);
when(mockStorage.objects()).thenReturn(mockStorageObjects);
List<StorageObjectOrIOException> results = gcsUtil.getObjects(makeGcsPaths("s", 1));
assertEquals(object, results.get(0).storageObject());
}
|
public ConfigCheckResult checkConfig() {
Optional<Long> appId = getAppId();
if (appId.isEmpty()) {
return failedApplicationStatus(INVALID_APP_ID_STATUS);
}
GithubAppConfiguration githubAppConfiguration = new GithubAppConfiguration(appId.get(), gitHubSettings.privateKey(), gitHubSettings.apiURLOrDefault());
return checkConfig(githubAppConfiguration);
}
|
@Test
public void checkConfig_whenGithubAppConfigurationNotComplete_shouldReturnFailedAppCheck() {
when(gitHubSettings.appId()).thenReturn(APP_ID);
ConfigCheckResult checkResult = configValidator.checkConfig();
assertThat(checkResult.application().autoProvisioning()).isEqualTo(ConfigStatus.failed(INCOMPLETE_APP_CONFIG_STATUS));
assertThat(checkResult.application().jit()).isEqualTo(ConfigStatus.failed(INCOMPLETE_APP_CONFIG_STATUS));
assertThat(checkResult.installations()).isEmpty();
}
|
@Override
public Infinispan create(URI uri) {
try {
return new HotRod(HotRodURI.create(uri).toConfigurationBuilder().build());
} catch (Throwable t) {
// Not a Hot Rod URI
return null;
}
}
|
@Test
public void testHotRodInstantiationByConfiguration() {
HotRodConfiguration configuration = new HotRodConfigurationBuilder().build();
try (Infinispan infinispan = Infinispan.create(configuration)) {
assertTrue(infinispan instanceof HotRod);
}
}
|
@Override
public Collection<RedisServer> masters() {
List<Map<String, String>> masters = connection.sync(StringCodec.INSTANCE, RedisCommands.SENTINEL_MASTERS);
return toRedisServersList(masters);
}
|
@Test
public void testMasters() {
Collection<RedisServer> masters = connection.masters();
assertThat(masters).hasSize(1);
}
|
void truncateTable() throws KettleDatabaseException {
if ( !meta.isPartitioningEnabled() && !meta.isTableNameInField() ) {
// Only the first one truncates in a non-partitioned step copy
//
if ( meta.truncateTable()
&& ( ( getCopy() == 0 && getUniqueStepNrAcrossSlaves() == 0 ) || !Utils.isEmpty( getPartitionID() ) ) ) {
data.db.truncateTable( environmentSubstitute( meta.getSchemaName() ), environmentSubstitute( meta
.getTableName() ) );
}
}
}
|
@Test
public void testTruncateTable_off() throws Exception {
tableOutputSpy.truncateTable();
verify( db, never() ).truncateTable( anyString(), anyString() );
}
|
public static int checkMaxBytesLength(long length) {
if (length < 0) {
throw new AvroRuntimeException("Malformed data. Length is negative: " + length);
}
if (length > MAX_ARRAY_VM_LIMIT) {
throw new UnsupportedOperationException(
"Cannot read arrays longer than " + MAX_ARRAY_VM_LIMIT + " bytes in Java library");
}
if (length > maxBytesLength) {
throw new SystemLimitException("Bytes length " + length + " exceeds maximum allowed");
}
return (int) length;
}
|
@Test
void testCheckMaxBytesLength() {
helpCheckSystemLimits(SystemLimitException::checkMaxBytesLength, MAX_BYTES_LENGTH_PROPERTY, ERROR_VM_LIMIT_BYTES,
"Bytes length 1024 exceeds maximum allowed");
}
|
@Override
@Transactional(rollbackFor = Exception.class)
public void updateFileConfigMaster(Long id) {
// Validate that the config exists
validateFileConfigExists(id);
// Set all other configs to non-master
fileConfigMapper.updateBatch(new FileConfigDO().setMaster(false));
// Mark this config as master
fileConfigMapper.updateById(new FileConfigDO().setId(id).setMaster(true));
// Clear the cache
clearCache(null, true);
}
|
@Test
public void testUpdateFileConfigMaster_success() {
// mock data
FileConfigDO dbFileConfig = randomFileConfigDO().setMaster(false);
fileConfigMapper.insert(dbFileConfig);// @Sql: insert an existing record beforehand
FileConfigDO masterFileConfig = randomFileConfigDO().setMaster(true);
fileConfigMapper.insert(masterFileConfig);// @Sql: insert an existing record beforehand
// Invoke
fileConfigService.updateFileConfigMaster(dbFileConfig.getId());
// Assert data
assertTrue(fileConfigMapper.selectById(dbFileConfig.getId()).getMaster());
assertFalse(fileConfigMapper.selectById(masterFileConfig.getId()).getMaster());
// Verify cache
assertNull(fileConfigService.getClientCache().getIfPresent(0L));
}
|
@Bean
public PluginDataHandler sofaPluginDataHandler() {
return new SofaPluginDataHandler();
}
|
@Test
public void testSofaPluginDataHandler() {
applicationContextRunner.run(context -> {
PluginDataHandler handler = context.getBean("sofaPluginDataHandler", PluginDataHandler.class);
assertNotNull(handler);
}
);
}
|
public static Expression and(Expression... expressions)
{
return and(Arrays.asList(expressions));
}
|
@Test
public void testAnd()
{
Expression a = name("a");
Expression b = name("b");
Expression c = name("c");
Expression d = name("d");
Expression e = name("e");
assertEquals(
ExpressionUtils.and(a, b, c, d, e),
and(and(and(a, b), and(c, d)), e));
assertEquals(
ExpressionUtils.combineConjuncts(a, b, a, c, d, c, e),
and(and(and(a, b), and(c, d)), e));
}
|
@Override
public List<AdminUserDO> getUserListByPostIds(Collection<Long> postIds) {
if (CollUtil.isEmpty(postIds)) {
return Collections.emptyList();
}
Set<Long> userIds = convertSet(userPostMapper.selectListByPostIds(postIds), UserPostDO::getUserId);
if (CollUtil.isEmpty(userIds)) {
return Collections.emptyList();
}
return userMapper.selectBatchIds(userIds);
}
|
@Test
public void testUserListByPostIds() {
// Prepare parameters
Collection<Long> postIds = asSet(10L, 20L);
// mock user1 data
AdminUserDO user1 = randomAdminUserDO(o -> o.setPostIds(asSet(10L, 30L)));
userMapper.insert(user1);
userPostMapper.insert(new UserPostDO().setUserId(user1.getId()).setPostId(10L));
userPostMapper.insert(new UserPostDO().setUserId(user1.getId()).setPostId(30L));
// mock user2 data
AdminUserDO user2 = randomAdminUserDO(o -> o.setPostIds(singleton(100L)));
userMapper.insert(user2);
userPostMapper.insert(new UserPostDO().setUserId(user2.getId()).setPostId(100L));
// Invoke
List<AdminUserDO> result = userService.getUserListByPostIds(postIds);
// Assert
assertEquals(1, result.size());
assertEquals(user1, result.get(0));
}
|
@Override
public Checksum compute(final InputStream in, final TransferStatus status) throws BackgroundException {
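// Checksums are disabled: quietly close the stream and report Checksum.NONE.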
IOUtils.closeQuietly(in);
return Checksum.NONE;
}
|
@Test
public void compute() throws Exception {
final NullInputStream in = new NullInputStream(0L);
new DisabledChecksumCompute().compute(in, new TransferStatus());
assertEquals(-1, in.read());
assertEquals(-1, in.read());
}
|
@Udf(description = "Converts the number of days since 1970-01-01 00:00:00 UTC/GMT to a date "
+ "string using the given format pattern. The format pattern should be in the format"
+ " expected by java.time.format.DateTimeFormatter")
public String formatDate(
@UdfParameter(
description = "The date to convert") final Date date,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
if (date == null || formatPattern == null) {
return null;
}
try {
final DateTimeFormatter formatter = formatters.get(formatPattern);
return LocalDate.ofEpochDay(TimeUnit.MILLISECONDS.toDays(date.getTime())).format(formatter);
} catch (final ExecutionException | RuntimeException e) {
throw new KsqlFunctionException("Failed to format date " + date
+ " with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
}
|
@Test
public void shouldReturnNullOnNullDate() {
// When:
final String result = udf.formatDate(null, "yyyy-MM-dd");
// Then:
assertThat(result, is(nullValue()));
}
|
@VisibleForTesting
WxMaService getWxMaService(Integer userType) {
// Step 1: query the DB config entry and obtain the corresponding WxMaService object
SocialClientDO client = socialClientMapper.selectBySocialTypeAndUserType(
SocialTypeEnum.WECHAT_MINI_APP.getType(), userType);
if (client != null && Objects.equals(client.getStatus(), CommonStatusEnum.ENABLE.getStatus())) {
return wxMaServiceCache.getUnchecked(client.getClientId() + ":" + client.getClientSecret());
}
// Step 2: if no DB config entry exists, fall back to the WxMaService from application-*.yaml
return wxMaService;
}
|
@Test
public void testGetWxMaService_clientDisable() {
// Prepare parameters
Integer userType = randomPojo(UserTypeEnum.class).getValue();
// mock data
SocialClientDO client = randomPojo(SocialClientDO.class, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())
.setUserType(userType).setSocialType(SocialTypeEnum.WECHAT_MINI_APP.getType()));
socialClientMapper.insert(client);
// Invoke
WxMaService result = socialClientService.getWxMaService(userType);
// Assert
assertSame(wxMaService, result);
}
|
public static Expression convert(Filter[] filters) {
Expression expression = Expressions.alwaysTrue();
for (Filter filter : filters) {
Expression converted = convert(filter);
Preconditions.checkArgument(
converted != null, "Cannot convert filter to Iceberg: %s", filter);
expression = Expressions.and(expression, converted);
}
return expression;
}
|
@Test
public void testNestedInInsideNot() {
Not filter =
Not.apply(And.apply(EqualTo.apply("col1", 1), In.apply("col2", new Integer[] {1, 2})));
Expression converted = SparkFilters.convert(filter);
assertThat(converted).as("Expression should not be converted").isNull();
}
|
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
InvokeMode invokeMode = RpcUtils.getInvokeMode(invoker.getUrl(), invocation);
if (InvokeMode.SYNC == invokeMode) {
return syncInvoke(invoker, invocation);
} else {
return asyncInvoke(invoker, invocation);
}
}
|
@Test
public void testInvokeAsync() {
Invocation invocation = DubboTestUtil.getDefaultMockInvocationOne();
Invoker invoker = DubboTestUtil.getDefaultMockInvoker();
when(invocation.getAttachment(ASYNC_KEY)).thenReturn(Boolean.TRUE.toString());
final Result result = mock(Result.class);
when(invoker.invoke(invocation)).thenAnswer(invocationOnMock -> {
verifyInvocationStructureForAsyncCall(invoker, invocation);
return result;
});
consumerFilter.invoke(invoker, invocation);
verify(invoker).invoke(invocation);
Context context = ContextUtil.getContext();
assertNotNull(context);
}
|
@Override
public Optional<String> buildEstimatedCountSQL(final String qualifiedTableName) {
return Optional.of(String.format("SELECT TABLE_ROWS FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = ? AND TABLE_NAME = '%s'", qualifiedTableName));
}
|
@Test
void assertBuilderEstimateCountSQLWithoutKeyword() {
Optional<String> actual = sqlBuilder.buildEstimatedCountSQL("t_order");
assertTrue(actual.isPresent());
assertThat(actual.get(), is("SELECT TABLE_ROWS FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = ? AND TABLE_NAME = 't_order'"));
}
|
@Override
public void stopTrackScreenOrientation() {
}
|
@Test
public void stopTrackScreenOrientation() {
mSensorsAPI.stopTrackScreenOrientation();
}
|
@Override
public StatusOutputStream<Node> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
try {
final ObjectReader reader = session.getClient().getJSON().getContext(null).readerFor(FileKey.class);
if(log.isDebugEnabled()) {
log.debug(String.format("Read file key for file %s", file));
}
if(null == status.getFilekey()) {
status.setFilekey(SDSTripleCryptEncryptorFeature.generateFileKey());
}
final FileKey fileKey = reader.readValue(status.getFilekey().array());
return new TripleCryptEncryptingOutputStream(session, nodeid, proxy.write(file, status, callback),
Crypto.createFileEncryptionCipher(TripleCryptConverter.toCryptoPlainFileKey(fileKey)), status
);
}
catch(CryptoSystemException | UnknownVersionException e) {
throw new TripleCryptExceptionMappingService().map("Upload {0} failed", e, file);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
}
}
|
@Test
public void testWriteMultipart() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final EncryptRoomRequest encrypt = new EncryptRoomRequest().isEncrypted(true);
new NodesApi(session.getClient()).encryptRoom(encrypt, Long.parseLong(new SDSNodeIdProvider(session).getVersionId(room)), StringUtils.EMPTY, null);
room.attributes().withCustom(KEY_ENCRYPTED, String.valueOf(true));
final byte[] content = RandomUtils.nextBytes(32769);
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
final Path test = new Path(room, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
final SDSEncryptionBulkFeature bulk = new SDSEncryptionBulkFeature(session, nodeid);
bulk.pre(Transfer.Type.upload, Collections.singletonMap(new TransferItem(test), status), new DisabledConnectionCallback());
final TripleCryptWriteFeature writer = new TripleCryptWriteFeature(session, nodeid, new SDSDirectS3MultipartWriteFeature(session, nodeid));
final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
assertNotNull(test.attributes().getVersionId());
assertTrue(new DefaultFindFeature(session).find(test));
assertEquals(content.length, new SDSAttributesFinderFeature(session, nodeid).find(test).getSize());
final byte[] compare = new byte[content.length];
final InputStream stream = new TripleCryptReadFeature(session, nodeid, new SDSReadFeature(session, nodeid)).read(test, new TransferStatus(), new DisabledConnectionCallback() {
@Override
public void warn(final Host bookmark, final String title, final String message, final String defaultButton, final String cancelButton, final String preference) {
//
}
@Override
public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
return new VaultCredentials("eth[oh8uv4Eesij");
}
});
IOUtils.readFully(stream, compare);
stream.close();
assertArrayEquals(content, compare);
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@VisibleForTesting
static void validateIntervalFreshness(IntervalFreshness intervalFreshness) {
if (!NumberUtils.isParsable(intervalFreshness.getInterval())) {
throw new ValidationException(
String.format(
"The interval freshness value '%s' is an illegal integer type value.",
intervalFreshness.getInterval()));
}
if (!NumberUtils.isDigits(intervalFreshness.getInterval())) {
throw new ValidationException(
"The freshness interval currently only supports integer type values.");
}
}
|
@Test
void testIllegalIntervalFreshness() {
assertThatThrownBy(() -> validateIntervalFreshness(IntervalFreshness.ofMinute("2efedd")))
.isInstanceOf(ValidationException.class)
.hasMessageContaining(
"The interval freshness value '2efedd' is an illegal integer type value.");
assertThatThrownBy(() -> validateIntervalFreshness(IntervalFreshness.ofMinute("2.5")))
.isInstanceOf(ValidationException.class)
.hasMessageContaining(
"The freshness interval currently only supports integer type values.");
}
|
public static String localDateTimeToString(final LocalDateTime localDateTime) {
return DATE_TIME_FORMATTER.format(localDateTime);
}
|
@Test
public void testLocalDateTimeToString() {
LocalDateTime localDateTime = LocalDateTime.of(2020, 1, 1, 23, 50, 0, 0);
assertEquals("2020-01-01 23:50:00", DateUtils.localDateTimeToString(localDateTime));
}
|
public static HL7v2Read readAllRequests() {
return new HL7v2Read();
}
|
@Test
public void testHL7v2IOFailedReadsByParameter() {
List<HL7v2ReadParameter> badReadParameters =
Arrays.asList(
HL7v2ReadParameter.of(
"metadata-foo", "projects/a/locations/b/datasets/c/hl7V2Stores/d/messages/foo"),
HL7v2ReadParameter.of(
"metadata-bar", "projects/a/locations/b/datasets/c/hl7V2Stores/d/messages/bar"));
PCollection<HL7v2ReadParameter> parameters = pipeline.apply(Create.of(badReadParameters));
HL7v2IO.HL7v2Read.Result readResult = parameters.apply(HL7v2IO.readAllRequests());
PCollection<HealthcareIOError<HL7v2ReadParameter>> failed = readResult.getFailedReads();
PCollection<HL7v2ReadResponse> messages = readResult.getMessages();
PCollection<HL7v2ReadParameter> failedParameters =
failed.apply("Map to parameters", ParDo.of(new MapHealthCareIOErrorToReadParameter()));
PAssert.that(failedParameters).containsInAnyOrder(badReadParameters);
PAssert.that(messages).empty();
pipeline.run();
}
|
public int deleteById(ObjectId id) {
final WriteResult<ContentPackInstallation, ObjectId> writeResult = dbCollection.removeById(id);
return writeResult.getN();
}
|
@Test
@MongoDBFixtures("ContentPackInstallationPersistenceServiceTest.json")
public void deleteById() {
final ObjectId objectId = new ObjectId("5b4c935b4b900a0000000001");
final int deletedContentPacks = persistenceService.deleteById(objectId);
final Set<ContentPackInstallation> contentPacks = persistenceService.loadAll();
assertThat(deletedContentPacks).isEqualTo(1);
assertThat(contentPacks)
.hasSize(3)
.noneSatisfy(contentPack -> assertThat(contentPack.id()).isEqualTo(objectId));
}
|
void add(final long recordingId, final long recordingDescriptorOffset)
{
ensurePositive(recordingId, "recordingId");
ensurePositive(recordingDescriptorOffset, "recordingDescriptorOffset");
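// The index stores [recordingId, recordingDescriptorOffset] pairs back to back, so the next free slot is count * 2.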
final int nextPosition = count << 1;
long[] index = this.index;
if (nextPosition > 0)
{
if (recordingId <= index[nextPosition - 2])
{
throw new IllegalArgumentException("recordingId " + recordingId +
" is less than or equal to the last recordingId " + index[nextPosition - 2]);
}
if (nextPosition == index.length)
{
index = expand(index);
this.index = index;
}
}
index[nextPosition] = recordingId;
index[nextPosition + 1] = recordingDescriptorOffset;
count++;
}
|
@Test
void addThrowsIllegalArgumentExceptionIfRecordingOffsetIsNegative()
{
assertThrows(IllegalArgumentException.class, () -> catalogIndex.add(1024, Integer.MIN_VALUE));
}
|
public static StatementExecutorResponse execute(
final ConfiguredStatement<AssertSchema> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
return AssertExecutor.execute(
statement.getMaskedStatementText(),
statement.getStatement(),
executionContext.getKsqlConfig().getInt(KSQL_ASSERT_SCHEMA_DEFAULT_TIMEOUT_MS),
serviceContext,
(stmt, sc) -> assertSchema(
sc.getSchemaRegistryClient(),
((AssertSchema) stmt).getSubject(),
((AssertSchema) stmt).getId(),
stmt.checkExists()),
(str, stmt) -> new AssertSchemaEntity(
str,
((AssertSchema) stmt).getSubject(),
((AssertSchema) stmt).getId(),
stmt.checkExists())
);
}
|
@Test
public void shouldAssertNotExistSchemaBySubject() {
// Given
final AssertSchema assertSchema = new AssertSchema(Optional.empty(), Optional.of("abc"), Optional.empty(), Optional.empty(), false);
final ConfiguredStatement<AssertSchema> statement = ConfiguredStatement
.of(KsqlParser.PreparedStatement.of("", assertSchema),
SessionConfig.of(ksqlConfig, ImmutableMap.of()));
// When:
final Optional<KsqlEntity> entity = AssertSchemaExecutor
.execute(statement, mock(SessionProperties.class), engine, serviceContext).getEntity();
// Then:
assertThat("expected response!", entity.isPresent());
assertThat(((AssertSchemaEntity) entity.get()).getSubject(), is(Optional.of("abc")));
assertThat(((AssertSchemaEntity) entity.get()).getId(), is(Optional.empty()));
assertThat(((AssertSchemaEntity) entity.get()).getExists(), is(false));
}
|
public void abortTransaction(long transactionId, boolean abortPrepared, String reason,
TxnCommitAttachment txnCommitAttachment,
List<TabletCommitInfo> finishedTablets,
List<TabletFailInfo> failedTablets)
throws UserException {
if (transactionId < 0) {
LOG.info("transaction id is {}, less than 0, maybe this is an old type load job, ignore abort operation",
transactionId);
return;
}
TransactionState transactionState = null;
readLock();
try {
transactionState = idToRunningTransactionState.get(transactionId);
} finally {
readUnlock();
}
if (transactionState == null) {
// If the transaction state does not exist, this task might have been aborted by
// the txntimeoutchecker thread. We need to perform some additional work.
processNotFoundTxn(transactionId, reason, txnCommitAttachment);
throw new TransactionNotFoundException(transactionId);
}
// update transaction state extra if exists
if (txnCommitAttachment != null) {
transactionState.setTxnCommitAttachment(txnCommitAttachment);
}
// before state transform
TxnStateChangeCallback callback = transactionState.beforeStateTransform(TransactionStatus.ABORTED);
boolean txnOperated = false;
transactionState.writeLock();
try {
writeLock();
try {
txnOperated = unprotectAbortTransaction(transactionId, abortPrepared, reason);
} finally {
writeUnlock();
transactionState.afterStateTransform(TransactionStatus.ABORTED, txnOperated, callback, reason);
}
persistTxnStateInTxnLevelLock(transactionState);
} finally {
transactionState.writeUnlock();
}
if (!txnOperated || transactionState.getTransactionStatus() != TransactionStatus.ABORTED) {
return;
}
LOG.info("transaction:[{}] successfully rollback", transactionState);
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
return;
}
for (Long tableId : transactionState.getTableIdList()) {
Table table = db.getTable(tableId);
if (table == null) {
continue;
}
TransactionStateListener listener = stateListenerFactory.create(this, table);
if (listener != null) {
listener.postAbort(transactionState, finishedTablets, failedTablets);
}
}
}
|
@Test
public void testAbortTransaction() throws UserException {
DatabaseTransactionMgr masterDbTransMgr =
masterTransMgr.getDatabaseTransactionMgr(GlobalStateMgrTestUtil.testDbId1);
long txnId2 = lableToTxnId.get(GlobalStateMgrTestUtil.testTxnLable2);
masterDbTransMgr.abortTransaction(txnId2, "test abort transaction", null);
assertEquals(6, masterDbTransMgr.getRunningTxnNums());
assertEquals(0, masterDbTransMgr.getRunningRoutineLoadTxnNums());
assertEquals(2, masterDbTransMgr.getFinishedTxnNums());
assertEquals(8, masterDbTransMgr.getTransactionNum());
assertEquals(TTransactionStatus.ABORTED, masterDbTransMgr.getTxnStatus(txnId2));
long txnId3 = lableToTxnId.get(GlobalStateMgrTestUtil.testTxnLable3);
masterDbTransMgr.abortTransaction(txnId3, "test abort transaction", null);
assertEquals(5, masterDbTransMgr.getRunningTxnNums());
assertEquals(0, masterDbTransMgr.getRunningRoutineLoadTxnNums());
assertEquals(3, masterDbTransMgr.getFinishedTxnNums());
assertEquals(8, masterDbTransMgr.getTransactionNum());
assertEquals(TTransactionStatus.ABORTED, masterDbTransMgr.getTxnStatus(txnId3));
}
|
public boolean isEnabled() {
BasicAuthConfiguration basicAuthConfiguration = configuration();
if (basicAuthConfiguration == null) {
return false;
}
return Boolean.TRUE.equals(basicAuthConfiguration.getEnabled()) && basicAuthConfiguration.getUsername() != null && basicAuthConfiguration.getPassword() != null;
}
|
@Test
void initFromYamlConfig() throws TimeoutException {
assertThat(basicAuthService.isEnabled(), is(true));
assertConfigurationMatchesApplicationYaml();
awaitOssAuthEventApiCall("[email protected]");
}
|
@Override
public Object execute(String command, byte[]... args) {
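// Dispatch via reflection: find a public method whose name matches the command (case-insensitive) and whose parameter count matches the supplied args.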
for (Method method : this.getClass().getDeclaredMethods()) {
if (method.getName().equalsIgnoreCase(command)
&& Modifier.isPublic(method.getModifiers())
&& (method.getParameterTypes().length == args.length)) {
try {
Object t = execute(method, args);
if (t instanceof String) {
return ((String) t).getBytes();
}
return t;
} catch (IllegalArgumentException e) {
if (isPipelined()) {
throw new RedisPipelineException(e);
}
throw new InvalidDataAccessApiUsageException(e.getMessage(), e);
}
}
}
throw new UnsupportedOperationException();
}
|
@Test
public void testExecute() {
Long s = (Long) connection.execute("ttl", "key".getBytes());
assertThat(s).isEqualTo(-2);
connection.execute("flushDb");
}
|
@Override
public String toString() {
return String.format("Bulkhead '%s'", this.name);
}
|
@Test
public void testToString() {
String result = bulkhead.toString();
assertThat(result).isEqualTo("Bulkhead 'test'");
}
|
@Override
public NodeId getMasterFor(DeviceId deviceId) {
checkNotNull(deviceId, DEVICE_ID_NULL);
return store.getMaster(networkId, deviceId);
}
|
@Test
public void getMasterFor() {
mastershipMgr1.setRole(NID_LOCAL, VDID1, MASTER);
mastershipMgr1.setRole(NID_OTHER, VDID2, MASTER);
assertEquals("wrong master:", NID_LOCAL, mastershipMgr1.getMasterFor(VDID1));
assertEquals("wrong master:", NID_OTHER, mastershipMgr1.getMasterFor(VDID2));
//have NID_OTHER hand over VDID2 to NID_LOCAL
mastershipMgr1.setRole(NID_LOCAL, VDID2, MASTER);
assertEquals("wrong master:", NID_LOCAL, mastershipMgr1.getMasterFor(VDID2));
}
|
public final void setStrictness(Strictness strictness) {
Objects.requireNonNull(strictness);
this.strictness = strictness;
}
|
@Test
public void testCapitalizedTrueFailWhenStrict() {
JsonReader reader = new JsonReader(reader("TRUE"));
reader.setStrictness(Strictness.STRICT);
IOException expected = assertThrows(IOException.class, reader::nextBoolean);
assertThat(expected)
.hasMessageThat()
.startsWith(
"Use JsonReader.setStrictness(Strictness.LENIENT) to accept malformed JSON"
+ " at line 1 column 1 path $\n");
reader = new JsonReader(reader("True"));
reader.setStrictness(Strictness.STRICT);
expected = assertThrows(IOException.class, reader::nextBoolean);
assertThat(expected)
.hasMessageThat()
.startsWith(
"Use JsonReader.setStrictness(Strictness.LENIENT) to accept malformed JSON"
+ " at line 1 column 1 path $\n");
}
|
static void maybeReportHybridDiscoveryIssue(PluginDiscoveryMode discoveryMode, PluginScanResult serviceLoadingScanResult, PluginScanResult mergedResult) {
SortedSet<PluginDesc<?>> missingPlugins = new TreeSet<>();
mergedResult.forEach(missingPlugins::add);
serviceLoadingScanResult.forEach(missingPlugins::remove);
if (missingPlugins.isEmpty()) {
if (discoveryMode == PluginDiscoveryMode.HYBRID_WARN || discoveryMode == PluginDiscoveryMode.HYBRID_FAIL) {
log.warn("All plugins have ServiceLoader manifests, consider reconfiguring {}={}",
WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.SERVICE_LOAD);
}
} else {
String message = String.format(
"One or more plugins are missing ServiceLoader manifests may not be usable with %s=%s: %s%n" +
"Read the documentation at %s for instructions on migrating your plugins " +
"to take advantage of the performance improvements of %s mode.",
WorkerConfig.PLUGIN_DISCOVERY_CONFIG,
PluginDiscoveryMode.SERVICE_LOAD,
missingPlugins.stream()
.map(pluginDesc -> pluginDesc.location() + "\t" + pluginDesc.className() + "\t" + pluginDesc.type() + "\t" + pluginDesc.version())
.collect(Collectors.joining("\n", "[\n", "\n]")),
"https://kafka.apache.org/documentation.html#connect_plugindiscovery",
PluginDiscoveryMode.SERVICE_LOAD
);
if (discoveryMode == PluginDiscoveryMode.HYBRID_WARN) {
log.warn("{} To silence this warning, set {}={} in the worker config.",
message, WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.ONLY_SCAN);
} else if (discoveryMode == PluginDiscoveryMode.HYBRID_FAIL) {
throw new ConnectException(String.format("%s To silence this error, set %s=%s in the worker config.",
message, WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.HYBRID_WARN));
}
}
}
|
@Test
public void testServiceLoadNoPlugins() {
try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(Plugins.class)) {
Plugins.maybeReportHybridDiscoveryIssue(PluginDiscoveryMode.SERVICE_LOAD, empty, empty);
assertTrue(logCaptureAppender.getEvents().stream().noneMatch(e -> e.getLevel().contains("ERROR") || e.getLevel().equals("WARN")));
}
}
|
public ResourceConfigs toResourceConfigs() {
final ResourceConfigs resourceConfigs = new ResourceConfigs();
for (Resource resource : this) {
resourceConfigs.add(new ResourceConfig(resource.getName()));
}
return resourceConfigs;
}
|
@Test
void shouldConvertResourceListToResourceConfigs() {
final Resources resources = new Resources(new Resource("foo"), new Resource("bar"));
final ResourceConfigs resourceConfigs = resources.toResourceConfigs();
assertThat(resourceConfigs.size()).isEqualTo(2);
assertThat(resourceConfigs.get(0)).isEqualTo(new ResourceConfig("foo"));
assertThat(resourceConfigs.get(1)).isEqualTo(new ResourceConfig("bar"));
}
|
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
if (!configured())
throw new IllegalStateException("Callback handler not configured");
for (Callback callback : callbacks) {
if (callback instanceof OAuthBearerTokenCallback)
try {
handleTokenCallback((OAuthBearerTokenCallback) callback);
} catch (KafkaException e) {
throw new IOException(e.getMessage(), e);
}
else if (callback instanceof SaslExtensionsCallback)
try {
handleExtensionsCallback((SaslExtensionsCallback) callback);
} catch (KafkaException e) {
throw new IOException(e.getMessage(), e);
}
else
throw new UnsupportedCallbackException(callback);
}
}
|
@Test
public void addsExtensions() throws IOException, UnsupportedCallbackException {
Map<String, String> options = new HashMap<>();
options.put("unsecuredLoginExtension_testId", "1");
OAuthBearerUnsecuredLoginCallbackHandler callbackHandler = createCallbackHandler(options, new MockTime());
SaslExtensionsCallback callback = new SaslExtensionsCallback();
callbackHandler.handle(new Callback[] {callback});
assertEquals("1", callback.extensions().map().get("testId"));
}
|
@Override
public SelResult childrenAccept(SelParserVisitor visitor, Object data) {
SelResult res = SelResult.NONE;
if (children != null) {
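// Visit children in order; any control-flow result (BREAK/CONTINUE/RETURN) short-circuits the traversal.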
for (int i = 0; i < children.length; ++i) {
res = (SelResult) children[i].jjtAccept(visitor, data);
switch (res) {
case BREAK:
return SelResult.BREAK;
case CONTINUE:
return SelResult.CONTINUE;
case RETURN:
return SelResult.RETURN;
}
}
}
return res;
}
|
@Test
public void testVisitedBreakNode() {
root.jjtAddChild(breakNode, 2);
root.jjtAddChild(breakNode, 1);
root.jjtAddChild(breakNode, 0);
SelResult res = root.childrenAccept(null, null);
assertEquals(SelResult.BREAK, res);
assertArrayEquals(new int[] {1, 0, 0, 0, 0}, visited);
}
|
public <T> Span nextSpanWithParent(SamplerFunction<T> samplerFunction, T arg,
@Nullable TraceContext parent) {
return _toSpan(parent, nextContext(samplerFunction, arg, parent));
}
|
@Test void nextSpanWithParent_overrideToMakeNewTrace() {
Span span;
try (Scope scope = currentTraceContext.newScope(context)) {
span = tracer.nextSpanWithParent(deferDecision(), false, null);
}
assertThat(span.context().parentId()).isNull();
}
|
public static boolean getBool(String property, JsonNode node) {
Preconditions.checkArgument(node.has(property), "Cannot parse missing boolean: %s", property);
JsonNode pNode = node.get(property);
Preconditions.checkArgument(
pNode != null && !pNode.isNull() && pNode.isBoolean(),
"Cannot parse to a boolean value: %s: %s",
property,
pNode);
return pNode.asBoolean();
}
|
@Test
public void getBool() throws JsonProcessingException {
assertThatThrownBy(() -> JsonUtil.getBool("x", JsonUtil.mapper().readTree("{}")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse missing boolean: x");
assertThatThrownBy(() -> JsonUtil.getBool("x", JsonUtil.mapper().readTree("{\"x\": null}")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse to a boolean value: x: null");
assertThatThrownBy(() -> JsonUtil.getBool("x", JsonUtil.mapper().readTree("{\"x\": \"23\"}")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse to a boolean value: x: \"23\"");
assertThatThrownBy(() -> JsonUtil.getBool("x", JsonUtil.mapper().readTree("{\"x\": \"true\"}")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse to a boolean value: x: \"true\"");
assertThat(JsonUtil.getBool("x", JsonUtil.mapper().readTree("{\"x\": true}"))).isTrue();
assertThat(JsonUtil.getBool("x", JsonUtil.mapper().readTree("{\"x\": false}"))).isFalse();
}
|
@Override
public ConfigFileList responseMessageForConfigFiles(String responseBody) {
ConfigFilesResponseMessage response = codec.getGson().fromJson(responseBody, ConfigFilesResponseMessage.class);
return ConfigFileList.from(response.getFiles());
}
|
@Test
public void shouldReturnErrorWhenInvalidResponseJSON() {
assertTrue(handler.responseMessageForConfigFiles("{\"files\": null}").hasErrors());
assertTrue(handler.responseMessageForConfigFiles("{\"blah\": [\"file\"]}").hasErrors());
assertTrue(handler.responseMessageForConfigFiles("{}").hasErrors());
}
|
@Override
public void clearLossHistoryStats(MdId mdName, MaIdShort maName,
MepId mepId) throws CfmConfigException {
throw new UnsupportedOperationException("Not yet implemented");
}
|
@Test
public void testClearAllLossHistoryStatsOnMep() throws CfmConfigException {
//TODO: Implement underlying method
try {
soamManager.clearLossHistoryStats(MDNAME1, MANAME1, MEPID1);
fail("Expecting UnsupportedOperationException");
} catch (UnsupportedOperationException e) {
}
}
|
public String getString(String path) {
return ObjectConverter.convertObjectTo(get(path), String.class);
}
|
@Test public void
can_parse_json_attributes_starting_with_a_number() {
// Given
String json = "{\n" +
" \"6269f15a0bb9b1b7d86ae718e84cddcd\" : {\n" +
" \"attr1\":\"val1\",\n" +
" \"attr2\":\"val2\",\n" +
" \"attrx\":\"valx\"\n" +
" }\n" +
"}";
// When
JsonPath jsonPath = new JsonPath(json);
// Then
assertThat(jsonPath.getString("6269f15a0bb9b1b7d86ae718e84cddcd.attr1"), equalTo("val1"));
}
|
@Override
public void metricChange(final KafkaMetric metric) {
if (!THROUGHPUT_METRIC_NAMES.contains(metric.metricName().name())
|| !StreamsMetricsImpl.TOPIC_LEVEL_GROUP.equals(metric.metricName().group())) {
return;
}
addMetric(
metric,
getQueryId(metric),
getTopic(metric)
);
}
|
@Test
public void shouldIgnoreNonThroughputMetric() {
// When:
listener.metricChange(mockMetric(
"other-metric",
2D,
STREAMS_TAGS_TASK_1)
);
// Then:
assertThrows(AssertionError.class, () -> verifyAndGetMetric("other-metric", QUERY_ONE_TAGS));
}
|
public static boolean isJavaIdentifier(String name) {
if (name == null) {
return false;
}
int size = name.length();
if (size < 1) {
return false;
}
if (Character.isJavaIdentifierStart(name.charAt(0))) {
for (int i = 1; i < size; i++) {
if (!Character.isJavaIdentifierPart(name.charAt(i))) {
return false;
}
}
return true;
}
return false;
}
|
@Test
public void testIsJavaIdentifier() {
assertTrue(StringHelper.isJavaIdentifier("foo"));
assertFalse(StringHelper.isJavaIdentifier("foo.bar"));
assertFalse(StringHelper.isJavaIdentifier(""));
assertFalse(StringHelper.isJavaIdentifier(null));
}
|
public void decode(ByteBuf buffer) {
boolean last;
int statusCode;
while (true) {
switch(state) {
case READ_COMMON_HEADER:
if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
return;
}
int frameOffset = buffer.readerIndex();
int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
buffer.skipBytes(SPDY_HEADER_SIZE);
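// The most significant bit of the first header byte distinguishes control frames (1) from data frames (0).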
boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
int version;
int type;
if (control) {
// Decode control frame common header
version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
streamId = 0; // Default to session Stream-ID
} else {
// Decode data frame common header
version = spdyVersion; // Default to expected version
type = SPDY_DATA_FRAME;
streamId = getUnsignedInt(buffer, frameOffset);
}
flags = buffer.getByte(flagsOffset);
length = getUnsignedMedium(buffer, lengthOffset);
// Check version first then validity
if (version != spdyVersion) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SPDY Version");
} else if (!isValidFrameHeader(streamId, type, flags, length)) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid Frame Error");
} else {
state = getNextState(type, length);
}
break;
case READ_DATA_FRAME:
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
break;
}
// Generate data frames that do not exceed maxChunkSize
int dataLength = Math.min(maxChunkSize, length);
// Wait until entire frame is readable
if (buffer.readableBytes() < dataLength) {
return;
}
ByteBuf data = buffer.alloc().buffer(dataLength);
data.writeBytes(buffer, dataLength);
length -= dataLength;
if (length == 0) {
state = State.READ_COMMON_HEADER;
}
last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
delegate.readDataFrame(streamId, last, data);
break;
case READ_SYN_STREAM_FRAME:
if (buffer.readableBytes() < 10) {
return;
}
int offset = buffer.readerIndex();
streamId = getUnsignedInt(buffer, offset);
int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
last = hasFlag(flags, SPDY_FLAG_FIN);
boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
buffer.skipBytes(10);
length -= 10;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_STREAM Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
}
break;
case READ_SYN_REPLY_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_REPLY Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynReplyFrame(streamId, last);
}
break;
case READ_RST_STREAM_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (streamId == 0 || statusCode == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid RST_STREAM Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readRstStreamFrame(streamId, statusCode);
}
break;
case READ_SETTINGS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
numSettings = getUnsignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
length -= 4;
// Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
if ((length & 0x07) != 0 || length >> 3 != numSettings) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SETTINGS Frame");
} else {
state = State.READ_SETTING;
delegate.readSettingsFrame(clear);
}
break;
case READ_SETTING:
if (numSettings == 0) {
state = State.READ_COMMON_HEADER;
delegate.readSettingsEnd();
break;
}
if (buffer.readableBytes() < 8) {
return;
}
byte settingsFlags = buffer.getByte(buffer.readerIndex());
int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
int value = getSignedInt(buffer, buffer.readerIndex() + 4);
boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
buffer.skipBytes(8);
--numSettings;
delegate.readSetting(id, value, persistValue, persisted);
break;
case READ_PING_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
int pingId = getSignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
state = State.READ_COMMON_HEADER;
delegate.readPingFrame(pingId);
break;
case READ_GOAWAY_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
state = State.READ_COMMON_HEADER;
delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
break;
case READ_HEADERS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid HEADERS Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readHeadersFrame(streamId, last);
}
break;
case READ_WINDOW_UPDATE_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (deltaWindowSize == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
}
break;
case READ_HEADER_BLOCK:
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readHeaderBlockEnd();
break;
}
if (!buffer.isReadable()) {
return;
}
int compressedBytes = Math.min(buffer.readableBytes(), length);
ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
headerBlock.writeBytes(buffer, compressedBytes);
length -= compressedBytes;
delegate.readHeaderBlock(headerBlock);
break;
case DISCARD_FRAME:
int numBytes = Math.min(buffer.readableBytes(), length);
buffer.skipBytes(numBytes);
length -= numBytes;
if (length == 0) {
state = State.READ_COMMON_HEADER;
break;
}
return;
case FRAME_ERROR:
buffer.skipBytes(buffer.readableBytes());
return;
default:
throw new Error("Shouldn't reach here.");
}
}
}
|
@Test
public void testInvalidSpdyHeadersFrameStreamId() throws Exception {
short type = 8;
byte flags = 0;
int length = 4;
int streamId = 0; // invalid stream identifier
ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
encodeControlFrameHeader(buf, type, flags, length);
buf.writeInt(streamId);
decoder.decode(buf);
verify(delegate).readFrameError(anyString());
assertFalse(buf.isReadable());
buf.release();
}
|
@Override
public Comparable convert(Comparable value) {
if (!(value instanceof CompositeValue)) {
throw new IllegalArgumentException("Cannot convert [" + value + "] to composite");
}
CompositeValue compositeValue = (CompositeValue) value;
Comparable[] components = compositeValue.getComponents();
Comparable[] converted = new Comparable[components.length];
for (int i = 0; i < components.length; ++i) {
Comparable component = components[i];
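// Pass composite sentinels (NULL, NEGATIVE_INFINITY, POSITIVE_INFINITY) through unchanged; convert everything else.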
if (component == NULL || component == NEGATIVE_INFINITY || component == POSITIVE_INFINITY) {
converted[i] = component;
} else {
converted[i] = converters[i].convert(component);
}
}
return new CompositeValue(converted);
}
|
@Test
public void testConversion() {
assertEquals(value(1), converter(INTEGER_CONVERTER).convert(value(1)));
assertEquals(value(1), converter(INTEGER_CONVERTER).convert(value("1")));
assertEquals(value(1, true), converter(INTEGER_CONVERTER, BOOLEAN_CONVERTER).convert(value(1, true)));
assertEquals(value(1, false), converter(INTEGER_CONVERTER, BOOLEAN_CONVERTER).convert(value(1.0, "non-true")));
assertEquals(value(1, true, "foo"),
converter(INTEGER_CONVERTER, BOOLEAN_CONVERTER, STRING_CONVERTER).convert(value(1, true, "foo")));
assertEquals(value(1, false, "1"),
converter(INTEGER_CONVERTER, BOOLEAN_CONVERTER, STRING_CONVERTER).convert(value(1.0, "non-true", 1)));
}
|
@Override
public void start() {
if (isStarted()) return;
try {
ServerSocket socket = getServerSocketFactory().createServerSocket(
getPort(), getBacklog(), getInetAddress());
ServerListener<RemoteReceiverClient> listener = createServerListener(socket);
runner = createServerRunner(listener, getContext().getScheduledExecutorService());
runner.setContext(getContext());
getContext().getScheduledExecutorService().execute(runner);
super.start();
} catch (Exception ex) {
addError("server startup error: " + ex, ex);
}
}
|
@Test
public void testStartWhenAlreadyStarted() throws Exception {
appender.start();
appender.start();
assertEquals(1, runner.getStartCount());
}
|
@Override
public YamlShardingCacheOptionsConfiguration swapToYamlConfiguration(final ShardingCacheOptionsConfiguration data) {
YamlShardingCacheOptionsConfiguration result = new YamlShardingCacheOptionsConfiguration();
result.setSoftValues(data.isSoftValues());
result.setInitialCapacity(data.getInitialCapacity());
result.setMaximumSize(data.getMaximumSize());
return result;
}
|
@Test
void assertSwapToYamlConfiguration() {
YamlShardingCacheOptionsConfiguration actual = new YamlShardingCacheOptionsConfigurationSwapper().swapToYamlConfiguration(new ShardingCacheOptionsConfiguration(true, 128, 1024));
assertTrue(actual.isSoftValues());
assertThat(actual.getInitialCapacity(), is(128));
assertThat(actual.getMaximumSize(), is(1024));
}
|
public ApplicationBuilder architecture(String architecture) {
this.architecture = architecture;
return getThis();
}
|
@Test
void architecture() {
ApplicationBuilder builder = new ApplicationBuilder();
builder.architecture("architecture");
Assertions.assertEquals("architecture", builder.build().getArchitecture());
}
|
public static RuleDescriptionSectionContextDto of(String key, String displayName) {
return new RuleDescriptionSectionContextDto(key, displayName);
}
|
@Test
void check_of_with_key_is_empty() {
assertThatThrownBy(() -> RuleDescriptionSectionContextDto.of("", CONTEXT_DISPLAY_NAME))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage(KEY_MUST_BE_SET_ERROR);
}
|
@Override
public ParallelismAndInputInfos decideParallelismAndInputInfosForVertex(
JobVertexID jobVertexId,
List<BlockingResultInfo> consumedResults,
int vertexInitialParallelism,
int vertexMinParallelism,
int vertexMaxParallelism) {
checkArgument(
vertexInitialParallelism == ExecutionConfig.PARALLELISM_DEFAULT
|| vertexInitialParallelism > 0);
checkArgument(
vertexMinParallelism == ExecutionConfig.PARALLELISM_DEFAULT
|| vertexMinParallelism > 0);
checkArgument(
vertexMaxParallelism > 0
&& vertexMaxParallelism >= vertexInitialParallelism
&& vertexMaxParallelism >= vertexMinParallelism);
if (consumedResults.isEmpty()) {
// source job vertex
int parallelism =
vertexInitialParallelism > 0
? vertexInitialParallelism
: computeSourceParallelismUpperBound(jobVertexId, vertexMaxParallelism);
return new ParallelismAndInputInfos(parallelism, Collections.emptyMap());
} else {
int minParallelism = Math.max(globalMinParallelism, vertexMinParallelism);
int maxParallelism = globalMaxParallelism;
if (vertexInitialParallelism == ExecutionConfig.PARALLELISM_DEFAULT
&& vertexMaxParallelism < minParallelism) {
LOG.info(
"The vertex maximum parallelism {} is smaller than the minimum parallelism {}. "
+ "Use {} as the lower bound to decide parallelism of job vertex {}.",
vertexMaxParallelism,
minParallelism,
vertexMaxParallelism,
jobVertexId);
minParallelism = vertexMaxParallelism;
}
if (vertexInitialParallelism == ExecutionConfig.PARALLELISM_DEFAULT
&& vertexMaxParallelism < maxParallelism) {
LOG.info(
"The vertex maximum parallelism {} is smaller than the global maximum parallelism {}. "
+ "Use {} as the upper bound to decide parallelism of job vertex {}.",
vertexMaxParallelism,
maxParallelism,
vertexMaxParallelism,
jobVertexId);
maxParallelism = vertexMaxParallelism;
}
checkState(maxParallelism >= minParallelism);
if (vertexInitialParallelism == ExecutionConfig.PARALLELISM_DEFAULT
&& areAllInputsAllToAll(consumedResults)
&& !areAllInputsBroadcast(consumedResults)) {
return decideParallelismAndEvenlyDistributeData(
jobVertexId,
consumedResults,
vertexInitialParallelism,
minParallelism,
maxParallelism);
} else {
return decideParallelismAndEvenlyDistributeSubpartitions(
jobVertexId,
consumedResults,
vertexInitialParallelism,
minParallelism,
maxParallelism);
}
}
}
|
@Test
void testParallelismAlreadyDecided() {
final DefaultVertexParallelismAndInputInfosDecider decider =
createDecider(MIN_PARALLELISM, MAX_PARALLELISM, DATA_VOLUME_PER_TASK);
AllToAllBlockingResultInfo allToAllBlockingResultInfo =
createAllToAllBlockingResultInfo(
new long[] {10L, 15L, 13L, 12L, 1L, 10L, 8L, 20L, 12L, 17L});
ParallelismAndInputInfos parallelismAndInputInfos =
decider.decideParallelismAndInputInfosForVertex(
new JobVertexID(),
Collections.singletonList(allToAllBlockingResultInfo),
3,
MIN_PARALLELISM,
MAX_PARALLELISM);
assertThat(parallelismAndInputInfos.getParallelism()).isEqualTo(3);
assertThat(parallelismAndInputInfos.getJobVertexInputInfos()).hasSize(1);
checkAllToAllJobVertexInputInfo(
Iterables.getOnlyElement(
parallelismAndInputInfos.getJobVertexInputInfos().values()),
Arrays.asList(new IndexRange(0, 2), new IndexRange(3, 5), new IndexRange(6, 9)));
}
|
public String characterEncoding()
{
return characterEncoding;
}
|
@Test
void shouldReturnUsAsciiWhenCharacterEncodingNotSpecifiedForTypeChar() throws Exception
{
final String testXmlString =
"<types>" +
" <type name=\"testCharDefaultCharacterEncoding\" primitiveType=\"char\" length=\"5\"/>" +
"</types>";
final Map<String, Type> map = parseTestXmlWithMap("/types/type", testXmlString);
assertThat(
(((EncodedDataType)map.get("testCharDefaultCharacterEncoding")).characterEncoding()), is("US-ASCII"));
}
|
public static <T> Either<String, T> resolveImportDMN(Import importElement, Collection<T> dmns, Function<T, QName> idExtractor) {
final String importerDMNNamespace = ((Definitions) importElement.getParent()).getNamespace();
final String importerDMNName = ((Definitions) importElement.getParent()).getName();
final String importNamespace = importElement.getNamespace();
final String importName = importElement.getName();
final String importLocationURI = importElement.getLocationURI(); // This is optional
final String importModelName = importElement.getAdditionalAttributes().get(TImport.MODELNAME_QNAME);
LOGGER.debug("Resolving an Import in DMN Model with name={} and namespace={}. " +
"Importing a DMN model with namespace={} name={} locationURI={}, modelName={}",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
List<T> matchingDMNList = dmns.stream()
.filter(m -> idExtractor.apply(m).getNamespaceURI().equals(importNamespace))
.toList();
if (matchingDMNList.size() == 1) {
T located = matchingDMNList.get(0);
// Check if the located DMN Model in the NS, correspond for the import `drools:modelName`.
if (importModelName == null || idExtractor.apply(located).getLocalPart().equals(importModelName)) {
LOGGER.debug("DMN Model with name={} and namespace={} successfully imported a DMN " +
"with namespace={} name={} locationURI={}, modelName={}",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
return Either.ofRight(located);
} else {
LOGGER.error("DMN Model with name={} and namespace={} can't import a DMN with namespace={}, name={}, modelName={}, " +
"located within namespace only {} but does not match for the actual modelName",
importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, idExtractor.apply(located));
return Either.ofLeft(String.format(
"DMN Model with name=%s and namespace=%s can't import a DMN with namespace=%s, name=%s, modelName=%s, " +
"located within namespace only %s but does not match for the actual modelName",
importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, idExtractor.apply(located)));
}
} else {
List<T> usingNSandName = matchingDMNList.stream()
.filter(dmn -> idExtractor.apply(dmn).getLocalPart().equals(importModelName))
.toList();
if (usingNSandName.size() == 1) {
LOGGER.debug("DMN Model with name={} and namespace={} successfully imported a DMN " +
"with namespace={} name={} locationURI={}, modelName={}",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
return Either.ofRight(usingNSandName.get(0));
} else if (usingNSandName.isEmpty()) {
LOGGER.error("DMN Model with name={} and namespace={} failed to import a DMN with namespace={} name={} locationURI={}, modelName={}.",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
return Either.ofLeft(String.format(
"DMN Model with name=%s and namespace=%s failed to import a DMN with namespace=%s name=%s locationURI=%s, modelName=%s. ",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName));
} else {
LOGGER.error("DMN Model with name={} and namespace={} detected a collision ({} elements) trying to import a DMN with namespace={} name={} locationURI={}, modelName={}",
importerDMNName, importerDMNNamespace, usingNSandName.size(), importNamespace, importName, importLocationURI, importModelName);
return Either.ofLeft(String.format(
"DMN Model with name=%s and namespace=%s detected a collision trying to import a DMN with %s namespace, " +
"%s name and modelName %s. There are %s DMN files with the same namespace in your project. " +
"Please change the DMN namespaces and make them unique to fix this issue.",
importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, usingNSandName.size()));
}
}
}
|
@Test
void nSandModelNameWithAlias() {
final Import i = makeImport("ns1", "aliased", "m1");
final List<QName> available = Arrays.asList(new QName("ns1", "m1"),
new QName("ns2", "m2"),
new QName("ns3", "m3"));
final Either<String, QName> result = ImportDMNResolverUtil.resolveImportDMN(i, available, Function.identity());
assertThat(result.isRight()).isTrue();
assertThat(result.getOrElse(null)).isEqualTo(new QName("ns1", "m1"));
}
|
public static boolean isProcessAlive(long pid, String user) throws IOException {
if (ServerUtils.IS_ON_WINDOWS) {
return isWindowsProcessAlive(pid, user);
}
return isPosixProcessAlive(pid, user);
}
|
@Test
public void testIsProcessAlive() throws Exception {
// specific selected process should not be alive for a randomly generated user
String randomUser = RandomStringUtils.randomAlphanumeric(12);
// get list of few running processes
Collection<Long> pids = getRunningProcessIds(null);
assertFalse(pids.isEmpty());
for (long pid: pids) {
boolean status = ServerUtils.isProcessAlive(pid, randomUser);
assertFalse(status, "Random user " + randomUser + " is not expected to own any process");
}
boolean status = false;
String currentUser = System.getProperty("user.name");
for (long pid: pids) {
// at least one pid will be owned by the current user (doing the testing)
if (ServerUtils.isProcessAlive(pid, currentUser)) {
status = true;
break;
}
}
assertTrue(status, "Expecting user " + currentUser + " to own at least one process");
}
|
public CoordinatorResult<TxnOffsetCommitResponseData, CoordinatorRecord> commitTransactionalOffset(
RequestContext context,
TxnOffsetCommitRequestData request
) throws ApiException {
validateTransactionalOffsetCommit(context, request);
final TxnOffsetCommitResponseData response = new TxnOffsetCommitResponseData();
final List<CoordinatorRecord> records = new ArrayList<>();
final long currentTimeMs = time.milliseconds();
request.topics().forEach(topic -> {
final TxnOffsetCommitResponseTopic topicResponse = new TxnOffsetCommitResponseTopic().setName(topic.name());
response.topics().add(topicResponse);
topic.partitions().forEach(partition -> {
if (isMetadataInvalid(partition.committedMetadata())) {
topicResponse.partitions().add(new TxnOffsetCommitResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
} else {
log.debug("[GroupId {}] Committing transactional offsets {} for partition {}-{} from member {} with leader epoch {}.",
request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
request.memberId(), partition.committedLeaderEpoch());
topicResponse.partitions().add(new TxnOffsetCommitResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(Errors.NONE.code()));
final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
partition,
currentTimeMs
);
records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
request.groupId(),
topic.name(),
partition.partitionIndex(),
offsetAndMetadata,
metadataImage.features().metadataVersion()
));
}
});
});
if (!records.isEmpty()) {
metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
}
return new CoordinatorResult<>(records, response);
}
|
@Test
public void testGenericGroupTransactionalOffsetCommitWithUnknownGroupId() {
OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
assertThrows(IllegalGenerationException.class, () -> context.commitTransactionalOffset(
new TxnOffsetCommitRequestData()
.setGroupId("foo")
.setMemberId("member")
.setGenerationId(10)
.setTopics(Collections.singletonList(
new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic()
.setName("bar")
.setPartitions(Collections.singletonList(
new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition()
.setPartitionIndex(0)
.setCommittedOffset(100L)
.setCommittedLeaderEpoch(10)
.setCommittedMetadata("metadata")
))
))
));
}
|
@Bean
public RegisterClientServerDisruptorPublisher registerClientServerDisruptorPublisher(final List<ShenyuClientRegisterService> shenyuClientRegisterService, final DiscoveryService discoveryService) {
RegisterClientServerDisruptorPublisher publisher = RegisterClientServerDisruptorPublisher.getInstance();
Map<String, ShenyuClientRegisterService> registerServiceMap = shenyuClientRegisterService.stream().collect(Collectors.toMap(ShenyuClientRegisterService::rpcType, Function.identity()));
publisher.start(registerServiceMap, discoveryService);
return publisher;
}
|
@Test
public void testRegisterClientServerDisruptorPublisher() {
DiscoveryService discoveryService = mock(DiscoveryService.class);
List<ShenyuClientRegisterService> shenyuClientRegisterService = new ArrayList<>();
RegisterClientServerDisruptorPublisher publisher = registerCenterConfiguration
.registerClientServerDisruptorPublisher(shenyuClientRegisterService, discoveryService);
assertNotNull(publisher);
}
|
@Override
public List<MethodMetadata> parseAndValidateMetadata(Class<?> targetType) {
List<MethodMetadata> methodsMetadata = this.delegate.parseAndValidateMetadata(targetType);
for (final MethodMetadata metadata : methodsMetadata) {
final Type type = metadata.returnType();
if (!isReactive(type)) {
throw new IllegalArgumentException(String.format(
"Method %s of contract %s doesn't returns a org.reactivestreams.Publisher",
metadata.configKey(), targetType.getSimpleName()));
}
/*
* we will need to change the return type of the method to match the return type contained
* within the Publisher
*/
Type[] actualTypes = ((ParameterizedType) type).getActualTypeArguments();
if (actualTypes.length > 1) {
throw new IllegalStateException("Expected only one contained type.");
} else {
Class<?> actual = Types.getRawType(actualTypes[0]);
if (Stream.class.isAssignableFrom(actual)) {
throw new IllegalArgumentException(
"Streams are not supported when using Reactive Wrappers");
}
metadata.returnType(type);
}
}
return methodsMetadata;
}
|
@Test
void onlyReactiveReturnTypesSupported() {
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> {
Contract contract = new ReactiveDelegatingContract(new Contract.Default());
contract.parseAndValidateMetadata(TestSynchronousService.class);
});
}
|
private <T> T accept(Expression<T> expr) {
return expr.accept(this);
}
|
@Test
public void testNot() throws Exception {
final Expr.Greater trueExpr = Expr.Greater.create(Expr.NumberValue.create(2), Expr.NumberValue.create(1));
final Expr.Greater falseExpr = Expr.Greater.create(Expr.NumberValue.create(1), Expr.NumberValue.create(2));
assertThat(Expr.Not.create(falseExpr).accept(new BooleanNumberConditionsVisitor()))
.isTrue();
assertThat(Expr.Not.create(trueExpr).accept(new BooleanNumberConditionsVisitor()))
.isFalse();
assertThat(loadCondition("condition-not.json").accept(new BooleanNumberConditionsVisitor()))
.isTrue();
}
|
@Override
public boolean hasNext() {
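// Evaluated under the iterator's lock: report true while records remain in
// the cache, false once the iterator is closed, and otherwise wait for the
// cache to be refilled before re-checking.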
return supplyWithLock(
() -> {
if (cacheQueue.size() > 0) {
return true;
} else if (closed) {
return false;
} else {
waitCacheNotEmpty();
return hasNext();
}
});
}
|
@Test
void testHasNext() throws ExecutionException, InterruptedException {
CompletableFuture<Object> udfTrigger = new CompletableFuture<>();
CompletableFuture<Object> udfReadIteratorFinishIdentifier = new CompletableFuture<>();
CompletableFuture<Object> udfFinishTrigger = new CompletableFuture<>();
MapPartitionIterator<String> iterator =
new MapPartitionIterator<>(
inputIterator -> {
try {
udfTrigger.get();
} catch (InterruptedException | ExecutionException e) {
ExceptionUtils.rethrow(e);
}
for (int index = 0; index < RECORD_NUMBER; ++index) {
inputIterator.next();
}
udfReadIteratorFinishIdentifier.complete(null);
try {
udfFinishTrigger.get();
} catch (InterruptedException | ExecutionException e) {
ExceptionUtils.rethrow(e);
}
});
// 1. Test hasNext() when the cache is not empty in the MapPartitionIterator.
addRecordToIterator(RECORD_NUMBER, iterator);
assertThat(iterator.hasNext()).isTrue();
// 2. Test hasNext() when the cache is empty in the MapPartitionIterator.
udfTrigger.complete(null);
udfReadIteratorFinishIdentifier.get();
assertThat(udfReadIteratorFinishIdentifier).isCompleted();
CompletableFuture<Object> mockedUDFThread1 = new CompletableFuture<>();
CompletableFuture<Boolean> hasNextFinishIdentifier1 = new CompletableFuture<>();
mockedUDFThread1.thenRunAsync(
() -> {
boolean hasNext = iterator.hasNext();
hasNextFinishIdentifier1.complete(hasNext);
});
mockedUDFThread1.complete(null);
assertThat(hasNextFinishIdentifier1).isNotCompleted();
iterator.addRecord(RECORD);
hasNextFinishIdentifier1.get();
assertThat(hasNextFinishIdentifier1).isCompletedWithValue(true);
iterator.next();
// 3. Test hasNext() when the MapPartitionIterator is closed.
CompletableFuture<Object> mockedUDFThread2 = new CompletableFuture<>();
CompletableFuture<Boolean> hasNextFinishIdentifier2 = new CompletableFuture<>();
mockedUDFThread2.thenRunAsync(
() -> {
boolean hasNext = iterator.hasNext();
hasNextFinishIdentifier2.complete(hasNext);
udfFinishTrigger.complete(null);
});
mockedUDFThread2.complete(null);
assertThat(hasNextFinishIdentifier2).isNotCompleted();
iterator.close();
assertThat(hasNextFinishIdentifier2).isCompletedWithValue(false);
assertThat(udfFinishTrigger).isCompleted();
}
|
@Override
public ColumnStatistic getColumnStatistic(Table table, String column) {
Preconditions.checkState(table != null);
// get Statistics Table column info, just return default column statistics
if (StatisticUtils.statisticTableBlackListCheck(table.getId())) {
return ColumnStatistic.unknown();
}
if (!StatisticUtils.checkStatisticTableStateNormal()) {
return ColumnStatistic.unknown();
}
try {
CompletableFuture<Optional<ColumnStatistic>> result =
cachedStatistics.get(new ColumnStatsCacheKey(table.getId(), column));
if (result.isDone()) {
Optional<ColumnStatistic> realResult;
realResult = result.get();
return realResult.orElseGet(ColumnStatistic::unknown);
} else {
return ColumnStatistic.unknown();
}
} catch (Exception e) {
LOG.warn("Failed to execute getColumnStatistic", e);
return ColumnStatistic.unknown();
}
}
|
@Test
public void testGetColumnStatistic(@Mocked CachedStatisticStorage cachedStatisticStorage) {
Database db = connectContext.getGlobalStateMgr().getDb("test");
OlapTable table = (OlapTable) db.getTable("t0");
new Expectations() {
{
cachedStatisticStorage.getColumnStatistic(table, "v1");
result = ColumnStatistic.builder().setDistinctValuesCount(888).build();
minTimes = 0;
cachedStatisticStorage.getColumnStatistic(table, "v2");
result = ColumnStatistic.builder().setDistinctValuesCount(999).build();
minTimes = 0;
cachedStatisticStorage.getColumnStatistic(table, "v3");
result = ColumnStatistic.builder().setDistinctValuesCount(666).build();
minTimes = 0;
}
};
ColumnStatistic columnStatistic1 =
Deencapsulation.invoke(cachedStatisticStorage, "getColumnStatistic", table, "v1");
Assert.assertEquals(888, columnStatistic1.getDistinctValuesCount(), 0.001);
ColumnStatistic columnStatistic2 =
Deencapsulation.invoke(cachedStatisticStorage, "getColumnStatistic", table, "v2");
Assert.assertEquals(999, columnStatistic2.getDistinctValuesCount(), 0.001);
ColumnStatistic columnStatistic3 =
Deencapsulation.invoke(cachedStatisticStorage, "getColumnStatistic", table, "v3");
Assert.assertEquals(666, columnStatistic3.getDistinctValuesCount(), 0.001);
}
|
public void updateQuarantineState(PartitionState newPartitionState,
PartitionState oldPartitionState, long clusterAvgLatency)
{
long quarantineLatency = Math.max((long) (clusterAvgLatency * _relativeLatencyLowThresholdFactor),
MIN_QUARANTINE_LATENCY_MS);
quarantineLatency = Math.min(MAX_QUARANTINE_LATENCY_MS, quarantineLatency);
long currentTime = _clock.currentTimeMillis();
// Step 0: Pre-check if quarantine method works for clients, if it works, we will mark _quarantineEnabled as true
preCheckQuarantine(newPartitionState, quarantineLatency);
// Step 1: check if quarantine state still applies. If not, remove it from the quarantine map
checkAndRemoveQuarantine(newPartitionState);
// Step 2: Handle special clients recovery logic from the recovery map
handleClientsRecovery(newPartitionState);
// Step 3: Enroll new quarantine and recovery map
enrollNewQuarantineAndRecovery(newPartitionState, oldPartitionState, quarantineLatency, currentTime);
}
|
@Test
public void testQuarantineNotEnabledInConfig()
{
setup(RelativeLoadBalancerStrategyFactory.DEFAULT_QUARANTINE_MAX_PERCENT, false, false);
PartitionState state = new PartitionStateTestDataBuilder()
.setTrackerClientStateMap(TrackerClientMockHelper.mockTrackerClients(2),
Arrays.asList(StateUpdater.MIN_HEALTH_SCORE, 0.6),
Arrays.asList(TrackerClientState.HealthState.UNHEALTHY, TrackerClientState.HealthState.UNHEALTHY),
Arrays.asList(20, 20))
.build();
_quarantineManager.updateQuarantineState(state, state, DEFAULT_AVG_CLUSTER_LATENCY);
Mockito.verifyZeroInteractions(_executorService);
assertTrue(state.getQuarantineMap().isEmpty(), "Quarantine should not be enabled.");
}
|
@Override
public byte[] putIfAbsent(final Bytes key,
final byte[] valueAndTimestamp) {
final byte[] previous = wrapped().putIfAbsent(key, valueAndTimestamp);
if (previous == null) {
// then it was absent
log(key, rawValue(valueAndTimestamp), valueAndTimestamp == null ? context.timestamp() : timestamp(valueAndTimestamp));
}
return previous;
}
|
@Test
public void shouldReturnNullOnPutIfAbsentWhenNoPreviousValue() {
assertThat(store.putIfAbsent(hi, rawThere), is(nullValue()));
}
|
public static <T> T visit(final Schema start, final SchemaVisitor<T> visitor) {
// Set of Visited Schemas
IdentityHashMap<Schema, Schema> visited = new IdentityHashMap<>();
// Stack that contains the Schemas to process and afterVisitNonTerminal
// functions.
// Deque<Either<Schema, Supplier<SchemaVisitorAction>>>
// Using either has a cost which we want to avoid...
Deque<Object> dq = new ArrayDeque<>();
dq.addLast(start);
Object current;
while ((current = dq.pollLast()) != null) {
if (current instanceof Supplier) {
// we are executing a non terminal post visit.
SchemaVisitorAction action = ((Supplier<SchemaVisitorAction>) current).get();
switch (action) {
case CONTINUE:
break;
case SKIP_SUBTREE:
throw new UnsupportedOperationException();
case SKIP_SIBLINGS:
while (dq.getLast() instanceof Schema) {
dq.removeLast();
}
break;
case TERMINATE:
return visitor.get();
default:
throw new UnsupportedOperationException("Invalid action " + action);
}
} else {
Schema schema = (Schema) current;
boolean terminate;
if (!visited.containsKey(schema)) {
Schema.Type type = schema.getType();
switch (type) {
case ARRAY:
terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getElementType()));
visited.put(schema, schema);
break;
case RECORD:
terminate = visitNonTerminal(visitor, schema, dq, () -> schema.getFields().stream().map(Field::schema)
.collect(Collectors.toCollection(ArrayDeque::new)).descendingIterator());
visited.put(schema, schema);
break;
case UNION:
terminate = visitNonTerminal(visitor, schema, dq, schema.getTypes());
visited.put(schema, schema);
break;
case MAP:
terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getValueType()));
visited.put(schema, schema);
break;
case NULL:
case BOOLEAN:
case BYTES:
case DOUBLE:
case ENUM:
case FIXED:
case FLOAT:
case INT:
case LONG:
case STRING:
terminate = visitTerminal(visitor, schema, dq);
break;
default:
throw new UnsupportedOperationException("Invalid type " + type);
}
} else {
terminate = visitTerminal(visitor, schema, dq);
}
if (terminate) {
return visitor.get();
}
}
}
return visitor.get();
}
|
@Test
void visit5() {
String s5 = "{\"type\": \"record\", \"name\": \"c1\", \"fields\": ["
+ "{\"name\": \"f1\", \"type\": {\"type\": \"record\", \"name\": \"c2\", \"fields\": "
+ "[{\"name\": \"f11\", \"type\": \"int\"}]}}," + "{\"name\": \"f2\", \"type\": \"long\"}" + "]}";
assertEquals("c1.c2.\"int\"!\"long\"!", Schemas.visit(new Schema.Parser().parse(s5), new TestVisitor()));
}
|
public static Permission getPermission(String name, String serviceName, String... actions) {
PermissionFactory permissionFactory = PERMISSION_FACTORY_MAP.get(serviceName);
if (permissionFactory == null) {
throw new IllegalArgumentException("No permissions found for service: " + serviceName);
}
return permissionFactory.create(name, actions);
}
|
@Test
public void getPermission_CountdownLatch() {
Permission permission = ActionConstants.getPermission("foo", CountDownLatchServiceUtil.SERVICE_NAME);
assertNotNull(permission);
assertTrue(permission instanceof CountDownLatchPermission);
}
|
@Override
public Graph<Entity> resolveForInstallation(Entity entity,
Map<String, ValueReference> parameters,
Map<EntityDescriptor, Entity> entities) {
if (entity instanceof EntityV1) {
return resolveForInstallation((EntityV1) entity, parameters, entities);
} else {
throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass());
}
}
|
@Test
@MongoDBFixtures({"LookupCacheFacadeTest.json", "LookupDataAdapterFacadeTest.json", "LookupTableFacadeTest.json"})
public void resolveEntity() {
final Entity entity = EntityV1.builder()
.id(ModelId.of("5adf24dd4b900a0fdb4e530d"))
.type(ModelTypes.LOOKUP_TABLE_V1)
.data(objectMapper.convertValue(LookupTableEntity.create(
ValueReference.of(DefaultEntityScope.NAME),
ValueReference.of("http-dsv-no-cache"),
ValueReference.of("HTTP DSV without Cache"),
ValueReference.of("HTTP DSV without Cache"),
ValueReference.of("5adf24b24b900a0fdb4e52dd"),
ValueReference.of("5adf24a04b900a0fdb4e52c8"),
ValueReference.of("Default single value"),
ValueReference.of(LookupDefaultValue.Type.STRING),
ValueReference.of("Default multi value"),
ValueReference.of(LookupDefaultValue.Type.OBJECT)), JsonNode.class))
.build();
final Entity cacheEntity = EntityV1.builder()
.id(ModelId.of("5adf24b24b900a0fdb4e52dd"))
.type(ModelTypes.LOOKUP_CACHE_V1)
.data(objectMapper.convertValue(LookupCacheEntity.create(
ValueReference.of(DefaultEntityScope.NAME),
ValueReference.of("no-op-cache"),
ValueReference.of("No-op cache"),
ValueReference.of("No-op cache"),
ReferenceMapUtils.toReferenceMap(ImmutableMap.of("type", "none"))
), JsonNode.class))
.build();
final Entity dataAdapterEntity = EntityV1.builder()
.id(ModelId.of("5adf24a04b900a0fdb4e52c8"))
.type(ModelTypes.LOOKUP_ADAPTER_V1)
.data(objectMapper.convertValue(LookupDataAdapterEntity.create(
ValueReference.of(DefaultEntityScope.NAME),
ValueReference.of("http-dsv"),
ValueReference.of("HTTP DSV"),
ValueReference.of("HTTP DSV"),
ReferenceMapUtils.toReferenceMap(Collections.emptyMap())
), JsonNode.class))
.build();
final Map<EntityDescriptor, Entity> entities = ImmutableMap.of(
cacheEntity.toEntityDescriptor(), cacheEntity,
dataAdapterEntity.toEntityDescriptor(), dataAdapterEntity);
final Graph<Entity> graph = facade.resolveForInstallation(entity, Collections.emptyMap(), entities);
assertThat(graph.nodes())
.hasSize(3)
.containsOnly(entity, cacheEntity, dataAdapterEntity);
}
|
public static long deriveChainId(long v) {
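// EIP-155 encodes the chain id into the signature's v value as
// v = chainId * 2 + CHAIN_ID_INC (plus one for the odd recovery parity);
// pre-EIP-155 signatures use v = LOWER_REAL_V or LOWER_REAL_V + 1 and carry
// no chain id (assuming the usual constants 35 and 27).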
if (v == LOWER_REAL_V || v == (LOWER_REAL_V + 1)) {
return 0L;
}
return (v - CHAIN_ID_INC) / 2;
}
|
@Test
void deriveChainIdWhenMainNet() {
long v = 37;
long chainId = TransactionUtils.deriveChainId(v);
assertEquals(1, chainId);
}
|
public List<DataRecord> merge(final List<DataRecord> dataRecords) {
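// Collapse the batch by record key: INSERT, UPDATE and DELETE operations on
// the same key are merged so that at most one record per key survives.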
Map<DataRecord.Key, DataRecord> result = new HashMap<>();
dataRecords.forEach(each -> {
if (PipelineSQLOperationType.INSERT == each.getType()) {
mergeInsert(each, result);
} else if (PipelineSQLOperationType.UPDATE == each.getType()) {
mergeUpdate(each, result);
} else if (PipelineSQLOperationType.DELETE == each.getType()) {
mergeDelete(each, result);
}
});
return new ArrayList<>(result.values());
}
|
@Test
void assertInsertBeforeDelete() {
DataRecord beforeDataRecord = mockInsertDataRecord(1, 10, 50);
DataRecord afterDataRecord = mockDeleteDataRecord(1, 10, 50);
Collection<DataRecord> actual = groupEngine.merge(Arrays.asList(beforeDataRecord, afterDataRecord));
assertThat(actual.size(), is(1));
assertThat(actual.iterator().next(), sameInstance(afterDataRecord));
}
|
static boolean differenceGreaterThan(
Instant preexistingExecutionTime, Instant potentiallyNewExecutionTime, Duration delta) {
final Duration difference =
Duration.between(preexistingExecutionTime, potentiallyNewExecutionTime).abs();
return difference.toMillis() > delta.toMillis();
}
|
@Test
void test_differenceGreaterThan() {
assertTrue(
differenceGreaterThan(clock.now(), clock.now().minusSeconds(10), Duration.ofSeconds(1)));
assertTrue(
differenceGreaterThan(clock.now(), clock.now().plusSeconds(10), Duration.ofSeconds(1)));
assertFalse(
differenceGreaterThan(clock.now(), clock.now().minusSeconds(10), Duration.ofSeconds(11)));
assertFalse(
differenceGreaterThan(clock.now(), clock.now().plusSeconds(10), Duration.ofSeconds(11)));
}
|
public static Predicate parse(String expression)
{
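// Shunting-yard style parse: operand tokens are predicate class names
// instantiated onto predicateStack, operators go on operatorStack, and a
// higher-precedence operator on the stack is evaluated before a new one is
// pushed; '(' and ')' delimit sub-expressions.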
final Stack<Predicate> predicateStack = new Stack<>();
final Stack<Character> operatorStack = new Stack<>();
final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll("");
final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true);
boolean isTokenMode = true;
while (true)
{
final Character operator;
final String token;
if (isTokenMode)
{
if (tokenizer.hasMoreTokens())
{
token = tokenizer.nextToken();
}
else
{
break;
}
if (OPERATORS.contains(token))
{
operator = token.charAt(0);
}
else
{
operator = null;
}
}
else
{
operator = operatorStack.pop();
token = null;
}
isTokenMode = true;
if (operator == null)
{
try
{
predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance());
}
catch (ClassCastException e)
{
throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e);
}
catch (Exception e)
{
throw new RuntimeException(e);
}
}
else
{
if (operatorStack.empty() || operator == '(')
{
operatorStack.push(operator);
}
else if (operator == ')')
{
while (operatorStack.peek() != '(')
{
evaluate(predicateStack, operatorStack);
}
operatorStack.pop();
}
else
{
if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek()))
{
evaluate(predicateStack, operatorStack);
isTokenMode = false;
}
operatorStack.push(operator);
}
}
}
while (!operatorStack.empty())
{
evaluate(predicateStack, operatorStack);
}
if (predicateStack.size() > 1)
{
throw new RuntimeException("Invalid logical expression");
}
return predicateStack.pop();
}
|
@Test
public void testPredicate()
{
final Predicate parsed = PredicateExpressionParser.parse("com.linkedin.data.it.AlwaysTruePredicate");
Assert.assertEquals(parsed.getClass(), AlwaysTruePredicate.class);
}
|
@Override
public KeyValueIterator<Windowed<K>, V> fetch(final K key) {
Objects.requireNonNull(key, "key can't be null");
final List<ReadOnlySessionStore<K, V>> stores = storeProvider.stores(storeName, queryableStoreType);
for (final ReadOnlySessionStore<K, V> store : stores) {
try {
final KeyValueIterator<Windowed<K>, V> result = store.fetch(key);
if (!result.hasNext()) {
result.close();
} else {
return result;
}
} catch (final InvalidStateStoreException ise) {
throw new InvalidStateStoreException("State store [" + storeName + "] is not available anymore" +
" and may have been migrated to another instance; " +
"please re-discover its location from the state metadata. " +
"Original error message: " + ise);
}
}
return KeyValueIterators.emptyIterator();
}
|
@Test
public void shouldFetchKeyRangeAcrossStoresWithNullKeyFrom() {
final ReadOnlySessionStoreStub<String, Long> secondUnderlying = new
ReadOnlySessionStoreStub<>();
stubProviderTwo.addStore(storeName, secondUnderlying);
underlyingSessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 0L);
secondUnderlying.put(new Windowed<>("b", new SessionWindow(0, 0)), 10L);
final List<KeyValue<Windowed<String>, Long>> results = StreamsTestUtils.toList(sessionStore.fetch(null, "b"));
assertThat(results, equalTo(Arrays.asList(
KeyValue.pair(new Windowed<>("a", new SessionWindow(0, 0)), 0L),
KeyValue.pair(new Windowed<>("b", new SessionWindow(0, 0)), 10L))));
}
|
@Override
public TopicConfig examineTopicConfig(String addr,
String topic) throws RemotingSendRequestException, RemotingConnectException, RemotingTimeoutException, InterruptedException, MQBrokerException {
return defaultMQAdminExtImpl.examineTopicConfig(addr, topic);
}
|
@Test
public void testExamineTopicConfig() throws MQBrokerException, RemotingException, InterruptedException {
TopicConfig topicConfig = defaultMQAdminExt.examineTopicConfig("127.0.0.1:10911", "topic_test_examine_topicConfig");
assertThat(topicConfig.getTopicName().equals("topic_test_examine_topicConfig")).isTrue();
}
|
public SpoutOutputCollector getCollector() {
return collector;
}
|
@Test
public void testReadFailures() throws Exception {
// 1) create couple of input files to read
Path file1 = new Path(source.toString() + "/file1.txt");
Path file2 = new Path(source.toString() + "/file2.txt");
createTextFile(file1, 6);
createTextFile(file2, 7);
assertEquals(2, listDir(source).size());
// 2) run spout
try (
AutoCloseableHdfsSpout closeableSpout = makeSpout(MockTextFailingReader.class.getName(), MockTextFailingReader.defaultFields)) {
HdfsSpout spout = closeableSpout.spout;
Map<String, Object> conf = getCommonConfigs();
openSpout(spout, 0, conf);
List<String> res = runSpout(spout, "r11");
String[] expected = new String[]{ "[line 0]", "[line 1]", "[line 2]", "[line 0]", "[line 1]", "[line 2]" };
assertArrayEquals(expected, res.toArray());
// 3) make sure 6 lines (3 from each file) were read in all
assertEquals(((MockCollector) spout.getCollector()).lines.size(), 6);
ArrayList<Path> badFiles = HdfsUtils.listFilesByModificationTime(fs, badfiles, 0);
assertEquals(badFiles.size(), 2);
}
}
|
public int readInt1() {
return byteBuf.readUnsignedByte();
}
|
@Test
void assertReadInt1() {
when(byteBuf.readUnsignedByte()).thenReturn((short) 1);
assertThat(new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8).readInt1(), is(1));
}
|
private static Path toPath(@NonNull File file) throws IOException {
try {
return file.toPath();
} catch (InvalidPathException e) {
throw new IOException(e);
}
}
|
@Test
public void badPath() throws Exception {
final File newFile = tmp.newFile();
File parentExistsAndIsAFile = new File(newFile, "badChild");
assertTrue(newFile.exists());
assertFalse(parentExistsAndIsAFile.exists());
final IOException e = assertThrows(IOException.class,
() -> new AtomicFileWriter(parentExistsAndIsAFile.toPath(), StandardCharsets.UTF_8));
assertThat(e.getMessage(),
containsString("exists and is neither a directory nor a symlink to a directory"));
}
|
public Future<KafkaVersionChange> reconcile() {
return getVersionFromController()
.compose(i -> getPods())
.compose(this::detectToAndFromVersions)
.compose(i -> prepareVersionChange());
}
|
@Test
public void testDowngradeFailsWithNewProtocolVersionInOnePod(VertxTestContext context) {
String oldKafkaVersion = KafkaVersionTestUtils.LATEST_KAFKA_VERSION;
String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.LATEST_PROTOCOL_VERSION;
String oldLogMessageFormatVersion = KafkaVersionTestUtils.LATEST_FORMAT_VERSION;
String kafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION;
String interBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION;
String logMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION;
VersionChangeCreator vcc = mockVersionChangeCreator(
mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion),
mockNewCluster(
null,
mockSps(oldKafkaVersion),
mockMixedPods(oldKafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion, oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion)
)
);
Checkpoint async = context.checkpoint();
vcc.reconcile().onComplete(context.failing(c -> context.verify(() -> {
assertThat(c.getClass(), is(KafkaUpgradeException.class));
assertThat(c.getMessage(), is("log.message.format.version (" + oldInterBrokerProtocolVersion + ") and inter.broker.protocol.version (" + oldLogMessageFormatVersion + ") used by the brokers have to be set and be lower or equal to the Kafka broker version we downgrade to (" + KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION + ")"));
async.flag();
})));
}
|
public static SortOrder buildSortOrder(Table table) {
return buildSortOrder(table.schema(), table.spec(), table.sortOrder());
}
|
@Test
public void testEmptySpecsV1() {
PartitionSpec spec = PartitionSpec.unpartitioned();
SortOrder order = SortOrder.builderFor(SCHEMA).withOrderId(1).asc("id", NULLS_LAST).build();
TestTables.TestTable table = TestTables.create(tableDir, "test", SCHEMA, spec, order, 1);
// pass PartitionSpec.unpartitioned() on purpose as it has an empty schema
SortOrder actualOrder = SortOrderUtil.buildSortOrder(table.schema(), spec, table.sortOrder());
assertThat(actualOrder.orderId()).as("Order ID must be fresh").isOne();
assertThat(actualOrder.fields()).as("Order must have 1 field").hasSize(1);
assertThat(actualOrder.fields().get(0).sourceId()).as("Field id must be fresh").isOne();
assertThat(actualOrder.fields().get(0).direction()).as("Direction must match").isEqualTo(ASC);
assertThat(actualOrder.fields().get(0).nullOrder())
.as("Null order must match")
.isEqualTo(NULLS_LAST);
}
|
@Override
public Optional<Decision> onBufferConsumed(BufferIndexAndChannel consumedBuffer) {
return Optional.of(Decision.builder().addBufferToRelease(consumedBuffer).build());
}
|
@Test
void testOnBufferConsumed() {
BufferIndexAndChannel bufferIndexAndChannel = new BufferIndexAndChannel(0, 0);
Optional<Decision> consumedDecision = spillStrategy.onBufferConsumed(bufferIndexAndChannel);
assertThat(consumedDecision)
.hasValueSatisfying(
(decision -> {
assertThat(decision.getBufferToRelease())
.hasSize(1)
.hasEntrySatisfying(
0,
(list) ->
assertThat(list)
.containsExactly(
bufferIndexAndChannel));
assertThat(decision.getBufferToSpill()).isEmpty();
}));
}
|
public void copyTo(HttpServletRequest httpRequest, HttpServletResponse httpResponse)
throws IOException {
if (shouldMock()) {
return;
}
assert httpRequest != null;
assert httpResponse != null;
final long start = System.currentTimeMillis();
int dataLength = -1;
try {
final URLConnection connection = openConnection();
// for translations
connection.setRequestProperty("Accept-Language",
httpRequest.getHeader("Accept-Language"));
connection.connect();
httpResponse.setContentType(connection.getContentType());
// Content-Disposition for downloads, e.g. an hs_err_pid file
final String contentDisposition = connection.getHeaderField("Content-Disposition");
if (contentDisposition != null) {
httpResponse.setHeader("Content-Disposition", contentDisposition);
}
final OutputStream output = httpResponse.getOutputStream();
dataLength = pump(output, connection);
} finally {
LOG.info("http call done in " + (System.currentTimeMillis() - start) + " ms with "
+ dataLength / 1024 + " KB read for " + url);
}
}
|
@Test
public void testCopyTo() throws IOException {
Utils.setProperty(Parameters.PARAMETER_SYSTEM_PREFIX + "mockLabradorRetriever", "false");
final File file = File.createTempFile("testLabradorRetriever", null);
try {
final HttpServletRequest request = createNiceMock(HttpServletRequest.class);
final HttpServletResponse response = createNiceMock(HttpServletResponse.class);
// if the file were not empty, a ByteArrayOutputStream would have to be
// returned for response.getOutputStream(), using expect and replay
new LabradorRetriever(file.toURI().toURL()).copyTo(request, response);
} finally {
if (!file.delete()) {
file.deleteOnExit();
}
}
}
|
@Override
public List<OptExpression> transform(OptExpression input, OptimizerContext context) {
LogicalOlapScanOperator scan = (LogicalOlapScanOperator) input.getOp();
PhysicalOlapScanOperator physicalOlapScan = new PhysicalOlapScanOperator(scan);
physicalOlapScan.setSalt(scan.getSalt());
physicalOlapScan.setColumnAccessPaths(scan.getColumnAccessPaths());
OptExpression result = new OptExpression(physicalOlapScan);
return Lists.newArrayList(result);
}
|
@Test
public void transform(@Mocked OlapTable table) {
LogicalOlapScanOperator logical = new LogicalOlapScanOperator(table, Maps.newHashMap(), Maps.newHashMap(),
null, -1, ConstantOperator.createBoolean(true),
1, Lists.newArrayList(1L, 2L, 3L), null,
false, Lists.newArrayList(4L), null, null, false);
List<OptExpression> output =
new OlapScanImplementationRule().transform(new OptExpression(logical), new OptimizerContext(
new Memo(), new ColumnRefFactory()));
assertEquals(1, output.size());
PhysicalOlapScanOperator physical = (PhysicalOlapScanOperator) output.get(0).getOp();
assertEquals(1, physical.getSelectedIndexId());
assertEquals(3, physical.getSelectedPartitionId().size());
assertEquals(1, physical.getSelectedTabletId().size());
assertEquals(ConstantOperator.createBoolean(true), physical.getPredicate());
}
|
public ObjectRecipient getOwner() {
if ( obj != null ) {
return obj.getOwner();
} else {
return null;
}
}
|
@Test
public void testGetOwner() {
assertEquals( RECIPIENT0, repositoryObjectAcls.getOwner().getName() );
repositoryObjectAcls = new UIRepositoryObjectAcls();
assertNull( repositoryObjectAcls.getOwner() );
}
|
IdBatchAndWaitTime newIdBaseLocal(int batchSize) {
return newIdBaseLocal(Clock.currentTimeMillis(), getNodeId(), batchSize);
}
|
@Test
public void when_currentTimeBeforeAllowedRange_then_fail() {
long lowestGoodTimestamp = DEFAULT_EPOCH_START - (1L << DEFAULT_BITS_TIMESTAMP);
gen.newIdBaseLocal(lowestGoodTimestamp, 0, 1);
assertThatThrownBy(() -> gen.newIdBaseLocal(lowestGoodTimestamp - 1, 0, 1)).isInstanceOf(HazelcastException.class)
.hasMessage("Current time out of allowed range");
}
|
@ThriftField(1)
public List<PrestoThriftRange> getRanges()
{
return ranges;
}
|
@Test
public void testFromValueSetNone()
{
PrestoThriftValueSet thriftValueSet = fromValueSet(ValueSet.none(BIGINT));
assertNotNull(thriftValueSet.getRangeValueSet());
assertEquals(thriftValueSet.getRangeValueSet().getRanges(), ImmutableList.of());
}
|
@Deprecated(forRemoval = true, since = "13.0")
public static byte[] convertJavaToOctetStream(Object source, MediaType sourceMediaType, Marshaller marshaller) throws IOException, InterruptedException {
if (source == null) return null;
if (!sourceMediaType.match(MediaType.APPLICATION_OBJECT)) {
throw new EncodingException("sourceMediaType not conforming to application/x-java-object!");
}
return marshaller.objectToByteBuffer(decodeObjectContent(source, sourceMediaType));
}
|
@Test
public void testJavaToOctetStreamConversion() throws IOException, InterruptedException {
Marshaller marshaller = new ProtoStreamMarshaller();
String string = "I've seen things you people wouldn't believe.";
Double number = 12.1d;
Instant complex = Instant.now();
byte[] binary = new byte[]{1, 2, 3};
MediaType stringType = APPLICATION_OBJECT.withParameter("type", "java.lang.String");
byte[] result = StandardConversions.convertJavaToOctetStream(string, stringType, marshaller);
assertArrayEquals(marshaller.objectToByteBuffer(string), result);
MediaType doubleType = APPLICATION_OBJECT.withParameter("type", "java.lang.Double");
result = StandardConversions.convertJavaToOctetStream(number, doubleType, marshaller);
assertArrayEquals(marshaller.objectToByteBuffer(number), result);
MediaType customType = APPLICATION_OBJECT.withParameter("type", complex.getClass().getName());
result = StandardConversions.convertJavaToOctetStream(complex, customType, marshaller);
assertArrayEquals(marshaller.objectToByteBuffer(complex), result);
MediaType byteArrayType = APPLICATION_OBJECT.withParameter("type", "ByteArray");
result = StandardConversions.convertJavaToOctetStream(binary, byteArrayType, marshaller);
assertArrayEquals(marshaller.objectToByteBuffer(binary), result);
}
|
public boolean isValid(String value) {
if (value == null) {
return false;
}
URI uri; // ensure value is a valid URI
try {
uri = new URI(value);
} catch (URISyntaxException e) {
return false;
}
// OK, perform additional validation
String scheme = uri.getScheme();
if (!isValidScheme(scheme)) {
return false;
}
String authority = uri.getRawAuthority();
if ("file".equals(scheme) && (authority == null || "".equals(authority))) { // Special case - file: allows an empty authority
return true; // this is a local file - nothing more to do here
} else if ("file".equals(scheme) && authority != null && authority.contains(":")) {
return false;
} else {
// Validate the authority
if (!isValidAuthority(authority)) {
return false;
}
}
if (!isValidPath(uri.getRawPath())) {
return false;
}
if (!isValidQuery(uri.getRawQuery())) {
return false;
}
if (!isValidFragment(uri.getRawFragment())) {
return false;
}
return true;
}
|
@Test
public void testValidator235() {
String version = System.getProperty("java.version");
if (version.compareTo("1.6") < 0) {
System.out.println("Cannot run Unicode IDN tests");
return; // Cannot run the test
}
UrlValidator validator = new UrlValidator();
assertTrue("xn--d1abbgf6aiiy.xn--p1ai should validate", validator.isValid("http://xn--d1abbgf6aiiy.xn--p1ai"));
assertTrue("президент.рф should validate", validator.isValid("http://президент.рф"));
assertTrue("www.b\u00fccher.ch should validate", validator.isValid("http://www.b\u00fccher.ch"));
assertFalse("www.\uFFFD.ch FFFD should fail", validator.isValid("http://www.\uFFFD.ch"));
assertTrue("www.b\u00fccher.ch should validate", validator.isValid("ftp://www.b\u00fccher.ch"));
assertFalse("www.\uFFFD.ch FFFD should fail", validator.isValid("ftp://www.\uFFFD.ch"));
}
|
@Override
@DSTransactional // multiple data sources: use @DSTransactional to guarantee the local transaction and the data-source switching
public void updateTenant(TenantSaveReqVO updateReqVO) {
// validate that the tenant exists
TenantDO tenant = validateUpdateTenant(updateReqVO.getId());
// check that the tenant name is not duplicated
validTenantNameDuplicate(updateReqVO.getName(), updateReqVO.getId());
// check that the tenant website is not duplicated
validTenantWebsiteDuplicate(updateReqVO.getWebsite(), updateReqVO.getId());
// check that the package is not disabled
TenantPackageDO tenantPackage = tenantPackageService.validTenantPackage(updateReqVO.getPackageId());
// update the tenant
TenantDO updateObj = BeanUtils.toBean(updateReqVO, TenantDO.class);
tenantMapper.updateById(updateObj);
// if the package changed, update the permissions of its roles
if (ObjectUtil.notEqual(tenant.getPackageId(), updateReqVO.getPackageId())) {
updateTenantRoleMenu(tenant.getId(), tenantPackage.getMenuIds());
}
}
|
@Test
public void testUpdateTenant_system() {
// mock data
TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setPackageId(PACKAGE_ID_SYSTEM));
tenantMapper.insert(dbTenant); // @Sql: insert an existing record first
// prepare the request parameters
TenantSaveReqVO reqVO = randomPojo(TenantSaveReqVO.class, o -> {
o.setId(dbTenant.getId()); // set the ID of the tenant to update
});
// invoke and verify the business exception
assertServiceException(() -> tenantService.updateTenant(reqVO), TENANT_CAN_NOT_UPDATE_SYSTEM);
}
|
@Override
public boolean sendHeartbeatMessage(int leaderId) {
var leaderInstance = instanceMap.get(leaderId);
return leaderInstance.isAlive();
}
|
@Test
void testSendHeartbeatMessage() {
var instance1 = new BullyInstance(null, 1, 1);
Map<Integer, Instance> instanceMap = Map.of(1, instance1);
var messageManager = new BullyMessageManager(instanceMap);
assertTrue(messageManager.sendHeartbeatMessage(1));
}
|
public static <InputT, OutputT> MapElements<InputT, OutputT> via(
final InferableFunction<InputT, OutputT> fn) {
return new MapElements<>(fn, fn.getInputTypeDescriptor(), fn.getOutputTypeDescriptor());
}
|
@Test
@Category(NeedsRunner.class)
public void testSimpleFunctionOutputTypeDescriptor() throws Exception {
PCollection<String> output =
pipeline
.apply(Create.of("hello"))
.apply(
MapElements.via(
new SimpleFunction<String, String>() {
@Override
public String apply(String input) {
return input;
}
}));
assertThat(
output.getTypeDescriptor(),
equalTo((TypeDescriptor<String>) new TypeDescriptor<String>() {}));
assertThat(
pipeline.getCoderRegistry().getCoder(output.getTypeDescriptor()),
equalTo(pipeline.getCoderRegistry().getCoder(new TypeDescriptor<String>() {})));
// Make sure the pipeline runs too
pipeline.run();
}
|
public static TypeDescriptor javaTypeForFieldType(FieldType fieldType) {
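// Map a schema FieldType to the Java type that represents it, recursing into
// container types; logical types resolve through their base type.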
switch (fieldType.getTypeName()) {
case LOGICAL_TYPE:
// TODO: shouldn't we handle this differently?
return javaTypeForFieldType(fieldType.getLogicalType().getBaseType());
case ARRAY:
return TypeDescriptors.lists(javaTypeForFieldType(fieldType.getCollectionElementType()));
case ITERABLE:
return TypeDescriptors.iterables(
javaTypeForFieldType(fieldType.getCollectionElementType()));
case MAP:
return TypeDescriptors.maps(
javaTypeForFieldType(fieldType.getMapKeyType()),
javaTypeForFieldType(fieldType.getMapValueType()));
case ROW:
return TypeDescriptors.rows();
default:
return PRIMITIVE_MAPPING.get(fieldType.getTypeName());
}
}
|
@Test
public void testArrayTypeToJavaType() {
assertEquals(
TypeDescriptors.lists(TypeDescriptors.longs()),
FieldTypeDescriptors.javaTypeForFieldType(FieldType.array(FieldType.INT64)));
assertEquals(
TypeDescriptors.lists(TypeDescriptors.lists(TypeDescriptors.longs())),
FieldTypeDescriptors.javaTypeForFieldType(
FieldType.array(FieldType.array(FieldType.INT64))));
}
|
@Nonnull
public static <T> T checkNonNullAndSerializable(@Nonnull T object, @Nonnull String objectName) {
//noinspection ConstantConditions
if (object == null) {
throw new IllegalArgumentException('"' + objectName + "\" must not be null");
}
checkSerializable(object, objectName);
return object;
}
|
@Test
public void whenNullToCheckNonNullAndSerializable_thenThrowException() {
assertThatThrownBy(() -> Util.checkNonNullAndSerializable(null, "object"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("\"object\" must not be null");
}
|
public List<? extends MqttProperty> getProperties(int propertyId) {
if (propertyId == MqttPropertyType.USER_PROPERTY.value) {
return userProperties == null ? Collections.<MqttProperty>emptyList() : userProperties;
}
if (propertyId == MqttPropertyType.SUBSCRIPTION_IDENTIFIER.value) {
return subscriptionIds == null ? Collections.<MqttProperty>emptyList() : subscriptionIds;
}
IntObjectHashMap<MqttProperty> props = this.props;
return (props == null || !props.containsKey(propertyId)) ?
Collections.<MqttProperty>emptyList() :
Collections.singletonList(props.get(propertyId));
}
|
@Test
public void testGetProperties() {
MqttProperties props = createSampleProperties();
assertEquals(
Collections.singletonList(new MqttProperties.StringProperty(CONTENT_TYPE.value(), "text/plain")),
props.getProperties(CONTENT_TYPE.value()));
List<MqttProperties.IntegerProperty> expectedSubscriptionIds = new ArrayList<MqttProperties.IntegerProperty>();
expectedSubscriptionIds.add(new MqttProperties.IntegerProperty(SUBSCRIPTION_IDENTIFIER.value(), 10));
expectedSubscriptionIds.add(new MqttProperties.IntegerProperty(SUBSCRIPTION_IDENTIFIER.value(), 20));
assertEquals(
expectedSubscriptionIds,
props.getProperties(SUBSCRIPTION_IDENTIFIER.value()));
List<MqttProperties.UserProperty> expectedUserProps = new ArrayList<MqttProperties.UserProperty>();
expectedUserProps.add(new MqttProperties.UserProperty("isSecret", "true"));
expectedUserProps.add(new MqttProperties.UserProperty("tag", "firstTag"));
expectedUserProps.add(new MqttProperties.UserProperty("tag", "secondTag"));
List<MqttProperties.UserProperty> actualUserProps =
(List<MqttProperties.UserProperty>) props.getProperties(USER_PROPERTY.value());
assertEquals(expectedUserProps, actualUserProps);
}
|
public static <T> RetryTransformer<T> of(Retry retry) {
return new RetryTransformer<>(retry);
}
|
@Test
public void retryOnResultUsingMaybe() throws InterruptedException {
RetryConfig config = RetryConfig.<String>custom()
.retryOnResult("retry"::equals)
.waitDuration(Duration.ofMillis(50))
.maxAttempts(3).build();
Retry retry = Retry.of("testName", config);
given(helloWorldService.returnHelloWorld())
.willReturn("retry")
.willReturn("success");
Maybe.fromCallable(helloWorldService::returnHelloWorld)
.compose(RetryTransformer.of(retry))
.test()
.await()
.assertValueCount(1)
.assertValue("success")
.assertComplete();
then(helloWorldService).should(times(2)).returnHelloWorld();
Retry.Metrics metrics = retry.getMetrics();
assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero();
assertThat(metrics.getNumberOfSuccessfulCallsWithRetryAttempt()).isEqualTo(1);
}
|
@ApiOperation(value = "Get form data", tags = { "Forms" }, notes = "")
@ApiResponses(value = {
@ApiResponse(code = 200, message = "Indicates that form data could be queried."),
@ApiResponse(code = 404, message = "Indicates that form data could not be found.") })
@GetMapping(value = "/form/form-data", produces = "application/json")
public FormDataResponse getFormData(@RequestParam(value = "taskId", required = false) String taskId,
@RequestParam(value = "processDefinitionId", required = false) String processDefinitionId) {
if (taskId == null && processDefinitionId == null) {
throw new FlowableIllegalArgumentException("The taskId or processDefinitionId parameter has to be provided");
}
if (taskId != null && processDefinitionId != null) {
throw new FlowableIllegalArgumentException("Not both a taskId and a processDefinitionId parameter can be provided");
}
FormData formData = null;
String id = null;
if (taskId != null) {
formData = formService.getTaskFormData(taskId);
id = taskId;
} else {
formData = formService.getStartFormData(processDefinitionId);
id = processDefinitionId;
}
if (formData == null) {
throw new FlowableObjectNotFoundException("Could not find a form data with id '" + id + "'.", FormData.class);
}
if (restApiInterceptor != null) {
restApiInterceptor.accessFormData(formData);
}
return restResponseFactory.createFormDataResponse(formData);
}
|
@Test
@Deployment
public void testGetFormData() throws Exception {
Map<String, Object> variableMap = new HashMap<>();
variableMap.put("SpeakerName", "John Doe");
Address address = new Address();
variableMap.put("address", address);
ProcessInstance processInstance = runtimeService.startProcessInstanceByKey("oneTaskProcess", variableMap);
Task task = taskService.createTaskQuery().processInstanceId(processInstance.getId()).singleResult();
CloseableHttpResponse response = executeRequest(
new HttpGet(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_FORM_DATA) + "?taskId=" + task.getId()), HttpStatus.SC_OK);
// Check resulting task
JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
closeResponse(response);
assertThatJson(responseNode)
.when(Option.IGNORING_EXTRA_FIELDS, Option.IGNORING_ARRAY_ORDER)
.isEqualTo("{"
+ "formProperties: [ {"
+ " id: 'room',"
+ " name: null,"
+ " type: null,"
+ " value: null,"
+ " readable: true,"
+ " writable: true,"
+ " required: false"
+ "},"
+ "{"
+ " id: 'duration',"
+ " name: null,"
+ " type: 'long',"
+ " value: null,"
+ " readable: true,"
+ " writable: true,"
+ " required: false"
+ "},"
+ "{"
+ " id: 'speaker',"
+ " name: null,"
+ " type: null,"
+ " value: 'John Doe',"
+ " readable: true,"
+ " writable: false,"
+ " required: false"
+ "},"
+ "{"
+ " id: 'street',"
+ " name: null,"
+ " type: null,"
+ " value: null,"
+ " readable: true,"
+ " writable: true,"
+ " required: true"
+ "},"
+ "{"
+ " id: 'start',"
+ " name: null,"
+ " type: 'date',"
+ " value: null,"
+ " readable: true,"
+ " writable: true,"
+ " required: false,"
+ " datePattern: 'dd-MMM-yyyy'"
+ "},"
+ "{"
+ " id: 'end',"
+ " name: 'End',"
+ " type: 'date',"
+ " value: null,"
+ " readable: true,"
+ " writable: true,"
+ " required: false,"
+ " datePattern: 'dd/MM/yyyy'"
+ "},"
+ "{"
+ "id: 'direction',"
+ " name: null,"
+ " type: 'enum',"
+ " value: null,"
+ " readable: true,"
+ " writable: true,"
+ " required: false,"
+ " datePattern: null,"
+ " enumValues: [ {"
+ " id: 'left',"
+ " name: 'Go Left'"
+ " },"
+ " {"
+ " id: 'right',"
+ " name: 'Go Right'"
+ " },"
+ " {"
+ " id: 'up',"
+ " name: 'Go Up'"
+ " },"
+ " {"
+ " id: 'down',"
+ " name: 'Go Down'"
+ " } ]"
+ "} ]"
+ "}"
);
response = executeRequest(new HttpGet(
SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_FORM_DATA) + "?processDefinitionId=" + processInstance
.getProcessDefinitionId()),
HttpStatus.SC_OK);
// Check resulting task
responseNode = objectMapper.readTree(response.getEntity().getContent());
closeResponse(response);
assertThatJson(responseNode)
.when(Option.IGNORING_EXTRA_FIELDS, Option.IGNORING_ARRAY_ORDER)
.isEqualTo("{"
+ "formProperties: [ {"
+ " id: 'number',"
+ " name: 'Number',"
+ " type: 'long',"
+ " value: null,"
+ " readable: true,"
+ " writable: true,"
+ " required: false"
+ "},"
+ "{"
+ " id: 'description',"
+ " name: 'Description',"
+ " type: null,"
+ " value: null,"
+ " readable: true,"
+ " writable: true,"
+ " required: false"
+ "} ]"
+ "}"
);
closeResponse(executeRequest(new HttpGet(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_FORM_DATA) + "?processDefinitionId=123"),
HttpStatus.SC_NOT_FOUND));
closeResponse(executeRequest(new HttpGet(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_FORM_DATA) + "?processDefinitionId2=123"),
HttpStatus.SC_BAD_REQUEST));
}
|
public void appEnterBackground() {
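// On entering the background, fold the elapsed foreground time of each
// running timer (except $AppEnd) into its accumulated duration and reset its
// start time, so time spent in the background is not counted.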
synchronized (mTrackTimer) {
try {
for (Map.Entry<String, EventTimer> entry : mTrackTimer.entrySet()) {
if (entry != null) {
if ("$AppEnd".equals(entry.getKey())) {
continue;
}
EventTimer eventTimer = entry.getValue();
if (eventTimer != null && !eventTimer.isPaused()) {
long eventAccumulatedDuration = eventTimer.getEventAccumulatedDuration() + SystemClock.elapsedRealtime() - eventTimer.getStartTime();
eventTimer.setEventAccumulatedDuration(eventAccumulatedDuration);
eventTimer.setStartTime(SystemClock.elapsedRealtime());
}
}
}
} catch (Exception e) {
SALog.printStackTrace(e);
}
}
}
|
@Test
public void appEnterBackground() {
mInstance.addEventTimer("EventTimer", new EventTimer(TimeUnit.SECONDS, 10000L));
mInstance.appEnterBackground();
Assert.assertEquals(100, mInstance.getEventTimer("EventTimer").getStartTime());
}
|
private Map<String, String> parseParamsFromConfig() {
Map<String, String> params = new HashMap<>();
switch (Config.cloud_native_storage_type.toLowerCase()) {
case "s3":
params.put(CloudConfigurationConstants.AWS_S3_ACCESS_KEY, Config.aws_s3_access_key);
params.put(CloudConfigurationConstants.AWS_S3_SECRET_KEY, Config.aws_s3_secret_key);
params.put(CloudConfigurationConstants.AWS_S3_REGION, Config.aws_s3_region);
params.put(CloudConfigurationConstants.AWS_S3_ENDPOINT, Config.aws_s3_endpoint);
params.put(CloudConfigurationConstants.AWS_S3_EXTERNAL_ID, Config.aws_s3_external_id);
params.put(CloudConfigurationConstants.AWS_S3_IAM_ROLE_ARN, Config.aws_s3_iam_role_arn);
params.put(CloudConfigurationConstants.AWS_S3_USE_AWS_SDK_DEFAULT_BEHAVIOR,
String.valueOf(Config.aws_s3_use_aws_sdk_default_behavior));
params.put(CloudConfigurationConstants.AWS_S3_USE_INSTANCE_PROFILE,
String.valueOf(Config.aws_s3_use_instance_profile));
break;
case "hdfs":
// TODO
break;
case "azblob":
params.put(CloudConfigurationConstants.AZURE_BLOB_SHARED_KEY, Config.azure_blob_shared_key);
params.put(CloudConfigurationConstants.AZURE_BLOB_SAS_TOKEN, Config.azure_blob_sas_token);
params.put(CloudConfigurationConstants.AZURE_BLOB_ENDPOINT, Config.azure_blob_endpoint);
break;
default:
return params;
}
return params;
}
|
@Test
public void testParseParamsFromConfig() {
SharedDataStorageVolumeMgr sdsvm = new SharedDataStorageVolumeMgr();
Map<String, String> params = Deencapsulation.invoke(sdsvm, "parseParamsFromConfig");
Assert.assertEquals("access_key", params.get(AWS_S3_ACCESS_KEY));
Assert.assertEquals("secret_key", params.get(AWS_S3_SECRET_KEY));
Assert.assertEquals("region", params.get(AWS_S3_REGION));
Assert.assertEquals("endpoint", params.get(AWS_S3_ENDPOINT));
Config.cloud_native_storage_type = "aaa";
params = Deencapsulation.invoke(sdsvm, "parseParamsFromConfig");
Assert.assertEquals(0, params.size());
}
|
@Override
public boolean isEmpty() {
return state.isEmpty();
}
|
@Test
void testIsEmpty() throws Exception {
assertThat(mapState.isEmpty()).isFalse();
}
|
@Override
public AppResponse process(Flow flow, CancelFlowRequest request) {
Map<String, Object> logOptions = new HashMap<>();
if (appAuthenticator != null && appAuthenticator.getAccountId() != null) logOptions.put(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId());
if ("upgrade_rda_widchecker".equals(appSession.getAction())) logOptions.put(HIDDEN, true);
if (appSession.getRdaSessionId() != null) {
rdaClient.cancel(appSession.getRdaSessionId());
}
digidClient.remoteLog(getLogCode(appSession.getAction()), logOptions);
return new OkResponse();
}
|
@Test
public void processReturnOkResponseWithAppAutheticatorAndRdaSession() {
mockedAppSession.setRdaSessionId("123");
AppResponse appResponse = cancelled.process(mockedFlow, cancelFlowRequest);
verify(rdaClient, times(1)).cancel(mockedAppSession.getRdaSessionId());
assertTrue(appResponse instanceof OkResponse);
}
|
@VisibleForTesting
Object evaluate(final GenericRow row) {
return term.getValue(new TermEvaluationContext(row));
}
|
@Test
public void shouldHandleFunctionCallsWithGenerics() {
// Given:
final UdfFactory udfFactory = mock(UdfFactory.class);
final KsqlScalarFunction udf = mock(KsqlScalarFunction.class);
when(udf.newInstance(any())).thenReturn(new AddUdf());
givenUdf("FOO", udfFactory, udf);
when(udf.parameters()).thenReturn(ImmutableList.of(GenericType.of("T"), GenericType.of("T")));
// When:
InterpretedExpression interpreter1 = interpreter(
new FunctionCall(
FunctionName.of("FOO"),
ImmutableList.of(
new IntegerLiteral(1),
new IntegerLiteral(1))
)
);
final Object object = interpreter1.evaluate(ROW);
// Then:
assertThat(object, is(2));
}
|
@Override
public KTable<K, V> filter(final Predicate<? super K, ? super V> predicate) {
Objects.requireNonNull(predicate, "predicate can't be null");
return doFilter(predicate, NamedInternal.empty(), null, false);
}
|
@Test
public void shouldNotAllowNullPredicateOnFilter() {
assertThrows(NullPointerException.class, () -> table.filter(null));
}
|
public static Instruction transition(Integer tableId) {
checkNotNull(tableId, "Table id cannot be null");
return new TableTypeTransition(tableId);
}
|
@Test
public void testTransitionMethod() {
final Instruction instruction = Instructions.transition(1);
final Instructions.TableTypeTransition tableInstruction =
checkAndConvert(instruction,
Instruction.Type.TABLE,
Instructions.TableTypeTransition.class);
assertThat(tableInstruction.tableId(), is(1));
}
|
public void asyncRenameFiles(
List<CompletableFuture<?>> renameFileFutures,
AtomicBoolean cancelled,
Path writePath,
Path targetPath,
List<String> fileNames) {
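// Rename each file from the write path to the target path asynchronously on
// updateRemoteFilesExecutor; the shared cancelled flag lets callers skip
// renames that have not started yet, and an existing target fails fast.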
FileSystem fileSystem;
try {
fileSystem = FileSystem.get(writePath.toUri(), conf);
} catch (Exception e) {
LOG.error("Failed to get fileSystem", e);
throw new StarRocksConnectorException("Failed to move data files to target location. " +
"Failed to get file system on path %s. msg: %s", writePath, e.getMessage());
}
for (String fileName : fileNames) {
Path source = new Path(writePath, fileName);
Path target = new Path(targetPath, fileName);
renameFileFutures.add(CompletableFuture.runAsync(() -> {
if (cancelled.get()) {
return;
}
try {
if (fileSystem.exists(target)) {
throw new StarRocksConnectorException("Failed to move data files from %s to target location %s. msg:" +
" target location already exists", source, target);
}
if (!fileSystem.rename(source, target)) {
throw new StarRocksConnectorException("Failed to move data files from %s to target location %s. msg:" +
" rename operation failed", source, target);
}
} catch (IOException e) {
LOG.error("Failed to rename data files", e);
throw new StarRocksConnectorException("Failed to move data files from %s to final location %s. msg: %s",
source, target, e.getMessage());
}
}, updateRemoteFilesExecutor));
}
}
|
@Test
public void asyncRenameFilesTest() {
HiveRemoteFileIO hiveRemoteFileIO = new HiveRemoteFileIO(new Configuration());
FileSystem fs = new MockedRemoteFileSystem(HDFS_HIVE_TABLE);
hiveRemoteFileIO.setFileSystem(fs);
FeConstants.runningUnitTest = true;
ExecutorService executorToRefresh = Executors.newFixedThreadPool(5);
ExecutorService executorToLoad = Executors.newFixedThreadPool(5);
CachingRemoteFileIO cachingFileIO = new CachingRemoteFileIO(hiveRemoteFileIO, executorToRefresh, 10, 10, 10);
RemoteFileOperations ops = new RemoteFileOperations(cachingFileIO, executorToLoad, executorToLoad,
false, true, new Configuration());
List<CompletableFuture<?>> futures = new ArrayList<>();
Path writePath = new Path("hdfs://hadoop01:9000/tmp/starrocks/queryid");
Path targetPath = new Path("hdfs://hadoop01:9000/user/hive/warehouse/test.db/t1");
List<String> fileNames = Lists.newArrayList("file1");
ExceptionChecker.expectThrowsWithMsg(
StarRocksConnectorException.class,
"Failed to move data files to target location." +
" Failed to get file system on path hdfs://hadoop01:9000/tmp/starrocks/queryid",
() -> ops.asyncRenameFiles(futures, new AtomicBoolean(true), writePath, targetPath, fileNames));
RemoteFileOperations ops1 = new RemoteFileOperations(cachingFileIO, executorToLoad, Executors.newSingleThreadExecutor(),
false, true, new Configuration());
FileSystem mockedFs = new MockedRemoteFileSystem(HDFS_HIVE_TABLE) {
@Override
public boolean exists(Path path) {
return true;
}
};
new MockUp<FileSystem>() {
@Mock
public FileSystem get(URI uri, Configuration conf) throws IOException {
return mockedFs;
}
};
ExceptionChecker.expectThrowsWithMsg(
StarRocksConnectorException.class,
"Failed to move data files from hdfs://hadoop01:9000/tmp/starrocks/queryid/file1 to" +
" target location hdfs://hadoop01:9000/user/hive/warehouse/test.db/t1/file1." +
" msg: target location already exists",
() -> {
ops1.asyncRenameFiles(futures, new AtomicBoolean(false), writePath, targetPath, fileNames);
getFutureValue(futures.get(0), StarRocksConnectorException.class);
});
new MockUp<FileSystem>() {
@Mock
public FileSystem get(URI uri, Configuration conf) throws IOException {
return fs;
}
};
ExceptionChecker.expectThrowsWithMsg(
StarRocksConnectorException.class,
"Failed to move data files from hdfs://hadoop01:9000/tmp/starrocks/queryid/file1 to" +
" target location hdfs://hadoop01:9000/user/hive/warehouse/test.db/t1/file1." +
" msg: rename operation failed",
() -> {
futures.clear();
ops.asyncRenameFiles(futures, new AtomicBoolean(false), writePath, targetPath, fileNames);
getFutureValue(futures.get(0), StarRocksConnectorException.class);
});
}
|
@Override
public final boolean offer(int ordinal, @Nonnull Object item) {
if (ordinal == -1) {
return offerInternal(allEdges, item);
} else {
if (ordinal == bucketCount()) {
// ordinal beyond bucketCount will add to snapshot queue, which we don't allow through this method
throw new IllegalArgumentException("Illegal edge ordinal: " + ordinal);
}
singleEdge[0] = ordinal;
return offerInternal(singleEdge, item);
}
}
|
@Test
public void when_offer2FailsAndDifferentItemOffered_then_fail() {
do_when_offerDifferent_then_fail(e -> outbox.offer(0, e));
}
|
@Override
public void checkCanCreateView(Identity identity, AccessControlContext context, CatalogSchemaTableName view)
{
if (!canAccessCatalog(identity, view.getCatalogName(), ALL)) {
denyCreateView(view.toString());
}
}
|
@Test
public void testRefreshing()
throws Exception
{
TransactionManager transactionManager = createTestTransactionManager();
AccessControlManager accessControlManager = new AccessControlManager(transactionManager);
File configFile = newTemporaryFile();
configFile.deleteOnExit();
copy(getResourceFile("catalog.json"), configFile);
accessControlManager.setSystemAccessControl(FileBasedSystemAccessControl.NAME, ImmutableMap.of(
SECURITY_CONFIG_FILE, configFile.getAbsolutePath(),
SECURITY_REFRESH_PERIOD, "1ms"));
transaction(transactionManager, accessControlManager)
.execute(transactionId -> {
accessControlManager.checkCanCreateView(transactionId, alice, context, aliceView);
accessControlManager.checkCanCreateView(transactionId, alice, context, aliceView);
accessControlManager.checkCanCreateView(transactionId, alice, context, aliceView);
});
copy(getResourceFile("security-config-file-with-unknown-rules.json"), configFile);
sleep(2);
assertThatThrownBy(() -> transaction(transactionManager, accessControlManager)
.execute(transactionId -> {
accessControlManager.checkCanCreateView(transactionId, alice, context, aliceView);
}))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageStartingWith("Invalid JSON file");
// test if file based cached control was not cached somewhere
assertThatThrownBy(() -> transaction(transactionManager, accessControlManager)
.execute(transactionId -> {
accessControlManager.checkCanCreateView(transactionId, alice, context, aliceView);
}))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageStartingWith("Invalid JSON file");
copy(getResourceFile("catalog.json"), configFile);
sleep(2);
transaction(transactionManager, accessControlManager)
.execute(transactionId -> {
accessControlManager.checkCanCreateView(transactionId, alice, context, aliceView);
});
}
|
public static DefaultProcessCommands secondary(File directory, int processNumber) {
return new DefaultProcessCommands(directory, processNumber, false);
}
|
@Test
public void secondary_fails_if_processNumber_is_higher_than_MAX_PROCESSES() throws Exception {
int processNumber = MAX_PROCESSES + 1;
expectProcessNumberNoValidIAE(() -> {
try (DefaultProcessCommands secondary = DefaultProcessCommands.secondary(temp.newFolder(), processNumber)) {
}
}, processNumber);
}
|