focal_method | test_case
---|---
public void setName( String name ) {
this.name = name;
}
|
@Test
public void setName() {
JobScheduleParam jobScheduleParam = mock( JobScheduleParam.class );
doCallRealMethod().when( jobScheduleParam ).setName( any() );
String name = "hitachi";
jobScheduleParam.setName( name );
Assert.assertEquals( name, ReflectionTestUtils.getField( jobScheduleParam, "name" ) );
}
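The partial-mock pattern above (doCallRealMethod plus ReflectionTestUtils) keeps the setter isolated from the rest of the class. A minimal sketch of the same check against a plain instance, assuming JobScheduleParam has a public no-arg constructor and a matching getName() accessor (neither is shown in the snippet):
JobScheduleParam jobScheduleParam = new JobScheduleParam(); // assumed no-arg constructor
jobScheduleParam.setName("hitachi");
Assert.assertEquals("hitachi", jobScheduleParam.getName()); // assumed getter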
|
@Override
public void collect(MetricsEmitter metricsEmitter) {
for (Map.Entry<MetricKey, KafkaMetric> entry : ledger.getMetrics()) {
MetricKey metricKey = entry.getKey();
KafkaMetric metric = entry.getValue();
try {
collectMetric(metricsEmitter, metricKey, metric);
} catch (Exception e) {
// catch and log to continue processing remaining metrics
log.error("Error processing Kafka metric {}", metricKey, e);
}
}
}
|
@Test
public void testSecondCollectCumulative() {
Sensor sensor = metrics.sensor("test");
sensor.add(metricName, new CumulativeSum());
sensor.record();
sensor.record();
time.sleep(60 * 1000L);
collector.collect(testEmitter);
// Update it again by 5 and advance time by another 60 seconds.
sensor.record();
sensor.record();
sensor.record();
sensor.record();
sensor.record();
time.sleep(60 * 1000L);
testEmitter.reset();
collector.collect(testEmitter);
List<SinglePointMetric> result = testEmitter.emittedMetrics();
assertEquals(2, result.size());
Metric cumulative = result.stream()
.flatMap(metrics -> Stream.of(metrics.builder().build()))
.filter(metric -> metric.getName().equals("test.domain.group1.name1")).findFirst().get();
NumberDataPoint point = cumulative.getSum().getDataPoints(0);
assertEquals(AggregationTemporality.AGGREGATION_TEMPORALITY_CUMULATIVE, cumulative.getSum().getAggregationTemporality());
assertTrue(cumulative.getSum().getIsMonotonic());
assertEquals(7d, point.getAsDouble(), 0.0);
assertEquals(TimeUnit.SECONDS.toNanos(Instant.ofEpochSecond(121L).getEpochSecond()) +
Instant.ofEpochSecond(121L).getNano(), point.getTimeUnixNano());
assertEquals(TimeUnit.SECONDS.toNanos(Instant.ofEpochSecond(1L).getEpochSecond()) +
Instant.ofEpochSecond(1L).getNano(), point.getStartTimeUnixNano());
}
|
public void setSendFullErrorException(boolean sendFullErrorException) {
this.sendFullErrorException = sendFullErrorException;
}
|
@Test
void handleFlowableConflictExceptionWithoutSendFullErrorException() throws Exception {
testController.exceptionSupplier = () -> new FlowableConflictException("task already exists");
handlerAdvice.setSendFullErrorException(false);
String body = mockMvc.perform(get("/"))
.andExpect(status().isConflict())
.andReturn()
.getResponse()
.getContentAsString();
assertThatJson(body)
.isEqualTo("{"
+ " message: 'Conflict',"
+ " exception: 'task already exists'"
+ "}");
}
|
public static int getDefaultResolutionIndex(final Context context,
final List<VideoStream> videoStreams) {
final String defaultResolution = computeDefaultResolution(context,
R.string.default_resolution_key, R.string.default_resolution_value);
return getDefaultResolutionWithDefaultFormat(context, defaultResolution, videoStreams);
}
|
@Test
public void getDefaultResolutionTest() {
final List<VideoStream> testList = new ArrayList<>(List.of(
generateVideoStream("mpeg_4-720", MediaFormat.MPEG_4, "720p", false),
generateVideoStream("v3gpp-240", MediaFormat.v3GPP, "240p", false),
generateVideoStream("webm-480", MediaFormat.WEBM, "480p", false),
generateVideoStream("webm-240", MediaFormat.WEBM, "240p", false),
generateVideoStream("mpeg_4-240", MediaFormat.MPEG_4, "240p", false),
generateVideoStream("webm-144", MediaFormat.WEBM, "144p", false),
generateVideoStream("mpeg_4-360", MediaFormat.MPEG_4, "360p", false),
generateVideoStream("webm-360", MediaFormat.WEBM, "360p", false)));
VideoStream result = testList.get(ListHelper.getDefaultResolutionIndex(
"720p", BEST_RESOLUTION_KEY, MediaFormat.MPEG_4, testList));
assertEquals("720p", result.getResolution());
assertEquals(MediaFormat.MPEG_4, result.getFormat());
// Have resolution and the format
result = testList.get(ListHelper.getDefaultResolutionIndex(
"480p", BEST_RESOLUTION_KEY, MediaFormat.WEBM, testList));
assertEquals("480p", result.getResolution());
assertEquals(MediaFormat.WEBM, result.getFormat());
// Have resolution but not the format
result = testList.get(ListHelper.getDefaultResolutionIndex(
"480p", BEST_RESOLUTION_KEY, MediaFormat.MPEG_4, testList));
assertEquals("480p", result.getResolution());
assertEquals(MediaFormat.WEBM, result.getFormat());
// Have resolution and the format
result = testList.get(ListHelper.getDefaultResolutionIndex(
"240p", BEST_RESOLUTION_KEY, MediaFormat.WEBM, testList));
assertEquals("240p", result.getResolution());
assertEquals(MediaFormat.WEBM, result.getFormat());
// The best resolution
result = testList.get(ListHelper.getDefaultResolutionIndex(
BEST_RESOLUTION_KEY, BEST_RESOLUTION_KEY, MediaFormat.WEBM, testList));
assertEquals("720p", result.getResolution());
assertEquals(MediaFormat.MPEG_4, result.getFormat());
// Doesn't have the 60fps variant and format
result = testList.get(ListHelper.getDefaultResolutionIndex(
"720p60", BEST_RESOLUTION_KEY, MediaFormat.WEBM, testList));
assertEquals("720p", result.getResolution());
assertEquals(MediaFormat.MPEG_4, result.getFormat());
// Doesn't have the 60fps variant
result = testList.get(ListHelper.getDefaultResolutionIndex(
"480p60", BEST_RESOLUTION_KEY, MediaFormat.WEBM, testList));
assertEquals("480p", result.getResolution());
assertEquals(MediaFormat.WEBM, result.getFormat());
// Doesn't have the resolution, will return the best one
result = testList.get(ListHelper.getDefaultResolutionIndex(
"2160p60", BEST_RESOLUTION_KEY, MediaFormat.WEBM, testList));
assertEquals("720p", result.getResolution());
assertEquals(MediaFormat.MPEG_4, result.getFormat());
}
|
@SuppressWarnings("unchecked")
public static int compare(Comparable lhs, Comparable rhs) {
assert lhs != null;
assert rhs != null;
if (lhs.getClass() == rhs.getClass()) {
return lhs.compareTo(rhs);
}
if (lhs instanceof Number && rhs instanceof Number) {
return Numbers.compare(lhs, rhs);
}
return lhs.compareTo(rhs);
}
|
@Test
public void testCompare() {
assertNotEquals(0, compare(0, 1));
assertNotEquals(0, compare("foo", "bar"));
assertEquals(0, compare(0, 0));
assertEquals(0, compare(1.0, 1.0));
assertEquals(0, compare("foo", "foo"));
assertThat(compare(0, 1)).isNegative();
assertThat(compare(1, 0)).isPositive();
assertThat(compare("a", "b")).isNegative();
assertThat(compare("b", "a")).isPositive();
}
|
public static int checkBackupCount(int newBackupCount, int currentAsyncBackupCount) {
if (newBackupCount < 0) {
throw new IllegalArgumentException("backup-count can't be smaller than 0");
}
if (currentAsyncBackupCount < 0) {
throw new IllegalArgumentException("async-backup-count can't be smaller than 0");
}
if (newBackupCount > MAX_BACKUP_COUNT) {
throw new IllegalArgumentException("backup-count can't be larger than than " + MAX_BACKUP_COUNT);
}
if (newBackupCount + currentAsyncBackupCount > MAX_BACKUP_COUNT) {
throw new IllegalArgumentException("the sum of backup-count and async-backup-count can't be larger than than "
+ MAX_BACKUP_COUNT);
}
return newBackupCount;
}
|
@Test
public void checkBackupCount() {
checkBackupCount(-1, 0, false);
checkBackupCount(-1, -1, false);
checkBackupCount(0, -1, false);
checkBackupCount(0, 0, true);
checkBackupCount(0, 1, true);
checkBackupCount(1, 1, true);
checkBackupCount(2, 1, true);
checkBackupCount(1, 2, true);
checkBackupCount(MAX_BACKUP_COUNT, 0, true);
checkBackupCount(0, MAX_BACKUP_COUNT, true);
checkBackupCount(MAX_BACKUP_COUNT, 1, false);
checkBackupCount(MAX_BACKUP_COUNT + 1, 0, false);
checkBackupCount(0, MAX_BACKUP_COUNT + 1, false);
}
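The three-argument checkBackupCount(...) used by this test is a helper that is not shown above. A minimal sketch of a plausible implementation, assuming it delegates to the two-argument production method and turns the boolean into a pass/fail expectation:
private static void checkBackupCount(int newBackupCount, int currentAsyncBackupCount, boolean expectedToPass) {
try {
// the two-argument production method shown above (assumed to be statically imported in the test class)
checkBackupCount(newBackupCount, currentAsyncBackupCount);
assertTrue("expected an IllegalArgumentException for (" + newBackupCount + ", " + currentAsyncBackupCount + ")", expectedToPass);
} catch (IllegalArgumentException expected) {
assertFalse("did not expect an IllegalArgumentException for (" + newBackupCount + ", " + currentAsyncBackupCount + ")", expectedToPass);
}
}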
|
public Object getCell(final int columnIndex) {
Preconditions.checkArgument(columnIndex > 0 && columnIndex < data.size() + 1);
return data.get(columnIndex - 1);
}
|
@Test
void assertGetCellWithProperties() {
LocalDataQueryResultRow actual = new LocalDataQueryResultRow(new Properties(), PropertiesBuilder.build(new Property("foo", "bar")));
assertThat(actual.getCell(1), is(""));
assertThat(actual.getCell(2), is("{\"foo\":\"bar\"}"));
}
|
@Override
public OAuth2AccessTokenDO grantAuthorizationCodeForAccessToken(String clientId, String code,
String redirectUri, String state) {
OAuth2CodeDO codeDO = oauth2CodeService.consumeAuthorizationCode(code);
Assert.notNull(codeDO, "authorization code must not be null"); // defensive programming
// verify that the clientId matches
if (!StrUtil.equals(clientId, codeDO.getClientId())) {
throw exception(ErrorCodeConstants.OAUTH2_GRANT_CLIENT_ID_MISMATCH);
}
// verify that the redirectUri matches
if (!StrUtil.equals(redirectUri, codeDO.getRedirectUri())) {
throw exception(ErrorCodeConstants.OAUTH2_GRANT_REDIRECT_URI_MISMATCH);
}
// verify that the state matches
state = StrUtil.nullToDefault(state, ""); // when the state in the database is null, it is stored as the empty string ""
if (!StrUtil.equals(state, codeDO.getState())) {
throw exception(ErrorCodeConstants.OAUTH2_GRANT_STATE_MISMATCH);
}
// create the access token
return oauth2TokenService.createAccessToken(codeDO.getUserId(), codeDO.getUserType(),
codeDO.getClientId(), codeDO.getScopes());
}
|
@Test
public void testGrantAuthorizationCodeForAccessToken() {
// prepare parameters
String clientId = randomString();
String code = randomString();
List<String> scopes = Lists.newArrayList("read", "write");
String redirectUri = randomString();
String state = randomString();
// mock the method (authorization code)
OAuth2CodeDO codeDO = randomPojo(OAuth2CodeDO.class, o -> {
o.setClientId(clientId);
o.setRedirectUri(redirectUri);
o.setState(state);
o.setScopes(scopes);
});
when(oauth2CodeService.consumeAuthorizationCode(eq(code))).thenReturn(codeDO);
// mock the method (create access token)
OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class);
when(oauth2TokenService.createAccessToken(eq(codeDO.getUserId()), eq(codeDO.getUserType()),
eq(codeDO.getClientId()), eq(codeDO.getScopes()))).thenReturn(accessTokenDO);
// invoke and assert
assertPojoEquals(accessTokenDO, oauth2GrantService.grantAuthorizationCodeForAccessToken(
clientId, code, redirectUri, state));
}
|
public static boolean verify(String token, byte[] key) {
return JWT.of(token).setKey(key).verify();
}
|
@Test
public void verifyTest(){
String token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9." +
"eyJ1c2VyX25hbWUiOiJhZG1pbiIsInNjb3BlIjpbImFsbCJdLCJleHAiOjE2MjQwMDQ4MjIsInVzZXJJZCI6MSwiYXV0aG9yaXRpZXMiOlsiUk9MRV_op5LoibLkuozlj7ciLCJzeXNfbWVudV8xIiwiUk9MRV_op5LoibLkuIDlj7ciLCJzeXNfbWVudV8yIl0sImp0aSI6ImQ0YzVlYjgwLTA5ZTctNGU0ZC1hZTg3LTVkNGI5M2FhNmFiNiIsImNsaWVudF9pZCI6ImhhbmR5LXNob3AifQ." +
"aixF1eKlAKS_k3ynFnStE7-IRGiD5YaqznvK2xEjBew";
final boolean verify = JWTUtil.verify(token, "123456".getBytes());
assertTrue(verify);
}
|
@Override
public ResultSet getVersionColumns(final String catalog, final String schema, final String table) {
return null;
}
|
@Test
void assertGetVersionColumns() {
assertNull(metaData.getVersionColumns("", "", ""));
}
|
public Node parse() throws ScanException {
if (tokenList == null || tokenList.isEmpty())
return null;
return E();
}
|
@Test
public void variable() throws ScanException {
Tokenizer tokenizer = new Tokenizer("${abc}");
Parser parser = new Parser(tokenizer.tokenize());
Node node = parser.parse();
Node witness = new Node(Node.Type.VARIABLE, new Node(Node.Type.LITERAL, "abc"));
assertEquals(witness, node);
}
|
public boolean usesBuckets( @NonNull VFSConnectionDetails details ) throws KettleException {
return details.hasBuckets() && getResolvedRootPath( details ) == null;
}
|
@Test
public void testUsesBucketsReturnsFalseIfNoBuckets() throws KettleException {
when( vfsConnectionDetails.hasBuckets() ).thenReturn( false );
// when( vfsConnectionDetails.getRootPath() ).thenReturn();
assertFalse( vfsConnectionManagerHelper.usesBuckets( vfsConnectionDetails ) );
}
|
public Timestamp parseToTimestamp(final String text, final ZoneId zoneId) {
return Timestamp.from(parseZoned(text, zoneId).toInstant());
}
|
@Test
public void shouldParseToTimestamp() {
// Given
final String format = "yyyy-MM-dd HH";
final String timestamp = "1605-11-05 10";
// When
final Timestamp ts = new StringToTimestampParser(format).parseToTimestamp(timestamp, ZoneId.systemDefault());
// Then
assertThat(ts.getTime(), is(
FIFTH_OF_NOVEMBER
.withHour(10)
.withZoneSameInstant(ZID)
.toInstant()
.toEpochMilli()));
}
|
public String toServerErrorMessage() {
return fields.entrySet().stream().map(entry -> entry.getKey() + entry.getValue()).collect(Collectors.joining("\0"));
}
|
@Test
void assertToServerErrorMessage() {
PostgreSQLErrorResponsePacket responsePacket = createErrorResponsePacket();
String expectedMessage = "SFATAL\0VFATAL\0C3D000\0Mdatabase \"test\" does not exist\0Ddetail\0Hhint\0P1\0p2\0qinternal query\0"
+ "Wwhere\0stest\0ttable\0ccolumn\0ddata type\0nconstraint\0Ffile\0L3\0Rroutine";
assertThat(responsePacket.toServerErrorMessage(), is(expectedMessage));
}
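In the expected message, each '\0'-separated token starts with a one-character PostgreSQL error field code (S severity, V non-localized severity, C SQLSTATE, M message, D detail, H hint, and so on) followed by its value; createErrorResponsePacket() is a helper not shown here that populates those fields. A minimal sketch of reading the serialized form back into a field map, assuming only the framing visible above:
Map<Character, String> fields = new LinkedHashMap<>();
for (String token : expectedMessage.split("\0")) {
// the first character is the field code, the rest is the field value
fields.put(token.charAt(0), token.substring(1));
}
assertThat(fields.get('C'), is("3D000"));
assertThat(fields.get('M'), is("database \"test\" does not exist"));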
|
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
}
|
@Test
public void testFetchDisconnectedShouldClearPreferredReadReplica() {
buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(),
Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis());
subscriptions.assignFromUser(singleton(tp0));
client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false));
subscriptions.seek(tp0, 0);
assertEquals(1, sendFetches());
// Set preferred read replica to node=1
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L,
FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.of(1)));
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
fetchRecords();
// Verify
Node selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
assertEquals(1, selected.id());
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
// Disconnect - preferred read replica should be cleared.
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0), true);
consumerClient.poll(time.timer(0));
assertFalse(fetcher.hasCompletedFetches());
fetchRecords();
selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
assertEquals(-1, selected.id());
}
|
public Path getParent() {
final String path = uri.getPath();
final int lastSlash = path.lastIndexOf('/');
final int start = hasWindowsDrive(path, true) ? 3 : 0;
if ((path.length() == start)
|| // empty path
(lastSlash == start && path.length() == start + 1)) { // at root
return null;
}
String parent;
if (lastSlash == -1) {
parent = CUR_DIR;
} else {
final int end = hasWindowsDrive(path, true) ? 3 : 0;
parent = path.substring(0, lastSlash == end ? end + 1 : lastSlash);
}
return new Path(uri.getScheme(), uri.getAuthority(), parent);
}
|
@Test
void testGetParent() {
Path p = new Path("/my/fancy/path");
assertThat(p.getParent().toUri().getPath()).isEqualTo("/my/fancy");
p = new Path("/my/other/fancy/path/");
assertThat(p.getParent().toUri().getPath()).isEqualTo("/my/other/fancy");
p = new Path("hdfs:///my/path");
assertThat(p.getParent().toUri().getPath()).isEqualTo("/my");
p = new Path("hdfs:///myPath/");
assertThat(p.getParent().toUri().getPath()).isEqualTo("/");
p = new Path("/");
assertThat(p.getParent()).isNull();
p = new Path("C:/my/windows/path");
assertThat(p.getParent().toUri().getPath()).isEqualTo("/C:/my/windows");
}
|
@ConstantFunction.List(list = {
@ConstantFunction(name = "convert_tz", argTypes = {DATE, VARCHAR, VARCHAR}, returnType = DATETIME),
@ConstantFunction(name = "convert_tz", argTypes = {DATETIME, VARCHAR, VARCHAR}, returnType = DATETIME)
})
public static ConstantOperator convert_tz(ConstantOperator arg, ConstantOperator fromTz, ConstantOperator toTz) {
LocalDateTime dt = arg.getDatetime();
ZoneId oldZone = ZoneId.of(fromTz.getVarchar());
ZoneId newZone = ZoneId.of(toTz.getVarchar());
LocalDateTime newDateTime = dt.atZone(oldZone).withZoneSameInstant(newZone).toLocalDateTime();
return ConstantOperator.createDatetime(newDateTime);
}
|
@Test
public void convert_tz() {
ConstantOperator olddt = ConstantOperator.createDatetime(LocalDateTime.of(2019, 8, 1, 13, 21, 3));
assertEquals("2019-07-31T22:21:03",
ScalarOperatorFunctions.convert_tz(olddt,
ConstantOperator.createVarchar("Asia/Shanghai"),
ConstantOperator.createVarchar("America/Los_Angeles")).getDatetime().toString());
ConstantOperator oldd = ConstantOperator.createDate(LocalDateTime.of(2019, 8, 1, 0, 0, 0));
assertEquals("2019-07-31T09:00",
ScalarOperatorFunctions.convert_tz(oldd,
ConstantOperator.createVarchar("Asia/Shanghai"),
ConstantOperator.createVarchar("America/Los_Angeles")).getDatetime().toString());
}
|
@Override
public Connection getConnection() throws SQLException {
Connection connection = dataSource.getConnection();
return getConnectionProxy(connection);
}
|
@Test
public void testGetConnection() throws SQLException {
// Mock
Driver driver = Mockito.mock(Driver.class);
JDBC4MySQLConnection connection = Mockito.mock(JDBC4MySQLConnection.class);
Mockito.when(connection.getAutoCommit()).thenReturn(true);
DatabaseMetaData metaData = Mockito.mock(DatabaseMetaData.class);
Mockito.when(metaData.getURL()).thenReturn("jdbc:mysql:xxx");
Mockito.when(connection.getMetaData()).thenReturn(metaData);
Mockito.when(driver.connect(any(), any())).thenReturn(connection);
DruidDataSource druidDataSource = new DruidDataSource();
druidDataSource.setDriver(driver);
DataSourceProxyXA dataSourceProxyXA = new DataSourceProxyXA(druidDataSource);
Connection connFromDataSourceProxyXA = dataSourceProxyXA.getConnection();
Assertions.assertFalse(connFromDataSourceProxyXA instanceof ConnectionProxyXA);
RootContext.bind("test");
connFromDataSourceProxyXA = dataSourceProxyXA.getConnection();
Assertions.assertTrue(connFromDataSourceProxyXA instanceof ConnectionProxyXA);
ConnectionProxyXA connectionProxyXA = (ConnectionProxyXA)dataSourceProxyXA.getConnection();
Connection wrappedConnection = connectionProxyXA.getWrappedConnection();
Assertions.assertTrue(wrappedConnection instanceof PooledConnection);
Connection wrappedPhysicalConn = ((PooledConnection)wrappedConnection).getConnection();
Assertions.assertSame(wrappedPhysicalConn, connection);
XAConnection xaConnection = connectionProxyXA.getWrappedXAConnection();
Connection connectionInXA = xaConnection.getConnection();
Assertions.assertTrue(connectionInXA instanceof JDBC4ConnectionWrapper);
tearDown();
}
|
@Override
public void handlerPlugin(final PluginData pluginData) {
if (null != pluginData && pluginData.getEnabled()) {
SofaRegisterConfig sofaRegisterConfig = GsonUtils.getInstance().fromJson(pluginData.getConfig(), SofaRegisterConfig.class);
if (Objects.isNull(sofaRegisterConfig)) {
return;
}
SofaRegisterConfig exist = Singleton.INST.get(SofaRegisterConfig.class);
if (Objects.isNull(exist) || !sofaRegisterConfig.equals(exist)) {
// If it is null, initialize it
ApplicationConfigCache.getInstance().init(sofaRegisterConfig);
ApplicationConfigCache.getInstance().invalidateAll();
}
Singleton.INST.single(SofaRegisterConfig.class, sofaRegisterConfig);
}
}
|
@Test
public void testPluginEnable() {
PluginData pluginData = new PluginData("", "", registryConfig, "1", true, null);
sofaPluginDataHandler.handlerPlugin(pluginData);
assertEquals("127.0.0.1:2181", Singleton.INST.get(SofaRegisterConfig.class).getRegister());
}
|
@Override
public V fetch(final K key,
final long time) {
return getValueOrNull(inner.fetch(key, time));
}
|
@Test
public void shouldReturnPlainKeyValuePairsOnSingleKeyFetchLongParameters() {
when(mockedWindowTimestampIterator.next())
.thenReturn(KeyValue.pair(21L, ValueAndTimestamp.make("value1", 22L)))
.thenReturn(KeyValue.pair(42L, ValueAndTimestamp.make("value2", 23L)));
when(mockedWindowTimestampStore.fetch("key1", Instant.ofEpochMilli(21L), Instant.ofEpochMilli(42L)))
.thenReturn(mockedWindowTimestampIterator);
final WindowStoreIterator<String> iterator =
readOnlyWindowStoreFacade.fetch("key1", Instant.ofEpochMilli(21L), Instant.ofEpochMilli(42L));
assertThat(iterator.next(), is(KeyValue.pair(21L, "value1")));
assertThat(iterator.next(), is(KeyValue.pair(42L, "value2")));
}
|
@Override
public Checksum compute(final InputStream in, final TransferStatus status) throws BackgroundException {
final byte[] digest = this.digest("SHA-256", this.normalize(in, status), status);
return new Checksum(HashAlgorithm.sha256, digest);
}
|
@Test
public void testNormalize() throws Exception {
assertEquals("c96c6d5be8d08a12e7b5cdc1b207fa6b2430974c86803d8891675e76fd992c20",
new SHA256ChecksumCompute().compute(IOUtils.toInputStream("input", Charset.defaultCharset()),
new TransferStatus()).hash);
assertEquals("c96c6d5be8d08a12e7b5cdc1b207fa6b2430974c86803d8891675e76fd992c20",
new SHA256ChecksumCompute().compute(IOUtils.toInputStream("_input", Charset.defaultCharset()),
new TransferStatus().withOffset(1)).hash);
assertEquals("c96c6d5be8d08a12e7b5cdc1b207fa6b2430974c86803d8891675e76fd992c20",
new SHA256ChecksumCompute().compute(IOUtils.toInputStream("_input_", Charset.defaultCharset()),
new TransferStatus().withOffset(1).withLength(5)).hash);
}
|
public static long getMapSize(byte zoomLevel, int tileSize) {
if (zoomLevel < 0) {
throw new IllegalArgumentException("zoom level must not be negative: " + zoomLevel);
}
return (long) tileSize << zoomLevel;
}
|
@Test
public void getMapSizeTest() {
for (int tileSize : TILE_SIZES) {
for (byte zoomLevel = ZOOM_LEVEL_MIN; zoomLevel <= ZOOM_LEVEL_MAX; ++zoomLevel) {
long factor = Math.round(MercatorProjection.zoomLevelToScaleFactor(zoomLevel));
Assert.assertEquals(tileSize * factor, MercatorProjection.getMapSize(zoomLevel, tileSize));
Assert.assertEquals(MercatorProjection.getMapSizeWithScaleFactor(MercatorProjection.zoomLevelToScaleFactor(zoomLevel), tileSize),
MercatorProjection.getMapSize(zoomLevel, tileSize));
}
verifyInvalidGetMapSize((byte) -1, tileSize);
}
}
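verifyInvalidGetMapSize(...) is a helper that is not shown; a minimal sketch of what it plausibly does, based on the IllegalArgumentException documented in the focal method:
private static void verifyInvalidGetMapSize(byte zoomLevel, int tileSize) {
try {
MercatorProjection.getMapSize(zoomLevel, tileSize);
Assert.fail("zoom level " + zoomLevel + " should have been rejected");
} catch (IllegalArgumentException expected) {
// negative zoom levels must be rejected
}
}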
|
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (!(other instanceof RollbackRule)) {
return false;
}
RollbackRule rhs = (RollbackRule) other;
return this.exceptionName.equals(rhs.exceptionName);
}
|
@Test
public void equalsTest(){
RollbackRule otherRollbackRuleByName = new RollbackRule(Exception.class.getName());
RollbackRule otherRollbackRuleByName2 = new NoRollbackRule(Exception.class.getName());
Assertions.assertNotEquals("", otherRollbackRuleByName.getExceptionName());
Assertions.assertEquals(otherRollbackRuleByName, otherRollbackRuleByName);
Assertions.assertEquals(otherRollbackRuleByName, otherRollbackRuleByName2);
}
|
public SearchQuery parse(String encodedQueryString) {
if (Strings.isNullOrEmpty(encodedQueryString) || "*".equals(encodedQueryString)) {
return new SearchQuery(encodedQueryString);
}
final var queryString = URLDecoder.decode(encodedQueryString, StandardCharsets.UTF_8);
final Matcher matcher = querySplitterMatcher(requireNonNull(queryString).trim());
final ImmutableMultimap.Builder<String, FieldValue> builder = ImmutableMultimap.builder();
final ImmutableSet.Builder<String> disallowedKeys = ImmutableSet.builder();
while (matcher.find()) {
final String entry = matcher.group();
if (!entry.contains(":")) {
builder.put(withPrefixIfNeeded(defaultField), createFieldValue(defaultFieldKey.getFieldType(), entry, false));
continue;
}
final Iterator<String> entryFields = FIELD_VALUE_SPLITTER.splitToList(entry).iterator();
checkArgument(entryFields.hasNext(), INVALID_ENTRY_MESSAGE, entry);
final String key = entryFields.next();
// Skip if there are no valid k/v pairs. (i.e. "action:")
if (!entryFields.hasNext()) {
continue;
}
final boolean negate = key.startsWith("-");
final String cleanKey = key.replaceFirst("^-", "");
final String value = entryFields.next();
VALUE_SPLITTER.splitToList(value).forEach(v -> {
if (!dbFieldMapping.containsKey(cleanKey)) {
disallowedKeys.add(cleanKey);
}
final SearchQueryField translatedKey = dbFieldMapping.get(cleanKey);
if (translatedKey != null) {
builder.put(withPrefixIfNeeded(translatedKey.getDbField()), createFieldValue(translatedKey.getFieldType(), v, negate));
} else {
builder.put(withPrefixIfNeeded(defaultField), createFieldValue(defaultFieldKey.getFieldType(), v, negate));
}
});
checkArgument(!entryFields.hasNext(), INVALID_ENTRY_MESSAGE, entry);
}
return new SearchQuery(queryString, builder.build(), disallowedKeys.build());
}
|
@Test
void fieldPrefixIsAddedToAllFieldsIfSpecified() {
final SearchQueryParser parser = new SearchQueryParser("name", ImmutableSet.of("name", "breed"), "pets.");
final SearchQuery searchQuery = parser.parse("Bobby breed:terrier");
final Multimap<String, SearchQueryParser.FieldValue> queryMap = searchQuery.getQueryMap();
assertThat(queryMap.keySet().size()).isEqualTo(2);
assertThat(queryMap.keySet()).containsOnly("pets.name", "pets.breed");
assertThat(queryMap.get("pets.name")).containsOnly(new SearchQueryParser.FieldValue("Bobby", false));
assertThat(queryMap.get("pets.breed")).containsOnly(new SearchQueryParser.FieldValue("terrier", false));
assertThat(searchQuery.hasDisallowedKeys()).isFalse();
}
|
public String expand(final String remote) {
return this.expand(remote, PREFIX);
}
|
@Test
public void testExpand() {
final String expanded = new TildePathExpander(new Path("/home/jenkins", EnumSet.of(Path.Type.directory)))
.expand("~/f", "~/");
assertEquals("/home/jenkins/f", expanded);
}
|
@Override
public void stop() throws Exception {
if (!running) {
return;
}
running = false;
LOG.info("Stopping ZooKeeperJobGraphStoreWatcher ");
pathCache.close();
}
|
@Test
void testJobGraphAddedAndRemovedShouldNotifyGraphStoreListener() throws Exception {
try (final CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper =
ZooKeeperUtils.startCuratorFramework(
configuration, NoOpFatalErrorHandler.INSTANCE)) {
final CuratorFramework client = curatorFrameworkWrapper.asCuratorFramework();
final JobGraphStoreWatcher jobGraphStoreWatcher =
createAndStartJobGraphStoreWatcher(client);
final ZooKeeperStateHandleStore<JobGraph> stateHandleStore =
createStateHandleStore(client);
final JobGraph jobGraph = JobGraphTestUtils.emptyJobGraph();
final JobID jobID = jobGraph.getJobID();
stateHandleStore.addAndLock("/" + jobID, jobGraph);
CommonTestUtils.waitUntilCondition(
() -> testingJobGraphListener.getAddedJobGraphs().size() > 0);
assertThat(testingJobGraphListener.getAddedJobGraphs()).containsExactly(jobID);
stateHandleStore.releaseAndTryRemove("/" + jobID);
CommonTestUtils.waitUntilCondition(
() -> testingJobGraphListener.getRemovedJobGraphs().size() > 0);
assertThat(testingJobGraphListener.getRemovedJobGraphs()).containsExactly(jobID);
jobGraphStoreWatcher.stop();
}
}
|
public static KMeans fit(double[][] data, int k) {
return fit(data, k, 100, 1E-4);
}
|
@Test
public void testBBD64() {
System.out.println("BBD 64");
MathEx.setSeed(19650218); // to get repeatable results.
KMeans model = KMeans.fit(x, 64);
System.out.println(model);
double r = RandIndex.of(y, model.y);
double r2 = AdjustedRandIndex.of(y, model.y);
System.out.format("Training rand index = %.2f%%, adjusted rand index = %.2f%%%n", 100.0 * r, 100.0 * r2);
assertEquals(0.4714, r, 1E-4);
assertEquals(0.0185, r2, 1E-4);
System.out.format("MI = %.2f%n", MutualInformation.of(y, model.y));
System.out.format("NMI.joint = %.2f%%%n", 100 * NormalizedMutualInformation.joint(y, model.y));
System.out.format("NMI.max = %.2f%%%n", 100 * NormalizedMutualInformation.max(y, model.y));
System.out.format("NMI.min = %.2f%%%n", 100 * NormalizedMutualInformation.min(y, model.y));
System.out.format("NMI.sum = %.2f%%%n", 100 * NormalizedMutualInformation.sum(y, model.y));
System.out.format("NMI.sqrt = %.2f%%%n", 100 * NormalizedMutualInformation.sqrt(y, model.y));
}
|
@Override
public PageData<Customer> findByTenantId(UUID tenantId, PageLink pageLink) {
return findCustomersByTenantId(tenantId, pageLink);
}
|
@Test
public void testFindByTenantId() {
UUID tenantId1 = Uuids.timeBased();
UUID tenantId2 = Uuids.timeBased();
for (int i = 0; i < 20; i++) {
createCustomer(tenantId1, i);
createCustomer(tenantId2, i * 2);
}
PageLink pageLink = new PageLink(15, 0, "CUSTOMER");
PageData<Customer> customers1 = customerDao.findCustomersByTenantId(tenantId1, pageLink);
assertEquals(15, customers1.getData().size());
pageLink = pageLink.nextPageLink();
PageData<Customer> customers2 = customerDao.findCustomersByTenantId(tenantId1, pageLink);
assertEquals(5, customers2.getData().size());
}
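createCustomer(...) is a helper not shown above; the PageLink text search for "CUSTOMER" and the DAO lookup imply it persists a customer whose title contains that prefix. A rough sketch under those assumptions (the Customer setters and the exact customerDao save signature are assumptions, not taken from the snippet):
private void createCustomer(UUID tenantId, int index) {
Customer customer = new Customer();
customer.setTenantId(TenantId.fromUUID(tenantId)); // assumed setter/factory
customer.setTitle("CUSTOMER_" + index); // title must match the "CUSTOMER" text search
customerDao.save(TenantId.fromUUID(tenantId), customer); // assumed save signature
}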
|
@Override
public void removeProject(Project project) {
synchronized (this) {
if (projects.getCurrentProject() == project) {
closeCurrentProject();
}
projects.removeProject((ProjectImpl) project);
}
}
|
@Test
public void testRemoveProject() {
ProjectControllerImpl pc = new ProjectControllerImpl();
pc.addProjectListener(projectListener);
Project project = pc.newProject();
pc.removeProject(project);
Assert.assertFalse(pc.hasCurrentProject());
Assert.assertTrue(pc.getAllProjects().isEmpty());
Assert.assertTrue(project.isClosed());
Mockito.verify(projectListener).closed(project);
}
|
public Optional<VoterSet> addVoter(VoterNode voter) {
if (voters.containsKey(voter.voterKey().id())) {
return Optional.empty();
}
HashMap<Integer, VoterNode> newVoters = new HashMap<>(voters);
newVoters.put(voter.voterKey().id(), voter);
return Optional.of(new VoterSet(newVoters));
}
|
@Test
void testAddVoter() {
Map<Integer, VoterSet.VoterNode> aVoterMap = voterMap(IntStream.of(1, 2, 3), true);
VoterSet voterSet = VoterSet.fromMap(new HashMap<>(aVoterMap));
assertEquals(Optional.empty(), voterSet.addVoter(voterNode(1, true)));
VoterSet.VoterNode voter4 = voterNode(4, true);
aVoterMap.put(voter4.voterKey().id(), voter4);
assertEquals(Optional.of(VoterSet.fromMap(new HashMap<>(aVoterMap))), voterSet.addVoter(voter4));
}
|
@Benchmark
@Threads(16) // Use several threads since we expect contention during logging
public void testSkippedLogging(ZeroExpectedCallsLoggingClientAndService client) {
LOG.trace("no log");
}
|
@Test
public void testSkippedLogging() throws Exception {
ZeroExpectedCallsLoggingClientAndService service =
new ZeroExpectedCallsLoggingClientAndService();
new BeamFnLoggingClientBenchmark().testSkippedLogging(service);
service.tearDown();
}
|
@Override
public void putJobGraph(JobGraph jobGraph) throws Exception {
checkNotNull(jobGraph, "Job graph");
final JobID jobID = jobGraph.getJobID();
final String name = jobGraphStoreUtil.jobIDToName(jobID);
LOG.debug("Adding job graph {} to {}.", jobID, jobGraphStateHandleStore);
boolean success = false;
while (!success) {
synchronized (lock) {
verifyIsRunning();
final R currentVersion = jobGraphStateHandleStore.exists(name);
if (!currentVersion.isExisting()) {
try {
jobGraphStateHandleStore.addAndLock(name, jobGraph);
addedJobGraphs.add(jobID);
success = true;
} catch (StateHandleStore.AlreadyExistException ignored) {
LOG.warn("{} already exists in {}.", jobGraph, jobGraphStateHandleStore);
}
} else if (addedJobGraphs.contains(jobID)) {
try {
jobGraphStateHandleStore.replace(name, currentVersion, jobGraph);
LOG.info("Updated {} in {}.", jobGraph, getClass().getSimpleName());
success = true;
} catch (StateHandleStore.NotExistException ignored) {
LOG.warn("{} does not exists in {}.", jobGraph, jobGraphStateHandleStore);
}
} else {
throw new IllegalStateException(
"Trying to update a graph you didn't "
+ "#getAllSubmittedJobGraphs() or #putJobGraph() yourself before.");
}
}
}
LOG.info("Added {} to {}.", jobGraph, jobGraphStateHandleStore);
}
|
@Test
public void testPutJobGraphWhenNotExist() throws Exception {
final CompletableFuture<JobGraph> addFuture = new CompletableFuture<>();
final TestingStateHandleStore<JobGraph> stateHandleStore =
builder.setExistsFunction(ignore -> IntegerResourceVersion.notExisting())
.setAddFunction(
(ignore, state) -> {
addFuture.complete(state);
return jobGraphStorageHelper.store(state);
})
.build();
final JobGraphStore jobGraphStore = createAndStartJobGraphStore(stateHandleStore);
jobGraphStore.putJobGraph(testingJobGraph);
final JobGraph actual = addFuture.get(timeout, TimeUnit.MILLISECONDS);
assertThat(actual.getJobID(), is(testingJobGraph.getJobID()));
}
|
public static boolean splitKeyRangeContains(
Object[] key, Object[] splitKeyStart, Object[] splitKeyEnd) {
// for all range
if (splitKeyStart == null && splitKeyEnd == null) {
return true;
}
// first split
if (splitKeyStart == null) {
int[] upperBoundRes = new int[key.length];
for (int i = 0; i < key.length; i++) {
upperBoundRes[i] = compareObjects(key[i], splitKeyEnd[i]);
}
return Arrays.stream(upperBoundRes).anyMatch(value -> value < 0)
&& Arrays.stream(upperBoundRes).allMatch(value -> value <= 0);
}
// last split
else if (splitKeyEnd == null) {
int[] lowerBoundRes = new int[key.length];
for (int i = 0; i < key.length; i++) {
lowerBoundRes[i] = compareObjects(key[i], splitKeyStart[i]);
}
return Arrays.stream(lowerBoundRes).allMatch(value -> value >= 0);
}
// other split
else {
int[] lowerBoundRes = new int[key.length];
int[] upperBoundRes = new int[key.length];
for (int i = 0; i < key.length; i++) {
lowerBoundRes[i] = compareObjects(key[i], splitKeyStart[i]);
upperBoundRes[i] = compareObjects(key[i], splitKeyEnd[i]);
}
return Arrays.stream(lowerBoundRes).anyMatch(value -> value >= 0)
&& (Arrays.stream(upperBoundRes).anyMatch(value -> value < 0)
&& Arrays.stream(upperBoundRes).allMatch(value -> value <= 0));
}
}
|
@Test
public void testSplitKeyRangeContains() {
// table with only one split
assertKeyRangeContains(new Object[] {100L}, null, null);
// the last split
assertKeyRangeContains(new Object[] {101L}, new Object[] {100L}, null);
// the first split
assertKeyRangeContains(new Object[] {101L}, null, new Object[] {1024L});
// general splits
assertKeyRangeContains(new Object[] {100L}, new Object[] {1L}, new Object[] {1024L});
assertFalse(
splitKeyRangeContains(new Object[] {0L}, new Object[] {1L}, new Object[] {1024L}));
// split key from binlog may have different type
assertKeyRangeContains(
new Object[] {BigInteger.valueOf(100L)}, new Object[] {1L}, new Object[] {1024L});
assertFalse(
splitKeyRangeContains(
new Object[] {BigInteger.valueOf(0L)},
new Object[] {1L},
new Object[] {1024L}));
}
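assertKeyRangeContains(...) is a small assertion helper that is not shown; given that the negative cases above call splitKeyRangeContains directly inside assertFalse, it is plausibly just the positive counterpart:
private static void assertKeyRangeContains(Object[] key, Object[] splitKeyStart, Object[] splitKeyEnd) {
assertTrue(splitKeyRangeContains(key, splitKeyStart, splitKeyEnd));
}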
|
public Blade register(@NonNull Object bean) {
this.ioc.addBean(bean);
return this;
}
|
@Test
public void testRegister() {
Blade blade = Blade.create();
BladeClassDefineType object = new BladeClassDefineType();
blade.register(object);
assertEquals(object, blade.ioc().getBean(BladeClassDefineType.class));
}
|
public static String[] split(String splittee, String splitChar, boolean truncate) { //NOSONAR
if (splittee == null || splitChar == null) {
return new String[0];
}
final String EMPTY_ELEMENT = "";
int spot;
final int splitLength = splitChar.length();
final String adjacentSplit = splitChar + splitChar;
final int adjacentSplitLength = adjacentSplit.length();
if (truncate) {
while ((spot = splittee.indexOf(adjacentSplit)) != -1) {
splittee = splittee.substring(0, spot + splitLength)
+ splittee.substring(spot + adjacentSplitLength, splittee.length());
}
if (splittee.startsWith(splitChar)) {
splittee = splittee.substring(splitLength);
}
if (splittee.endsWith(splitChar)) { // Remove trailing splitter
splittee = splittee.substring(0, splittee.length() - splitLength);
}
}
List<String> returns = new ArrayList<>();
final int length = splittee.length(); // This is the new length
int start = 0;
spot = 0;
while (start < length && (spot = splittee.indexOf(splitChar, start)) > -1) {
if (spot > 0) {
returns.add(splittee.substring(start, spot));
} else {
returns.add(EMPTY_ELEMENT);
}
start = spot + splitLength;
}
if (start < length) {
returns.add(splittee.substring(start));
} else if (spot == length - splitLength) {// Found splitChar at end of line
returns.add(EMPTY_ELEMENT);
}
return returns.toArray(new String[returns.size()]);
}
|
@Test
public void testSplitStringStringTrueWithLeadingSplitChars() {
// Test leading split characters
assertThat("Ignore leading split chars", JOrphanUtils.split(",,a,bc", ",", true),
CoreMatchers.equalTo(new String[]{"a", "bc"}));
}
|
public Statement buildStatement(final ParserRuleContext parseTree) {
return build(Optional.of(getSources(parseTree)), parseTree);
}
|
@Test
public void shouldHandleQualifiedSelectStarOnRightJoinSource() {
// Given:
final SingleStatementContext stmt =
givenQuery("SELECT TEST2.* FROM TEST1 JOIN TEST2 WITHIN 1 SECOND ON TEST1.ID = TEST2.ID;");
// When:
final Query result = (Query) builder.buildStatement(stmt);
// Then:
assertThat(result.getSelect(),
is(new Select(ImmutableList.of(new AllColumns(Optional.of(TEST2_NAME))))));
}
|
@HighFrequencyInvocation
public boolean accept(final Grantee grantee) {
return grantee.username.equalsIgnoreCase(username) && isPermittedHost(grantee);
}
|
@Test
void assertAccept() {
Grantee grantee = new Grantee("name", "%");
assertTrue(grantee.accept(new Grantee("name", "")));
assertTrue(grantee.accept(new Grantee("name", "127.0.0.1")));
}
|
public OpenAPI read(Class<?> cls) {
return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
}
|
@Test(description = "no NPE resolving map")
public void testTicket2793() {
Reader reader = new Reader(new OpenAPI());
OpenAPI openAPI = reader.read(Ticket2793Resource.class);
String yaml = "openapi: 3.0.1\n" +
"paths:\n" +
" /distances:\n" +
" get:\n" +
" operationId: getDistances\n" +
" responses:\n" +
" \"200\":\n" +
" content:\n" +
" application/json:\n" +
" schema:\n" +
" $ref: '#/components/schemas/DistancesResponse'\n" +
"components:\n" +
" schemas:\n" +
" DistancesResponse:\n" +
" type: object\n" +
" properties:\n" +
" empty:\n" +
" type: boolean\n";
SerializationMatchers.assertEqualsToYaml(openAPI, yaml);
}
|
public static void validate(TableConfig tableConfig, @Nullable Schema schema) {
validate(tableConfig, schema, null);
}
|
@Test
public void testValidateFieldConfig() {
Schema schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME)
.addDateTime(TIME_COLUMN, FieldSpec.DataType.LONG, "1:HOURS:EPOCH", "1:HOURS")
.addSingleValueDimension("myCol1", FieldSpec.DataType.STRING)
.addMultiValueDimension("myCol2", FieldSpec.DataType.INT)
.addSingleValueDimension("intCol", FieldSpec.DataType.INT).build();
TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
.setNoDictionaryColumns(Arrays.asList("myCol1")).build();
try {
FieldConfig fieldConfig = new FieldConfig("myCol1", FieldConfig.EncodingType.RAW, null, null, null, null, null);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
} catch (Exception e) {
Assert.fail("all nullable fields set for fieldConfig should pass", e);
}
try {
FieldConfig fieldConfig =
new FieldConfig("myCol1", FieldConfig.EncodingType.DICTIONARY, FieldConfig.IndexType.FST, null, null);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
Assert.fail("Should fail for with conflicting encoding type of myCol1");
} catch (Exception e) {
Assert.assertEquals(e.getMessage(),
"FieldConfig encoding type is different from indexingConfig for column: myCol1");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
.setNoDictionaryColumns(Arrays.asList("myCol1")).build();
try {
FieldConfig fieldConfig =
new FieldConfig("myCol1", FieldConfig.EncodingType.RAW, FieldConfig.IndexType.FST, null, null);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
Assert.fail("Should fail since FST index is enabled on RAW encoding type");
} catch (Exception e) {
Assert.assertEquals(e.getMessage(),
"Cannot create FST index on column: myCol1, it can only be applied to dictionary encoded single value "
+ "string columns");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).build();
try {
FieldConfig fieldConfig =
new FieldConfig("myCol2", FieldConfig.EncodingType.DICTIONARY, FieldConfig.IndexType.FST, null, null);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
Assert.fail("Should fail since FST index is enabled on multi value column");
} catch (Exception e) {
Assert.assertEquals(e.getMessage(),
"Cannot create FST index on column: myCol2, it can only be applied to dictionary encoded single value "
+ "string columns");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).build();
try {
FieldConfig fieldConfig =
new FieldConfig("intCol", FieldConfig.EncodingType.DICTIONARY, FieldConfig.IndexType.FST, null, null);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
Assert.fail("Should fail since FST index is enabled on non String column");
} catch (Exception e) {
Assert.assertEquals(e.getMessage(),
"Cannot create FST index on column: intCol, it can only be applied to dictionary encoded single value "
+ "string columns");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
.setNoDictionaryColumns(Arrays.asList("myCol2", "intCol")).build();
try {
FieldConfig fieldConfig =
new FieldConfig("intCol", FieldConfig.EncodingType.RAW, FieldConfig.IndexType.TEXT, null, null);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
Assert.fail("Should fail since TEXT index is enabled on non String column");
} catch (Exception e) {
Assert.assertEquals(e.getMessage(),
"Cannot create text index on column: intCol, it can only be applied to string columns");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
.setNoDictionaryColumns(Arrays.asList("myCol1")).build();
try {
FieldConfig fieldConfig =
new FieldConfig("myCol21", FieldConfig.EncodingType.RAW, FieldConfig.IndexType.FST, null, null);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
Assert.fail("Should fail since field name is not present in schema");
} catch (Exception e) {
Assert.assertEquals(e.getMessage(),
"Column: myCol21 defined in field config list must be a valid column defined in the schema");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).build();
try {
FieldConfig fieldConfig = new FieldConfig("intCol", FieldConfig.EncodingType.DICTIONARY, Collections.emptyList(),
CompressionCodec.SNAPPY, null);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
Assert.fail("Should fail since dictionary encoding does not support compression codec SNAPPY");
} catch (Exception e) {
Assert.assertEquals(e.getMessage(), "Compression codec: SNAPPY is not applicable to dictionary encoded index");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).build();
try {
FieldConfig fieldConfig = new FieldConfig("intCol", FieldConfig.EncodingType.RAW, Collections.emptyList(),
CompressionCodec.MV_ENTRY_DICT, null);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
Assert.fail("Should fail since raw encoding does not support compression codec MV_ENTRY_DICT");
} catch (Exception e) {
Assert.assertEquals(e.getMessage(), "Compression codec: MV_ENTRY_DICT is not applicable to raw index");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
.setNoDictionaryColumns(Arrays.asList("myCol1")).build();
try {
// Enable forward index disabled flag for a raw column. This should succeed: although the forward index cannot
// be rebuilt without a dictionary, the constraint to have a dictionary has been lifted.
Map<String, String> fieldConfigProperties = new HashMap<>();
fieldConfigProperties.put(FieldConfig.FORWARD_INDEX_DISABLED, Boolean.TRUE.toString());
FieldConfig fieldConfig =
new FieldConfig("myCol1", FieldConfig.EncodingType.RAW, null, null, null, null, fieldConfigProperties);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
} catch (Exception e) {
Assert.fail("Validation should pass since forward index can be disabled for a column without a dictionary");
}
try {
// Enable forward index disabled flag for a column without inverted index. This should succeed: although the
// forward index cannot be rebuilt without an inverted index, the constraint to have an inverted index has been
// lifted.
Map<String, String> fieldConfigProperties = new HashMap<>();
fieldConfigProperties.put(FieldConfig.FORWARD_INDEX_DISABLED, Boolean.TRUE.toString());
FieldConfig fieldConfig =
new FieldConfig("myCol2", FieldConfig.EncodingType.DICTIONARY, null, null, null, null, fieldConfigProperties);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
} catch (Exception e) {
Assert.fail("Validation should pass since forward index can be disabled for a column without an inverted index");
}
try {
// Enable forward index disabled flag for a column and verify that dictionary override options are not set.
Map<String, String> fieldConfigProperties = new HashMap<>();
fieldConfigProperties.put(FieldConfig.FORWARD_INDEX_DISABLED, Boolean.TRUE.toString());
tableConfig.getIndexingConfig().setOptimizeDictionary(true);
tableConfig.getIndexingConfig().setOptimizeDictionaryForMetrics(true);
TableConfigUtils.validate(tableConfig, schema);
} catch (Exception e) {
Assert.assertEquals(e.getMessage(), "Dictionary override optimization options (OptimizeDictionary, "
+ "optimizeDictionaryForMetrics) not supported with forward index for column: myCol2, disabled");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
.setNoDictionaryColumns(Arrays.asList("myCol1")).setInvertedIndexColumns(Arrays.asList("myCol2")).build();
try {
// Enable forward index disabled flag for a column with inverted index
Map<String, String> fieldConfigProperties = new HashMap<>();
fieldConfigProperties.put(FieldConfig.FORWARD_INDEX_DISABLED, Boolean.TRUE.toString());
FieldConfig fieldConfig =
new FieldConfig("myCol2", FieldConfig.EncodingType.DICTIONARY, FieldConfig.IndexType.INVERTED, null, null,
null, fieldConfigProperties);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
} catch (Exception e) {
Assert.fail("Should not fail as myCol2 has forward index disabled but inverted index enabled");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
.setNoDictionaryColumns(Arrays.asList("myCol1")).setInvertedIndexColumns(Arrays.asList("myCol2"))
.setSortedColumn("myCol2").build();
try {
// Enable forward index disabled flag for a column with inverted index and is sorted
Map<String, String> fieldConfigProperties = new HashMap<>();
fieldConfigProperties.put(FieldConfig.FORWARD_INDEX_DISABLED, Boolean.TRUE.toString());
FieldConfig fieldConfig =
new FieldConfig("myCol2", FieldConfig.EncodingType.DICTIONARY, FieldConfig.IndexType.INVERTED, null, null,
null, fieldConfigProperties);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
} catch (Exception e) {
Assert.fail("Should not fail for myCol2 with forward index disabled but is sorted, this is a no-op");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
.setNoDictionaryColumns(Arrays.asList("myCol1")).setInvertedIndexColumns(Arrays.asList("myCol2"))
.setRangeIndexColumns(Arrays.asList("myCol2")).build();
try {
// Enable forward index disabled flag for a multi-value column with inverted index and range index
Map<String, String> fieldConfigProperties = new HashMap<>();
fieldConfigProperties.put(FieldConfig.FORWARD_INDEX_DISABLED, Boolean.TRUE.toString());
FieldConfig fieldConfig =
new FieldConfig("myCol2", FieldConfig.EncodingType.DICTIONARY, FieldConfig.IndexType.INVERTED,
Arrays.asList(FieldConfig.IndexType.INVERTED, FieldConfig.IndexType.RANGE), null, null,
fieldConfigProperties);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
Assert.fail("Should fail for MV myCol2 with forward index disabled but has range and inverted index");
} catch (Exception e) {
Assert.assertEquals(e.getMessage(), "Feature not supported for multi-value columns with range index. "
+ "Cannot disable forward index for column myCol2. Disable range index on this column to use this feature.");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
.setInvertedIndexColumns(Arrays.asList("myCol1")).setRangeIndexColumns(Arrays.asList("myCol1")).build();
try {
// Enable forward index disabled flag for a singe-value column with inverted index and range index v1
Map<String, String> fieldConfigProperties = new HashMap<>();
fieldConfigProperties.put(FieldConfig.FORWARD_INDEX_DISABLED, Boolean.TRUE.toString());
FieldConfig fieldConfig =
new FieldConfig("myCol1", FieldConfig.EncodingType.DICTIONARY, FieldConfig.IndexType.INVERTED,
Arrays.asList(FieldConfig.IndexType.INVERTED, FieldConfig.IndexType.RANGE), null, null,
fieldConfigProperties);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
tableConfig.getIndexingConfig().setRangeIndexVersion(1);
TableConfigUtils.validate(tableConfig, schema);
Assert.fail("Should fail for SV myCol1 with forward index disabled but has range v1 and inverted index");
} catch (Exception e) {
Assert.assertEquals(e.getMessage(), "Feature not supported for single-value columns with range index version "
+ "< 2. Cannot disable forward index for column myCol1. Either disable range index or create range index "
+ "with version >= 2 to use this feature.");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
.setNoDictionaryColumns(Arrays.asList("myCol2")).setInvertedIndexColumns(Arrays.asList("myCol2")).build();
try {
// Enable forward index disabled flag for a column with inverted index and disable dictionary
Map<String, String> fieldConfigProperties = new HashMap<>();
fieldConfigProperties.put(FieldConfig.FORWARD_INDEX_DISABLED, Boolean.TRUE.toString());
FieldConfig fieldConfig =
new FieldConfig("myCol2", FieldConfig.EncodingType.RAW, FieldConfig.IndexType.INVERTED, null, null, null,
fieldConfigProperties);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
Assert.fail("Should not be able to disable dictionary but keep inverted index");
} catch (Exception e) {
Assert.assertEquals(e.getMessage(),
"Cannot create an Inverted index on column myCol2 specified in the noDictionaryColumns config");
}
// Tests the case when the field-config list marks a column as raw (non-dictionary) and enables
// inverted index on it
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).build();
try {
FieldConfig fieldConfig = new FieldConfig.Builder("myCol2")
.withIndexTypes(Arrays.asList(FieldConfig.IndexType.INVERTED))
.withEncodingType(FieldConfig.EncodingType.RAW).build();
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
Assert.fail("Should not be able to disable dictionary but keep inverted index");
} catch (Exception e) {
Assert.assertEquals(e.getMessage(),
"Cannot create inverted index on column: myCol2, it can only be applied to dictionary encoded columns");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
.setNoDictionaryColumns(Arrays.asList("myCol2")).build();
try {
// Enable forward index disabled flag for a column with FST index and disable dictionary
Map<String, String> fieldConfigProperties = new HashMap<>();
fieldConfigProperties.put(FieldConfig.FORWARD_INDEX_DISABLED, Boolean.TRUE.toString());
FieldConfig fieldConfig =
new FieldConfig("myCol2", FieldConfig.EncodingType.RAW, FieldConfig.IndexType.FST, null, null, null,
fieldConfigProperties);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
Assert.fail("Should not be able to disable dictionary but keep inverted index");
} catch (Exception e) {
Assert.assertEquals(e.getMessage(),
"Cannot create FST index on column: myCol2, it can only be applied to dictionary encoded single value "
+ "string columns");
}
tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
.setNoDictionaryColumns(Arrays.asList("intCol")).setRangeIndexColumns(Arrays.asList("intCol")).build();
try {
// Enable forward index disabled flag for a column with range index and disable dictionary
Map<String, String> fieldConfigProperties = new HashMap<>();
fieldConfigProperties.put(FieldConfig.FORWARD_INDEX_DISABLED, Boolean.TRUE.toString());
FieldConfig fieldConfig =
new FieldConfig("intCol", FieldConfig.EncodingType.RAW, FieldConfig.IndexType.RANGE, null, null, null,
fieldConfigProperties);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
} catch (Exception e) {
Assert.fail("Range index with forward index disabled no dictionary column is allowed");
}
// Disabling forward index for a realtime table will make the validation fail.
Map<String, String> streamConfigs = getStreamConfigs();
tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN)
.setNoDictionaryColumns(Arrays.asList("intCol")).setStreamConfigs(streamConfigs).build();
try {
// Enable forward index disabled flag for a column with inverted index and disable dictionary
Map<String, String> fieldConfigProperties = new HashMap<>();
fieldConfigProperties.put(FieldConfig.FORWARD_INDEX_DISABLED, Boolean.TRUE.toString());
FieldConfig fieldConfig =
new FieldConfig("intCol", FieldConfig.EncodingType.RAW, FieldConfig.IndexType.INVERTED, null, null, null,
fieldConfigProperties);
tableConfig.setFieldConfigList(Arrays.asList(fieldConfig));
TableConfigUtils.validate(tableConfig, schema);
} catch (Exception e) {
Assert.assertEquals(e.getMessage(),
"Cannot disable forward index for column intCol, as the table type is REALTIME.");
}
}
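getStreamConfigs() used for the REALTIME table is a helper not shown above; a minimal sketch of a typical Kafka stream configuration map for this purpose (the exact keys and values in the real helper are assumptions):
private static Map<String, String> getStreamConfigs() {
Map<String, String> streamConfigs = new HashMap<>();
streamConfigs.put("streamType", "kafka");
streamConfigs.put("stream.kafka.topic.name", "myTopic");
streamConfigs.put("stream.kafka.decoder.class.name", "org.apache.pinot.plugin.stream.kafka.KafkaJSONMessageDecoder");
return streamConfigs;
}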
|
public String accessSecret(String secretVersion) {
checkArgument(!secretVersion.isEmpty(), "secretVersion can not be empty");
checkIsUsable();
try {
SecretVersionName secretVersionName;
if (SecretVersionName.isParsableFrom(secretVersion)) {
secretVersionName = SecretVersionName.parse(secretVersion);
} else {
throw new IllegalArgumentException(
"Provided Secret must be in the form"
+ " projects/{project}/secrets/{secret}/versions/{secret_version}");
}
AccessSecretVersionResponse response =
secretManagerServiceClient.accessSecretVersion(secretVersionName);
return response.getPayload().getData().toStringUtf8();
} catch (Exception e) {
throw new SecretManagerResourceManagerException("Error while accessing a secret version", e);
}
}
|
@Test
public void testAccessSecretWithInvalidNameShouldFail() {
IllegalArgumentException exception =
assertThrows(IllegalArgumentException.class, () -> testManager.accessSecret(""));
assertThat(exception).hasMessageThat().contains("secretVersion can not be empty");
}
|
public SalesforceUpsertMeta() {
super(); // allocate BaseStepMeta
}
|
@Test
public void testSalesforceUpsertMeta() throws KettleException {
List<String> attributes = new ArrayList<String>();
attributes.addAll( SalesforceMetaTest.getDefaultAttributes() );
attributes.addAll( Arrays.asList( "upsertField", "batchSize", "salesforceIDFieldName", "updateLookup",
"updateStream", "useExternalId", "rollbackAllChangesOnError" ) );
Map<String, String> getterMap = new HashMap<String, String>();
Map<String, String> setterMap = new HashMap<String, String>();
Map<String, FieldLoadSaveValidator<?>> fieldLoadSaveValidators = new HashMap<String, FieldLoadSaveValidator<?>>();
fieldLoadSaveValidators.put( "updateLookup",
new ArrayLoadSaveValidator<String>( new StringLoadSaveValidator(), 50 ) );
fieldLoadSaveValidators.put( "updateStream",
new ArrayLoadSaveValidator<String>( new StringLoadSaveValidator(), 50 ) );
fieldLoadSaveValidators.put( "useExternalId",
new ArrayLoadSaveValidator<Boolean>( new BooleanLoadSaveValidator(), 50 ) );
LoadSaveTester loadSaveTester =
new LoadSaveTester( SalesforceUpsertMeta.class, attributes, getterMap, setterMap,
fieldLoadSaveValidators, new HashMap<String, FieldLoadSaveValidator<?>>() );
loadSaveTester.testRepoRoundTrip();
loadSaveTester.testXmlRoundTrip();
}
|
public static Set<Result> anaylze(String log) {
Set<Result> results = new HashSet<>();
for (Rule rule : Rule.values()) {
Matcher matcher = rule.pattern.matcher(log);
if (matcher.find()) {
results.add(new Result(rule, log, matcher));
}
}
return results;
}
|
@Test
public void fabric0_12() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/logs/fabric-version-0.12.txt")),
CrashReportAnalyzer.Rule.FABRIC_VERSION_0_12);
}
|
public static <InputT extends PInput, OutputT extends POutput>
PTransform<InputT, OutputT> compose(SerializableFunction<InputT, OutputT> fn) {
return new PTransform<InputT, OutputT>() {
@Override
public OutputT expand(InputT input) {
return fn.apply(input);
}
};
}
|
@Test
public void testNamedCompose() {
PTransform<PCollection<Integer>, PCollection<Integer>> composed =
PTransform.compose("MyName", (PCollection<Integer> numbers) -> numbers);
assertEquals("MyName", composed.name);
}
|
@Override
public void execute(Context context) {
executeForBranch(treeRootHolder.getRoot());
}
|
@Test
public void execute_whenAnalyzerChangedAndLanguageNotSupported_shouldSkipRaisingEvent() {
QualityProfile qp1 = qp(QP_NAME_1, LANGUAGE_KEY_1, new Date());
mockLanguageInRepository(LANGUAGE_KEY_1);
when(measureRepository.getBaseMeasure(treeRootHolder.getRoot(), qualityProfileMetric)).thenReturn(Optional.of(newMeasure()));
when(measureRepository.getRawMeasure(treeRootHolder.getRoot(), qualityProfileMetric)).thenReturn(Optional.of(newMeasure(qp1)));
when(analysisMetadataHolder.getScannerPluginsByKey()).thenReturn(Collections.emptyMap());
when(analysisMetadataHolder.getBaseAnalysis()).thenReturn(new Analysis.Builder().setUuid("uuid").setCreatedAt(1L).build());
underTest.execute(new TestComputationStepContext());
verifyNoMoreInteractions(eventRepository);
}
|
public static Object getValueOrCachedValue(Record record, SerializationService serializationService) {
Object cachedValue = record.getCachedValueUnsafe();
if (cachedValue == NOT_CACHED) {
//record does not support caching at all
return record.getValue();
}
for (; ; ) {
if (cachedValue == null) {
Object valueBeforeCas = record.getValue();
if (!shouldCache(valueBeforeCas)) {
//it's either a null or value which we do not want to cache. let's just return it.
return valueBeforeCas;
}
Object fromCache = tryStoreIntoCache(record, valueBeforeCas, serializationService);
if (fromCache != null) {
return fromCache;
}
} else if (cachedValue instanceof Thread) {
//the cachedValue is either locked by another thread or it contains a wrapped thread
cachedValue = ThreadWrapper.unwrapOrNull(cachedValue);
if (cachedValue != null) {
//exceptional case: the cachedValue is not locked, it just contains an instance of Thread.
//this can happen when user put an instance of Thread into a map
//(=it should never happen, but never say never...)
return cachedValue;
}
//it looks like some other thread actually locked the cachedValue. let's give it another try (iteration)
} else {
//it's not the 'in-progress' marker/lock && it's not a null -> it has to be the actual cachedValue
return cachedValue;
}
Thread.yield();
cachedValue = record.getCachedValueUnsafe();
}
}
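The loop above combines an optimistic read with a compare-and-set that temporarily stores the current Thread object as an "in progress" marker. Below is a minimal standalone sketch of that pattern, assuming nothing about Hazelcast's actual Record or SerializationService APIs; the CachedSlot class is invented for illustration only.
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

// Illustrative sketch only: a value slot that caches the result of an expensive
// computation, using the current Thread object as a temporary "in progress" marker
// so that concurrent readers either compute once or wait for the winner to publish.
final class CachedSlot<T> {
    private final AtomicReference<Object> cache = new AtomicReference<>();

    @SuppressWarnings("unchecked")
    T getOrCompute(Supplier<T> expensiveComputation) {
        for (;;) {
            Object current = cache.get();
            if (current == null) {
                // Try to "lock" the slot by publishing this thread as a marker.
                if (cache.compareAndSet(null, Thread.currentThread())) {
                    T value = expensiveComputation.get();
                    cache.set(value); // publish the real value, releasing the marker
                    return value;
                }
            } else if (current instanceof Thread) {
                // Another thread is computing the value; yield and retry.
                Thread.yield();
            } else {
                // Slot already holds the cached value.
                return (T) current;
            }
        }
    }
}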
|
@Test
public void getValueOrCachedValue_whenRecordIsNotCachable_thenDoNotCache() {
String objectPayload = "foo";
Data dataPayload = serializationService.toData(objectPayload);
Record record = new DataRecordWithStats(dataPayload);
Object value = Records.getValueOrCachedValue(record, null);
assertSame(dataPayload, value);
}
|
@Override
@SuppressFBWarnings("PATH_TRAVERSAL_IN") // suppressing because we are using the getValidFilePath
public String getMimeType(String file) {
if (file == null || !file.contains(".")) {
return null;
}
String mimeType = null;
// may not work on Lambda until mailcap package is present https://github.com/aws/serverless-java-container/pull/504
try {
mimeType = Files.probeContentType(Paths.get(file));
} catch (IOException | InvalidPathException e) {
log("unable to probe for content type, will use fallback", e);
}
if (mimeType == null) {
try {
String mimeTypeGuess = URLConnection.guessContentTypeFromName(new File(file).getName());
if (mimeTypeGuess != null) {
mimeType = mimeTypeGuess;
}
} catch (Exception e) {
log("couldn't find a better contentType than " + mimeType + " for file " + file, e);
}
}
return mimeType;
}
|
@Test
void getMimeType_nonExistentFileInTaskPath_expectNull() {
AwsServletContext ctx = new AwsServletContext(null);
assertNull(ctx.getMimeType("/var/task/nothing"));
}
|
@Override
public PageData<WidgetsBundle> findAllTenantWidgetsBundlesByTenantId(WidgetsBundleFilter widgetsBundleFilter, PageLink pageLink) {
return findTenantWidgetsBundlesByTenantIds(Arrays.asList(widgetsBundleFilter.getTenantId().getId(), NULL_UUID), widgetsBundleFilter, pageLink);
}
|
@Test
public void testFindAllWidgetsBundlesByTenantIdFullSearch() {
UUID tenantId1 = Uuids.timeBased();
UUID tenantId2 = Uuids.timeBased();
for (int i = 0; i < 10; i++) {
createWidgetBundles(5, tenantId1, "WB1_" + i + "_");
createWidgetBundles(3, tenantId2, "WB2_" + i + "_");
createSystemWidgetBundles(2, "WB_SYS_" + i + "_");
}
widgetsBundles = widgetsBundleDao.find(TenantId.SYS_TENANT_ID).stream().sorted(Comparator.comparing(WidgetsBundle::getTitle)).collect(Collectors.toList());
assertEquals(100, widgetsBundles.size());
var widgetType1 = createAndSaveWidgetType(new TenantId(tenantId1), 1, "Test widget type 1", "This is the widget type 1", new String[]{"tag1", "Tag2", "TEST_TAG"});
var widgetType2 = createAndSaveWidgetType(new TenantId(tenantId2), 2, "Test widget type 2", "This is the widget type 2", new String[]{"tag3", "Tag5", "TEST_Tag2"});
var widgetsBundle1 = widgetsBundles.stream().filter(widgetsBundle -> widgetsBundle.getTenantId().getId().equals(tenantId1)).collect(Collectors.toList()).get(10);
widgetTypeDao.saveWidgetsBundleWidget(new WidgetsBundleWidget(widgetsBundle1.getId(), widgetType1.getId(), 0));
var widgetsBundle2 = widgetsBundles.stream().filter(widgetsBundle -> widgetsBundle.getTenantId().getId().equals(tenantId2)).collect(Collectors.toList()).get(15);
widgetTypeDao.saveWidgetsBundleWidget(new WidgetsBundleWidget(widgetsBundle2.getId(), widgetType2.getId(), 0));
var widgetsBundle3 = widgetsBundles.stream().filter(widgetsBundle -> widgetsBundle.getTenantId().getId().equals(tenantId2)).collect(Collectors.toList()).get(28);
widgetTypeDao.saveWidgetsBundleWidget(new WidgetsBundleWidget(widgetsBundle3.getId(), widgetType1.getId(), 0));
widgetTypeDao.saveWidgetsBundleWidget(new WidgetsBundleWidget(widgetsBundle3.getId(), widgetType2.getId(), 1));
PageLink pageLink = new PageLink(10, 0, "widget type 1", new SortOrder("title"));
PageData<WidgetsBundle> widgetsBundles1 = widgetsBundleDao.findAllTenantWidgetsBundlesByTenantId(WidgetsBundleFilter.fullSearchFromTenantId(TenantId.fromUUID(tenantId1)), pageLink);
assertEquals(1, widgetsBundles1.getData().size());
assertEquals(widgetsBundle1, widgetsBundles1.getData().get(0));
pageLink = new PageLink(10, 0, "Test widget type 2", new SortOrder("title"));
PageData<WidgetsBundle> widgetsBundles2 = widgetsBundleDao.findAllTenantWidgetsBundlesByTenantId(WidgetsBundleFilter.fullSearchFromTenantId(TenantId.fromUUID(tenantId1)), pageLink);
assertEquals(0, widgetsBundles2.getData().size());
PageData<WidgetsBundle> widgetsBundles3 = widgetsBundleDao.findAllTenantWidgetsBundlesByTenantId(WidgetsBundleFilter.fullSearchFromTenantId(TenantId.fromUUID(tenantId2)), pageLink);
assertEquals(2, widgetsBundles3.getData().size());
assertEquals(widgetsBundle2, widgetsBundles3.getData().get(0));
assertEquals(widgetsBundle3, widgetsBundles3.getData().get(1));
pageLink = new PageLink(10, 0, "ttt Tag2 ffff hhhh", new SortOrder("title"));
PageData<WidgetsBundle> widgetsBundles4 = widgetsBundleDao.findAllTenantWidgetsBundlesByTenantId(WidgetsBundleFilter.fullSearchFromTenantId(TenantId.fromUUID(tenantId1)), pageLink);
assertEquals(1, widgetsBundles4.getData().size());
assertEquals(widgetsBundle1, widgetsBundles4.getData().get(0));
PageData<WidgetsBundle> widgetsBundles5 = widgetsBundleDao.findAllTenantWidgetsBundlesByTenantId(WidgetsBundleFilter.fullSearchFromTenantId(TenantId.fromUUID(tenantId2)), pageLink);
assertEquals(1, widgetsBundles5.getData().size());
assertEquals(widgetsBundle3, widgetsBundles5.getData().get(0));
}
|
public Set<String> topicCleanupPolicy(String topic) {
Config topicConfig = describeTopicConfig(topic);
if (topicConfig == null) {
// The topic must not exist
log.debug("Unable to find topic '{}' when getting cleanup policy", topic);
return Collections.emptySet();
}
ConfigEntry entry = topicConfig.get(CLEANUP_POLICY_CONFIG);
if (entry != null && entry.value() != null) {
String policyStr = entry.value();
log.debug("Found cleanup.policy={} for topic '{}'", policyStr, topic);
return Arrays.stream(policyStr.split(","))
.map(String::trim)
.filter(s -> !s.isEmpty())
.map(String::toLowerCase)
.collect(Collectors.toSet());
}
// This is unexpected, as the topic config should include the cleanup.policy even if
// the topic settings don't override the broker's log.cleanup.policy. But just to be safe.
log.debug("Found no cleanup.policy for topic '{}'", topic);
return Collections.emptySet();
}
|
@Test
public void verifyingGettingTopicCleanupPolicies() {
String topicName = "myTopic";
Map<String, String> topicConfigs = Collections.singletonMap("cleanup.policy", "compact");
Cluster cluster = createCluster(1);
try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) {
TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList());
mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs);
TopicAdmin admin = new TopicAdmin(mockAdminClient);
Set<String> policies = admin.topicCleanupPolicy("myTopic");
assertEquals(1, policies.size());
assertEquals(TopicConfig.CLEANUP_POLICY_COMPACT, policies.iterator().next());
}
}
|
public static <T> T getLast(List<T> list) {
if (isEmpty(list)) {
return null;
}
int size;
while (true) {
size = list.size();
if (size == 0) {
return null;
}
try {
return list.get(size - 1);
} catch (IndexOutOfBoundsException ex) {
// catch the exception and continue to retry
}
}
}
|
@Test
public void test_getLast() {
// case 1: null
Assertions.assertNull(CollectionUtils.getLast(null));
// case 2: empty
List<String> emptyList = Collections.emptyList();
Assertions.assertNull(CollectionUtils.getLast(emptyList));
// case 3: not empty
List<String> list = new ArrayList<>();
list.add("Foo");
list.add("Bar");
Assertions.assertEquals("Bar", CollectionUtils.getLast(list));
}
|
@Override
public Optional<Track<T>> clean(Track<T> track) {
NavigableSet<Point<T>> points = newTreeSet(track.points());
Set<Point<T>> badPoints = findPointsWithoutEnoughTimeSpacing(track);
points.removeAll(badPoints);
return (points.isEmpty())
? Optional.empty()
: Optional.of(Track.of(points));
}
|
@Test
public void testCleaningExample2() {
Track<NopHit> testTrack = createTrackFromResource(
HighFrequencyPointRemover.class,
"highFrequencyPoints_example2.txt"
);
Duration MIN_ALLOWABLE_SPACING = Duration.ofMillis(500);
DataCleaner<Track<NopHit>> smoother = new HighFrequencyPointRemover<>(MIN_ALLOWABLE_SPACING);
Optional<Track<NopHit>> cleanedTrack = smoother.clean(testTrack);
assertEquals(
406, testTrack.size(),
"This original track has 406 points"
);
assertEquals(
400, cleanedTrack.get().size(),
"The cleaned track should have 6 points removed "
+ "(2 points near the beginning of the track and the very last 4 points)"
);
}
|
static String generateDatabaseName(String baseString) {
return generateResourceId(
baseString,
ILLEGAL_DATABASE_NAME_CHARS,
REPLACE_DATABASE_NAME_CHAR,
MAX_DATABASE_NAME_LENGTH,
TIME_FORMAT);
}
|
@Test
public void testGenerateDatabaseNameShouldReplaceSpace() {
String testBaseString = "Test DB Name";
String actual = generateDatabaseName(testBaseString);
assertThat(actual).matches("test-db-name-\\d{8}-\\d{6}-\\d{6}");
}
|
int freeIps(Node host) {
if (host.type() == NodeType.host) {
return allNodes.eventuallyUnusedIpAddressCount(host);
}
return host.ipConfig().pool().findUnusedIpAddresses(allNodes).size();
}
|
@Test
public void freeIPs() {
assertEquals(2, capacity.freeIps(host1));
assertEquals(1, capacity.freeIps(host2));
assertEquals(1, capacity.freeIps(host3));
assertEquals(0, capacity.freeIps(host4));
}
|
@Override public V get(Object o) {
if (o == null) return null; // null keys are not allowed
int i = arrayIndexOfKey(o);
return i != -1 ? value(i + 1) : null;
}
|
@Test void mapKeys() {
array[0] = " 1";
array[1] = "one";
array[2] = "2 ";
array[3] = "two";
array[4] = " 3";
array[5] = "three";
Map<String, String> map = builder.mapKeys(o -> ((String) o).trim()).build(array);
assertSize(map, 3);
assertBaseCase(map);
assertThat(map).containsOnly(
entry("1", "one"),
entry("2", "two"),
entry("3", "three")
);
assertThat(map).hasToString(
"UnsafeArrayMap{1=one,2=two,3=three}"
);
assertThat(map.get("1")).isEqualTo("one");
assertThat(map.get("2")).isEqualTo("two");
assertThat(map.get("3")).isEqualTo("three");
}
|
protected TransformerInput buildTransformerInput(List<Long> tokens, int maxTokens, boolean isQuery) {
if (!isQuery) {
tokens = tokens.stream().filter(token -> !skipTokens.contains(token)).toList();
}
List<Long> inputIds = new ArrayList<>(maxTokens);
List<Long> attentionMask = new ArrayList<>(maxTokens);
if (tokens.size() > maxTokens - 3)
tokens = tokens.subList(0, maxTokens - 3);
inputIds.add(startSequenceToken);
inputIds.add(isQuery ? querySequenceToken : documentSequenceToken);
inputIds.addAll(tokens);
inputIds.add(endSequenceToken);
int inputLength = inputIds.size();
long padTokenId = isQuery ? maskSequenceToken : padSequenceToken;
int padding = isQuery ? maxTokens - inputLength : 0;
for (int i = 0; i < padding; i++)
inputIds.add(padTokenId);
for (int i = 0; i < inputLength; i++)
attentionMask.add((long) 1);
for (int i = 0; i < padding; i++)
attentionMask.add((long) 0); // Do not attend to mask paddings
return new TransformerInput(inputIds, attentionMask);
}
|
@Test
public void testInputTensorsSentencePiece() {
// Sentencepiece tokenizer("this is a query !") -> [903, 83, 10, 41, 1294, 711]
// ! is mapped to 711 and is a punctuation character
List<Long> tokens = List.of(903L, 83L, 10L, 41L, 1294L, 711L);
ColBertEmbedder.TransformerInput input = multiLingualEmbedder.buildTransformerInput(tokens,10,true);
assertEquals(10,input.inputIds().size());
assertEquals(10,input.attentionMask().size());
assertEquals(List.of(0L, 3L, 903L, 83L, 10L, 41L, 1294L, 711L, 2L, 250001L),input.inputIds());
assertEquals(List.of(1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 0L),input.attentionMask());
// NO padding for document side and 711 (punctuation) is now filtered out
input = multiLingualEmbedder.buildTransformerInput(tokens,10,false);
assertEquals(8,input.inputIds().size());
assertEquals(8,input.attentionMask().size());
assertEquals(List.of(0L, 4L, 903L, 83L, 10L, 41L, 1294L, 2L),input.inputIds());
assertEquals(List.of(1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L),input.attentionMask());
input = multiLingualEmbedder.buildTransformerInput(List.of(711L), 5, true);
assertEquals(List.of(0L, 3L, 711L,2L, 250001L),input.inputIds());
assertEquals(List.of(1L, 1L, 1L, 1L, 0L),input.attentionMask());
input = multiLingualEmbedder.buildTransformerInput(List.of(711L), 5, false);
assertEquals(List.of(0L, 4L, 2L),input.inputIds());
assertEquals(List.of(1L, 1L, 1L),input.attentionMask());
}
|
public Mono<Void> createStreamAppAcl(KafkaCluster cluster, CreateStreamAppAclDTO request) {
return adminClientService.get(cluster)
.flatMap(ac -> createAclsWithLogging(ac, createStreamAppBindings(request)))
.then();
}
|
@Test
void createsStreamAppDependantAcls() {
ArgumentCaptor<Collection<AclBinding>> createdCaptor = ArgumentCaptor.forClass(Collection.class);
when(adminClientMock.createAcls(createdCaptor.capture()))
.thenReturn(Mono.empty());
var principal = UUID.randomUUID().toString();
var host = UUID.randomUUID().toString();
aclsService.createStreamAppAcl(
CLUSTER,
new CreateStreamAppAclDTO()
.principal(principal)
.host(host)
.inputTopics(List.of("t1"))
.outputTopics(List.of("t2", "t3"))
.applicationId("appId1")
).block();
// Read on input topics, Write on output topics
// ALL on applicationId-prefixed Groups and Topics
Collection<AclBinding> createdBindings = createdCaptor.getValue();
assertThat(createdBindings)
.hasSize(5)
.contains(new AclBinding(
new ResourcePattern(ResourceType.TOPIC, "t1", PatternType.LITERAL),
new AccessControlEntry(principal, host, AclOperation.READ, AclPermissionType.ALLOW)))
.contains(new AclBinding(
new ResourcePattern(ResourceType.TOPIC, "t2", PatternType.LITERAL),
new AccessControlEntry(principal, host, AclOperation.WRITE, AclPermissionType.ALLOW)))
.contains(new AclBinding(
new ResourcePattern(ResourceType.TOPIC, "t3", PatternType.LITERAL),
new AccessControlEntry(principal, host, AclOperation.WRITE, AclPermissionType.ALLOW)))
.contains(new AclBinding(
new ResourcePattern(ResourceType.GROUP, "appId1", PatternType.PREFIXED),
new AccessControlEntry(principal, host, AclOperation.ALL, AclPermissionType.ALLOW)))
.contains(new AclBinding(
new ResourcePattern(ResourceType.TOPIC, "appId1", PatternType.PREFIXED),
new AccessControlEntry(principal, host, AclOperation.ALL, AclPermissionType.ALLOW)));
}
|
public DdlCommandResult execute(
final String sql,
final DdlCommand ddlCommand,
final boolean withQuery,
final Set<SourceName> withQuerySources
) {
return execute(sql, ddlCommand, withQuery, withQuerySources, false);
}
|
@Test
public void shouldAlterTable() {
// Given:
alterSource = new AlterSourceCommand(EXISTING_TABLE, DataSourceType.KTABLE.getKsqlType(), NEW_COLUMNS);
// When:
final DdlCommandResult result = cmdExec.execute(SQL_TEXT, alterSource, false, NO_QUERY_SOURCES);
// Then:
assertThat(result.isSuccess(), is(true));
assertThat(metaStore.getSource(EXISTING_TABLE).getSchema().columns().size(), is(8));
assertThat(metaStore.getSource(EXISTING_TABLE).getSqlExpression(), is("sqlexpression\nsome ksql"));
}
|
@Override
public UrlPattern doGetPattern() {
return UrlPattern.builder()
.includes(MOVED_WEB_SERVICES)
.build();
}
|
@Test
public void do_get_pattern() {
assertThat(underTest.doGetPattern().matches("/api/components/update_key")).isTrue();
assertThat(underTest.doGetPattern().matches("/api/components/bulk_update_key")).isTrue();
assertThat(underTest.doGetPattern().matches("/api/projects/update_key")).isFalse();
}
|
public static TaskAndAction createRemoveTask(final TaskId taskId,
final CompletableFuture<StateUpdater.RemovedTaskResult> future) {
Objects.requireNonNull(taskId, "Task ID of task to remove is null!");
Objects.requireNonNull(future, "Future for task to remove is null!");
return new TaskAndAction(null, taskId, Action.REMOVE, future);
}
|
@Test
public void shouldThrowIfRemoveTaskActionIsCreatedWithNullFuture() {
final Exception exception = assertThrows(
NullPointerException.class,
() -> createRemoveTask(new TaskId(0, 0), null)
);
assertTrue(exception.getMessage().contains("Future for task to remove is null!"));
}
|
@SuppressWarnings({"unchecked", "UnstableApiUsage"})
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement) {
if (!(statement.getStatement() instanceof DropStatement)) {
return statement;
}
final DropStatement dropStatement = (DropStatement) statement.getStatement();
if (!dropStatement.isDeleteTopic()) {
return statement;
}
final SourceName sourceName = dropStatement.getName();
final DataSource source = metastore.getSource(sourceName);
if (source != null) {
if (source.isSource()) {
throw new KsqlException("Cannot delete topic for read-only source: " + sourceName.text());
}
checkTopicRefs(source);
deleteTopic(source);
final Closer closer = Closer.create();
closer.register(() -> deleteKeySubject(source));
closer.register(() -> deleteValueSubject(source));
try {
closer.close();
} catch (final KsqlException e) {
throw e;
} catch (final Exception e) {
throw new KsqlException(e);
}
} else if (!dropStatement.getIfExists()) {
throw new KsqlException("Could not find source to delete topic for: " + statement);
}
final T withoutDelete = (T) dropStatement.withoutDeleteClause();
final String withoutDeleteText = SqlFormatter.formatSql(withoutDelete) + ";";
return statement.withStatement(withoutDeleteText, withoutDelete);
}
|
@Test
public void shouldDeleteAvroSchemaInSR() throws IOException, RestClientException {
// Given:
when(topic.getKeyFormat()).thenReturn(KeyFormat.of(FormatInfo.of(FormatFactory.AVRO.name()), SerdeFeatures.of(), Optional.empty()));
when(topic.getValueFormat()).thenReturn(ValueFormat.of(FormatInfo.of(FormatFactory.AVRO.name()),
SerdeFeatures.of()));
// When:
deleteInjector.inject(DROP_WITH_DELETE_TOPIC);
// Then:
verify(registryClient).deleteSubject(KsqlConstants.getSRSubject("something", true));
verify(registryClient).deleteSubject(KsqlConstants.getSRSubject("something", false));
}
|
public boolean shouldUpdateRoot() {
return updateRoot;
}
|
@Test
public void testUpdateRoot() {
final DistCpOptions options = new DistCpOptions.Builder(
Collections.singletonList(
new Path("hdfs://localhost:8020/source")),
new Path("hdfs://localhost:8020/target/"))
.withUpdateRoot(true)
.build();
Assert.assertTrue(options.shouldUpdateRoot());
}
|
long getNodeResultLimit(int ownedPartitions) {
return isQueryResultLimitEnabled ? (long) ceil(resultLimitPerPartition * ownedPartitions) : Long.MAX_VALUE;
}
|
@Test
public void testNodeResultLimitThreePartitions() {
initMocksWithConfiguration(200000, 3);
assertEquals(2547, limiter.getNodeResultLimit(3));
}
|
public String getLegacyColumnName( DatabaseMetaData dbMetaData, ResultSetMetaData rsMetaData, int index ) throws KettleDatabaseException {
if ( dbMetaData == null ) {
throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoDBMetaDataException" ) );
}
if ( rsMetaData == null ) {
throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoRSMetaDataException" ) );
}
try {
return dbMetaData.getDriverMajorVersion() > 3 ? rsMetaData.getColumnLabel( index ) : rsMetaData.getColumnName( index );
} catch ( Exception e ) {
throw new KettleDatabaseException( String.format( "%s: %s", BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameException" ), e.getMessage() ), e );
}
}
|
@Test
public void testGetLegacyColumnNameDriverLessOrEqualToThreeFieldContactLastName() throws Exception {
DatabaseMetaData databaseMetaData = mock( DatabaseMetaData.class );
doReturn( 3 ).when( databaseMetaData ).getDriverMajorVersion();
assertEquals( "CONTACTLASTNAME", new MySQLDatabaseMeta().getLegacyColumnName( databaseMetaData, getResultSetMetaData(), 3 ) );
}
|
public T run() throws Exception {
try {
return execute();
} catch(Exception e) {
if (e.getClass().equals(retryExceptionType)) {
tries++;
if (MAX_RETRIES == tries) {
throw e;
} else {
return run();
}
} else {
throw e;
}
}
}
|
@Test
public void testRetrySuccess() {
Retry<Void> retriable = new Retry<Void>(NullPointerException.class) {
private int count = 0;
@Override
public Void execute() {
if (count < 1) {
count++;
throw new NullPointerException();
} else {
return null;
}
}
};
try {
retriable.run();
} catch (Exception e) {
Assert.fail();
}
}
|
public Page<Organization> getAllOrganizations(int pageIndex, int pageSize) {
return organizationRepository.findAll(PageRequest.of(pageIndex, pageSize));
}
|
@Test
public void getAllOrganizations() {
when(repositoryMock.findAll(any(Pageable.class))).thenReturn(getPageOrganizations());
Page<Organization> result = organizationServiceMock.getAllOrganizations(1, 10);
verify(repositoryMock, times(1)).findAll(any(Pageable.class));
assertNotNull(result);
}
|
boolean isInsideClosedOpen(Number toEvaluate) {
if (leftMargin == null) {
return toEvaluate.doubleValue() < rightMargin.doubleValue();
} else if (rightMargin == null) {
return toEvaluate.doubleValue() >= leftMargin.doubleValue();
} else {
return toEvaluate.doubleValue() >= leftMargin.doubleValue() && toEvaluate.doubleValue() < rightMargin.doubleValue();
}
}
|
@Test
void isInsideClosedOpen() {
KiePMMLInterval kiePMMLInterval = new KiePMMLInterval(null, 20, CLOSURE.CLOSED_OPEN);
assertThat(kiePMMLInterval.isInsideClosedOpen(10)).isTrue();
assertThat(kiePMMLInterval.isInsideClosedOpen(20)).isFalse();
assertThat(kiePMMLInterval.isInsideClosedOpen(30)).isFalse();
kiePMMLInterval = new KiePMMLInterval(20, null, CLOSURE.CLOSED_OPEN);
assertThat(kiePMMLInterval.isInsideClosedOpen(30)).isTrue();
assertThat(kiePMMLInterval.isInsideClosedOpen(20)).isTrue();
assertThat(kiePMMLInterval.isInsideClosedOpen(10)).isFalse();
kiePMMLInterval = new KiePMMLInterval(20, 40, CLOSURE.CLOSED_OPEN);
assertThat(kiePMMLInterval.isInsideClosedOpen(30)).isTrue();
assertThat(kiePMMLInterval.isInsideClosedOpen(10)).isFalse();
assertThat(kiePMMLInterval.isInsideClosedOpen(20)).isTrue();
assertThat(kiePMMLInterval.isInsideClosedOpen(40)).isFalse();
assertThat(kiePMMLInterval.isInsideClosedOpen(50)).isFalse();
}
|
@Override
public String getMessage()
{
String message = super.getMessage();
if (message == null && getCause() != null) {
message = getCause().getMessage();
}
if (message == null) {
message = errorCode.getName();
}
return message;
}
|
@Test
public void testMessage()
{
PrestoException exception = new PrestoException(new TestErrorCode(), "test");
assertEquals(exception.getMessage(), "test");
exception = new PrestoException(new TestErrorCode(), new RuntimeException("test2"));
assertEquals(exception.getMessage(), "test2");
exception = new PrestoException(new TestErrorCode(), new RuntimeException());
assertEquals(exception.getMessage(), "test");
}
|
@SuppressWarnings({"unchecked", "rawtypes"})
public static int compareTo(final Comparable thisValue, final Comparable otherValue, final OrderDirection orderDirection, final NullsOrderType nullsOrderType,
final boolean caseSensitive) {
if (null == thisValue && null == otherValue) {
return 0;
}
if (null == thisValue) {
return NullsOrderType.FIRST == nullsOrderType ? -1 : 1;
}
if (null == otherValue) {
return NullsOrderType.FIRST == nullsOrderType ? 1 : -1;
}
if (!caseSensitive && thisValue instanceof String && otherValue instanceof String) {
return compareToCaseInsensitiveString((String) thisValue, (String) otherValue, orderDirection);
}
return OrderDirection.ASC == orderDirection ? thisValue.compareTo(otherValue) : -thisValue.compareTo(otherValue);
}
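For a quick illustration of what the null-ordering branches above produce, here is a rough standalone example built on JDK comparators rather than the project's OrderDirection/NullsOrderType enums; the class and variable names below are invented for the example.
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

// Illustration only: NULLS FIRST ascending vs NULLS LAST descending orderings,
// mirroring the null-handling branches of compareTo(...) above.
public class NullsOrderingDemo {
    public static void main(String[] args) {
        List<String> values = Arrays.asList("b", null, "a");

        List<String> nullsFirstAsc = values.stream()
                .sorted(Comparator.nullsFirst(Comparator.<String>naturalOrder()))
                .collect(Collectors.toList());
        List<String> nullsLastDesc = values.stream()
                .sorted(Comparator.nullsLast(Comparator.<String>reverseOrder()))
                .collect(Collectors.toList());

        System.out.println(nullsFirstAsc); // [null, a, b]
        System.out.println(nullsLastDesc); // [b, a, null]
    }
}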
|
@Test
void assertCompareToStringWithCaseInsensitive() {
assertThat(CompareUtils.compareTo("A", "a", OrderDirection.DESC, NullsOrderType.FIRST, !caseSensitive), is(0));
}
|
@Override
public ServiceInfo queryInstancesOfService(String serviceName, String groupName, String clusters,
boolean healthyOnly) throws NacosException {
ServiceQueryRequest request = new ServiceQueryRequest(namespaceId, serviceName, groupName);
request.setCluster(clusters);
request.setHealthyOnly(healthyOnly);
QueryServiceResponse response = requestToServer(request, QueryServiceResponse.class);
return response.getServiceInfo();
}
|
@Test
void testQueryInstancesOfService() throws Exception {
QueryServiceResponse res = new QueryServiceResponse();
ServiceInfo info = new ServiceInfo(GROUP_NAME + "@@" + SERVICE_NAME + "@@" + CLUSTERS);
res.setServiceInfo(info);
when(this.rpcClient.request(any())).thenReturn(res);
ServiceInfo actual = client.queryInstancesOfService(SERVICE_NAME, GROUP_NAME, CLUSTERS, false);
assertEquals(info, actual);
}
|
@Override
public boolean isInputConsumable(
SchedulingExecutionVertex executionVertex,
Set<ExecutionVertexID> verticesToDeploy,
Map<ConsumedPartitionGroup, Boolean> consumableStatusCache) {
for (ConsumedPartitionGroup consumedPartitionGroup :
executionVertex.getConsumedPartitionGroups()) {
if (!consumableStatusCache.computeIfAbsent(
consumedPartitionGroup, this::isConsumableBasedOnFinishedProducers)) {
return false;
}
}
return true;
}
|
@Test
void testPartialFinishedBlockingInput() {
final TestingSchedulingTopology topology = new TestingSchedulingTopology();
final List<TestingSchedulingExecutionVertex> producers =
topology.addExecutionVertices().withParallelism(2).finish();
final List<TestingSchedulingExecutionVertex> consumer =
topology.addExecutionVertices().withParallelism(2).finish();
topology.connectAllToAll(producers, consumer)
.withResultPartitionState(ResultPartitionState.CREATED)
.withResultPartitionType(ResultPartitionType.BLOCKING)
.finish();
topology.getResultPartition(producers.get(0).getProducedResults().iterator().next().getId())
.markFinished();
PartialFinishedInputConsumableDecider inputConsumableDecider =
createPartialFinishedInputConsumableDecider();
assertThat(
inputConsumableDecider.isInputConsumable(
consumer.get(0), Collections.emptySet(), new HashMap<>()))
.isFalse();
assertThat(
inputConsumableDecider.isInputConsumable(
consumer.get(1), Collections.emptySet(), new HashMap<>()))
.isFalse();
}
|
public boolean isDisabled() {
return _disabled;
}
|
@Test
public void withEmptyConf()
throws JsonProcessingException {
String confStr = "{}";
IndexConfig config = JsonUtils.stringToObject(confStr, IndexConfig.class);
assertFalse(config.isDisabled(), "Unexpected disabled");
}
|
@Override
public void stop() {
LOG.debug("Notify {} handlers...", ServerStopHandler.class.getSimpleName());
for (ServerStopHandler handler : stopHandlers) {
handler.onServerStop(server);
}
}
|
@Test
public void notifyOnStop() {
ServerLifecycleNotifier notifier = new ServerLifecycleNotifier(server, new ServerStartHandler[] {start1, start2}, new ServerStopHandler[] {stop1, stop2});
notifier.stop();
verify(start1, never()).onServerStart(server);
verify(start2, never()).onServerStart(server);
verify(stop1).onServerStop(server);
verify(stop2).onServerStop(server);
}
|
@Nullable public static BaggageField getByName(@Nullable TraceContext context, String name) {
if (context == null) return null;
return ExtraBaggageContext.getFieldByName(context, validateName(name));
}
|
@Test void getByName_context_null() {
// permits unguarded use of CurrentTraceContext.get()
assertThat(BaggageField.getByName((TraceContext) null, "foo"))
.isNull();
}
|
public HeaderFields getUntreatedHeaders() {
return untreatedHeaders;
}
|
@Test
void testGetUntreatedHeaders() {
URI uri = URI.create("http://example.yahoo.com/test");
HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1);
httpReq.headers().add("key1", "value1");
httpReq.headers().add("key2", List.of("value1", "value2"));
DiscFilterRequest request = new DiscFilterRequest(httpReq);
HeaderFields headers = request.getUntreatedHeaders();
assertEquals(headers.keySet().size(), 2);
assertEquals(headers.get("key1").get(0), "value1");
assertEquals(headers.get("key2").get(0), "value1");
assertEquals(headers.get("key2").get(1), "value2");
}
|
public void lockClusterState(ClusterStateChange stateChange, Address initiator, UUID txnId, long leaseTime,
int memberListVersion, long partitionStateStamp) {
Preconditions.checkNotNull(stateChange);
clusterServiceLock.lock();
try {
if (!node.getNodeExtension().isStartCompleted()) {
throw new IllegalStateException("Can not lock cluster state! Startup is not completed yet!");
}
if (node.getClusterService().getClusterJoinManager().isMastershipClaimInProgress()) {
throw new IllegalStateException("Can not lock cluster state! Mastership claim is in progress!");
}
if (stateChange.isOfType(Version.class)) {
validateNodeCompatibleWith((Version) stateChange.getNewState());
validateClusterVersionChange((Version) stateChange.getNewState());
}
checkMemberListVersion(memberListVersion);
checkMigrationsAndPartitionStateStamp(stateChange, partitionStateStamp);
lockOrExtendClusterState(initiator, txnId, leaseTime);
try {
// check migration status and partition-state version again
// if partition state is changed then release the lock and fail.
checkMigrationsAndPartitionStateStamp(stateChange, partitionStateStamp);
} catch (IllegalStateException e) {
stateLockRef.set(LockGuard.NOT_LOCKED);
throw e;
}
} finally {
clusterServiceLock.unlock();
}
}
|
@Test
public void test_lockClusterState_success() throws Exception {
Address initiator = newAddress();
clusterStateManager.lockClusterState(ClusterStateChange.from(FROZEN), initiator, TXN, 1000, MEMBERLIST_VERSION,
PARTITION_STAMP);
assertLockedBy(initiator);
}
|
@Override
public void onHeartbeatSuccess(ConsumerGroupHeartbeatResponseData response) {
if (response.errorCode() != Errors.NONE.code()) {
String errorMessage = String.format(
"Unexpected error in Heartbeat response. Expected no error, but received: %s",
Errors.forCode(response.errorCode())
);
throw new IllegalArgumentException(errorMessage);
}
MemberState state = state();
if (state == MemberState.LEAVING) {
log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} is " +
"already leaving the group.", memberId, memberEpoch);
return;
}
if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) {
log.debug("Member {} with epoch {} received a successful response to the heartbeat " +
"to leave the group and completed the leave operation. ", memberId, memberEpoch);
return;
}
if (isNotInGroup()) {
log.debug("Ignoring heartbeat response received from broker. Member {} is in {} state" +
" so it's not a member of the group. ", memberId, state);
return;
}
// Update the group member id label in the client telemetry reporter if the member id has
// changed. Initially the member id is empty, and it is updated when the member joins the
// group. This is done here to avoid updating the label on every heartbeat response. Also
// check if the member id is null, as the schema defines it as nullable.
if (response.memberId() != null && !response.memberId().equals(memberId)) {
clientTelemetryReporter.ifPresent(reporter -> reporter.updateMetricsLabels(
Collections.singletonMap(ClientTelemetryProvider.GROUP_MEMBER_ID, response.memberId())));
}
this.memberId = response.memberId();
updateMemberEpoch(response.memberEpoch());
ConsumerGroupHeartbeatResponseData.Assignment assignment = response.assignment();
if (assignment != null) {
if (!state.canHandleNewAssignment()) {
// New assignment received but member is in a state where it cannot take new
// assignments (ex. preparing to leave the group)
log.debug("Ignoring new assignment {} received from server because member is in {} state.",
assignment, state);
return;
}
Map<Uuid, SortedSet<Integer>> newAssignment = new HashMap<>();
assignment.topicPartitions().forEach(topicPartition ->
newAssignment.put(topicPartition.topicId(), new TreeSet<>(topicPartition.partitions())));
processAssignmentReceived(newAssignment);
}
}
|
@Test
public void testHeartbeatSuccessfulResponseWhenLeavingGroupCompletesLeave() {
ConsumerMembershipManager membershipManager = createMemberInStableState();
mockLeaveGroup();
CompletableFuture<Void> leaveResult = membershipManager.leaveGroup();
verify(subscriptionState).unsubscribe();
assertEquals(MemberState.LEAVING, membershipManager.state());
assertFalse(leaveResult.isDone());
membershipManager.onHeartbeatRequestGenerated();
assertEquals(MemberState.UNSUBSCRIBED, membershipManager.state());
assertFalse(leaveResult.isDone());
membershipManager.onHeartbeatSuccess(createConsumerGroupHeartbeatResponse(createAssignment(true)).data());
assertSendLeaveCompleted(membershipManager, leaveResult);
}
|
@SuppressWarnings("unchecked")
public T getValue() {
final T value = (T) FROM_STRING.get(getConverterClass()).apply(JiveGlobals.getProperty(key), this);
if (value == null || (Collection.class.isAssignableFrom(value.getClass()) && ((Collection) value).isEmpty())) {
return defaultValue;
}
if (minValue != null && ((Comparable) minValue).compareTo(value) > 0) {
LOGGER.warn("Configured value of {} is less than the minimum value of {} for the SystemProperty {} - will use default value of {} instead",
value, minValue, key, defaultValue);
return defaultValue;
}
if (maxValue != null && ((Comparable) maxValue).compareTo(value) < 0) {
LOGGER.warn("Configured value of {} is more than the maximum value of {} for the SystemProperty {} - will use default value of {} instead",
value, maxValue, key, defaultValue);
return defaultValue;
}
return value;
}
|
@Test
public void willCreateAnInstantPropertyWithADefaultValue() {
final String key = "test.instant.property.with.default";
final Instant defaultValue = Instant.now().truncatedTo(ChronoUnit.MILLIS);
final SystemProperty<Instant> property = SystemProperty.Builder.ofType(Instant.class)
.setKey(key)
.setDefaultValue(defaultValue)
.setDynamic(true)
.build();
assertThat(property.getValue(), is(defaultValue));
}
|
@Override
public Optional<WindowType> windowType() {
return inner.windowType();
}
|
@Test
public void shouldReturnInnerWindowType() {
// Given:
when(inner.windowType()).thenReturn(Optional.of(WindowType.SESSION));
givenNoopTransforms();
// When:
final Optional<WindowType> windowType = materialization.windowType();
// Then:
assertThat(windowType, is(Optional.of(WindowType.SESSION)));
}
|
@Nonnull
public MappingResults applyToPrimaryResource(@Nonnull Mappings mappings) {
mappings = enrich(mappings);
WorkspaceResource resource = workspace.getPrimaryResource();
MappingResults results = new MappingResults(mappings, listeners.createBundledMappingApplicationListener())
.withAggregateManager(aggregateMappingManager);
// Apply mappings to all classes in the primary resource, collecting into the results model.
Mappings finalMappings = mappings;
ExecutorService service = ThreadUtil.phasingService(applierThreadPool);
Stream.concat(resource.jvmClassBundleStream(), resource.versionedJvmClassBundleStream()).forEach(bundle -> {
bundle.forEach(classInfo -> {
service.execute(() -> dumpIntoResults(results, workspace, resource, bundle, classInfo, finalMappings));
});
});
ThreadUtil.blockUntilComplete(service);
// Yield results
return results;
}
|
@Test
void applyOverlapping() {
String overlapInterfaceAName = OverlapInterfaceA.class.getName().replace('.', '/');
String overlapInterfaceBName = OverlapInterfaceB.class.getName().replace('.', '/');
String overlapClassABName = OverlapClassAB.class.getName().replace('.', '/');
String overlapCallerName = OverlapCaller.class.getName().replace('.', '/');
// Create mappings for all classes but the runner 'OverlapCaller'
Mappings mappings = mappingGenerator.generate(workspace, resource, inheritanceGraph, nameGenerator, new NameGeneratorFilter(null, true) {
@Override
public boolean shouldMapClass(@Nonnull ClassInfo info) {
return !info.getName().equals(overlapCallerName);
}
@Override
public boolean shouldMapMethod(@Nonnull ClassInfo owner, @Nonnull MethodMember method) {
return shouldMapClass(owner);
}
});
// Preview the mapping operation
MappingResults results = mappingApplier.applyToPrimaryResource(mappings);
assertNotNull(mappings.getMappedClassName(overlapInterfaceAName), "OverlapInterfaceA should be remapped");
assertNotNull(mappings.getMappedClassName(overlapInterfaceBName), "OverlapInterfaceB should be remapped");
assertNotNull(mappings.getMappedClassName(overlapClassABName), "OverlapClassAB should be remapped");
assertNull(mappings.getMappedClassName(overlapCallerName), "OverlapCaller should not be remapped");
assertTrue(results.wasMapped(overlapInterfaceAName), "OverlapInterfaceA should have updated");
assertTrue(results.wasMapped(overlapInterfaceBName), "OverlapInterfaceB should have updated");
assertTrue(results.wasMapped(overlapClassABName), "OverlapClassAB should have updated");
assertTrue(results.wasMapped(overlapCallerName), "OverlapCaller should have updated");
// Assert aggregate updated too.
results.apply();
AggregatedMappings aggregatedMappings = aggregateMappingManager.getAggregatedMappings();
assertNotNull(aggregatedMappings.getMappedClassName(overlapInterfaceAName),
"OverlapInterfaceA should be tracked in aggregate");
assertNotNull(aggregatedMappings.getMappedClassName(overlapInterfaceBName),
"OverlapInterfaceB should be tracked in aggregate");
assertNotNull(aggregatedMappings.getMappedClassName(overlapClassABName),
"OverlapClassAB should be tracked in aggregate");
assertNull(aggregatedMappings.getMappedClassName(overlapCallerName),
"OverlapCaller should not be tracked in aggregate");
// Assert that the method is still runnable.
runMapped(OverlapCaller.class, "run");
}
|
@Override public SlotAssignmentResult ensure(long key1, long key2) {
assert key1 != unassignedSentinel : "ensure() called with key1 == nullKey1 (" + unassignedSentinel + ')';
return super.ensure0(key1, key2);
}
|
@Test
public void testCursor_valueAddress() {
final SlotAssignmentResult slot = hsa.ensure(randomKey(), randomKey());
HashSlotCursor16byteKey cursor = hsa.cursor();
cursor.advance();
assertEquals(slot.address(), cursor.valueAddress());
}
|
public byte[] getAuthenticationPluginData() {
return Bytes.concat(authenticationPluginDataPart1, authenticationPluginDataPart2);
}
|
@Test
void assertGetAuthPluginDataWithoutArguments() {
MySQLAuthenticationPluginData actual = new MySQLAuthenticationPluginData();
assertThat(actual.getAuthenticationPluginDataPart1().length, is(8));
assertThat(actual.getAuthenticationPluginDataPart2().length, is(12));
assertThat(actual.getAuthenticationPluginData().length, is(20));
}
|
@Override
public String processLink(String link) {
var externalLink = externalUrlSupplier.getRaw();
if (StringUtils.isBlank(link)) {
return link;
}
if (externalLink == null || !linkInSite(externalLink, link)) {
return link;
}
return append(externalLink.toString(), link);
}
|
@Test
void processWhenLinkIsEmpty() {
assertThat(externalLinkProcessor.processLink(null)).isNull();
assertThat(externalLinkProcessor.processLink("")).isEmpty();
}
|
public static double of(double[] truth, double[] prediction) {
if (truth.length != prediction.length) {
throw new IllegalArgumentException(String.format("The vector sizes don't match: %d != %d.", truth.length, prediction.length));
}
double RSS = 0.0;
double TSS = 0.0;
double ybar = MathEx.mean(truth);
int n = truth.length;
for (int i = 0; i < n; i++) {
double r = truth[i] - prediction[i];
RSS += r * r;
double t = truth[i] - ybar;
TSS += t * t;
}
return 1.0 - RSS / TSS;
}
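In formula form, the method computes the usual coefficient of determination, with \hat{y}_i the predictions and \bar{y} the mean of the observed values:
R^2 = 1 - \frac{\mathrm{RSS}}{\mathrm{TSS}} = 1 - \frac{\sum_{i=1}^{n} (y_i - \hat{y}_i)^2}{\sum_{i=1}^{n} (y_i - \bar{y})^2}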
|
@Test
public void test() {
System.out.println("R2");
double[] truth = {
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2,
104.6, 108.4, 110.8, 112.6, 114.2, 115.7, 116.9
};
double[] prediction = {
83.60082, 86.94973, 88.09677, 90.73065, 96.53551, 97.83067,
98.12232, 99.87776, 103.20861, 105.08598, 107.33369, 109.57251,
112.98358, 113.92898, 115.50214, 117.54028,
};
double expResult = 0.9926;
double result = R2.of(truth, prediction);
assertEquals(expResult, result, 1E-4);
}
|
@Override
public void init(ServletConfig config) throws ServletException {
super.init(config);
final ServletContext context = config.getServletContext();
if (null == registry) {
final Object registryAttr = context.getAttribute(METRICS_REGISTRY);
if (registryAttr instanceof MetricRegistry) {
this.registry = (MetricRegistry) registryAttr;
} else {
throw new ServletException("Couldn't find a MetricRegistry instance.");
}
}
this.allowedOrigin = context.getInitParameter(ALLOWED_ORIGIN);
this.jsonpParamName = context.getInitParameter(CALLBACK_PARAM);
setupMetricsModule(context);
}
|
@Test(expected = ServletException.class)
public void constructorWithRegistryAsArgumentUsesServletConfigWhenNullButWrongTypeInContext() throws Exception {
final ServletContext servletContext = mock(ServletContext.class);
final ServletConfig servletConfig = mock(ServletConfig.class);
when(servletConfig.getServletContext()).thenReturn(servletContext);
when(servletContext.getAttribute(eq(io.dropwizard.metrics.servlets.MetricsServlet.METRICS_REGISTRY)))
.thenReturn("IRELLEVANT_STRING");
final io.dropwizard.metrics.servlets.MetricsServlet metricsServlet = new MetricsServlet(null);
metricsServlet.init(servletConfig);
}
|
public List<NotificationChannel> getChannels() {
return Arrays.asList(channels);
}
|
@Test
public void shouldReturnChannels() {
assertThat(underTest.getChannels()).containsOnly(emailChannel, gtalkChannel);
}
|
private String getUpstreamIpFromHttpDomain() {
String domain = (String) exchange.getAttributes().get(Constants.HTTP_DOMAIN);
try {
if (StringUtils.isNotBlank(domain)) {
URL url = new URL(domain);
return url.getHost();
}
} catch (Exception e) {
LOG.error("get upstream ip error");
}
return "";
}
|
@Test
public void testGetUpstreamIpFromHttpDomain() throws Exception {
exchange.getAttributes().put(Constants.HTTP_DOMAIN, "http://localhost:9195/http/order/path/123/name");
loggingServerHttpResponse.setExchange(exchange);
Method method1 = loggingServerHttpResponse.getClass().getDeclaredMethod("getUpstreamIpFromHttpDomain");
method1.setAccessible(true);
String upstreamIpFromHttpDomain1 = (String) method1.invoke(loggingServerHttpResponse);
Assertions.assertEquals(upstreamIpFromHttpDomain1, "localhost");
exchange = Mockito.mock(ServerWebExchange.class);
ShenyuContext shenyuContext2 = new ShenyuContext();
shenyuContext2.setRpcType("http");
shenyuContext2.setStartDateTime(startDateTime);
exchange.getAttributes().put(Constants.CONTEXT, shenyuContext2);
loggingServerHttpResponse.setExchange(exchange);
Method method2 = loggingServerHttpResponse.getClass().getDeclaredMethod("getUpstreamIpFromHttpDomain");
method2.setAccessible(true);
String upstreamIpFromHttpDomain2 = (String) method2.invoke(loggingServerHttpResponse);
Assertions.assertEquals(upstreamIpFromHttpDomain2, "");
}
|
@Udf
public String lpad(
@UdfParameter(description = "String to be padded") final String input,
@UdfParameter(description = "Target length") final Integer targetLen,
@UdfParameter(description = "Padding string") final String padding) {
if (input == null) {
return null;
}
if (padding == null || padding.isEmpty() || targetLen == null || targetLen < 0) {
return null;
}
final StringBuilder sb = new StringBuilder(targetLen + padding.length());
final int padUpTo = Math.max(targetLen - input.length(), 0);
for (int i = 0; i < padUpTo; i += padding.length()) {
sb.append(padding);
}
sb.setLength(padUpTo);
sb.append(input);
sb.setLength(targetLen);
return sb.toString();
}
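A few hand-worked calls make the edge cases above concrete. The Lpad class name in this sketch is an assumption; only the lpad(...) method shown above is taken from the source.
// Illustrative expectations worked out from the padding logic above.
public class LpadExamples {
    public static void main(String[] args) {
        Lpad udf = new Lpad(); // assumed UDF class name
        System.out.println(udf.lpad("abc", 7, "xy"));  // "xyxyabc" (padUpTo = 4, then input appended)
        System.out.println(udf.lpad("hello", 3, "x")); // "hel"     (setLength(targetLen) truncates long input)
        System.out.println(udf.lpad("abc", 5, ""));    // null      (empty padding is rejected)
        System.out.println(udf.lpad(null, 5, "x"));    // null      (null input passes through)
    }
}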
|
@Test
public void shouldReturnNullForNullPaddingBytes() {
final ByteBuffer result = udf.lpad(BYTES_123, 4, null);
assertThat(result, is(nullValue()));
}
|
public static Pagination pageStartingAt(Integer offset, Integer total, Integer pageSize) {
return new Pagination(offset, total, pageSize);
}
|
@Test
public void shouldCreatePaginationWithLessEquals300Records() {
try {
Pagination.pageStartingAt(0, 1000, 300);
} catch (Exception e) {
fail();
}
}
|
@Deprecated
@Override
public void trackTimer(final String eventName, final TimeUnit timeUnit) {
}
|
@Test
public void trackTimer() {
mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
@Override
public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
Assert.fail();
return false;
}
});
mSensorsAPI.trackTimer("TestTimerEvent", TimeUnit.SECONDS);
mSensorsAPI.trackTimerEnd("TestTimerEvent");
}
|
public static RunnerApi.Pipeline toProto(Pipeline pipeline) {
return toProto(pipeline, SdkComponents.create(pipeline.getOptions()));
}
|
@Test
public void testProtoDirectly() {
final RunnerApi.Pipeline pipelineProto = PipelineTranslation.toProto(pipeline, false);
pipeline.traverseTopologically(
new PipelineProtoVerificationVisitor(pipelineProto, pipeline.getCoderRegistry(), false));
}
|
@Override
public void registerBeanDefinitions(final AnnotationMetadata metadata, final BeanDefinitionRegistry registry) {
registerShenyuClients(metadata, registry);
}
|
@Test
public void registerBeanDefinitionsTest() {
AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext();
final ShenyuSdkClient client = spy(ShenyuSdkClient.class);
((DefaultListableBeanFactory) context.getBeanFactory()).setAllowBeanDefinitionOverriding(false);
context.register(TopLevelSubLevelTestConfig.class);
context.register(SpringMvcContract.class);
context.registerBean("shenyuSdkClient", ShenyuSdkClient.class, () -> client);
context.refresh();
final TopLevelSubLevelTestConfig.TopLevelClient topClient = context.getBean(TopLevelSubLevelTestConfig.TopLevelClient.class);
final SubLevelClient subClient = context.getBean(SubLevelClient.class);
assertNotNull(topClient);
assertNotNull(subClient);
}
|
public static ThreadFactory namedThreads(String pattern) {
return new ThreadFactoryBuilder()
.setNameFormat(pattern)
.setUncaughtExceptionHandler((t, e) -> log.error("Uncaught exception on " + t.getName(), e))
.build();
}
|
@Test
public void exceptionHandler() throws InterruptedException {
ThreadFactory f = Tools.namedThreads("foo");
Thread t = f.newThread(() -> {
throw new IllegalStateException("BOOM!");
});
assertNotNull("thread should have exception handler", t.getUncaughtExceptionHandler());
t.start();
assertAfter(100, () -> assertEquals("incorrect thread state", Thread.State.TERMINATED, t.getState()));
}
|
public static long computeStartOfNextHour(long now) {
Calendar cal = Calendar.getInstance();
cal.setTime(new Date(now));
cal.set(Calendar.MILLISECOND, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MINUTE, 0);
cal.add(Calendar.HOUR, 1);
return cal.getTime().getTime();
}
|
@Test
public void testHour() {
// Mon Nov 20 18:05:17,522 GMT 2006
long now = 1164045917522L;
now = correctBasedOnTimeZone(now);
// Mon Nov 20 19:00:00 GMT 2006
long expected = 1164049200000L;
expected = correctBasedOnTimeZone(expected);
long computed = TimeUtil.computeStartOfNextHour(now);
Assertions.assertEquals(expected - now, 1000 * (42 + 60 * 54) + 478);
Assertions.assertEquals(expected, computed);
}
|
@Override
public void eventAdded( KettleLoggingEvent event ) {
Object messageObject = event.getMessage();
checkNotNull( messageObject, "Expected log message to be defined." );
if ( messageObject instanceof LogMessage ) {
LogMessage message = (LogMessage) messageObject;
LoggingObjectInterface loggingObject = logObjProvider.apply( message.getLogChannelId() );
if ( loggingObject == null || ( loggingObject.getObjectType() == GENERAL && "Y".equals( EnvUtil.getSystemProperty( Const.KETTLE_LOG_GENERAL_OBJECTS_TO_DI_LOGGER ) ) ) ) {
// this can happen if logObject has been discarded while log events are still in flight.
logToLogger( diLogger, message.getLevel(),
message.getSubject() + " " + message.getMessage() );
} else if ( loggingObject.getObjectType() == TRANS || loggingObject.getObjectType() == STEP || loggingObject.getObjectType() == DATABASE ) {
logToLogger( transLogger, message.getLevel(), loggingObject, message );
} else if ( loggingObject.getObjectType() == JOB || loggingObject.getObjectType() == JOBENTRY ) {
logToLogger( jobLogger, message.getLevel(), loggingObject, message );
}
}
}
|
@Test
public void testAddLogEventNoRegisteredLogObject() {
listener.eventAdded( logEvent );
verify( diLogger ).info( messageSub + " " + msgText );
when( message.getLevel() ).thenReturn( ERROR );
listener.eventAdded( logEvent );
verify( diLogger ).error( messageSub + " " + msgText );
verifyNoInteractions( transLogger );
verifyNoInteractions( jobLogger );
}
|
public static Object getNestedFieldVal(GenericRecord record, String fieldName, boolean returnNullIfNotFound, boolean consistentLogicalTimestampEnabled) {
String[] parts = fieldName.split("\\.");
GenericRecord valueNode = record;
for (int i = 0; i < parts.length; i++) {
String part = parts[i];
Object val;
try {
val = HoodieAvroUtils.getFieldVal(valueNode, part, returnNullIfNotFound);
} catch (AvroRuntimeException e) {
if (returnNullIfNotFound) {
return null;
} else {
throw new HoodieException(
fieldName + "(Part -" + parts[i] + ") field not found in record. Acceptable fields were :"
+ valueNode.getSchema().getFields().stream().map(Field::name).collect(Collectors.toList()));
}
}
if (i == parts.length - 1) {
// return, if last part of name
if (val == null) {
return null;
} else {
Schema fieldSchema = valueNode.getSchema().getField(part).schema();
return convertValueForSpecificDataTypes(fieldSchema, val, consistentLogicalTimestampEnabled);
}
} else {
if (!(val instanceof GenericRecord)) {
if (returnNullIfNotFound) {
return null;
} else {
throw new HoodieException("Cannot find a record at part value :" + part);
}
} else {
valueNode = (GenericRecord) val;
}
}
}
// This can only be reached if the length of parts is 0
if (returnNullIfNotFound) {
return null;
} else {
throw new HoodieException(
fieldName + " field not found in record. Acceptable fields were :"
+ valueNode.getSchema().getFields().stream().map(Field::name).collect(Collectors.toList()));
}
}
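To make the control flow easier to follow, here is a simplified sketch of the same dotted-path walk over plain nested Maps instead of Avro GenericRecords; all names below are invented for the illustration, and type conversion plus Avro-specific error messages are omitted.
import java.util.Map;

// Simplified illustration of the dotted-path traversal above, using nested Maps.
public final class NestedLookup {
    @SuppressWarnings("unchecked")
    public static Object getNested(Map<String, Object> record, String fieldName, boolean returnNullIfNotFound) {
        String[] parts = fieldName.split("\\.");
        Map<String, Object> node = record;
        for (int i = 0; i < parts.length; i++) {
            if (!node.containsKey(parts[i])) {
                if (returnNullIfNotFound) {
                    return null;
                }
                throw new IllegalArgumentException(parts[i] + " not found; available fields: " + node.keySet());
            }
            Object val = node.get(parts[i]);
            if (i == parts.length - 1) {
                return val; // last segment of the path: this is the value
            }
            if (!(val instanceof Map)) {
                if (returnNullIfNotFound) {
                    return null;
                }
                throw new IllegalArgumentException("No nested record at part: " + parts[i]);
            }
            node = (Map<String, Object>) val; // descend one level
        }
        return null; // only reachable for an empty path
    }

    public static void main(String[] args) {
        Map<String, Object> rec = Map.of("a", Map.of("b", 42));
        System.out.println(getNested(rec, "a.b", true)); // 42
        System.out.println(getNested(rec, "a.c", true)); // null
    }
}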
|
@Test
public void testGetNestedFieldValWithDecimalField() {
GenericRecord rec = new GenericData.Record(new Schema.Parser().parse(SCHEMA_WITH_DECIMAL_FIELD));
rec.put("key_col", "key");
BigDecimal bigDecimal = new BigDecimal("1234.5678");
ByteBuffer byteBuffer = ByteBuffer.wrap(bigDecimal.unscaledValue().toByteArray());
rec.put("decimal_col", byteBuffer);
Object decimalCol = HoodieAvroUtils.getNestedFieldVal(rec, "decimal_col", true, false);
assertEquals(bigDecimal, decimalCol);
Object obj = rec.get(1);
assertTrue(obj instanceof ByteBuffer);
ByteBuffer buffer = (ByteBuffer) obj;
assertEquals(0, buffer.position());
}
|
@Override
public SubmitApplicationResponse submitApplication(
SubmitApplicationRequest request) throws YarnException, IOException {
if (request == null || request.getApplicationSubmissionContext() == null
|| request.getApplicationSubmissionContext().getApplicationId() == null) {
routerMetrics.incrAppsFailedSubmitted();
String errMsg =
"Missing submitApplication request or applicationSubmissionContext information.";
RouterAuditLogger.logFailure(user.getShortUserName(), SUBMIT_NEW_APP, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, errMsg);
RouterServerUtil.logAndThrowException(errMsg, null);
}
long startTime = clock.getTime();
ApplicationId applicationId =
request.getApplicationSubmissionContext().getApplicationId();
List<SubClusterId> blacklist = new ArrayList<>();
try {
// We need to handle this situation:
// the user provides an expected number of submit retries,
// but if the number of active SubClusters is smaller than that at this time,
// we cap the retry count at the number of active SubClusters.
int activeSubClustersCount = federationFacade.getActiveSubClustersCount();
int actualRetryNums = Math.min(activeSubClustersCount, numSubmitRetries);
// Try calling the SubmitApplication method
SubmitApplicationResponse response =
((FederationActionRetry<SubmitApplicationResponse>) (retryCount) ->
invokeSubmitApplication(blacklist, request, retryCount)).
runWithRetries(actualRetryNums, submitIntervalTime);
if (response != null) {
long stopTime = clock.getTime();
routerMetrics.succeededAppsSubmitted(stopTime - startTime);
return response;
}
} catch (Exception e) {
routerMetrics.incrAppsFailedSubmitted();
RouterAuditLogger.logFailure(user.getShortUserName(), SUBMIT_NEW_APP, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, e.getMessage(), applicationId);
RouterServerUtil.logAndThrowException(e.getMessage(), e);
}
routerMetrics.incrAppsFailedSubmitted();
String msg = String.format("Application %s with appId %s failed to be submitted.",
request.getApplicationSubmissionContext().getApplicationName(), applicationId);
RouterAuditLogger.logFailure(user.getShortUserName(), SUBMIT_NEW_APP, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg, applicationId);
throw new YarnException(msg);
}
|
@Test
public void testSubmitApplication()
throws YarnException, IOException {
LOG.info("Test FederationClientInterceptor: Submit Application.");
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
SubmitApplicationRequest request = mockSubmitApplicationRequest(appId);
SubmitApplicationResponse response = interceptor.submitApplication(request);
Assert.assertNotNull(response);
SubClusterId scIdResult = stateStoreUtil.queryApplicationHomeSC(appId);
Assert.assertNotNull(scIdResult);
Assert.assertTrue(subClusters.contains(scIdResult));
}
|
public static String followGoogleRedirectIfNeeded(final String url) {
// If the url is a redirect from a Google search, extract the actual URL
try {
final URL decoded = stringToURL(url);
if (decoded.getHost().contains("google") && decoded.getPath().equals("/url")) {
return decodeUrlUtf8(Parser.matchGroup1("&url=([^&]+)(?:&|$)", url));
}
} catch (final Exception ignored) {
}
// URL is not a Google search redirect
return url;
}
|
@Test
void testFollowGoogleRedirect() {
assertEquals("https://www.youtube.com/watch?v=Hu80uDzh8RY",
Utils.followGoogleRedirectIfNeeded("https://www.google.it/url?sa=t&rct=j&q=&esrc=s&cd=&cad=rja&uact=8&url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DHu80uDzh8RY&source=video"));
assertEquals("https://www.youtube.com/watch?v=0b6cFWG45kA",
Utils.followGoogleRedirectIfNeeded("https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=video&cd=&cad=rja&uact=8&url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3D0b6cFWG45kA"));
assertEquals("https://soundcloud.com/ciaoproduction",
Utils.followGoogleRedirectIfNeeded("https://www.google.com/url?sa=t&url=https%3A%2F%2Fsoundcloud.com%2Fciaoproduction&rct=j&q=&esrc=s&source=web&cd="));
assertEquals("https://www.youtube.com/watch?v=Hu80uDzh8RY¶m=xyz",
Utils.followGoogleRedirectIfNeeded("https://www.youtube.com/watch?v=Hu80uDzh8RY¶m=xyz"));
assertEquals("https://www.youtube.com/watch?v=Hu80uDzh8RY&url=hello",
Utils.followGoogleRedirectIfNeeded("https://www.youtube.com/watch?v=Hu80uDzh8RY&url=hello"));
}
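followGoogleRedirectIfNeeded relies on two small helpers that are not shown here. A minimal sketch of plausible implementations (hypothetical; the real NewPipe utilities may differ):

import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLDecoder;

// Hypothetical sketches of the helpers assumed above.
final class UrlHelperSketch {
    private UrlHelperSketch() {
    }

    // Parse a string into a java.net.URL, surfacing malformed input as an exception.
    static URL stringToURL(final String url) throws MalformedURLException {
        return new URL(url);
    }

    // Decode a percent-encoded string as UTF-8, e.g. "%2Fwatch%3Fv%3D..." -> "/watch?v=...".
    static String decodeUrlUtf8(final String url) throws UnsupportedEncodingException {
        return URLDecoder.decode(url, "UTF-8");
    }
}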
|
public static int getIdleTimeout(URL url) {
int heartBeat = getHeartbeat(url);
// idleTimeout should be at least twice the heartbeat interval, to allow for possible client retries.
int idleTimeout = url.getParameter(Constants.HEARTBEAT_TIMEOUT_KEY, heartBeat * 3);
if (idleTimeout < heartBeat * 2) {
throw new IllegalStateException("idleTimeout < heartbeatInterval * 2");
}
return idleTimeout;
}
|
@Test
void testGetIdleTimeout() {
URL url1 = URL.valueOf("dubbo://127.0.0.1:12345?heartbeat=10000");
URL url2 = URL.valueOf("dubbo://127.0.0.1:12345?heartbeat=10000&heartbeat.timeout=50000");
URL url3 = URL.valueOf("dubbo://127.0.0.1:12345?heartbeat=10000&heartbeat.timeout=10000");
Assertions.assertEquals(UrlUtils.getIdleTimeout(url1), 30000);
Assertions.assertEquals(UrlUtils.getIdleTimeout(url2), 50000);
Assertions.assertThrows(RuntimeException.class, () -> UrlUtils.getIdleTimeout(url3));
}
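getIdleTimeout builds on a getHeartbeat helper that is not shown. Assuming it simply reads the heartbeat URL parameter with a fixed default (a sketch, not necessarily the actual Dubbo code), the first test case follows directly: heartbeat=10000 with no heartbeat.timeout yields 10000 * 3 = 30000.

// Hypothetical sketch of the assumed helper: read the "heartbeat" parameter,
// falling back to a default interval of 60 seconds when it is absent.
public static int getHeartbeat(URL url) {
    final int defaultHeartbeatMs = 60 * 1000;
    return url.getParameter("heartbeat", defaultHeartbeatMs);
}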
|
@Override
public void setConf(Configuration conf) {
if (conf != null) {
conf = addSecurityConfiguration(conf);
}
super.setConf(conf);
}
|
@Test
public void testFailoverWithInvalidFenceArg() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf = getHAConf();
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf);
assertEquals(-1, runTool("-failover", "nn1", "nn2", "notforcefence"));
}
|
@Override
public ProtobufSystemInfo.Section toProtobuf() {
return toProtobuf(ManagementFactory.getMemoryMXBean());
}
|
@Test
public void toSystemInfoSection() {
JvmStateSection underTest = new JvmStateSection(PROCESS_NAME);
ProtobufSystemInfo.Section section = underTest.toProtobuf();
assertThat(section.getName()).isEqualTo(PROCESS_NAME);
assertThat(section.getAttributesCount()).isPositive();
assertThat(section.getAttributesList()).extracting("key")
.contains(
"Max Memory (MB)", "Free Memory (MB)",
"Heap Max (MB)",
"System Load Average", "Threads");
}
|
List<Condition> run(boolean useKRaft) {
List<Condition> warnings = new ArrayList<>();
checkKafkaReplicationConfig(warnings);
checkKafkaBrokersStorage(warnings);
if (useKRaft) {
// Additional checks done for KRaft clusters
checkKRaftControllerStorage(warnings);
checkKRaftControllerCount(warnings);
checkKafkaMetadataVersion(warnings);
checkInterBrokerProtocolVersionInKRaft(warnings);
checkLogMessageFormatVersionInKRaft(warnings);
} else {
// Additional checks done for ZooKeeper-based clusters
checkKafkaLogMessageFormatVersion(warnings);
checkKafkaInterBrokerProtocolVersion(warnings);
checkKRaftMetadataStorageConfiguredForZooBasedCLuster(warnings);
}
return warnings;
}
|
@Test
public void testMetadataVersionMatchesKafkaVersionWithLongVersion() {
Kafka kafka = new KafkaBuilder(KAFKA)
.editSpec()
.editKafka()
.withVersion(KafkaVersionTestUtils.LATEST_KAFKA_VERSION)
.withMetadataVersion(KafkaVersionTestUtils.LATEST_METADATA_VERSION + "-IV0")
.endKafka()
.endSpec()
.build();
KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, POOL_A), new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), null, null, KafkaVersionTestUtils.LATEST_METADATA_VERSION));
List<Condition> warnings = checker.run(true);
assertThat(warnings, hasSize(0));
}
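One of the KRaft checks invoked by run(true) compares the configured metadata version against the Kafka version, which is what this test exercises with the "-IV0" suffix. A rough standalone sketch of that kind of comparison (hypothetical; the real Strimzi check may differ):

import java.util.List;

// Hypothetical sketch: warn when the major.minor part of the metadata version
// (e.g. "3.7-IV0" -> "3.7") differs from the Kafka version (e.g. "3.7.0" -> "3.7").
static void checkMetadataVersionMatchesKafkaVersion(String metadataVersion, String kafkaVersion,
                                                    List<String> warnings) {
    String metadataMajorMinor = metadataVersion.split("-")[0];
    String[] kafkaParts = kafkaVersion.split("\\.");
    String kafkaMajorMinor = kafkaParts[0] + "." + kafkaParts[1];
    if (!metadataMajorMinor.equals(kafkaMajorMinor)) {
        warnings.add("Metadata version " + metadataVersion
                + " does not match the configured Kafka version " + kafkaVersion);
    }
}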
|