focal_method | test_case
---|---
public JdbcUrl parse(final String jdbcUrl) {
Matcher matcher = CONNECTION_URL_PATTERN.matcher(jdbcUrl);
ShardingSpherePreconditions.checkState(matcher.matches(), () -> new UnrecognizedDatabaseURLException(jdbcUrl, CONNECTION_URL_PATTERN.pattern().replaceAll("%", "%%")));
String authority = matcher.group(AUTHORITY_GROUP_KEY);
ShardingSpherePreconditions.checkNotNull(authority, () -> new UnrecognizedDatabaseURLException(jdbcUrl, CONNECTION_URL_PATTERN.pattern().replaceAll("%", "%%")));
return new JdbcUrl(parseHostname(authority), parsePort(authority), matcher.group(PATH_GROUP_KEY), parseQueryProperties(matcher.group(QUERY_GROUP_KEY)));
}
|
@Test
void assertParseMySQLJdbcUrl() {
JdbcUrl actual = new StandardJdbcUrlParser().parse("jdbc:mysql://127.0.0.1:3306/demo_ds?useSSL=false&sessionVariables=group_concat_max_len=204800,SQL_SAFE_UPDATES=0");
assertThat(actual.getHostname(), is("127.0.0.1"));
assertThat(actual.getPort(), is(3306));
assertThat(actual.getDatabase(), is("demo_ds"));
assertThat(actual.getQueryProperties().size(), is(2));
assertThat(actual.getQueryProperties().getProperty("useSSL"), is(Boolean.FALSE.toString()));
assertThat(actual.getQueryProperties().getProperty("sessionVariables"), is("group_concat_max_len=204800,SQL_SAFE_UPDATES=0"));
}
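A quick sketch on a second vendor's URL, assuming the same StandardJdbcUrlParser/JdbcUrl API as above (the authority/path/query pattern is not MySQL-specific); the PostgreSQL URL here is illustrative only:
JdbcUrl url = new StandardJdbcUrlParser().parse("jdbc:postgresql://192.168.0.1:5432/foo_ds?ssl=true");
// url.getHostname() -> "192.168.0.1"
// url.getPort() -> 5432
// url.getDatabase() -> "foo_ds"
// url.getQueryProperties().getProperty("ssl") -> "true"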
|
@Override
public double quantile(double p) {
if (p < 0.0 || p > 1.0) {
throw new IllegalArgumentException("Invalid p: " + p);
}
if (p == 0.0) {
return 0;
}
if (p == 1.0) {
return n;
}
// Starting guess near peak of density.
// Expand interval until we bracket.
int kl, ku, inc = 1;
int k = Math.max(0, Math.min(n, (int) (n * p)));
if (p < cdf(k)) {
do {
k = Math.max(k - inc, 0);
inc *= 2;
} while (p < cdf(k) && k > 0);
kl = k;
ku = k + inc / 2;
} else {
do {
k = Math.min(k + inc, n + 1);
inc *= 2;
} while (p > cdf(k));
ku = k;
kl = k - inc / 2;
}
return quantile(p, kl, ku);
}
|
@Test
public void testQuantile() {
System.out.println("quantile");
BinomialDistribution instance = new BinomialDistribution(100, 0.3);
instance.rand();
assertEquals(0, instance.quantile(0), 1E-7);
assertEquals(0, instance.quantile(0.00000000000000001), 1E-7);
assertEquals(17, instance.quantile(0.001), 1E-7);
assertEquals(20, instance.quantile(0.01), 1E-7);
assertEquals(24, instance.quantile(0.1), 1E-7);
assertEquals(26, instance.quantile(0.2), 1E-7);
assertEquals(30, instance.quantile(0.5), 1E-7);
assertEquals(36, instance.quantile(0.9), 1E-7);
assertEquals(100, instance.quantile(1.0), 1E-7);
}
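The expanding bracket converges to the smallest k with p <= cdf(k); a minimal round-trip check, assuming the same BinomialDistribution API used by the test:
BinomialDistribution dist = new BinomialDistribution(100, 0.3);
int k = (int) dist.quantile(0.5); // 30, per the assertion above
// invariant at the returned value: cdf(k - 1) < p <= cdf(k)
assert dist.cdf(k - 1) < 0.5 && 0.5 <= dist.cdf(k);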
|
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
if (tradingRecord != null && !tradingRecord.isClosed()) {
Num entryPrice = tradingRecord.getCurrentPosition().getEntry().getNetPrice();
Num currentPrice = this.referencePrice.getValue(index);
Num threshold = this.stopLossThreshold.getValue(index);
if (tradingRecord.getCurrentPosition().getEntry().isBuy()) {
return currentPrice.isLessThan(entryPrice.minus(threshold));
} else {
return currentPrice.isGreaterThan(entryPrice.plus(threshold));
}
}
return false;
}
|
@Test
public void testNoStopLoss() {
ZonedDateTime initialEndDateTime = ZonedDateTime.now();
for (int i = 0; i < 10; i++) {
series.addBar(initialEndDateTime.plusDays(i), 100, 105, 95, 100);
}
AverageTrueRangeStopLossRule rule = new AverageTrueRangeStopLossRule(series, 5, 2);
// Enter long position
TradingRecord tradingRecord = new BaseTradingRecord();
tradingRecord.enter(0, series.numOf(100), series.numOf(1));
// Price stays within stop loss range
series.addBar(series.getLastBar().getEndTime().plusDays(1), 98, 102, 97, 98);
assertFalse(rule.isSatisfied(10, tradingRecord));
}
|
public PublisherAgreement signPublisherAgreement(UserData user) {
checkApiUrl();
var eclipseToken = checkEclipseToken(user);
var headers = new HttpHeaders();
headers.setContentType(MediaType.APPLICATION_JSON);
headers.setBearerAuth(eclipseToken.accessToken);
headers.setAccept(Arrays.asList(MediaType.APPLICATION_JSON));
var data = new SignAgreementParam(publisherAgreementVersion, user.getLoginName());
var request = new HttpEntity<>(data, headers);
var requestUrl = UrlUtil.createApiUrl(eclipseApiUrl, "openvsx", "publisher_agreement");
try {
var json = restTemplate.postForEntity(requestUrl, request, String.class);
// The request was successful: reactivate all previously published extensions
extensions.reactivateExtensions(user);
// Parse the response and store the publisher agreement metadata
return parseAgreementResponse(json);
} catch (RestClientException exc) {
String message = exc.getMessage();
var statusCode = HttpStatus.INTERNAL_SERVER_ERROR;
if (exc instanceof HttpStatusCodeException) {
var excStatus = ((HttpStatusCodeException) exc).getStatusCode();
// The endpoint yields 409 if the specified user has already signed a publisher agreement
if (excStatus == HttpStatus.CONFLICT) {
message = "A publisher agreement is already present for user " + user.getLoginName() + ".";
statusCode = HttpStatus.BAD_REQUEST;
} else if (excStatus == HttpStatus.BAD_REQUEST) {
var matcher = STATUS_400_MESSAGE.matcher(exc.getMessage());
if (matcher.matches()) {
message = matcher.group("message");
}
}
}
if (statusCode == HttpStatus.INTERNAL_SERVER_ERROR) {
message = "Request for signing publisher agreement failed: " + message;
}
String payload;
try {
payload = objectMapper.writeValueAsString(data);
} catch (JsonProcessingException exc2) {
payload = "<" + exc2.getMessage() + ">";
}
logger.error("Post request failed with URL: " + requestUrl + " Payload: " + payload, exc);
throw new ErrorResultException(message, statusCode);
}
}
|
@Test
public void testPublisherAgreementAlreadySigned() throws Exception {
var user = mockUser();
Mockito.when(restTemplate.postForEntity(any(String.class), any(), eq(String.class)))
.thenThrow(new HttpClientErrorException(HttpStatus.CONFLICT));
try {
eclipse.signPublisherAgreement(user);
fail("Expected an ErrorResultException");
} catch (ErrorResultException exc) {
assertThat(exc.getMessage()).isEqualTo("A publisher agreement is already present for user test.");
}
}
|
public boolean intersectsCircle(double pointX, double pointY, double radius) {
double halfWidth = getWidth() / 2;
double halfHeight = getHeight() / 2;
double centerDistanceX = Math.abs(pointX - getCenterX());
double centerDistanceY = Math.abs(pointY - getCenterY());
// is the circle far enough away from the rectangle?
if (centerDistanceX > halfWidth + radius) {
return false;
} else if (centerDistanceY > halfHeight + radius) {
return false;
}
// is the circle close enough to the rectangle?
if (centerDistanceX <= halfWidth) {
return true;
} else if (centerDistanceY <= halfHeight) {
return true;
}
double cornerDistanceX = centerDistanceX - halfWidth;
double cornerDistanceY = centerDistanceY - halfHeight;
return cornerDistanceX * cornerDistanceX + cornerDistanceY * cornerDistanceY <= radius * radius;
}
|
@Test
public void intersectsCircleTest() {
Rectangle rectangle1 = create(1, 2, 3, 4);
Assert.assertTrue(rectangle1.intersectsCircle(1, 2, 0));
Assert.assertTrue(rectangle1.intersectsCircle(1, 2, 1));
Assert.assertTrue(rectangle1.intersectsCircle(1, 2, 10));
Assert.assertTrue(rectangle1.intersectsCircle(2, 3, 0));
Assert.assertTrue(rectangle1.intersectsCircle(2, 3, 1));
Assert.assertTrue(rectangle1.intersectsCircle(2, 3, 10));
Assert.assertTrue(rectangle1.intersectsCircle(3.5, 4, 0.5));
Assert.assertTrue(rectangle1.intersectsCircle(3, 4.5, 0.5));
Assert.assertTrue(rectangle1.intersectsCircle(4, 4, 1));
Assert.assertTrue(rectangle1.intersectsCircle(4, 4, 10));
Assert.assertFalse(rectangle1.intersectsCircle(0, 0, 0));
Assert.assertFalse(rectangle1.intersectsCircle(0, 1, 0));
Assert.assertFalse(rectangle1.intersectsCircle(0, 1, 1));
Assert.assertFalse(rectangle1.intersectsCircle(3.5, 4, 0.49999));
Assert.assertFalse(rectangle1.intersectsCircle(3, 4.5, 0.49999));
Assert.assertFalse(rectangle1.intersectsCircle(4, 5, 1));
Assert.assertFalse(rectangle1.intersectsCircle(4, 5, 1.4));
}
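Worked numbers for the assertions above, assuming create(1, 2, 3, 4) builds the rectangle spanning x in [1, 3] and y in [2, 4], i.e. center (2, 3) with halfWidth = halfHeight = 1:
// (3.5, 4, 0.49999): centerDistanceX = 1.5 > halfWidth + radius = 1.49999 -> false
// (3.5, 4, 0.5): 1.5 > 1.5 is false, then centerDistanceY = 1 <= halfHeight -> true
// (4, 5, r): centerDistanceX = centerDistanceY = 2, so the corner branch runs for r >= 1;
// squared distance to the nearest corner (3, 4) is 1 + 1 = 2, so intersection
// needs r >= sqrt(2) ~ 1.4142, which is why both r = 1 and r = 1.4 fail.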
|
@Override
public void dumpAsCsv(ScoreMatrix scoreMatrix) {
if (configuration.getBoolean("sonar.filemove.dumpCsv").orElse(false)) {
try {
Path tempFile = fs.getTempDir().toPath()
.resolve(String.format("score-matrix-%s.csv", ceTask.getUuid()));
try (BufferedWriter writer = Files.newBufferedWriter(tempFile, UTF_8)) {
writer.write(scoreMatrix.toCsv(';'));
}
LOG.info("File move similarity score matrix dumped as CSV: {}", tempFile);
} catch (IOException e) {
LOG.error("Failed to dump ScoreMatrix as CSV", e);
}
}
}
|
@Test
public void dumpAsCsv_creates_csv_dump_of_score_matrix_if_property_is_true() throws IOException {
String taskUuid = "acme";
when(ceTask.getUuid()).thenReturn(taskUuid);
settings.setProperty("sonar.filemove.dumpCsv", "true");
underTest.dumpAsCsv(A_SCORE_MATRIX);
Collection<File> files = listDumpFilesForTaskUuid(taskUuid);
assertThat(files).hasSize(1);
assertThat(files.iterator().next()).hasContent(A_SCORE_MATRIX.toCsv(';'));
}
|
@Override
public String toString() {
return "NodeDetails{" +
"type=" + type +
", name='" + name + '\'' +
", host='" + host + '\'' +
", port=" + port +
", startedAt=" + startedAt +
'}';
}
|
@Test
public void verify_toString() {
String name = randomAlphanumeric(3);
String host = randomAlphanumeric(10);
int port = 1 + random.nextInt(10);
long startedAt = 1 + random.nextInt(666);
NodeDetails underTest = builderUnderTest
.setType(randomType)
.setName(name)
.setHost(host)
.setPort(port)
.setStartedAt(startedAt)
.build();
assertThat(underTest.toString())
.isEqualTo("NodeDetails{type=" + randomType + ", name='" + name + "', host='" + host + "', port=" + port + ", startedAt=" + startedAt + "}");
}
|
public void finishCreateReplicaTask(CreateReplicaTask task, TFinishTaskRequest request) {
long tabletId = task.getTabletId();
TabletSchedCtx tabletCtx = takeRunningTablets(tabletId);
if (tabletCtx == null) {
LOG.warn("tablet info does not exist, tablet:{} backend:{}", tabletId, task.getBackendId());
return;
}
// check if the create replica task succeeded
if (request.getTask_status().getStatus_code() != TStatusCode.OK) {
LOG.warn("create replica task failed: {}", request.getTask_status().getError_msgs().get(0));
finalizeTabletCtx(tabletCtx, TabletSchedCtx.State.CANCELLED, request.getTask_status().getError_msgs().get(0));
return;
}
Replica replica = tabletCtx.getTablet().getReplicaByBackendId(task.getBackendId());
if (replica == null) {
LOG.warn("replica dose not exist, tablet:{} backend:{}", tabletId, task.getBackendId());
finalizeTabletCtx(tabletCtx, TabletSchedCtx.State.CANCELLED, "replica not exist");
return;
}
// write edit log
replica.setState(ReplicaState.NORMAL);
TabletMeta meta = GlobalStateMgr.getCurrentState().getTabletInvertedIndex().getTabletMeta(tabletId);
ReplicaPersistInfo info = ReplicaPersistInfo.createForAdd(meta.getDbId(),
meta.getTableId(), meta.getPhysicalPartitionId(), meta.getIndexId(),
tabletId, replica.getBackendId(), replica.getId(), replica.getVersion(),
replica.getSchemaHash(), replica.getDataSize(), replica.getRowCount(),
replica.getLastFailedVersion(), replica.getLastSuccessVersion(),
replica.getMinReadableVersion());
GlobalStateMgr.getCurrentState().getEditLog().logAddReplica(info);
finalizeTabletCtx(tabletCtx, TabletSchedCtx.State.FINISHED, "create replica finished");
LOG.info("create replica for recovery successfully, tablet:{} backend:{}", tabletId, task.getBackendId());
}
|
@Test
public void testFinishCreateReplicaTask() {
long beId = 10001L;
long dbId = 10002L;
long tblId = 10003L;
long partitionId = 10004L;
long indexId = 10005L;
long tabletId = 10006L;
long replicaId = 10007L;
long schemaId = indexId;
TTabletSchema tabletSchema = SchemaInfo.newBuilder().setId(schemaId)
.setKeysType(DUP_KEYS)
.setShortKeyColumnCount((short) 1)
.setSchemaHash(-1)
.setStorageType(TStorageType.COLUMN)
.addColumn(new Column("k1", Type.INT))
.build().toTabletSchema();
CreateReplicaTask createReplicaTask = CreateReplicaTask.newBuilder()
.setNodeId(beId)
.setDbId(dbId)
.setTableId(tblId)
.setPartitionId(partitionId)
.setIndexId(indexId)
.setVersion(1)
.setTabletId(tabletId)
.setStorageMedium(TStorageMedium.HDD)
.setPrimaryIndexCacheExpireSec(1)
.setTabletType(TTabletType.TABLET_TYPE_DISK)
.setCompressionType(TCompressionType.LZ4_FRAME)
.setTabletSchema(tabletSchema)
.build();
TabletMeta tabletMeta = new TabletMeta(dbId, tblId, partitionId, indexId, -1, TStorageMedium.HDD);
Replica replica = new Replica(replicaId, beId, -1, Replica.ReplicaState.RECOVER);
tabletInvertedIndex.addTablet(tabletId, tabletMeta);
tabletInvertedIndex.addReplica(tabletId, replica);
TabletSchedCtx ctx = new TabletSchedCtx(TabletSchedCtx.Type.REPAIR,
dbId, tblId, partitionId, indexId, tabletId, System.currentTimeMillis());
LocalTablet tablet = new LocalTablet(tabletId);
tablet.addReplica(replica);
ctx.setTablet(tablet);
TabletScheduler tabletScheduler = new TabletScheduler(new TabletSchedulerStat());
TFinishTaskRequest request = new TFinishTaskRequest();
TStatus status = new TStatus();
status.setStatus_code(TStatusCode.OK);
request.setTask_status(status);
// failure test: running tablet ctx does not exist
tabletScheduler.finishCreateReplicaTask(createReplicaTask, request);
Assert.assertEquals(Replica.ReplicaState.RECOVER, replica.getState());
// failure test: request not ok
tabletScheduler.addToRunningTablets(ctx);
status.setStatus_code(TStatusCode.CANCELLED);
status.setError_msgs(Lists.newArrayList("canceled"));
tabletScheduler.finishCreateReplicaTask(createReplicaTask, request);
Assert.assertEquals(Replica.ReplicaState.RECOVER, replica.getState());
// success
tabletScheduler.addToRunningTablets(ctx);
status.setStatus_code(TStatusCode.OK);
tabletScheduler.finishCreateReplicaTask(createReplicaTask, request);
Assert.assertEquals(Replica.ReplicaState.NORMAL, replica.getState());
}
|
public ConfigOperateResult insertOrUpdate(String srcIp, String srcUser, ConfigInfo configInfo,
Map<String, Object> configAdvanceInfo) {
try {
ConfigInfoStateWrapper configInfoState = findConfigInfoState(configInfo.getDataId(), configInfo.getGroup(),
configInfo.getTenant());
if (configInfoState == null) {
return addConfigInfo(srcIp, srcUser, configInfo, configAdvanceInfo);
} else {
return updateConfigInfo(configInfo, srcIp, srcUser, configAdvanceInfo);
}
} catch (Exception exception) {
LogUtil.FATAL_LOG.error("[db-error] try to update or add config failed, {}", exception.getMessage(),
exception);
throw exception;
}
}
|
@Test
void testInsertOrUpdateOfException() {
String dataId = "dataId";
String group = "group";
String tenant = "tenant";
//mock get config state
Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}),
eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenReturn(null);
//mock insert config throw exception
long insertConfigInfoId = 12345678765L;
GeneratedKeyHolder generatedKeyHolder = TestCaseUtils.createGeneratedKeyHolder(insertConfigInfoId);
externalStorageUtilsMockedStatic.when(ExternalStorageUtils::createKeyHolder).thenReturn(generatedKeyHolder);
Mockito.when(jdbcTemplate.update(any(PreparedStatementCreator.class), any(KeyHolder.class)))
.thenThrow(new CannotGetJdbcConnectionException("mock fail"));
Map<String, Object> configAdvanceInfo = new HashMap<>();
configAdvanceInfo.put("config_tags", "tag1,tag2");
ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, null, "content");
try {
externalConfigInfoPersistService.insertOrUpdate("srcIp", "srcUser", configInfo, configAdvanceInfo);
assertTrue(false);
} catch (Exception e) {
assertEquals("mock fail", e.getMessage());
}
}
|
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException {
if (!(request instanceof HttpServletRequest) || !(response instanceof HttpServletResponse)
|| monitoringDisabled || !instanceEnabled) {
// if this is not an HTTP request or if monitoring is disabled, just pass the request along
chain.doFilter(request, response);
return;
}
final HttpServletRequest httpRequest = (HttpServletRequest) request;
final HttpServletResponse httpResponse = (HttpServletResponse) response;
if (httpRequest.getRequestURI().equals(getMonitoringUrl(httpRequest))) {
doMonitoring(httpRequest, httpResponse);
return;
}
if (!httpCounter.isDisplayed() || isRequestExcluded((HttpServletRequest) request)) {
// if this URL is excluded or if the http counter is disabled, do not monitor this HTTP request
chain.doFilter(request, response);
return;
}
doFilter(chain, httpRequest, httpResponse);
}
|
@Test
public void testDoFilterNoHttp() throws ServletException, IOException {
final FilterChain servletChain = createNiceMock(FilterChain.class);
final ServletRequest servletRequest = createNiceMock(ServletRequest.class);
final ServletResponse servletResponse = createNiceMock(ServletResponse.class);
replay(servletRequest);
replay(servletResponse);
replay(servletChain);
monitoringFilter.doFilter(servletRequest, servletResponse, servletChain);
verify(servletRequest);
verify(servletResponse);
verify(servletChain);
final FilterChain servletChain2 = createNiceMock(FilterChain.class);
final HttpServletRequest servletRequest2 = createNiceMock(HttpServletRequest.class);
final ServletResponse servletResponse2 = createNiceMock(ServletResponse.class);
replay(servletRequest2);
replay(servletResponse2);
replay(servletChain2);
monitoringFilter.doFilter(servletRequest2, servletResponse2, servletChain2);
verify(servletRequest2);
verify(servletResponse2);
verify(servletChain2);
}
|
public AmazonInfo build() {
return new AmazonInfo(Name.Amazon.name(), metadata);
}
|
@Test
public void payloadWithClassAfterMetadata() throws IOException {
String json = "{"
+ " \"metadata\": {"
+ " \"instance-id\": \"i-12345\""
+ " },"
+ " \"@class\": \"com.netflix.appinfo.AmazonInfo\""
+ "}";
AmazonInfo info = newMapper().readValue(json, AmazonInfo.class);
AmazonInfo expected = AmazonInfo.Builder.newBuilder()
.addMetadata(AmazonInfo.MetaDataKey.instanceId, "i-12345")
.build();
Assert.assertEquals(expected, nonCompact(info));
}
|
public Map<String, BitSet> findMatchingRecords(String fieldName, String fieldValue) {
Map<String, BitSet> matches = new HashMap<String, BitSet>();
for(HollowTypeReadState typeState : readEngine.getTypeStates()) {
augmentMatchingRecords(typeState, fieldName, fieldValue, matches);
}
return matches;
}
|
@Test
public void matchesRecordsOfAnyType() {
HollowFieldMatchQuery query = new HollowFieldMatchQuery(stateEngine);
Map<String, BitSet> matches = query.findMatchingRecords("id", "2");
Assert.assertEquals(2, matches.size());
Assert.assertEquals(1, matches.get("TypeA").cardinality());
Assert.assertTrue(matches.get("TypeA").get(1));
Assert.assertEquals(1, matches.get("TypeB").cardinality());
Assert.assertTrue(matches.get("TypeB").get(1));
}
|
public Optional<BigDecimal> convertToUsd(final BigDecimal amount, final String currency) {
if ("USD".equalsIgnoreCase(currency)) {
return Optional.of(amount);
}
return Optional.ofNullable(cachedFixerValues.get(currency.toUpperCase(Locale.ROOT)))
.map(conversionRate -> amount.divide(conversionRate, 2, RoundingMode.HALF_EVEN));
}
|
@Test
void convertToUsd() {
final CurrencyConversionManager currencyConversionManager = new CurrencyConversionManager(mock(FixerClient.class),
mock(CoinMarketCapClient.class),
mock(FaultTolerantRedisCluster.class),
Collections.emptyList(),
EXECUTOR,
Clock.systemUTC());
currencyConversionManager.setCachedFixerValues(Map.of("JPY", BigDecimal.valueOf(154.757008), "GBP", BigDecimal.valueOf(0.81196)));
assertEquals(Optional.of(new BigDecimal("17.50")),
currencyConversionManager.convertToUsd(new BigDecimal("17.50"), "USD"));
assertEquals(Optional.of(new BigDecimal("17.50")),
currencyConversionManager.convertToUsd(new BigDecimal("17.50"), "usd"));
assertEquals(Optional.empty(),
currencyConversionManager.convertToUsd(new BigDecimal("10.00"), "XYZ"));
assertEquals(Optional.of(new BigDecimal("12.92")),
currencyConversionManager.convertToUsd(new BigDecimal("2000"), "JPY"));
assertEquals(Optional.of(new BigDecimal("12.32")),
currencyConversionManager.convertToUsd(new BigDecimal("10"), "GBP"));
}
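The cached rates are foreign-currency units per USD, so conversion divides; worked values for the assertions above (scale 2, HALF_EVEN):
// 2000 JPY / 154.757008 = 12.9235... -> 12.92 USD
// 10 GBP / 0.81196 = 12.3158... -> 12.32 USD
BigDecimal usd = new BigDecimal("2000").divide(BigDecimal.valueOf(154.757008), 2, RoundingMode.HALF_EVEN); // 12.92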
|
@Override
public void run(DiagnosticsLogWriter writer) {
for (; ; ) {
Object item = logQueue.poll();
if (item == null) {
return;
}
if (item instanceof LifecycleEvent event) {
render(writer, event);
} else if (item instanceof MembershipEvent event) {
render(writer, event);
} else if (item instanceof MigrationState state) {
render(writer, state);
} else if (item instanceof ReplicaMigrationEvent event) {
render(writer, event);
} else if (item instanceof ConnectionEvent event) {
render(writer, event);
} else if (item instanceof Version version) {
render(writer, version);
}
}
}
|
@Test
public void testMembership() {
HazelcastInstance instance = hzFactory.newHazelcastInstance(config);
assertTrueEventually(() -> {
plugin.run(logWriter);
assertContains("MemberAdded[");
});
instance.shutdown();
assertTrueEventually(() -> {
plugin.run(logWriter);
assertContains("MemberRemoved[");
});
}
|
public String transform() throws ScanException {
StringBuilder stringBuilder = new StringBuilder();
compileNode(node, stringBuilder, new Stack<Node>());
return stringBuilder.toString();
}
|
@Test
public void LOGBACK744_withColon() throws ScanException {
String input = "%d{HH:mm:ss.SSS} host:${host} %logger{36} - %msg%n";
Node node = makeNode(input);
NodeToStringTransformer nodeToStringTransformer = new NodeToStringTransformer(node, propertyContainer0);
System.out.println(nodeToStringTransformer.transform());
assertEquals("%d{HH:mm:ss.SSS} host:local %logger{36} - %msg%n", nodeToStringTransformer.transform());
}
|
@Override
public void validateSmsCode(SmsCodeValidateReqDTO reqDTO) {
validateSmsCode0(reqDTO.getMobile(), reqDTO.getCode(), reqDTO.getScene());
}
|
@Test
public void validateSmsCode_used() {
// prepare parameters
SmsCodeValidateReqDTO reqDTO = randomPojo(SmsCodeValidateReqDTO.class, o -> {
o.setMobile("15601691300");
o.setScene(randomEle(SmsSceneEnum.values()).getScene());
});
// mock data
SqlConstants.init(DbType.MYSQL);
smsCodeMapper.insert(randomPojo(SmsCodeDO.class, o -> o.setMobile(reqDTO.getMobile())
.setScene(reqDTO.getScene()).setCode(reqDTO.getCode()).setUsed(true)
.setCreateTime(LocalDateTime.now())));
// invoke and assert the expected exception
assertServiceException(() -> smsCodeService.validateSmsCode(reqDTO),
SMS_CODE_USED);
}
|
public static String formatSql(final AstNode root) {
final StringBuilder builder = new StringBuilder();
new Formatter(builder).process(root, 0);
return StringUtils.stripEnd(builder.toString(), "\n");
}
|
@Test
public void shouldFormatSelectQueryCorrectly() {
final String statementString =
"CREATE STREAM S AS SELECT a.address->city FROM address a;";
final Statement statement = parseSingle(statementString);
assertThat(SqlFormatter.formatSql(statement), equalTo("CREATE STREAM S AS SELECT A.ADDRESS->CITY\n"
+ "FROM ADDRESS A\nEMIT CHANGES"));
}
|
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
super.userEventTriggered(ctx, evt);
if (evt instanceof Http2GoAwayFrame) {
Http2GoAwayFrame event = (Http2GoAwayFrame) evt;
ctx.close();
LOGGER.debug(
"Event triggered, event name is: " + event.name() + ", last stream id is: " + event.lastStreamId());
} else if (evt instanceof Http2ResetFrame) {
onResetRead(ctx, (Http2ResetFrame) evt);
}
}
|
@Test
void testUserEventTriggered() throws Exception {
// test Http2GoAwayFrame
Http2GoAwayFrame goAwayFrame = new DefaultHttp2GoAwayFrame(
Http2Error.NO_ERROR, ByteBufUtil.writeAscii(ByteBufAllocator.DEFAULT, "app_requested"));
handler.userEventTriggered(ctx, goAwayFrame);
Mockito.verify(ctx, Mockito.times(1)).close();
// test Http2ResetFrame
DefaultHttp2ResetFrame resetFrame = new DefaultHttp2ResetFrame(Http2Error.CANCEL);
handler.userEventTriggered(ctx, resetFrame);
Mockito.verify(ctx, Mockito.times(2)).close();
}
|
public boolean eval(StructLike data) {
return new EvalVisitor().eval(data);
}
|
@Test
public void testNotIn() {
assertThat(notIn("s", 7, 8, 9).literals()).hasSize(3);
assertThat(notIn("s", 7, 8.1, Long.MAX_VALUE).literals()).hasSize(3);
assertThat(notIn("s", "abc", "abd", "abc").literals()).hasSize(3);
assertThat(notIn("s").literals()).isEmpty();
assertThat(notIn("s", 5).literals()).hasSize(1);
assertThat(notIn("s", 5, 5).literals()).hasSize(2);
assertThat(notIn("s", Arrays.asList(5, 5)).literals()).hasSize(2);
assertThat(notIn("s", Collections.emptyList()).literals()).isEmpty();
Evaluator evaluator = new Evaluator(STRUCT, notIn("x", 7, 8, Long.MAX_VALUE));
assertThat(evaluator.eval(TestHelpers.Row.of(7, 8, null)))
.as("7 not in [7, 8] => false")
.isFalse();
assertThat(evaluator.eval(TestHelpers.Row.of(9, 8, null)))
.as("6 not in [7, 8] => true")
.isTrue();
Evaluator intSetEvaluator =
new Evaluator(STRUCT, notIn("x", Long.MAX_VALUE, Integer.MAX_VALUE, Long.MIN_VALUE));
assertThat(intSetEvaluator.eval(TestHelpers.Row.of(Integer.MAX_VALUE, 7.0, null)))
.as("Integer.MAX_VALUE not_in [Integer.MAX_VALUE] => false")
.isFalse();
assertThat(intSetEvaluator.eval(TestHelpers.Row.of(6, 6.8, null)))
.as("6 not_in [Integer.MAX_VALUE] => true")
.isTrue();
Evaluator integerEvaluator = new Evaluator(STRUCT, notIn("y", 7, 8, 9.1));
assertThat(integerEvaluator.eval(TestHelpers.Row.of(0, 7.0, null)))
.as("7.0 not in [7, 8, 9] => false")
.isFalse();
assertThat(integerEvaluator.eval(TestHelpers.Row.of(7, 9.1, null)))
.as("9.1 not in [7, 8, 9.1] => false")
.isFalse();
assertThat(integerEvaluator.eval(TestHelpers.Row.of(6, 6.8, null)))
.as("6.8 not in [7, 8, 9.1] => true")
.isTrue();
Evaluator structEvaluator = new Evaluator(STRUCT, notIn("s1.s2.s3.s4.i", 7, 8, 9));
assertThat(
structEvaluator.eval(
TestHelpers.Row.of(
7,
8,
null,
TestHelpers.Row.of(
TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(7)))))))
.as("7 not in [7, 8, 9] => false")
.isFalse();
assertThat(
structEvaluator.eval(
TestHelpers.Row.of(
6,
8,
null,
TestHelpers.Row.of(
TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(6)))))))
.as("6 not in [7, 8, 9] => true")
.isTrue();
StructType charSeqStruct = StructType.of(required(34, "s", Types.StringType.get()));
Evaluator charSeqEvaluator = new Evaluator(charSeqStruct, notIn("s", "abc", "abd", "abc"));
assertThat(charSeqEvaluator.eval(TestHelpers.Row.of(new Utf8("abc"))))
.as("utf8(abc) not in [string(abc), string(abd)] => false")
.isFalse();
assertThat(charSeqEvaluator.eval(TestHelpers.Row.of(new Utf8("abcd"))))
.as("utf8(abcd) not in [string(abc), string(abd)] => true")
.isTrue();
}
|
@SuppressJava6Requirement(reason = "Guarded with java version check")
static String base64(byte[] data) {
if (PlatformDependent.javaVersion() >= 8) {
return java.util.Base64.getEncoder().encodeToString(data);
}
String encodedString;
ByteBuf encodedData = Unpooled.wrappedBuffer(data);
try {
ByteBuf encoded = Base64.encode(encodedData);
try {
encodedString = encoded.toString(CharsetUtil.UTF_8);
} finally {
encoded.release();
}
} finally {
encodedData.release();
}
return encodedString;
}
|
@Test
public void testBase64() {
String base64 = WebSocketUtil.base64(EmptyArrays.EMPTY_BYTES);
assertNotNull(base64);
assertTrue(base64.isEmpty());
base64 = WebSocketUtil.base64("foo".getBytes(CharsetUtil.UTF_8));
assertEquals(base64, "Zm9v");
base64 = WebSocketUtil.base64("bar".getBytes(CharsetUtil.UTF_8));
ByteBuf src = Unpooled.wrappedBuffer(base64.getBytes(CharsetUtil.UTF_8));
try {
ByteBuf dst = Base64.decode(src);
try {
assertEquals(new String(ByteBufUtil.getBytes(dst), CharsetUtil.UTF_8), "bar");
} finally {
dst.release();
}
} finally {
src.release();
}
}
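Why "foo" encodes to "Zm9v": three input bytes regroup into four 6-bit indices, so no padding is needed:
// 'f' 'o' 'o' = 0x66 0x6F 0x6F = 01100110 01101111 01101111
// 6-bit groups: 011001 100110 111101 101111 = 25, 38, 61, 47
// alphabet lookups: 25 -> 'Z', 38 -> 'm', 61 -> '9', 47 -> 'v'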
|
@Override
public void commitSync() {
commitSync(Duration.ofMillis(defaultApiTimeoutMs));
}
|
@Test
public void testCommitSyncAwaitsCommitAsyncButDoesNotFail() {
final TopicPartition tp = new TopicPartition("foo", 0);
final CompletableFuture<Void> asyncCommitFuture = setUpConsumerWithIncompleteAsyncCommit(tp);
// Mock to complete sync event
completeCommitSyncApplicationEventSuccessfully();
// Commit async is not completed yet, so commit sync should wait for it to complete (time out)
assertThrows(TimeoutException.class, () -> consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(20)), Duration.ofMillis(100)));
// Complete exceptionally async commit event
asyncCommitFuture.completeExceptionally(new KafkaException("Test exception"));
// Commit async is completed exceptionally, but this will be handled by commit callback - commit sync should not fail.
assertDoesNotThrow(() -> consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(20)), Duration.ofMillis(100)));
}
|
public static Search<?> searchResourcesWithGenericParameters(String fhirStore) {
return new Search<>(fhirStore);
}
|
@Test
public void test_FhirIO_failedSearchesWithGenericParameters() {
FhirSearchParameter<List<String>> input = FhirSearchParameter.of("resource-type-1", null);
FhirIO.Search.Result searchResult =
pipeline
.apply(
Create.of(input)
.withCoder(FhirSearchParameterCoder.of(ListCoder.of(StringUtf8Coder.of()))))
.apply(
(FhirIO.Search<List<String>>)
FhirIO.searchResourcesWithGenericParameters("bad-store"));
PCollection<HealthcareIOError<String>> failed = searchResult.getFailedSearches();
PCollection<String> failedMsgIds =
failed.apply(
MapElements.into(TypeDescriptors.strings()).via(HealthcareIOError::getDataResource));
PAssert.that(failedMsgIds).containsInAnyOrder(input.toString());
PAssert.that(searchResult.getResources()).empty();
PAssert.that(searchResult.getKeyedResources()).empty();
pipeline.run();
}
|
@Override
public void validateAction( RepositoryOperation... operations ) throws KettleException {
for ( RepositoryOperation operation : operations ) {
switch ( operation ) {
case EXECUTE_TRANSFORMATION:
case EXECUTE_JOB:
checkOperationAllowed( EXECUTE_CONTENT_ACTION );
break;
case MODIFY_TRANSFORMATION:
case MODIFY_JOB:
checkOperationAllowed( CREATE_CONTENT_ACTION );
break;
case SCHEDULE_TRANSFORMATION:
case SCHEDULE_JOB:
checkOperationAllowed( SCHEDULE_CONTENT_ACTION );
break;
case MODIFY_DATABASE:
checkOperationAllowed( MODIFY_DATABASE_ACTION );
break;
case SCHEDULER_EXECUTE:
checkOperationAllowed( SCHEDULER_EXECUTE_ACTION );
break;
}
}
}
|
@Test( expected = KettleException.class )
public void exceptionThrown_WhenOperationNotAllowed_ExecuteOperation() throws Exception {
setOperationPermissions( IAbsSecurityProvider.EXECUTE_CONTENT_ACTION, false );
provider.validateAction( RepositoryOperation.EXECUTE_TRANSFORMATION );
}
|
@Override
public long sleepTime(final long attempt) {
checkArgument(attempt >= 0, "attempt must not be negative (%s)", attempt);
final long exponentialSleepTime = initialWait * Math.round(Math.pow(2, attempt));
return exponentialSleepTime >= 0 && exponentialSleepTime < maxWait
? exponentialSleepTime
: maxWait;
}
|
@Test
void testMaxSleepTime() {
final long sleepTime = new ExponentialWaitStrategy(1, 1).sleepTime(100);
assertThat(sleepTime).isEqualTo(1L);
}
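A few values of the doubling schedule, assuming initialWait = 100 and maxWait = 1000 (hypothetical settings; the test above uses 1 and 1):
// attempt 0 -> 100 * 2^0 = 100
// attempt 3 -> 100 * 2^3 = 800
// attempt 4 -> 1600, clipped to maxWait = 1000
// attempt 100 -> Math.round(Math.pow(2, 100)) saturates to Long.MAX_VALUE, the
// multiplication overflows to a negative long, and the >= 0 guard returns maxWait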
|
public long computeInflightTotalDiff() {
long diffTotal = 0L;
for (Entry<MessageQueue, OffsetWrapper> entry : this.offsetTable.entrySet()) {
diffTotal += entry.getValue().getPullOffset() - entry.getValue().getConsumerOffset();
}
return diffTotal;
}
|
@Test
public void testComputeInflightTotalDiff() {
ConsumeStats stats = new ConsumeStats();
MessageQueue messageQueue = Mockito.mock(MessageQueue.class);
OffsetWrapper offsetWrapper = Mockito.mock(OffsetWrapper.class);
Mockito.when(offsetWrapper.getBrokerOffset()).thenReturn(3L);
Mockito.when(offsetWrapper.getPullOffset()).thenReturn(2L);
stats.getOffsetTable().put(messageQueue, offsetWrapper);
MessageQueue messageQueue2 = Mockito.mock(MessageQueue.class);
OffsetWrapper offsetWrapper2 = Mockito.mock(OffsetWrapper.class);
Mockito.when(offsetWrapper2.getBrokerOffset()).thenReturn(3L);
Mockito.when(offsetWrapper2.getPullOffset()).thenReturn(2L);
stats.getOffsetTable().put(messageQueue2, offsetWrapper2);
// two entries, each with pullOffset 2 and unstubbed consumerOffset 0
Assert.assertEquals(4L, stats.computeInflightTotalDiff());
}
|
@Override
public List<Object> apply(ConsumerRecord<K, V> record) {
RecordTranslator<K, V> trans = topicToTranslator.getOrDefault(record.topic(), defaultTranslator);
return trans.apply(record);
}
|
@Test
public void testNullTranslation() {
ByTopicRecordTranslator<String, String> trans =
new ByTopicRecordTranslator<>((r) -> null, new Fields("key"));
ConsumerRecord<String, String> cr = new ConsumerRecord<>("TOPIC 1", 100, 100, "THE KEY", "THE VALUE");
assertNull(trans.apply(cr));
}
|
@Override
public Map<String, Metric> getMetrics() {
final Map<String, Metric> gauges = new HashMap<>();
for (String pool : POOLS) {
for (int i = 0; i < ATTRIBUTES.length; i++) {
final String attribute = ATTRIBUTES[i];
final String name = NAMES[i];
try {
final ObjectName on = new ObjectName("java.nio:type=BufferPool,name=" + pool);
mBeanServer.getMBeanInfo(on);
gauges.put(name(pool, name), new JmxAttributeGauge(mBeanServer, on, attribute));
} catch (JMException ignored) {
LOGGER.debug("Unable to load buffer pool MBeans, possibly running on Java 6");
}
}
}
return Collections.unmodifiableMap(gauges);
}
|
@Test
public void includesAGaugeForMappedCount() throws Exception {
final Gauge gauge = (Gauge) buffers.getMetrics().get("mapped.count");
when(mBeanServer.getAttribute(mapped, "Count")).thenReturn(100);
assertThat(gauge.getValue())
.isEqualTo(100);
}
|
@Override
public void close() {
try {
out.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
|
@Test
void close() {
out.append("Hello");
assertThat(bytes, bytes(equalTo("")));
out.close();
assertThat(bytes, bytes(equalTo("Hello")));
}
|
@Override
public ResourceUsage getMemory() {
return memory;
}
|
@Test
public void testLocalBrokerDataDeserialization() throws JsonProcessingException {
ObjectReader LOAD_REPORT_READER = ObjectMapperFactory.getMapper().reader()
.forType(LoadManagerReport.class);
String data = "{\"webServiceUrl\":\"http://10.244.2.23:8080\",\"webServiceUrlTls\":\"https://10.244.2.23:8081\",\"pulsarServiceUrlTls\":\"pulsar+ssl://10.244.2.23:6651\",\"persistentTopicsEnabled\":true,\"nonPersistentTopicsEnabled\":false,\"cpu\":{\"usage\":3.1577712104798255,\"limit\":100.0},\"memory\":{\"usage\":614.0,\"limit\":1228.0},\"directMemory\":{\"usage\":32.0,\"limit\":1228.0},\"bandwidthIn\":{\"usage\":0.0,\"limit\":0.0},\"bandwidthOut\":{\"usage\":0.0,\"limit\":0.0},\"msgThroughputIn\":0.0,\"msgThroughputOut\":0.0,\"msgRateIn\":0.0,\"msgRateOut\":0.0,\"lastUpdate\":1650886425227,\"lastStats\":{\"pulsar/pulsar/10.244.2.23:8080/0x00000000_0xffffffff\":{\"msgRateIn\":0.0,\"msgThroughputIn\":0.0,\"msgRateOut\":0.0,\"msgThroughputOut\":0.0,\"consumerCount\":0,\"producerCount\":0,\"topics\":1,\"cacheSize\":0}},\"numTopics\":1,\"numBundles\":1,\"numConsumers\":0,\"numProducers\":0,\"bundles\":[\"pulsar/pulsar/10.244.2.23:8080/0x00000000_0xffffffff\"],\"lastBundleGains\":[],\"lastBundleLosses\":[],\"brokerVersionString\":\"2.11.0-hw-0.0.4-SNAPSHOT\",\"protocols\":{},\"advertisedListeners\":{},\"bundleStats\":{\"pulsar/pulsar/10.244.2.23:8080/0x00000000_0xffffffff\":{\"msgRateIn\":0.0,\"msgThroughputIn\":0.0,\"msgRateOut\":0.0,\"msgThroughputOut\":0.0,\"consumerCount\":0,\"producerCount\":0,\"topics\":1,\"cacheSize\":0}},\"maxResourceUsage\":0.49645519256591797,\"loadReportType\":\"LocalBrokerData\"}";
LoadManagerReport localBrokerData = LOAD_REPORT_READER.readValue(data);
Assert.assertEquals(localBrokerData.getMemory().limit, 1228.0d, 0.0001f);
Assert.assertEquals(localBrokerData.getMemory().usage, 614.0d, 0.0001f);
Assert.assertEquals(localBrokerData.getMemory().percentUsage(), ((float) localBrokerData.getMemory().usage) / ((float) localBrokerData.getMemory().limit) * 100, 0.0001f);
}
|
@Override
public void execute(String commandName, BufferedReader reader, BufferedWriter writer)
throws Py4JException, IOException {
String targetObjectId = reader.readLine();
String methodName = reader.readLine();
List<Object> arguments = getArguments(reader);
ReturnObject returnObject = invokeMethod(methodName, targetObjectId, arguments);
String returnCommand = Protocol.getOutputCommand(returnObject);
logger.finest("Returning command: " + returnCommand);
writer.write(returnCommand);
writer.flush();
}
|
@Test
public void testVoidMethod() {
String inputCommand = target + "\nmethod2\nsThis is a\tString\\n\ne\n";
try {
command.execute("c", new BufferedReader(new StringReader(inputCommand)), writer);
assertEquals("!yv\n", sWriter.toString());
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
|
public static KeyValueBytesStoreSupplier persistentKeyValueStore(final String name) {
Objects.requireNonNull(name, "name cannot be null");
return new RocksDBKeyValueBytesStoreSupplier(name, false);
}
|
@Test
public void shouldCreateRocksDbStore() {
assertThat(
Stores.persistentKeyValueStore("store").get(),
allOf(not(instanceOf(RocksDBTimestampedStore.class)), instanceOf(RocksDBStore.class)));
}
|
public void setup(final Map<String, InternalTopicConfig> topicConfigs) {
log.info("Starting to setup internal topics {}.", topicConfigs.keySet());
final long now = time.milliseconds();
final long deadline = now + retryTimeoutMs;
final Map<String, Map<String, String>> streamsSideTopicConfigs = topicConfigs.values().stream()
.collect(Collectors.toMap(
InternalTopicConfig::name,
topicConfig -> topicConfig.properties(defaultTopicConfigs, windowChangeLogAdditionalRetention)
));
final Set<String> createdTopics = new HashSet<>();
final Set<String> topicStillToCreate = new HashSet<>(topicConfigs.keySet());
while (!topicStillToCreate.isEmpty()) {
final Set<NewTopic> newTopics = topicStillToCreate.stream()
.map(topicName -> new NewTopic(
topicName,
topicConfigs.get(topicName).numberOfPartitions(),
Optional.of(replicationFactor)
).configs(streamsSideTopicConfigs.get(topicName))
).collect(Collectors.toSet());
log.info("Going to create internal topics: " + newTopics);
final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics);
processCreateTopicResults(createTopicsResult, topicStillToCreate, createdTopics, deadline);
maybeSleep(Collections.singletonList(topicStillToCreate), deadline, "created");
}
log.info("Completed setup of internal topics {}.", topicConfigs.keySet());
}
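The loop above retries until every topic exists or the deadline passes, assuming (as its arguments suggest) that processCreateTopicResults prunes successfully created topics from topicStillToCreate and enforces the deadline:
// 1. translate each InternalTopicConfig into broker-side topic configs
// 2. issue createTopics for every topic still missing
// 3. processCreateTopicResults drops created topics from the retry set
// 4. maybeSleep, then repeat while the retry set is non-empty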
|
@Test
public void shouldThrowWhenCreateTopicsThrowsUnexpectedException() {
final AdminClient admin = mock(AdminClient.class);
final StreamsConfig streamsConfig = new StreamsConfig(config);
final InternalTopicManager topicManager = new InternalTopicManager(time, admin, streamsConfig);
final InternalTopicConfig internalTopicConfig = setupRepartitionTopicConfig(topic1, 1);
final KafkaFutureImpl<TopicMetadataAndConfig> createTopicFailFuture = new KafkaFutureImpl<>();
createTopicFailFuture.completeExceptionally(new IllegalStateException("Nobody expects the Spanish inquisition"));
final NewTopic newTopic = newTopic(topic1, internalTopicConfig, streamsConfig);
when(admin.createTopics(mkSet(newTopic)))
.thenAnswer(answer -> new MockCreateTopicsResult(mkMap(
mkEntry(topic1, createTopicFailFuture)
)));
assertThrows(StreamsException.class, () -> topicManager.setup(mkMap(
mkEntry(topic1, internalTopicConfig)
)));
}
|
public static HuaweiLtsLogCollectClient getHuaweiLtsLogCollectClient() {
return HUAWEI_LTS_LOG_COLLECT_CLIENT;
}
|
@Test
public void testGetHuaweiLtsLogCollectClient() {
Assertions.assertEquals(LoggingHuaweiLtsPluginDataHandler.getHuaweiLtsLogCollectClient().getClass(), HuaweiLtsLogCollectClient.class);
}
|
public IntervalSet negate() {
if (!isValid()) {
return IntervalSet.ALWAYS;
}
if (mStartMs == MIN_MS) {
if (mEndMs == MAX_MS) {
// this is ALWAYS, so the negation is never
return IntervalSet.NEVER;
}
return new IntervalSet(after(mEndMs));
}
// start is after min
if (mEndMs == MAX_MS) {
return new IntervalSet(before(mStartMs));
}
// start is after min, and end is before max. This requires 2 intervals.
return new IntervalSet(Lists.newArrayList(before(mStartMs), after(mEndMs)));
}
|
@Test
public void negateNever() {
List<Interval> neg = Interval.NEVER.negate().getIntervals();
Assert.assertEquals(1, neg.size());
Interval in = neg.get(0);
Assert.assertEquals(Interval.ALWAYS, in);
}
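The cases negate() distinguishes, as implemented above (MIN_MS/MAX_MS standing for unbounded ends):
// invalid interval (NEVER) -> ALWAYS
// [MIN_MS, MAX_MS] (ALWAYS) -> NEVER
// [MIN_MS, end] -> { after(end) }
// [start, MAX_MS] -> { before(start) }
// [start, end], both bounded -> { before(start), after(end) }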
|
@Override
public Set<Link> getDeviceIngressLinks(DeviceId deviceId) {
return filter(links.values(), link -> deviceId.equals(link.dst().deviceId()));
}
|
@Test
public final void testGetDeviceIngressLinks() {
LinkKey linkId1 = LinkKey.linkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2));
LinkKey linkId2 = LinkKey.linkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1));
LinkKey linkId3 = LinkKey.linkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
putLink(linkId1, DIRECT);
putLink(linkId2, DIRECT);
putLink(linkId3, DIRECT);
// DID1,P1 => DID2,P2
// DID2,P2 => DID1,P1
// DID1,P2 => DID2,P3
Set<Link> links1 = linkStore.getDeviceIngressLinks(DID2);
assertEquals(2, links1.size());
// only linkId2 terminates at DID1
Set<Link> links2 = linkStore.getDeviceIngressLinks(DID1);
assertEquals(1, links2.size());
assertLink(linkId2, DIRECT, links2.iterator().next());
}
|
@Override
public Decoder<Object> getMapValueDecoder() {
return mapValueDecoder;
}
|
@Test
public void shouldDeserializeTheMapCorrectly() throws Exception {
ByteBuf buf = ByteBufAllocator.DEFAULT.buffer();
buf.writeBytes(new ObjectMapper().writeValueAsBytes(map));
assertThat(mapCodec.getMapValueDecoder().decode(buf, new State()))
.isInstanceOf(Map.class)
.isEqualTo(map);
buf.release();
}
|
public void handle(SeckillWebMockRequestDTO request) {
prePreRequestHandlers.stream().sorted(Comparator.comparing(Ordered::getOrder))
.forEach(it -> {
try {
it.handle(request);
} catch (Exception e) {
log.warn("pre request handler error", e);
}
});
}
|
@Test
public void shouldHandleRequestEvenWhenOneHandlerFails() {
SeckillWebMockRequestDTO request = new SeckillWebMockRequestDTO();
doThrow(new RuntimeException()).when(handler1).handle(request);
doNothing().when(handler2).handle(request);
preRequestPipeline.handle(request);
verify(handler1, times(1)).handle(request);
verify(handler2, times(1)).handle(request);
}
|
CepRuntimeContext(final RuntimeContext runtimeContext) {
this.runtimeContext = checkNotNull(runtimeContext);
}
|
@Test
public void testCepRuntimeContext() {
final String taskName = "foobarTask";
final OperatorMetricGroup metricGroup =
UnregisteredMetricsGroup.createOperatorMetricGroup();
final int numberOfParallelSubtasks = 43;
final int indexOfSubtask = 42;
final int attemptNumber = 1337;
final String taskNameWithSubtask = "foobarTask (43/43)#1337";
final Map<String, String> globalJobParameters = new HashMap<>();
globalJobParameters.put("k1", "v1");
final ClassLoader userCodeClassLoader = mock(ClassLoader.class);
final DistributedCache distributedCache = mock(DistributedCache.class);
final boolean isObjectReused = true;
RuntimeContext mockedRuntimeContext = mock(RuntimeContext.class);
TaskInfoImpl taskInfo =
new TaskInfoImpl(
taskName,
numberOfParallelSubtasks,
indexOfSubtask,
numberOfParallelSubtasks,
attemptNumber);
when(mockedRuntimeContext.getTaskInfo()).thenReturn(taskInfo);
when(mockedRuntimeContext.getMetricGroup()).thenReturn(metricGroup);
when(mockedRuntimeContext.getGlobalJobParameters()).thenReturn(globalJobParameters);
when(mockedRuntimeContext.isObjectReuseEnabled()).thenReturn(isObjectReused);
when(mockedRuntimeContext.getUserCodeClassLoader()).thenReturn(userCodeClassLoader);
when(mockedRuntimeContext.getDistributedCache()).thenReturn(distributedCache);
RuntimeContext runtimeContext = new CepRuntimeContext(mockedRuntimeContext);
assertEquals(taskName, runtimeContext.getTaskInfo().getTaskName());
assertEquals(metricGroup, runtimeContext.getMetricGroup());
assertEquals(
numberOfParallelSubtasks,
runtimeContext.getTaskInfo().getNumberOfParallelSubtasks());
assertEquals(indexOfSubtask, runtimeContext.getTaskInfo().getIndexOfThisSubtask());
assertEquals(attemptNumber, runtimeContext.getTaskInfo().getAttemptNumber());
assertEquals(taskNameWithSubtask, runtimeContext.getTaskInfo().getTaskNameWithSubtasks());
assertEquals(globalJobParameters, runtimeContext.getGlobalJobParameters());
assertEquals(isObjectReused, runtimeContext.isObjectReuseEnabled());
assertEquals(userCodeClassLoader, runtimeContext.getUserCodeClassLoader());
assertEquals(distributedCache, runtimeContext.getDistributedCache());
try {
runtimeContext.getState(new ValueStateDescriptor<>("foobar", Integer.class, 42));
fail("Expected getState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getListState(new ListStateDescriptor<>("foobar", Integer.class));
fail("Expected getListState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getReducingState(
new ReducingStateDescriptor<>(
"foobar", mock(ReduceFunction.class), Integer.class));
fail("Expected getReducingState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getAggregatingState(
new AggregatingStateDescriptor<>(
"foobar", mock(AggregateFunction.class), Integer.class));
fail("Expected getAggregatingState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getMapState(
new MapStateDescriptor<>("foobar", Integer.class, String.class));
fail("Expected getMapState to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.addAccumulator("foobar", mock(Accumulator.class));
fail("Expected addAccumulator to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getAccumulator("foobar");
fail("Expected getAccumulator to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getIntCounter("foobar");
fail("Expected getIntCounter to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getLongCounter("foobar");
fail("Expected getLongCounter to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getDoubleCounter("foobar");
fail("Expected getDoubleCounter to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getHistogram("foobar");
fail("Expected getHistogram to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.hasBroadcastVariable("foobar");
fail("Expected hasBroadcastVariable to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getBroadcastVariable("foobar");
fail("Expected getBroadcastVariable to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
try {
runtimeContext.getBroadcastVariableWithInitializer(
"foobar", mock(BroadcastVariableInitializer.class));
fail(
"Expected getBroadcastVariableWithInitializer to fail with unsupported operation exception.");
} catch (UnsupportedOperationException e) {
// expected
}
}
|
public static URI getCanonicalUri(URI uri, int defaultPort) {
// skip if there is no authority, ie. "file" scheme or relative uri
String host = uri.getHost();
if (host == null) {
return uri;
}
String fqHost = canonicalizeHost(host);
int port = uri.getPort();
// short out if already canonical with a port
if (host.equals(fqHost) && port != -1) {
return uri;
}
// reconstruct the uri with the canonical host and port
try {
uri = new URI(uri.getScheme(), uri.getUserInfo(),
fqHost, (port == -1) ? defaultPort : port,
uri.getPath(), uri.getQuery(), uri.getFragment());
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
return uri;
}
|
@Test
public void testCanonicalUriWithPort() {
URI uri;
uri = NetUtils.getCanonicalUri(URI.create("scheme://host:123"), 456);
assertEquals("scheme://host.a.b:123", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme://host:123/"), 456);
assertEquals("scheme://host.a.b:123/", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme://host:123/path"), 456);
assertEquals("scheme://host.a.b:123/path", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme://host:123/path?q#frag"), 456);
assertEquals("scheme://host.a.b:123/path?q#frag", uri.toString());
}
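When the authority carries no explicit port, the default port is filled in during reconstruction; a sketch of that branch, assuming (as the test fixture seems to) a host resolver that canonicalizes "host" to "host.a.b":
// uri = NetUtils.getCanonicalUri(URI.create("scheme://host/path"), 456);
// -> "scheme://host.a.b:456/path" (canonical host and the default port 456)
// An input that is already canonical with an explicit port is returned unchanged.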
|
public void maybeFlushBatches(LeaderAndEpoch leaderAndEpoch) {
MetadataProvenance provenance = new MetadataProvenance(lastOffset, lastEpoch, lastContainedLogTimeMs);
LogDeltaManifest manifest = LogDeltaManifest.newBuilder()
.provenance(provenance)
.leaderAndEpoch(leaderAndEpoch)
.numBatches(numBatches)
.elapsedNs(totalBatchElapsedNs)
.numBytes(numBytes)
.build();
switch (transactionState) {
case STARTED_TRANSACTION:
case CONTINUED_TRANSACTION:
log.debug("handleCommit: not publishing since a transaction starting at {} is still in progress. " +
"{} batch(es) processed so far.", image.offset(), numBatches);
break;
case ABORTED_TRANSACTION:
log.debug("handleCommit: publishing empty delta between {} and {} from {} batch(es) " +
"since a transaction was aborted", image.offset(), manifest.provenance().lastContainedOffset(),
manifest.numBatches());
applyDeltaAndUpdate(new MetadataDelta.Builder().setImage(image).build(), manifest);
break;
case ENDED_TRANSACTION:
case NO_TRANSACTION:
if (log.isDebugEnabled()) {
log.debug("handleCommit: Generated a metadata delta between {} and {} from {} batch(es) in {} us.",
image.offset(), manifest.provenance().lastContainedOffset(),
manifest.numBatches(), NANOSECONDS.toMicros(manifest.elapsedNs()));
}
applyDeltaAndUpdate(delta, manifest);
break;
}
}
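Flush behavior by transaction state, summarizing the switch above:
// STARTED_TRANSACTION, CONTINUED_TRANSACTION -> hold the delta; nothing is published yet
// ABORTED_TRANSACTION -> publish an empty delta (the buffered records are discarded)
// ENDED_TRANSACTION, NO_TRANSACTION -> publish the accumulated delta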
|
@Test
public void testMultipleTransactionsInOneBatch() {
List<ApiMessageAndVersion> batchRecords = new ArrayList<>();
batchRecords.addAll(TOPIC_TXN_BATCH_1);
batchRecords.addAll(TOPIC_TXN_BATCH_2);
batchRecords.addAll(TXN_BEGIN_SINGLETON);
batchRecords.addAll(TOPIC_NO_TXN_BATCH);
batchRecords.addAll(TXN_END_SINGLETON);
MockMetadataUpdater updater = new MockMetadataUpdater();
MockFaultHandler faultHandler = new MockFaultHandler("testMultipleTransactionsInOneBatch");
MetadataBatchLoader batchLoader = loadSingleBatch(updater, faultHandler, batchRecords);
assertEquals(1, updater.updates);
assertEquals(0, updater.latestManifest.numBytes());
assertEquals(15, updater.latestImage.provenance().lastContainedOffset());
assertEquals(42, updater.latestImage.provenance().lastContainedEpoch());
assertNotNull(updater.latestImage.topics().getTopic("foo"));
assertNull(updater.latestImage.topics().getTopic("bar"));
batchLoader.maybeFlushBatches(LEADER_AND_EPOCH);
assertEquals(2, updater.updates);
assertEquals(100, updater.latestManifest.numBytes());
assertEquals(20, updater.latestImage.provenance().lastContainedOffset());
assertEquals(42, updater.latestImage.provenance().lastContainedEpoch());
assertNotNull(updater.latestImage.topics().getTopic("foo"));
assertNotNull(updater.latestImage.topics().getTopic("bar"));
}
|
public static SerializerAdapter createSerializerAdapter(Serializer serializer) {
final SerializerAdapter s;
if (serializer instanceof StreamSerializer streamSerializer) {
s = new StreamSerializerAdapter(streamSerializer);
} else if (serializer instanceof ByteArraySerializer arraySerializer) {
s = new ByteArraySerializerAdapter(arraySerializer);
} else {
throw new IllegalArgumentException("Serializer " + serializer.getClass().getName()
+ " must be an instance of either StreamSerializer or ByteArraySerializer");
}
return s;
}
|
@Test
public void testCreateSerializerAdapter() {
// ArrayStreamSerializer is an instance of StreamSerializer, hence used as the parameter
SerializerAdapter streamSerializerAdapter = SerializationUtil.createSerializerAdapter(new ArrayStreamSerializer());
assertEquals(streamSerializerAdapter.getClass(), StreamSerializerAdapter.class);
// CustomByteArraySerializer is an instance of ByteArraySerializer, hence used as the parameter
SerializerAdapter byteArraySerializerAdapter = SerializationUtil.createSerializerAdapter(new CustomByteArraySerializer());
assertEquals(byteArraySerializerAdapter.getClass(), ByteArraySerializerAdapter.class);
}
|
public static boolean exists(String name) {
name = getWellFormName(name);
return STRING_ENV_MAP.containsKey(name);
}
|
@Test
public void testExistsForBlankName() {
assertFalse(Env.exists(""));
assertFalse(Env.exists(" "));
assertFalse(Env.exists(null));
}
|
@CheckForNull
public String getExternalUserAuthentication() {
SecurityRealm realm = securityRealmFactory.getRealm();
return realm == null ? null : realm.getName();
}
|
@Test
public void getExternalUserAuthentication_whenNotDefined_shouldReturnNull() {
assertThat(commonSystemInformation.getExternalUserAuthentication())
.isNull();
}
|
public static Dish createDish(Recipe recipe) {
Map<Product, BigDecimal> calculatedRecipeToGram = new HashMap<>();
recipe.getIngredientsProportion().forEach(((product, proportion) -> {
calculatedRecipeToGram.put(product, recipe.getBasePortionInGrams()
.multiply(proportion.divide(BigDecimal.valueOf(100), 2, RoundingMode.FLOOR)));
}));
return new Dish(calculatedRecipeToGram, recipe);
}
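Each ingredient's grams come out as basePortionInGrams * proportion / 100, with the percentage division floored at 2 decimal places; a worked value with hypothetical numbers:
// basePortionInGrams = 300, proportion = 25 (%)
// 25 / 100 = 0.25 (scale 2, FLOOR); 300 * 0.25 = 75.00 g of that product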
|
@Test
void createDish_numberOfFillers() {
Dish dish = Dish.createDish(recipe, BigDecimal.valueOf(1300));
assertAll("Should be correct",
() -> assertEquals(3, dish.getNumberOfFillers().size()),
() -> assertEquals(1, dish.getNumberOfFillers().get(Filler.FAT))
);
}
|
public long computeExpirationTime(final String pHttpExpiresHeader, final String pHttpCacheControlHeader, final long pNow) {
final Long override = Configuration.getInstance().getExpirationOverrideDuration();
if (override != null) {
return pNow + override;
}
final long extension = Configuration.getInstance().getExpirationExtendedDuration();
final Long cacheControlDuration = getHttpCacheControlDuration(pHttpCacheControlHeader);
if (cacheControlDuration != null) {
return pNow + cacheControlDuration * 1000 + extension;
}
final Long httpExpiresTime = getHttpExpiresTime(pHttpExpiresHeader);
if (httpExpiresTime != null) {
return httpExpiresTime + extension;
}
return pNow + OpenStreetMapTileProviderConstants.DEFAULT_MAXIMUM_CACHED_FILE_AGE + extension;
}
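Precedence of the expiration sources, as implemented above (assuming getHttpCacheControlDuration extracts a max-age in seconds):
// 1. configured override -> pNow + override
// 2. Cache-Control max-age -> pNow + maxAge * 1000 + extension
// 3. Expires header -> httpExpiresTime + extension
// 4. neither header usable -> pNow + DEFAULT_MAXIMUM_CACHED_FILE_AGE + extension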
|
@Test
public void testCustomExpirationTimeWithHttpConnection() {
final long twentyMinutesInMillis = 20 * 60 * 1000;
final long thirtyMinutesInMillis = 30 * 60 * 1000;
final HttpURLConnection dummyConnection = new HttpURLConnection(null) {
@Override
public void disconnect() {
}
@Override
public boolean usingProxy() {
return false;
}
@Override
public void connect() {
}
@Override
public String getHeaderField(String name) {
return null;
}
};
final TileSourcePolicy tileSourcePolicy = new TileSourcePolicy() {
@Override
public long computeExpirationTime(String pHttpExpiresHeader, String pHttpCacheControlHeader, long pNow) {
return pNow + twentyMinutesInMillis;
}
@Override
public long computeExpirationTime(HttpURLConnection pHttpURLConnection, long pNow) {
return pNow + thirtyMinutesInMillis;
}
};
final long now = System.currentTimeMillis();
final long expected = now + thirtyMinutesInMillis;
Assert.assertEquals(
expected,
tileSourcePolicy.computeExpirationTime(dummyConnection, now));
}
|
public Map<String, String> clientTags() {
return data.clientTags().stream()
.collect(
Collectors.toMap(
clientTag -> new String(clientTag.key(), StandardCharsets.UTF_8),
clientTag -> new String(clientTag.value(), StandardCharsets.UTF_8)
)
);
}
|
@Test
public void shouldReturnMapOfClientTagsOnVersion11() {
final SubscriptionInfo info =
new SubscriptionInfo(11, LATEST_SUPPORTED_VERSION,
PID_1, "localhost:80", TASK_OFFSET_SUMS, IGNORED_UNIQUE_FIELD, IGNORED_ERROR_CODE, CLIENT_TAGS);
assertThat(info.clientTags(), is(CLIENT_TAGS));
}
|
public Map<String, String> getTypes(final Set<String> streamIds,
final Set<String> fields) {
final Map<String, Set<String>> allFieldTypes = this.get(streamIds);
final Map<String, String> result = new HashMap<>(fields.size());
fields.forEach(field -> {
final Set<String> fieldTypes = allFieldTypes.get(field);
typeFromFieldType(fieldTypes).ifPresent(s -> result.put(field, s));
});
return result;
}
|
@Test
void getTypesReturnsEmptyMapIfFieldTypesAreEmpty() {
final Pair<IndexFieldTypesService, StreamService> services = mockServices();
final FieldTypesLookup lookup = new FieldTypesLookup(services.getLeft(), services.getRight());
assertThat(lookup.getTypes(Set.of("SomeStream"), Set.of("somefield"))).isEmpty();
}
|
@Override
public void removeRule(final RuleData ruleData) {
String key = CacheKeyUtils.INST.getKey(ruleData);
CACHED_HANDLE.get().removeHandle(key);
FlowRuleManager.loadRules(FlowRuleManager.getRules()
.stream()
.filter(r -> !r.getResource().equals(key))
.collect(Collectors.toList()));
DegradeRuleManager.loadRules(DegradeRuleManager.getRules()
.stream()
.filter(r -> !r.getResource().equals(key))
.collect(Collectors.toList()));
}
|
@Test
public void removeRule() {
RuleData data = new RuleData();
data.setSelectorId("sentinel");
data.setId("removeRule");
SentinelHandle sentinelHandle = new SentinelHandle();
sentinelHandle.setFlowRuleCount(10);
sentinelHandle.setFlowRuleGrade(0);
sentinelHandle.setFlowRuleControlBehavior(0);
sentinelHandle.setDegradeRuleCount(1d);
sentinelHandle.setDegradeRuleGrade(0);
sentinelHandle.setDegradeRuleTimeWindow(5);
sentinelHandle.setDegradeRuleMinRequestAmount(5);
sentinelHandle.setDegradeRuleStatIntervals(10);
sentinelHandle.setDegradeRuleSlowRatioThreshold(0.5d);
data.setHandle(GsonUtils.getGson().toJson(sentinelHandle));
sentinelRuleHandle.handlerRule(data);
FlowRule flowRule = FlowRuleManager.getRules().get(0);
assertThat(flowRule.getCount(), is(10.0));
assertThat(flowRule.getResource(), is("sentinel_removeRule"));
DegradeRule degradeRule = DegradeRuleManager.getRules().get(0);
assertThat(degradeRule.getCount(), is(1.0));
assertThat(degradeRule.getResource(), is("sentinel_removeRule"));
sentinelRuleHandle.removeRule(data);
assertTrue(FlowRuleManager.getRules().isEmpty());
assertTrue(DegradeRuleManager.getRules().isEmpty());
}
|
@Override
public byte[] retrieveSecret(SecretIdentifier identifier) {
if (identifier != null && identifier.getKey() != null && !identifier.getKey().isEmpty()) {
try {
lock.lock();
loadKeyStore();
SecretKeyFactory factory = SecretKeyFactory.getInstance("PBE");
KeyStore.SecretKeyEntry secretKeyEntry = (KeyStore.SecretKeyEntry) keyStore.getEntry(identifier.toExternalForm(), protectionParameter);
//not found
if (secretKeyEntry == null) {
LOGGER.debug("requested secret {} not found", identifier.toExternalForm());
return null;
}
PBEKeySpec passwordBasedKeySpec = (PBEKeySpec) factory.getKeySpec(secretKeyEntry.getSecretKey(), PBEKeySpec.class);
//base64 encoded char[]
char[] base64secret = passwordBasedKeySpec.getPassword();
byte[] secret = SecretStoreUtil.base64Decode(base64secret);
passwordBasedKeySpec.clearPassword();
LOGGER.debug("retrieved secret {}", identifier.toExternalForm());
return secret;
} catch (Exception e) {
throw new SecretStoreException.RetrievalException(identifier, e);
} finally {
releaseLock(lock);
}
}
return null;
}
|
@Test
public void retrieveWithInvalidInput() {
assertThat(keyStore.retrieveSecret(null)).isNull();
}
|
@Override
public GroupAssignment assign(
GroupSpec groupSpec,
SubscribedTopicDescriber subscribedTopicDescriber
) throws PartitionAssignorException {
if (groupSpec.memberIds().isEmpty()) {
return new GroupAssignment(Collections.emptyMap());
} else if (groupSpec.subscriptionType() == SubscriptionType.HOMOGENEOUS) {
return assignHomogeneousGroup(groupSpec, subscribedTopicDescriber);
} else {
return assignHeterogeneousGroup(groupSpec, subscribedTopicDescriber);
}
}
|
@Test
public void testStaticMembership() throws PartitionAssignorException {
SubscribedTopicDescriber subscribedTopicMetadata = new SubscribedTopicDescriberImpl(
Collections.singletonMap(
topic1Uuid,
new TopicMetadata(
topic1Uuid,
topic1Name,
3,
Collections.emptyMap()
)
)
);
Map<String, MemberSubscriptionAndAssignmentImpl> members = new TreeMap<>();
members.put(memberA, new MemberSubscriptionAndAssignmentImpl(
Optional.empty(),
Optional.of("instanceA"),
Collections.singleton(topic1Uuid),
Assignment.EMPTY
));
members.put(memberB, new MemberSubscriptionAndAssignmentImpl(
Optional.empty(),
Optional.of("instanceB"),
Collections.singleton(topic1Uuid),
Assignment.EMPTY
));
GroupSpec groupSpec = new GroupSpecImpl(
members,
SubscriptionType.HOMOGENEOUS,
invertedTargetAssignment(members)
);
GroupAssignment initialAssignment = assignor.assign(
groupSpec,
subscribedTopicMetadata
);
// Remove static memberA and add it back with a different member Id but same instance Id.
members.remove(memberA);
members.put("memberA1", new MemberSubscriptionAndAssignmentImpl(
Optional.empty(),
Optional.of("instanceA"),
Collections.singleton(topic1Uuid),
Assignment.EMPTY
));
groupSpec = new GroupSpecImpl(
members,
SubscriptionType.HOMOGENEOUS,
invertedTargetAssignment(members)
);
GroupAssignment reassignedAssignment = assignor.assign(
groupSpec,
subscribedTopicMetadata
);
// Assert that the assignment did not change
assertEquals(
initialAssignment.members().get(memberA).partitions(),
reassignedAssignment.members().get("memberA1").partitions()
);
assertEquals(
initialAssignment.members().get(memberB).partitions(),
reassignedAssignment.members().get(memberB).partitions()
);
}
|
public Statement buildStatement(final ParserRuleContext parseTree) {
return build(Optional.of(getSources(parseTree)), parseTree);
}
|
@Test
public void shouldBuildAssertTopic() {
// Given:
final SingleStatementContext stmt
= givenQuery("ASSERT TOPIC X;");
// When:
final AssertTopic assertTopic = (AssertTopic) builder.buildStatement(stmt);
// Then:
assertThat(assertTopic.getTopic(), is("X"));
assertThat(assertTopic.getConfig().size(), is(0));
assertThat(assertTopic.getTimeout(), is(Optional.empty()));
assertThat(assertTopic.checkExists(), is(true));
}
|
public ListStateDescriptor(String name, Class<T> elementTypeClass) {
super(name, new ListTypeInfo<>(elementTypeClass), null);
}
|
@Test
void testListStateDescriptor() throws Exception {
TypeSerializer<String> serializer =
new KryoSerializer<>(String.class, new SerializerConfigImpl());
ListStateDescriptor<String> descr = new ListStateDescriptor<>("testName", serializer);
assertThat(descr.getName()).isEqualTo("testName");
assertThat(descr.getSerializer()).isNotNull();
assertThat(descr.getSerializer()).isInstanceOf(ListSerializer.class);
assertThat(descr.getElementSerializer()).isNotNull();
assertThat(descr.getElementSerializer()).isEqualTo(serializer);
ListStateDescriptor<String> copy = CommonTestUtils.createCopySerializable(descr);
assertThat(copy.getName()).isEqualTo("testName");
assertThat(copy.getSerializer()).isNotNull();
assertThat(copy.getSerializer()).isInstanceOf(ListSerializer.class);
assertThat(copy.getElementSerializer()).isNotNull();
assertThat(copy.getElementSerializer()).isEqualTo(serializer);
}
|
public void initializeTypeState(Class<?> clazz) {
Objects.requireNonNull(clazz);
getTypeMapper(clazz, null, null);
}
|
@Test
public void testFailsToCreateSchemaIfThereAreDuplicateFields() {
try {
HollowObjectMapper mapper = new HollowObjectMapper(writeStateEngine);
mapper.initializeTypeState(Child.class);
Assert.fail("Expected Exception not thrown");
} catch (IllegalArgumentException e) {
Assert.assertEquals("Duplicate field name 'myField1' found in class hierarchy for class com.netflix.hollow.core.write.objectmapper.HollowObjectMapperTest$Child", e.getMessage());
}
}
|
@Override
public WatchKey register(final Watchable folder, final WatchEvent.Kind<?>[] events,
final WatchEvent.Modifier... modifiers) throws IOException {
if(null == monitor) {
monitor = FileSystems.getDefault().newWatchService();
}
final WatchKey key = folder.register(monitor, events, modifiers);
if(log.isInfoEnabled()) {
log.info(String.format("Registered for events for %s", key));
}
return key;
}
|
@Test
public void testRegister() throws Exception {
final RegisterWatchService fs = new NIOEventWatchService();
final Watchable folder = Paths.get(
File.createTempFile(UUID.randomUUID().toString(), "t").getParent());
final WatchKey key = fs.register(folder, new WatchEvent.Kind[]{ENTRY_CREATE, ENTRY_DELETE, ENTRY_MODIFY});
assertTrue(key.isValid());
fs.close();
assertFalse(key.isValid());
}
|
public static <E> ArrayList<E> newArrayList() {
return new ArrayList<>();
}
|
@Test
public void testAddToEmptyArrayList() {
List<String> list = Lists.newArrayList();
list.add("record1");
Assert.assertEquals(1, list.size());
Assert.assertEquals("record1", list.get(0));
}
|
@Override
public int sortTo(String destName, SortOrder order) {
return get(sortToAsync(destName, order));
}
|
@Test
public void testSortTo() {
RSet<String> list = redisson.getSet("list", IntegerCodec.INSTANCE);
list.add("1");
list.add("2");
list.add("3");
assertThat(list.sortTo("test3", SortOrder.DESC)).isEqualTo(3);
RList<String> list2 = redisson.getList("test3", StringCodec.INSTANCE);
assertThat(list2).containsExactly("3", "2", "1");
assertThat(list.sortTo("test4", SortOrder.ASC)).isEqualTo(3);
RList<String> list3 = redisson.getList("test4", StringCodec.INSTANCE);
assertThat(list3).containsExactly("1", "2", "3");
}
|
@Override
public URL getResource(String name) {
ClassLoadingStrategy loadingStrategy = getClassLoadingStrategy(name);
log.trace("Received request to load resource '{}'", name);
for (ClassLoadingStrategy.Source classLoadingSource : loadingStrategy.getSources()) {
URL url = null;
switch (classLoadingSource) {
case APPLICATION:
url = super.getResource(name);
break;
case PLUGIN:
url = findResource(name);
break;
case DEPENDENCIES:
url = findResourceFromDependencies(name);
break;
}
if (url != null) {
log.trace("Found resource '{}' in {} classpath", name, classLoadingSource);
return url;
} else {
log.trace("Couldn't find resource '{}' in {}", name, classLoadingSource);
}
}
return null;
}
|
@Test
void parentLastGetResourceExistsInBothParentAndPlugin() throws URISyntaxException, IOException {
URL resource = parentLastPluginClassLoader.getResource("META-INF/file-in-both-parent-and-plugin");
assertFirstLine("plugin", resource);
}
|
public static ConjunctFuture<Void> completeAll(
Collection<? extends CompletableFuture<?>> futuresToComplete) {
return new CompletionConjunctFuture(futuresToComplete);
}
|
@Test
void testCompleteAll() {
final CompletableFuture<String> inputFuture1 = new CompletableFuture<>();
final CompletableFuture<Integer> inputFuture2 = new CompletableFuture<>();
final List<CompletableFuture<?>> futuresToComplete =
Arrays.asList(inputFuture1, inputFuture2);
final FutureUtils.ConjunctFuture<Void> completeFuture =
FutureUtils.completeAll(futuresToComplete);
assertThat(completeFuture).isNotDone();
assertThat(completeFuture.getNumFuturesCompleted()).isZero();
assertThat(completeFuture.getNumFuturesTotal()).isEqualTo(futuresToComplete.size());
inputFuture2.complete(42);
assertThat(completeFuture).isNotDone();
assertThat(completeFuture.getNumFuturesCompleted()).isOne();
inputFuture1.complete("foobar");
assertThat(completeFuture).isDone();
assertThat(completeFuture.getNumFuturesCompleted()).isEqualTo(2);
assertThatFuture(completeFuture).eventuallySucceeds();
}
|
@Override
public List<Connection> getConnections(final String databaseName, final String dataSourceName, final int connectionOffset, final int connectionSize,
final ConnectionMode connectionMode) throws SQLException {
Preconditions.checkNotNull(databaseName, "Current database name is null.");
Collection<Connection> connections;
String cacheKey = getKey(databaseName, dataSourceName);
synchronized (cachedConnections) {
connections = cachedConnections.get(cacheKey);
}
List<Connection> result;
int maxConnectionSize = connectionOffset + connectionSize;
if (connections.size() >= maxConnectionSize) {
result = new ArrayList<>(connections).subList(connectionOffset, maxConnectionSize);
} else if (connections.isEmpty()) {
Collection<Connection> newConnections = createNewConnections(databaseName, dataSourceName, maxConnectionSize, connectionMode);
result = new ArrayList<>(newConnections).subList(connectionOffset, maxConnectionSize);
synchronized (cachedConnections) {
cachedConnections.putAll(cacheKey, newConnections);
}
executeTransactionHooksAfterCreateConnections(result);
} else {
List<Connection> allConnections = new ArrayList<>(maxConnectionSize);
allConnections.addAll(connections);
List<Connection> newConnections = createNewConnections(databaseName, dataSourceName, maxConnectionSize - connections.size(), connectionMode);
allConnections.addAll(newConnections);
result = allConnections.subList(connectionOffset, maxConnectionSize);
synchronized (cachedConnections) {
cachedConnections.putAll(cacheKey, newConnections);
}
}
return result;
}
|
@Test
void assertGetConnectionsAndFailedToReplaySessionVariables() throws SQLException {
connectionSession.getRequiredSessionVariableRecorder().setVariable("key", "value");
Connection connection = null;
SQLException expectedException = new SQLException("");
try {
connection = mock(Connection.class, RETURNS_DEEP_STUBS);
when(connection.getMetaData().getDatabaseProductName()).thenReturn("PostgreSQL");
when(connection.createStatement().execute("SET key=value")).thenThrow(expectedException);
when(ProxyContext.getInstance().getBackendDataSource().getConnections(anyString(), anyString(), anyInt(), any(ConnectionMode.class))).thenReturn(Collections.singletonList(connection));
databaseConnectionManager.getConnections(DefaultDatabase.LOGIC_NAME, "", 0, 1, ConnectionMode.CONNECTION_STRICTLY);
} catch (final SQLException ex) {
assertThat(ex, is(expectedException));
verify(connection).close();
}
}
|
public static Compression.Algorithm getHFileCompressionAlgorithm(Map<String, String> paramsMap) {
String algoName = paramsMap.get(HFILE_COMPRESSION_ALGORITHM_NAME.key());
if (StringUtils.isNullOrEmpty(algoName)) {
return Compression.Algorithm.GZ;
}
return Compression.Algorithm.valueOf(algoName.toUpperCase());
}
|
@Test
public void testGetDefaultHFileCompressionAlgorithm() {
assertEquals(Compression.Algorithm.GZ, getHFileCompressionAlgorithm(Collections.emptyMap()));
}
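A hedged companion check (not part of the source pair): when an algorithm name is supplied, it is upper-cased and resolved via Compression.Algorithm.valueOf, so "snappy" maps to SNAPPY. Assumes the HFILE_COMPRESSION_ALGORITHM_NAME key constant is accessible to the test, as the focal method suggests.
@Test
public void testGetExplicitHFileCompressionAlgorithm() {
// "snappy" is upper-cased before the valueOf lookup
assertEquals(Compression.Algorithm.SNAPPY,
getHFileCompressionAlgorithm(Collections.singletonMap(HFILE_COMPRESSION_ALGORITHM_NAME.key(), "snappy")));
}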
|
public void logSlowQuery(final Statement statement, final long startTimeNanos, final JdbcSessionContext context) {
if ( logSlowQuery < 1 ) {
return;
}
if ( startTimeNanos <= 0 ) {
throw new IllegalArgumentException( "startTimeNanos [" + startTimeNanos + "] should be greater than 0" );
}
final long queryExecutionMillis = elapsedFrom( startTimeNanos );
if ( queryExecutionMillis > logSlowQuery ) {
final String sql = statement.toString();
logSlowQueryInternal( context, queryExecutionMillis, sql );
}
}
|
@Test
public void testLogSlowQueryFromStatementWhenLoggingDisabled() {
SqlStatementLogger sqlStatementLogger = new SqlStatementLogger( false, false, false, 0L );
AtomicInteger callCounterToString = new AtomicInteger();
Statement statement = mockStatementForCountingToString( callCounterToString );
sqlStatementLogger.logSlowQuery( statement, System.nanoTime(), null );
assertEquals( 0, callCounterToString.get() );
}
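A hedged sketch of the guard clause: once slow-query logging is enabled (threshold > 0), a non-positive start time is rejected before the statement is ever rendered. Reuses the counting-statement helper from the test above and assumes JUnit's assertThrows is in scope.
@Test
public void testLogSlowQueryRejectsNonPositiveStartTime() {
SqlStatementLogger sqlStatementLogger = new SqlStatementLogger( false, false, false, 10L );
Statement statement = mockStatementForCountingToString( new AtomicInteger() );
// startTimeNanos <= 0 fails fast with an IllegalArgumentException
assertThrows( IllegalArgumentException.class,
() -> sqlStatementLogger.logSlowQuery( statement, 0, null ) );
}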
|
protected final void ensureCapacity(final int index, final int length)
{
if (index < 0 || length < 0)
{
throw new IndexOutOfBoundsException("negative value: index=" + index + " length=" + length);
}
final long resultingPosition = index + (long)length;
final int currentCapacity = capacity;
if (resultingPosition > currentCapacity)
{
if (resultingPosition > MAX_BUFFER_LENGTH)
{
throw new IndexOutOfBoundsException(
"index=" + index + " length=" + length + " maxCapacity=" + MAX_BUFFER_LENGTH);
}
final int newCapacity = calculateExpansion(currentCapacity, resultingPosition);
final ByteBuffer newBuffer = ByteBuffer.allocateDirect(newCapacity);
final long newAddress = address(newBuffer);
getBytes(0, newBuffer, 0, currentCapacity);
byteBuffer = newBuffer;
addressOffset = newAddress;
capacity = newCapacity;
}
}
|
@Test
void ensureCapacityThrowsIndexOutOfBoundsExceptionIfIndexIsNegative()
{
final ExpandableDirectByteBuffer buffer = new ExpandableDirectByteBuffer(1);
final IndexOutOfBoundsException exception =
assertThrowsExactly(IndexOutOfBoundsException.class, () -> buffer.ensureCapacity(-3, 4));
assertEquals("negative value: index=-3 length=4", exception.getMessage());
}
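A hedged growth sketch: when the requested range exceeds the current capacity, a larger direct buffer is allocated and the existing bytes are copied over. Assumes the test lives in the same package (ensureCapacity is protected) and that a capacity() accessor is exposed, as on Agrona buffers.
@Test
void ensureCapacityExpandsBufferWhenRangeExceedsCapacity()
{
final ExpandableDirectByteBuffer buffer = new ExpandableDirectByteBuffer(1);
buffer.ensureCapacity(0, 64);
// the buffer grew to cover at least the requested range
assertTrue(buffer.capacity() >= 64);
}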
|
public static void createDir(String path) throws IOException {
Files.createDirectories(Paths.get(path));
}
|
@Test
public void createDir() throws IOException {
File tempDir = new File(mTestFolder.getRoot(), "tmp");
FileUtils.createDir(tempDir.getAbsolutePath());
assertTrue(FileUtils.exists(tempDir.getAbsolutePath()));
assertTrue(tempDir.delete());
}
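A hedged idempotency check: Files.createDirectories does not fail when the directory already exists, so createDir should be safe to call twice. Reuses the mTestFolder fixture from the test above.
@Test
public void createDirIsIdempotent() throws IOException {
File tempDir = new File(mTestFolder.getRoot(), "tmp2");
FileUtils.createDir(tempDir.getAbsolutePath());
// second call is a no-op rather than an error
FileUtils.createDir(tempDir.getAbsolutePath());
assertTrue(FileUtils.exists(tempDir.getAbsolutePath()));
}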
|
static ProjectMeasuresQuery newProjectMeasuresQuery(List<Criterion> criteria, @Nullable Set<String> projectUuids) {
ProjectMeasuresQuery query = new ProjectMeasuresQuery();
Optional.ofNullable(projectUuids).ifPresent(query::setProjectUuids);
criteria.forEach(criterion -> processCriterion(criterion, query));
return query;
}
|
@Test
public void fail_to_create_query_on_qualifier_when_operator_is_not_equal() {
assertThatThrownBy(() -> {
newProjectMeasuresQuery(singletonList(Criterion.builder().setKey("qualifier").setOperator(GT).setValue("APP").build()), emptySet());
})
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Only equals operator is available for qualifier criteria");
}
|
public Map<String, Object> getKsqlStreamConfigProps(final String applicationId) {
final Map<String, Object> map = new HashMap<>(getKsqlStreamConfigProps());
map.put(
MetricCollectors.RESOURCE_LABEL_PREFIX
+ StreamsConfig.APPLICATION_ID_CONFIG,
applicationId
);
// Streams client metrics aren't used in Confluent deployment
possiblyConfigureConfluentTelemetry(map);
return Collections.unmodifiableMap(map);
}
|
@Test
public void shouldFailOnProductionErrorByDefault() {
final KsqlConfig ksqlConfig = new KsqlConfig(Collections.emptyMap());
final Object result = ksqlConfig.getKsqlStreamConfigProps()
.get(StreamsConfig.DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG);
assertThat(result, equalTo(LogAndFailProductionExceptionHandler.class));
}
|
@Override
public boolean askForNotificationPostPermission(@NonNull Activity activity) {
return PermissionRequestHelper.check(
activity, PermissionRequestHelper.NOTIFICATION_PERMISSION_REQUEST_CODE);
}
|
@Test
@Config(sdk = Build.VERSION_CODES.S_V2)
public void testAlwaysHavePermissionToPostNotification() {
try (var scenario = ActivityScenario.launch(TestFragmentActivity.class)) {
scenario
.moveToState(Lifecycle.State.RESUMED)
.onActivity(
activity -> {
Assert.assertTrue(mUnderTest.askForNotificationPostPermission(activity));
});
}
}
|
@Override
public void begin() {
if (!connection.getConnectionSession().getTransactionStatus().isInTransaction()) {
connection.getConnectionSession().getTransactionStatus().setInTransaction(true);
getTransactionContext().beginTransaction(String.valueOf(transactionType));
connection.closeHandlers(true);
connection.closeConnections(false);
}
for (TransactionHook each : transactionHooks) {
each.beforeBegin(getTransactionContext());
}
if (TransactionType.LOCAL == transactionType || null == distributionTransactionManager) {
localTransactionManager.begin();
} else {
distributionTransactionManager.begin();
}
for (TransactionHook each : transactionHooks) {
each.afterBegin(getTransactionContext());
}
}
|
@Test
void assertBeginForLocalTransaction() {
ContextManager contextManager = mockContextManager(TransactionType.LOCAL);
when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
newBackendTransactionManager(TransactionType.LOCAL, false);
backendTransactionManager.begin();
verify(transactionStatus).setInTransaction(true);
verify(databaseConnectionManager).closeHandlers(true);
verify(databaseConnectionManager).closeConnections(false);
verify(localTransactionManager).begin();
}
|
public static Builder newBuilder() {
return new AutoValue_DLPInspectText.Builder();
}
|
@Test
public void throwsExceptionWhenDeidentifyConfigAndTemplatesAreEmpty() {
assertThrows(
"Either inspectTemplateName or inspectConfig must be supplied!",
IllegalArgumentException.class,
() ->
DLPInspectText.newBuilder()
.setProjectId(PROJECT_ID)
.setBatchSizeBytes(BATCH_SIZE_SMALL)
.setColumnDelimiter(DELIMITER)
.build());
}
|
public Result doWork() {
if (prepared)
throw new IllegalStateException("Call doWork only once!");
prepared = true;
if (!graph.isFrozen()) {
throw new IllegalStateException("Given BaseGraph has not been frozen yet");
}
if (chStore.getShortcuts() > 0) {
throw new IllegalStateException("Given CHStore already contains shortcuts");
}
allSW.start();
initFromGraph();
runGraphContraction();
allSW.stop();
logFinalGraphStats();
return new Result(
chConfig, chStore,
nodeContractor.getAddedShortcutsCount(),
lazyUpdateSW.getCurrentSeconds(),
periodicUpdateSW.getCurrentSeconds(),
neighborUpdateSW.getCurrentSeconds(),
allSW.getMillis()
);
}
|
@Test
public void testMoreComplexGraph() {
initShortcutsGraph(g, speedEnc);
PrepareContractionHierarchies prepare = createPrepareContractionHierarchies(g);
useNodeOrdering(prepare, new int[]{0, 5, 6, 7, 8, 10, 11, 13, 15, 1, 3, 9, 14, 16, 12, 4, 2});
PrepareContractionHierarchies.Result res = prepare.doWork();
assertEquals(7, res.getShortcuts());
}
|
@Override
public void setNoMorePages()
{
PendingRead pendingRead;
synchronized (this) {
state.compareAndSet(NO_MORE_BUFFERS, FLUSHING);
noMorePages.set(true);
pendingRead = this.pendingRead;
this.pendingRead = null;
log.info("Task %s: %s pages and %s bytes was written into TempStorage", taskId, totalStoragePagesAdded.get(), totalStorageBytesAdded.get());
}
if (pendingRead != null) {
processPendingRead(pendingRead);
}
checkFlushComplete();
}
|
@Test
public void testSimpleInMemory()
{
SpoolingOutputBuffer buffer = createSpoolingOutputBuffer();
// add two pages
for (int i = 0; i < 2; i++) {
addPage(buffer, createPage(i));
}
compareTotalBuffered(buffer, 2);
assertBufferResultEquals(TYPES, getBufferResult(buffer, BUFFER_ID, 0, sizeOfPages(1), MAX_WAIT), bufferResult(0, createPage(0)));
compareTotalBuffered(buffer, 2);
assertBufferResultEquals(TYPES, getBufferResult(buffer, BUFFER_ID, 1, sizeOfPages(1), MAX_WAIT), bufferResult(1, createPage(1)));
compareTotalBuffered(buffer, 1);
buffer.setNoMorePages();
assertBufferResultEquals(TYPES, getBufferResult(buffer, BUFFER_ID, 2, sizeOfPages(1), NO_WAIT), emptyResults(TASK_INSTANCE_ID, 2, true));
compareTotalBuffered(buffer, 0);
}
|
public void setExpression(final String expression) throws IllegalExpressionException {
MQELexer lexer = new MQELexer(CharStreams.fromString(expression));
lexer.addErrorListener(new ParseErrorListener());
MQEParser parser = new MQEParser(new CommonTokenStream(lexer));
parser.addErrorListener(new ParseErrorListener());
ParseTree tree;
try {
tree = parser.expression();
} catch (ParseCancellationException e) {
throw new IllegalExpressionException("Expression: " + expression + " error: " + e.getMessage());
}
try {
TRACE_CONTEXT.set(new DebuggingTraceContext(expression, false, false));
AlarmMQEVerifyVisitor visitor = new AlarmMQEVerifyVisitor();
ExpressionResult parseResult = visitor.visit(tree);
if (StringUtil.isNotBlank(parseResult.getError())) {
throw new IllegalExpressionException("Expression: " + expression + " error: " + parseResult.getError());
}
if (!parseResult.isBoolResult()) {
throw new IllegalExpressionException(
"Expression: " + expression + " root operation is not a Compare Operation.");
}
if (ExpressionResultType.SINGLE_VALUE != parseResult.getType()) {
throw new IllegalExpressionException(
"Expression: " + expression + " is not a SINGLE_VALUE result expression.");
}
verifyIncludeMetrics(visitor.getIncludeMetrics(), expression);
this.expression = expression;
this.includeMetrics = visitor.getIncludeMetrics();
this.maxTrendRange = visitor.getMaxTrendRange();
} finally {
TRACE_CONTEXT.remove();
}
}
|
@Test
public void testExpressionVerify() throws IllegalExpressionException {
AlarmRule rule = new AlarmRule();
//normal common metric
rule.setExpression("sum(service_percent < 85) >= 3");
//normal labeled metric
//4xx + 5xx > 10
rule.setExpression("sum(aggregate_labels(meter_status_code{_='4xx,5xx'},sum) > 10) > 3");
rule.setExpression("sum(aggregate_labels(meter_status_code,sum) > 10) > 3");
//4xx or 5xx > 10
rule.setExpression("sum(meter_status_code{_='4xx,5xx'} > 10) >= 3");
rule.setExpression("sum(meter_status_code > 10) >= 3");
//illegal expression
Assertions.assertThrows(IllegalExpressionException.class, () -> {
rule.setExpression("what? sum(service_percent < 85) >= 3");
});
//not exist metric
Assertions.assertEquals(
"Expression: sum(service_percent111 < 85) >= 3 error: Metric: [service_percent111] dose not exist.",
Assertions.assertThrows(IllegalExpressionException.class, () -> {
rule.setExpression("sum(service_percent111 < 85) >= 3");
}).getMessage()
);
//root operation is not a Compare Operation
Assertions.assertEquals(
"Expression: sum(service_percent < 85) + 3 root operation is not a Compare Operation.",
Assertions.assertThrows(IllegalExpressionException.class, () -> {
rule.setExpression("sum(service_percent < 85) + 3");
}).getMessage()
);
//not a SINGLE_VALUE result expression
Assertions.assertEquals(
"Expression: service_percent < 85 is not a SINGLE_VALUE result expression.",
Assertions.assertThrows(IllegalExpressionException.class, () -> {
rule.setExpression("service_percent < 85");
}).getMessage()
);
//not a common or labeled metric
Assertions.assertEquals(
"Expression: sum(record < 85) > 1 error: Metric dose not supported in alarm, metric: [record] is not a common or labeled metric.",
Assertions.assertThrows(IllegalExpressionException.class, () -> {
rule.setExpression("sum(record < 85) > 1");
}).getMessage()
);
//metrics in expression must have the same scope level
Assertions.assertTrue(Assertions.assertThrows(IllegalExpressionException.class, () -> {
rule.setExpression("sum(service_percent > endpoint_percent) >= 1");
}).getMessage().contains("The metrics in expression: sum(service_percent > endpoint_percent) >= 1 must have the same scope level, but got:"));
//trend expression
rule.setExpression("sum((increase(service_percent,5) + increase(service_percent,2)) > 0) >= 1");
Assertions.assertEquals(5, rule.getMaxTrendRange());
}
|
@Override
public SccResult<V, E> search(Graph<V, E> graph, EdgeWeigher<V, E> weigher) {
SccResult<V, E> result = new SccResult<>(graph);
for (V vertex : graph.getVertexes()) {
VertexData data = result.data(vertex);
if (data == null) {
connect(graph, vertex, weigher, result);
}
}
return result.build();
}
|
@Test
public void twoWeaklyConnectedClusters() {
graph = new AdjacencyListsGraph<>(vertexes(),
of(new TestEdge(A, B),
new TestEdge(B, C),
new TestEdge(C, D),
new TestEdge(D, A),
new TestEdge(E, F),
new TestEdge(F, G),
new TestEdge(G, H),
new TestEdge(H, E),
new TestEdge(B, E)));
TarjanGraphSearch<TestVertex, TestEdge> gs = new TarjanGraphSearch<>();
SccResult<TestVertex, TestEdge> result = gs.search(graph, null);
validate(result, 2);
validate(result, 0, 4, 4);
validate(result, 1, 4, 4);
}
|
public static UriTemplate create(String template, Charset charset) {
return new UriTemplate(template, true, charset);
}
|
@Test
void encodeVariables() {
String template = "https://www.example.com/{first}/{last}";
UriTemplate uriTemplate = UriTemplate.create(template, Util.UTF_8);
Map<String, Object> variables = new LinkedHashMap<>();
variables.put("first", "John Jacob");
variables.put("last", "Jingleheimer Schmidt");
String expandedTemplate = uriTemplate.expand(variables);
assertThat(expandedTemplate)
.isEqualToIgnoringCase("https://www.example.com/John%20Jacob/Jingleheimer%20Schmidt");
}
|
public String write(final String rendered, final String inputFileName, final File outputPath) throws IOException {
Path writeOutputPath = outputFile(inputFileName, outputPath.getPath());
Files.writeString(writeOutputPath, rendered);
return writeOutputPath.toString();
}
|
@Test
public void testWrite() throws IOException {
Path path = Paths.get(temporaryDirectory.getPath());
String content = "x y z";
String outputPathStr = fileUtil.write(content, FOOBAR, path.toFile());
Path outputPath = Paths.get((outputPathStr));
String result = Files.readString(outputPath);
assertAll(
() -> assertTrue(Files.exists(outputPath)),
() -> assertTrue(result.length() > 0),
() -> assertTrue(result.startsWith("x")),
() -> assertTrue(result.endsWith("z")));
}
|
boolean isMapped(String userId) {
return idToDirectoryNameMap.containsKey(getIdStrategy().keyFor(userId));
}
|
@Test
public void testIsMapped() throws IOException {
UserIdMapper mapper = createUserIdMapper(IdStrategy.CASE_INSENSITIVE);
String user1 = "user1";
File directory = mapper.putIfAbsent(user1, true);
assertThat(mapper.isMapped(user1), is(true));
}
|
public static boolean isTimeoutException(Throwable exception) {
if (exception == null) return false;
if (exception instanceof ExecutionException) {
exception = exception.getCause();
if (exception == null) return false;
}
return exception instanceof TimeoutException;
}
|
@Test
public void testExecutionExceptionWithNullCauseIsNotTimeoutException() {
assertFalse(isTimeoutException(new ExecutionException(null)));
}
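A hedged companion check: the helper unwraps one level of ExecutionException, so a wrapped TimeoutException is still recognized while other wrapped causes are not.
@Test
public void testWrappedAndUnwrappedTimeoutExceptions() {
assertTrue(isTimeoutException(new TimeoutException()));
assertTrue(isTimeoutException(new ExecutionException(new TimeoutException())));
assertFalse(isTimeoutException(new ExecutionException(new RuntimeException())));
}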
|
@Override
@SuppressWarnings("rawtypes")
public void report(SortedMap<String, Gauge> gauges,
SortedMap<String, Counter> counters,
SortedMap<String, Histogram> histograms,
SortedMap<String, Meter> meters,
SortedMap<String, Timer> timers) {
final long timestamp = clock.getTime() / 1000;
// oh it'd be lovely to use Java 7 here
try {
graphite.connect();
for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
reportGauge(entry.getKey(), entry.getValue(), timestamp);
}
for (Map.Entry<String, Counter> entry : counters.entrySet()) {
reportCounter(entry.getKey(), entry.getValue(), timestamp);
}
for (Map.Entry<String, Histogram> entry : histograms.entrySet()) {
reportHistogram(entry.getKey(), entry.getValue(), timestamp);
}
for (Map.Entry<String, Meter> entry : meters.entrySet()) {
reportMetered(entry.getKey(), entry.getValue(), timestamp);
}
for (Map.Entry<String, Timer> entry : timers.entrySet()) {
reportTimer(entry.getKey(), entry.getValue(), timestamp);
}
graphite.flush();
} catch (IOException e) {
LOGGER.warn("Unable to report to Graphite", graphite, e);
} finally {
try {
graphite.close();
} catch (IOException e1) {
LOGGER.warn("Error closing Graphite", graphite, e1);
}
}
}
|
@Test
public void reportsLongGaugeValues() throws Exception {
reporter.report(map("gauge", gauge(1L)),
map(),
map(),
map(),
map());
final InOrder inOrder = inOrder(graphite);
inOrder.verify(graphite).connect();
inOrder.verify(graphite).send("prefix.gauge", "1", timestamp);
inOrder.verify(graphite).flush();
inOrder.verify(graphite).close();
verifyNoMoreInteractions(graphite);
}
|
@Override
public PipelineDef parse(Path pipelineDefPath, Configuration globalPipelineConfig)
throws Exception {
return parse(mapper.readTree(pipelineDefPath.toFile()), globalPipelineConfig);
}
|
@Test
void testOverridingGlobalConfig() throws Exception {
URL resource = Resources.getResource("definitions/pipeline-definition-full.yaml");
YamlPipelineDefinitionParser parser = new YamlPipelineDefinitionParser();
PipelineDef pipelineDef =
parser.parse(
Paths.get(resource.toURI()),
Configuration.fromMap(
ImmutableMap.<String, String>builder()
.put("parallelism", "1")
.build()));
assertThat(pipelineDef).isEqualTo(fullDefWithGlobalConf);
}
|
public static int compareVersion(final String versionA, final String versionB) {
final String[] sA = versionA.split("\\.");
final String[] sB = versionB.split("\\.");
int expectSize = 3;
if (sA.length != expectSize || sB.length != expectSize) {
throw new IllegalArgumentException("version must be like x.y.z(-beta)");
}
int first = Objects.compare(sA[0], sB[0], STRING_COMPARATOR);
if (first != 0) {
return first;
}
int second = Objects.compare(sA[1], sB[1], STRING_COMPARATOR);
if (second != 0) {
return second;
}
return Objects.compare(sA[2].split("-")[0], sB[2].split("-")[0], STRING_COMPARATOR);
}
|
@Test
void testVersionCompareGt() {
assertTrue(VersionUtils.compareVersion("1.2.2", "1.2.1") > 0);
assertTrue(VersionUtils.compareVersion("2.2.0", "1.2.0") > 0);
assertTrue(VersionUtils.compareVersion("1.3.0", "1.2.0") > 0);
}
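A hedged sketch of the edge cases: the third segment is split on '-' before comparison, so a pre-release suffix is ignored, and anything that is not three dot-separated segments is rejected up front.
@Test
void testVersionCompareIgnoresPreReleaseSuffixAndValidatesShape() {
// "3-beta" is truncated to "3" before comparison, so the versions are equal
assertEquals(0, VersionUtils.compareVersion("1.2.3-beta", "1.2.3"));
// only x.y.z(-suffix) shapes are accepted
assertThrows(IllegalArgumentException.class, () -> VersionUtils.compareVersion("1.2", "1.2.3"));
}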
|
@SuppressWarnings("unchecked")
public static <T> NFAFactory<T> compileFactory(
final Pattern<T, ?> pattern, boolean timeoutHandling) {
if (pattern == null) {
// return a factory for empty NFAs
return new NFAFactoryImpl<>(
0,
Collections.<String, Long>emptyMap(),
Collections.<State<T>>emptyList(),
timeoutHandling);
} else {
final NFAFactoryCompiler<T> nfaFactoryCompiler = new NFAFactoryCompiler<>(pattern);
nfaFactoryCompiler.compileFactory();
return new NFAFactoryImpl<>(
nfaFactoryCompiler.getWindowTime(),
nfaFactoryCompiler.getWindowTimes(),
nfaFactoryCompiler.getStates(),
timeoutHandling);
}
}
|
@Test
public void testMultipleWindowTimeWithZeroLength() {
Pattern<Event, ?> pattern =
Pattern.<Event>begin("start")
.followedBy("middle")
.within(Time.seconds(10))
.followedBy("then")
.within(Time.seconds(0))
.followedBy("end");
NFACompiler.NFAFactoryCompiler<Event> factory =
new NFACompiler.NFAFactoryCompiler<>(pattern);
factory.compileFactory();
assertEquals(0, factory.getWindowTime());
}
|
public final void isSameInstanceAs(@Nullable Object expected) {
if (actual != expected) {
failEqualityCheck(
SAME_INSTANCE,
expected,
/*
* Pass through *whether* the values are equal so that failEqualityCheck() can print that
* information. But remove the description of the difference, which is always about
* content, since people calling isSameInstanceAs() are explicitly not interested in
* content, only object identity.
*/
compareForEquality(expected).withoutDescription());
}
}
|
@Test
public void isSameInstanceAsWithSameObject() {
Object a = new Object();
Object b = a;
assertThat(a).isSameInstanceAs(b);
}
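A hedged counterpart: equal content is not enough for isSameInstanceAs, which checks reference identity only; two equal but distinct instances are not "same".
@Test
public void isNotSameInstanceAsWithDistinctEqualObjects() {
String a = new String("ab");
String b = new String("ab");
// equals() would be true here, but these are different objects
assertThat(a).isNotSameInstanceAs(b);
}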
|
PubSubMessage rowToMessage(Row row) {
row = castRow(row, row.getSchema(), schema);
PubSubMessage.Builder builder = PubSubMessage.newBuilder();
if (schema.hasField(MESSAGE_KEY_FIELD)) {
byte[] bytes = row.getBytes(MESSAGE_KEY_FIELD);
if (bytes != null) {
builder.setKey(ByteString.copyFrom(bytes));
}
}
if (schema.hasField(EVENT_TIMESTAMP_FIELD)) {
ReadableDateTime time = row.getDateTime(EVENT_TIMESTAMP_FIELD);
if (time != null) {
builder.setEventTime(Timestamps.fromMillis(time.getMillis()));
}
}
if (schema.hasField(ATTRIBUTES_FIELD)) {
Collection<Row> attributes = row.getArray(ATTRIBUTES_FIELD);
if (attributes != null) {
attributes.forEach(
entry -> {
AttributeValues.Builder valuesBuilder = AttributeValues.newBuilder();
Collection<byte[]> values =
checkArgumentNotNull(entry.getArray(ATTRIBUTES_VALUES_FIELD));
values.forEach(bytes -> valuesBuilder.addValues(ByteString.copyFrom(bytes)));
builder.putAttributes(
checkArgumentNotNull(entry.getString(ATTRIBUTES_KEY_FIELD)),
valuesBuilder.build());
});
}
}
if (payloadSerializer == null) {
byte[] payload = row.getBytes(PAYLOAD_FIELD);
if (payload != null) {
builder.setData(ByteString.copyFrom(payload));
}
} else {
Row payload = row.getRow(PAYLOAD_FIELD);
if (payload != null) {
builder.setData(ByteString.copyFrom(payloadSerializer.serialize(payload)));
}
}
return builder.build();
}
|
@Test
public void reorderRowToMessage() {
Schema schema =
Schema.builder()
.addByteArrayField(RowHandler.MESSAGE_KEY_FIELD)
.addByteArrayField(RowHandler.PAYLOAD_FIELD)
.build();
Schema rowSchema =
Schema.builder()
.addByteArrayField(RowHandler.PAYLOAD_FIELD)
.addByteArrayField(RowHandler.MESSAGE_KEY_FIELD)
.build();
RowHandler rowHandler = new RowHandler(schema);
Row row = Row.withSchema(rowSchema).attachValues("abc".getBytes(UTF_8), "def".getBytes(UTF_8));
PubSubMessage expected =
PubSubMessage.newBuilder()
.setData(ByteString.copyFromUtf8("abc"))
.setKey(ByteString.copyFromUtf8("def"))
.build();
assertEquals(expected, rowHandler.rowToMessage(row));
}
|
<T extends PipelineOptions> T as(Class<T> iface) {
checkNotNull(iface);
checkArgument(iface.isInterface(), "Not an interface: %s", iface);
T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
synchronized (this) {
// double check
existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
Registration<T> registration =
PipelineOptionsFactory.CACHE
.get()
.validateWellFormed(iface, computedProperties.knownInterfaces);
List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors();
Class<T> proxyClass = registration.getProxyClass();
existingOption =
InstanceBuilder.ofType(proxyClass)
.fromClass(proxyClass)
.withArg(InvocationHandler.class, this)
.build();
computedProperties =
computedProperties.updated(iface, existingOption, propertyDescriptors);
}
}
}
return existingOption;
}
|
@Test
public void testJsonConversionOfIgnoredProperty() throws Exception {
IgnoredProperty options = PipelineOptionsFactory.as(IgnoredProperty.class);
options.setValue("TestValue");
IgnoredProperty options2 = serializeDeserialize(IgnoredProperty.class, options);
assertNull(options2.getValue());
}
|
static StaticDataTask fromJson(JsonNode jsonNode) {
Preconditions.checkArgument(jsonNode != null, "Invalid JSON node for data task: null");
Preconditions.checkArgument(
jsonNode.isObject(), "Invalid JSON node for data task: non-object (%s)", jsonNode);
Schema schema = SchemaParser.fromJson(JsonUtil.get(SCHEMA, jsonNode));
Schema projectedSchema = SchemaParser.fromJson(JsonUtil.get(PROJECTED_SCHEMA, jsonNode));
DataFile metadataFile =
(DataFile)
ContentFileParser.fromJson(
JsonUtil.get(METADATA_FILE, jsonNode), PartitionSpec.unpartitioned());
JsonNode rowsArray = JsonUtil.get(ROWS, jsonNode);
Preconditions.checkArgument(
rowsArray.isArray(), "Invalid JSON node for rows: non-array (%s)", rowsArray);
StructLike[] rows = new StructLike[rowsArray.size()];
for (int i = 0; i < rowsArray.size(); ++i) {
JsonNode rowNode = rowsArray.get(i);
rows[i] = (StructLike) SingleValueParser.fromJson(schema.asStruct(), rowNode);
}
return new StaticDataTask(metadataFile, schema, projectedSchema, rows);
}
|
@Test
public void missingFields() throws Exception {
ObjectMapper mapper = new ObjectMapper();
String missingSchemaStr = "{}";
JsonNode missingSchemaNode = mapper.reader().readTree(missingSchemaStr);
assertThatThrownBy(() -> DataTaskParser.fromJson(missingSchemaNode))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Cannot parse missing field: schema");
String missingProjectionStr =
"{"
+ "\"schema\":{\"type\":\"struct\",\"schema-id\":0,"
+ "\"fields\":[{\"id\":1,\"name\":\"committed_at\",\"required\":true,\"type\":\"timestamptz\"}]}"
+ "}";
JsonNode missingProjectionNode = mapper.reader().readTree(missingProjectionStr);
assertThatThrownBy(() -> DataTaskParser.fromJson(missingProjectionNode))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Cannot parse missing field: projection");
String missingMetadataFileStr =
"{"
+ "\"schema\":{\"type\":\"struct\",\"schema-id\":0,"
+ "\"fields\":[{\"id\":1,\"name\":\"committed_at\",\"required\":true,\"type\":\"timestamptz\"}]},"
+ "\"projection\":{\"type\":\"struct\",\"schema-id\":0,"
+ "\"fields\":[{\"id\":1,\"name\":\"committed_at\",\"required\":true,\"type\":\"timestamptz\"}]}"
+ "}";
JsonNode missingMetadataFileNode = mapper.reader().readTree(missingMetadataFileStr);
assertThatThrownBy(() -> DataTaskParser.fromJson(missingMetadataFileNode))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Cannot parse missing field: metadata-file");
String missingTableRowsStr =
"{\"task-type\":\"data-task\","
+ "\"schema\":{\"type\":\"struct\",\"schema-id\":0,"
+ "\"fields\":[{\"id\":1,\"name\":\"committed_at\",\"required\":true,\"type\":\"timestamptz\"},"
+ "{\"id\":2,\"name\":\"snapshot_id\",\"required\":true,\"type\":\"long\"},"
+ "{\"id\":3,\"name\":\"parent_id\",\"required\":false,\"type\":\"long\"},"
+ "{\"id\":4,\"name\":\"operation\",\"required\":false,\"type\":\"string\"},"
+ "{\"id\":5,\"name\":\"manifest_list\",\"required\":false,\"type\":\"string\"},"
+ "{\"id\":6,\"name\":\"summary\",\"required\":false,\"type\":{\"type\":\"map\","
+ "\"key-id\":7,\"key\":\"string\",\"value-id\":8,"
+ "\"value\":\"string\",\"value-required\":true}}]},"
+ "\"projection\":{\"type\":\"struct\",\"schema-id\":0,"
+ "\"fields\":[{\"id\":1,\"name\":\"committed_at\",\"required\":true,\"type\":\"timestamptz\"},"
+ "{\"id\":2,\"name\":\"snapshot_id\",\"required\":true,\"type\":\"long\"},"
+ "{\"id\":3,\"name\":\"parent_id\",\"required\":false,\"type\":\"long\"},"
+ "{\"id\":4,\"name\":\"operation\",\"required\":false,\"type\":\"string\"},"
+ "{\"id\":5,\"name\":\"manifest_list\",\"required\":false,\"type\":\"string\"},"
+ "{\"id\":6,\"name\":\"summary\",\"required\":false,\"type\":{\"type\":\"map\","
+ "\"key-id\":7,\"key\":\"string\",\"value-id\":8,"
+ "\"value\":\"string\",\"value-required\":true}}]},"
+ "\"metadata-file\":{\"spec-id\":0,\"content\":\"DATA\","
+ "\"file-path\":\"/tmp/metadata2.json\","
+ "\"file-format\":\"METADATA\",\"partition\":{},"
+ "\"file-size-in-bytes\":0,\"record-count\":2,\"sort-order-id\":0}"
+ "}";
JsonNode missingTableRowsNode = mapper.reader().readTree(missingTableRowsStr);
assertThatThrownBy(() -> DataTaskParser.fromJson(missingTableRowsNode))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Cannot parse missing field: rows");
}
|
public org.slf4j.Logger logger() {
if (this.logger == null) {
LoggerContext loggerContext = new LoggerContext();
LogbackMDCAdapter mdcAdapter = new LogbackMDCAdapter();
loggerContext.setMDCAdapter(mdcAdapter);
loggerContext.start();
this.logger = loggerContext.getLogger(this.loggerName);
// unit tests don't need the log queue
if (this.logQueue != null && this.logEntry != null) {
ContextAppender contextAppender = new ContextAppender(this, this.logger, this.logQueue, this.logEntry);
contextAppender.setContext(loggerContext);
contextAppender.start();
this.logger.addAppender(contextAppender);
MDC.setContextMap(this.logEntry.toMap());
}
ForwardAppender forwardAppender = new ForwardAppender(this, this.logger);
forwardAppender.setContext(loggerContext);
forwardAppender.start();
this.logger.addAppender(forwardAppender);
this.logger.setLevel(this.loglevel);
this.logger.setAdditive(true);
}
return this.logger;
}
|
@Test
void logs() {
List<LogEntry> logs = new CopyOnWriteArrayList<>();
List<LogEntry> matchingLog;
Flux<LogEntry> receive = TestsUtils.receive(logQueue, either -> logs.add(either.getLeft()));
Flow flow = TestsUtils.mockFlow();
Execution execution = TestsUtils.mockExecution(flow, Map.of());
RunContextLogger runContextLogger = new RunContextLogger(
logQueue,
LogEntry.of(execution),
Level.TRACE
);
Logger logger = runContextLogger.logger();
logger.trace("trace");
logger.debug("debug");
logger.info("info");
logger.warn("warn");
logger.error("error");
matchingLog = TestsUtils.awaitLogs(logs, 5);
receive.blockLast();
assertThat(matchingLog.stream().filter(logEntry -> logEntry.getLevel().equals(Level.TRACE)).findFirst().orElse(null).getMessage(), is("trace"));
assertThat(matchingLog.stream().filter(logEntry -> logEntry.getLevel().equals(Level.DEBUG)).findFirst().orElse(null).getMessage(), is("debug"));
assertThat(matchingLog.stream().filter(logEntry -> logEntry.getLevel().equals(Level.INFO)).findFirst().orElse(null).getMessage(), is("info"));
assertThat(matchingLog.stream().filter(logEntry -> logEntry.getLevel().equals(Level.WARN)).findFirst().orElse(null).getMessage(), is("warn"));
assertThat(matchingLog.stream().filter(logEntry -> logEntry.getLevel().equals(Level.ERROR)).findFirst().orElse(null).getMessage(), is("error"));
}
|
public static Object[] realize(Object[] objs, Class<?>[] types) {
if (objs.length != types.length) {
throw new IllegalArgumentException("args.length != types.length");
}
Object[] dests = new Object[objs.length];
for (int i = 0; i < objs.length; i++) {
dests[i] = realize(objs[i], types[i]);
}
return dests;
}
|
@Test
void testException() throws Exception {
Map map = new HashMap();
map.put("message", "dubbo exception");
Object o = PojoUtils.realize(map, RuntimeException.class);
assertEquals(((Throwable) o).getMessage(), "dubbo exception");
}
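A hedged sketch of the array overload shown above: mismatched argument and type array lengths are rejected up front with an IllegalArgumentException.
@Test
void testRealizeArrayLengthMismatch() {
Object[] objs = new Object[] {"a", "b"};
Class<?>[] types = new Class<?>[] {String.class};
assertThrows(IllegalArgumentException.class, () -> PojoUtils.realize(objs, types));
}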
|
public Map<String, String> connectorBaseConfig(SourceAndTarget sourceAndTarget, Class<?> connectorClass) {
Map<String, String> props = new HashMap<>();
props.putAll(rawProperties);
props.keySet().retainAll(allConfigNames());
props.putAll(stringsWithPrefix(CONFIG_PROVIDERS_CONFIG));
props.putAll(stringsWithPrefix("replication.policy"));
Map<String, String> sourceClusterProps = clusterProps(sourceAndTarget.source());
// attrs not prefixed with producer|consumer|admin
props.putAll(clusterConfigsWithPrefix(SOURCE_CLUSTER_PREFIX, sourceClusterProps));
// attrs prefixed with producer|consumer|admin
props.putAll(clientConfigsWithPrefix(SOURCE_PREFIX, sourceClusterProps));
Map<String, String> targetClusterProps = clusterProps(sourceAndTarget.target());
props.putAll(clusterConfigsWithPrefix(TARGET_CLUSTER_PREFIX, targetClusterProps));
props.putAll(clientConfigsWithPrefix(TARGET_PREFIX, targetClusterProps));
props.putIfAbsent(NAME, connectorClass.getSimpleName());
props.putIfAbsent(CONNECTOR_CLASS, connectorClass.getName());
props.putIfAbsent(SOURCE_CLUSTER_ALIAS, sourceAndTarget.source());
props.putIfAbsent(TARGET_CLUSTER_ALIAS, sourceAndTarget.target());
// override with connector-level properties
props.putAll(stringsWithPrefixStripped(sourceAndTarget.source() + "->"
+ sourceAndTarget.target() + "."));
// disabled by default
props.putIfAbsent(MirrorConnectorConfig.ENABLED, "false");
// don't transform -- the worker will handle transformation of Connector and Task configs
return props;
}
|
@Test
public void testConfigBackwardsCompatibilitySourceTarget() {
MirrorMakerConfig mirrorConfig = new MirrorMakerConfig(makeProps(
"clusters", "a, b",
"source->target.topics.blacklist", "topic3",
"source->target.groups.blacklist", "group-7",
"topic.filter.class", DefaultTopicFilter.class.getName()));
SourceAndTarget sourceAndTarget = new SourceAndTarget("source", "target");
Map<String, String> connectorProps = mirrorConfig.connectorBaseConfig(sourceAndTarget,
MirrorSourceConnector.class);
MirrorCheckpointConfig connectorConfig = new MirrorCheckpointConfig(connectorProps);
DefaultTopicFilter.TopicFilterConfig filterConfig =
new DefaultTopicFilter.TopicFilterConfig(connectorProps);
assertEquals(Collections.singletonList("topic3"), filterConfig.getList("topics.exclude"),
"Topics exclude should be backwards compatible.");
assertEquals(Collections.singletonList("group-7"), connectorConfig.getList("groups.exclude"),
"Groups exclude should be backwards compatible.");
}
|
public static Duration longest(Duration duration1, Duration duration2) {
return duration1.compareTo(duration2) > 0 ? duration1 : duration2;
}
|
@Test
public void longest() {
Duration d1 = Duration.ofMinutes(1); // shorter
Duration d2 = Duration.ofMinutes(2); // longer
assertEquals(d2, TimeUtils.longest(d1, d2));
assertEquals(d2, TimeUtils.longest(d2, d1));
assertEquals(d1, TimeUtils.longest(d1, d1));
assertEquals(d2, TimeUtils.longest(d2, d2));
}
|
@Override
public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation)
throws BrokerCapacityResolutionException {
if (brokerId >= 0) {
BrokerCapacityInfo capacity = capacitiesForBrokers.get(brokerId);
if (capacity != null) {
return capacity;
} else {
if (allowCapacityEstimation) {
String info = String.format("Missing broker id(%d) in capacity config file.", brokerId);
return new BrokerCapacityInfo(capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info,
capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(),
capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores());
} else {
throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the "
+ "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing "
+ "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile));
}
}
} else {
throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative.");
}
}
|
@Test
public void testParseConfigJbodFile() throws TimeoutException, BrokerCapacityResolutionException {
BrokerCapacityConfigResolver configResolver = getBrokerCapacityConfigResolver("testCapacityConfigJBOD.json", this.getClass());
assertEquals(2000000.0, configResolver.capacityForBroker("", "", 0, BROKER_CAPACITY_FETCH_TIMEOUT_MS, false)
.capacity().get(Resource.DISK), 0.01);
assertEquals(2200000.0, configResolver.capacityForBroker("", "", 3, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true)
.capacity().get(Resource.DISK), 0.01);
assertEquals(200000.0, configResolver.capacityForBroker("", "", 3, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true)
.diskCapacityByLogDir().get("/tmp/kafka-logs-4"), 0.01);
assertFalse(configResolver.capacityForBroker("", "", 2, BROKER_CAPACITY_FETCH_TIMEOUT_MS, false).isEstimated());
assertTrue(configResolver.capacityForBroker("", "", 3, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true).isEstimated());
assertTrue(configResolver.capacityForBroker("", "", 3, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true).estimationInfo().length() > 0);
}
|
public static Optional<Expression> convert(
org.apache.flink.table.expressions.Expression flinkExpression) {
if (!(flinkExpression instanceof CallExpression)) {
return Optional.empty();
}
CallExpression call = (CallExpression) flinkExpression;
Operation op = FILTERS.get(call.getFunctionDefinition());
if (op != null) {
switch (op) {
case IS_NULL:
return onlyChildAs(call, FieldReferenceExpression.class)
.map(FieldReferenceExpression::getName)
.map(Expressions::isNull);
case NOT_NULL:
return onlyChildAs(call, FieldReferenceExpression.class)
.map(FieldReferenceExpression::getName)
.map(Expressions::notNull);
case LT:
return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);
case LT_EQ:
return convertFieldAndLiteral(
Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);
case GT:
return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);
case GT_EQ:
return convertFieldAndLiteral(
Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);
case EQ:
return convertFieldAndLiteral(
(ref, lit) -> {
if (NaNUtil.isNaN(lit)) {
return Expressions.isNaN(ref);
} else {
return Expressions.equal(ref, lit);
}
},
call);
case NOT_EQ:
return convertFieldAndLiteral(
(ref, lit) -> {
if (NaNUtil.isNaN(lit)) {
return Expressions.notNaN(ref);
} else {
return Expressions.notEqual(ref, lit);
}
},
call);
case NOT:
return onlyChildAs(call, CallExpression.class)
.flatMap(FlinkFilters::convert)
.map(Expressions::not);
case AND:
return convertLogicExpression(Expressions::and, call);
case OR:
return convertLogicExpression(Expressions::or, call);
case STARTS_WITH:
return convertLike(call);
}
}
return Optional.empty();
}
|
@Test
public void testNotEqualsNaN() {
UnboundPredicate<Float> expected = org.apache.iceberg.expressions.Expressions.notNaN("field3");
Optional<org.apache.iceberg.expressions.Expression> actual =
FlinkFilters.convert(
resolve(Expressions.$("field3").isNotEqual(Expressions.lit(Float.NaN))));
assertThat(actual).isPresent();
assertPredicatesMatch(expected, actual.get());
Optional<org.apache.iceberg.expressions.Expression> actual1 =
FlinkFilters.convert(
resolve(Expressions.lit(Float.NaN).isNotEqual(Expressions.$("field3"))));
assertThat(actual1).isPresent();
assertPredicatesMatch(expected, actual1.get());
}
|
public static SslProvider chooseSslProvider() {
// Use openssl only if available and has ALPN support (i.e. version > 1.0.2).
SslProvider sslProvider;
if (ALLOW_USE_OPENSSL.get() && OpenSsl.isAvailable() && SslProvider.isAlpnSupported(SslProvider.OPENSSL)) {
sslProvider = SslProvider.OPENSSL;
} else {
sslProvider = SslProvider.JDK;
}
return sslProvider;
}
|
@Test
void testDefaultSslProviderIsOpenSsl() {
assertEquals(SslProvider.OPENSSL, BaseSslContextFactory.chooseSslProvider());
}
|
@Override
public void cleanup() {
stopComponents();
}
|
@Test
public void cleanup_does_not_fail_even_if_stop_of_component_fails() {
parent.add(StopFailing.class);
MigrationContainerImpl underTest = new MigrationContainerImpl(parent, NoOpExecutor.class);
underTest.cleanup();
}
|
protected static VplsOperation getOptimizedVplsOperation(Deque<VplsOperation> operations) {
if (operations.isEmpty()) {
return null;
}
// no need to optimize if the queue contains only one operation
if (operations.size() == 1) {
return operations.getFirst();
}
final VplsOperation firstOperation = operations.peekFirst();
final VplsOperation lastOperation = operations.peekLast();
final VplsOperation.Operation firstOp = firstOperation.op();
final VplsOperation.Operation lastOp = lastOperation.op();
if (firstOp.equals(VplsOperation.Operation.REMOVE)) {
if (lastOp.equals(VplsOperation.Operation.REMOVE)) {
// case 1: both first and last operation are REMOVE; do remove
return firstOperation;
} else if (lastOp.equals(VplsOperation.Operation.ADD)) {
// case 2: if first is REMOVE, and last is ADD; do update
return VplsOperation.of(lastOperation.vpls(),
VplsOperation.Operation.UPDATE);
} else {
// case 3: first is REMOVE, last is UPDATE; do update
return lastOperation;
}
} else if (firstOp.equals(VplsOperation.Operation.ADD)) {
if (lastOp.equals(VplsOperation.Operation.REMOVE)) {
// case 4: first is ADD, last is REMOVE; nothing to do
return null;
} else if (lastOp.equals(VplsOperation.Operation.ADD)) {
// case 5: both first and last are ADD, do add
return VplsOperation.of(lastOperation.vpls(),
VplsOperation.Operation.ADD);
} else {
// case 6: first is ADD and last is update, do add
return VplsOperation.of(lastOperation.vpls(),
VplsOperation.Operation.ADD);
}
} else {
if (lastOp.equals(VplsOperation.Operation.REMOVE)) {
// case 7: last is remove, do remove
return lastOperation;
} else if (lastOp.equals(VplsOperation.Operation.ADD)) {
// case 8: do update only
return VplsOperation.of(lastOperation.vpls(),
VplsOperation.Operation.UPDATE);
} else {
// case 9: from UPDATE to UPDATE
// only need last UPDATE operation
return VplsOperation.of(lastOperation.vpls(),
VplsOperation.Operation.UPDATE);
}
}
}
|
@Test
public void testOptimizeOperationsAToR() {
Deque<VplsOperation> operations = new ArrayDeque<>();
VplsData vplsData = VplsData.of(VPLS1);
vplsData.addInterfaces(ImmutableSet.of(V100H1));
VplsOperation vplsOperation = VplsOperation.of(vplsData,
VplsOperation.Operation.ADD);
operations.add(vplsOperation);
vplsOperation = VplsOperation.of(vplsData,
VplsOperation.Operation.REMOVE);
operations.add(vplsOperation);
vplsOperation = VplsOperationManager.getOptimizedVplsOperation(operations);
assertNull(vplsOperation);
}
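A hedged sketch of case 2 (REMOVE followed by ADD collapses into a single UPDATE), reusing the VPLS1/V100H1 fixtures from the test above.
@Test
public void testOptimizeOperationsRToA() {
Deque<VplsOperation> operations = new ArrayDeque<>();
VplsData vplsData = VplsData.of(VPLS1);
vplsData.addInterfaces(ImmutableSet.of(V100H1));
operations.add(VplsOperation.of(vplsData, VplsOperation.Operation.REMOVE));
operations.add(VplsOperation.of(vplsData, VplsOperation.Operation.ADD));
VplsOperation result = VplsOperationManager.getOptimizedVplsOperation(operations);
assertEquals(VplsOperation.Operation.UPDATE, result.op());
}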
|
public OneMessageTransfer(ByteBuffer byteBufferHeader, SelectMappedBufferResult selectMappedBufferResult) {
this.byteBufferHeader = byteBufferHeader;
this.selectMappedBufferResult = selectMappedBufferResult;
}
|
@Test
public void OneMessageTransferTest() {
ByteBuffer byteBuffer = ByteBuffer.allocate(20);
byteBuffer.putInt(20);
SelectMappedBufferResult selectMappedBufferResult = new SelectMappedBufferResult(0,byteBuffer,20,new DefaultMappedFile());
OneMessageTransfer oneMessageTransfer = new OneMessageTransfer(byteBuffer,selectMappedBufferResult);
}
|
public SearchOptions setPage(int page, int pageSize) {
checkArgument(page >= 1, "Page must be greater or equal to 1 (got " + page + ")");
setLimit(pageSize);
int lastResultIndex = page * pageSize;
checkArgument(lastResultIndex <= MAX_RETURNABLE_RESULTS, "Can return only the first %s results. %sth result asked.", MAX_RETURNABLE_RESULTS, lastResultIndex);
setOffset(lastResultIndex - pageSize);
return this;
}
|
@Test
public void fail_if_result_after_first_10_000() {
assertThatThrownBy(() -> underTest.setPage(21, 500))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Can return only the first 10000 results. 10500th result asked.");
}
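A hedged positive case: the offset is derived as page * pageSize - pageSize, so page 3 with size 100 starts at result 200. Assumes SearchOptions exposes getOffset()/getLimit() accessors, which is not shown in the focal method.
@Test
public void set_page_computes_offset_and_limit() {
underTest.setPage(3, 100);
assertThat(underTest.getOffset()).isEqualTo(200);
assertThat(underTest.getLimit()).isEqualTo(100);
}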
|
Map<String, File> scanExistingUsers() throws IOException {
Map<String, File> users = new HashMap<>();
File[] userDirectories = listUserDirectories();
if (userDirectories != null) {
for (File directory : userDirectories) {
String userId = idStrategy.idFromFilename(directory.getName());
users.put(userId, directory);
}
}
addEmptyUsernameIfExists(users);
return users;
}
|
@Test
public void scanExistingUsersBasic() throws IOException {
UserIdMigrator migrator = createUserIdMigrator();
Map<String, File> userMappings = migrator.scanExistingUsers();
assertThat(userMappings.keySet(), hasSize(2));
assertThat(userMappings.keySet(), hasItems("admin", "jane"));
}
|
@Override
public String getUrl() {
return url != null ? url.originalArgument() : null;
}
|
@Test
void shouldReturnNullIfUrlForMaterialNotSpecified() {
HgMaterialConfig config = hg();
assertThat(config.getUrl()).isNull();
}
|
public static <V> Read<V> read() {
return new AutoValue_SparkReceiverIO_Read.Builder<V>().build();
}
|
@Test
public void testReadObjectCreationFailsIfPullFrequencySecIsNull() {
assertThrows(
IllegalArgumentException.class,
() -> SparkReceiverIO.<String>read().withPullFrequencySec(null));
}
|
@Override
public MepLtCreate decode(ObjectNode json, CodecContext context) {
if (json == null || !json.isObject()) {
return null;
}
JsonNode linktraceNode = json.get(LINKTRACE);
JsonNode remoteMepIdNode = linktraceNode.get(REMOTE_MEP_ID);
JsonNode remoteMepMacNode = linktraceNode.get(REMOTE_MEP_MAC);
MepLtCreate.MepLtCreateBuilder ltCreateBuilder;
if (remoteMepIdNode != null) {
MepId remoteMepId = MepId.valueOf((short) remoteMepIdNode.asInt());
ltCreateBuilder = DefaultMepLtCreate.builder(remoteMepId);
} else if (remoteMepMacNode != null) {
MacAddress remoteMepMac = MacAddress.valueOf(
remoteMepMacNode.asText());
ltCreateBuilder = DefaultMepLtCreate.builder(remoteMepMac);
} else {
throw new IllegalArgumentException(
"Either a remoteMepId or a remoteMepMac must be specified");
}
JsonNode defaultTtlNode = linktraceNode.get(DEFAULT_TTL);
if (defaultTtlNode != null) {
short defaultTtl = (short) defaultTtlNode.asInt();
ltCreateBuilder.defaultTtl(defaultTtl);
}
JsonNode transmitLtmFlagsNode = linktraceNode.get(TRANSMIT_LTM_FLAGS);
if (transmitLtmFlagsNode != null) {
if (transmitLtmFlagsNode.asText().isEmpty()) {
ltCreateBuilder.transmitLtmFlags(BitSet.valueOf(new long[]{0}));
} else if (transmitLtmFlagsNode.asText().equals(USE_FDB_ONLY)) {
ltCreateBuilder.transmitLtmFlags(BitSet.valueOf(new long[]{1}));
} else {
throw new IllegalArgumentException("Expecting value 'use-fdb-only' " +
"or '' for " + TRANSMIT_LTM_FLAGS);
}
}
return ltCreateBuilder.build();
}
|
@Test
public void testDecodeMepLtCreateMepId() throws JsonProcessingException, IOException {
String linktraceString = "{\"linktrace\": { " +
"\"remoteMepId\": 20," +
"\"defaultTtl\": 21," +
"\"transmitLtmFlags\": \"use-fdb-only\"}}";
InputStream input = new ByteArrayInputStream(
linktraceString.getBytes(StandardCharsets.UTF_8));
JsonNode cfg = mapper.readTree(input);
MepLtCreate mepLtCreate = context
.codec(MepLtCreate.class).decode((ObjectNode) cfg, context);
assertNull(mepLtCreate.remoteMepAddress());
assertEquals(20, mepLtCreate.remoteMepId().id().shortValue());
assertEquals(21, mepLtCreate.defaultTtl().intValue());
assertEquals(BitSet.valueOf(new byte[]{1}), mepLtCreate.transmitLtmFlags());
}
|
protected Timestamp convertBigNumberToTimestamp( BigDecimal bd ) {
if ( bd == null ) {
return null;
}
return convertIntegerToTimestamp( bd.longValue() );
}
|
@Test
public void testConvertBigNumberToTimestamp_Nanoseconds() throws KettleValueException {
System.setProperty( Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE,
Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE_NANOSECONDS );
ValueMetaTimestamp valueMetaTimestamp = new ValueMetaTimestamp();
Timestamp result = valueMetaTimestamp.convertBigNumberToTimestamp( BigDecimal.valueOf( TIMESTAMP_AS_NANOSECONDS ) );
assertEquals( TIMESTAMP_WITH_NANOSECONDS, result );
}
|