focal_method (stringlengths 13–60.9k) | test_case (stringlengths 25–109k) |
---|---|
public static AvroGenericCoder of(Schema schema) {
return AvroGenericCoder.of(schema);
}
|
@Test
public void testDeterministicInt() {
assertDeterministic(AvroCoder.of(int.class));
}
|
@GetMapping("/{id}")
@RequiresPermissions("system:manager:list")
public ShenyuAdminResult detailDashboardUser(@PathVariable("id") final String id) {
DashboardUserEditVO dashboardUserEditVO = dashboardUserService.findById(id);
return Optional.ofNullable(dashboardUserEditVO)
.map(item -> ShenyuAdminResult.success(ShenyuResultMessage.DETAIL_SUCCESS, item))
.orElseGet(() -> ShenyuAdminResult.error(ShenyuResultMessage.DASHBOARD_QUERY_ERROR));
}
|
@Test
public void detailDashboardUser() throws Exception {
List<RoleVO> roles = new ArrayList<>();
roles.add(mock(RoleVO.class));
List<RoleVO> allRoles = new ArrayList<>();
allRoles.add(mock(RoleVO.class));
DashboardUserEditVO dashboardUserEditVO = DashboardUserEditVO.buildDashboardUserEditVO(dashboardUserVO, roles, allRoles);
given(dashboardUserService.findById(any())).willReturn(dashboardUserEditVO);
final String url = "/dashboardUser/1";
mockMvc.perform(get(url))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.DETAIL_SUCCESS)));
given(dashboardUserService.findById(any())).willReturn(null);
mockMvc.perform(get(url))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.DASHBOARD_QUERY_ERROR)));
}
|
public static void executeWithRetries(
final Function function,
final RetryBehaviour retryBehaviour
) throws Exception {
executeWithRetries(() -> {
function.call();
return null;
}, retryBehaviour);
}
|
@Test
public void shouldNotRetryIfSupplierThrowsNonRetriableException() throws Exception {
// Given:
final AtomicBoolean firstCall = new AtomicBoolean(true);
final Callable<Object> throwsNonRetriable = () -> {
if (firstCall.get()) {
firstCall.set(false);
throw new RuntimeException("First non-retry exception");
}
throw new RuntimeException("Test should not retry");
};
// When:
final RuntimeException e = assertThrows(
RuntimeException.class,
() -> ExecutorUtil.executeWithRetries(throwsNonRetriable, ON_RETRYABLE)
);
// Then:
assertThat(e.getMessage(), containsString("First non-retry exception"));
}
|
public PluginInfo extensionFor(String extensionName) {
return stream().filter(pluginInfo -> extensionName.equals(pluginInfo.getExtensionName())).findFirst().orElse(null);
}
|
@Test
public void shouldFindAnExtensionOfAGivenTypeIfItExists() {
NotificationPluginInfo notificationPluginInfo = new NotificationPluginInfo(null, null);
PluggableTaskPluginInfo pluggableTaskPluginInfo = new PluggableTaskPluginInfo(null, null, null);
CombinedPluginInfo pluginInfo = new CombinedPluginInfo(List.of(pluggableTaskPluginInfo, notificationPluginInfo));
assertThat(pluginInfo.extensionFor(NOTIFICATION_EXTENSION), is(notificationPluginInfo));
assertThat(pluginInfo.extensionFor(PLUGGABLE_TASK_EXTENSION), is(pluggableTaskPluginInfo));
assertThat(pluginInfo.extensionFor(ANALYTICS_EXTENSION), is(nullValue()));
}
|
@ParametersAreNonnullByDefault
@Override
public ParseASTNode load(final String sql) {
return sqlParserExecutor.parse(sql);
}
|
@Test
void assertParseTreeCacheLoader() throws ReflectiveOperationException {
SQLParserExecutor sqlParserExecutor = mock(SQLParserExecutor.class, RETURNS_DEEP_STUBS);
ParseTreeCacheLoader loader = new ParseTreeCacheLoader(TypedSPILoader.getService(DatabaseType.class, "FIXTURE"));
Plugins.getMemberAccessor().set(loader.getClass().getDeclaredField("sqlParserExecutor"), loader, sqlParserExecutor);
assertThat(loader.load(SQL), isA(ParseASTNode.class));
}
|
@Override
public void updateRouter(Router osRouter) {
checkNotNull(osRouter, ERR_NULL_ROUTER);
checkArgument(!Strings.isNullOrEmpty(osRouter.getId()), ERR_NULL_ROUTER_ID);
osRouterStore.updateRouter(osRouter);
log.info(String.format(MSG_ROUTER, osRouter.getId(), MSG_UPDATED));
}
|
@Test(expected = IllegalArgumentException.class)
public void testUpdateRouterWithNullId() {
final Router testRouter = NeutronRouter.builder()
.id(null)
.name(ROUTER_NAME)
.build();
target.updateRouter(testRouter);
}
|
@Override
public boolean isTextValid(String text, List<String> tags) {
Assert.isTrue(ENABLED, "敏感词功能未开启,请将 ENABLED 设置为 true");
// No tags: validate against the default sensitive-word trie
if (CollUtil.isEmpty(tags)) {
return defaultSensitiveWordTrie.isValid(text);
}
// Tags provided: validate against each tag's trie
for (String tag : tags) {
SimpleTrie trie = tagSensitiveWordTries.get(tag);
if (trie == null) {
continue;
}
// If the text is invalid for any tag, return false
if (!trie.isValid(text)) {
return false;
}
}
return true;
}
|
@Test
public void testIsTestValid_noTag() {
testInitLocalCache();
// Prepare parameters
String text = "你是傻瓜,你是笨蛋";
// Invoke and assert
assertFalse(sensitiveWordService.isTextValid(text, null));
// Prepare parameters
String text2 = "你是白";
// Invoke and assert
assertFalse(sensitiveWordService.isTextValid(text2, null));
}
|
public static UUnary create(Kind unaryOp, UExpression expression) {
checkArgument(
UNARY_OP_CODES.containsKey(unaryOp), "%s is not a recognized unary operation", unaryOp);
return new AutoValue_UUnary(unaryOp, expression);
}
|
@Test
public void unaryPlus() {
assertUnifiesAndInlines("+foo", UUnary.create(Kind.UNARY_PLUS, fooIdent));
}
|
public static <T> PTransform<PCollection<T>, PCollection<T>> intersectAll(
PCollection<T> rightCollection) {
checkNotNull(rightCollection, "rightCollection argument is null");
return new SetImpl<>(rightCollection, intersectAll());
}
|
@Test
@Category(NeedsRunner.class)
public void testIntersectionAllCollectionList() {
PCollection<String> third = p.apply("third", Create.of(Arrays.asList("a", "b", "f")));
PCollection<Row> thirdRows = p.apply("thirdRows", Create.of(toRows("a", "b", "f")));
PAssert.that(
PCollectionList.of(first)
.and(second)
.and(third)
.apply("stringsCols", Sets.intersectAll()))
.containsInAnyOrder("a", "b");
PCollection<Row> results =
PCollectionList.of(firstRows)
.and(secondRows)
.and(thirdRows)
.apply("rowCols", Sets.intersectAll());
PAssert.that(results).containsInAnyOrder(toRows("a", "b"));
assertEquals(schema, results.getSchema());
p.run();
}
|
public static TableElements parse(final String schema, final TypeRegistry typeRegistry) {
return new SchemaParser(typeRegistry).parse(schema);
}
|
@Test
public void shouldParseValidSchemaWithKeyField() {
// Given:
final String schema = "K STRING KEY, bar INT";
// When:
final TableElements elements = parser.parse(schema);
// Then:
assertThat(elements, contains(
new TableElement(ColumnName.of("K"), new Type(SqlTypes.STRING), KEY_CONSTRAINT),
new TableElement(BAR, new Type(SqlTypes.INTEGER))
));
}
|
@Override
public void execute(ComputationStep.Context context) {
executeForBranch(treeRootHolder.getRoot());
}
|
@Test
public void no_more_used_event_uses_language_key_in_message_if_language_not_found() {
QualityProfile qp = qp(QP_NAME_1, LANGUAGE_KEY_1, new Date());
qProfileStatusRepository.register(qp.getQpKey(), REMOVED);
mockQualityProfileMeasures(treeRootHolder.getRoot(), arrayOf(qp), null);
mockLanguageNotInRepository(LANGUAGE_KEY_1);
underTest.execute(new TestComputationStepContext());
verify(eventRepository).add(eventArgumentCaptor.capture());
verifyNoMoreInteractions(eventRepository);
verifyEvent(eventArgumentCaptor.getValue(), "Stop using \"" + qp.getQpName() + "\" (" + qp.getLanguageKey() + ")", null, null);
}
|
public boolean allowUsersToSignUp() {
return config.getBoolean(ALLOW_USERS_TO_SIGN_UP).orElseThrow(DEFAULT_VALUE_MISSING);
}
|
@Test
public void allow_users_to_sign_up() {
settings.setProperty("sonar.auth.bitbucket.allowUsersToSignUp", "true");
assertThat(underTest.allowUsersToSignUp()).isTrue();
settings.setProperty("sonar.auth.bitbucket.allowUsersToSignUp", "false");
assertThat(underTest.allowUsersToSignUp()).isFalse();
}
|
Map<Uuid, String> topicNames() {
return topicNames;
}
|
@Test
public void testTopicNamesCacheBuiltFromTopicIds() {
Map<String, Uuid> topicIds = new HashMap<>();
topicIds.put("topic1", Uuid.randomUuid());
topicIds.put("topic2", Uuid.randomUuid());
MetadataSnapshot cache = new MetadataSnapshot("clusterId",
Collections.singletonMap(6, new Node(6, "localhost", 2077)),
Collections.emptyList(),
Collections.emptySet(),
Collections.emptySet(),
Collections.emptySet(),
null,
topicIds);
Map<Uuid, String> expectedNamesCache =
topicIds.entrySet().stream().collect(Collectors.toMap(Map.Entry::getValue,
Map.Entry::getKey));
assertEquals(expectedNamesCache, cache.topicNames());
}
|
MetricsType getMetricsType(String remaining) {
String name = StringHelper.before(remaining, ":");
MetricsType type;
if (name == null) {
type = DEFAULT_METRICS_TYPE;
} else {
type = MetricsType.getByName(name);
}
if (type == null) {
throw new RuntimeCamelException("Unknown metrics type \"" + name + "\"");
}
return type;
}
|
@Test
public void testGetMetricsTypeNotFound() {
assertThrows(RuntimeCamelException.class,
() -> component.getMetricsType("unknown-metrics:metrics-name"));
}
|
@Override
public int readInt(@Nonnull String fieldName) throws IOException {
FieldDefinition fd = cd.getField(fieldName);
if (fd == null) {
return 0;
}
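// Widen compatible narrower field types (byte, char, short) to int; any other declared type is incompatible.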
switch (fd.getType()) {
case INT:
return super.readInt(fieldName);
case BYTE:
return super.readByte(fieldName);
case CHAR:
return super.readChar(fieldName);
case SHORT:
return super.readShort(fieldName);
default:
throw createIncompatibleClassChangeError(fd, INT);
}
}
|
@Test(expected = IncompatibleClassChangeError.class)
public void testReadInt_IncompatibleClass() throws Exception {
reader.readInt("string");
}
|
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
boolean areAllNDVEstimatorsMergeable = true;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
doAllPartitionContainStats);
}
DoubleColumnStatsDataInspector columnStatsData = doubleInspectorFromStats(cso);
// check if we can merge NDV estimators
if (columnStatsData.getNdvEstimator() == null) {
areAllNDVEstimatorsMergeable = false;
break;
} else {
NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (!ndvEstimator.canMerge(estimator)) {
areAllNDVEstimatorsMergeable = false;
break;
}
}
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
DoubleColumnStatsDataInspector aggregateData = null;
long lowerBound = 0;
long higherBound = 0;
double densityAvgSum = 0.0;
DoubleColumnStatsMerger merger = new DoubleColumnStatsMerger();
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
DoubleColumnStatsDataInspector newData = doubleInspectorFromStats(cso);
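// The true NDV is at least the largest per-partition NDV (lowerBound) and at most the sum of per-partition NDVs (higherBound).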
lowerBound = Math.max(lowerBound, newData.getNumDVs());
higherBound += newData.getNumDVs();
densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs();
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(merger.mergeLowValue(
merger.getLowValue(aggregateData), merger.getLowValue(newData)));
aggregateData.setHighValue(merger.mergeHighValue(
merger.getHighValue(aggregateData), merger.getHighValue(newData)));
aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
long estimation;
if (useDensityFunctionForNDVEstimation) {
// We have estimation, lowerbound and higherbound. We use estimation
// if it is between lowerbound and higherbound.
double densityAvg = densityAvgSum / partNames.size();
estimation = (long) ((aggregateData.getHighValue() - aggregateData.getLowValue()) / densityAvg);
if (estimation < lowerBound) {
estimation = lowerBound;
} else if (estimation > higherBound) {
estimation = higherBound;
}
} else {
estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
}
aggregateData.setNumDVs(estimation);
}
columnStatisticsData.setDoubleStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for {}", colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
// while we scan the css, we also get the densityAvg, lowerbound and
// higherbound when useDensityFunctionForNDVEstimation is true.
double densityAvgSum = 0.0;
if (!areAllNDVEstimatorsMergeable) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DoubleColumnStatsData newData = cso.getStatsData().getDoubleStats();
if (useDensityFunctionForNDVEstimation && newData.isSetLowValue() && newData.isSetHighValue()) {
densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs();
}
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
DoubleColumnStatsData aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DoubleColumnStatsDataInspector newData =
doubleInspectorFromStats(cso);
// newData.isSetBitVectors() should be true for sure because we
// already checked it before.
if (indexMap.get(partName) != curIndex) {
// There is bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDoubleStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs();
}
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
aggregateData.setHighValue(Math.max(aggregateData.getHighValue(),
newData.getHighValue()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDoubleStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs();
}
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
}
LOG.debug(
"Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
colName, columnStatisticsData.getDoubleStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
if (mergedKllHistogramEstimator != null) {
columnStatisticsData.getDoubleStats().setHistogram(mergedKllHistogramEstimator.serialize());
}
statsObj.setStatsData(columnStatisticsData);
return statsObj;
}
|
@Test
public void testAggregateSingleStat() throws MetaException {
List<String> partitions = Collections.singletonList("part1");
ColumnStatisticsData data1 = new ColStatsBuilder<>(double.class).numNulls(1).numDVs(2)
.low(1d).high(4d).hll(1, 4).build();
List<ColStatsObjWithSourceInfo> statsList =
Collections.singletonList(createStatsWithInfo(data1, TABLE, COL, partitions.get(0)));
DoubleColumnStatsAggregator aggregator = new DoubleColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);
assertEqualStatistics(data1, computedStatsObj.getStatsData());
}
|
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
return invoke(n, BigDecimal.ZERO);
}
|
@Test
void invokeRoundingDown() {
FunctionTestUtil.assertResult(roundHalfUpFunction.invoke(BigDecimal.valueOf(10.24)), BigDecimal.valueOf(10));
FunctionTestUtil.assertResult(roundHalfUpFunction.invoke(BigDecimal.valueOf(10.24), BigDecimal.ONE),
BigDecimal.valueOf(10.2));
}
|
@Override
public void onCreate(
final ServiceContext serviceContext,
final MetaStore metaStore,
final QueryMetadata queryMetadata) {
if (perQuery.containsKey(queryMetadata.getQueryId())) {
return;
}
perQuery.put(
queryMetadata.getQueryId(),
new PerQueryListener(
metrics,
metricsPrefix,
queryMetadata.getQueryId().toString(),
metricsTags
)
);
}
|
@Test
public void shouldAddMetricWithSuppliedPrefix() {
// Given:
final String groupPrefix = "some-prefix-";
final Map<String, String> tags = new HashMap<>(metricsTags);
tags.put("status", TAG);
clearInvocations(metrics);
// When:
listener = new QueryStateMetricsReportingListener(metrics, groupPrefix, metricsTags);
listener.onCreate(serviceContext, metaStore, query);
// Then:
verify(metrics).metricName("query-status", groupPrefix + "ksql-queries",
"The current Kafka Streams status of the given query.",
tags);
verify(metrics).metricName("error-status", groupPrefix + "ksql-queries",
"The current error status of the given query, if the state is in ERROR state",
tags);
verify(metrics).metricName("ksql-query-status", groupPrefix + "ksql-queries",
"The current ksqlDB status of the given query.",
tags);
tags.put("query-id", QUERY_ID.toString());
verify(metrics).metricName(QueryStateMetricsReportingListener.QUERY_RESTART_METRIC_NAME, "some-prefix-ksql-queries",
QueryStateMetricsReportingListener.QUERY_RESTART_METRIC_DESCRIPTION,
tags);
}
|
@Override
public void prepare(ExecutorDetails exec) {
this.exec = exec;
}
|
@Test
void testWithBlackListedHosts() {
INimbus iNimbus = new INimbusTest();
double compPcore = 100;
double compOnHeap = 775;
double compOffHeap = 25;
int topo1NumSpouts = 1;
int topo1NumBolts = 5;
int topo1SpoutParallelism = 100;
int topo1BoltParallelism = 200;
final int numSupersPerRack = 10;
final int numPortsPerSuper = 66;
long compPerRack = (topo1NumSpouts * topo1SpoutParallelism + topo1NumBolts * topo1BoltParallelism + 10);
long compPerSuper = compPerRack / numSupersPerRack;
double cpuPerSuper = compPcore * compPerSuper;
double memPerSuper = (compOnHeap + compOffHeap) * compPerSuper;
double topo1MaxHeapSize = memPerSuper;
final String topoName1 = "topology1";
int numRacks = 3;
Map<String, SupervisorDetails> supMap = genSupervisorsWithRacks(numRacks, numSupersPerRack, numPortsPerSuper,
0, 0, cpuPerSuper, memPerSuper, new HashMap<>());
TestDNSToSwitchMapping testDNSToSwitchMapping = new TestDNSToSwitchMapping(supMap.values());
Config config = new Config();
config.putAll(createGrasClusterConfig(compPcore, compOnHeap, compOffHeap, null, null));
config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, GenericResourceAwareStrategy.class.getName());
IScheduler scheduler = new ResourceAwareScheduler();
scheduler.prepare(config, new StormMetricsRegistry());
TopologyDetails td1 = genTopology(topoName1, config, topo1NumSpouts,
topo1NumBolts, topo1SpoutParallelism, topo1BoltParallelism, 0, 0, "user", topo1MaxHeapSize);
Topologies topologies = new Topologies(td1);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
cluster.setNetworkTopography(testDNSToSwitchMapping.getRackToHosts());
Map<String, List<String>> networkTopography = cluster.getNetworkTopography();
assertEquals(numRacks, networkTopography.size(), "Expecting " + numRacks + " racks found " + networkTopography.size());
assertTrue(networkTopography.size() >= 3, "Expecting racks count to be >= 3, found " + networkTopography.size());
Set<String> blackListedHosts = new HashSet<>();
List<SupervisorDetails> supArray = new ArrayList<>(supMap.values());
for (int i = 0 ; i < numSupersPerRack ; i++) {
blackListedHosts.add(supArray.get(i).getHost());
}
blacklistHostsAndSortNodes(blackListedHosts, supMap.values(), cluster, td1);
String rackToClear = cluster.getNetworkTopography().keySet().stream().findFirst().get();
blackListedHosts = new HashSet<>(cluster.getNetworkTopography().get(rackToClear));
blacklistHostsAndSortNodes(blackListedHosts, supMap.values(), cluster, td1);
}
|
public boolean checkStateUpdater(final long now,
final java.util.function.Consumer<Set<TopicPartition>> offsetResetter) {
addTasksToStateUpdater();
if (stateUpdater.hasExceptionsAndFailedTasks()) {
handleExceptionsFromStateUpdater();
}
if (stateUpdater.restoresActiveTasks()) {
handleRestoredTasksFromStateUpdater(now, offsetResetter);
}
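// Restoration is complete only when no active tasks are still restoring and no tasks are pending initialization.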
return !stateUpdater.restoresActiveTasks()
&& !tasks.hasPendingTasksToInit();
}
|
@Test
public void shouldReturnCorrectBooleanWhenTryingToCompleteRestorationWithStateUpdater() {
final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, true);
when(stateUpdater.restoresActiveTasks()).thenReturn(false);
assertTrue(taskManager.checkStateUpdater(time.milliseconds(), noOpResetter));
when(stateUpdater.restoresActiveTasks()).thenReturn(true);
assertFalse(taskManager.checkStateUpdater(time.milliseconds(), noOpResetter));
}
|
@VisibleForTesting
void validateParentDept(Long id, Long parentId) {
if (parentId == null || DeptDO.PARENT_ID_ROOT.equals(parentId)) {
return;
}
// 1. A department cannot be its own parent
if (Objects.equals(id, parentId)) {
throw exception(DEPT_PARENT_ERROR);
}
// 2. The parent department must exist
DeptDO parentDept = deptMapper.selectById(parentId);
if (parentDept == null) {
throw exception(DEPT_PARENT_NOT_EXITS);
}
// 3. Walk up the ancestors: if the parent is a descendant of this department, fail to avoid creating a cycle
if (id == null) { // a null id means this is a new department, so no cycle is possible
return;
}
for (int i = 0; i < Short.MAX_VALUE; i++) {
// 3.1 Check for a cycle
parentId = parentDept.getParentId();
if (Objects.equals(id, parentId)) {
throw exception(DEPT_PARENT_IS_CHILD);
}
// 3.2 Continue with the next ancestor
if (parentId == null || DeptDO.PARENT_ID_ROOT.equals(parentId)) {
break;
}
parentDept = deptMapper.selectById(parentId);
if (parentDept == null) {
break;
}
}
}
|
@Test
public void testValidateParentDept_parentError() {
// Prepare parameters
Long id = randomLongId();
// Invoke and assert the exception
assertServiceException(() -> deptService.validateParentDept(id, id),
DEPT_PARENT_ERROR);
}
|
public static FunctionConfig validateUpdate(FunctionConfig existingConfig, FunctionConfig newConfig) {
FunctionConfig mergedConfig = existingConfig.toBuilder().build();
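// Identity fields (tenant, namespace, name) must match; immutable settings below throw if changed; updatable fields are overlaid onto this copy of the existing config.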
if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
throw new IllegalArgumentException("Tenants differ");
}
if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
throw new IllegalArgumentException("Namespaces differ");
}
if (!existingConfig.getName().equals(newConfig.getName())) {
throw new IllegalArgumentException("Function Names differ");
}
if (!StringUtils.isEmpty(newConfig.getClassName())) {
mergedConfig.setClassName(newConfig.getClassName());
}
if (!StringUtils.isEmpty(newConfig.getJar())) {
mergedConfig.setJar(newConfig.getJar());
}
if (newConfig.getInputSpecs() == null) {
newConfig.setInputSpecs(new HashMap<>());
}
if (mergedConfig.getInputSpecs() == null) {
mergedConfig.setInputSpecs(new HashMap<>());
}
if (newConfig.getInputs() != null) {
newConfig.getInputs().forEach((topicName -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder().isRegexPattern(false).build());
}));
}
if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
ConsumerConfig.builder()
.isRegexPattern(true)
.build());
}
if (newConfig.getCustomSerdeInputs() != null) {
newConfig.getCustomSerdeInputs().forEach((topicName, serdeClassName) -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder()
.serdeClassName(serdeClassName)
.isRegexPattern(false)
.build());
});
}
if (newConfig.getCustomSchemaInputs() != null) {
newConfig.getCustomSchemaInputs().forEach((topicName, schemaClassname) -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder()
.schemaType(schemaClassname)
.isRegexPattern(false)
.build());
});
}
if (!newConfig.getInputSpecs().isEmpty()) {
newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
if (!existingConfig.getInputSpecs().containsKey(topicName)) {
throw new IllegalArgumentException("Input Topics cannot be altered");
}
if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
throw new IllegalArgumentException(
"isRegexPattern for input topic " + topicName + " cannot be altered");
}
mergedConfig.getInputSpecs().put(topicName, consumerConfig);
});
}
if (!StringUtils.isEmpty(newConfig.getOutputSerdeClassName()) && !newConfig.getOutputSerdeClassName()
.equals(existingConfig.getOutputSerdeClassName())) {
throw new IllegalArgumentException("Output Serde mismatch");
}
if (!StringUtils.isEmpty(newConfig.getOutputSchemaType()) && !newConfig.getOutputSchemaType()
.equals(existingConfig.getOutputSchemaType())) {
throw new IllegalArgumentException("Output Schema mismatch");
}
if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
mergedConfig.setLogTopic(newConfig.getLogTopic());
}
if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
.equals(existingConfig.getProcessingGuarantees())) {
throw new IllegalArgumentException("Processing Guarantees cannot be altered");
}
if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
.equals(existingConfig.getRetainOrdering())) {
throw new IllegalArgumentException("Retain Ordering cannot be altered");
}
if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
.equals(existingConfig.getRetainKeyOrdering())) {
throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
}
if (!StringUtils.isEmpty(newConfig.getOutput())) {
mergedConfig.setOutput(newConfig.getOutput());
}
if (newConfig.getUserConfig() != null) {
mergedConfig.setUserConfig(newConfig.getUserConfig());
}
if (newConfig.getSecrets() != null) {
mergedConfig.setSecrets(newConfig.getSecrets());
}
if (newConfig.getRuntime() != null && !newConfig.getRuntime().equals(existingConfig.getRuntime())) {
throw new IllegalArgumentException("Runtime cannot be altered");
}
if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
throw new IllegalArgumentException("AutoAck cannot be altered");
}
if (newConfig.getMaxMessageRetries() != null) {
mergedConfig.setMaxMessageRetries(newConfig.getMaxMessageRetries());
}
if (!StringUtils.isEmpty(newConfig.getDeadLetterTopic())) {
mergedConfig.setDeadLetterTopic(newConfig.getDeadLetterTopic());
}
if (!StringUtils.isEmpty(newConfig.getSubName()) && !newConfig.getSubName()
.equals(existingConfig.getSubName())) {
throw new IllegalArgumentException("Subscription Name cannot be altered");
}
if (newConfig.getParallelism() != null) {
mergedConfig.setParallelism(newConfig.getParallelism());
}
if (newConfig.getResources() != null) {
mergedConfig
.setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
}
if (newConfig.getWindowConfig() != null) {
mergedConfig.setWindowConfig(newConfig.getWindowConfig());
}
if (newConfig.getTimeoutMs() != null) {
mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
}
if (newConfig.getCleanupSubscription() != null) {
mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
}
if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
}
if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
}
if (newConfig.getProducerConfig() != null) {
mergedConfig.setProducerConfig(newConfig.getProducerConfig());
}
return mergedConfig;
}
|
@Test
public void testMergeDifferentUserConfig() {
FunctionConfig functionConfig = createFunctionConfig();
Map<String, String> myConfig = new HashMap<>();
myConfig.put("MyKey", "MyValue");
FunctionConfig newFunctionConfig = createUpdatedFunctionConfig("userConfig", myConfig);
FunctionConfig mergedConfig = FunctionConfigUtils.validateUpdate(functionConfig, newFunctionConfig);
assertEquals(
mergedConfig.getUserConfig(),
myConfig
);
mergedConfig.setUserConfig(functionConfig.getUserConfig());
assertEquals(
new Gson().toJson(functionConfig),
new Gson().toJson(mergedConfig)
);
}
|
public static MemberVersion of(int major, int minor, int patch) {
if (major == 0 && minor == 0 && patch == 0) {
return MemberVersion.UNKNOWN;
} else {
return new MemberVersion(major, minor, patch);
}
}
|
@Test
public void testSerialization() {
MemberVersion given = MemberVersion.of(3, 9, 1);
SerializationServiceV1 ss = new DefaultSerializationServiceBuilder().setVersion(SerializationServiceV1.VERSION_1).build();
MemberVersion deserialized = ss.toObject(ss.toData(given));
assertEquals(deserialized, given);
}
|
List<Condition> run(boolean useKRaft) {
List<Condition> warnings = new ArrayList<>();
checkKafkaReplicationConfig(warnings);
checkKafkaBrokersStorage(warnings);
if (useKRaft) {
// Additional checks done for KRaft clusters
checkKRaftControllerStorage(warnings);
checkKRaftControllerCount(warnings);
checkKafkaMetadataVersion(warnings);
checkInterBrokerProtocolVersionInKRaft(warnings);
checkLogMessageFormatVersionInKRaft(warnings);
} else {
// Additional checks done for ZooKeeper-based clusters
checkKafkaLogMessageFormatVersion(warnings);
checkKafkaInterBrokerProtocolVersion(warnings);
checkKRaftMetadataStorageConfiguredForZooBasedCLuster(warnings);
}
return warnings;
}
|
@Test
public void checkReplicationFactorAndMinInSyncReplicasNotSet() {
Kafka kafka = new KafkaBuilder(KAFKA)
.editSpec()
.editKafka()
.withConfig(Map.of())
.endKafka()
.endSpec()
.build();
KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, POOL_A), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE);
List<Condition> warnings = checker.run(true);
assertThat(warnings, hasSize(2));
assertThat(warnings.get(0).getReason(), is("KafkaDefaultReplicationFactor"));
assertThat(warnings.get(0).getMessage(), is("default.replication.factor option is not configured. It defaults to 1 which does not guarantee reliability and availability. You should configure this option in .spec.kafka.config."));
assertThat(warnings.get(1).getReason(), is("KafkaMinInsyncReplicas"));
assertThat(warnings.get(1).getMessage(), is("min.insync.replicas option is not configured. It defaults to 1 which does not guarantee reliability and availability. You should configure this option in .spec.kafka.config."));
}
|
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
locks.computeIfAbsent(msg.getOriginator(), SemaphoreWithTbMsgQueue::new)
.addToQueueAndTryProcess(msg, ctx, this::processMsgAsync);
}
|
@Test
public void test_sqrt_5_to_timeseries_and_data() {
var node = initNode(TbRuleNodeMathFunctionType.SQRT,
new TbMathResult(TbMathArgumentType.TIME_SERIES, "result", 3, true, false, DataConstants.SERVER_SCOPE),
new TbMathArgument(TbMathArgumentType.MESSAGE_BODY, "a")
);
TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, originator, TbMsgMetaData.EMPTY, JacksonUtil.newObjectNode().put("a", 5).toString());
when(telemetryService.saveAndNotify(any(), any(), any(TsKvEntry.class)))
.thenReturn(Futures.immediateFuture(null));
node.onMsg(ctx, msg);
ArgumentCaptor<TbMsg> msgCaptor = ArgumentCaptor.forClass(TbMsg.class);
verify(ctx, timeout(TIMEOUT)).tellSuccess(msgCaptor.capture());
verify(telemetryService, times(1)).saveAndNotify(any(), any(), any(TsKvEntry.class));
TbMsg resultMsg = msgCaptor.getValue();
assertNotNull(resultMsg);
assertNotNull(resultMsg.getData());
var resultJson = JacksonUtil.toJsonNode(resultMsg.getData());
assertTrue(resultJson.has("result"));
assertEquals(2.236, resultJson.get("result").asDouble(), 0.0);
}
|
@Override
public int configInfoCount() {
ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO);
String sql = configInfoMapper.count(null);
Integer result = databaseOperate.queryOne(sql, Integer.class);
if (result == null) {
throw new IllegalArgumentException("configInfoCount error");
}
return result;
}
|
@Test
void testConfigInfoCountByTenant() {
String tenant = "tenant124";
//mock total count
when(databaseOperate.queryOne(anyString(), eq(new Object[] {tenant}), eq(Integer.class))).thenReturn(new Integer(90));
int count = embeddedConfigInfoPersistService.configInfoCount(tenant);
assertEquals(90, count);
when(databaseOperate.queryOne(anyString(), eq(new Object[] {tenant}), eq(Integer.class))).thenReturn(null);
try {
embeddedConfigInfoPersistService.configInfoCount(tenant);
assertTrue(false);
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
}
|
@Override
public boolean isPluginLoaded(String pluginId) {
final GoPluginDescriptor descriptor = getPluginDescriptorFor(pluginId);
return !(descriptor == null || descriptor.isInvalid());
}
|
@Test
void isPluginLoaded_shouldReturnTrueWhenPluginIsLoaded() {
final GoPluginDescriptor dockerPluginDescriptor = mock(GoPluginDescriptor.class);
when(dockerPluginDescriptor.isInvalid()).thenReturn(false);
when(registry.getPlugin("cd.go.elastic-agent.docker")).thenReturn(dockerPluginDescriptor);
DefaultPluginManager pluginManager = new DefaultPluginManager(monitor, registry, mock(GoPluginOSGiFramework.class), jarChangeListener, pluginRequestProcessorRegistry, systemEnvironment, pluginLoader);
assertThat(pluginManager.isPluginLoaded("cd.go.elastic-agent.docker")).isTrue();
}
|
@Override
@Nullable
public byte[] readByteArray(@Nonnull String fieldName) throws IOException {
return readIncompatibleField(fieldName, BYTE_ARRAY, super::readByteArray);
}
|
@Test(expected = IncompatibleClassChangeError.class)
public void testReadByteArray_IncompatibleClass() throws Exception {
reader.readByteArray("byte");
}
|
@Override
public <T> Serde<T> createSerde(
final Schema schema,
final KsqlConfig ksqlConfig,
final Supplier<SchemaRegistryClient> srFactory,
final Class<T> targetType,
final boolean isKey
) {
validateSchema(schema);
final Optional<Schema> physicalSchema;
if (useSchemaRegistryFormat) {
physicalSchema = properties.getSchemaId().isPresent() ? Optional.of(
SerdeUtils.getAndTranslateSchemaById(srFactory, properties.getSchemaId()
.get(), new JsonSchemaTranslator())) : Optional.empty();
} else {
physicalSchema = Optional.empty();
}
final Converter converter = useSchemaRegistryFormat
? getSchemaRegistryConverter(srFactory.get(), ksqlConfig, properties.getSchemaId(), isKey)
: getConverter();
// The translators are used in the serializer & deserializer only for JSON_SR formats
final ConnectDataTranslator dataTranslator = physicalSchema.isPresent()
? new ConnectSRSchemaDataTranslator(physicalSchema.get())
: new ConnectDataTranslator(schema);
final Supplier<Serializer<T>> serializer = () -> createSerializer(
targetType,
dataTranslator,
converter
);
final Deserializer<T> deserializer = createDeserializer(
ksqlConfig,
schema,
targetType,
dataTranslator,
converter
);
// Sanity check:
serializer.get();
return Serdes.serdeFrom(
new ThreadLocalSerializer<>(serializer),
deserializer
);
}
|
@Test
public void shouldThrowOnNestedMapWithNoneStringKeys() {
// Given
final ConnectSchema schemaWithNestedInvalidMap = (ConnectSchema) SchemaBuilder
.struct()
.field("f0", SchemaBuilder
.map(Schema.OPTIONAL_BOOLEAN_SCHEMA, Schema.OPTIONAL_STRING_SCHEMA)
.optional()
.build())
.build();
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> jsonFactory.createSerde(schemaWithNestedInvalidMap, config, srFactory, String.class, false)
);
// Then:
assertThat(e.getMessage(), containsString(
"JSON only supports MAP types with STRING keys"));
}
|
@Override
public String toString() {
return "ReshuffleTriggerStateMachine()";
}
|
@Test
public void testToString() {
TriggerStateMachine trigger = ReshuffleTriggerStateMachine.create();
assertEquals("ReshuffleTriggerStateMachine()", trigger.toString());
}
|
public boolean fileExists(String path) throws IOException, InvalidTokenException {
String url;
try {
url =
getUriBuilder()
.setPath(API_PATH_PREFIX + "/mounts/primary/files/info")
.setParameter("path", path)
.build()
.toString();
} catch (URISyntaxException e) {
throw new IllegalStateException("Could not produce url.", e);
}
Request.Builder requestBuilder = getRequestBuilder(url);
try (Response response = getResponse(requestBuilder)) {
int code = response.code();
if (code == 200) {
return true;
}
if (code == 404) {
return false;
}
throw new KoofrClientIOException(response);
}
}
|
@Test
public void testFileExistsNonExistent() throws Exception {
server.enqueue(new MockResponse().setResponseCode(404));
boolean exists = client.fileExists("/path/to/file");
assertFalse(exists);
assertEquals(1, server.getRequestCount());
final RecordedRequest recordedRequest = server.takeRequest();
assertEquals("GET", recordedRequest.getMethod());
assertEquals(
"/api/v2/mounts/primary/files/info?path=%2Fpath%2Fto%2Ffile", recordedRequest.getPath());
assertEquals("Bearer acc", recordedRequest.getHeader("Authorization"));
assertEquals("2.1", recordedRequest.getHeader("X-Koofr-Version"));
}
|
public final void tag(I input, ScopedSpan span) {
if (input == null) throw new NullPointerException("input == null");
if (span == null) throw new NullPointerException("span == null");
if (span.isNoop()) return;
tag(span, input, span.context());
}
|
@Test void tag_customizer_withContext() {
when(parseValue.apply(input, context)).thenReturn("value");
tag.tag(input, context, customizer);
verify(parseValue).apply(input, context);
verifyNoMoreInteractions(parseValue); // doesn't parse twice
verify(customizer).tag("key", "value");
verifyNoMoreInteractions(customizer); // doesn't tag twice
}
|
public void close() {
synchronized (LOCK) {
for (KafkaMbean mbean : this.mbeans.values())
unregister(mbean);
}
}
|
@Test
public void testJmxRegistrationSanitization() throws Exception {
Metrics metrics = new Metrics();
MBeanServer server = ManagementFactory.getPlatformMBeanServer();
try {
metrics.addReporter(new JmxReporter());
Sensor sensor = metrics.sensor("kafka.requests");
sensor.add(metrics.metricName("name", "group", "desc", "id", "foo*"), new CumulativeSum());
sensor.add(metrics.metricName("name", "group", "desc", "id", "foo+"), new CumulativeSum());
sensor.add(metrics.metricName("name", "group", "desc", "id", "foo?"), new CumulativeSum());
sensor.add(metrics.metricName("name", "group", "desc", "id", "foo:"), new CumulativeSum());
sensor.add(metrics.metricName("name", "group", "desc", "id", "foo%"), new CumulativeSum());
assertTrue(server.isRegistered(new ObjectName(":type=group,id=\"foo\\*\"")));
assertEquals(0.0, server.getAttribute(new ObjectName(":type=group,id=\"foo\\*\""), "name"));
assertTrue(server.isRegistered(new ObjectName(":type=group,id=\"foo+\"")));
assertEquals(0.0, server.getAttribute(new ObjectName(":type=group,id=\"foo+\""), "name"));
assertTrue(server.isRegistered(new ObjectName(":type=group,id=\"foo\\?\"")));
assertEquals(0.0, server.getAttribute(new ObjectName(":type=group,id=\"foo\\?\""), "name"));
assertTrue(server.isRegistered(new ObjectName(":type=group,id=\"foo:\"")));
assertEquals(0.0, server.getAttribute(new ObjectName(":type=group,id=\"foo:\""), "name"));
assertTrue(server.isRegistered(new ObjectName(":type=group,id=foo%")));
assertEquals(0.0, server.getAttribute(new ObjectName(":type=group,id=foo%"), "name"));
metrics.removeMetric(metrics.metricName("name", "group", "desc", "id", "foo*"));
metrics.removeMetric(metrics.metricName("name", "group", "desc", "id", "foo+"));
metrics.removeMetric(metrics.metricName("name", "group", "desc", "id", "foo?"));
metrics.removeMetric(metrics.metricName("name", "group", "desc", "id", "foo:"));
metrics.removeMetric(metrics.metricName("name", "group", "desc", "id", "foo%"));
assertFalse(server.isRegistered(new ObjectName(":type=group,id=\"foo\\*\"")));
assertFalse(server.isRegistered(new ObjectName(":type=group,id=foo+")));
assertFalse(server.isRegistered(new ObjectName(":type=group,id=\"foo\\?\"")));
assertFalse(server.isRegistered(new ObjectName(":type=group,id=\"foo:\"")));
assertFalse(server.isRegistered(new ObjectName(":type=group,id=foo%")));
} finally {
metrics.close();
}
}
|
public PropertiesSnapshot updateWorkflowProperties(
String workflowId, User author, Properties props, PropertiesUpdate update) {
LOG.debug("Updating workflow properties for workflow id [{}]", workflowId);
Checks.notNull(
props, "properties changes to apply cannot be null for workflow [%s]", workflowId);
return withMetricLogError(
() ->
withRetryableTransaction(
conn -> {
WorkflowInfo workflowInfo = getWorkflowInfoForUpdate(conn, workflowId);
Checks.notNull(
workflowInfo.getPrevPropertiesSnapshot(),
"Cannot update workflow properties while the workflow [%s] does not exist",
workflowId);
PropertiesSnapshot snapshot =
updateWorkflowProps(
conn,
workflowId,
author,
System.currentTimeMillis(),
workflowInfo.getPrevPropertiesSnapshot(),
props,
update);
List<StatementPreparer> preparers = new ArrayList<>();
StringBuilder fields = prepareProperties(preparers, workflowId, snapshot);
long[] updateRes = executeTemplateUpdate(conn, fields, preparers);
if (updateRes != null) {
if (workflowInfo.getPrevActiveVersionId() != Constants.INACTIVE_VERSION_ID) {
updateWorkflowInfoForNextActiveWorkflow(
conn,
workflowId,
workflowInfo.getPrevActiveVersionId(),
workflowInfo,
snapshot);
addWorkflowTriggersIfNeeded(conn, workflowInfo);
}
MaestroJobEvent jobEvent = logToTimeline(conn, workflowId, snapshot);
publisher.publishOrThrow(
jobEvent, "Failed to publish maestro properties change job event.");
}
return snapshot;
}),
"updateWorkflowProperties",
"Failed updating the properties for workflow [{}]",
workflowId);
}
|
@Test
public void testInvalidWorkflowProperties() {
AssertHelper.assertThrows(
"cannot push a empty properties change for any workflow",
NullPointerException.class,
"properties changes to apply cannot be null for workflow",
() ->
workflowDao.updateWorkflowProperties(
TEST_WORKFLOW_ID2, User.create("test"), null, PROPERTIES_UPDATE));
AssertHelper.assertThrows(
"cannot push a properties change for non-existing workflow",
NullPointerException.class,
"Cannot update workflow properties while the workflow",
() ->
workflowDao.updateWorkflowProperties(
TEST_WORKFLOW_ID2, User.create("test"), new Properties(), PROPERTIES_UPDATE));
}
|
@Override
public void updateApiErrorLogProcess(Long id, Integer processStatus, Long processUserId) {
ApiErrorLogDO errorLog = apiErrorLogMapper.selectById(id);
if (errorLog == null) {
throw exception(API_ERROR_LOG_NOT_FOUND);
}
if (!ApiErrorLogProcessStatusEnum.INIT.getStatus().equals(errorLog.getProcessStatus())) {
throw exception(API_ERROR_LOG_PROCESSED);
}
// Mark as processed
apiErrorLogMapper.updateById(ApiErrorLogDO.builder().id(id).processStatus(processStatus)
.processUserId(processUserId).processTime(LocalDateTime.now()).build());
}
|
@Test
public void testUpdateApiErrorLogProcess_success() {
// Prepare parameters
ApiErrorLogDO apiErrorLogDO = randomPojo(ApiErrorLogDO.class,
o -> o.setProcessStatus(ApiErrorLogProcessStatusEnum.INIT.getStatus()));
apiErrorLogMapper.insert(apiErrorLogDO);
// Prepare parameters
Long id = apiErrorLogDO.getId();
Integer processStatus = randomEle(ApiErrorLogProcessStatusEnum.values()).getStatus();
Long processUserId = randomLongId();
// Invoke
apiErrorLogService.updateApiErrorLogProcess(id, processStatus, processUserId);
// Assert
ApiErrorLogDO dbApiErrorLogDO = apiErrorLogMapper.selectById(apiErrorLogDO.getId());
assertEquals(processStatus, dbApiErrorLogDO.getProcessStatus());
assertEquals(processUserId, dbApiErrorLogDO.getProcessUserId());
assertNotNull(dbApiErrorLogDO.getProcessTime());
}
|
public Parser<M> parser() {
return parser;
}
|
@Test
public void test_parser() {
assertThat(METADATA.parser()).isSameAs(ProjectDump.Metadata.parser());
}
|
public PendingSpan getOrCreate(
@Nullable TraceContext parent, TraceContext context, boolean start) {
PendingSpan result = get(context);
if (result != null) return result;
MutableSpan span = new MutableSpan(context, defaultSpan);
PendingSpan parentSpan = parent != null ? get(parent) : null;
// save overhead calculating time if the parent is in-progress (usually is)
TickClock clock;
if (parentSpan != null) {
TraceContext parentContext = parentSpan.context();
if (parentContext != null) parent = parentContext;
clock = parentSpan.clock;
if (start) span.startTimestamp(clock.currentTimeMicroseconds());
} else {
long currentTimeMicroseconds = this.clock.currentTimeMicroseconds();
clock = new TickClock(platform, currentTimeMicroseconds, platform.nanoTime());
if (start) span.startTimestamp(currentTimeMicroseconds);
}
PendingSpan newSpan = new PendingSpan(context, span, clock);
// Probably absent because we already checked with get() at the entrance of this method
PendingSpan previousSpan = putIfProbablyAbsent(context, newSpan);
if (previousSpan != null) return previousSpan; // lost race
// We've now allocated a new trace context.
assert parent != null || context.isLocalRoot() :
"Bug (or unexpected call to internal code): parent can only be null in a local root!";
spanHandler.begin(newSpan.handlerContext, newSpan.span, parentSpan != null
? parentSpan.handlerContext : null);
return newSpan;
}
|
@Test void remove_doesntReport() {
pendingSpans.getOrCreate(null, context, false);
pendingSpans.remove(context);
assertThat(spans).isEmpty();
}
|
public static Table resolveCalciteTable(SchemaPlus schemaPlus, List<String> tablePath) {
Schema subSchema = schemaPlus;
// subSchema.getSubschema() for all except last
for (int i = 0; i < tablePath.size() - 1; i++) {
subSchema = subSchema.getSubSchema(tablePath.get(i));
if (subSchema == null) {
throw new IllegalStateException(
String.format(
"While resolving table path %s, no sub-schema found for component %s (\"%s\")",
tablePath, i, tablePath.get(i)));
}
}
// for the final one call getTable()
return subSchema.getTable(Iterables.getLast(tablePath));
}
|
@Test
public void testResolveNestedWithDots() {
String subSchema = "fake.schema";
String tableName = "fake.table";
when(mockSchemaPlus.getSubSchema(subSchema)).thenReturn(innerSchemaPlus);
when(innerSchemaPlus.getTable(tableName)).thenReturn(mockTable);
Table table =
TableResolution.resolveCalciteTable(mockSchemaPlus, ImmutableList.of(subSchema, tableName));
assertThat(table, Matchers.is(mockTable));
}
|
@Override
public boolean supportsDataDefinitionAndDataManipulationTransactions() {
return false;
}
|
@Test
void assertSupportsDataDefinitionAndDataManipulationTransactions() {
assertFalse(metaData.supportsDataDefinitionAndDataManipulationTransactions());
}
|
@Override
public void updateLevel(MemberLevelUpdateReqVO updateReqVO) {
// Validate that the level exists
validateLevelExists(updateReqVO.getId());
// Validate that the configuration is valid
validateConfigValid(updateReqVO.getId(), updateReqVO.getName(), updateReqVO.getLevel(), updateReqVO.getExperience());
// Update
MemberLevelDO updateObj = MemberLevelConvert.INSTANCE.convert(updateReqVO);
memberLevelMapper.updateById(updateObj);
}
|
@Test
public void testUpdateLevel_notExists() {
// Prepare parameters
MemberLevelUpdateReqVO reqVO = randomPojo(MemberLevelUpdateReqVO.class);
// Invoke and assert the exception
assertServiceException(() -> levelService.updateLevel(reqVO), LEVEL_NOT_EXISTS);
}
|
@Override
public String getName() {
return "browse_web";
}
|
@Test
void testGetName() {
assertEquals("browse_web", rawBrowserAction.getName());
}
|
public static boolean isBearerToken(final String authorizationHeader) {
return StringUtils.hasText(authorizationHeader) &&
authorizationHeader.startsWith(TOKEN_PREFIX);
}
|
@Test
void testIsBearerToken_WithEmptyHeader() {
// Given
String authorizationHeader = "";
// When
boolean result = Token.isBearerToken(authorizationHeader);
// Then
assertFalse(result);
}
|
public static IRubyObject deep(final Ruby runtime, final Object input) {
if (input == null) {
return runtime.getNil();
}
final Class<?> cls = input.getClass();
final Rubyfier.Converter converter = CONVERTER_MAP.get(cls);
if (converter != null) {
return converter.convert(runtime, input);
}
return fallbackConvert(runtime, input, cls);
}
|
@Test
public void testDeepListWithFloat() throws Exception {
List<Float> data = new ArrayList<>();
data.add(1.0F);
@SuppressWarnings("rawtypes")
RubyArray rubyArray = (RubyArray)Rubyfier.deep(RubyUtil.RUBY, data);
// toJavaArray does not convert inner elements to Java types
assertEquals(RubyFloat.class, rubyArray.toJavaArray()[0].getClass());
assertEquals(1.0D, ((RubyFloat)rubyArray.toJavaArray()[0]).getDoubleValue(), 0);
}
|
public static boolean isUp(NetworkInterface ifc) {
try {
return ifc.isUp();
} catch (SocketException e) {
LOG.debug("Network interface can not get isUp, exception: ", e);
}
return false;
}
|
@Test
void testisUp() throws SocketException {
NetworkInterface nic = mock(NetworkInterface.class);
when(nic.isUp()).thenReturn(true);
assertTrue(InetUtils.isUp(nic));
when(nic.isUp()).thenReturn(false);
assertFalse(InetUtils.isUp(nic));
when(nic.isUp()).thenThrow(new SocketException());
assertFalse(InetUtils.isUp(nic));
}
|
public MeanStatistic copy() {
return new MeanStatistic(this);
}
|
@Test
public void testCopyNonEmpty() throws Throwable {
MeanStatistic stat = tenFromOne.copy();
Assertions.assertThat(stat)
.describedAs("copy of " + tenFromOne)
.isEqualTo(tenFromOne)
.isNotSameAs(tenFromOne);
}
|
@VisibleForTesting
static byte[] padBigEndianBytes(byte[] bigEndianBytes, int newLength) {
if (bigEndianBytes.length == newLength) {
return bigEndianBytes;
} else if (bigEndianBytes.length < newLength) {
byte[] result = new byte[newLength];
if (bigEndianBytes.length == 0) {
return result;
}
int start = newLength - bigEndianBytes.length;
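// Sign-extend: a negative two's-complement value is padded with 0xFF bytes rather than zeros.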
if (bigEndianBytes[0] < 0) {
Arrays.fill(result, 0, start, (byte) 0xFF);
}
System.arraycopy(bigEndianBytes, 0, result, start, bigEndianBytes.length);
return result;
}
throw new IllegalArgumentException(
String.format(
"Buffer size of %d is larger than requested size of %d",
bigEndianBytes.length, newLength));
}
|
@Test
public void testPadBigEndianBytesOverflow() {
byte[] bytes = new byte[17];
assertThatThrownBy(() -> DecimalVectorUtil.padBigEndianBytes(bytes, 16))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Buffer size of 17 is larger than requested size of 16");
}
|
@Override
public void report(final SortedMap<MetricName, Gauge> gauges, final SortedMap<MetricName, Counter> counters,
final SortedMap<MetricName, Histogram> histograms, final SortedMap<MetricName, Meter> meters, final SortedMap<MetricName, Timer> timers) {
final long now = System.currentTimeMillis();
if(logger.isDebugEnabled()) logger.debug("InfluxDbReporter report is called with counter size " + counters.size());
try {
influxDb.flush();
for (Map.Entry<MetricName, Gauge> entry : gauges.entrySet()) {
reportGauge(entry.getKey(), entry.getValue(), now);
}
for (Map.Entry<MetricName, Counter> entry : counters.entrySet()) {
reportCounter(entry.getKey(), entry.getValue(), now);
}
for (Map.Entry<MetricName, Histogram> entry : histograms.entrySet()) {
reportHistogram(entry.getKey(), entry.getValue(), now);
}
for (Map.Entry<MetricName, Meter> entry : meters.entrySet()) {
reportMeter(entry.getKey(), entry.getValue(), now);
}
for (Map.Entry<MetricName, Timer> entry : timers.entrySet()) {
reportTimer(entry.getKey(), entry.getValue(), now);
}
if (influxDb.hasSeriesData()) {
influxDb.writeData();
}
// reset counters
for (Map.Entry<MetricName, Counter> entry : counters.entrySet()) {
Counter counter = entry.getValue();
long count = counter.getCount();
counter.dec(count);
}
} catch (Exception e) {
logger.error("Unable to report to InfluxDB. Discarding data.", e);
}
}
|
@Test
public void reportsTimers() throws Exception {
final Timer timer = mock(Timer.class);
when(timer.getCount()).thenReturn(1L);
when(timer.getMeanRate()).thenReturn(2.0);
when(timer.getOneMinuteRate()).thenReturn(3.0);
when(timer.getFiveMinuteRate()).thenReturn(4.0);
when(timer.getFifteenMinuteRate()).thenReturn(5.0);
final Snapshot snapshot = mock(Snapshot.class);
when(snapshot.getMin()).thenReturn(TimeUnit.MILLISECONDS.toNanos(100));
when(snapshot.getMean()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(200));
when(snapshot.getMax()).thenReturn(TimeUnit.MILLISECONDS.toNanos(300));
when(snapshot.getStdDev()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(400));
when(snapshot.getMedian()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(500));
when(snapshot.get75thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(600));
when(snapshot.get95thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(700));
when(snapshot.get98thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(800));
when(snapshot.get99thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(900));
when(snapshot.get999thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(1000));
when(timer.getSnapshot()).thenReturn(snapshot);
reporter.report(this.map(), this.map(), this.map(), this.map(), map("timer", timer));
final ArgumentCaptor<InfluxDbPoint> influxDbPointCaptor = ArgumentCaptor.forClass(InfluxDbPoint.class);
Mockito.verify(influxDb, atLeastOnce()).appendPoints(influxDbPointCaptor.capture());
InfluxDbPoint point = influxDbPointCaptor.getValue();
/*
assertThat(point.getMeasurement()).isEqualTo("timer");
assertThat(point.getFields()).isNotEmpty();
assertThat(point.getFields()).hasSize(17);
assertThat(point.getFields()).contains(entry("count", 1L));
assertThat(point.getFields()).contains(entry("mean-rate", 2.0));
assertThat(point.getFields()).contains(entry("one-minute", 3.0));
assertThat(point.getFields()).contains(entry("five-minute", 4.0));
assertThat(point.getFields()).contains(entry("fifteen-minute", 5.0));
assertThat(point.getFields()).contains(entry("min", 100.0));
assertThat(point.getFields()).contains(entry("mean", 200.0));
assertThat(point.getFields()).contains(entry("max", 300.0));
assertThat(point.getFields()).contains(entry("std-dev", 400.0));
assertThat(point.getFields()).contains(entry("median", 500.0));
assertThat(point.getFields()).contains(entry("75-percentile", 600.0));
assertThat(point.getFields()).contains(entry("95-percentile", 700.0));
assertThat(point.getFields()).contains(entry("98-percentile", 800.0));
assertThat(point.getFields()).contains(entry("99-percentile", 900.0));
assertThat(point.getFields()).contains(entry("999-percentile", 1000.0));
*/
}
|
protected String convertHeaderValueToString(Exchange exchange, Object headerValue) {
if ((headerValue instanceof Date || headerValue instanceof Locale)
&& convertDateAndLocaleLocally(exchange)) {
if (headerValue instanceof Date) {
return toHttpDate((Date) headerValue);
} else {
return toHttpLanguage((Locale) headerValue);
}
} else {
return exchange.getContext().getTypeConverter().convertTo(String.class, headerValue);
}
}
|
@Test
public void testConvertLocaleTypeConverter() {
DefaultHttpBinding binding = new DefaultHttpBinding();
Locale l = Locale.SIMPLIFIED_CHINESE;
Exchange exchange = super.createExchangeWithBody(null);
exchange.setProperty(DefaultHttpBinding.DATE_LOCALE_CONVERSION, false);
String value = binding.convertHeaderValueToString(exchange, l);
assertEquals(value, l.toString());
}
|
public Timestamp insert(PartitionMetadata row) {
final TransactionResult<Void> transactionResult =
runInTransaction(transaction -> transaction.insert(row), "InsertsPartitionMetadata");
return transactionResult.getCommitTimestamp();
}
|
@Test
public void testInTransactionContextInsert() {
ArgumentCaptor<ImmutableList<Mutation>> mutations =
ArgumentCaptor.forClass(ImmutableList.class);
doNothing().when(transaction).buffer(mutations.capture());
assertNull(inTransactionContext.insert(ROW));
assertEquals(1, mutations.getValue().size());
Map<String, Value> mutationValueMap = mutations.getValue().iterator().next().asMap();
assertEquals(
ROW.getPartitionToken(),
mutationValueMap.get(PartitionMetadataAdminDao.COLUMN_PARTITION_TOKEN).getString());
assertEquals(
ImmutableList.of(PARENT_TOKEN),
mutationValueMap.get(PartitionMetadataAdminDao.COLUMN_PARENT_TOKENS).getStringArray());
assertEquals(
ROW.getStartTimestamp(),
mutationValueMap.get(PartitionMetadataAdminDao.COLUMN_START_TIMESTAMP).getTimestamp());
assertEquals(
ROW.getEndTimestamp(),
mutationValueMap.get(PartitionMetadataAdminDao.COLUMN_END_TIMESTAMP).getTimestamp());
assertEquals(
ROW.getHeartbeatMillis(),
mutationValueMap.get(PartitionMetadataAdminDao.COLUMN_HEARTBEAT_MILLIS).getInt64());
assertEquals(
ROW.getState().toString(),
mutationValueMap.get(PartitionMetadataAdminDao.COLUMN_STATE).getString());
assertEquals(
ROW.getWatermark(),
mutationValueMap.get(PartitionMetadataAdminDao.COLUMN_WATERMARK).getTimestamp());
}
|
protected int calculateDegree(Graph graph, Node n) {
return graph.getDegree(n);
}
|
@Test
public void testCompleteGraphDegree() {
GraphModel graphModel = GraphGenerator.generateCompleteUndirectedGraph(5);
Graph graph = graphModel.getGraph();
Node n = graph.getNode("2");
Degree d = new Degree();
int degree = d.calculateDegree(graph, n);
assertEquals(degree, 4);
}
|
public static org.springframework.messaging.Message convertToSpringMessage(
org.apache.rocketmq.common.message.MessageExt message) {
MessageBuilder messageBuilder =
MessageBuilder.withPayload(message.getBody()).
setHeader(toRocketHeaderKey(RocketMQHeaders.KEYS), message.getKeys()).
setHeader(toRocketHeaderKey(RocketMQHeaders.TAGS), message.getTags()).
setHeader(toRocketHeaderKey(RocketMQHeaders.TOPIC), message.getTopic()).
setHeader(toRocketHeaderKey(RocketMQHeaders.MESSAGE_ID), message.getMsgId()).
setHeader(toRocketHeaderKey(RocketMQHeaders.BORN_TIMESTAMP), message.getBornTimestamp()).
setHeader(toRocketHeaderKey(RocketMQHeaders.BORN_HOST), message.getBornHostString()).
setHeader(toRocketHeaderKey(RocketMQHeaders.FLAG), message.getFlag()).
setHeader(toRocketHeaderKey(RocketMQHeaders.QUEUE_ID), message.getQueueId()).
setHeader(toRocketHeaderKey(RocketMQHeaders.SYS_FLAG), message.getSysFlag()).
setHeader(toRocketHeaderKey(RocketMQHeaders.TRANSACTION_ID), message.getTransactionId()).
setHeader(toRocketHeaderKey(RocketMQHeaders.DELAY), message.getDelayTimeLevel()).
setHeader(toRocketHeaderKey(RocketMQHeaders.WAIT), message.isWaitStoreMsgOK());
addUserProperties(message.getProperties(), messageBuilder);
return messageBuilder.build();
}
|
@Test
public void testConvertToSpringMessage() {
org.apache.rocketmq.common.message.MessageExt rocketMsg = new org.apache.rocketmq.common.message.MessageExt();
rocketMsg.setTopic("test");
rocketMsg.setBody("test".getBytes());
rocketMsg.setTags("tagA");
rocketMsg.setKeys("key1");
Message message = RocketMQUtil.convertToSpringMessage(rocketMsg);
assertEquals("test", message.getHeaders().get(toRocketHeaderKey(RocketMQHeaders.TOPIC)));
assertEquals("tagA", message.getHeaders().get(toRocketHeaderKey(RocketMQHeaders.TAGS)));
assertEquals("key1", message.getHeaders().get(toRocketHeaderKey(RocketMQHeaders.KEYS)));
}
|
Collection<OutputFile> compile() {
List<OutputFile> out = new ArrayList<>(queue.size() + 1);
for (Schema schema : queue) {
out.add(compile(schema));
}
if (protocol != null) {
out.add(compileInterface(protocol));
}
return out;
}
|
@Test
void maxValidParameterCounts() throws Exception {
Schema validSchema1 = createSampleRecordSchema(SpecificCompiler.MAX_FIELD_PARAMETER_UNIT_COUNT, 0);
assertCompilesWithJavaCompiler(new File(OUTPUT_DIR, "testMaxValidParameterCounts1"),
new SpecificCompiler(validSchema1).compile());
Schema validSchema2 = createSampleRecordSchema(SpecificCompiler.MAX_FIELD_PARAMETER_UNIT_COUNT - 2, 1);
assertCompilesWithJavaCompiler(new File(OUTPUT_DIR, "testMaxValidParameterCounts2"),
new SpecificCompiler(validSchema2).compile());
}
|
@Override
public Path getPathForLocalization(LocalResourceRequest req,
Path localDirPath, DeletionService delService) {
Path rPath = localDirPath;
if (useLocalCacheDirectoryManager && localDirPath != null) {
if (!directoryManagers.containsKey(localDirPath)) {
directoryManagers.putIfAbsent(localDirPath,
new LocalCacheDirectoryManager(conf));
}
LocalCacheDirectoryManager dir = directoryManagers.get(localDirPath);
rPath = localDirPath;
String hierarchicalPath = dir.getRelativePathForLocalization();
// For most scenarios we will get only the root path, which
// is an empty string
if (!hierarchicalPath.isEmpty()) {
rPath = new Path(localDirPath, hierarchicalPath);
}
inProgressLocalResourcesMap.put(req, rPath);
}
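// Probe numbered sub-directories until an unused one is found; a leftover directory from a
// previous run is handed to the DeletionService and the next number is tried.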
while (true) {
Path uniquePath = new Path(rPath,
Long.toString(uniqueNumberGenerator.incrementAndGet()));
File file = new File(uniquePath.toUri().getRawPath());
if (!file.exists()) {
rPath = uniquePath;
break;
}
// If the directory already exists, delete it and move to the next one.
LOG.warn("Directory " + uniquePath + " already exists, " +
"try next one.");
if (delService != null) {
FileDeletionTask deletionTask = new FileDeletionTask(delService,
getUser(), uniquePath, null);
delService.delete(deletionTask);
}
}
Path localPath = new Path(rPath, req.getPath().getName());
LocalizedResource rsrc = localrsrc.get(req);
if (rsrc == null) {
LOG.warn("Resource " + req + " has been removed"
+ " and will no longer be localized");
return null;
}
rsrc.setLocalPath(localPath);
LocalResource lr = LocalResource.newInstance(req.getResource(),
req.getType(), req.getVisibility(), req.getSize(),
req.getTimestamp());
try {
stateStore.startResourceLocalization(user, appId,
((LocalResourcePBImpl) lr).getProto(), localPath);
} catch (IOException e) {
LOG.error("Unable to record localization start for " + rsrc, e);
}
return rPath;
}
|
@Test
@SuppressWarnings("unchecked")
public void testGetPathForLocalization() throws Exception {
FileContext lfs = FileContext.getLocalFSFileContext();
Path base_path = new Path("target",
TestLocalResourcesTrackerImpl.class.getSimpleName());
final String user = "someuser";
final ApplicationId appId = ApplicationId.newInstance(1, 1);
Configuration conf = new YarnConfiguration();
DrainDispatcher dispatcher = null;
dispatcher = createDispatcher(conf);
EventHandler<LocalizerEvent> localizerEventHandler =
mock(EventHandler.class);
EventHandler<LocalizerEvent> containerEventHandler =
mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class, localizerEventHandler);
dispatcher.register(ContainerEventType.class, containerEventHandler);
NMStateStoreService stateStore = mock(NMStateStoreService.class);
DeletionService delService = mock(DeletionService.class);
try {
LocalResourceRequest req1 = createLocalResourceRequest(user, 1, 1,
LocalResourceVisibility.PUBLIC);
LocalizedResource lr1 = createLocalizedResource(req1, dispatcher);
ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc =
new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
localrsrc.put(req1, lr1);
LocalResourcesTrackerImpl tracker = new LocalResourcesTrackerImpl(user,
appId, dispatcher, localrsrc, true, conf, stateStore, null);
Path conflictPath = new Path(base_path, "10");
Path qualifiedConflictPath = lfs.makeQualified(conflictPath);
lfs.mkdir(qualifiedConflictPath, null, true);
Path rPath = tracker.getPathForLocalization(req1, base_path,
delService);
Assert.assertFalse(lfs.util().exists(rPath));
verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
delService, user, conflictPath, null)));
} finally {
lfs.delete(base_path, true);
if (dispatcher != null) {
dispatcher.stop();
}
}
}
|
public void sendTestEmail(String toAddress, String subject, String message) throws EmailException {
try {
EmailMessage emailMessage = new EmailMessage();
emailMessage.setTo(toAddress);
emailMessage.setSubject(subject);
emailMessage.setPlainTextMessage(message + getServerBaseUrlFooter());
send(emailMessage);
} catch (EmailException e) {
LOG.debug("Fail to send test email to {}: {}", toAddress, e);
throw e;
}
}
|
@Test
public void shouldThrowAnExceptionWhenUnableToSendTestEmail() {
configure();
smtpServer.stop();
try {
underTest.sendTestEmail("user@nowhere", "Test Message from SonarQube", "This is a test message from SonarQube.");
fail();
} catch (EmailException e) {
// expected
}
}
|
public Builder newBuilder() {
return new Builder();
}
|
@Test
public void testDoubleBuild() {
FetchSessionHandler handler = new FetchSessionHandler(LOG_CONTEXT, 1);
FetchSessionHandler.Builder builder = handler.newBuilder();
builder.add(new TopicPartition("foo", 0),
new FetchRequest.PartitionData(Uuid.randomUuid(), 0, 100, 200, Optional.empty()));
builder.build();
assertThrows(Throwable.class, builder::build, "Expected calling build twice to fail.");
}
|
public static void main(String[] args) throws Throwable {
if (!parseInputArgs(args)) {
usage();
System.exit(EXIT_FAILED);
}
if (sHelp) {
usage();
System.exit(EXIT_SUCCEEDED);
}
try {
dumpJournal();
} catch (Exception exc) {
System.out.printf("Journal tool failed: %s%n", exc);
}
}
|
@Test
public void defaultJournalDir() throws Throwable {
JournalTool.main(new String[0]);
String inputUri = Whitebox.getInternalState(JournalTool.class, "sInputDir");
Assert.assertEquals(Configuration.get(PropertyKey.MASTER_JOURNAL_FOLDER), inputUri);
}
|
@Override public boolean dropTable(String catName, String dbName, String tblName)
throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
boolean succ = rawStore.dropTable(catName, dbName, tblName);
// in case of event based cache update, cache will be updated during commit.
if (succ && !canUseEvents) {
catName = normalizeIdentifier(catName);
dbName = normalizeIdentifier(dbName);
tblName = normalizeIdentifier(tblName);
if (!shouldCacheTable(catName, dbName, tblName)) {
return succ;
}
sharedCache.removeTableFromCache(catName, dbName, tblName);
}
return succ;
}
|
@Test public void testDropTable() throws Exception {
Configuration conf = MetastoreConf.newMetastoreConf();
MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb");
MetaStoreTestUtils.setConfForStandloneMode(conf);
CachedStore cachedStore = new CachedStore();
CachedStore.clearSharedCache();
cachedStore.setConfForTest(conf);
ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore();
// Prewarm CachedStore
CachedStore.setCachePrewarmedState(false);
CachedStore.prewarm(objectStore);
List<String> db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName());
Assert.assertEquals(2, db1Tables.size());
List<String> db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName());
Assert.assertEquals(2, db2Tables.size());
// Drop table db1Utbl1 via CachedStore and read via ObjectStore
Table db1Utbl1Read = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName());
cachedStore.dropTable(DEFAULT_CATALOG_NAME, db1Utbl1Read.getDbName(), db1Utbl1Read.getTableName());
db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName());
Assert.assertEquals(1, db1Tables.size());
Table db1Utbl1ReadOS =
objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1Read.getDbName(), db1Utbl1Read.getTableName());
Assert.assertNull(db1Utbl1ReadOS);
// Drop table db2Utbl1 via ObjectStore and read via CachedStore
Table db2Utbl1Read = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName());
objectStore.dropTable(DEFAULT_CATALOG_NAME, db2Utbl1Read.getDbName(), db2Utbl1Read.getTableName());
db2Tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName());
Assert.assertEquals(1, db2Tables.size());
updateCache(cachedStore);
db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName());
Assert.assertEquals(1, db2Tables.size());
Table db2Utbl1ReadCS =
cachedStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1Read.getDbName(), db2Utbl1Read.getTableName());
Assert.assertNull(db2Utbl1ReadCS);
cachedStore.shutdown();
}
|
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
final PiActionProfileGroup other = (PiActionProfileGroup) obj;
return Objects.equal(this.groupId, other.groupId)
&& Objects.equal(this.members, other.members)
// FIXME: re-enable when this PI bug will be fixed:
// https://github.com/p4lang/PI/issues/452
// Currently PI-based devices always return max_group_size 0,
// even if we set a different one.
// && Objects.equal(this.maxSize, other.maxSize)
&& Objects.equal(this.actionProfileId, other.actionProfileId);
}
|
@Test
public void testEquals() {
new EqualsTester()
.addEqualityGroup(group1, sameAsGroup1, sameAsGroup1NoInstance)
.addEqualityGroup(group2, sameAsGroup2NoInstance)
.addEqualityGroup(asGroup2WithDifferentWeights)
.testEquals();
}
|
@SuppressWarnings({"unchecked", "rawtypes"})
@Override
public Collection<String> doSharding(final Collection<String> availableTargetNames, final Collection<ShardingConditionValue> shardingConditionValues,
final DataNodeInfo dataNodeInfo, final ConfigurationProperties props) {
ListShardingConditionValue<?> shardingValue = (ListShardingConditionValue) shardingConditionValues.iterator().next();
Collection<String> shardingResult = shardingAlgorithm.doSharding(availableTargetNames,
new HintShardingValue(shardingValue.getTableName(), shardingValue.getColumnName(), shardingValue.getValues()));
Collection<String> result = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
result.addAll(shardingResult);
return result;
}
|
@Test
void assertDoSharding() {
Collection<String> targets = new HashSet<>(Arrays.asList("1", "2", "3"));
HintShardingStrategy hintShardingStrategy = new HintShardingStrategy(new CoreHintShardingAlgorithmFixture());
DataNodeInfo dataNodeInfo = new DataNodeInfo("logicTable_", 1, '0');
Collection<String> actualSharding = hintShardingStrategy.doSharding(targets, Collections.singletonList(
new ListShardingConditionValue<>("column", "logicTable", Collections.singletonList(1))), dataNodeInfo, new ConfigurationProperties(new Properties()));
assertThat(actualSharding.size(), is(1));
assertThat(actualSharding.iterator().next(), is("1"));
}
|
@Override
protected void addTargetDataListener(String path, Curator5ZookeeperClient.NodeCacheListenerImpl nodeCacheListener) {
this.addTargetDataListener(path, nodeCacheListener, null);
}
|
@Test
void testAddTargetDataListener() throws Exception {
String listenerPath = "/dubbo/service.name/configuration";
String path = listenerPath + "/dat/data";
String value = "vav";
curatorClient.createOrUpdate(path + "/d.json", value, true);
String valueFromCache = curatorClient.getContent(path + "/d.json");
Assertions.assertEquals(value, valueFromCache);
final AtomicInteger atomicInteger = new AtomicInteger(0);
curatorClient.addTargetDataListener(path + "/d.json", new Curator5ZookeeperClient.NodeCacheListenerImpl() {
@Override
public void nodeChanged() {
atomicInteger.incrementAndGet();
}
});
valueFromCache = curatorClient.getContent(path + "/d.json");
Assertions.assertNotNull(valueFromCache);
int currentCount1 = atomicInteger.get();
curatorClient.getClient().setData().forPath(path + "/d.json", "foo".getBytes());
await().until(() -> atomicInteger.get() > currentCount1);
int currentCount2 = atomicInteger.get();
curatorClient.getClient().setData().forPath(path + "/d.json", "bar".getBytes());
await().until(() -> atomicInteger.get() > currentCount2);
int currentCount3 = atomicInteger.get();
curatorClient.delete(path + "/d.json");
valueFromCache = curatorClient.getContent(path + "/d.json");
Assertions.assertNull(valueFromCache);
await().until(() -> atomicInteger.get() > currentCount3);
}
|
public static <T> Iterator<T> skipFirst(Iterator<T> iterator, @Nonnull Predicate<? super T> predicate) {
checkNotNull(iterator, "iterator cannot be null.");
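// Drop leading elements that do not satisfy the predicate; the returned iterator starts at the
// first matching element (re-attached via prepend) or is exhausted if none match.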
while (iterator.hasNext()) {
T object = iterator.next();
if (!predicate.test(object)) {
continue;
}
return prepend(object, iterator);
}
return iterator;
}
|
@Test
public void skipFirstEmptyCollection() {
var actual = IterableUtil.skipFirst(Collections.emptyIterator(), v -> false);
assertIteratorsEquals(Collections.emptyList(), actual);
}
|
public static DistributionData singleton(long value) {
return create(value, 1, value, value);
}
|
@Test
public void testSingleton() {
DistributionData data = DistributionData.singleton(5);
assertEquals(5, data.sum());
assertEquals(1, data.count());
assertEquals(5, data.min());
assertEquals(5, data.max());
}
|
public static Builder builder() {
return new Builder();
}
|
@Test
void testPrimaryKeyNoColumn() {
assertThatThrownBy(
() ->
TableSchema.builder()
.field("f0", DataTypes.BIGINT().notNull())
.primaryKey("pk", new String[] {"f0", "f2"})
.build())
.isInstanceOf(ValidationException.class)
.hasMessage("Could not create a PRIMARY KEY 'pk'. Column 'f2' does not exist.");
}
|
@Override
public RLock writeLock() {
return new RedissonWriteLock(commandExecutor, getName());
}
|
@Test
public void testWriteLockExpiration() throws InterruptedException {
RReadWriteLock rw1 = redisson.getReadWriteLock("test2s3");
RLock l1 = rw1.writeLock();
assertThat(l1.tryLock(10000, 10000, TimeUnit.MILLISECONDS)).isTrue();
RLock l2 = rw1.writeLock();
assertThat(l2.tryLock(1000, 1000, TimeUnit.MILLISECONDS)).isTrue();
await().atMost(Duration.ofSeconds(10)).until(() -> {
RReadWriteLock rw2 = redisson.getReadWriteLock("test2s3");
try {
return !rw2.writeLock().tryLock(3000, 1000, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
e.printStackTrace();
return false;
}
});
}
|
public @Nullable String formatDiff(A actual, E expected) {
return null;
}
|
@Test
public void testFrom_formatDiff() {
assertThat(STRING_PREFIX_EQUALITY.formatDiff("foo", "foot")).isNull();
}
|
public Object getDynamicAttr(String dynamicAttrKey) {
return dynamicAttrs.get(dynamicAttrKey);
}
|
@Test
public void testGetDynamicAttr() {
ProviderInfo providerInfo = new ProviderInfo();
providerInfo.setDynamicAttr("timeout", 1);
Assert.assertEquals("1", providerInfo.getAttr("timeout"));
}
|
public void clear() {
// while ( removeFirst() != null ) {
// }
this.firstNode = null;
this.lastNode = null;
size = 0;
}
|
@Test
public void testClear() {
this.list.add( this.node1 );
this.list.add( this.node2 );
this.list.add( this.node3 );
assertThat(this.list.size()).as("List size should be 3").isEqualTo(3);
this.list.clear();
assertThat(this.list.size()).as("Empty list should have size 0").isEqualTo(0);
}
|
@Override
public String getDataSource() {
return DataSourceConstant.DERBY;
}
|
@Test
void testGetDataSource() {
String sql = configInfoMapperByDerby.getDataSource();
assertEquals(DataSourceConstant.DERBY, sql);
}
|
public static <T> Write<T> write(String jdbcUrl, String table) {
return new AutoValue_ClickHouseIO_Write.Builder<T>()
.jdbcUrl(jdbcUrl)
.table(table)
.properties(new Properties())
.maxInsertBlockSize(DEFAULT_MAX_INSERT_BLOCK_SIZE)
.initialBackoff(DEFAULT_INITIAL_BACKOFF)
.maxRetries(DEFAULT_MAX_RETRIES)
.maxCumulativeBackoff(DEFAULT_MAX_CUMULATIVE_BACKOFF)
.build()
.withInsertDeduplicate(true)
.withInsertDistributedSync(true);
}
|
@Test
public void testNullableInt64() throws Exception {
Schema schema = Schema.of(Schema.Field.nullable("f0", FieldType.INT64));
Row row1 = Row.withSchema(schema).addValue(1L).build();
Row row2 = Row.withSchema(schema).addValue(null).build();
Row row3 = Row.withSchema(schema).addValue(3L).build();
executeSql("CREATE TABLE test_nullable_int64 (f0 Nullable(Int64)) ENGINE=Log");
pipeline
.apply(Create.of(row1, row2, row3).withRowSchema(schema))
.apply(write("test_nullable_int64"));
pipeline.run().waitUntilFinish();
long sum = executeQueryAsLong("SELECT SUM(f0) FROM test_nullable_int64");
long count0 = executeQueryAsLong("SELECT COUNT(*) FROM test_nullable_int64");
long count1 = executeQueryAsLong("SELECT COUNT(f0) FROM test_nullable_int64");
assertEquals(4L, sum);
assertEquals(3L, count0);
assertEquals(2L, count1);
}
|
public static float normInf(float[] x) {
int n = x.length;
float f = abs(x[0]);
for (int i = 1; i < n; i++) {
f = Math.max(f, abs(x[i]));
}
return f;
}
|
@Test
public void testNormInf_doubleArr() {
System.out.println("normInf");
double[] x = {-2.1968219, -0.9559913, -0.0431738, 1.0567679, 0.3853515};
assertEquals(2.196822, MathEx.normInf(x), 1E-6);
}
|
@Override
public CheckResult runCheck() {
try {
final String filter = buildQueryFilter(stream.getId(), query);
// TODO we don't support cardinality yet
final FieldStatsResult fieldStatsResult = searches.fieldStats(field, "*", filter,
RelativeRange.create(time * 60), false, true, false);
if (fieldStatsResult.count() == 0) {
LOG.debug("Alert check <{}> did not match any messages. Returning not triggered.", type);
return new NegativeCheckResult();
}
final double result;
switch (type) {
case MEAN:
result = fieldStatsResult.mean();
break;
case MIN:
result = fieldStatsResult.min();
break;
case MAX:
result = fieldStatsResult.max();
break;
case SUM:
result = fieldStatsResult.sum();
break;
case STDDEV:
result = fieldStatsResult.stdDeviation();
break;
default:
LOG.error("No such field value check type: [{}]. Returning not triggered.", type);
return new NegativeCheckResult();
}
LOG.debug("Alert check <{}> result: [{}]", id, result);
if (Double.isInfinite(result)) {
// This happens when there are no ES results/docs.
LOG.debug("Infinite value. Returning not triggered.");
return new NegativeCheckResult();
}
final boolean triggered;
switch (thresholdType) {
case HIGHER:
triggered = result > threshold.doubleValue();
break;
case LOWER:
triggered = result < threshold.doubleValue();
break;
default:
triggered = false;
}
if (triggered) {
final String resultDescription = "Field " + field + " had a " + type + " of "
+ decimalFormat.format(result) + " in the last " + time + " minutes with trigger condition "
+ thresholdType + " than " + decimalFormat.format(threshold) + ". "
+ "(Current grace time: " + grace + " minutes)";
final List<MessageSummary> summaries;
if (getBacklog() > 0) {
final List<ResultMessage> searchResult = fieldStatsResult.searchHits();
summaries = Lists.newArrayListWithCapacity(searchResult.size());
for (ResultMessage resultMessage : searchResult) {
final Message msg = resultMessage.getMessage();
summaries.add(new MessageSummary(resultMessage.getIndex(), msg));
}
} else {
summaries = Collections.emptyList();
}
return new CheckResult(true, this, resultDescription, Tools.nowUTC(), summaries);
} else {
return new NegativeCheckResult();
}
} catch (InvalidRangeParametersException e) {
// cannot happen lol
LOG.error("Invalid timerange.", e);
return null;
} catch (FieldTypeException e) {
LOG.debug("Field [{}] seems not to have a numerical type or doesn't even exist at all. Returning not triggered.", field, e);
return new NegativeCheckResult();
}
}
|
@Test
public void testRunCheckHigherPositive() throws Exception {
for (FieldValueAlertCondition.CheckType checkType : FieldValueAlertCondition.CheckType.values()) {
final double threshold = 50.0;
final double higherThanThreshold = threshold + 10;
final FieldValueAlertCondition fieldValueAlertCondition = getTestInstance(FieldValueAlertCondition.class,
getParametersMap(0, 0, FieldValueAlertCondition.ThresholdType.HIGHER, checkType, threshold, "response_time"),
alertConditionTitle);
fieldStatsShouldReturn(getFieldStatsResult(checkType, higherThanThreshold));
AlertCondition.CheckResult result = fieldValueAlertCondition.runCheck();
assertTriggered(fieldValueAlertCondition, result);
}
}
|
public static MongoSourceConfig load(String yamlFile) throws IOException {
final ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
final MongoSourceConfig cfg = mapper.readValue(new File(yamlFile), MongoSourceConfig.class);
return cfg;
}
|
@Test
public void testLoadMapConfig() throws IOException {
final Map<String, Object> configMap = TestHelper.createCommonConfigMap();
TestHelper.putSyncType(configMap, TestHelper.SYNC_TYPE);
SourceContext sourceContext = Mockito.mock(SourceContext.class);
final MongoSourceConfig cfg = MongoSourceConfig.load(configMap, sourceContext);
assertEquals(cfg.getMongoUri(), TestHelper.URI);
assertEquals(cfg.getDatabase(), TestHelper.DB);
assertEquals(cfg.getCollection(), TestHelper.COLL);
assertEquals(cfg.getSyncType(), TestHelper.SYNC_TYPE);
assertEquals(cfg.getBatchSize(), TestHelper.BATCH_SIZE);
assertEquals(cfg.getBatchTimeMs(), TestHelper.BATCH_TIME);
}
|
private EmbeddedFiles()
{
}
|
@Test
void testEmbeddedFiles() throws IOException
{
String outputFile = "target/test-output/EmbeddedFile.pdf";
String embeddedFile = "target/test-output/Test.txt";
new File("target/test-output").mkdirs();
new File(outputFile).delete();
new File(embeddedFile).delete();
String[] args = { outputFile };
EmbeddedFiles.main(args);
ExtractEmbeddedFiles.main(args);
byte[] bytes = Files.readAllBytes(Paths.get(embeddedFile));
String content = new String(bytes);
Assertions.assertEquals("This is the contents of the embedded file", content);
new File(embeddedFile).delete();
new File(outputFile).delete();
}
|
@Override
public boolean shouldFailover(SortedSet<BrokerStatus> brokerStatus) {
return this.autoFailoverPolicy.shouldFailoverToSecondary(brokerStatus);
}
|
@Test
public void testShouldFailover() throws Exception {
NamespaceIsolationPolicyImpl defaultPolicy = this.getDefaultPolicy();
List<BrokerStatus> brokerStatus = new ArrayList<>();
for (int i = 0; i < 10; i++) {
BrokerStatus status = BrokerStatus.builder()
.brokerAddress(String.format("broker-%s", i))
.active(true)
.loadFactor(i * 10)
.build();
brokerStatus.add(status);
}
assertFalse(defaultPolicy.shouldFailover(new TreeSet<>(brokerStatus)));
for (int i = 0; i < 8; i++) {
brokerStatus.set(i, BrokerStatus.builder()
.brokerAddress(brokerStatus.get(i).getBrokerAddress())
.active(false)
.loadFactor(brokerStatus.get(i).getLoadFactor())
.build());
}
assertTrue(defaultPolicy.shouldFailover(new TreeSet<>(brokerStatus)));
brokerStatus.set(7, BrokerStatus.builder()
.brokerAddress(brokerStatus.get(7).getBrokerAddress())
.active(true)
.loadFactor(brokerStatus.get(7).getLoadFactor())
.build());
assertTrue(defaultPolicy.shouldFailover(new TreeSet<>(brokerStatus)));
brokerStatus.set(9, BrokerStatus.builder()
.brokerAddress(brokerStatus.get(9).getBrokerAddress())
.active(brokerStatus.get(9).isActive())
.loadFactor(80)
.build());
assertFalse(defaultPolicy.shouldFailover(new TreeSet<>(brokerStatus)));
brokerStatus = new ArrayList<>();
for (int i = 0; i < 5; i++) {
BrokerStatus status = BrokerStatus.builder().brokerAddress(String.format("broker-%d", 2 * i))
.active(true)
.loadFactor(i * 20)
.build();
brokerStatus.add(status);
status = BrokerStatus.builder()
.brokerAddress(String.format("broker-%d", 2 * i + 1))
.active(true)
.loadFactor(i * 20)
.build();
brokerStatus.add(status);
}
assertEquals(brokerStatus.size(), 10);
}
|
@Override
protected MetaDataRegisterDTO buildMetaDataDTO(final Object bean, @NonNull final ShenyuSpringWebSocketClient webSocketClient, final String path, final Class<?> clazz, final Method method) {
return MetaDataRegisterDTO.builder()
.contextPath(getContextPath())
.appName(getAppName())
.path(UriComponentsBuilder.fromUriString(PathUtils.decoratorPathWithSlash(getContextPath())).build().encode().toUriString())
.rpcType(RpcTypeEnum.WEB_SOCKET.getName())
.enabled(true)
.ruleName(StringUtils.defaultIfBlank(webSocketClient.ruleName(), getContextPath()))
.build();
}
|
@Test
public void testBuildMetaDataDTO() throws NoSuchMethodException {
Method method = mockClass.getClass().getMethod("mockMethod");
MetaDataRegisterDTO metaDataRegisterDTO = eventListener.buildMetaDataDTO(mockClass, annotation, SUPER_PATH, MockClass.class, method);
assertNotNull(metaDataRegisterDTO);
}
|
public int tryClaim(final int msgTypeId, final int length)
{
checkTypeId(msgTypeId);
checkMsgLength(length);
final AtomicBuffer buffer = this.buffer;
final int recordLength = length + HEADER_LENGTH;
final int recordIndex = claimCapacity(buffer, recordLength);
if (INSUFFICIENT_CAPACITY == recordIndex)
{
return recordIndex;
}
buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
MemoryAccess.releaseFence();
buffer.putInt(typeOffset(recordIndex), msgTypeId);
return encodedMsgOffset(recordIndex);
}
|
@Test
void tryClaimReturnsInsufficientCapacityIfThereIsNotEnoughSpaceInTheBufferAfterWrap()
{
final int length = 100;
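// The stubbed tail sits 10 bytes short of the buffer end, so the claim must wrap: it writes a
// 10-byte padding record and advances the tail, but the re-read head position (3) still leaves
// too little room at the start of the buffer for the claimed record, so INSUFFICIENT_CAPACITY
// is returned.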
when(buffer.getLong(HEAD_COUNTER_CACHE_INDEX)).thenReturn(22L);
when(buffer.getLong(TAIL_COUNTER_INDEX)).thenReturn(CAPACITY * 2L - 10);
when(buffer.getLongVolatile(HEAD_COUNTER_INDEX)).thenReturn(CAPACITY + 111L, 3L);
final int index = ringBuffer.tryClaim(MSG_TYPE_ID, length);
assertEquals(INSUFFICIENT_CAPACITY, index);
final InOrder inOrder = inOrder(buffer);
inOrder.verify(buffer).getLong(HEAD_COUNTER_CACHE_INDEX);
inOrder.verify(buffer).getLong(TAIL_COUNTER_INDEX);
inOrder.verify(buffer).getLongVolatile(HEAD_COUNTER_INDEX);
inOrder.verify(buffer).putLong(HEAD_COUNTER_CACHE_INDEX, CAPACITY + 111L);
inOrder.verify(buffer).getLongVolatile(HEAD_COUNTER_INDEX);
inOrder.verify(buffer).putLong(HEAD_COUNTER_CACHE_INDEX, 3L);
inOrder.verify(buffer).putLongOrdered(TAIL_COUNTER_INDEX, CAPACITY * 2L);
final int paddingIndex = CAPACITY - 10;
inOrder.verify(buffer).putLong(0, 0L);
inOrder.verify(buffer).putIntOrdered(lengthOffset(paddingIndex), -10);
inOrder.verify(buffer).putInt(typeOffset(paddingIndex), PADDING_MSG_TYPE_ID);
inOrder.verify(buffer).putIntOrdered(lengthOffset(paddingIndex), 10);
inOrder.verifyNoMoreInteractions();
}
|
static String resolveEc2Endpoint(AwsConfig awsConfig, String region) {
String ec2HostHeader = awsConfig.getHostHeader();
if (isNullOrEmptyAfterTrim(ec2HostHeader)
|| ec2HostHeader.startsWith("ecs")
|| ec2HostHeader.equals("ec2")
) {
ec2HostHeader = DEFAULT_EC2_HOST_HEADER;
}
return ec2HostHeader.replace("ec2.", "ec2." + region + ".");
}
|
@Test
public void resolveEc2Endpoints() {
assertEquals("ec2.us-east-1.amazonaws.com", resolveEc2Endpoint(AwsConfig.builder().build(), "us-east-1"));
assertEquals("ec2.us-east-1.amazonaws.com", resolveEc2Endpoint(AwsConfig.builder().setHostHeader("ecs").build(), "us-east-1"));
assertEquals("ec2.us-east-1.amazonaws.com", resolveEc2Endpoint(AwsConfig.builder().setHostHeader("ec2").build(), "us-east-1"));
assertEquals("ec2.us-east-1.something",
resolveEc2Endpoint(AwsConfig.builder().setHostHeader("ec2.something").build(), "us-east-1"));
}
|
@Override
public boolean filterPath(Path filePath) {
if (getIncludeMatchers().isEmpty() && getExcludeMatchers().isEmpty()) {
return false;
}
// compensate for the fact that Flink paths are slashed
final String path =
filePath.hasWindowsDrive() ? filePath.getPath().substring(1) : filePath.getPath();
final java.nio.file.Path nioPath = Paths.get(path);
for (PathMatcher matcher : getIncludeMatchers()) {
if (matcher.matches(nioPath)) {
return shouldExclude(nioPath);
}
}
return true;
}
|
@Test
void testDoubleStarPattern() {
GlobFilePathFilter matcher =
new GlobFilePathFilter(Collections.singletonList("**"), Collections.emptyList());
assertThat(matcher.filterPath(new Path("a"))).isFalse();
assertThat(matcher.filterPath(new Path("a/b"))).isFalse();
assertThat(matcher.filterPath(new Path("a/b/c"))).isFalse();
}
|
public static TypeDescription convert(Schema schema) {
final TypeDescription root = TypeDescription.createStruct();
final Types.StructType schemaRoot = schema.asStruct();
for (Types.NestedField field : schemaRoot.asStructType().fields()) {
TypeDescription orcColumnType = convert(field.fieldId(), field.type(), field.isRequired());
root.addField(field.name(), orcColumnType);
}
return root;
}
|
@Test
public void testRoundtripConversionPrimitive() {
TypeDescription orcSchema = ORCSchemaUtil.convert(new Schema(SUPPORTED_PRIMITIVES.fields()));
assertThat(ORCSchemaUtil.convert(orcSchema).asStruct()).isEqualTo(SUPPORTED_PRIMITIVES);
}
|
@Override
public void trash(final Local file) throws LocalAccessDeniedException {
if(log.isDebugEnabled()) {
log.debug(String.format("Move %s to Trash", file));
}
final ObjCObjectByReference error = new ObjCObjectByReference();
if(!NSFileManager.defaultManager().trashItemAtURL_resultingItemURL_error(
NSURL.fileURLWithPath(file.getAbsolute()), null, error)) {
final NSError f = error.getValueAs(NSError.class);
if(null == f) {
throw new LocalAccessDeniedException(file.getAbsolute());
}
throw new LocalAccessDeniedException(String.format("%s", f.localizedDescription()));
}
}
|
@Test
public void testTrashRepeated() throws Exception {
final FileManagerTrashFeature f = new FileManagerTrashFeature();
Local l = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
new DefaultLocalTouchFeature().touch(l);
assertTrue(l.exists());
f.trash(l);
assertFalse(l.exists());
}
|
@Override
public Batch toBatch() {
return new SparkBatch(
sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
}
|
@Test
public void testUnpartitionedIsNotNull() throws Exception {
createUnpartitionedTable(spark, tableName);
SparkScanBuilder builder = scanBuilder();
TruncateFunction.TruncateString function = new TruncateFunction.TruncateString();
UserDefinedScalarFunc udf = toUDF(function, expressions(intLit(4), fieldRef("data")));
Predicate predicate = new Predicate("IS_NOT_NULL", expressions(udf));
pushFilters(builder, predicate);
Batch scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(10);
// NOT IsNotNull
builder = scanBuilder();
predicate = new Not(predicate);
pushFilters(builder, predicate);
scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(10);
}
|
public static String truncate(String s, int length) {
if (s == null) {
return null;
}
if (s.length() > length) {
return s.substring(0, length);
} else {
return s;
}
}
|
@Test
public void test_truncate() {
assertThat(StringUtils.truncate(null, 10), CoreMatchers.nullValue());
assertThat(StringUtils.truncate("", 4), is(""));
assertThat(StringUtils.truncate("1234", 4), is("1234"));
assertThat(StringUtils.truncate("1234", 3), is("123"));
}
|
public static GeneratorResult run(String resolverPath,
String defaultPackage,
final boolean generateImported,
final boolean generateDataTemplates,
RestliVersion version,
RestliVersion deprecatedByVersion,
String targetDirectoryPath,
String[] sources)
throws IOException
{
return run(resolverPath,
defaultPackage,
null,
generateImported,
generateDataTemplates,
version,
deprecatedByVersion,
targetDirectoryPath,
sources);
}
|
@Test(dataProvider = "arrayDuplicateDataProvider2")
public void testGenerationPathOrder(RestliVersion version, String restspec1, String restspec2, boolean generateLowercasePath) throws Exception
{
// Given: RestLi version and spec files.
File tmpDir = ExporterTestUtils.createTmpDir();
final String pegasusDir = moduleDir + FS + RESOURCES_DIR + FS + "pegasus";
final String tmpPath = tmpDir.getPath();
final String file1 = moduleDir + FS + RESOURCES_DIR + FS + "idls" + FS + restspec1;
final String file2 = moduleDir + FS + RESOURCES_DIR + FS + "idls" + FS + restspec2;
// When: Generate the files defined by spec.
GeneratorResult r = RestRequestBuilderGenerator.run(pegasusDir,
null,
moduleDir,
true,
false,
version,
null,
tmpPath,
new String[] { file1 },
generateLowercasePath);
GeneratorResult r2 = RestRequestBuilderGenerator.run(pegasusDir,
null,
moduleDir,
true,
false,
version,
null,
tmpPath,
new String[] { file2 },
generateLowercasePath);
int c = tmpDir.getCanonicalPath().length();
// Then: Validate the Builder files were created with the correct paths.
ArrayList<File> files = new ArrayList<>(r.getModifiedFiles());
files.addAll(r2.getModifiedFiles());
Assert.assertTrue(files.size() > 0);
for (File f : files) {
Assert.assertTrue(f.exists());
if (!isFileSystemCaseSensitive && !generateLowercasePath) {
// Do not validate path case since we would need to read paths from files.
continue;
} else if (generateLowercasePath) {
// Validate path is lowercase.
String path = f.getCanonicalPath().substring(c);
int idx = path.lastIndexOf("/") + 1;
path = path.substring(0, idx);
Matcher matcher = LOWERCASE_PATH_PATTERN.matcher(path);
Assert.assertTrue(matcher.find());
}
Assert.assertTrue(f.getCanonicalPath().endsWith(f.getAbsolutePath()));
}
// Clean up.
ExporterTestUtils.rmdir(tmpDir);
}
|
<T extends PipelineOptions> T as(Class<T> iface) {
checkNotNull(iface);
checkArgument(iface.isInterface(), "Not an interface: %s", iface);
T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
synchronized (this) {
// double check
existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
Registration<T> registration =
PipelineOptionsFactory.CACHE
.get()
.validateWellFormed(iface, computedProperties.knownInterfaces);
List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors();
Class<T> proxyClass = registration.getProxyClass();
existingOption =
InstanceBuilder.ofType(proxyClass)
.fromClass(proxyClass)
.withArg(InvocationHandler.class, this)
.build();
computedProperties =
computedProperties.updated(iface, existingOption, propertyDescriptors);
}
}
}
return existingOption;
}
|
@Test
public void testJsonConversionOfNotSerializableProperty() throws Exception {
NotSerializableProperty options = PipelineOptionsFactory.as(NotSerializableProperty.class);
options.setValue(new NotSerializable("TestString"));
expectedException.expect(JsonMappingException.class);
expectedException.expectMessage("Failed to serialize and deserialize property 'value'");
serializeDeserialize(NotSerializableProperty.class, options);
}
|
@SuppressWarnings({"dereference.of.nullable", "argument"})
public static PipelineResult run(DataTokenizationOptions options) {
SchemasUtils schema = null;
try {
schema = new SchemasUtils(options.getDataSchemaPath(), StandardCharsets.UTF_8);
} catch (IOException e) {
LOG.error("Failed to retrieve schema for data.", e);
}
checkArgument(schema != null, "Data schema is mandatory.");
// Create the pipeline
Pipeline pipeline = Pipeline.create(options);
// Register the coder for pipeline
CoderRegistry coderRegistry = pipeline.getCoderRegistry();
coderRegistry.registerCoderForType(
FAILSAFE_ELEMENT_CODER.getEncodedTypeDescriptor(), FAILSAFE_ELEMENT_CODER);
coderRegistry.registerCoderForType(
RowCoder.of(schema.getBeamSchema()).getEncodedTypeDescriptor(),
RowCoder.of(schema.getBeamSchema()));
/*
* Row/Row Coder for FailsafeElement.
*/
FailsafeElementCoder<Row, Row> coder =
FailsafeElementCoder.of(
RowCoder.of(schema.getBeamSchema()), RowCoder.of(schema.getBeamSchema()));
coderRegistry.registerCoderForType(coder.getEncodedTypeDescriptor(), coder);
PCollection<Row> rows;
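// Read input rows from the file system or from Pub/Sub, depending on which source option is set;
// fail if neither is configured.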
if (options.getInputFilePattern() != null) {
rows = new TokenizationFileSystemIO(options).read(pipeline, schema);
} else if (options.getPubsubTopic() != null) {
rows =
pipeline
.apply(
"ReadMessagesFromPubsub",
PubsubIO.readStrings().fromTopic(options.getPubsubTopic()))
.apply(
"TransformToBeamRow",
new JsonToBeamRow(options.getNonTokenizedDeadLetterPath(), schema));
if (options.getOutputDirectory() != null) {
rows = rows.apply(Window.into(FixedWindows.of(parseDuration(options.getWindowDuration()))));
}
} else {
throw new IllegalStateException(
"No source is provided, please configure File System or Pub/Sub");
}
/*
Tokenize data using remote API call
*/
PCollectionTuple tokenizedRows =
rows.setRowSchema(schema.getBeamSchema())
.apply(
MapElements.into(
TypeDescriptors.kvs(TypeDescriptors.integers(), TypeDescriptors.rows()))
.via((Row row) -> KV.of(0, row)))
.setCoder(KvCoder.of(VarIntCoder.of(), RowCoder.of(schema.getBeamSchema())))
.apply(
"DsgTokenization",
RowToTokenizedRow.newBuilder()
.setBatchSize(options.getBatchSize())
.setRpcURI(options.getRpcUri())
.setSchema(schema.getBeamSchema())
.setSuccessTag(TOKENIZATION_OUT)
.setFailureTag(TOKENIZATION_DEADLETTER_OUT)
.build());
String csvDelimiter = options.getCsvDelimiter();
if (options.getNonTokenizedDeadLetterPath() != null) {
/*
Write tokenization errors to dead-letter sink
*/
tokenizedRows
.get(TOKENIZATION_DEADLETTER_OUT)
.apply(
"ConvertToCSV",
MapElements.into(FAILSAFE_ELEMENT_CODER.getEncodedTypeDescriptor())
.via(
(FailsafeElement<Row, Row> fse) ->
FailsafeElement.of(
new RowToCsv(csvDelimiter).getCsvFromRow(fse.getOriginalPayload()),
new RowToCsv(csvDelimiter).getCsvFromRow(fse.getPayload()))))
.apply(
"WriteTokenizationErrorsToFS",
ErrorConverters.WriteErrorsToTextIO.<String, String>newBuilder()
.setErrorWritePath(options.getNonTokenizedDeadLetterPath())
.setTranslateFunction(SerializableFunctions.getCsvErrorConverter())
.build());
}
if (options.getOutputDirectory() != null) {
new TokenizationFileSystemIO(options)
.write(tokenizedRows.get(TOKENIZATION_OUT), schema.getBeamSchema());
} else if (options.getBigQueryTableName() != null) {
WriteResult writeResult =
TokenizationBigQueryIO.write(
tokenizedRows.get(TOKENIZATION_OUT),
options.getBigQueryTableName(),
schema.getBigQuerySchema());
writeResult
.getFailedInsertsWithErr()
.apply(
"WrapInsertionErrors",
MapElements.into(FAILSAFE_ELEMENT_CODER.getEncodedTypeDescriptor())
.via(TokenizationBigQueryIO::wrapBigQueryInsertError))
.setCoder(FAILSAFE_ELEMENT_CODER)
.apply(
"WriteInsertionFailedRecords",
ErrorConverters.WriteStringMessageErrors.newBuilder()
.setErrorRecordsTable(
options.getBigQueryTableName() + DEFAULT_DEADLETTER_TABLE_SUFFIX)
.setErrorRecordsTableSchema(DEADLETTER_SCHEMA)
.build());
} else if (options.getBigTableInstanceId() != null) {
new TokenizationBigTableIO(options)
.write(tokenizedRows.get(TOKENIZATION_OUT), schema.getBeamSchema());
} else {
throw new IllegalStateException(
"No sink is provided, please configure BigQuery or BigTable.");
}
return pipeline.run();
}
|
@Test
public void testJsonToRow() throws IOException {
PCollection<Row> rows = fileSystemIORead(JSON_FILE_PATH, FORMAT.JSON);
PAssert.that(rows)
.satisfies(
x -> {
LinkedList<Row> beamRows = Lists.newLinkedList(x);
assertThat(beamRows, hasSize(3));
beamRows.forEach(
row -> {
List<Object> fieldValues = row.getValues();
for (Object element : fieldValues) {
assertThat((String) element, startsWith("FieldValue"));
}
});
return null;
});
testPipeline.run();
}
|
@Override
public <IN, ACC, OUT> AggregatingState<IN, OUT> getAggregatingState(
AggregatingStateDescriptor<IN, ACC, OUT> stateProperties) {
KeyedStateStore keyedStateStore = checkPreconditionsAndGetKeyedStateStore(stateProperties);
stateProperties.initializeSerializerUnlessSet(this::createSerializer);
return keyedStateStore.getAggregatingState(stateProperties);
}
|
@Test
void testV2AggregatingStateInstantiation() throws Exception {
final ExecutionConfig config = new ExecutionConfig();
SerializerConfig serializerConfig = config.getSerializerConfig();
serializerConfig.registerKryoType(Path.class);
final AtomicReference<Object> descriptorCapture = new AtomicReference<>();
StreamingRuntimeContext context = createRuntimeContext(descriptorCapture, config);
@SuppressWarnings("unchecked")
AggregateFunction<String, TaskInfo, String> aggregate =
(AggregateFunction<String, TaskInfo, String>) mock(AggregateFunction.class);
org.apache.flink.runtime.state.v2.AggregatingStateDescriptor<String, TaskInfo, String>
descr =
new org.apache.flink.runtime.state.v2.AggregatingStateDescriptor<>(
"name",
aggregate,
TypeInformation.of(TaskInfo.class),
serializerConfig);
context.getAggregatingState(descr);
org.apache.flink.runtime.state.v2.AggregatingStateDescriptor<?, ?, ?> descrIntercepted =
(org.apache.flink.runtime.state.v2.AggregatingStateDescriptor<?, ?, ?>)
descriptorCapture.get();
TypeSerializer<?> serializer = descrIntercepted.getSerializer();
// check that the Path class is really registered, i.e., the execution config was applied
assertThat(serializer).isInstanceOf(KryoSerializer.class);
assertThat(((KryoSerializer<?>) serializer).getKryo().getRegistration(Path.class).getId())
.isPositive();
}
|
JavaClasses getClassesToAnalyzeFor(Class<?> testClass, ClassAnalysisRequest classAnalysisRequest) {
checkNotNull(testClass);
checkNotNull(classAnalysisRequest);
if (cachedByTest.containsKey(testClass)) {
return cachedByTest.get(testClass);
}
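// Cache miss for this test class: resolve the requested locations and import them, reusing the
// location-keyed cache only when the cache mode is FOREVER.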
LocationsKey locations = RequestedLocations.by(classAnalysisRequest, testClass).asKey();
JavaClasses classes = classAnalysisRequest.getCacheMode() == FOREVER
? cachedByLocations.getUnchecked(locations).get()
: new LazyJavaClasses(locations.locations, locations.importOptionTypes).get();
cachedByTest.put(testClass, classes);
return classes;
}
|
@Test
public void if_no_import_locations_are_specified_and_whole_classpath_is_set_false_then_the_default_is_the_package_of_the_test_class() {
TestAnalysisRequest defaultOptions = new TestAnalysisRequest().withWholeClasspath(false);
JavaClasses classes = cache.getClassesToAnalyzeFor(TestClass.class, defaultOptions);
assertThatTypes(classes).contain(getClass(), TestAnalysisRequest.class);
assertThatTypes(classes).doNotContain(ClassFileImporter.class);
}
|
@Override
public void lock() {
try {
lock(-1, null, false);
} catch (InterruptedException e) {
throw new IllegalStateException();
}
}
|
@Test
public void testGetHoldCount() {
RLock lock = redisson.getLock("lock");
Assertions.assertEquals(0, lock.getHoldCount());
lock.lock();
Assertions.assertEquals(1, lock.getHoldCount());
lock.unlock();
Assertions.assertEquals(0, lock.getHoldCount());
lock.lock();
lock.lock();
Assertions.assertEquals(2, lock.getHoldCount());
lock.unlock();
Assertions.assertEquals(1, lock.getHoldCount());
lock.unlock();
Assertions.assertEquals(0, lock.getHoldCount());
}
|
public Parser getParser() {
return parser;
}
|
@Test
public void testMultipleWithFallback() throws Exception {
TikaConfig config = getConfig("TIKA-1509-multiple-fallback.xml");
CompositeParser parser = (CompositeParser) config.getParser();
assertEquals(2, parser.getAllComponentParsers().size());
Parser p;
p = parser.getAllComponentParsers().get(0);
assertTrue(p instanceof ParserDecorator, p.toString());
assertEquals(DefaultParser.class, ((ParserDecorator) p).getWrappedParser().getClass());
p = parser.getAllComponentParsers().get(1);
assertTrue(p instanceof ParserDecorator, p.toString());
assertEquals(FallbackParser.class, ((ParserDecorator) p).getWrappedParser().getClass());
FallbackParser fbp = (FallbackParser) ((ParserDecorator) p).getWrappedParser();
assertEquals("DISCARD_ALL", fbp.getMetadataPolicy().toString());
}
|
public TaskAcknowledgeResult acknowledgeTask(
ExecutionAttemptID executionAttemptId,
TaskStateSnapshot operatorSubtaskStates,
CheckpointMetrics metrics) {
synchronized (lock) {
if (disposed) {
return TaskAcknowledgeResult.DISCARDED;
}
final ExecutionVertex vertex = notYetAcknowledgedTasks.remove(executionAttemptId);
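// A task that is no longer pending was either already acknowledged (duplicate) or is unknown to
// this checkpoint.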
if (vertex == null) {
if (acknowledgedTasks.contains(executionAttemptId)) {
return TaskAcknowledgeResult.DUPLICATE;
} else {
return TaskAcknowledgeResult.UNKNOWN;
}
} else {
acknowledgedTasks.add(executionAttemptId);
}
long ackTimestamp = System.currentTimeMillis();
if (operatorSubtaskStates != null && operatorSubtaskStates.isTaskDeployedAsFinished()) {
checkpointPlan.reportTaskFinishedOnRestore(vertex);
} else {
List<OperatorIDPair> operatorIDs = vertex.getJobVertex().getOperatorIDs();
for (OperatorIDPair operatorID : operatorIDs) {
updateOperatorState(vertex, operatorSubtaskStates, operatorID);
}
if (operatorSubtaskStates != null && operatorSubtaskStates.isTaskFinished()) {
checkpointPlan.reportTaskHasFinishedOperators(vertex);
}
}
++numAcknowledgedTasks;
// publish the checkpoint statistics
// to prevent null-pointers from concurrent modification, copy reference onto stack
if (pendingCheckpointStats != null) {
// Do this in millis because the web frontend works with them
long alignmentDurationMillis = metrics.getAlignmentDurationNanos() / 1_000_000;
long checkpointStartDelayMillis =
metrics.getCheckpointStartDelayNanos() / 1_000_000;
SubtaskStateStats subtaskStateStats =
new SubtaskStateStats(
vertex.getParallelSubtaskIndex(),
ackTimestamp,
metrics.getBytesPersistedOfThisCheckpoint(),
metrics.getTotalBytesPersisted(),
metrics.getSyncDurationMillis(),
metrics.getAsyncDurationMillis(),
metrics.getBytesProcessedDuringAlignment(),
metrics.getBytesPersistedDuringAlignment(),
alignmentDurationMillis,
checkpointStartDelayMillis,
metrics.getUnalignedCheckpoint(),
true);
LOG.trace(
"Checkpoint {} stats for {}: size={}Kb, duration={}ms, sync part={}ms, async part={}ms",
checkpointId,
vertex.getTaskNameWithSubtaskIndex(),
subtaskStateStats.getStateSize() == 0
? 0
: subtaskStateStats.getStateSize() / 1024,
subtaskStateStats.getEndToEndDuration(
pendingCheckpointStats.getTriggerTimestamp()),
subtaskStateStats.getSyncCheckpointDuration(),
subtaskStateStats.getAsyncCheckpointDuration());
pendingCheckpointStats.reportSubtaskStats(
vertex.getJobvertexId(), subtaskStateStats);
}
return TaskAcknowledgeResult.SUCCESS;
}
}
|
@Test
void testReportTaskFinishedOperators() throws IOException {
RecordCheckpointPlan recordCheckpointPlan =
new RecordCheckpointPlan(new ArrayList<>(ACK_TASKS));
PendingCheckpoint checkpoint = createPendingCheckpoint(recordCheckpointPlan);
checkpoint.acknowledgeTask(
ACK_TASKS.get(0).getAttemptId(),
new TaskStateSnapshot(10, true),
new CheckpointMetrics());
assertThat(recordCheckpointPlan.getReportedOperatorsFinishedTasks())
.contains(ACK_TASKS.get(0).getVertex());
}
|
List<Condition> run(boolean useKRaft) {
List<Condition> warnings = new ArrayList<>();
checkKafkaReplicationConfig(warnings);
checkKafkaBrokersStorage(warnings);
if (useKRaft) {
// Additional checks done for KRaft clusters
checkKRaftControllerStorage(warnings);
checkKRaftControllerCount(warnings);
checkKafkaMetadataVersion(warnings);
checkInterBrokerProtocolVersionInKRaft(warnings);
checkLogMessageFormatVersionInKRaft(warnings);
} else {
// Additional checks done for ZooKeeper-based clusters
checkKafkaLogMessageFormatVersion(warnings);
checkKafkaInterBrokerProtocolVersion(warnings);
checkKRaftMetadataStorageConfiguredForZooBasedCLuster(warnings);
}
return warnings;
}
|
@Test
public void checkWithoutKRaftMetadataConfigInKRaftModeProducesNoWarning() {
// Kafka with Ephemeral storage
KafkaNodePool ephemeralPool = new KafkaNodePoolBuilder(POOL_A)
.editSpec()
.withNewEphemeralStorage()
.endEphemeralStorage()
.endSpec()
.build();
KafkaSpecChecker checker = generateChecker(KAFKA, List.of(CONTROLLERS, ephemeralPool), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE);
List<Condition> warnings = checker.run(true);
assertThat(warnings, hasSize(0));
// Kafka with Persistent storage
KafkaNodePool persistentPool = new KafkaNodePoolBuilder(POOL_A)
.editSpec()
.withNewPersistentClaimStorage()
.withSize("100Gi")
.endPersistentClaimStorage()
.endSpec()
.build();
checker = generateChecker(KAFKA, List.of(CONTROLLERS, persistentPool), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE);
warnings = checker.run(true);
assertThat(warnings, hasSize(0));
// Kafka with JBOD storage
KafkaNodePool jbodPool = new KafkaNodePoolBuilder(POOL_A)
.editSpec()
.withNewJbodStorage()
.addNewPersistentClaimStorageVolume()
.withId(0)
.withSize("100Gi")
.endPersistentClaimStorageVolume()
.addNewPersistentClaimStorageVolume()
.withId(1)
.withSize("100Gi")
.endPersistentClaimStorageVolume()
.endJbodStorage()
.endSpec()
.build();
checker = generateChecker(KAFKA, List.of(CONTROLLERS, jbodPool), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE);
warnings = checker.run(true);
assertThat(warnings, hasSize(0));
}
|
public boolean accepts( String fileName ) {
if ( fileName == null || fileName.indexOf( '.' ) == -1 ) {
return false;
}
String extension = fileName.substring( fileName.lastIndexOf( '.' ) + 1 );
return extension.equals( "kjb" );
}
|
@Test
public void testAccepts() throws Exception {
assertFalse( jobFileListener.accepts( null ) );
assertFalse( jobFileListener.accepts( "NoDot" ) );
assertTrue( jobFileListener.accepts( "Job.kjb" ) );
assertTrue( jobFileListener.accepts( ".kjb" ) );
}
|
public static ByteBuf copyLong(long value) {
ByteBuf buf = buffer(8);
buf.writeLong(value);
return buf;
}
|
@Test
public void testWrapLong() {
ByteBuf buffer = copyLong(1, 4);
assertEquals(16, buffer.capacity());
assertEquals(1, buffer.readLong());
assertEquals(4, buffer.readLong());
assertFalse(buffer.isReadable());
buffer.release();
buffer = copyLong(null);
assertEquals(0, buffer.capacity());
buffer.release();
buffer = copyLong(new long[] {});
assertEquals(0, buffer.capacity());
buffer.release();
}
|
@Override
public void finished(boolean allStepsExecuted) {
if (postProjectAnalysisTasks.length == 0) {
return;
}
ProjectAnalysisImpl projectAnalysis = createProjectAnalysis(allStepsExecuted ? SUCCESS : FAILED);
for (PostProjectAnalysisTask postProjectAnalysisTask : postProjectAnalysisTasks) {
executeTask(projectAnalysis, postProjectAnalysisTask);
}
}
|
@Test
@UseDataProvider("booleanValues")
public void logStatistics_add_fails_with_IAE_if_key_is_time_or_status_ignoring_case(boolean allStepsExecuted) {
underTest.finished(allStepsExecuted);
verify(postProjectAnalysisTask).finished(taskContextCaptor.capture());
PostProjectAnalysisTask.LogStatistics logStatistics = taskContextCaptor.getValue().getLogStatistics();
for (String reservedKey : asList("time", "TIME", "TImE", "status", "STATUS", "STaTuS")) {
assertThat(catchThrowable(() -> logStatistics.add(reservedKey, "foo")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Statistic with key [" + reservedKey + "] is not accepted");
}
}
|
@Override
public E remove() {
final E e = poll();
if (e == null) {
throw new NoSuchElementException("Queue is empty");
}
return e;
}
|
@Test(expected = UnsupportedOperationException.class)
public void testRemove_withObject() {
queue.remove(1);
}
|
public <T> void notifyReadyAsync(Callable<T> callable, BiConsumer<T, Throwable> handler) {
workerExecutor.execute(
() -> {
try {
T result = callable.call();
executorToNotify.execute(() -> handler.accept(result, null));
} catch (Throwable t) {
executorToNotify.execute(() -> handler.accept(null, t));
}
});
}
|
@Test
public void testExceptionInCallable() {
Exception exception = new Exception("Expected exception.");
notifier.notifyReadyAsync(
() -> {
throw exception;
},
(v, e) -> {
assertEquals(exception, e);
assertNull(v);
});
}
|
@GET
@Path("/health")
@Operation(summary = "Health check endpoint to verify worker readiness and liveness")
public Response healthCheck() throws Throwable {
WorkerStatus workerStatus;
int statusCode;
try {
FutureCallback<Void> cb = new FutureCallback<>();
herder.healthCheck(cb);
long timeoutNs = TimeUnit.MILLISECONDS.toNanos(requestTimeout.healthCheckTimeoutMs());
long deadlineNs = timeoutNs + time.nanoseconds();
time.waitForFuture(cb, deadlineNs);
statusCode = Response.Status.OK.getStatusCode();
workerStatus = WorkerStatus.healthy();
} catch (TimeoutException e) {
String statusDetails = e instanceof StagedTimeoutException
? ((StagedTimeoutException) e).stage().summarize()
: null;
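// A timeout while the herder is not yet ready means the worker is still starting (503); on a
// ready worker the timeout is reported as unhealthy (500).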
if (!herder.isReady()) {
statusCode = Response.Status.SERVICE_UNAVAILABLE.getStatusCode();
workerStatus = WorkerStatus.starting(statusDetails);
} else {
statusCode = Response.Status.INTERNAL_SERVER_ERROR.getStatusCode();
workerStatus = WorkerStatus.unhealthy(statusDetails);
}
} catch (ExecutionException e) {
throw e.getCause();
}
return Response.status(statusCode).entity(workerStatus).build();
}
|
@Test
public void testHealthCheckRunning() throws Throwable {
expectHealthCheck(null);
Response response = rootResource.healthCheck();
assertEquals(Response.Status.OK.getStatusCode(), response.getStatus());
WorkerStatus expectedStatus = WorkerStatus.healthy();
WorkerStatus actualStatus = workerStatus(response);
assertEquals(expectedStatus, actualStatus);
}
|
public long runScheduledPendingTasks() {
try {
return embeddedEventLoop().runScheduledTasks();
} catch (Exception e) {
recordException(e);
return embeddedEventLoop().nextScheduledTask();
}
}
|
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testScheduling() throws Exception {
EmbeddedChannel ch = new EmbeddedChannel(new ChannelInboundHandlerAdapter());
final CountDownLatch latch = new CountDownLatch(2);
Future future = ch.eventLoop().schedule(new Runnable() {
@Override
public void run() {
latch.countDown();
}
}, 1, TimeUnit.SECONDS);
future.addListener(new FutureListener() {
@Override
public void operationComplete(Future future) throws Exception {
latch.countDown();
}
});
long next = ch.runScheduledPendingTasks();
assertTrue(next > 0);
// Sleep for the nanoseconds but also give an extra 50ms as the clock may not be very precise and
// so fail the test otherwise.
Thread.sleep(TimeUnit.NANOSECONDS.toMillis(next) + 50);
assertEquals(-1, ch.runScheduledPendingTasks());
latch.await();
}
|
public ExitStatus(Options options) {
this.options = options;
}
|
@Test
void wip_with_passed_failed_scenarios() {
createWipRuntime();
bus.send(testCaseFinishedWithStatus(Status.PASSED));
bus.send(testCaseFinishedWithStatus(Status.FAILED));
assertThat(exitStatus.exitStatus(), is(equalTo((byte) 0x1)));
}
|
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new);
String shardingResultSuffix = getShardingResultSuffix(cutShardingValue(shardingValue.getValue()).mod(new BigInteger(String.valueOf(shardingCount))).toString());
return ShardingAutoTableAlgorithmUtils.findMatchedTargetName(availableTargetNames, shardingResultSuffix, shardingValue.getDataNodeInfo()).orElse(null);
}
|
@Test
void assertRangeDoShardingWithAllTargets() {
ModShardingAlgorithm algorithm = (ModShardingAlgorithm) TypedSPILoader.getService(ShardingAlgorithm.class, "MOD", PropertiesBuilder.build(new Property("sharding-count", "16")));
Collection<String> actual = algorithm.doSharding(createAvailableTargetNames(), new RangeShardingValue<>("t_order", "order_id", DATA_NODE_INFO, Range.closed(1L, 16L)));
assertThat(actual.size(), is(16));
}
|