focal_method | test_case |
---|---|
public static <K, V extends OrderedSPI<?>> Map<K, V> getServices(final Class<V> serviceInterface, final Collection<K> types) {
return getServices(serviceInterface, types, Comparator.naturalOrder());
}
|
@SuppressWarnings("rawtypes")
@Test
void assertGetServices() {
OrderedInterfaceFixtureImpl key = new OrderedInterfaceFixtureImpl();
Map<OrderedInterfaceFixtureImpl, OrderedSPIFixture> actual = OrderedSPILoader.getServices(OrderedSPIFixture.class, Collections.singleton(key));
assertThat(actual.size(), is(1));
assertThat(actual.get(key), instanceOf(OrderedSPIFixtureImpl.class));
}
|
@Override
public boolean isInputConsumable(
SchedulingExecutionVertex executionVertex,
Set<ExecutionVertexID> verticesToDeploy,
Map<ConsumedPartitionGroup, Boolean> consumableStatusCache) {
for (ConsumedPartitionGroup consumedPartitionGroup :
executionVertex.getConsumedPartitionGroups()) {
if (!consumableStatusCache.computeIfAbsent(
consumedPartitionGroup, this::isConsumableBasedOnFinishedProducers)) {
return false;
}
}
return true;
}
|
@Test
void testNotFinishedHybridInput() {
final TestingSchedulingTopology topology = new TestingSchedulingTopology();
final List<TestingSchedulingExecutionVertex> producers =
topology.addExecutionVertices().withParallelism(2).finish();
final List<TestingSchedulingExecutionVertex> consumer =
topology.addExecutionVertices().withParallelism(2).finish();
topology.connectAllToAll(producers, consumer)
.withResultPartitionState(ResultPartitionState.CREATED)
.withResultPartitionType(ResultPartitionType.HYBRID_FULL)
.finish();
AllFinishedInputConsumableDecider inputConsumableDecider =
createAllFinishedInputConsumableDecider();
assertThat(
inputConsumableDecider.isInputConsumable(
consumer.get(0), Collections.emptySet(), new HashMap<>()))
.isFalse();
assertThat(
inputConsumableDecider.isInputConsumable(
consumer.get(1), Collections.emptySet(), new HashMap<>()))
.isFalse();
}
|
public StateMachine<T> next(T nextState) throws IllegalStateException {
Set<T> allowed = transitions.get(currentState);
checkNotNull(allowed, "No transitions from state " + currentState);
checkState(allowed.contains(nextState), "Transition not allowed from state " + currentState + " to " + nextState);
currentState = nextState;
return this;
}
|
@Test(expected = IllegalStateException.class)
public void testThrowsException_whenTransitionInvalid() {
machine.next(State.C);
}
|
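A minimal wiring sketch for the transition table that drives next(); the constructor shape, the State enum, and the map literal are assumptions for illustration and do not appear in the snippet above:
// Sketch only: assumes a constructor StateMachine(Map<T, Set<T>> transitions, T initialState).
enum State { A, B, C }
Map<State, Set<State>> transitions = Map.of(
    State.A, Set.of(State.B),           // from A, only B is reachable
    State.B, Set.of(State.A, State.C)); // from B, both A and C are reachable
StateMachine<State> machine = new StateMachine<>(transitions, State.A);
machine.next(State.B).next(State.C); // legal path; chaining works because next() returns this
// Calling machine.next(State.C) straight from State.A would fail the checkState
// guard with an IllegalStateException, which is what the test above asserts.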
public Node parse() throws ScanException {
if (tokenList == null || tokenList.isEmpty())
return null;
return E();
}
|
@Test
public void literal() throws ScanException {
Tokenizer tokenizer = new Tokenizer("abc");
Parser parser = new Parser(tokenizer.tokenize());
Node node = parser.parse();
Node witness = new Node(Node.Type.LITERAL, "abc");
assertEquals(witness, node);
}
|
@Description("removes whitespace from the beginning of a string")
@ScalarFunction("ltrim")
@LiteralParameters("x")
@SqlType("varchar(x)")
public static Slice leftTrim(@SqlType("varchar(x)") Slice slice)
{
return SliceUtf8.leftTrim(slice);
}
|
@Test
public void testLeftTrim()
{
assertFunction("LTRIM('')", createVarcharType(0), "");
assertFunction("LTRIM(' ')", createVarcharType(3), "");
assertFunction("LTRIM(' hello ')", createVarcharType(9), "hello ");
assertFunction("LTRIM(' hello')", createVarcharType(7), "hello");
assertFunction("LTRIM('hello ')", createVarcharType(7), "hello ");
assertFunction("LTRIM(' hello world ')", createVarcharType(13), "hello world ");
assertFunction("LTRIM('\u4FE1\u5FF5 \u7231 \u5E0C\u671B ')", createVarcharType(9), "\u4FE1\u5FF5 \u7231 \u5E0C\u671B ");
assertFunction("LTRIM(' \u4FE1\u5FF5 \u7231 \u5E0C\u671B ')", createVarcharType(9), "\u4FE1\u5FF5 \u7231 \u5E0C\u671B ");
assertFunction("LTRIM(' \u4FE1\u5FF5 \u7231 \u5E0C\u671B')", createVarcharType(9), "\u4FE1\u5FF5 \u7231 \u5E0C\u671B");
assertFunction("LTRIM(' \u2028 \u4FE1\u5FF5 \u7231 \u5E0C\u671B')", createVarcharType(10), "\u4FE1\u5FF5 \u7231 \u5E0C\u671B");
}
|
@Override
public YamlUserConfiguration swapToYamlConfiguration(final ShardingSphereUser data) {
if (null == data) {
return null;
}
YamlUserConfiguration result = new YamlUserConfiguration();
result.setUser(data.getGrantee().toString());
result.setPassword(data.getPassword());
result.setAuthenticationMethodName(data.getAuthenticationMethodName());
result.setAdmin(data.isAdmin());
return result;
}
|
@Test
void assertSwapToNullYamlConfiguration() {
assertNull(new YamlUserSwapper().swapToYamlConfiguration(null));
}
|
@Override
public Capabilities getCapabilities(String pluginId) {
String resolvedExtensionVersion = pluginManager.resolveExtensionVersion(pluginId, CONFIG_REPO_EXTENSION, goSupportedVersions);
if (resolvedExtensionVersion.equals("1.0")) {
return new Capabilities(false, false, false, false);
}
return pluginRequestHelper.submitRequest(pluginId, REQUEST_CAPABILITIES, new DefaultPluginInteractionCallback<>() {
@Override
public Capabilities onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
return messageHandlerMap.get(resolvedExtensionVersion).getCapabilitiesFromResponse(responseBody);
}
});
}
|
@Test
public void shouldRequestCapabilitiesV1() {
Capabilities capabilities = new Capabilities(false, false, false, false);
Capabilities res = extension.getCapabilities(PLUGIN_ID);
assertThat(capabilities, is(res));
}
|
@Udf(description = "Returns the INT base raised to the INT exponent.")
public Double power(
@UdfParameter(
value = "base",
description = "the base of the power."
) final Integer base,
@UdfParameter(
value = "exponent",
description = "the exponent of the power."
) final Integer exponent
) {
return power(
base == null ? null : base.doubleValue(),
exponent == null ? null : exponent.doubleValue()
);
}
|
@Test
public void shouldHandleZeroExponent() {
assertThat(udf.power(15, 0), closeTo(1.0, 0.000000000000001));
assertThat(udf.power(15L, 0L), closeTo(1.0, 0.000000000000001));
assertThat(udf.power(15.0, 0.0), closeTo(1.0, 0.000000000000001));
assertThat(udf.power(0, 0), closeTo(1.0, 0.000000000000001));
assertThat(udf.power(0L, 0L), closeTo(1.0, 0.000000000000001));
assertThat(udf.power(0.0, 0.0), closeTo(1.0, 0.000000000000001));
}
|
@Override
public void fail(Throwable failureCause) {
synchronized (lock) {
if (isFailed) {
return;
}
isFailed = true;
BufferIndexOrError bufferIndexOrError;
// Empty from the tail, in case the subpartition view consumes concurrently
// and gets the wrong order.
while ((bufferIndexOrError = loadedBuffers.pollLast()) != null) {
if (bufferIndexOrError.getBuffer().isPresent()) {
bufferIndexOrError.getBuffer().get().recycleBuffer();
tryDecreaseBacklog(bufferIndexOrError.getBuffer().get());
}
}
loadedBuffers.add(BufferIndexOrError.newError(failureCause));
operations.notifyDataAvailable();
}
}
|
@Test
void testFail() throws Exception {
AtomicInteger numOfNotify = new AtomicInteger(0);
subpartitionOperation.setNotifyDataAvailableRunnable(numOfNotify::incrementAndGet);
HsSubpartitionFileReaderImpl subpartitionFileReader = createSubpartitionFileReader();
Deque<BufferIndexOrError> loadedBuffers = subpartitionFileReader.getLoadedBuffers();
writeDataToFile(targetChannel, 0, 2);
subpartitionFileReader.prepareForScheduling();
Queue<MemorySegment> memorySegments = createsMemorySegments(2);
// trigger reading, add buffer to queue.
AtomicInteger numReleased = new AtomicInteger(0);
subpartitionFileReader.readBuffers(
memorySegments, (buffer) -> numReleased.incrementAndGet());
assertThat(memorySegments).isEmpty();
assertThat(loadedBuffers).hasSize(2);
assertThat(numOfNotify).hasValue(1);
subpartitionFileReader.fail(new RuntimeException("expected exception."));
// All buffers in the file reader queue should be recycled during fail.
assertThat(numReleased).hasValue(2);
BufferIndexOrError error = loadedBuffers.poll();
assertThat(loadedBuffers).isEmpty();
assertThat(error).isNotNull();
assertThat(error.getThrowable())
.hasValueSatisfying(
throwable ->
assertThat(throwable)
.isInstanceOf(RuntimeException.class)
.hasMessage("expected exception."));
// A failed subpartition reader should notify the downstream.
assertThat(numOfNotify).hasValue(2);
}
|
public static SignalNoiseRatio[] fit(DataFrame data, String clazz) {
BaseVector<?, ?, ?> y = data.column(clazz);
ClassLabels codec = ClassLabels.fit(y);
if (codec.k != 2) {
throw new UnsupportedOperationException("Signal Noise Ratio is applicable only to binary classification");
}
int n = data.nrow();
int n1 = 0;
for (int yi : codec.y) {
if (yi == 0) {
n1++;
}
}
int n2 = n - n1;
double[] x1 = new double[n1];
double[] x2 = new double[n2];
StructType schema = data.schema();
return IntStream.range(0, schema.length()).mapToObj(i -> {
StructField field = schema.field(i);
if (field.isNumeric()) {
Arrays.fill(x1, 0.0);
Arrays.fill(x2, 0.0);
BaseVector<?, ?, ?> xi = data.column(i);
for (int l = 0, j = 0, k = 0; l < n; l++) {
if (codec.y[l] == 0) {
x1[j++] = xi.getDouble(l);
} else {
x2[k++] = xi.getDouble(l);
}
}
double mu1 = MathEx.mean(x1);
double mu2 = MathEx.mean(x2);
double sd1 = MathEx.sd(x1);
double sd2 = MathEx.sd(x2);
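                // Signal-to-noise ratio: class-mean separation |mu1 - mu2|
                // scaled by the summed standard deviations (sd1 + sd2).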
double s2n = Math.abs(mu1 - mu2) / (sd1 + sd2);
return new SignalNoiseRatio(field.name, s2n);
} else {
return null;
}
}).filter(s2n -> s2n != null && !s2n.feature.equals(clazz)).toArray(SignalNoiseRatio[]::new);
}
|
@Test
public void testDefault() {
System.out.println("Default");
SignalNoiseRatio[] s2n = SignalNoiseRatio.fit(Default.data, "default");
assertEquals(2, s2n.length);
assertEquals(1.1832, s2n[0].s2n, 1E-4);
assertEquals(0.0545, s2n[1].s2n, 1E-4);
}
|
@Override
public boolean isTracked(String key) {
return OpType.fromSymbol(key) != null;
}
|
@Test
public void testIsTracked() {
assertFalse(statistics.isTracked(null));
assertFalse(statistics.isTracked(NO_SUCH_OP));
final Iterator<LongStatistic> iter = statistics.getLongStatistics();
while (iter.hasNext()) {
final LongStatistic longStatistic = iter.next();
assertTrue(statistics.isTracked(longStatistic.getName()));
}
}
|
public static void delete(File fileOrDir) throws IOException {
if (fileOrDir == null) {
return;
}
if (fileOrDir.isDirectory()) {
cleanDirectory(fileOrDir);
} else {
if (fileOrDir.exists()) {
boolean isDeleteOk = fileOrDir.delete();
if (!isDeleteOk) {
throw new IOException("delete fail");
}
}
}
}
|
@Test
void testDeleteSuccess() throws IOException {
File file = null;
try {
file = File.createTempFile("test_deleteForFile", ".txt");
assertTrue(file.exists());
IoUtils.delete(file);
assertFalse(file.exists());
} finally {
if (null != file) {
file.deleteOnExit();
}
}
}
|
public String format(Date then)
{
if (then == null)
then = now();
Duration d = approximateDuration(then);
return format(d);
}
|
@Test
public void testNullDate() throws Exception
{
PrettyTime t = new PrettyTime();
Date date = null;
Assert.assertEquals("moments from now", t.format(date));
}
|
public static String getParent(String url) {
String ensUrl = url != null ? url.trim() : "";
if (ensUrl.equals(".") || !ensUrl.contains(".")) {
return null;
}
return ensUrl.substring(ensUrl.indexOf(".") + 1);
}
|
@Test
void getParentWhenUrlWithoutParent() {
assertNull(EnsUtils.getParent("parent"));
}
|
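A few illustrative calls (domain values hypothetical) that pin down the contract of getParent:
EnsUtils.getParent("pay.vitalik.eth"); // "vitalik.eth" -- everything after the first dot
EnsUtils.getParent("eth");             // null -- no dot, so no parent
EnsUtils.getParent(".");               // null -- a bare dot is explicitly parentless
EnsUtils.getParent(null);              // null -- null is normalized to "", which has no dot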
@Override
public Map<String, StepTransition> translate(WorkflowInstance workflowInstance) {
WorkflowInstance instance = objectMapper.convertValue(workflowInstance, WorkflowInstance.class);
if (instance.getRunConfig() != null) {
if (instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_INCOMPLETE
|| instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_SPECIFIC) {
Map<String, StepInstance.Status> statusMap =
instance.getAggregatedInfo().getStepAggregatedViews().entrySet().stream()
.collect(
Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getStatus()));
if (!statusMap.isEmpty()) {
instance
.getRunConfig()
.setStartStepIds(
statusMap.entrySet().stream()
.filter(
entry ->
!entry.getValue().isComplete()
&& (entry.getValue().isTerminal()
|| entry.getValue() == StepInstance.Status.NOT_CREATED))
.map(Map.Entry::getKey)
.collect(Collectors.toList()));
}
// handle the special case of restarting from a completed step
if (instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_SPECIFIC) {
String restartStepId =
RunRequest.getCurrentNode(instance.getRunConfig().getRestartConfig()).getStepId();
if (!instance.getRunConfig().getStartStepIds().contains(restartStepId)) {
instance.getRunConfig().getStartStepIds().add(restartStepId);
}
}
} else {
if (workflowInstance.getRunConfig().getStartStepIds() != null) {
instance
.getRunConfig()
.setStartStepIds(new ArrayList<>(workflowInstance.getRunConfig().getStartStepIds()));
}
if (workflowInstance.getRunConfig().getEndStepIds() != null) {
instance
.getRunConfig()
.setEndStepIds(new ArrayList<>(workflowInstance.getRunConfig().getEndStepIds()));
}
}
}
List<String> startStepIds =
instance.getRunConfig() != null && instance.getRunConfig().getStartStepIds() != null
? instance.getRunConfig().getStartStepIds()
: null;
List<String> endStepIds =
instance.getRunConfig() != null && instance.getRunConfig().getEndStepIds() != null
? instance.getRunConfig().getEndStepIds()
: null;
return WorkflowGraph.computeDag(instance.getRuntimeWorkflow(), startStepIds, endStepIds);
}
|
@Test
public void testTranslateForRestartFromIncompleteWithNotCreatedSteps() {
instance
.getAggregatedInfo()
.getStepAggregatedViews()
.put("job3", StepAggregatedView.builder().status(StepInstance.Status.NOT_CREATED).build());
instance.getRunConfig().setPolicy(RunPolicy.RESTART_FROM_INCOMPLETE);
Map<String, StepTransition> dag = translator.translate(instance);
Assert.assertEquals(new HashSet<>(Arrays.asList("job.2", "job3", "job4")), dag.keySet());
StepTransition jobTransition = new StepTransition();
jobTransition.setPredecessors(Collections.singletonList("job3"));
jobTransition.setSuccessors(Collections.singletonMap("job4", "true"));
Assert.assertEquals(jobTransition, dag.get("job.2"));
jobTransition.setPredecessors(Collections.emptyList());
jobTransition.setSuccessors(new HashMap<>());
jobTransition.getSuccessors().put("job.2", "true");
jobTransition.getSuccessors().put("job4", "true");
Assert.assertEquals(jobTransition, dag.get("job3"));
jobTransition.setPredecessors(Arrays.asList("job3", "job.2"));
jobTransition.setSuccessors(Collections.emptyMap());
Assert.assertEquals(jobTransition, dag.get("job4"));
}
|
public GroupForbidden updateAndGetGroupForbidden(String addr, UpdateGroupForbiddenRequestHeader requestHeader,
long timeoutMillis) throws RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException, InterruptedException, MQBrokerException {
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.UPDATE_AND_GET_GROUP_FORBIDDEN, requestHeader);
RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
request, timeoutMillis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
return RemotingSerializable.decode(response.getBody(), GroupForbidden.class);
}
default:
break;
}
throw new MQBrokerException(response.getCode(), response.getRemark(), addr);
}
|
@Test
public void assertUpdateAndGetGroupForbidden() throws RemotingException, InterruptedException, MQBrokerException {
mockInvokeSync();
GroupForbidden responseBody = new GroupForbidden();
responseBody.setGroup(group);
responseBody.setTopic(defaultTopic);
setResponseBody(responseBody);
GroupForbidden actual = mqClientAPI.updateAndGetGroupForbidden(defaultBrokerAddr, new UpdateGroupForbiddenRequestHeader(), defaultTimeout);
assertNotNull(actual);
assertEquals(group, actual.getGroup());
assertEquals(defaultTopic, actual.getTopic());
}
|
public static Guess performGuess(List<Date> releaseDates) {
if (releaseDates.size() <= 1) {
return new Guess(Schedule.UNKNOWN, null, null);
} else if (releaseDates.size() > MAX_DATA_POINTS) {
releaseDates = releaseDates.subList(releaseDates.size() - MAX_DATA_POINTS, releaseDates.size());
}
Stats stats = getStats(releaseDates);
final int maxTotalWrongDays = Math.max(1, releaseDates.size() / 5);
final int maxSingleDayOff = releaseDates.size() / 10;
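        // Heuristic tolerances that scale with sample size: up to ~20% of releases
        // may miss the overall pattern, and a weekday only counts as a release day
        // if it occurs in more than ~10% of the releases.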
GregorianCalendar last = new GregorianCalendar();
last.setTime(releaseDates.get(releaseDates.size() - 1));
last.set(Calendar.HOUR_OF_DAY, (int) stats.medianHour);
last.set(Calendar.MINUTE, (int) ((stats.medianHour - Math.floor(stats.medianHour)) * 60));
last.set(Calendar.SECOND, 0);
last.set(Calendar.MILLISECOND, 0);
if (Math.abs(stats.medianDistance - ONE_DAY) < 2 * ONE_HOUR
&& stats.avgDeltaToMedianDistance < 2 * ONE_HOUR) {
addTime(last, ONE_DAY);
return new Guess(Schedule.DAILY, Arrays.asList(Calendar.MONDAY, Calendar.TUESDAY, Calendar.WEDNESDAY,
Calendar.THURSDAY, Calendar.FRIDAY, Calendar.SATURDAY, Calendar.SUNDAY), last.getTime());
} else if (Math.abs(stats.medianDistance - ONE_WEEK) < ONE_DAY
&& stats.avgDeltaToMedianDistance < 2 * ONE_DAY) {
// Just using last.set(Calendar.DAY_OF_WEEK) could skip a week
// when the last release is delayed over week boundaries
addTime(last, 3 * ONE_DAY);
do {
addTime(last, ONE_DAY);
} while (last.get(Calendar.DAY_OF_WEEK) != stats.mostOftenDayOfWeek);
return new Guess(Schedule.WEEKLY, List.of(stats.mostOftenDayOfWeek), last.getTime());
} else if (Math.abs(stats.medianDistance - 2 * ONE_WEEK) < ONE_DAY
&& stats.avgDeltaToMedianDistance < 2 * ONE_DAY) {
// Just using last.set(Calendar.DAY_OF_WEEK) could skip a week
// when the last release is delayed over week boundaries
addTime(last, 10 * ONE_DAY);
do {
addTime(last, ONE_DAY);
} while (last.get(Calendar.DAY_OF_WEEK) != stats.mostOftenDayOfWeek);
return new Guess(Schedule.BIWEEKLY, List.of(stats.mostOftenDayOfWeek), last.getTime());
} else if (Math.abs(stats.medianDistance - ONE_MONTH) < 5 * ONE_DAY
&& stats.avgDeltaToMedianDistance < 5 * ONE_DAY) {
if (stats.daysOfMonth[stats.mostOftenDayOfMonth] >= releaseDates.size() - maxTotalWrongDays) {
            // Just using last.set(Calendar.DAY_OF_MONTH) could skip a month
            // when the last release is delayed over month boundaries
addTime(last, 2 * ONE_WEEK);
do {
addTime(last, ONE_DAY);
} while (last.get(Calendar.DAY_OF_MONTH) != stats.mostOftenDayOfMonth);
return new Guess(Schedule.MONTHLY, null, last.getTime());
}
addTime(last, 3 * ONE_WEEK + 3 * ONE_DAY);
do {
addTime(last, ONE_DAY);
} while (last.get(Calendar.DAY_OF_WEEK) != stats.mostOftenDayOfWeek);
return new Guess(Schedule.FOURWEEKLY, List.of(stats.mostOftenDayOfWeek), last.getTime());
}
// Find release days
List<Integer> largeDays = new ArrayList<>();
for (int i = Calendar.SUNDAY; i <= Calendar.SATURDAY; i++) {
if (stats.daysOfWeek[i] > maxSingleDayOff) {
largeDays.add(i);
}
}
// Ensure that all release days are used similarly often
int averageDays = releaseDates.size() / largeDays.size();
boolean matchesAverageDays = true;
for (int day : largeDays) {
if (stats.daysOfWeek[day] < averageDays - maxSingleDayOff) {
matchesAverageDays = false;
break;
}
}
if (matchesAverageDays && stats.medianDistance < ONE_WEEK) {
// Fixed daily release schedule (eg Mo, Thu, Fri)
addUntil(last, largeDays);
if (largeDays.size() == 5 && largeDays.containsAll(Arrays.asList(
Calendar.MONDAY, Calendar.TUESDAY, Calendar.WEDNESDAY, Calendar.THURSDAY, Calendar.FRIDAY))) {
return new Guess(Schedule.WEEKDAYS, largeDays, last.getTime());
}
return new Guess(Schedule.SPECIFIC_DAYS, largeDays, last.getTime());
} else if (largeDays.size() == 1) {
// Probably still weekly with more exceptions than others
addUntil(last, largeDays);
return new Guess(Schedule.WEEKLY, largeDays, last.getTime());
}
addTime(last, (long) (0.6f * stats.medianDistance));
return new Guess(Schedule.UNKNOWN, null, last.getTime());
}
|
@Test
public void testDaily() {
ArrayList<Date> releaseDates = new ArrayList<>();
releaseDates.add(makeDate("2024-01-01 16:30")); // Monday
releaseDates.add(makeDate("2024-01-02 16:25"));
releaseDates.add(makeDate("2024-01-03 16:35"));
releaseDates.add(makeDate("2024-01-04 16:40"));
releaseDates.add(makeDate("2024-01-05 16:20"));
releaseDates.add(makeDate("2024-01-06 16:10"));
releaseDates.add(makeDate("2024-01-07 16:32")); // Sunday
// Next day
ReleaseScheduleGuesser.Guess guess = performGuess(releaseDates);
assertEquals(ReleaseScheduleGuesser.Schedule.DAILY, guess.schedule);
assertClose(makeDate("2024-01-08 16:30"), guess.nextExpectedDate, 10 * ONE_MINUTE);
// One-off early release
releaseDates.add(makeDate("2024-01-08 10:00"));
guess = performGuess(releaseDates);
assertEquals(ReleaseScheduleGuesser.Schedule.DAILY, guess.schedule);
assertClose(makeDate("2024-01-09 16:30"), guess.nextExpectedDate, 10 * ONE_MINUTE);
}
|
public List<Map<String, Object>> run(String query) {
return this.run(query, Collections.emptyMap());
}
|
@Test
public void testRunShouldThrowErrorIfDriverFailsToRunQuery() {
doThrow(ClientException.class).when(session).run(anyString(), anyMap());
assertThrows(
Neo4jResourceManagerException.class,
() ->
testManager.run(
"MATCH (n) WHERE n < $val RETURN n LIMIT 1", Collections.singletonMap("val", 2)));
}
|
@Override
public void startWatching() {
if (settings.getProps().valueAsBoolean(ENABLE_STOP_COMMAND.getKey())) {
super.startWatching();
}
}
|
@Test
public void watch_stop_command_if_stop_command_is_enabled() {
TestAppSettings appSettings = new TestAppSettings(of(ENABLE_STOP_COMMAND.getKey(), "true"));
StopRequestWatcherImpl underTest = new StopRequestWatcherImpl(appSettings, scheduler, commands);
underTest.startWatching();
assertThat(underTest.isAlive()).isTrue();
verify(scheduler, never()).stop();
when(commands.askedForStop()).thenReturn(true);
verify(scheduler, timeout(1_000L)).stop();
underTest.stopWatching();
await().until(() -> !underTest.isAlive());
assertThat(underTest.isAlive()).isFalse();
}
|
public <T> T getStore(final StoreQueryParameters<T> storeQueryParameters) {
final String storeName = storeQueryParameters.storeName();
final QueryableStoreType<T> queryableStoreType = storeQueryParameters.queryableStoreType();
final List<T> globalStore = globalStoreProvider.stores(storeName, queryableStoreType);
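        // Global stores take precedence: if the name resolves to a global store,
        // serve it from the global provider instead of the per-task providers.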
if (!globalStore.isEmpty()) {
return queryableStoreType.create(globalStoreProvider, storeName);
}
return queryableStoreType.create(
new WrappingStoreProvider(storeProviders.values(), storeQueryParameters),
storeName
);
}
|
@Test
public void shouldThrowExceptionWhenLookingForWindowStoreWithDifferentType() {
assertThrows(InvalidStateStoreException.class, () -> storeProvider.getStore(StoreQueryParameters.fromNameAndType(windowStore,
QueryableStoreTypes.keyValueStore())).get("1"));
}
|
public static boolean validateCSConfiguration(
final Configuration oldConfParam, final Configuration newConf,
final RMContext rmContext) throws IOException {
// ensure that the oldConf is deep copied
Configuration oldConf = new Configuration(oldConfParam);
QueueMetrics.setConfigurationValidation(oldConf, true);
QueueMetrics.setConfigurationValidation(newConf, true);
CapacityScheduler liveScheduler = (CapacityScheduler) rmContext.getScheduler();
CapacityScheduler newCs = new CapacityScheduler();
try {
//TODO: extract all the validation steps and replace reinitialize with
//the specific validation steps
newCs.setConf(oldConf);
newCs.setRMContext(rmContext);
newCs.init(oldConf);
newCs.addNodes(liveScheduler.getAllNodes());
newCs.reinitialize(newConf, rmContext, true);
return true;
} finally {
newCs.stop();
}
}
|
@Test
public void testValidateCSConfigDominantRCAbsoluteModeParentMaxMemoryExceeded()
throws Exception {
setUpMockRM(true);
RMContext rmContext = mockRM.getRMContext();
CapacitySchedulerConfiguration oldConfiguration = cs.getConfiguration();
CapacitySchedulerConfiguration newConfiguration =
new CapacitySchedulerConfiguration(cs.getConfiguration());
newConfiguration.setMaximumResourceRequirement("",
LEAF_A_FULL_PATH, FULL_MAXRES);
try {
CapacitySchedulerConfigValidator
.validateCSConfiguration(oldConfiguration, newConfiguration, rmContext);
fail("Parent maximum capacity exceeded");
} catch (IOException e) {
Assert.assertTrue(e.getCause().getMessage()
.startsWith("Max resource configuration"));
} finally {
mockRM.stop();
}
}
|
@VisibleForTesting
List<String> getFuseInfo() {
return mFuseInfo;
}
|
@Test
public void UnderFileSystemS3A() {
try (FuseUpdateChecker checker = getUpdateCheckerWithUfs("s3a://alluxio-test/")) {
Assert.assertTrue(containsTargetInfo(checker.getFuseInfo(), "s3a"));
}
}
|
public static boolean urlEquals(String string1, String string2) {
Uri url1 = Uri.parse(string1);
Uri url2 = Uri.parse(string2);
if (url1 == null || url2 == null || url1.getHost() == null || url2.getHost() == null) {
return string1.equals(string2); // Unable to parse url properly
}
if (!url1.getHost().toLowerCase(Locale.ROOT).equals(url2.getHost().toLowerCase(Locale.ROOT))) {
return false;
}
List<String> pathSegments1 = normalizePathSegments(url1.getPathSegments());
List<String> pathSegments2 = normalizePathSegments(url2.getPathSegments());
if (!pathSegments1.equals(pathSegments2)) {
return false;
}
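    // Hosts and normalized paths match; finally require the query strings to
    // match exactly, treating a missing query the same as an empty one.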
if (TextUtils.isEmpty(url1.getQuery())) {
return TextUtils.isEmpty(url2.getQuery());
}
return url1.getQuery().equals(url2.getQuery());
}
|
@Test
public void testUrlEqualsDifferent() {
assertFalse(UrlChecker.urlEquals("https://www.example.com/test", "https://www.example2.com/test"));
assertFalse(UrlChecker.urlEquals("https://www.example.com/test", "https://www.example.de/test"));
assertFalse(UrlChecker.urlEquals("https://example.com/", "https://otherpodcast.example.com/"));
assertFalse(UrlChecker.urlEquals("https://www.example.com/?id=42&a=b", "https://www.example.com/?id=43&a=b"));
assertFalse(UrlChecker.urlEquals("https://example.com/podcast%25test", "https://example.com/podcast test"));
assertFalse(UrlChecker.urlEquals("antennapod_local:abc", "https://example.com/"));
}
|
@Override
public DistroData getDatumSnapshot() {
List<ClientSyncData> datum = new LinkedList<>();
for (String each : clientManager.allClientId()) {
Client client = clientManager.getClient(each);
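            // Only ephemeral clients are included in the Distro snapshot;
            // persistent (non-ephemeral) clients are skipped.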
if (null == client || !client.isEphemeral()) {
continue;
}
datum.add(client.generateSyncData());
}
ClientSyncDatumSnapshot snapshot = new ClientSyncDatumSnapshot();
snapshot.setClientSyncDataList(datum);
byte[] data = ApplicationUtils.getBean(Serializer.class).serialize(snapshot);
return new DistroData(new DistroKey(DataOperation.SNAPSHOT.name(), TYPE), data);
}
|
@Test
void testGetDatumSnapshot() {
when(clientManager.allClientId()).thenReturn(Collections.singletonList(CLIENT_ID));
DistroData actual = distroClientDataProcessor.getDatumSnapshot();
assertEquals(DataOperation.SNAPSHOT.name(), actual.getDistroKey().getResourceKey());
assertEquals(DistroClientDataProcessor.TYPE, actual.getDistroKey().getResourceType());
}
|
@Override
public int messageSize() {
boolean useSegwit = hasWitnesses() && allowWitness(protocolVersion);
int size = 4; // version
if (useSegwit)
size += 2; // marker, flag
size += VarInt.sizeOf(inputs.size());
for (TransactionInput in : inputs)
size += in.messageSize();
size += VarInt.sizeOf(outputs.size());
for (TransactionOutput out : outputs)
size += out.messageSize();
if (useSegwit)
for (TransactionInput in : inputs)
size += in.getWitness().messageSize();
size += 4; // locktime
return size;
}
|
@Test
public void testMessageSize() {
Transaction tx = new Transaction();
int length = tx.messageSize();
// add fake transaction input
TransactionInput input = new TransactionInput(null, ScriptBuilder.createEmpty().program(),
new TransactionOutPoint(0, Sha256Hash.ZERO_HASH));
tx.addInput(input);
length += input.messageSize();
// add fake transaction output
TransactionOutput output = new TransactionOutput(null, Coin.COIN, ADDRESS);
tx.addOutput(output);
length += output.messageSize();
// message size has now grown
assertEquals(length, tx.messageSize());
}
|
@Override
public Processor<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>, K, SubscriptionResponseWrapper<VO>> get() {
return new ContextualProcessor<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>, K, SubscriptionResponseWrapper<VO>>() {
private KTableValueGetter<KO, VO> foreignValues;
@Override
public void init(final ProcessorContext<K, SubscriptionResponseWrapper<VO>> context) {
super.init(context);
foreignValues = foreignValueGetterSupplier.get();
foreignValues.init(context);
}
@Override
public void process(final Record<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> record) {
Objects.requireNonNull(record.key(), "This processor should never see a null key.");
Objects.requireNonNull(record.value(), "This processor should never see a null value.");
final ValueAndTimestamp<SubscriptionWrapper<K>> valueAndTimestamp = record.value().newValue;
Objects.requireNonNull(valueAndTimestamp, "This processor should never see a null newValue.");
final SubscriptionWrapper<K> value = valueAndTimestamp.value();
if (value.getVersion() > SubscriptionWrapper.CURRENT_VERSION) {
//Guard against modifications to SubscriptionWrapper. Need to ensure that there is compatibility
//with previous versions to enable rolling upgrades. Must develop a strategy for upgrading
//from older SubscriptionWrapper versions to newer versions.
throw new UnsupportedVersionException("SubscriptionWrapper is of an incompatible version.");
}
final ValueAndTimestamp<VO> foreignValueAndTime =
record.key().getForeignKey() == null ?
null :
foreignValues.get(record.key().getForeignKey());
final long resultTimestamp =
foreignValueAndTime == null ?
valueAndTimestamp.timestamp() :
Math.max(valueAndTimestamp.timestamp(), foreignValueAndTime.timestamp());
switch (value.getInstruction()) {
case DELETE_KEY_AND_PROPAGATE:
context().forward(
record.withKey(record.key().getPrimaryKey())
.withValue(new SubscriptionResponseWrapper<VO>(
value.getHash(),
null,
value.getPrimaryPartition()
))
.withTimestamp(resultTimestamp)
);
break;
case PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE:
//This one needs to go through regardless of LEFT or INNER join, since the extracted FK was
//changed and there is no match for it. We must propagate the (key, null) to ensure that the
//downstream consumers are alerted to this fact.
final VO valueToSend = foreignValueAndTime == null ? null : foreignValueAndTime.value();
context().forward(
record.withKey(record.key().getPrimaryKey())
.withValue(new SubscriptionResponseWrapper<>(
value.getHash(),
valueToSend,
value.getPrimaryPartition()
))
.withTimestamp(resultTimestamp)
);
break;
case PROPAGATE_ONLY_IF_FK_VAL_AVAILABLE:
if (foreignValueAndTime != null) {
context().forward(
record.withKey(record.key().getPrimaryKey())
.withValue(new SubscriptionResponseWrapper<>(
value.getHash(),
foreignValueAndTime.value(),
value.getPrimaryPartition()
))
.withTimestamp(resultTimestamp)
);
}
break;
case DELETE_KEY_NO_PROPAGATE:
break;
default:
throw new IllegalStateException("Unhandled instruction: " + value.getInstruction());
}
}
};
}
|
@Test
public void shouldDeleteKeyAndPropagateFKV0() {
final MockProcessorContext<String, SubscriptionResponseWrapper<String>> context = new MockProcessorContext<>();
processor.init(context);
final SubscriptionWrapper<String> newValue = new SubscriptionWrapper<>(
new long[]{1L},
Instruction.DELETE_KEY_AND_PROPAGATE,
"pk1",
SubscriptionWrapper.VERSION_0,
null
);
final Record<CombinedKey<String, String>, Change<ValueAndTimestamp<SubscriptionWrapper<String>>>> record =
new Record<>(
new CombinedKey<>("fk1", "pk1"),
new Change<>(ValueAndTimestamp.make(newValue, 1L), null),
1L
);
processor.process(record);
final List<CapturedForward<? extends String, ? extends SubscriptionResponseWrapper<String>>> forwarded = context.forwarded();
assertEquals(1, forwarded.size());
assertEquals(
new Record<>(
"pk1",
new SubscriptionResponseWrapper<>(
newValue.getHash(),
null,
null),
1L
),
forwarded.get(0).record()
);
}
|
private void printSourceDescription(final SourceDescription source) {
final boolean isTable = source.getType().equalsIgnoreCase("TABLE");
writer().println(String.format("%-20s : %s", "Name", source.getName()));
if (!source.isExtended()) {
printSchema(source.getWindowType(), source.getFields(), isTable);
writer().println(
"For runtime statistics and query details run: DESCRIBE <Stream,Table> EXTENDED;");
return;
}
writer().println(String.format("%-20s : %s", "Type", source.getType()));
printTopicInfo(source);
writer().println(String.format("%-20s : %s", "Statement", source.getStatement()));
writer().println("");
printSchema(source.getWindowType(), source.getFields(), isTable);
printSourceConstraints(source.getSourceConstraints());
printQueries(source.getReadQueries(), source.getType(), "read");
printQueries(source.getWriteQueries(), source.getType(), "write");
printStatistics(source);
writer().println(String.format(
"(%s)",
"Statistics of the local KSQL server interaction with the Kafka topic "
+ source.getTopic()
));
if (!source.getQueryOffsetSummaries().isEmpty()) {
writer().println();
writer().println("Consumer Groups summary:");
for (QueryOffsetSummary entry : source.getQueryOffsetSummaries()) {
writer().println();
writer().println(String.format("%-20s : %s", "Consumer Group", entry.getGroupId()));
if (entry.getTopicSummaries().isEmpty()) {
writer().println("<no offsets committed by this group yet>");
}
for (QueryTopicOffsetSummary topicSummary : entry.getTopicSummaries()) {
writer().println();
writer().println(String.format("%-20s : %s",
"Kafka topic", topicSummary.getKafkaTopic()));
writer().println(String.format("%-20s : %s",
"Max lag", topicSummary.getOffsets().stream()
.mapToLong(s -> s.getLogEndOffset() - s.getConsumerOffset())
.max()
.orElse(0)));
writer().println("");
final Table taskTable = new Table.Builder()
.withColumnHeaders(
ImmutableList.of("Partition", "Start Offset", "End Offset", "Offset", "Lag"))
.withRows(topicSummary.getOffsets()
.stream()
.map(offset -> ImmutableList.of(
String.valueOf(offset.getPartition()),
String.valueOf(offset.getLogStartOffset()),
String.valueOf(offset.getLogEndOffset()),
String.valueOf(offset.getConsumerOffset()),
String.valueOf(offset.getLogEndOffset() - offset.getConsumerOffset())
)))
.build();
taskTable.print(this);
}
}
}
}
|
@Test
public void testPrintSourceDescription() {
// Given:
final List<FieldInfo> fields = buildTestSchema(
SqlTypes.BOOLEAN,
SqlTypes.INTEGER,
SqlTypes.BIGINT,
SqlTypes.DOUBLE,
SqlTypes.STRING,
SqlTypes.array(SqlTypes.STRING),
SqlTypes.map(SqlTypes.STRING, SqlTypes.BIGINT),
SqlTypes.struct()
.field("a", SqlTypes.DOUBLE)
.build()
);
final List<RunningQuery> readQueries = ImmutableList.of(
new RunningQuery("read query", ImmutableSet.of("sink1"), ImmutableSet.of("sink1 topic"), new QueryId("readId"), queryStatusCount, KsqlConstants.KsqlQueryType.PERSISTENT)
);
final List<RunningQuery> writeQueries = ImmutableList.of(
new RunningQuery("write query", ImmutableSet.of("sink2"), ImmutableSet.of("sink2 topic"), new QueryId("writeId"), queryStatusCount, KsqlConstants.KsqlQueryType.PERSISTENT)
);
final KsqlEntityList entityList = new KsqlEntityList(ImmutableList.of(
new SourceDescriptionEntity(
"some sql",
buildSourceDescription(readQueries, writeQueries, fields, false),
Collections.emptyList()
)
));
// When:
console.printKsqlEntityList(entityList);
// Then:
final String output = terminal.getOutputString();
Approvals.verify(output, approvalOptions);
}
|
public boolean saveToRepository( EngineMetaInterface meta ) throws KettleException {
return saveToRepository( meta, meta.getObjectId() == null );
}
|
@Test
public void saveToRepository() throws Exception {
JobMeta mockJobMeta = mock( JobMeta.class );
prepareSetSaveTests( spoon, log, mockSpoonPerspective, mockJobMeta, false, false, "NotMainSpoonPerspective", true,
true, "filename", null, true, false );
RepositoryDirectoryInterface dirMock = mock( RepositoryDirectoryInterface.class );
doReturn( "my/path" ).when( dirMock ).getPath();
doReturn( dirMock ).when( mockJobMeta ).getRepositoryDirectory();
doReturn( "trans" ).when( mockJobMeta ).getName();
RepositoryDirectoryInterface newDirMock = mock( RepositoryDirectoryInterface.class );
doReturn( "my/new/path" ).when( newDirMock ).getPath();
RepositoryObject repositoryObject = mock( RepositoryObject.class );
doReturn( newDirMock ).when( repositoryObject ).getRepositoryDirectory();
FileDialogOperation fileDlgOp = mock( FileDialogOperation.class );
doReturn( repositoryObject ).when( fileDlgOp ).getRepositoryObject();
doReturn( fileDlgOp ).when( spoon ).getFileDialogOperation( FileDialogOperation.SAVE,
FileDialogOperation.ORIGIN_SPOON );
doReturn( "newTrans" ).when( repositoryObject ).getName();
doCallRealMethod().when( spoon ).saveToRepository( mockJobMeta, true );
// mock a successful save
doReturn( true ).when( spoon ).saveToRepositoryConfirmed( mockJobMeta );
spoon.saveToRepository( mockJobMeta, true );
// verify that the meta name and directory have been updated and renameTabs is called
verify( spoon.delegates.tabs, times( 1 ) ).renameTabs();
verify( mockJobMeta, times( 1 ) ).setRepositoryDirectory( newDirMock );
verify( mockJobMeta, never() ).setRepositoryDirectory( dirMock ); // verify that the dir is never set back
verify( mockJobMeta, times( 1 ) ).setName( "newTrans" );
verify( mockJobMeta, never() ).setName( "trans" ); // verify that the name is never set back
// mock a failed save
doReturn( false ).when( spoon ).saveToRepositoryConfirmed( mockJobMeta );
spoon.saveToRepository( mockJobMeta, true );
// verify that the meta name and directory have not changed and renameTabs is not called (only once from the
// previous test)
verify( spoon.delegates.tabs, times( 1 ) ).renameTabs();
verify( mockJobMeta, times( 2 ) ).setRepositoryDirectory( newDirMock );
verify( mockJobMeta, times( 1 ) ).setRepositoryDirectory( dirMock ); // verify that the dir is set back
verify( mockJobMeta, times( 2 ) ).setName( "newTrans" );
verify( mockJobMeta, times( 1 ) ).setName( "trans" ); // verify that the name is set back
}
|
@Override
protected Optional<ErrorResponse> filter(DiscFilterRequest request) {
String method = request.getMethod();
URI uri = request.getUri();
for (Rule rule : rules) {
if (rule.matches(method, uri)) {
log.log(Level.FINE, () ->
String.format("Request '%h' with method '%s' and uri '%s' matched rule '%s'", request, method, uri, rule.name));
return responseFor(request, rule.name, rule.response);
}
}
return responseFor(request, "default", defaultResponse);
}
|
@Test
void dryrun_does_not_block() {
RuleBasedFilterConfig config = new RuleBasedFilterConfig.Builder()
.dryrun(true)
.defaultRule(new DefaultRule.Builder()
.action(DefaultRule.Action.Enum.BLOCK))
.build();
Metric metric = mock(Metric.class);
RuleBasedRequestFilter filter = new RuleBasedRequestFilter(metric, config);
MockResponseHandler responseHandler = new MockResponseHandler();
filter.filter(request("GET", "http://myserver/"), responseHandler);
assertNull(responseHandler.getResponse());
}
|
@Override
public void updateNamespace(Namespace namespace) {
checkNotNull(namespace, ERR_NULL_NAMESPACE);
checkArgument(!Strings.isNullOrEmpty(namespace.getMetadata().getUid()),
ERR_NULL_NAMESPACE_UID);
k8sNamespaceStore.updateNamespace(namespace);
log.info(String.format(MSG_NAMESPACE,
namespace.getMetadata().getName(), MSG_UPDATED));
}
|
@Test(expected = IllegalArgumentException.class)
public void testUpdateUnregisteredNamespace() {
target.updateNamespace(NAMESPACE);
}
|
@Override
public Table getTable(String dbName, String tblName) {
JDBCTableName jdbcTable = new JDBCTableName(null, dbName, tblName);
return tableInstanceCache.get(jdbcTable,
k -> {
try (Connection connection = getConnection()) {
ResultSet columnSet = schemaResolver.getColumns(connection, dbName, tblName);
List<Column> fullSchema = schemaResolver.convertToSRTable(columnSet);
List<Column> partitionColumns = Lists.newArrayList();
if (schemaResolver.isSupportPartitionInformation()) {
partitionColumns = listPartitionColumns(dbName, tblName, fullSchema);
}
if (fullSchema.isEmpty()) {
return null;
}
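                    // Reuse a persistent synthetic table id per (db, table) so that
                    // repeated lookups return the same connector table id.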
Integer tableId = tableIdCache.getPersistentCache(jdbcTable,
j -> ConnectorTableId.CONNECTOR_ID_GENERATOR.getNextId().asInt());
return schemaResolver.getTable(tableId, tblName, fullSchema,
partitionColumns, dbName, catalogName, properties);
} catch (SQLException | DdlException e) {
LOG.warn("get table for JDBC catalog fail!", e);
return null;
}
});
}
|
@Test
public void testGetTable2() {
// user/password are optional fields for jdbc.
properties.put(JDBCResource.USER, "");
properties.put(JDBCResource.PASSWORD, "");
JDBCMetadata jdbcMetadata = new JDBCMetadata(properties, "catalog", dataSource);
Table table = jdbcMetadata.getTable("test", "tbl1");
Assert.assertNotNull(table);
}
|
public Map<String, Object> convertToMap(final String json) {
Map<String, Object> map = GSON_MAP.fromJson(json, new TypeToken<Map<String, Object>>() {
}.getType());
if (MapUtils.isEmpty(map)) {
return map;
}
for (Map.Entry<String, Object> entry : map.entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
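            // String values that themselves look like serialized JSON objects
            // (wrapped in the LEFT_/RIGHT_ANGLE_BRACKETS markers, presumably "{"
            // and "}") are parsed recursively into nested maps.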
if (value instanceof String) {
String valueStr = ((String) value).trim();
if (valueStr.startsWith(LEFT_ANGLE_BRACKETS) && valueStr.endsWith(RIGHT_ANGLE_BRACKETS)) {
Map<String, Object> mv = convertToMap(value.toString());
map.put(key, mv);
}
} else if (value instanceof JsonObject) {
map.put(key, convertToMap(value.toString()));
} else if (value instanceof JsonArray) {
JsonArray jsonArray = (JsonArray) value;
map.put(key, jsonArrayToListInConvertToMap(jsonArray));
} else if (value instanceof JsonNull) {
map.put(key, null);
}
}
return map;
}
|
@Test
public void testConvertToMap() {
List<Integer> innerList = ImmutableList.of(1, 2, 3);
Map<String, Object> innerMap = ImmutableMap.of("id", 123, "name", "shenyu");
Map<String, Object> map = ImmutableMap.of("code", 200, "message", "test",
"data", innerMap, "list", innerList);
String testJson = "{\"code\":200,\"message\":\"test\","
+ "\"data\":{\"id\":123,\"name\":\"shenyu\"},\"list\":[1,2,3]}";
Map<String, Object> parseMap = GsonUtils.getInstance().convertToMap(testJson);
map.forEach((key, value) -> {
assertTrue(parseMap.containsKey(key));
if (value instanceof Map) {
Map<?, ?> tempMap = (Map<?, ?>) parseMap.get(key);
((Map<?, ?>) value).forEach((key1, value1) -> {
assertTrue(tempMap.containsKey(key1));
assertEquals(value1.toString(), tempMap.get(key1).toString());
});
} else if (value instanceof List) {
List<?> tempList = (List<?>) parseMap.get(key);
List<?> tempValue = (List<?>) value;
for (int i = 0; i < tempValue.size(); i++) {
assertEquals(tempValue.get(i).toString(), tempList.get(i).toString());
}
} else {
assertEquals(value.toString(), parseMap.get(key).toString());
}
});
assertNull(GsonUtils.getInstance().convertToMap(null));
}
|
@Override
public String name() {
return name;
}
|
@Test
public void testNotExposeTableProperties() {
Configuration conf = new Configuration();
conf.set("iceberg.hive.table-property-max-size", "0");
HiveTableOperations ops = new HiveTableOperations(conf, null, null, catalog.name(), DB_NAME, "tbl");
TableMetadata metadata = mock(TableMetadata.class);
Map<String, String> parameters = Maps.newHashMap();
parameters.put(CURRENT_SNAPSHOT_SUMMARY, "summary");
parameters.put(CURRENT_SNAPSHOT_ID, "snapshotId");
parameters.put(CURRENT_SNAPSHOT_TIMESTAMP, "timestamp");
parameters.put(CURRENT_SCHEMA, "schema");
parameters.put(DEFAULT_PARTITION_SPEC, "partitionSpec");
parameters.put(DEFAULT_SORT_ORDER, "sortOrder");
ops.setSnapshotStats(metadata, parameters);
assertThat(parameters)
.doesNotContainKey(CURRENT_SNAPSHOT_SUMMARY)
.doesNotContainKey(CURRENT_SNAPSHOT_ID)
.doesNotContainKey(CURRENT_SNAPSHOT_TIMESTAMP);
ops.setSchema(metadata, parameters);
assertThat(parameters).doesNotContainKey(CURRENT_SCHEMA);
ops.setPartitionSpec(metadata, parameters);
assertThat(parameters).doesNotContainKey(DEFAULT_PARTITION_SPEC);
ops.setSortOrder(metadata, parameters);
assertThat(parameters).doesNotContainKey(DEFAULT_SORT_ORDER);
}
|
@Override
public double logp(double x) {
if (x <= 0.0) {
throw new IllegalArgumentException("Invalid x: " + x);
}
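        // Log density of F(nu1, nu2): the x-dependent terms plus the
        // precomputed normalizing constant fac.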
return (0.5 * nu1 - 1.0) * Math.log(x) - 0.5 * (nu1 + nu2) * Math.log(nu2 + nu1 * x) + fac;
}
|
@Test
public void testLogP() {
System.out.println("logP");
FDistribution instance = new FDistribution(10, 20);
instance.rand();
assertEquals(-12.74989, instance.logp(0.01), 1E-5);
assertEquals(-4.196589, instance.logp(0.1), 1E-6);
assertEquals(-2.121800, instance.logp(0.2), 1E-6);
assertEquals(-0.374138, instance.logp(0.5), 1E-6);
assertEquals(-0.3363727, instance.logp(1), 1E-7);
assertEquals(-11.92045, instance.logp(10), 1E-5);
}
|
public static Optional<Class<?>> findPowerSerialize(Class<?>[] parameterTypes) {
if (ArrayUtils.isEmpty(parameterTypes)) {
return Optional.empty();
}
for (Class<?> clz : parameterTypes) {
final Class<?>[] interfaces = clz.getInterfaces();
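            // Fast skip: a class that declares no interfaces cannot directly
            // implement PowerSerializable.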
if (ArrayUtils.isEmpty(interfaces)) {
continue;
}
if (PowerSerializable.class.isAssignableFrom(clz)) {
return Optional.of(clz);
}
}
return Optional.empty();
}
|
@Test
void findPowerSerialize() {
Class<?>[] contains = {AlarmConfig.class, ServerScheduleJobReq.class};
Class<?>[] notContains = {AlarmConfig.class};
final Optional<Class<?>> notContainsResult = RemoteUtils.findPowerSerialize(notContains);
log.info("[RemoteUtilsTest] notContainsResult: {}", notContainsResult);
final Optional<Class<?>> containsResult = RemoteUtils.findPowerSerialize(contains);
log.info("[RemoteUtilsTest] containsResult: {}", containsResult);
assert !notContainsResult.isPresent();
assert containsResult.isPresent();
}
|
public static <T extends PipelineOptions> T as(Class<T> klass) {
return new Builder().as(klass);
}
|
@Test
public void testAllFromPipelineOptions() {
// TODO: Java core test failing on windows, https://github.com/apache/beam/issues/20466
assumeFalse(SystemUtils.IS_OS_WINDOWS);
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage(
"All inherited interfaces of"
+ " [org.apache.beam.sdk.options.PipelineOptionsFactoryTest$PipelineOptionsInheritedInvalid]"
+ " should inherit from the PipelineOptions interface. The following inherited"
+ " interfaces do not:\n"
+ " - org.apache.beam.sdk.options.PipelineOptionsFactoryTest$InvalidPipelineOptions1\n"
+ " - org.apache.beam.sdk.options.PipelineOptionsFactoryTest$InvalidPipelineOptions2");
PipelineOptionsFactory.as(PipelineOptionsInheritedInvalid.class);
}
|
public static LeaderElectionManagerConfig fromMap(Map<String, String> map) {
Map<String, String> envMap = new HashMap<>(map);
envMap.keySet().retainAll(LeaderElectionManagerConfig.keyNames());
Map<String, Object> generatedMap = ConfigParameter.define(envMap, CONFIG_VALUES);
return new LeaderElectionManagerConfig(generatedMap);
}
|
@Test
public void testMissingAllRequired() {
Map<String, String> envVars = new HashMap<>();
envVars.put(LeaderElectionManagerConfig.ENV_VAR_LEADER_ELECTION_LEASE_NAMESPACE.key(), null);
envVars.put(LeaderElectionManagerConfig.ENV_VAR_LEADER_ELECTION_IDENTITY.key(), null);
envVars.put(LeaderElectionManagerConfig.ENV_VAR_LEADER_ELECTION_LEASE_NAME.key(), null);
InvalidConfigurationException e = assertThrows(InvalidConfigurationException.class, () -> LeaderElectionManagerConfig.fromMap(envVars));
assertThat(e.getMessage(), is("Failed to parse. Value cannot be empty or null"));
}
|
public static boolean equal(String str1, String str2) {
if (str1 == null && str2 == null) {
return true;
}
if (str1 == null || str2 == null) {
return false;
}
return str1.equals(str2);
}
|
@Test
public void testEqual() {
String key = "test";
Assert.assertTrue(StringUtils.equal(key, key));
Assert.assertTrue(StringUtils.equalIgnoreCase(key, "Test"));
Assert.assertTrue(StringUtils.equalIgnoreCase(key, "teST"));
Assert.assertTrue(StringUtils.isEmpty(""));
Assert.assertTrue(StringUtils.contains(key, "tes"));
Assert.assertTrue(StringUtils.prefix(key, "te"));
Assert.assertFalse(StringUtils.prefix(key, "check"));
Assert.assertTrue(StringUtils.suffix(key, "st"));
Assert.assertFalse(StringUtils.suffix(key, "sT"));
Assert.assertFalse(StringUtils.suffix(key, null));
Assert.assertTrue(StringUtils.equal(StringUtils.trim(" test "), key));
}
|
public static Iterator<TransferableBlock> splitBlock(TransferableBlock block, DataBlock.Type type, int maxBlockSize) {
List<TransferableBlock> blockChunks = new ArrayList<>();
if (type == DataBlock.Type.ROW) {
      // Use an estimated row size; the estimate is not accurate and is only used to derive numRowsPerChunk.
int estimatedRowSizeInBytes = block.getDataSchema().getColumnNames().length * MEDIAN_COLUMN_SIZE_BYTES;
int numRowsPerChunk = maxBlockSize / estimatedRowSizeInBytes;
Preconditions.checkState(numRowsPerChunk > 0, "row size too large for query engine to handle, abort!");
int totalNumRows = block.getNumRows();
List<Object[]> allRows = block.getContainer();
int currentRow = 0;
while (currentRow < totalNumRows) {
List<Object[]> chunk = allRows.subList(currentRow, Math.min(currentRow + numRowsPerChunk, allRows.size()));
currentRow += numRowsPerChunk;
blockChunks.add(new TransferableBlock(chunk, block.getDataSchema(), block.getType()));
}
return blockChunks.iterator();
} else if (type == DataBlock.Type.METADATA) {
return Iterators.singletonIterator(block);
} else {
throw new IllegalArgumentException("Unsupported data block type: " + type);
}
}
|
@Test(dataProvider = "splitRowCountProvider")
public void testSplitBlockUtils(int splitRowCount)
throws Exception {
DataSchema dataSchema = getDataSchema();
// compare serialized split
int estRowSizeInBytes = dataSchema.size() * TEST_EST_BYTES_PER_COLUMN;
List<Object[]> rows = DataBlockTestUtils.getRandomRows(dataSchema, TOTAL_ROW_COUNT, 1);
RowDataBlock rowBlock = DataBlockBuilder.buildFromRows(rows, dataSchema);
validateBlocks(TransferableBlockUtils.splitBlock(new TransferableBlock(rowBlock), DataBlock.Type.ROW,
estRowSizeInBytes * splitRowCount + 1), rows, dataSchema);
// compare non-serialized split
validateBlocks(TransferableBlockUtils.splitBlock(new TransferableBlock(rows, dataSchema, DataBlock.Type.ROW),
DataBlock.Type.ROW, estRowSizeInBytes * splitRowCount + 1), rows, dataSchema);
}
|
public WithJsonPath(JsonPath jsonPath, Matcher<T> resultMatcher) {
this.jsonPath = jsonPath;
this.resultMatcher = resultMatcher;
}
|
@Test
public void shouldDescribeMismatchOfEvaluation() {
Matcher<? super ReadContext> matcher = withJsonPath("expensive", equalTo(3));
Description description = new StringDescription();
matcher.describeMismatch(BOOKS_JSON, description);
assertThat(description.toString(), containsString("expensive"));
assertThat(description.toString(), containsString("<10>"));
}
|
@Override
public boolean sendElectionMessage(int currentId, String content) {
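        // If no election candidate is alive, the message is accepted outright
        // (return true); otherwise an ELECTION_INVOKE message is forwarded to
        // every candidate and the election is reported as not accepted.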
var candidateList = findElectionCandidateInstanceList(currentId);
if (candidateList.isEmpty()) {
return true;
} else {
var electionMessage = new Message(MessageType.ELECTION_INVOKE, "");
candidateList.forEach((i) -> instanceMap.get(i).onMessage(electionMessage));
return false;
}
}
|
@Test
void testSendElectionMessageNotAccepted() {
try {
var instance1 = new BullyInstance(null, 1, 1);
var instance2 = new BullyInstance(null, 1, 2);
var instance3 = new BullyInstance(null, 1, 3);
var instance4 = new BullyInstance(null, 1, 4);
Map<Integer, Instance> instanceMap = Map.of(1, instance1, 2, instance2, 3, instance3, 4, instance4);
instance1.setAlive(false);
var messageManager = new BullyMessageManager(instanceMap);
var result = messageManager.sendElectionMessage(3, "3");
var instanceClass = AbstractInstance.class;
var messageQueueField = instanceClass.getDeclaredField("messageQueue");
messageQueueField.setAccessible(true);
var message2 = ((Queue<Message>) messageQueueField.get(instance2)).poll();
var instance4QueueSize = ((Queue<Message>) messageQueueField.get(instance4)).size();
var expectedMessage = new Message(MessageType.ELECTION_INVOKE, "");
assertEquals(message2, expectedMessage);
assertEquals(instance4QueueSize, 0);
assertFalse(result);
} catch (IllegalAccessException | NoSuchFieldException e) {
fail("Error to access private field.");
}
}
|
@CheckReturnValue
@NonNull public static Observable<Boolean> observePowerSavingState(
@NonNull Context context, @StringRes int enablePrefResId, @BoolRes int defaultValueResId) {
final RxSharedPrefs prefs = AnyApplication.prefs(context);
return Observable.combineLatest(
prefs
.getString(
R.string.settings_key_power_save_mode,
R.string.settings_default_power_save_mode_value)
.asObservable(),
enablePrefResId == 0
? Observable.just(true)
: prefs.getBoolean(enablePrefResId, defaultValueResId).asObservable(),
RxBroadcastReceivers.fromIntentFilter(
context.getApplicationContext(), getBatteryStateIntentFilter())
.startWith(new Intent(Intent.ACTION_BATTERY_OKAY)),
RxBroadcastReceivers.fromIntentFilter(
context.getApplicationContext(), getChargerStateIntentFilter())
.startWith(new Intent(Intent.ACTION_POWER_DISCONNECTED)),
getOsPowerSavingStateObservable(context),
(powerSavingPref, enabledPref, batteryIntent, chargerIntent, osPowerSavingState) -> {
if (!enabledPref) return false;
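              // The "never"/"always" preferences override everything; otherwise
              // power saving is on when the OS reports it, or when the battery
              // is low while the charger is disconnected.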
switch (powerSavingPref) {
case "never":
return false;
case "always":
return true;
default:
return osPowerSavingState
|| (Intent.ACTION_BATTERY_LOW.equals(batteryIntent.getAction())
&& Intent.ACTION_POWER_DISCONNECTED.equals(chargerIntent.getAction()));
}
})
.distinctUntilChanged();
}
|
@Test
@Config(sdk = Build.VERSION_CODES.LOLLIPOP)
public void testWhenLowPowerSavingModeWithDevicePowerSavingState() {
Context context = Mockito.spy(getApplicationContext());
final PowerManager powerManager =
(PowerManager) getApplicationContext().getSystemService(Service.POWER_SERVICE);
Mockito.doReturn(powerManager).when(context).getSystemService(Service.POWER_SERVICE);
ShadowPowerManager shadowPowerManager = Shadows.shadowOf(powerManager);
AtomicReference<Boolean> state = new AtomicReference<>(null);
final Observable<Boolean> powerSavingState = PowerSaving.observePowerSavingState(context, 0);
Assert.assertNull(state.get());
final Disposable disposable = powerSavingState.subscribe(state::set);
// starts as false
Assert.assertEquals(Boolean.FALSE, state.get());
sendPowerSavingState(shadowPowerManager, false);
Assert.assertEquals(Boolean.FALSE, state.get());
sendPowerSavingState(shadowPowerManager, true);
Assert.assertEquals(Boolean.TRUE, state.get());
sendPowerSavingState(shadowPowerManager, false);
Assert.assertEquals(Boolean.FALSE, state.get());
disposable.dispose();
sendPowerSavingState(shadowPowerManager, true);
Assert.assertEquals(Boolean.FALSE, state.get());
sendPowerSavingState(shadowPowerManager, false);
Assert.assertEquals(Boolean.FALSE, state.get());
}
|
@Override
public void revert(final Path file) throws BackgroundException {
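        // Revert by copying the file onto itself server-side, which promotes
        // the selected version to be the newest one.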
new B2CopyFeature(session, fileid).copy(file, file, new TransferStatus(), new DisabledLoginCallback(), new DisabledStreamListener());
}
|
@Test
public void testRevert() throws Exception {
final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
final Path room = new B2DirectoryFeature(session, fileid).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path test = new B2TouchFeature(session, fileid).touch(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final Path ignored = new B2TouchFeature(session, fileid).touch(new Path(room, String.format("%s-2", test.getName()), EnumSet.of(Path.Type.file)), new TransferStatus());
{
// Make sure there is another versioned copy of a file not to be included when listing
final byte[] content = RandomUtils.nextBytes(245);
final TransferStatus status = new TransferStatus().withLength(content.length);
final B2WriteFeature writer = new B2WriteFeature(session, fileid);
final HttpResponseOutputStream<BaseB2Response> out = writer.write(ignored, status, new DisabledConnectionCallback());
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
}
assertTrue(new B2FindFeature(session, fileid).find(ignored));
final PathAttributes initialAttributes = new PathAttributes(test.attributes());
final String initialVersion = test.attributes().getVersionId();
final byte[] content = RandomUtils.nextBytes(32769);
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
status.setExists(true);
final B2WriteFeature writer = new B2WriteFeature(session, fileid);
final StatusOutputStream<BaseB2Response> out = writer.write(test, status, new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
assertNotNull(test.attributes().getVersionId());
assertNotEquals(initialVersion, test.attributes().getVersionId());
final B2VersioningFeature feature = new B2VersioningFeature(session, fileid);
{
final AttributedList<Path> versions = feature.list(test, new DisabledListProgressListener());
assertEquals(1, versions.size());
assertEquals(new Path(test).withAttributes(initialAttributes), versions.get(0));
assertTrue(new B2FindFeature(session, fileid).find(versions.get(0)));
assertEquals(initialVersion, new B2AttributesFinderFeature(session, fileid).find(versions.get(0)).getVersionId());
}
final PathAttributes updated = new B2AttributesFinderFeature(session, fileid).find(test);
assertNotEquals(initialVersion, updated.getVersionId());
feature.revert(new Path(test).withAttributes(initialAttributes));
final AttributedList<Path> versions = feature.list(test, new DisabledListProgressListener());
assertEquals(2, versions.size());
assertEquals(status.getResponse().getVersionId(), versions.get(0).attributes().getVersionId());
assertEquals(initialVersion, versions.get(1).attributes().getVersionId());
for(Path version : new B2ListService(session, fileid).list(room, new DisabledListProgressListener())) {
new B2DeleteFeature(session, fileid).delete(Collections.singletonList(version), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
new B2DeleteFeature(session, fileid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public Map<Endpoint, CompletableFuture<Void>> futures() {
return futures;
}
|
@Test
public void testAddReadinessFutures() {
Map<Endpoint, CompletableFuture<Void>> bazFutures = new HashMap<>();
bazFutures.put(EXTERNAL, new CompletableFuture<>());
bazFutures.put(INTERNAL, new CompletableFuture<>());
EndpointReadyFutures readyFutures = new EndpointReadyFutures.Builder().
addReadinessFutures("baz", bazFutures).
build(Optional.empty(), INFO);
assertEquals(new HashSet<>(Arrays.asList(EXTERNAL, INTERNAL)),
readyFutures.futures().keySet());
assertIncomplete(readyFutures, EXTERNAL, INTERNAL);
bazFutures.get(EXTERNAL).complete(null);
assertComplete(readyFutures, EXTERNAL);
assertIncomplete(readyFutures, INTERNAL);
bazFutures.get(INTERNAL).complete(null);
assertComplete(readyFutures, EXTERNAL, INTERNAL);
}
|
@Override
public Supplier<Predicate> compilePredicate(SqlFunctionProperties sqlFunctionProperties, Map<SqlFunctionId, SqlInvokedFunction> sessionFunctions, RowExpression predicate)
{
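// With no cache configured, compile the predicate directly; otherwise reuse cached compilations for identical keys.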
if (predicateCache == null) {
return compilePredicateInternal(sqlFunctionProperties, sessionFunctions, predicate);
}
return predicateCache.getUnchecked(new CacheKey(sqlFunctionProperties, sessionFunctions, predicate));
}
|
@Test
public void testCache()
{
// a * 2 < 10
RowExpression predicate = call(
"=",
functionResolution.comparisonFunction(LESS_THAN, BIGINT, BIGINT),
BOOLEAN,
call("a * 2", functionResolution.arithmeticFunction(MULTIPLY, BIGINT, BIGINT), BIGINT, new InputReferenceExpression(Optional.empty(), 1, BIGINT), constant(2L, BIGINT)),
constant(10L, BIGINT));
PredicateCompiler compiler = new RowExpressionPredicateCompiler(metadata, 10_000);
assertSame(
compiler.compilePredicate(SESSION.getSqlFunctionProperties(), SESSION.getSessionFunctions(), predicate),
compiler.compilePredicate(SESSION.getSqlFunctionProperties(), SESSION.getSessionFunctions(), predicate));
PredicateCompiler noCacheCompiler = new RowExpressionPredicateCompiler(metadata, 0);
assertNotSame(
noCacheCompiler.compilePredicate(SESSION.getSqlFunctionProperties(), SESSION.getSessionFunctions(), predicate),
noCacheCompiler.compilePredicate(SESSION.getSqlFunctionProperties(), SESSION.getSessionFunctions(), predicate));
}
|
@HighFrequencyInvocation
public Optional<EncryptAlgorithm> findEncryptor(final String logicColumnName) {
return columns.containsKey(logicColumnName) ? Optional.of(columns.get(logicColumnName).getCipher().getEncryptor()) : Optional.empty();
}
|
@Test
void assertNotFindEncryptorName() {
assertFalse(encryptTable.findEncryptor("notExistLogicColumn").isPresent());
}
|
@Override
public LeaderElection createLeaderElection(String componentId) {
synchronized (lock) {
Preconditions.checkState(
!leadershipOperationExecutor.isShutdown(),
"The service was already closed and cannot be reused.");
Preconditions.checkState(
!leaderContenderRegistry.containsKey(componentId),
"There shouldn't be any contender registered under the passed component '%s'.",
componentId);
return new DefaultLeaderElection(this, componentId);
}
}
|
@Test
void testLazyDriverInstantiation() throws Exception {
final AtomicBoolean driverCreated = new AtomicBoolean();
try (final DefaultLeaderElectionService testInstance =
new DefaultLeaderElectionService(
listener -> {
driverCreated.set(true);
return TestingLeaderElectionDriver.newNoOpBuilder().build(listener);
},
fatalErrorHandlerExtension.getTestingFatalErrorHandler(),
Executors.newDirectExecutorService())) {
assertThat(driverCreated)
.as("The driver shouldn't have been created during service creation.")
.isFalse();
try (final LeaderElection leaderElection =
testInstance.createLeaderElection("component-id")) {
assertThat(driverCreated)
.as(
"The driver shouldn't have been created during LeaderElection creation.")
.isFalse();
leaderElection.startLeaderElection(
TestingGenericLeaderContender.newBuilder().build());
assertThat(driverCreated)
.as(
"The driver should have been created when registering the contender in the LeaderElection.")
.isTrue();
}
}
}
|
public static DataSource createDataSource(final ModeConfiguration modeConfig) throws SQLException {
return createDataSource(DefaultDatabase.LOGIC_NAME, modeConfig);
}
|
@Test
void assertCreateDataSourceWithAllParametersForSingleDataSourceWithDefaultDatabaseName() throws SQLException {
assertDataSource(ShardingSphereDataSourceFactory.createDataSource(
new ModeConfiguration("Standalone", null), new MockedDataSource(), new LinkedList<>(), new Properties()), DefaultDatabase.LOGIC_NAME);
}
|
@Override
public OverlayData createOverlayData(ComponentName remoteApp) {
final OverlayData original = mOriginal.createOverlayData(remoteApp);
if (original.isValid() || mFixInvalid) {
final int backgroundLuminance = luminance(original.getPrimaryColor());
final int diff = backgroundLuminance - luminance(original.getPrimaryTextColor());
if (mRequiredTextColorDiff > Math.abs(diff)) {
if (backgroundLuminance > GRAY_LUM) {
// closer to white, text will be black
original.setPrimaryTextColor(Color.BLACK);
original.setSecondaryTextColor(Color.DKGRAY);
} else {
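// closer to black, text will be white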
original.setPrimaryTextColor(Color.WHITE);
original.setSecondaryTextColor(Color.LTGRAY);
}
}
}
return original;
}
|
@Test
public void testReturnsFixedToWhiteIfDarkIfTextIsTooClose() {
OverlayData original = setupOriginal(Color.DKGRAY, Color.DKGRAY, Color.GRAY);
final OverlayData fixed = mUnderTest.createOverlayData(mTestComponent);
Assert.assertSame(original, fixed);
Assert.assertTrue(fixed.isValid());
Assert.assertEquals(Color.DKGRAY, fixed.getPrimaryColor());
Assert.assertEquals(Color.DKGRAY, fixed.getPrimaryDarkColor());
Assert.assertEquals(Color.WHITE, fixed.getPrimaryTextColor());
Assert.assertEquals(Color.LTGRAY, fixed.getSecondaryTextColor());
}
|
static int readDirectBuffer(InputStream f, ByteBuffer buf, byte[] temp) throws IOException {
// copy all the bytes that return immediately, stopping at the first
// read that doesn't return a full buffer.
int nextReadLength = Math.min(buf.remaining(), temp.length);
int totalBytesRead = 0;
int bytesRead;
while ((bytesRead = f.read(temp, 0, nextReadLength)) == temp.length) {
buf.put(temp);
totalBytesRead += bytesRead;
nextReadLength = Math.min(buf.remaining(), temp.length);
}
if (bytesRead < 0) {
// return -1 if nothing was read
return totalBytesRead == 0 ? -1 : totalBytesRead;
} else {
// copy the last partial buffer
buf.put(temp, 0, bytesRead);
totalBytesRead += bytesRead;
return totalBytesRead;
}
}
|
@Test
public void testDirectSmallTempBufferWithPositionAndLimit() throws Exception {
byte[] temp = new byte[2]; // this will cause readDirectBuffer to loop
ByteBuffer readBuffer = ByteBuffer.allocateDirect(20);
readBuffer.position(5);
readBuffer.limit(13);
readBuffer.mark();
MockInputStream stream = new MockInputStream(7);
int len = DelegatingSeekableInputStream.readDirectBuffer(stream, readBuffer, temp);
Assert.assertEquals(7, len);
Assert.assertEquals(12, readBuffer.position());
Assert.assertEquals(13, readBuffer.limit());
len = DelegatingSeekableInputStream.readDirectBuffer(stream, readBuffer, temp);
Assert.assertEquals(1, len);
Assert.assertEquals(13, readBuffer.position());
Assert.assertEquals(13, readBuffer.limit());
len = DelegatingSeekableInputStream.readDirectBuffer(stream, readBuffer, temp);
Assert.assertEquals(0, len);
readBuffer.reset();
Assert.assertEquals("Buffer contents should match", ByteBuffer.wrap(TEST_ARRAY, 0, 8), readBuffer);
}
|
@Udf
public String elt(
@UdfParameter(description = "the nth element to extract") final int n,
@UdfParameter(description = "the strings of which to extract the nth") final String... args
) {
if (args == null) {
return null;
}
if (n < 1 || n > args.length) {
return null;
}
return args[n - 1];
}
|
@Test
public void shouldHandleNullArgs() {
// When:
String[] array = null;
final String el = elt.elt(2, array);
// Then:
assertThat(el, is(nullValue()));
}
|
@Override
public ByteBuf setInt(int index, int value) {
throw new ReadOnlyBufferException();
}
|
@Test
public void testSetInt() throws IOException {
final ByteBuf buf = newBuffer(wrappedBuffer(new byte[8]));
try {
assertThrows(ReadOnlyBufferException.class, new Executable() {
@Override
public void execute() {
buf.setInt(0, 1);
}
});
} finally {
buf.release();
}
}
|
public static RuleDescriptionSectionContextDto of(String key, String displayName) {
return new RuleDescriptionSectionContextDto(key, displayName);
}
|
@Test
void equals_with_different_display_names_should_return_false() {
RuleDescriptionSectionContextDto context1 = RuleDescriptionSectionContextDto.of(CONTEXT_KEY, CONTEXT_DISPLAY_NAME);
RuleDescriptionSectionContextDto context2 = RuleDescriptionSectionContextDto.of(CONTEXT_KEY, CONTEXT_DISPLAY_NAME + "2");
assertThat(context1).isNotEqualTo(context2);
}
|
public long elapsedNanos() {
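// While running, measure against the current tick; once stopped, return the recorded value or fail if the watch never ran.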
if (running) {
return this.ticks.ticks() - start;
} else {
if (elapsed == -1)
throw new IllegalStateException();
return elapsed;
}
}
|
@Test
void notStarted1() {
assertThrows(IllegalStateException.class, () -> {
FakeTicks f = new FakeTicks();
Stopwatch s = new Stopwatch(f);
s.elapsedNanos();
});
}
|
public static DateTime parse(CharSequence dateStr, DateFormat dateFormat) {
return new DateTime(dateStr, dateFormat);
}
|
@Test
public void parseJdkTest() {
final String dateStr = "Thu May 16 17:57:18 GMT+08:00 2019";
final DateTime time = DateUtil.parse(dateStr);
assertEquals("2019-05-16 17:57:18", Objects.requireNonNull(time).toString());
}
|
@Override
public void upgrade() {
if (clusterConfigService.get(V202406260800_MigrateCertificateAuthority.MigrationCompleted.class) != null) {
LOG.debug("Migration already completed.");
return;
}
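// Move the existing keystore into cluster config, then drop the legacy collection.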
readExistingKeystore().ifPresent(keystore -> clusterConfigService.write(new EncryptedCaKeystore(keystore)));
mongoConnection.getMongoDatabase().getCollection(LEGACY_COLLECTION_NAME).drop();
}
|
@Test
void testMigration() {
mongodb.importFixture("V202406260800_MigrateCertificateAuthorityTest.json", V202406260800_MigrateCertificateAuthorityTest.class);
final ClusterConfigService clusterConfigService = Mockito.mock(ClusterConfigService.class);
final V202406260800_MigrateCertificateAuthority migration = new V202406260800_MigrateCertificateAuthority(clusterConfigService, mongodb.mongoConnection());
final long documentsCount = mongodb.mongoConnection().getMongoDatabase().getCollection(V202406260800_MigrateCertificateAuthority.LEGACY_COLLECTION_NAME).countDocuments();
// there should be one entry with the encoded CA keystore
Assertions.assertThat(documentsCount).isEqualTo(1);
migration.upgrade();
final ArgumentCaptor<EncryptedCaKeystore> captor = ArgumentCaptor.forClass(EncryptedCaKeystore.class);
Mockito.verify(clusterConfigService, Mockito.times(1)).write(captor.capture());
// verify that migration extracted the correct data
Assertions.assertThat(captor.getValue().keystore().value()).startsWith("b97f382e52e0cf");
Assertions.assertThat(captor.getValue().keystore().salt()).isEqualTo("753bacc1ae1df5e3");
final Set<String> existingCollections = mongodb.mongoConnection().getMongoDatabase().listCollectionNames().into(new HashSet<>());
Assertions.assertThat(existingCollections).doesNotContain(V202406260800_MigrateCertificateAuthority.LEGACY_COLLECTION_NAME);
}
|
public ApplicationReport getReport() {
return report;
}
|
@Test
public void testParseToReport() {
try {
YarnApplicationReport yarnReport = new YarnApplicationReport(runningReport);
ApplicationReport report = yarnReport.getReport();
Assert.assertEquals("application_15888888888_0088", report.getApplicationId().toString());
Assert.assertEquals("label0", report.getName());
Assert.assertEquals("test", report.getUser());
Assert.assertEquals("test-queue", report.getQueue());
Assert.assertEquals(1597654469958L, report.getStartTime());
Assert.assertEquals(0L, report.getFinishTime());
Assert.assertTrue(report.getProgress() == 0.5f);
Assert.assertEquals(YarnApplicationState.RUNNING, report.getYarnApplicationState());
Assert.assertEquals(FinalApplicationStatus.UNDEFINED, report.getFinalApplicationStatus());
Assert.assertEquals("http://127.0.0.1:8080/proxy/application_1586619723848_0088/", report.getTrackingUrl());
Assert.assertEquals(40236, report.getRpcPort());
Assert.assertEquals("host-name", report.getHost());
} catch (LoadException e) {
e.printStackTrace();
Assert.fail();
}
}
|
@Override
public Graph<EntityDescriptor> resolveNativeEntity(EntityDescriptor entityDescriptor) {
final MutableGraph<EntityDescriptor> mutableGraph = GraphBuilder.directed().build();
mutableGraph.addNode(entityDescriptor);
final ModelId modelId = entityDescriptor.id();
try {
final PipelineDao pipelineDao = pipelineService.load(modelId.id());
final String pipelineSource = pipelineDao.source();
final Collection<String> referencedRules = referencedRules(pipelineSource);
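// The pipeline depends on every rule referenced in its source and on every stream it is connected to.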
referencedRules.stream()
.map(ModelId::of)
.map(id -> EntityDescriptor.create(id, ModelTypes.PIPELINE_RULE_V1))
.forEach(rule -> mutableGraph.putEdge(entityDescriptor, rule));
final Set<PipelineConnections> pipelineConnections = connectionsService.loadByPipelineId(pipelineDao.id());
pipelineConnections.stream()
.map(PipelineConnections::streamId)
.map(ModelId::of)
.map(id -> EntityDescriptor.create(id, ModelTypes.STREAM_REF_V1))
.forEach(stream -> mutableGraph.putEdge(entityDescriptor, stream));
} catch (NotFoundException e) {
LOG.debug("Couldn't find pipeline {}", entityDescriptor, e);
}
return ImmutableGraph.copyOf(mutableGraph);
}
|
@Test
@MongoDBFixtures("PipelineFacadeTest/pipelines.json")
public void resolveEntityDescriptor() {
final Stage stage = Stage.builder()
.stage(0)
.match(Stage.Match.EITHER)
.ruleReferences(Collections.singletonList("no-op"))
.build();
final Pipeline pipeline = Pipeline.builder()
.id("5a85c4854b900afd5d662be3")
.name("Test")
.stages(ImmutableSortedSet.of(stage))
.build();
when(pipelineRuleParser.parsePipeline("dummy", "pipeline \"Test\"\nstage 0 match either\nrule \"debug\"\nrule \"no-op\"\nend"))
.thenReturn(pipeline);
RuleDao ruleDao = RuleDao.builder()
.id("2342353045938450345")
.title("no-op")
.source("rule \\\"debug\\\"\\nrule \\\"no-op\\\"\\nend\"")
.build();
when(ruleService.findByName("no-op")).thenReturn(Optional.of(ruleDao));
final EntityDescriptor descriptor = EntityDescriptor.create("5a85c4854b900afd5d662be3", ModelTypes.PIPELINE_V1);
final Graph<EntityDescriptor> graph = facade.resolveNativeEntity(descriptor);
assertThat(graph.nodes()).containsOnly(
descriptor,
EntityDescriptor.create("5adf23894b900a0fdb4e517d", ModelTypes.STREAM_REF_V1),
EntityDescriptor.create("2342353045938450345", ModelTypes.PIPELINE_RULE_V1));
}
|
public <T> Future<Iterable<T>> bagFuture(
ByteString encodedTag, String stateFamily, Coder<T> elemCoder) {
// First request has no continuation position.
StateTag<Long> stateTag = StateTag.of(StateTag.Kind.BAG, encodedTag, stateFamily);
// Convert the ValuesAndContPosition<T> to Iterable<T>.
return valuesToPagingIterableFuture(stateTag, elemCoder, this.stateFuture(stateTag, elemCoder));
}
|
@Test
public void testReadBag() throws Exception {
Future<Iterable<Integer>> future = underTest.bagFuture(STATE_KEY_1, STATE_FAMILY, INT_CODER);
Mockito.verifyNoMoreInteractions(mockWindmill);
Windmill.KeyedGetDataRequest.Builder expectedRequest =
Windmill.KeyedGetDataRequest.newBuilder()
.setKey(DATA_KEY)
.setShardingKey(SHARDING_KEY)
.setWorkToken(WORK_TOKEN)
.setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
.addBagsToFetch(
Windmill.TagBag.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.setFetchMaxBytes(WindmillStateReader.INITIAL_MAX_BAG_BYTES));
Windmill.KeyedGetDataResponse.Builder response =
Windmill.KeyedGetDataResponse.newBuilder()
.setKey(DATA_KEY)
.addBags(
Windmill.TagBag.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addValues(intData(5))
.addValues(intData(6)));
Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest.build()))
.thenReturn(response.build());
Iterable<Integer> results = future.get();
Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest.build());
for (Integer unused : results) {
// Iterate over the results to force loading all the pages.
}
Mockito.verifyNoMoreInteractions(mockWindmill);
assertThat(results, Matchers.contains(5, 6));
assertNoReader(future);
}
|
@Override
public PollResult poll(long currentTimeMs) {
return pollInternal(
prepareFetchRequests(),
this::handleFetchSuccess,
this::handleFetchFailure
);
}
|
@Test
public void testFetchSessionIdError() {
buildFetcher();
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
assertEquals(1, sendFetches());
client.prepareResponse(fetchResponseWithTopLevelError(tidp0, Errors.FETCH_SESSION_TOPIC_ID_ERROR, 0));
networkClientDelegate.poll(time.timer(0));
assertEmptyFetch("Should not return records or advance position on fetch error");
assertEquals(0L, metadata.timeToNextUpdate(time.milliseconds()));
}
|
@Override
public void prepare() throws ServiceNotProvidedException, ModuleStartException {
try {
List<Address> addressList = ConnectUtils.parse(config.getHostPort());
List<HostAndPort> hostAndPorts = new ArrayList<>();
for (Address address : addressList) {
hostAndPorts.add(HostAndPort.fromParts(address.getHost(), address.getPort()));
}
Consul.Builder consulBuilder = Consul.builder()
// set a connect timeout, otherwise the client can block forever
.withConnectTimeoutMillis(3000);
if (StringUtils.isNotEmpty(config.getAclToken())) {
consulBuilder.withAclToken(config.getAclToken());
}
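// With multiple addresses, build a client that can fail over across hosts; otherwise connect to the single host directly.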
if (hostAndPorts.size() > 1) {
client = consulBuilder.withMultipleHostAndPort(hostAndPorts, 5000).build();
} else {
client = consulBuilder.withHostAndPort(hostAndPorts.get(0)).build();
}
} catch (ConnectStringParseException | ConsulException e) {
throw new ModuleStartException(e.getMessage(), e);
}
ConsulCoordinator coordinator = new ConsulCoordinator(getManager(), config, client);
this.registerServiceImplementation(ClusterRegister.class, coordinator);
this.registerServiceImplementation(ClusterNodesQuery.class, coordinator);
this.registerServiceImplementation(ClusterCoordinator.class, coordinator);
}
|
@Test
public void prepareWithNonHost() throws Exception {
assertThrows(ModuleStartException.class, () -> provider.prepare());
}
|
@Deprecated
public static String getJwt(JwtClaims claims) throws JoseException {
String jwt;
RSAPrivateKey privateKey = (RSAPrivateKey) getPrivateKey(
jwtConfig.getKey().getFilename(),jwtConfig.getKey().getPassword(), jwtConfig.getKey().getKeyName());
// A JWT is a JWS and/or a JWE with JSON claims as the payload.
// In this example it is a JWS nested inside a JWE
// So we first create a JsonWebSignature object.
JsonWebSignature jws = new JsonWebSignature();
// The payload of the JWS is JSON content of the JWT Claims
jws.setPayload(claims.toJson());
// The JWT is signed using the sender's private key
jws.setKey(privateKey);
// Get the provider id from the security config file; it should be two digits.
// The provider id is set as a prefix for the key id in the token header, for example: 05100.
// If there is no provider id, the key id is used without a prefix.
String provider_id = "";
if (jwtConfig.getProviderId() != null) {
provider_id = jwtConfig.getProviderId();
if (provider_id.length() == 1) {
provider_id = "0" + provider_id;
} else if (provider_id.length() > 2) {
logger.error("provider_id defined in the security.yml file is invalid; the length should be 2");
provider_id = provider_id.substring(0, 2);
}
}
jws.setKeyIdHeaderValue(provider_id + jwtConfig.getKey().getKid());
// Set the signature algorithm on the JWT/JWS that will integrity protect the claims
jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256);
// Sign the JWS and produce the compact serialization, which will be the inner JWT/JWS
// representation, which is a string consisting of three dot ('.') separated
// base64url-encoded parts in the form Header.Payload.Signature
jwt = jws.getCompactSerialization();
return jwt;
}
|
@Test
public void petstoreBootstrap() throws Exception {
JwtClaims claims = ClaimsUtil.getTestCcClaimsScopeService("f7d42348-c647-4efb-a52d-4c5787421e72", "portal.r portal.w", "com.networknt.petstore-3.0.1");
claims.setExpirationTimeMinutesInTheFuture(5256000);
String jwt = JwtIssuer.getJwt(claims, long_kid, KeyUtil.deserializePrivateKey(long_key, KeyUtil.RSA));
System.out.println("***Reference Long lived Bootstrap token for config server and controller: " + jwt);
}
|
@Override
public ExtensionMappingAddress mapLcafAddress(LispLcafAddress lcafAddress) {
switch (lcafAddress.getType()) {
case LIST:
LispListLcafAddress lcafListAddress = (LispListLcafAddress) lcafAddress;
MappingAddress ipv4Ma =
afi2mapping(lcafListAddress.getAddresses().get(0));
MappingAddress ipv6Ma =
afi2mapping(lcafListAddress.getAddresses().get(1));
return new LispListAddress.Builder()
.withIpv4(ipv4Ma)
.withIpv6(ipv6Ma)
.build();
case SEGMENT:
LispSegmentLcafAddress segmentLcafAddress =
(LispSegmentLcafAddress) lcafAddress;
return new LispSegmentAddress.Builder()
.withInstanceId(segmentLcafAddress.getInstanceId())
.withAddress(getMappingAddress(segmentLcafAddress.getAddress()))
.build();
case AS:
LispAsLcafAddress asLcafAddress = (LispAsLcafAddress) lcafAddress;
return new org.onosproject.drivers.lisp.extensions.LispAsAddress.Builder()
.withAsNumber(asLcafAddress.getAsNumber())
.withAddress(getMappingAddress(asLcafAddress.getAddress()))
.build();
case APPLICATION_DATA:
LispAppDataLcafAddress appLcafAddress = (LispAppDataLcafAddress) lcafAddress;
return new LispAppDataAddress.Builder()
.withProtocol(appLcafAddress.getProtocol())
.withIpTos(appLcafAddress.getIpTos())
.withLocalPortLow(appLcafAddress.getLocalPortLow())
.withLocalPortHigh(appLcafAddress.getLocalPortHigh())
.withRemotePortLow(appLcafAddress.getRemotePortLow())
.withRemotePortHigh(appLcafAddress.getRemotePortHigh())
.withAddress(getMappingAddress(appLcafAddress.getAddress()))
.build();
case GEO_COORDINATE:
LispGeoCoordinateLcafAddress gcLcafAddress =
(LispGeoCoordinateLcafAddress) lcafAddress;
return new LispGcAddress.Builder()
.withIsNorth(gcLcafAddress.isNorth())
.withLatitudeDegree(gcLcafAddress.getLatitudeDegree())
.withLatitudeMinute(gcLcafAddress.getLatitudeMinute())
.withLatitudeSecond(gcLcafAddress.getLatitudeSecond())
.withIsEast(gcLcafAddress.isEast())
.withLongitudeDegree(gcLcafAddress.getLongitudeDegree())
.withLongitudeMinute(gcLcafAddress.getLongitudeMinute())
.withLongitudeSecond(gcLcafAddress.getLongitudeSecond())
.withAltitude(gcLcafAddress.getAltitude())
.withAddress(getMappingAddress(gcLcafAddress.getAddress()))
.build();
case NAT:
LispNatLcafAddress natLcafAddress = (LispNatLcafAddress) lcafAddress;
List<MappingAddress> mas = Lists.newArrayList();
natLcafAddress.getRtrRlocAddresses()
.forEach(rtr -> mas.add(getMappingAddress(rtr)));
return new LispNatAddress.Builder()
.withMsUdpPortNumber(natLcafAddress.getMsUdpPortNumber())
.withEtrUdpPortNumber(natLcafAddress.getEtrUdpPortNumber())
.withMsRlocAddress(getMappingAddress(natLcafAddress.getMsRlocAddress()))
.withGlobalEtrRlocAddress(
getMappingAddress(natLcafAddress.getGlobalEtrRlocAddress()))
.withPrivateEtrRlocAddress(
getMappingAddress(natLcafAddress.getPrivateEtrRlocAddress()))
.withRtrRlocAddresses(mas)
.build();
case NONCE:
LispNonceLcafAddress nonceLcafAddress = (LispNonceLcafAddress) lcafAddress;
return new LispNonceAddress.Builder()
.withNonce(nonceLcafAddress.getNonce())
.withAddress(getMappingAddress(nonceLcafAddress.getAddress()))
.build();
case MULTICAST:
LispMulticastLcafAddress multiLcafAddress =
(LispMulticastLcafAddress) lcafAddress;
return new LispMulticastAddress.Builder()
.withInstanceId(multiLcafAddress.getInstanceId())
.withSrcAddress(getMappingAddress(multiLcafAddress.getSrcAddress()))
.withSrcMaskLength(multiLcafAddress.getSrcMaskLength())
.withGrpAddress(getMappingAddress(multiLcafAddress.getGrpAddress()))
.withGrpMaskLength(multiLcafAddress.getGrpMaskLength())
.build();
case TRAFFIC_ENGINEERING:
LispTeLcafAddress teLcafAddress = (LispTeLcafAddress) lcafAddress;
List<LispTeAddress.TeRecord> records = Lists.newArrayList();
teLcafAddress.getTeRecords().forEach(record -> {
LispTeAddress.TeRecord teRecord =
new LispTeAddress.TeRecord.Builder()
.withIsLookup(record.isLookup())
.withIsRlocProbe(record.isRlocProbe())
.withIsStrict(record.isStrict())
.withRtrRlocAddress(getMappingAddress(
record.getRtrRlocAddress()))
.build();
records.add(teRecord);
});
return new LispTeAddress.Builder()
.withTeRecords(records)
.build();
case SECURITY:
// TODO: need to implement security type later
log.warn("security type will be implemented later");
return null;
case SOURCE_DEST:
LispSourceDestLcafAddress srcDstLcafAddress =
(LispSourceDestLcafAddress) lcafAddress;
return new LispSrcDstAddress.Builder()
.withSrcPrefix(getMappingAddress(srcDstLcafAddress.getSrcPrefix()))
.withSrcMaskLength(srcDstLcafAddress.getSrcMaskLength())
.withDstPrefix(getMappingAddress(srcDstLcafAddress.getDstPrefix()))
.withDstMaskLength(srcDstLcafAddress.getDstMaskLength())
.build();
case UNSPECIFIED:
case UNKNOWN:
default:
log.error("Unsupported LCAF type {}", lcafAddress.getType());
return null;
}
}
|
@Test
public void testMapLcafAddress() {
new EqualsTester()
.addEqualityGroup(listExtAddress, interpreter.mapLcafAddress(listLcafAddress))
.addEqualityGroup(segmentExtAddress, interpreter.mapLcafAddress(segmentLcafAddress))
.addEqualityGroup(asExtAddress, interpreter.mapLcafAddress(asLcafAddress))
.addEqualityGroup(appDataExtAddress, interpreter.mapLcafAddress(appDataLcafAddress))
.addEqualityGroup(gcExtAddress, interpreter.mapLcafAddress(gcLcafAddress))
.addEqualityGroup(natExtAddress, interpreter.mapLcafAddress(natLcafAddress))
.addEqualityGroup(nonceExtAddress, interpreter.mapLcafAddress(nonceLcafAddress))
.addEqualityGroup(multicastExtAddress, interpreter.mapLcafAddress(multicastLcafAddress))
.addEqualityGroup(teExtAddress, interpreter.mapLcafAddress(teLcafAddress))
.addEqualityGroup(srcDstExtAddress, interpreter.mapLcafAddress(srcDstLcafAddress))
.testEquals();
}
|
public static File load(String name) {
try {
if (name == null) {
throw new IllegalArgumentException("name can't be null");
}
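// The name may be URL-encoded; decode it before resolving on the file system.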
String decodedPath = URLDecoder.decode(name, StandardCharsets.UTF_8.name());
return getFileFromFileSystem(decodedPath);
} catch (UnsupportedEncodingException e) {
LOGGER.error("decode name error: {}", e.getMessage(), e);
}
return null;
}
|
@Test
public void testLoadException() {
Assertions.assertThrows(IllegalArgumentException.class, () -> FileLoader.load(null));
}
|
public boolean hasRemainingEncodedBytes() {
// We delete an array after fully consuming it.
return encodedArrays != null && encodedArrays.size() != 0;
}
|
@Test
public void testHasRemainingEncodedBytes() {
byte[] bytes = {'a', 'b', 'c'};
long number = 12345;
// Empty
OrderedCode orderedCode = new OrderedCode();
assertFalse(orderedCode.hasRemainingEncodedBytes());
// First and only field of each type.
orderedCode.writeBytes(bytes);
assertTrue(orderedCode.hasRemainingEncodedBytes());
assertArrayEquals(orderedCode.readBytes(), bytes);
assertFalse(orderedCode.hasRemainingEncodedBytes());
orderedCode.writeNumIncreasing(number);
assertTrue(orderedCode.hasRemainingEncodedBytes());
assertEquals(orderedCode.readNumIncreasing(), number);
assertFalse(orderedCode.hasRemainingEncodedBytes());
orderedCode.writeSignedNumIncreasing(number);
assertTrue(orderedCode.hasRemainingEncodedBytes());
assertEquals(orderedCode.readSignedNumIncreasing(), number);
assertFalse(orderedCode.hasRemainingEncodedBytes());
orderedCode.writeInfinity();
assertTrue(orderedCode.hasRemainingEncodedBytes());
assertTrue(orderedCode.readInfinity());
assertFalse(orderedCode.hasRemainingEncodedBytes());
orderedCode.writeTrailingBytes(bytes);
assertTrue(orderedCode.hasRemainingEncodedBytes());
assertArrayEquals(orderedCode.readTrailingBytes(), bytes);
assertFalse(orderedCode.hasRemainingEncodedBytes());
// Two fields of same type.
orderedCode.writeBytes(bytes);
orderedCode.writeBytes(bytes);
assertTrue(orderedCode.hasRemainingEncodedBytes());
assertArrayEquals(orderedCode.readBytes(), bytes);
assertArrayEquals(orderedCode.readBytes(), bytes);
assertFalse(orderedCode.hasRemainingEncodedBytes());
}
|
@Override
public boolean apply(InputFile f) {
if (path == null) {
return false;
}
return path.equals(f.relativePath());
}
|
@Test
public void returns_false_if_doesnt_match() {
RelativePathPredicate predicate = new RelativePathPredicate("path1");
InputFile inputFile = mock(InputFile.class);
when(inputFile.relativePath()).thenReturn("path2");
assertThat(predicate.apply(inputFile)).isFalse();
}
|
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
log.info("Starting to validate internal topics {}.", topicConfigs.keySet());
final long now = time.milliseconds();
final long deadline = now + retryTimeoutMs;
final ValidationResult validationResult = new ValidationResult();
final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
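// Keep describing topics and their configs until every topic validates or the retry deadline is reached.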
while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
if (!topicDescriptionsStillToValidate.isEmpty()) {
final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topicDescriptionsStillToValidate);
descriptionsForTopic = describeTopicsResult.topicNameValues();
}
Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
if (!topicConfigsStillToValidate.isEmpty()) {
final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
topicConfigsStillToValidate.stream()
.map(topic -> new ConfigResource(Type.TOPIC, topic))
.collect(Collectors.toSet())
);
configsForTopic = describeConfigsResult.values().entrySet().stream()
.collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
}
while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
if (!descriptionsForTopic.isEmpty()) {
doValidateTopic(
validationResult,
descriptionsForTopic,
topicConfigs,
topicDescriptionsStillToValidate,
(streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide)
);
}
if (!configsForTopic.isEmpty()) {
doValidateTopic(
validationResult,
configsForTopic,
topicConfigs,
topicConfigsStillToValidate,
(streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide)
);
}
maybeThrowTimeoutException(
Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
deadline,
String.format("Could not validate internal topics within %d milliseconds. " +
"This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs)
);
if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
Utils.sleep(100);
}
}
maybeSleep(
Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
deadline,
"validated"
);
}
log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
return validationResult;
}
|
@Test
public void shouldThrowWhenPartitionCountUnknown() {
setupTopicInMockAdminClient(topic1, repartitionTopicConfig());
final InternalTopicConfig internalTopicConfig = new RepartitionTopicConfig(topic1, Collections.emptyMap());
assertThrows(
IllegalStateException.class,
() -> internalTopicManager.validate(Collections.singletonMap(topic1, internalTopicConfig))
);
}
|
@Udf
public <T> List<T> mapValues(final Map<String, T> input) {
if (input == null) {
return null;
}
return Lists.newArrayList(input.values());
}
|
@SuppressWarnings("unchecked")
@Test
public void shouldHandleComplexValueTypes() {
final Map<String, Map<String, List<Double>>> input = Maps.newHashMap();
final Map<String, List<Double>> entry1 = Maps.newHashMap();
entry1.put("apple", Arrays.asList(Double.valueOf(12.34), Double.valueOf(56.78)));
entry1.put("banana", Arrays.asList(Double.valueOf(43.21), Double.valueOf(87.65)));
input.put("foo", entry1);
final Map<String, List<Double>> entry2 = Maps.newHashMap();
entry2.put("cherry", Arrays.asList(Double.valueOf(12.34), Double.valueOf(56.78)));
entry2.put("date", Arrays.asList(Double.valueOf(43.21), Double.valueOf(87.65)));
input.put("bar", entry2);
List<Map<String, List<Double>>> values = udf.mapValues(input);
assertThat(values, containsInAnyOrder(entry1, entry2));
}
|
public Optional<String> getNodeName(String nodeId) {
return nodeNameCache.getUnchecked(nodeId);
}
|
@Test
public void getNodeNameUsesCache() {
when(cluster.nodeIdToName("node_id")).thenReturn(Optional.of("Node Name"));
nodeInfoCache.getNodeName("node_id");
nodeInfoCache.getNodeName("node_id");
verify(cluster, times(1)).nodeIdToName("node_id");
}
|
public static Builder builder() {
return new AutoValue_HttpHeaders.Builder();
}
|
@Test
public void builderAddHeader_withIllegalHeaderValue_throwsIllegalArgumentException() {
assertThrows(
IllegalArgumentException.class,
() -> HttpHeaders.builder().addHeader("test_header", String.valueOf((char) 11)));
}
|
@Override
public String toString() {
DdlResult ddlResult = this;
StringBuilder sb = new StringBuilder();
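// Follow the chain of rename results, appending one formatted segment per DdlResult.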
do {
sb.append(String.format("DdlResult [schemaName=%s , tableName=%s , oriSchemaName=%s , oriTableName=%s , type=%s ];",
ddlResult.schemaName,
ddlResult.tableName,
ddlResult.oriSchemaName,
ddlResult.oriTableName,
ddlResult.type));
ddlResult = ddlResult.renameTableResult;
} while (ddlResult != null);
return sb.toString();
}
|
@Test
public void toStringOutputNotNull() {
// Arrange
final DdlResult objectUnderTest = new DdlResult();
// Act
final String actual = objectUnderTest.toString();
// Assert result
Assert.assertEquals(
"DdlResult [schemaName=null , tableName=null , oriSchemaName=null , oriTableName=null , type=null ];",
actual);
}
|
public static boolean isEntropyInjecting(FileSystem fs, Path target) {
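// Entropy is only injected when the file system supports it, an injection key is configured, and the target path contains that key.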
final EntropyInjectingFileSystem entropyFs = getEntropyFs(fs);
return entropyFs != null
&& entropyFs.getEntropyInjectionKey() != null
&& target.getPath().contains(entropyFs.getEntropyInjectionKey());
}
|
@Test
void testIsEntropyFsWithNullEntropyKey() throws Exception {
final FileSystem efs = new TestEntropyInjectingFs(null, "ignored");
final File folder = TempDirUtils.newFolder(tempFolder);
assertThat(EntropyInjector.isEntropyInjecting(efs, Path.fromLocalFile(folder))).isFalse();
}
|
@NonNull
public static String fullEncode(@NonNull String s) {
return encode(s, fullUriMap);
}
|
@Test
public void testFullEncode() {
String[] data = {
"abcdefghijklmnopqrstuvwxyz",
"abcdefghijklmnopqrstuvwxyz",
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"01234567890!@$&*()-_=+',.",
"01234567890%21%40%24%26%2A%28%29%2D%5F%3D%2B%27%2C%2E",
" \"#%/:;<>?",
"%20%22%23%25%2F%3A%3B%3C%3E%3F",
"[\\]^`{|}~",
"%5B%5C%5D%5E%60%7B%7C%7D%7E",
"d\u00E9velopp\u00E9s",
"d%C3%A9velopp%C3%A9s",
"Foo \uD800\uDF98 Foo",
"Foo%20%F0%90%8E%98%20Foo",
"\u00E9 ",
"%C3%A9%20",
};
for (int i = 0; i < data.length; i += 2) {
assertEquals("test " + i, data[i + 1], Util.fullEncode(data[i]));
}
}
|
@Override
public BooleanPredicate clone() throws CloneNotSupportedException {
return (BooleanPredicate)super.clone();
}
|
@Test
void requireThatCloneIsImplemented() throws CloneNotSupportedException {
BooleanPredicate node1 = new BooleanPredicate(true);
BooleanPredicate node2 = node1.clone();
assertEquals(node1, node2);
assertNotSame(node1, node2);
}
|
public int doWork()
{
final long nowNs = nanoClock.nanoTime();
trackTime(nowNs);
int workCount = 0;
workCount += processTimers(nowNs);
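// Only poll for new client commands while no asynchronous client command is in flight.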
if (!asyncClientCommandInFlight)
{
workCount += clientCommandAdapter.receive();
}
workCount += drainCommandQueue();
workCount += trackStreamPositions(workCount, nowNs);
workCount += nameResolver.doWork(cachedEpochClock.time());
workCount += freeEndOfLifeResources(ctx.resourceFreeLimit());
return workCount;
}
|
@Test
void shouldRemoveCounterOnClientTimeout()
{
final long registrationId = driverProxy.addCounter(
COUNTER_TYPE_ID,
counterKeyAndLabel,
COUNTER_KEY_OFFSET,
COUNTER_KEY_LENGTH,
counterKeyAndLabel,
COUNTER_LABEL_OFFSET,
COUNTER_LABEL_LENGTH);
driverConductor.doWork();
final ArgumentCaptor<Integer> captor = ArgumentCaptor.forClass(Integer.class);
verify(mockClientProxy).onCounterReady(eq(registrationId), captor.capture());
doWorkUntil(() -> (CLIENT_LIVENESS_TIMEOUT_NS * 2) - nanoClock.nanoTime() <= 0);
verify(spyCountersManager).free(captor.getValue());
}
|
@Override
public void close() throws InterruptedException {
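// Signal shutdown to all worker threads, then wait for each of them to terminate.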
beginShutdown();
for (Thread t : threads) {
t.join();
}
log.info("Event processor closed.");
}
|
@Test
public void testCreateAndClose() throws Exception {
CoordinatorEventProcessor eventProcessor = new MultiThreadedEventProcessor(
new LogContext(),
"event-processor-",
2,
Time.SYSTEM,
mock(CoordinatorRuntimeMetrics.class)
);
eventProcessor.close();
}
|
public void handleMessage(
MessageExt messageExt) throws MQClientException, RemotingException, InterruptedException {
if (rocketMQListener != null) {
rocketMQListener.onMessage(doConvertMessage(messageExt));
} else if (rocketMQReplyListener != null) {
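// Reply listener: send the listener's return value back to the requester as a reply message.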
Object replyContent = rocketMQReplyListener.onMessage(doConvertMessage(messageExt));
Message<?> message = MessageBuilder.withPayload(replyContent).build();
org.apache.rocketmq.common.message.Message replyMessage = MessageUtil.createReplyMessage(messageExt, convertToBytes(message));
DefaultMQProducer producer = consumer.getDefaultMQPushConsumerImpl().getmQClientFactory().getDefaultMQProducer();
producer.setSendMsgTimeout(replyTimeout);
producer.send(replyMessage, new SendCallback() {
@Override public void onSuccess(SendResult sendResult) {
if (sendResult.getSendStatus() != SendStatus.SEND_OK) {
log.error("Consumer replies message failed. SendStatus: {}", sendResult.getSendStatus());
} else {
log.debug("Consumer replies message success.");
}
}
@Override public void onException(Throwable e) {
log.error("Consumer replies message failed. error: {}", e.getLocalizedMessage());
}
});
}
}
|
@Test
public void testHandleMessage() throws Exception {
DefaultRocketMQListenerContainer listenerContainer = new DefaultRocketMQListenerContainer();
Method handleMessage = DefaultRocketMQListenerContainer.class.getDeclaredMethod("handleMessage", MessageExt.class);
handleMessage.setAccessible(true);
listenerContainer.setRocketMQListener(new RocketMQListener<String>() {
@Override
public void onMessage(String message) {
}
});
Field messageType = DefaultRocketMQListenerContainer.class.getDeclaredField("messageType");
messageType.setAccessible(true);
messageType.set(listenerContainer, String.class);
MessageExt messageExt = new MessageExt(0, System.currentTimeMillis(), null, System.currentTimeMillis(), null, null);
MessageAccessor.putProperty(messageExt, MessageConst.PROPERTY_CLUSTER, "defaultCluster");
messageExt.setBody("hello".getBytes());
handleMessage.invoke(listenerContainer, messageExt);
// reply message
listenerContainer.setRocketMQListener(null);
DefaultMQPushConsumer consumer = mock(DefaultMQPushConsumer.class);
DefaultMQPushConsumerImpl pushConsumer = mock(DefaultMQPushConsumerImpl.class);
MQClientInstance mqClientInstance = mock(MQClientInstance.class);
DefaultMQProducer producer = mock(DefaultMQProducer.class);
when(consumer.getDefaultMQPushConsumerImpl()).thenReturn(pushConsumer);
when(pushConsumer.getmQClientFactory()).thenReturn(mqClientInstance);
when(mqClientInstance.getDefaultMQProducer()).thenReturn(producer);
listenerContainer.setConsumer(consumer);
listenerContainer.setMessageConverter(new CompositeMessageConverter(Arrays.asList(new StringMessageConverter(), new MappingJackson2MessageConverter())));
doNothing().when(producer).send(any(MessageExt.class), any(SendCallback.class));
listenerContainer.setRocketMQReplyListener(new RocketMQReplyListener<String, String>() {
@Override
public String onMessage(String message) {
return "test";
}
});
handleMessage.invoke(listenerContainer, messageExt);
}
|
public synchronized void setSslTrustLevel(TrustLevel sslTrustLevel) {
if (sslTrustLevel != this.sslTrustLevel) {
this.sslTrustLevel = sslTrustLevel;
// force sslContext to be reinitialized with new trust level
sslContext = null;
}
}
|
@Test
public void testSslTrustLevel() throws Exception {
// default "open" trust level
CrawlURI curi = makeCrawlURI("https://localhost:7443/");
fetcher().process(curi);
runDefaultChecks(curi, "hostHeader");
// "normal" trust level
curi = makeCrawlURI("https://localhost:7443/");
fetcher().setSslTrustLevel(TrustLevel.NORMAL);
fetcher().process(curi);
assertEquals(1, curi.getNonFatalFailures().size());
assertTrue(curi.getNonFatalFailures().toArray()[0] instanceof SSLException);
assertEquals(FetchStatusCodes.S_CONNECT_FAILED, curi.getFetchStatus());
assertEquals(0, curi.getFetchCompletedTime());
}
|
@Override
public MetadataStore create(String metadataURL, MetadataStoreConfig metadataStoreConfig,
boolean enableSessionWatcher) throws MetadataStoreException {
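// The in-memory store has no session concept, so enableSessionWatcher is ignored here.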
return new LocalMemoryMetadataStore(metadataURL, metadataStoreConfig);
}
|
@Test
public void testIsIgnoreEvent() throws Exception {
TestMetadataEventSynchronizer sync = new TestMetadataEventSynchronizer();
@Cleanup
AbstractMetadataStore store1 = (AbstractMetadataStore) MetadataStoreFactory.create("memory:local",
MetadataStoreConfig.builder().synchronizer(sync).build());
String path = "/test";
byte[] value1 = "value1".getBytes(StandardCharsets.UTF_8);
byte[] value2 = "value2".getBytes(StandardCharsets.UTF_8);
store1.put(path, value1, Optional.empty()).join();
long time1 = Instant.now().toEpochMilli();
long time2 = time1 - 5;
Stat stats = new Stat(path, 0, time2, time2, false, false);
GetResult existingData = new GetResult(value1, stats);
// (1) ignore due to Ephemeral node
MetadataEvent event = new MetadataEvent(path, value1, Sets.newHashSet(CreateOption.Ephemeral), 0L,
time1, sync.getClusterName(), NotificationType.Modified);
assertTrue(store1.shouldIgnoreEvent(event, existingData));
// (2) ignore due to invalid expected version
event = new MetadataEvent(path, value1, EMPTY_SET, 10L/*invalid-version*/,
time1, sync.getClusterName(), NotificationType.Modified);
assertTrue(store1.shouldIgnoreEvent(event, existingData));
// (3) accept with valid conditions
event = new MetadataEvent(path, value1, EMPTY_SET, 0L,
time1, sync.getClusterName(), NotificationType.Modified);
assertFalse(store1.shouldIgnoreEvent(event, existingData));
// (4) ignore due to missing cluster name (null)
event = new MetadataEvent(path, value1, EMPTY_SET, 0L,
time1, null, NotificationType.Modified);
assertTrue(store1.shouldIgnoreEvent(event, existingData));
// (5) consider due to same timestamp and correct expected version on the same cluster
event = new MetadataEvent(path, value1, EMPTY_SET, 0L,
time2, sync.getClusterName(), NotificationType.Modified);
assertFalse(store1.shouldIgnoreEvent(event, existingData));
// (6) ignore due to same timestamp but different expected version on the same cluster
event = new MetadataEvent(path, value1, EMPTY_SET, 10L,
time2, sync.getClusterName(), NotificationType.Modified);
assertTrue(store1.shouldIgnoreEvent(event, existingData));
// (7) consider due to same timestamp and no expected version (null) on the same cluster
event = new MetadataEvent(path, value1, EMPTY_SET, null,
time2, sync.getClusterName(), NotificationType.Modified);
assertFalse(store1.shouldIgnoreEvent(event, existingData));
// (8) ignore due to older timestamp on the same cluster
event = new MetadataEvent(path, value1, EMPTY_SET, 0L,
time2 - 5, sync.getClusterName(), NotificationType.Modified);
assertTrue(store1.shouldIgnoreEvent(event, existingData));
// (9) consider "uest" > "test" and same timestamp
event = new MetadataEvent(path, value1, EMPTY_SET, 0L,
time2, "uest", NotificationType.Modified);
assertFalse(store1.shouldIgnoreEvent(event, existingData));
// (10) ignore "uest" > "test" and older timestamp
event = new MetadataEvent(path, value1, EMPTY_SET, 0L,
time2 - 5, "uest", NotificationType.Modified);
assertTrue(store1.shouldIgnoreEvent(event, existingData));
// (11) ignore "rest" < "test" and same timestamp
event = new MetadataEvent(path, value1, EMPTY_SET, 0L,
time2, "rest", NotificationType.Modified);
assertTrue(store1.shouldIgnoreEvent(event, existingData));
}
|
public static LatLong interpolateLatLong(LatLong p1, LatLong p2, double fraction) {
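// Linear interpolation is only safe for nearby points: reject pairs spanning more than 90 degrees of latitude or 180 degrees of longitude.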
double maxLat = max(p1.latitude(), p2.latitude());
double minLat = min(p1.latitude(), p2.latitude());
checkArgument(maxLat - minLat <= 90.0, "Interpolation is unsafe at this distance (latitude)");
double maxLong = max(p1.longitude(), p2.longitude());
double minLong = min(p1.longitude(), p2.longitude());
checkArgument(maxLong - minLong <= 180.0, "Interpolation is unsafe at this distance (longitude)");
return new LatLong(
interpolate(p1.latitude(), p2.latitude(), fraction),
interpolate(p1.longitude(), p2.longitude(), fraction)
);
}
|
@Test
public void testInterpolateLatLong() {
LatLong p1 = new LatLong(0.0, 5.0);
LatLong p2 = new LatLong(5.0, 0.0);
double TOLERANCE = 0.0001;
assertEquals(
0.0,
interpolateLatLong(p1, p2, 0.0).latitude(),
TOLERANCE
);
assertEquals(
5.0,
interpolateLatLong(p1, p2, 1.0).latitude(),
TOLERANCE
);
assertEquals(
5.0,
interpolateLatLong(p1, p2, 0.0).longitude(),
TOLERANCE
);
assertEquals(
0.0,
interpolateLatLong(p1, p2, 1.0).longitude(),
TOLERANCE
);
}
|
@Override
public <T> Task<T> synchronize(Task<T> task, long deadline) {
return PlanLocal.get(getPlanLocalKey(), LockInternal.class)
.flatMap(lockInternal -> {
if (lockInternal != null) {
// we already acquire the lock, add count only.
lockInternal._lockCount++;
return Task.value(lockInternal._lockNode);
} else {
// try acquire.
return acquire(deadline);
}
})
/* run the given task with toTry() */
.flatMap(unused -> task).toTry()
/* release the lock and unwind the result */
.flatMap(result -> release().andThen(unwind(result)));
}
|
@Test
public void testMultiLocks()
throws InterruptedException {
int loopCount = 100;
final long deadline = System.currentTimeMillis() + TimeUnit.MILLISECONDS.convert(60, TimeUnit.SECONDS);
MultiLocks multiLocks1 = new MultiLocks(_zkClient, _acls, "/locks/l1", "/locks/l2", "/locks/l3");
MultiLocks multiLocks2 = new MultiLocks(_zkClient, _acls, "/locks/l3", "/locks/l1", "/locks/l2");
final AtomicReference<Integer> sum = new AtomicReference<>(0);
Task<Void> plan1 = loop(loopCount, () -> multiLocks1.synchronize(Task.action(() -> {
int current = sum.get();
// increment by one.
sum.set(++current);
}), deadline));
Task<Void> plan2 = loop(loopCount, () -> multiLocks2.synchronize(Task.action(() -> {
int current = sum.get();
// increment by one.
sum.set(++current);
}), deadline));
run(plan1);
run(plan2);
Assert.assertTrue(plan1.await(60, TimeUnit.SECONDS));
plan1.get();
Assert.assertTrue(plan2.await(60, TimeUnit.SECONDS));
plan2.get();
Assert.assertEquals((int) sum.get(), 2 * loopCount);
}
|
public static Optional<ESEventOriginContext> parseESContext(String url) {
if (url.startsWith(ES_EVENT) || url.startsWith(ES_MESSAGE)) {
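// A valid context URN consists of six colon-separated tokens; the last two (index and id) form the origin context.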
final String[] tokens = url.split(":");
if (tokens.length != 6) {
return Optional.empty();
}
return Optional.of(ESEventOriginContext.create(tokens[4], tokens[5]));
} else {
return Optional.empty();
}
}
|
@Test
public void parseShortESContext() {
assertThat(EventOriginContext.parseESContext("urn:graylog:message:es:ind")).isEmpty();
}
|
public DdlCommandResult execute(
final String sql,
final DdlCommand ddlCommand,
final boolean withQuery,
final Set<SourceName> withQuerySources
) {
return execute(sql, ddlCommand, withQuery, withQuerySources, false);
}
|
@Test
public void shouldAddSourceStream() {
// Given:
final CreateStreamCommand cmd = buildCreateStream(
SourceName.of("t1"),
SCHEMA,
false,
true
);
// When:
cmdExec.execute(SQL_TEXT, cmd, true, NO_QUERY_SOURCES);
// Then:
final KsqlStream ksqlTable = (KsqlStream) metaStore.getSource(SourceName.of("t1"));
assertThat(ksqlTable.isSource(), is(true));
}
|
public static MapperReference getMapper() {
return MAPPER_REFERENCE.get();
}
|
@Test
public void testResourceQuotaMixIn() {
ObjectMapper objectMapper = ObjectMapperFactory.getMapper().getObjectMapper();
try {
ResourceQuota resourceQuota = new ResourceQuota();
String json = objectMapper.writeValueAsString(resourceQuota);
Assert.assertFalse(json.contains("valid"));
} catch (Exception ex) {
Assert.fail("shouldn't have thrown exception", ex);
}
}
|
public static L3ModificationInstruction decNwTtl() {
return new ModTtlInstruction(L3SubType.DEC_TTL);
}
|
@Test
public void testDecNwTtlOutMethod() {
final Instruction instruction = Instructions.decNwTtl();
final L3ModificationInstruction.ModTtlInstruction modTtlInstruction =
checkAndConvert(instruction,
Instruction.Type.L3MODIFICATION,
L3ModificationInstruction.ModTtlInstruction.class);
assertThat(modTtlInstruction.subtype(),
is(L3ModificationInstruction.L3SubType.DEC_TTL));
}
|
public static String toString(String[] line, boolean quote) {
return toString(line, quote, " ");
}
|
@Test
void testToStringWithSeparator() {
final String separator = "], [";
assertEquals("", CommandLine.toString(null, false, separator));
assertEquals(ARG_SPACES_NOQUOTES,
CommandLine.toString(new String[]{ARG_SPACES_NOQUOTES}, false, separator));
assertEquals(ARG_SPACES_NOQUOTES + separator + ARG_NOSPACES,
CommandLine.toString(new String[]{ARG_SPACES_NOQUOTES, ARG_NOSPACES}, false, separator));
assertEquals(String.join(separator, ARG_SPACES_NOQUOTES, ARG_NOSPACES, ARG_SPACES),
CommandLine.toString(new String[]{ARG_SPACES_NOQUOTES, ARG_NOSPACES, ARG_SPACES},
false, separator));
}
|
public static ProtoOverrides.TransformReplacement createSizedReplacement() {
return SizedReplacement.builder().setDrain(false).build();
}
|
@Test
public void testSizedReplacement() {
Pipeline p = Pipeline.create();
p.apply(Create.of("1", "2", "3"))
.apply("TestSDF", ParDo.of(new PairStringWithIndexToLengthBase()));
RunnerApi.Pipeline proto = PipelineTranslation.toProto(p);
String transformName =
Iterables.getOnlyElement(
Maps.filterValues(
proto.getComponents().getTransformsMap(),
(RunnerApi.PTransform transform) ->
transform
.getUniqueName()
.contains(PairStringWithIndexToLengthBase.class.getSimpleName()))
.keySet());
RunnerApi.Pipeline updatedProto =
ProtoOverrides.updateTransform(
PTransformTranslation.PAR_DO_TRANSFORM_URN,
proto,
SplittableParDoExpander.createSizedReplacement());
RunnerApi.PTransform newComposite =
updatedProto.getComponents().getTransformsOrThrow(transformName);
assertEquals(FunctionSpec.getDefaultInstance(), newComposite.getSpec());
assertEquals(3, newComposite.getSubtransformsCount());
assertEquals(
PTransformTranslation.SPLITTABLE_PAIR_WITH_RESTRICTION_URN,
updatedProto
.getComponents()
.getTransformsOrThrow(newComposite.getSubtransforms(0))
.getSpec()
.getUrn());
assertEquals(
PTransformTranslation.SPLITTABLE_SPLIT_AND_SIZE_RESTRICTIONS_URN,
updatedProto
.getComponents()
.getTransformsOrThrow(newComposite.getSubtransforms(1))
.getSpec()
.getUrn());
assertEquals(
PTransformTranslation.SPLITTABLE_PROCESS_SIZED_ELEMENTS_AND_RESTRICTIONS_URN,
updatedProto
.getComponents()
.getTransformsOrThrow(newComposite.getSubtransforms(2))
.getSpec()
.getUrn());
}
|
@Override
public GenericRow transform(GenericRow record) {
for (Map.Entry<String, FunctionEvaluator> entry : _expressionEvaluators.entrySet()) {
String column = entry.getKey();
FunctionEvaluator transformFunctionEvaluator = entry.getValue();
// Transformation is skipped if the column value already exists; the only exception is nested fields
// (array, collection or map), which are overridden for backward compatibility since they were not
// included in the record transformation before.
Object existingValue = record.getValue(column);
if (existingValue == null) {
try {
record.putValue(column, transformFunctionEvaluator.evaluate(record));
} catch (Exception e) {
if (!_continueOnError) {
throw new RuntimeException("Caught exception while evaluating transform function for column: " + column, e);
} else {
LOGGER.debug("Caught exception while evaluating transform function for column: {}", column, e);
record.putValue(GenericRow.INCOMPLETE_RECORD_KEY, true);
}
}
} else if (existingValue.getClass().isArray() || existingValue instanceof Collection
|| existingValue instanceof Map) {
try {
Object transformedValue = transformFunctionEvaluator.evaluate(record);
// Override the existing nested value only when its type is not compatible with the transformed value.
if (!isTypeCompatible(existingValue, transformedValue)) {
record.putValue(column, transformedValue);
}
} catch (Exception e) {
LOGGER.debug("Caught exception while evaluating transform function for column: {}", column, e);
}
}
}
return record;
}
|
@Test
public void testTransformFunctionWithWrongInput() {
Schema pinotSchema = new Schema();
DimensionFieldSpec dimensionFieldSpec = new DimensionFieldSpec("x", FieldSpec.DataType.INT, true);
pinotSchema.addField(dimensionFieldSpec);
List<TransformConfig> transformConfigs = Collections.singletonList(
new TransformConfig("y", "plus(x, 10)"));
IngestionConfig ingestionConfig = new IngestionConfig();
ingestionConfig.setTransformConfigs(transformConfigs);
TableConfig tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName("testTransformFunctionWithWrongInput")
.setIngestionConfig(ingestionConfig)
.build();
ExpressionTransformer expressionTransformer = new ExpressionTransformer(tableConfig, pinotSchema);
// Valid case: x is int, y is int
GenericRow genericRow = new GenericRow();
genericRow.putValue("x", 10);
expressionTransformer.transform(genericRow);
Assert.assertEquals(genericRow.getValue("y"), 20.0);
// Invalid case: x is string, y is int
genericRow = new GenericRow();
genericRow.putValue("x", "abcd");
try {
expressionTransformer.transform(genericRow);
Assert.fail();
} catch (Exception e) {
Assert.assertEquals(e.getCause().getMessage(), "Caught exception while executing function: plus(x,'10')");
}
}
|
public ImmutableList<GlobalSetting> parse(final InputStream is) {
return Jsons.toObjects(is, GlobalSetting.class);
}
|
@Test
public void should_parse_setting_file_with_env() {
InputStream stream = getResourceAsStream("settings/env-settings.json");
ImmutableList<GlobalSetting> globalSettings = parser.parse(stream);
assertThat(globalSettings.get(0).includes().get(0), is(join("src", "test", "resources", "settings", "details", "foo.json")));
assertThat(globalSettings.get(0).getContext(), is("/foo"));
assertThat(globalSettings.get(0).getEnv(), is("foo"));
assertThat(globalSettings.get(1).includes().get(0), is(join("src", "test", "resources", "settings", "details", "bar.json")));
assertThat(globalSettings.get(1).getContext(), is("/bar"));
assertThat(globalSettings.get(1).getEnv(), is("bar"));
}
|
public static Projection<Entry<Object, Object>, JetSqlRow> toProjection(
KvRowProjector.Supplier rightRowProjectorSupplier,
ExpressionEvalContext evalContext
) {
return new JoinProjection(rightRowProjectorSupplier, UntrustedExpressionEvalContext.from(evalContext));
}
|
@Test
public void when_serializedObject_then_deserializedCorrect() {
AbstractSerializationService service = (AbstractSerializationService) TestUtil.getNode(instance()).getSerializationService();
var evalContextMock = mock(ExpressionEvalContext.class);
when(evalContextMock.getSerializationService()).thenReturn(mock());
when(evalContextMock.getArguments()).thenReturn(emptyList());
when(evalContextMock.getNodeEngine()).thenReturn(mock());
var supplier = KvRowProjector.supplier(
new QueryPath[]{},
new QueryDataType[]{},
null,
null,
null,
null
);
DataSerializable projection = (DataSerializable) QueryUtil.toProjection(supplier, evalContextMock);
var data = service.toData(projection);
var actual = service.toObject(data);
assertThat(actual)
.usingRecursiveComparison()
.comparingOnlyFields("arguments")
.isEqualTo(projection);
}
|
public static ParamType getSchemaFromType(final Type type) {
return getSchemaFromType(type, JAVA_TO_ARG_TYPE);
}
|
@Test
public void shouldGetBiFunction() throws NoSuchMethodException {
final Type type = getClass().getDeclaredMethod("biFunctionType", BiFunction.class)
.getGenericParameterTypes()[0];
final ParamType schema = UdfUtil.getSchemaFromType(type);
assertThat(schema, instanceOf(LambdaType.class));
assertThat(((LambdaType) schema).inputTypes(), equalTo(ImmutableList.of(ParamTypes.LONG, ParamTypes.INTEGER)));
assertThat(((LambdaType) schema).returnType(), equalTo(ParamTypes.BOOLEAN));
}
|
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
try {
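            // Transfer in binary mode so the server does not perform ASCII newline translation.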
if(!session.getClient().setFileType(FTP.BINARY_FILE_TYPE)) {
throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString());
}
if(status.isAppend()) {
session.getClient().setRestartOffset(status.getOffset());
}
final InputStream in = new DataConnectionActionExecutor(session).data(new DataConnectionAction<InputStream>() {
@Override
public InputStream execute() throws BackgroundException {
try {
return session.getClient().retrieveFileStream(file.getAbsolute());
}
catch(IOException e) {
throw new FTPExceptionMappingService().map(e);
}
}
});
return new ReadReplyInputStream(in, status);
}
catch(IOException e) {
throw new FTPExceptionMappingService().map("Download {0} failed", e, file);
}
}
|
@Test(expected = NotfoundException.class)
public void testReadNotFound() throws Exception {
final TransferStatus status = new TransferStatus();
new FTPReadFeature(session).read(new Path(new FTPWorkdirService(session).find(), "nosuchname", EnumSet.of(Path.Type.file)), status, new DisabledConnectionCallback());
}
|
@Override
@MethodNotAvailable
public CompletionStage<V> putAsync(K key, V value, long ttl, TimeUnit timeunit) {
throw new MethodNotAvailableException();
}
|
@Test(expected = MethodNotAvailableException.class)
public void testPutAsyncWithTtl() {
adapter.putAsync(42, "value", 1, TimeUnit.MILLISECONDS);
}
|
static int run(Collection<URI> namenodes, final BalancerParameters p,
Configuration conf) throws IOException, InterruptedException {
return run(namenodes, null, p, conf);
}
|
@Test(timeout = 100000)
public void testManyBalancerSimultaneously() throws Exception {
final Configuration conf = new HdfsConfiguration();
initConf(conf);
    // the cluster starts with one datanode of capacity 4 * CAPACITY; an empty
    // node with half that capacity (2 * CAPACITY) is added later on the same rack
long[] capacities = new long[] { 4 * CAPACITY };
String[] racks = new String[] { RACK0 };
long newCapacity = 2 * CAPACITY;
String newRack = RACK0;
LOG.info("capacities = " + long2String(capacities));
LOG.info("racks = " + Arrays.asList(racks));
LOG.info("newCapacity= " + newCapacity);
LOG.info("newRack = " + newRack);
LOG.info("useTool = " + false);
assertEquals(capacities.length, racks.length);
int numOfDatanodes = capacities.length;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length)
.racks(racks).simulatedCapacities(capacities).build();
cluster.waitActive();
client = NameNodeProxies.createProxy(conf,
cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
long totalCapacity = sum(capacities);
// fill up the cluster to be 30% full
final long totalUsedSpace = totalCapacity * 3 / 10;
createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
(short) numOfDatanodes, 0);
    // start up an empty node with half the capacity (2 * CAPACITY) on the same rack
cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
new long[] { newCapacity });
cluster.triggerHeartbeats();
// Case1: Simulate first balancer by creating 'balancer.id' file. It
// will keep this file until the balancing operation is completed.
FileSystem fs = cluster.getFileSystem(0);
final FSDataOutputStream out = fs
.create(Balancer.BALANCER_ID_PATH, false);
out.writeBytes(InetAddress.getLocalHost().getHostName());
out.hflush();
assertTrue("'balancer.id' file doesn't exist!",
fs.exists(Balancer.BALANCER_ID_PATH));
// start second balancer
final String[] args = { "-policy", "datanode" };
final Tool tool = new Cli();
tool.setConf(conf);
int exitCode = tool.run(args); // start balancing
assertEquals("Exit status code mismatches",
ExitStatus.IO_EXCEPTION.getExitCode(), exitCode);
// Case2: Release lease so that another balancer would be able to
// perform balancing.
out.close();
assertTrue("'balancer.id' file doesn't exist!",
fs.exists(Balancer.BALANCER_ID_PATH));
exitCode = tool.run(args); // start balancing
assertEquals("Exit status code mismatches",
ExitStatus.SUCCESS.getExitCode(), exitCode);
}
|
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Condition condition = (Condition) o;
return Objects.equals(metricKey, condition.metricKey) &&
operator == condition.operator &&
Objects.equals(errorThreshold, condition.errorThreshold);
}
|
@Test
public void equals_is_based_on_all_fields() {
assertThat(underTest)
.isEqualTo(underTest)
.isNotNull()
.isNotEqualTo(new Object())
.isEqualTo(new Condition(METRIC_KEY, OPERATOR, ERROR_THRESHOLD))
.isNotEqualTo(new Condition("other_metric_key", OPERATOR, ERROR_THRESHOLD));
Arrays.stream(Condition.Operator.values())
.filter(s -> !OPERATOR.equals(s))
.forEach(otherOperator -> assertThat(underTest)
.isNotEqualTo(new Condition(METRIC_KEY, otherOperator, ERROR_THRESHOLD)));
assertThat(underTest).isNotEqualTo(new Condition(METRIC_KEY, OPERATOR, "other_error_threshold"));
}
|
@Override
public boolean skip(final ServerWebExchange exchange) {
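        // Skip this plugin for every request except those routed to Spring Cloud services.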
return skipExcept(exchange, RpcTypeEnum.SPRING_CLOUD);
}
|
@Test
public void skip() {
final boolean result = springCloudPlugin.skip(exchange);
assertFalse(result);
}
|
public Rule<FilterNode> filterNodeRule()
{
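        // Rule that hoists eligible subexpressions out of lambda bodies in FilterNodes into a preceding project.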
return new PullUpExpressionInLambdaFilterNodeRule();
}
|
@Test
public void testFilter()
{
tester().assertThat(new PullUpExpressionInLambdaRules(getFunctionManager()).filterNodeRule())
.setSystemProperty(PULL_EXPRESSION_FROM_LAMBDA_ENABLED, "true")
.on(p ->
{
p.variable("idmap", new MapType(BIGINT, BIGINT, KEY_BLOCK_EQUALS, KEY_BLOCK_HASH_CODE));
return p.filter(
p.rowExpression("cardinality(map_filter(idmap, (k, v) -> array_position(array_sort(map_keys(idmap)), k) <= 200)) > 0"),
p.values(p.variable("idmap", new MapType(BIGINT, BIGINT, KEY_BLOCK_EQUALS, KEY_BLOCK_HASH_CODE))));
})
.matches(
project(
ImmutableMap.of("idmap", expression("idmap")),
filter(
"(cardinality(map_filter(idmap, (k, v) -> (array_position(array_sort, k)) <= (INTEGER'200')))) > (INTEGER'0')",
project(ImmutableMap.of("array_sort", expression("array_sort(map_keys(idmap))")),
values("idmap")))));
}
|
@Override
public UrlPattern doGetPattern() {
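    // Apply this filter to all URLs except static resources and explicitly skipped paths.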
return UrlPattern.builder()
.includes("/*")
.excludes(StaticResources.patterns())
.excludes(SKIPPED_URLS)
.build();
}
|
@Test
public void doGetPattern_excludesNotEmpty() {
PluginsRiskConsentFilter consentFilter = new PluginsRiskConsentFilter(configuration, userSession);
UrlPattern urlPattern = consentFilter.doGetPattern();
assertThat(urlPattern.getExclusions()).isNotEmpty();
}
|
public static Getter newMethodGetter(Object object, Getter parent, Method method, String modifier) throws Exception {
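        // Build a getter that extracts values by invoking the given method; the
        // modifier (e.g. "[any]") lets the factory infer element types for arrays and collections.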
return newGetter(object, parent, modifier, method.getReturnType(), method::invoke,
(t, et) -> new MethodGetter(parent, method, modifier, t, et));
}
|
@Test
public void newMethodGetter_whenExtractingFromNonEmpty_Array_FieldAndParentIsNonEmptyMultiResult_thenInferReturnType()
throws Exception {
OuterObject object = new OuterObject("name", new InnerObject("inner", 0, 1, 2, 3));
Getter parentGetter = GetterFactory.newMethodGetter(object, null, innersArrayMethod, "[any]");
Getter innerObjectNameGetter = GetterFactory.newMethodGetter(object, parentGetter, innerAttributesArrayMethod, "[any]");
Class<?> returnType = innerObjectNameGetter.getReturnType();
assertEquals(Integer.class, returnType);
}
|
@Override
public boolean syncData(DistroData data, String targetServer) {
if (isNoExistTarget(targetServer)) {
return true;
}
DistroDataRequest request = new DistroDataRequest(data, data.getType());
Member member = memberManager.find(targetServer);
if (checkTargetServerStatusUnhealthy(member)) {
Loggers.DISTRO
.warn("[DISTRO] Cancel distro sync caused by target server {} unhealthy, key: {}", targetServer,
data.getDistroKey());
return false;
}
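        // Forward the distro data to the target member over RPC; any exception counts as a failed sync.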
try {
Response response = clusterRpcClientProxy.sendRequest(member, request);
return checkResponse(response);
} catch (NacosException e) {
Loggers.DISTRO.error("[DISTRO-FAILED] Sync distro data failed! key: {}", data.getDistroKey(), e);
}
return false;
}
|
@Test
void testSyncDataFailure() throws NacosException {
when(memberManager.hasMember(member.getAddress())).thenReturn(true);
when(memberManager.find(member.getAddress())).thenReturn(member);
member.setState(NodeState.UP);
response.setErrorInfo(ResponseCode.FAIL.getCode(), "TEST");
when(clusterRpcClientProxy.isRunning(member)).thenReturn(true);
assertFalse(transportAgent.syncData(new DistroData(), member.getAddress()));
}
|