focal_method (string, length 13–60.9k) | test_case (string, length 25–109k)
---|---|
public void createGroupTombstoneRecords(
String groupId,
List<CoordinatorRecord> records
) {
// At this point, we have already validated the group id, so we know that the group exists and that no exception will be thrown.
createGroupTombstoneRecords(group(groupId), records);
}
|
@Test
public void testClassicGroupDelete() {
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.build();
context.createClassicGroup("group-id");
List<CoordinatorRecord> expectedRecords = Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord("group-id"));
List<CoordinatorRecord> records = new ArrayList<>();
context.groupMetadataManager.createGroupTombstoneRecords("group-id", records);
assertEquals(expectedRecords, records);
}
|
protected FileAppender<E> buildAppender(LoggerContext context) {
if (archive) {
final RollingFileAppender<E> appender = new RollingFileAppender<>();
appender.setContext(context);
appender.setFile(currentLogFilename);
appender.setBufferSize(new FileSize(bufferSize.toBytes()));
if (maxFileSize != null && !requireNonNull(archivedLogFilenamePattern).contains("%d")) {
final FixedWindowRollingPolicy rollingPolicy = new FixedWindowRollingPolicy();
rollingPolicy.setContext(context);
rollingPolicy.setMaxIndex(getArchivedFileCount());
rollingPolicy.setFileNamePattern(getArchivedLogFilenamePattern());
rollingPolicy.setParent(appender);
rollingPolicy.start();
appender.setRollingPolicy(rollingPolicy);
final SizeBasedTriggeringPolicy<E> triggeringPolicy = new SizeBasedTriggeringPolicy<>();
triggeringPolicy.setMaxFileSize(new FileSize(maxFileSize.toBytes()));
triggeringPolicy.setContext(context);
triggeringPolicy.start();
appender.setTriggeringPolicy(triggeringPolicy);
return appender;
} else {
final TimeBasedRollingPolicy<E> rollingPolicy;
if (maxFileSize == null) {
rollingPolicy = new TimeBasedRollingPolicy<>();
final TimeBasedFileNamingAndTriggeringPolicy<E> triggeringPolicy = new DefaultTimeBasedFileNamingAndTriggeringPolicy<>();
triggeringPolicy.setContext(context);
triggeringPolicy.setTimeBasedRollingPolicy(rollingPolicy);
appender.setTriggeringPolicy(triggeringPolicy);
} else {
// Creating a size-and-time policy does not need a separate triggering policy set
// on the appender, because this policy registers its own triggering policy
final SizeAndTimeBasedRollingPolicy<E> sizeAndTimeBasedRollingPolicy = new SizeAndTimeBasedRollingPolicy<>();
sizeAndTimeBasedRollingPolicy.setMaxFileSize(new FileSize(maxFileSize.toBytes()));
rollingPolicy = sizeAndTimeBasedRollingPolicy;
}
if (totalSizeCap != null) {
rollingPolicy.setTotalSizeCap(new FileSize(totalSizeCap.toBytes()));
}
rollingPolicy.setContext(context);
rollingPolicy.setFileNamePattern(archivedLogFilenamePattern);
rollingPolicy.setMaxHistory(archivedFileCount);
appender.setRollingPolicy(rollingPolicy);
rollingPolicy.setParent(appender);
rollingPolicy.start();
return appender;
}
}
final FileAppender<E> appender = new FileAppender<>();
appender.setContext(context);
appender.setFile(currentLogFilename);
appender.setBufferSize(new FileSize(bufferSize.toBytes()));
return appender;
}
|
@Test
void validSetTotalSizeCapNoMaxFileSize() throws IOException, ConfigurationException {
final YamlConfigurationFactory<FileAppenderFactory> factory =
new YamlConfigurationFactory<>(FileAppenderFactory.class, validator, mapper, "dw");
final FileAppender appender = factory.build(new ResourceConfigurationSourceProvider(), "yaml/appender_file_cap2.yaml")
.buildAppender(new LoggerContext());
assertThat(appender).isInstanceOfSatisfying(RollingFileAppender.class, roller -> assertThat(roller.getRollingPolicy())
.isInstanceOfSatisfying(TimeBasedRollingPolicy.class, policy -> assertThat(policy)
.satisfies(p -> assertThat(p)
.extracting("totalSizeCap")
.isInstanceOfSatisfying(FileSize.class, x ->
assertThat(x.getSize()).isEqualTo(DataSize.mebibytes(50).toBytes())))
.satisfies(p -> assertThat(p.getMaxHistory()).isEqualTo(5))));
}
|
public HttpResponse getLogs(ApplicationId applicationId, Optional<DomainName> hostname, Query apiParams) {
Exception exception = null;
for (var uri : getLogServerUris(applicationId, hostname)) {
try {
return logRetriever.getLogs(uri.withQuery(apiParams), activationTime(applicationId));
} catch (RuntimeException e) {
exception = e;
log.log(Level.INFO, e.getMessage());
}
}
return HttpErrorResponse.internalServerError(Exceptions.toMessageString(exception));
}
|
@Test
public void getLogs() throws IOException {
deployApp(testAppLogServerWithContainer);
HttpResponse response = applicationRepository.getLogs(applicationId(), Optional.empty(), Query.empty());
assertEquals(200, response.getStatus());
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
response.render(buffer);
assertEquals("log line", buffer.toString(UTF_8));
}
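A minimal, JDK-only sketch (names hypothetical, not the Vespa implementation) of the fallback pattern used above: try each source in order, return the first success, and surface the last failure if everything fails.
// Sketch of "try each endpoint, return first success, rethrow last failure".
static <T> T firstSuccessful(List<Supplier<T>> attempts) {
    RuntimeException last = new IllegalStateException("no endpoints to try");
    for (Supplier<T> attempt : attempts) {
        try {
            return attempt.get();
        } catch (RuntimeException e) {
            last = e; // remember the failure and try the next endpoint
        }
    }
    throw last;
}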
|
@Override
public ContainerReport getContainerReport(ContainerId containerId)
throws YarnException, IOException {
ApplicationReport appReport = getApplicationReport(
containerId.getApplicationAttemptId().getApplicationId());
TimelineEntity entity = readerClient.getContainerEntity(containerId,
"ALL", null);
return TimelineEntityV2Converter.convertToContainerReport(
entity, logServerUrl, appReport.getUser());
}
|
@Test
public void testGetContainerReport() throws IOException, YarnException {
final ApplicationId appId = ApplicationId.newInstance(0, 1);
final ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
final ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
when(spyTimelineReaderClient.getContainerEntity(containerId, "ALL", null))
.thenReturn(createContainerEntity(containerId));
when(spyTimelineReaderClient.getApplicationEntity(appId, "ALL", null))
.thenReturn(createApplicationTimelineEntity(appId, true, false));
ContainerReport report = client.getContainerReport(containerId);
assertThat(report.getContainerId()).isEqualTo(containerId);
assertThat(report.getAssignedNode().getHost()).isEqualTo("test host");
assertThat(report.getAssignedNode().getPort()).isEqualTo(100);
assertThat(report.getAllocatedResource().getVirtualCores()).isEqualTo(8);
assertThat(report.getCreationTime()).isEqualTo(123456);
assertThat(report.getLogUrl()).isEqualTo("https://localhost:8188/ahs/logs/"
+ "test host:100/container_0_0001_01_000001/"
+ "container_0_0001_01_000001/user1");
}
|
List<TaskDirectory> listAllTaskDirectories() {
return listTaskDirectories(pathname -> pathname.isDirectory() && TASK_DIR_PATH_NAME.matcher(pathname.getName()).matches());
}
|
@Test
public void shouldReturnEmptyArrayIfListFilesReturnsNull() throws IOException {
stateDir = new File(TestUtils.IO_TMP_DIR, "kafka-" + TestUtils.randomString(5));
directory = new StateDirectory(
new StreamsConfig(new Properties() {
{
put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
put(StreamsConfig.STATE_DIR_CONFIG, stateDir.getPath());
}
}),
time,
true,
false);
appDir = new File(stateDir, applicationId);
// replace the app directory with a regular file of the same name so that File#listFiles returns null and StateDirectory#listAllTaskDirectories has to handle it
Utils.delete(appDir);
Files.createFile(appDir.toPath());
assertTrue(Files.exists(appDir.toPath()));
assertNull(appDir.listFiles());
assertEquals(0, directory.listAllTaskDirectories().size());
}
|
public static <E> LinkedList<E> newLinkedList() {
return new LinkedList<>();
}
|
@Test
public void testItrLinkedLists() {
Set<String> set = new HashSet<>();
set.add("record1");
set.add("record2");
set.add("record3");
List<String> list = Lists.newLinkedList(set);
list.add("record4");
Assert.assertEquals(4, list.size());
}
|
public CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> acknowledge(
String memberId,
String groupId,
Map<TopicIdPartition, List<ShareAcknowledgementBatch>> acknowledgeTopics
) {
log.trace("Acknowledge request for topicIdPartitions: {} with groupId: {}",
acknowledgeTopics.keySet(), groupId);
this.shareGroupMetrics.shareAcknowledgement();
Map<TopicIdPartition, CompletableFuture<Errors>> futures = new HashMap<>();
acknowledgeTopics.forEach((topicIdPartition, acknowledgePartitionBatches) -> {
SharePartition sharePartition = partitionCacheMap.get(sharePartitionKey(groupId, topicIdPartition));
if (sharePartition != null) {
CompletableFuture<Errors> future = sharePartition.acknowledge(memberId, acknowledgePartitionBatches).thenApply(throwable -> {
if (throwable.isPresent()) {
return Errors.forException(throwable.get());
}
acknowledgePartitionBatches.forEach(batch -> {
batch.acknowledgeTypes().forEach(this.shareGroupMetrics::recordAcknowledgement);
});
return Errors.NONE;
});
futures.put(topicIdPartition, future);
} else {
futures.put(topicIdPartition, CompletableFuture.completedFuture(Errors.UNKNOWN_TOPIC_OR_PARTITION));
}
});
CompletableFuture<Void> allFutures = CompletableFuture.allOf(
futures.values().toArray(new CompletableFuture[0]));
return allFutures.thenApply(v -> {
Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = new HashMap<>();
futures.forEach((topicIdPartition, future) -> result.put(topicIdPartition, new ShareAcknowledgeResponseData.PartitionData()
.setPartitionIndex(topicIdPartition.partition())
.setErrorCode(future.join().code())));
return result;
});
}
|
@Test
public void testAcknowledgeIncorrectGroupId() {
String groupId = "grp";
String groupId2 = "grp2";
String memberId = Uuid.randomUuid().toString();
TopicIdPartition tp = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
SharePartition sp = mock(SharePartition.class);
Map<SharePartitionManager.SharePartitionKey, SharePartition> partitionCacheMap = new HashMap<>();
partitionCacheMap.put(new SharePartitionManager.SharePartitionKey(groupId, tp), sp);
SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder()
.withPartitionCacheMap(partitionCacheMap).build();
Map<TopicIdPartition, List<ShareAcknowledgementBatch>> acknowledgeTopics = new HashMap<>();
acknowledgeTopics.put(tp, Arrays.asList(
new ShareAcknowledgementBatch(12, 20, Collections.singletonList((byte) 1)),
new ShareAcknowledgementBatch(24, 56, Collections.singletonList((byte) 1))
));
CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
sharePartitionManager.acknowledge(memberId, groupId2, acknowledgeTopics);
Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = resultFuture.join();
assertEquals(1, result.size());
assertTrue(result.containsKey(tp));
assertEquals(0, result.get(tp).partitionIndex());
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), result.get(tp).errorCode());
}
|
public static Throwable peel(@Nullable Throwable t) {
while ((t instanceof CompletionException
|| t instanceof ExecutionException
|| t instanceof InvocationTargetException)
&& t.getCause() != null
&& t.getCause() != t
) {
t = t.getCause();
}
return t;
}
|
@Test
public void when_throwableIsRuntimeException_then_peelReturnsOriginal() {
Throwable throwable = new RuntimeException("expected exception");
Throwable result = peel(throwable);
assertEquals(throwable, result);
}
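A minimal sketch (JDK only) of where peel matters: CompletableFuture.join() wraps the real failure in a CompletionException, and peel recovers the original cause.
// Sketch: join() wraps failures; peel unwraps back to the original exception.
CompletableFuture<String> failed = CompletableFuture.supplyAsync(() -> {
    throw new IllegalStateException("expected exception");
});
try {
    failed.join();
} catch (CompletionException e) {
    Throwable cause = peel(e); // unwrapped to the IllegalStateException
    assert cause instanceof IllegalStateException;
}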
|
public JmxCollector register() {
return register(PrometheusRegistry.defaultRegistry);
}
|
@Test
public void testValueCaptureGroup() throws Exception {
JmxCollector jc =
new JmxCollector(
"\n---\nrules:\n- pattern: `^hadoop<.+-500(10)>`\n name: foo\n value: $1"
.replace('`', '"'))
.register(prometheusRegistry);
assertEquals(10.0, getSampleValue("foo", new String[] {}, new String[] {}), .001);
}
|
@Override
public boolean isCancelled() {
return original.isCancelled() || peel().isCancelled();
}
|
@Test
public void isCancelled() throws Exception {
ScheduledFuture<Object> outer = createScheduledFutureMock();
ScheduledFuture<Object> inner = createScheduledFutureMock();
when(outer.get()).thenReturn(inner);
when(outer.isCancelled()).thenReturn(false);
when(inner.isCancelled()).thenReturn(false);
assertFalse(new DelegatingScheduledFutureStripper<Object>(outer).isCancelled());
when(outer.isCancelled()).thenReturn(true);
when(inner.isCancelled()).thenReturn(false);
assertTrue(new DelegatingScheduledFutureStripper<Object>(outer).isCancelled());
when(outer.isCancelled()).thenReturn(false);
when(inner.isCancelled()).thenReturn(true);
assertTrue(new DelegatingScheduledFutureStripper<Object>(outer).isCancelled());
}
|
public static byte[] readBytes(ByteBuffer buffer) {
return readBytes(buffer, 0, buffer.remaining());
}
|
@Test
public void readBytesFromArrayBackedByteBuffer() {
final byte[] bytes = "FOOBAR".getBytes(StandardCharsets.US_ASCII);
final ByteBuffer buffer1 = ByteBuffer.wrap(bytes);
final ByteBuffer buffer2 = ByteBuffer.wrap(bytes);
final byte[] readBytesComplete = ByteBufferUtils.readBytes(buffer1);
final byte[] readBytesPartial = ByteBufferUtils.readBytes(buffer2, 0, 3);
assertThat(readBytesComplete).isEqualTo(bytes);
assertThat(readBytesPartial).isEqualTo(Arrays.copyOf(bytes, 3));
}
|
@Override
public void processElement2(StreamRecord<IN2> element) throws Exception {
collector.setTimestamp(element);
rwContext.setElement(element);
userFunction.processBroadcastElement(element.getValue(), rwContext, collector);
rwContext.setElement(null);
}
|
@Test
void testNoKeyedStateOnBroadcastSide() throws Exception {
final ValueStateDescriptor<String> valueState =
new ValueStateDescriptor<>("any", BasicTypeInfo.STRING_TYPE_INFO);
try (TwoInputStreamOperatorTestHarness<String, Integer, String> testHarness =
getInitializedTestHarness(
new BroadcastProcessFunction<String, Integer, String>() {
private static final long serialVersionUID = -1725365436500098384L;
@Override
public void processBroadcastElement(
Integer value, Context ctx, Collector<String> out)
throws Exception {
assertThatThrownBy(
() ->
getRuntimeContext()
.getState(valueState)
.value())
.isInstanceOf(NullPointerException.class)
.hasMessage(
String.format(
"Keyed state '%s' with type %s can only be used on a 'keyed stream', i.e., after a 'keyBy()' operation.",
valueState.getName(),
valueState.getType()));
}
@Override
public void processElement(
String value, ReadOnlyContext ctx, Collector<String> out)
throws Exception {
// do nothing
}
})) {
testHarness.processWatermark1(new Watermark(10L));
testHarness.processWatermark2(new Watermark(10L));
testHarness.processElement2(new StreamRecord<>(5, 12L));
}
}
|
public synchronized void useBundles(List<FileReference> newFileReferences) {
if (! readyForNewBundles)
throw new IllegalStateException("Bundles must be committed or reverted before using new bundles.");
obsoleteBundles = removeObsoleteReferences(newFileReferences);
osgi.allowDuplicateBundles(obsoleteBundles.values());
bundlesFromNewGeneration = installBundles(newFileReferences);
BundleStarter.startBundles(activeBundles.values());
if (obsoleteBundles.size() > 0 || newFileReferences.size() > 0) log.info(installedBundlesMessage());
readyForNewBundles = false;
}
|
@Test
void generation_must_be_marked_complete_before_using_new_bundles() {
bundleLoader.useBundles(List.of(BUNDLE_1_REF));
assertThrows(IllegalStateException.class,
() -> bundleLoader.useBundles(List.of(BUNDLE_1_REF)));
}
|
@Override
public Object getWrappedValue() {
return predicate;
}
|
@Test
public void requireThatWrappedValueIsPredicate() {
Predicate predicate = SimplePredicates.newPredicate();
PredicateFieldValue value = new PredicateFieldValue(predicate);
assertSame(predicate, value.getWrappedValue());
}
|
public static SchemaAndValue parseString(String value) {
if (value == null) {
return NULL_SCHEMA_AND_VALUE;
}
if (value.isEmpty()) {
return new SchemaAndValue(Schema.STRING_SCHEMA, value);
}
ValueParser parser = new ValueParser(new Parser(value));
return parser.parse(false);
}
|
@Test
public void shouldParseTimeStringAsTimeInMap() throws Exception {
String keyStr = "k1";
String timeStr = "14:34:54.346Z";
String mapStr = "{\"" + keyStr + "\":" + timeStr + "}";
SchemaAndValue result = Values.parseString(mapStr);
assertEquals(Type.MAP, result.schema().type());
Schema keySchema = result.schema().keySchema();
Schema valueSchema = result.schema().valueSchema();
assertEquals(Type.STRING, keySchema.type());
assertEquals(Type.INT32, valueSchema.type());
assertEquals(Time.LOGICAL_NAME, valueSchema.name());
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr);
assertEquals(Collections.singletonMap(keyStr, expected), result.value());
}
|
@Override
public Iterator<IndexKeyEntries> getSqlRecordIteratorBatch(@Nonnull Comparable value, boolean descending) {
return getSqlRecordIteratorBatch(value, descending, null);
}
|
@Test
public void getRecordsWithCursorUsingExactValueAscending() {
var expectedOrder = List.of(1, 4, 7);
performCursorTest(expectedOrder, cursor -> store.getSqlRecordIteratorBatch(1, false, cursor));
}
|
public static List<FieldSchema> convert(Schema schema) {
return schema.columns().stream()
.map(col -> new FieldSchema(col.name(), convertToTypeString(col.type()), col.doc()))
.collect(Collectors.toList());
}
|
@Test
public void testConversionWithoutLastComment() {
Schema expected =
new Schema(
optional(0, "customer_id", Types.LongType.get(), "customer comment"),
optional(1, "first_name", Types.StringType.get(), null));
Schema schema =
HiveSchemaUtil.convert(
Arrays.asList("customer_id", "first_name"),
Arrays.asList(
TypeInfoUtils.getTypeInfoFromTypeString(serdeConstants.BIGINT_TYPE_NAME),
TypeInfoUtils.getTypeInfoFromTypeString(serdeConstants.STRING_TYPE_NAME)),
Collections.singletonList("customer comment"));
assertThat(schema.asStruct()).isEqualTo(expected.asStruct());
}
|
public static Builder builder(String bucket, String testClassName, Credentials credentials) {
checkArgument(!bucket.equals(""));
checkArgument(!testClassName.equals(""));
return new Builder(bucket, testClassName, credentials);
}
|
@Test
public void testBuilderWithEmptyBucket() {
assertThrows(
IllegalArgumentException.class,
() -> GcsResourceManager.builder("", TEST_CLASS, null).build());
}
|
@Override
public boolean isEmpty() {
return false;
}
|
@Test
public void testIsEmpty() {
IntSet rs = new SingletonIntSet(3);
assertFalse(rs.isEmpty());
}
|
public JSONObject set(String key, Object value) throws JSONException {
return set(key, value, null, false);
}
|
@Test
public void toBeanTest() {
final JSONObject subJson = JSONUtil.createObj().set("value1", "strValue1").set("value2", "234");
final JSONObject json = JSONUtil.createObj().set("strValue", "strTest").set("intValue", 123)
// Test converting an empty string to an object
.set("doubleValue", "")
.set("beanValue", subJson)
.set("list", JSONUtil.createArray().set("a").set("b")).set("testEnum", "TYPE_A");
final TestBean bean = json.toBean(TestBean.class);
assertEquals("a", bean.getList().get(0));
assertEquals("b", bean.getList().get(1));
assertEquals("strValue1", bean.getBeanValue().getValue1());
// Check BigDecimal conversion
assertEquals(new BigDecimal("234"), bean.getBeanValue().getValue2());
// Check enum conversion
assertEquals(TestEnum.TYPE_A, bean.getTestEnum());
}
|
public void project() {
srcPotentialIndex = 0;
trgPotentialIndex = 0;
recurse(0, 0);
BayesAbsorption.normalize(trgPotentials);
}
|
@Test
public void testProjection1() {
// Projects from node1 into sep. A and B are in node1. A and B are in the sep.
// this is a straight forward projection
BayesVariable a = new BayesVariable<String>( "A", 0, new String[] {"A1", "A2"}, null);
BayesVariable b = new BayesVariable<String>( "B", 1, new String[] {"B1", "B2"}, null);
Graph<BayesVariable> graph = new BayesNetwork();
GraphNode x0 = addNode(graph);
GraphNode x1 = addNode(graph);
x0.setContent( a );
x1.setContent( b );
JunctionTreeClique node1 = new JunctionTreeClique(0, graph, bitSet("0011") );
JunctionTreeClique node2 = new JunctionTreeClique(1, graph, bitSet("0011") );
SeparatorState sep = new JunctionTreeSeparator(0, node1, node2, bitSet("0011"), graph).createState();
double v = 0.1;
for ( int i = 0; i < node1.getPotentials().length; i++ ) {
node1.getPotentials()[i] = v;
v += 0.1;
}
BayesVariable[] vars = new BayesVariable[] {a, b};
BayesVariable[] sepVars = new BayesVariable[] { a, b };
int[] sepVarPos = PotentialMultiplier.createSubsetVarPos(vars, sepVars);
int sepVarNumberOfStates = PotentialMultiplier.createNumberOfStates(sepVars);
int[] sepVarMultipliers = PotentialMultiplier.createIndexMultipliers(sepVars, sepVarNumberOfStates);
double[] projectedSepPotentials = new double[ sep.getPotentials().length];
BayesProjection p = new BayesProjection(vars, node1.getPotentials(), sepVarPos, sepVarMultipliers, projectedSepPotentials);
p.project();
assertArray(new double[]{0.1, 0.2, 0.3, 0.4}, scaleDouble(3, projectedSepPotentials));
}
|
public static boolean acceptEndpoint(String endpointUrl) {
return endpointUrl != null && endpointUrl.matches(ENDPOINT_PATTERN_STRING);
}
|
@Test
public void testAcceptEndpoint() {
assertTrue(AMQPMessageConsumptionTask.acceptEndpoint("amqp://localhost/q/testQueue"));
assertTrue(AMQPMessageConsumptionTask.acceptEndpoint("amqp://localhost/vHost/q/testQueue"));
assertTrue(AMQPMessageConsumptionTask.acceptEndpoint("amqp://localhost:5671/q/testQueue"));
assertTrue(AMQPMessageConsumptionTask.acceptEndpoint("amqp://localhost:5671/vHost/q/testQueue"));
assertTrue(AMQPMessageConsumptionTask.acceptEndpoint("amqp://localhost:5671/q/testQueue/with/path/elements"));
assertTrue(
AMQPMessageConsumptionTask.acceptEndpoint("amqp://localhost:5671/vHost/q/testQueue/with/path/elements"));
assertTrue(AMQPMessageConsumptionTask.acceptEndpoint("amqp://localhost:5671/f/testExchange?durable=true"));
assertTrue(AMQPMessageConsumptionTask
.acceptEndpoint("amqp://localhost:5671/t/testExchange?durable=true&routingKey=samples.*"));
assertTrue(AMQPMessageConsumptionTask
.acceptEndpoint("amqp://localhost:5671/vHost/t/testExchange?durable=true&routingKey=samples.*"));
}
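The actual ENDPOINT_PATTERN_STRING is defined elsewhere and not shown here; a hypothetical pattern accepting the same URL shapes as the assertions above could look like this.
// Hypothetical illustration only -- not the real ENDPOINT_PATTERN_STRING.
// Accepts amqp://host[:port][/vHost]/{q|t|f}/name[/more][?options]
static final String SAMPLE_ENDPOINT_PATTERN = "^amqp://[^/]+(/[^/]+)?/(q|t|f)/.+$";

static boolean acceptEndpointSketch(String endpointUrl) {
    return endpointUrl != null && endpointUrl.matches(SAMPLE_ENDPOINT_PATTERN);
}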
|
public String getName() {
return name;
}
|
@Test
void testConstructor() {
Method method = new Method("bar");
assertNotNull(method);
assertEquals("bar", method.getName());
}
|
public static Expression rewrite(Expression expression, VariableAllocator variableAllocator)
{
return ExpressionTreeRewriter.rewriteWith(new Visitor(variableAllocator), expression, new Context());
}
|
@Test
public void testRewriteBasicLambda()
{
final List<VariableReferenceExpression> variables = ImmutableList.of(new VariableReferenceExpression(Optional.empty(), "a", BIGINT), new VariableReferenceExpression(Optional.empty(), "x", BIGINT));
final VariableAllocator allocator = new VariableAllocator(variables);
assertEquals(rewrite(expression("x -> a + x"), allocator),
new BindExpression(
ImmutableList.of(expression("a")),
new LambdaExpression(
Stream.of("a_0", "x")
.map(Identifier::new)
.map(LambdaArgumentDeclaration::new)
.collect(toList()),
expression("a_0 + x"))));
}
|
public ProviderBuilder port(Integer port) {
this.port = port;
return getThis();
}
|
@Test
void port() {
ProviderBuilder builder = ProviderBuilder.newBuilder();
builder.port(8080);
Assertions.assertEquals(8080, builder.build().getPort());
}
|
public void validateAndMergeOutputParams(StepRuntimeSummary runtimeSummary) {
Optional<String> externalJobId = extractExternalJobId(runtimeSummary);
if (externalJobId.isPresent()) {
Optional<OutputData> outputDataOpt =
outputDataDao.getOutputDataForExternalJob(externalJobId.get(), ExternalJobType.TITUS);
outputDataOpt.ifPresent(
outputData -> {
ParamsMergeHelper.mergeOutputDataParams(
runtimeSummary.getParams(), outputData.getParams());
});
}
}
|
@Test
public void testMissingOutputParams() {
runtimeSummary = runtimeSummaryBuilder().artifacts(artifacts).build();
outputDataManager.validateAndMergeOutputParams(runtimeSummary);
assertTrue(runtimeSummary.getParams().isEmpty());
}
|
public Optional<Snapshot> getSnapshotAndReset() {
if (!dirty.getAndSet(false)) {
return Optional.empty();
}
ImmutableLongArray.Builder bucketsSnapshotBuilder =
ImmutableLongArray.builder(buckets.length());
for (int i = 0; i < buckets.length(); i++) {
bucketsSnapshotBuilder.add(buckets.getAndSet(i, 0));
}
OutlierStatistic overflowSnapshot = overflowStatistic.getAndSet(OutlierStatistic.EMPTY);
OutlierStatistic underflowSnapshot = underflowStatistic.getAndSet(OutlierStatistic.EMPTY);
return Optional.of(
Snapshot.create(
underflowSnapshot, overflowSnapshot, bucketsSnapshotBuilder.build(), bucketType));
}
|
@Test
public void testUpdateAndSnapshots_MultipleThreads() {
int numRunnables = 200;
ExecutorService executor = Executors.newFixedThreadPool(numRunnables);
HistogramData.BucketType bucketType = HistogramData.ExponentialBuckets.of(1, 10);
LockFreeHistogram histogram =
new LockFreeHistogram(MetricName.named("name", "namespace"), bucketType);
List<UpdateHistogramCallable> callables = new ArrayList<>();
for (int i = 0; i < numRunnables; i++) {
callables.add(new UpdateHistogramCallable(histogram, i));
}
long totalValuesRecorded = 0;
try {
List<Future<Long>> futures = executor.invokeAll(callables);
for (Future<Long> future : futures) {
totalValuesRecorded += future.get();
}
} catch (Exception e) {
return;
}
Optional<LockFreeHistogram.Snapshot> finalSnapshot = histogram.getSnapshotAndReset();
if (finalSnapshot.isPresent()) {
totalValuesRecorded += finalSnapshot.get().totalCount();
}
assertThat(
totalValuesRecorded, equalTo(numRunnables * UpdateHistogramCallable.numValuesRecorded()));
}
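A self-contained sketch (JDK only, not the Beam class) of the snapshot-and-reset idea exercised above: a dirty flag skips empty snapshots, and AtomicLongArray.getAndSet(i, 0) reads and clears each bucket atomically, so concurrent updates are never lost or double-counted.
AtomicBoolean dirty = new AtomicBoolean(false);
AtomicLongArray buckets = new AtomicLongArray(4);

buckets.incrementAndGet(2); // record one value into bucket 2
dirty.set(true);

if (dirty.getAndSet(false)) {
    long[] snapshot = new long[buckets.length()];
    for (int i = 0; i < buckets.length(); i++) {
        snapshot[i] = buckets.getAndSet(i, 0); // per-bucket read-and-clear
    }
}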
|
@Override
public void updateNetworkPolicy(NetworkPolicy networkPolicy) {
checkNotNull(networkPolicy, ERR_NULL_NETWORK_POLICY);
checkArgument(!Strings.isNullOrEmpty(networkPolicy.getMetadata().getUid()),
ERR_NULL_NETWORK_POLICY_UID);
k8sNetworkPolicyStore.updateNetworkPolicy(networkPolicy);
log.info(String.format(MSG_NETWORK_POLICY,
networkPolicy.getMetadata().getName(), MSG_UPDATED));
}
|
@Test(expected = IllegalArgumentException.class)
public void testUpdateUnregisteredNetworkPolicy() {
target.updateNetworkPolicy(NETWORK_POLICY);
}
|
@VisibleForTesting
public static JobGraph createJobGraph(StreamGraph streamGraph) {
return new StreamingJobGraphGenerator(
Thread.currentThread().getContextClassLoader(),
streamGraph,
null,
Runnable::run)
.createJobGraph();
}
|
@Test
void testSinkSupportConcurrentExecutionAttempts() {
final StreamExecutionEnvironment env =
StreamExecutionEnvironment.getExecutionEnvironment(new Configuration());
env.setRuntimeMode(RuntimeExecutionMode.BATCH);
final DataStream<Integer> source = env.fromData(1, 2, 3).name("source");
source.rebalance()
.sinkTo(new TestSinkWithSupportsConcurrentExecutionAttempts())
.name("sink");
final StreamGraph streamGraph = env.getStreamGraph();
final JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
assertThat(jobGraph.getNumberOfVertices()).isEqualTo(6);
for (JobVertex jobVertex : jobGraph.getVertices()) {
if (jobVertex.getName().contains("source")) {
assertThat(jobVertex.isSupportsConcurrentExecutionAttempts()).isTrue();
} else if (jobVertex.getName().contains("pre-writer")) {
assertThat(jobVertex.isSupportsConcurrentExecutionAttempts()).isTrue();
} else if (jobVertex.getName().contains("Writer")) {
assertThat(jobVertex.isSupportsConcurrentExecutionAttempts()).isTrue();
} else if (jobVertex.getName().contains("pre-committer")) {
assertThat(jobVertex.isSupportsConcurrentExecutionAttempts()).isFalse();
} else if (jobVertex.getName().contains("post-committer")) {
assertThat(jobVertex.isSupportsConcurrentExecutionAttempts()).isFalse();
} else if (jobVertex.getName().contains("Committer")) {
assertThat(jobVertex.isSupportsConcurrentExecutionAttempts()).isFalse();
} else {
Assertions.fail("Unexpected job vertex " + jobVertex.getName());
}
}
}
|
@Override
public Path copy(final Path file, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
try {
final EueApiClient client = new EueApiClient(session);
if(status.isExists()) {
if(log.isWarnEnabled()) {
log.warn(String.format("Trash file %s to be replaced with %s", target, file));
}
new EueTrashFeature(session, fileid).delete(Collections.singletonMap(target, status), callback, new Delete.DisabledCallback());
}
final String resourceId = fileid.getFileId(file);
final String parentResourceId = fileid.getFileId(target.getParent());
String targetResourceId = null;
final ResourceCopyResponseEntries resourceCopyResponseEntries;
switch(parentResourceId) {
case EueResourceIdProvider.ROOT:
case EueResourceIdProvider.TRASH:
resourceCopyResponseEntries = new CopyChildrenForAliasApiApi(client)
.resourceAliasAliasChildrenCopyPost(parentResourceId,
Collections.singletonList(String.format("%s/resource/%s",
session.getBasePath(), resourceId)), null, null, null,
"rename", null);
break;
default:
resourceCopyResponseEntries = new CopyChildrenApi(client).resourceResourceIdChildrenCopyPost(parentResourceId,
Collections.singletonList(String.format("%s/resource/%s", session.getBasePath(), resourceId)), null, null, null,
"rename", null);
}
if(null == resourceCopyResponseEntries) {
// Copying a single file returns a 200 status code with an empty response body
}
else {
for(ResourceCopyResponseEntry resourceCopyResponseEntry : resourceCopyResponseEntries.values()) {
switch(resourceCopyResponseEntry.getStatusCode()) {
case HttpStatus.SC_CREATED:
fileid.cache(target, EueResourceIdProvider.getResourceIdFromResourceUri(resourceCopyResponseEntry.getHeaders().getLocation()));
break;
default:
log.warn(String.format("Failure %s copying file %s", resourceCopyResponseEntries, file));
throw new EueExceptionMappingService().map(new ApiException(resourceCopyResponseEntry.getReason(),
null, resourceCopyResponseEntry.getStatusCode(), client.getResponseHeaders()));
}
}
}
listener.sent(status.getLength());
if(!StringUtils.equals(file.getName(), target.getName())) {
final ResourceUpdateModel resourceUpdateModel = new ResourceUpdateModel();
final ResourceUpdateModelUpdate resourceUpdateModelUpdate = new ResourceUpdateModelUpdate();
final Uifs uifs = new Uifs();
uifs.setName(target.getName());
resourceUpdateModelUpdate.setUifs(uifs);
resourceUpdateModel.setUpdate(resourceUpdateModelUpdate);
final ResourceMoveResponseEntries resourceMoveResponseEntries = new UpdateResourceApi(client).resourceResourceIdPatch(fileid.getFileId(target),
resourceUpdateModel, null, null, null);
if(null == resourceMoveResponseEntries) {
// Moving a single file returns a 200 status code with an empty response body
}
else {
for(ResourceMoveResponseEntry resourceMoveResponseEntry : resourceMoveResponseEntries.values()) {
switch(resourceMoveResponseEntry.getStatusCode()) {
case HttpStatus.SC_CREATED:
break;
default:
log.warn(String.format("Failure %s renaming file %s", resourceMoveResponseEntry, file));
throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getReason(),
null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
}
}
}
}
return target;
}
catch(ApiException e) {
throw new EueExceptionMappingService().map("Cannot copy {0}", e, file);
}
}
|
@Test
public void testCopyFile() throws Exception {
final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
final Path sourceFolder = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory));
final Path sourceFile = new Path(sourceFolder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new EueDirectoryFeature(session, fileid).mkdir(sourceFolder, new TransferStatus());
createFile(fileid, sourceFile, RandomUtils.nextBytes(1023));
assertTrue(new EueFindFeature(session, fileid).find(sourceFile));
final Path targetFolder = new EueDirectoryFeature(session, fileid).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory)), new TransferStatus());
final Path targetFile = new Path(targetFolder, sourceFile.getName(), EnumSet.of(AbstractPath.Type.file));
final Path copy = new EueCopyFeature(session, fileid).copy(sourceFile, targetFile, new TransferStatus(), new DisabledConnectionCallback(), new DisabledStreamListener());
assertTrue(new EueFindFeature(session, fileid).find(sourceFile));
assertTrue(new DefaultFindFeature(session).find(sourceFile));
assertTrue(new EueFindFeature(session, fileid).find(targetFile));
assertTrue(new DefaultFindFeature(session).find(targetFile));
assertEquals(new EueAttributesFinderFeature(session, fileid).find(sourceFile).getSize(),
new EueAttributesFinderFeature(session, fileid).find(targetFile).getSize());
assertNotEquals(new EueAttributesFinderFeature(session, fileid).find(sourceFile).getETag(),
new EueAttributesFinderFeature(session, fileid).find(targetFile).getETag());
assertNotEquals(new EueAttributesFinderFeature(session, fileid).find(sourceFile).getFileId(),
new EueAttributesFinderFeature(session, fileid).find(targetFile).getFileId());
new EueDeleteFeature(session, fileid).delete(Arrays.asList(sourceFolder, targetFolder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Nullable static String getHeaderIfString(Message message, String name) {
MessageProperties properties = message.getMessageProperties();
if (properties == null) return null;
Object o = properties.getHeader(name);
if (o instanceof String) return o.toString();
return null;
}
|
@Test void getHeaderIfString_null() {
assertThat(MessageHeaders.getHeaderIfString(message, "b3")).isNull();
}
|
public static <T> T retry(Callable<T> callable, int retries) {
return retry(callable, retries, Collections.emptyList());
}
|
@Test(expected = RuntimeException.class)
public void retryRetriesFailed()
throws Exception {
// given
given(callable.call()).willThrow(new RuntimeException()).willThrow(new RuntimeException()).willReturn(RESULT);
// when
RetryUtils.retry(callable, RETRIES);
// then
// throws exception
}
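A minimal sketch of the assumed retry semantics (not the library implementation): one initial attempt plus up to `retries` additional attempts, rethrowing the last failure once attempts are exhausted, which is why the test above expects a RuntimeException.
// Sketch only; assumes retries >= 0.
static <T> T retrySketch(Callable<T> callable, int retries) {
    RuntimeException last = null;
    for (int attempt = 0; attempt <= retries; attempt++) {
        try {
            return callable.call();
        } catch (Exception e) {
            last = (e instanceof RuntimeException) ? (RuntimeException) e : new RuntimeException(e);
        }
    }
    throw last; // all attempts failed
}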
|
@Override
public Object read(final MySQLPacketPayload payload, final boolean unsigned) throws SQLException {
int length = payload.readInt1();
switch (length) {
case 0:
throw new SQLFeatureNotSupportedException("Can not support date format if year, month, day is absent.");
case 4:
return getTimestampForDate(payload);
case 7:
return getTimestampForDatetime(payload);
case 11:
Timestamp result = getTimestampForDatetime(payload);
result.setNanos(payload.readInt4() * 1000);
return result;
default:
throw new SQLFeatureNotSupportedException(String.format("Wrong length `%d` of MYSQL_TYPE_TIME", length));
}
}
|
@Test
void assertReadWithFourBytes() throws SQLException {
when(payload.readInt1()).thenReturn(4, 12, 31);
when(payload.readInt2()).thenReturn(2018);
LocalDateTime actual = LocalDateTime.ofInstant(Instant.ofEpochMilli(((Timestamp) new MySQLDateBinaryProtocolValue().read(payload, false)).getTime()), ZoneId.systemDefault());
assertThat(actual.getYear(), is(2018));
assertThat(actual.getMonthValue(), is(12));
assertThat(actual.getDayOfMonth(), is(31));
}
|
@Override
public Transfer withCache(final Cache<Path> cache) {
this.cache = cache;
return this;
}
|
@Test
public void testCacheResume() throws Exception {
final AtomicInteger c1 = new AtomicInteger();
final AtomicInteger c2 = new AtomicInteger();
final NullLocal local = new NullLocal("t") {
@Override
public AttributedList<Local> list() {
AttributedList<Local> l = new AttributedList<>();
l.add(new NullLocal(this.getAbsolute(), "a") {
@Override
public boolean exists() {
return true;
}
});
l.add(new NullLocal(this.getAbsolute(), "b") {
@Override
public boolean exists() {
return true;
}
});
l.add(new NullLocal(this.getAbsolute(), "c") {
@Override
public boolean exists() {
return true;
}
});
return l;
}
@Override
public boolean isFile() {
return false;
}
@Override
public boolean isDirectory() {
return true;
}
@Override
public boolean exists() {
return true;
}
};
final Path root = new Path("/t", EnumSet.of(Path.Type.directory));
final NullSession session = new NullSession(new Host(new TestProtocol())) {
@Override
public AttributedList<Path> list(final Path folder, final ListProgressListener listener) throws ConnectionCanceledException {
final AttributedList<Path> list = new AttributedList<>();
if(folder.equals(root.getParent())) {
c1.incrementAndGet();
list.add(root);
}
if(folder.equals(root)) {
c2.incrementAndGet();
}
listener.chunk(folder, list);
return list;
}
};
final PathCache cache = new PathCache(2);
final Transfer t = new UploadTransfer(new Host(new TestProtocol()), root, local) {
@Override
public void transfer(final Session<?> source, final Session<?> destination, final Path file, Local local,
final TransferOptions options, final TransferStatus overall, final TransferStatus segment,
final ConnectionCallback connectionCallback,
final ProgressListener listener, final StreamListener streamListener) {
assertTrue(options.resumeRequested);
}
}.withCache(cache);
final TransferOptions options = new TransferOptions();
options.resumeRequested = true;
new SingleTransferWorker(session, null, t, options, new TransferSpeedometer(t), new DisabledTransferPrompt() {
@Override
public TransferAction prompt(final TransferItem file) {
fail();
return null;
}
}, new DisabledTransferErrorCallback(),
new DisabledProgressListener(), new DisabledStreamListener(), new DisabledLoginCallback(), new DisabledNotificationService()).run(session);
assertEquals(1, c1.get());
assertEquals(1, c2.get());
assertTrue(cache.isCached(root.getParent()));
assertTrue(cache.isCached(root));
}
|
@VisibleForTesting
static StreamExecutionEnvironment createStreamExecutionEnvironment(FlinkPipelineOptions options) {
return createStreamExecutionEnvironment(
options,
MoreObjects.firstNonNull(options.getFilesToStage(), Collections.emptyList()),
options.getFlinkConfDir());
}
|
@Test
public void shouldFailOnNoStoragePathProvided() {
FlinkPipelineOptions options = getDefaultPipelineOptions();
options.setStreaming(true);
options.setStateBackend("unknown");
assertThrows(
"State backend was set to 'unknown' but no storage path was provided.",
IllegalArgumentException.class,
() -> FlinkExecutionEnvironments.createStreamExecutionEnvironment(options));
}
|
@Override
protected void rename(
List<HadoopResourceId> srcResourceIds,
List<HadoopResourceId> destResourceIds,
MoveOptions... moveOptions)
throws IOException {
if (moveOptions.length > 0) {
throw new UnsupportedOperationException("Support for move options is not yet implemented.");
}
for (int i = 0; i < srcResourceIds.size(); ++i) {
final Path srcPath = srcResourceIds.get(i).toPath();
final Path destPath = destResourceIds.get(i).toPath();
// this enforces src and dest file systems to match
final org.apache.hadoop.fs.FileSystem fs = srcPath.getFileSystem(configuration);
// rename in HDFS requires the target directory to exist, otherwise it silently fails (BEAM-4861)
mkdirs(destPath);
boolean success = fs.rename(srcPath, destPath);
// If the failure was due to the file already existing, delete and retry (BEAM-5036).
// This should be the exceptional case, so handle here rather than incur the overhead of
// testing first
if (!success && fs.exists(srcPath) && fs.exists(destPath)) {
LOG.debug(LOG_DELETING_EXISTING_FILE, Path.getPathWithoutSchemeAndAuthority(destPath));
fs.delete(destPath, false); // not recursive
success = fs.rename(srcPath, destPath);
}
if (!success) {
if (!fs.exists(srcPath)) {
throw new FileNotFoundException(
String.format(
"Unable to rename resource %s to %s as source not found.", srcPath, destPath));
} else if (fs.exists(destPath)) {
throw new FileAlreadyExistsException(
String.format(
"Unable to rename resource %s to %s as destination already exists and couldn't be deleted.",
srcPath, destPath));
} else {
throw new IOException(
String.format(
"Unable to rename resource %s to %s. No further information provided by underlying filesystem.",
srcPath, destPath));
}
}
}
}
|
@Test(expected = FileNotFoundException.class)
public void testRenameMissingSource() throws Exception {
fileSystem.rename(
ImmutableList.of(testPath("missingFile")), ImmutableList.of(testPath("testFileA")));
}
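An illustration with java.nio.file only (not the Hadoop FileSystem API used above) of the same "ensure parent exists, rename, fall back to overwrite" sequence; paths are hypothetical.
// java.nio.file.Path here, unlike the Hadoop Path in the focal method.
Path src = Paths.get("/tmp/srcFile");
Path dest = Paths.get("/tmp/out/destFile");
Files.createDirectories(dest.getParent()); // target directory must exist before the move
try {
    Files.move(src, dest);
} catch (FileAlreadyExistsException e) {
    Files.move(src, dest, StandardCopyOption.REPLACE_EXISTING); // delete-and-retry equivalent
}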
|
static String encodeTokenValue(String value) throws URISyntaxException {
return URISupport.createQueryString(Collections.singletonMap("x", value)).substring(2)
.replace("+", "%2B") // sig is base64
.replace("%3A", ":"); // se has time separator
}
|
@Test
void encodeTokenValueShouldPreserveTimeSeparator() throws Exception {
// e.g. for the se param on a SAS token, the encoding style must preserve ':'
assertEquals("11:55:01", FilesURIStrings.encodeTokenValue("11:55:01"));
}
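A JDK-only sketch of the same intent (not Camel's URISupport): percent-encode a query value but keep ':' readable, which the SAS se timestamp relies on.
static String encodeTokenValueSketch(String value) {
    return URLEncoder.encode(value, StandardCharsets.UTF_8)
            .replace("+", "%20")   // URLEncoder encodes spaces as '+'; normalize to %20
            .replace("%3A", ":");  // keep the time separator
}
// encodeTokenValueSketch("11:55:01") -> "11:55:01"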
|
@Override
public void define(WebService.NewController context) {
WebService.NewAction action = context
.createAction("list")
.setSince("4.2")
.setDescription("List web services")
.setResponseExample(getClass().getResource("list-example.json"))
.setHandler(this);
action
.createParam("include_internals")
.setDescription("Include web services that are implemented for internal use only. Their forward-compatibility is not assured")
.setBooleanPossibleValues()
.setDefaultValue("false");
}
|
@Test
public void list() {
new MetricWs().define(context);
String response = newRequest().execute().getInput();
assertJson(response).withStrictArrayOrder().isSimilarTo(getClass().getResource("list-example.json"));
}
|
public final void setStrictness(Strictness strictness) {
Objects.requireNonNull(strictness);
this.strictness = strictness;
}
|
@Test
public void testEscapedNewlineNotAllowedInStrictMode() {
String json = "\"\\\n\"";
JsonReader reader = new JsonReader(reader(json));
reader.setStrictness(Strictness.STRICT);
IOException expected = assertThrows(IOException.class, reader::nextString);
assertThat(expected)
.hasMessageThat()
.startsWith("Cannot escape a newline character in strict mode");
}
|
@Override
public Optional<ShardingConditionValue> generate(final BinaryOperationExpression predicate, final Column column, final List<Object> params, final TimestampServiceRule timestampServiceRule) {
String operator = predicate.getOperator().toUpperCase();
if (!isSupportedOperator(operator)) {
return Optional.empty();
}
ExpressionSegment valueExpression = predicate.getLeft() instanceof ColumnSegment ? predicate.getRight() : predicate.getLeft();
ConditionValue conditionValue = new ConditionValue(valueExpression, params);
if (conditionValue.isNull()) {
return generate(null, column, operator, conditionValue.getParameterMarkerIndex().orElse(-1));
}
Optional<Comparable<?>> value = conditionValue.getValue();
if (value.isPresent()) {
return generate(value.get(), column, operator, conditionValue.getParameterMarkerIndex().orElse(-1));
}
if (ExpressionConditionUtils.isNowExpression(valueExpression)) {
return generate(timestampServiceRule.getTimestamp(), column, operator, -1);
}
return Optional.empty();
}
|
@SuppressWarnings("unchecked")
@Test
void assertGenerateConditionValueWithAtLeastOperator() {
BinaryOperationExpression rightValue = new BinaryOperationExpression(0, 0, mock(ColumnSegment.class), new LiteralExpressionSegment(0, 0, 1), ">=", null);
Optional<ShardingConditionValue> shardingConditionValue = generator.generate(rightValue, column, new LinkedList<>(), mock(TimestampServiceRule.class));
assertTrue(shardingConditionValue.isPresent());
assertTrue(Range.atLeast(1).encloses(((RangeShardingConditionValue<Integer>) shardingConditionValue.get()).getValueRange()));
assertTrue(shardingConditionValue.get().getParameterMarkerIndexes().isEmpty());
}
|
@Override
public void start() {
Optional<String> passcodeOpt = configuration.get(WEB_SYSTEM_PASS_CODE.getKey())
// if present, the result is never an empty string
.map(StringUtils::trimToNull);
if (passcodeOpt.isPresent()) {
logState("enabled");
configuredPasscode = passcodeOpt.get();
} else {
logState("disabled");
configuredPasscode = null;
}
}
|
@Test
public void startup_logs_show_that_feature_is_disabled() {
underTest.start();
assertThat(logTester.logs(Level.INFO)).contains("System authentication by passcode is disabled");
}
|
@Override
public StreamsMaterializedTable nonWindowed() {
if (windowInfo.isPresent()) {
throw new UnsupportedOperationException("Table has windowed key");
}
return new KsMaterializedTable(stateStore);
}
|
@Test(expected = UnsupportedOperationException.class)
public void shouldThrowOnNonWindowedIfWindowed() {
// Given:
givenWindowType(Optional.of(WindowType.SESSION));
// When:
materialization.nonWindowed();
}
|
@Override
@DSTransactional // multiple data sources: use @DSTransactional to guarantee the local transaction and the data-source switching
public void updateTenant(TenantSaveReqVO updateReqVO) {
// Validate that the tenant exists
TenantDO tenant = validateUpdateTenant(updateReqVO.getId());
// Validate that the tenant name is not duplicated
validTenantNameDuplicate(updateReqVO.getName(), updateReqVO.getId());
// Validate that the tenant website is not duplicated
validTenantWebsiteDuplicate(updateReqVO.getWebsite(), updateReqVO.getId());
// Validate that the tenant package is not disabled
TenantPackageDO tenantPackage = tenantPackageService.validTenantPackage(updateReqVO.getPackageId());
// Update the tenant
TenantDO updateObj = BeanUtils.toBean(updateReqVO, TenantDO.class);
tenantMapper.updateById(updateObj);
// If the package changed, update the permissions of its roles
if (ObjectUtil.notEqual(tenant.getPackageId(), updateReqVO.getPackageId())) {
updateTenantRoleMenu(tenant.getId(), tenantPackage.getMenuIds());
}
}
|
@Test
public void testUpdateTenant_notExists() {
// Prepare parameters
TenantSaveReqVO reqVO = randomPojo(TenantSaveReqVO.class);
// Call and assert the exception
assertServiceException(() -> tenantService.updateTenant(reqVO), TENANT_NOT_EXISTS);
}
|
@Override
public Data getValueData() {
if (valueData == null) {
valueData = serializationService.toData(valueObject);
}
return valueData;
}
|
@Override
@Test
public void getValueData_caching() {
QueryableEntry entry = createEntry("key", "value");
assertSame(entry.getValueData(), entry.getValueData());
}
|
@Override
public URL getResource(String name) {
ClassLoadingStrategy loadingStrategy = getClassLoadingStrategy(name);
log.trace("Received request to load resource '{}'", name);
for (ClassLoadingStrategy.Source classLoadingSource : loadingStrategy.getSources()) {
URL url = null;
switch (classLoadingSource) {
case APPLICATION:
url = super.getResource(name);
break;
case PLUGIN:
url = findResource(name);
break;
case DEPENDENCIES:
url = findResourceFromDependencies(name);
break;
}
if (url != null) {
log.trace("Found resource '{}' in {} classpath", name, classLoadingSource);
return url;
} else {
log.trace("Couldn't find resource '{}' in {}", name, classLoadingSource);
}
}
return null;
}
|
@Test
void parentFirstGetResourceExistsOnlyInPlugin() throws IOException, URISyntaxException {
URL resource = parentFirstPluginClassLoader.getResource("META-INF/plugin-file");
assertFirstLine("plugin", resource);
}
|
public void setPolicy(String policyName, NamespaceIsolationData policyData) {
policyData.validate();
policies.put(policyName, (NamespaceIsolationDataImpl) policyData);
}
|
@Test
public void testSetPolicy() throws Exception {
NamespaceIsolationPolicies policies = this.getDefaultTestPolicies();
// set a new policy
String newPolicyJson = "{\"namespaces\":[\"pulsar/use/TESTNS.*\"],\"primary\":[\"prod1-broker[45].messaging.use.example.com\"],\"secondary\":[\"prod1-broker.*.use.example.com\"],\"auto_failover_policy\":{\"policy_type\":\"min_available\",\"parameters\":{\"min_limit\":2,\"usage_threshold\":80}}}";
String newPolicyName = "policy2";
ObjectMapper jsonMapper = ObjectMapperFactory.create();
NamespaceIsolationDataImpl nsPolicyData = jsonMapper.readValue(newPolicyJson.getBytes(),
NamespaceIsolationDataImpl.class);
policies.setPolicy(newPolicyName, nsPolicyData);
assertEquals(policies.getPolicies().size(), 2);
assertEquals(policies.getPolicyByName(newPolicyName), new NamespaceIsolationPolicyImpl(nsPolicyData));
assertNotEquals(policies.getPolicyByName("policy1"), policies.getPolicyByName(newPolicyName));
assertEquals(policies.getPolicyByNamespace(NamespaceName.get("pulsar/use/TESTNS.1")),
new NamespaceIsolationPolicyImpl(nsPolicyData));
}
|
@Override
@SuppressWarnings("checkstyle:npathcomplexity")
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
AbstractRecord<?> that = (AbstractRecord<?>) o;
if (version != that.version) {
return false;
}
if (hits != that.hits) {
return false;
}
if (lastAccessTime != that.lastAccessTime) {
return false;
}
if (lastUpdateTime != that.lastUpdateTime) {
return false;
}
if (creationTime != that.creationTime) {
return false;
}
if (lastStoredTime != that.lastStoredTime) {
return false;
}
return true;
}
|
@Test
public void testEquals() {
assertEquals(record, record);
assertEquals(record, recordSameAttributes);
assertNotEquals(null, record);
assertNotEquals(new Object(), record);
assertNotEquals(record, recordOtherVersion);
assertNotEquals(record, recordOtherCreationTime);
assertNotEquals(record, recordOtherHits);
assertNotEquals(record, recordOtherLastAccessTime);
assertNotEquals(record, recordOtherLastUpdateTime);
}
|
public static InternalLogger getInstance(Class<?> clazz) {
return getInstance(clazz.getName());
}
|
@Test
public void testInfo() {
final InternalLogger logger = InternalLoggerFactory.getInstance("mock");
logger.info("a");
verify(mockLogger).info("a");
}
|
public static String localIP() {
if (!StringUtils.isEmpty(localIp)) {
return localIp;
}
if (System.getProperties().containsKey(CLIENT_LOCAL_IP_PROPERTY)) {
return localIp = System.getProperty(CLIENT_LOCAL_IP_PROPERTY, getAddress());
}
localIp = getAddress();
return localIp;
}
|
@Test
void testLocalIpWithSpecifiedIp() {
System.setProperty("com.alibaba.nacos.client.local.ip", "10.2.8.8");
assertEquals("10.2.8.8", NetUtils.localIP());
System.setProperty("com.alibaba.nacos.client.local.ip", "10.2.8.9");
assertEquals("10.2.8.8", NetUtils.localIP());
}
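A sketch of the caching behavior the test relies on: once the local IP has been resolved, later changes to the system property are ignored (field name and fallback value assumed; the property name comes from the test above).
private static String cachedIp;

static String localIpSketch() {
    if (cachedIp != null && !cachedIp.isEmpty()) {
        return cachedIp; // cached on first call; subsequent property changes have no effect
    }
    cachedIp = System.getProperty("com.alibaba.nacos.client.local.ip", "127.0.0.1");
    return cachedIp;
}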
|
public HttpResult getBinary(String url) throws IOException, NotModifiedException {
return getBinary(url, null, null);
}
|
@Test
void validFeed() throws Exception {
this.mockServerClient.when(HttpRequest.request().withMethod("GET"))
.respond(HttpResponse.response()
.withBody(feedContent)
.withContentType(MediaType.APPLICATION_ATOM_XML)
.withHeader(HttpHeaders.LAST_MODIFIED, "123456")
.withHeader(HttpHeaders.ETAG, "78910"));
HttpResult result = getter.getBinary(this.feedUrl);
Assertions.assertArrayEquals(feedContent, result.getContent());
Assertions.assertEquals(MediaType.APPLICATION_ATOM_XML.toString(), result.getContentType());
Assertions.assertEquals("123456", result.getLastModifiedSince());
Assertions.assertEquals("78910", result.getETag());
Assertions.assertEquals(this.feedUrl, result.getUrlAfterRedirect());
}
|
public static RDA fit(double[][] x, int[] y, Properties params) {
double alpha = Double.parseDouble(params.getProperty("smile.rda.alpha", "0.9"));
double[] priori = Strings.parseDoubleArray(params.getProperty("smile.rda.priori"));
double tol = Double.parseDouble(params.getProperty("smile.rda.tolerance", "1E-4"));
return fit(x, y, alpha, priori, tol);
}
|
@Test
public void testBreastCancer() {
System.out.println("Breast Cancer");
MathEx.setSeed(19650218); // to get repeatable results.
ClassificationValidations<RDA> result = CrossValidation.classification(10, BreastCancer.x, BreastCancer.y,
(x, y) -> RDA.fit(x, y, 0.9));
System.out.println(result);
assertEquals(0.9461, result.avg.accuracy, 1E-4);
}
|
@Override
public int choosePartition(Message<?> msg, TopicMetadata topicMetadata) {
// If the message has a key, it supersedes the round robin routing policy
if (msg.hasKey()) {
return signSafeMod(hash.makeHash(msg.getKey()), topicMetadata.numPartitions());
}
if (isBatchingEnabled) { // if batching is enabled, choose the partition on a `partitionSwitchMs` boundary.
long currentMs = clock.millis();
return signSafeMod(currentMs / partitionSwitchMs + startPtnIdx, topicMetadata.numPartitions());
} else {
return signSafeMod(PARTITION_INDEX_UPDATER.getAndIncrement(this), topicMetadata.numPartitions());
}
}
|
@Test
public void testChoosePartitionWithoutKeyWithBatching() {
Message<?> msg = mock(Message.class);
Mockito.when(msg.getKey()).thenReturn(null);
// Fake clock: each invocation of millis() advances the time by 1 millisecond
Clock clock = new Clock() {
private long current = 0;
@Override
public Clock withZone(ZoneId zone) {
return null;
}
@Override
public long millis() {
return current++;
}
@Override
public Instant instant() {
return Instant.ofEpochMilli(millis());
}
@Override
public ZoneId getZone() {
return ZoneId.systemDefault();
}
};
RoundRobinPartitionMessageRouterImpl router = new RoundRobinPartitionMessageRouterImpl(
HashingScheme.JavaStringHash, 0, true, 5, clock);
// Since the batching time is 5 ms, the first five messages go to partition 0 and
// the next five go to partition 1
for (int i = 0; i < 5; i++) {
assertEquals(0, router.choosePartition(msg, new TopicMetadataImpl(5)));
}
for (int i = 5; i < 10; i++) {
assertEquals(1, router.choosePartition(msg, new TopicMetadataImpl(5)));
}
}
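A sketch of the batching-aware choice (signSafeMod implementation assumed): all messages inside one partitionSwitchMs window map to the same partition, so batches are not split across partitions.
static int signSafeModSketch(long dividend, int divisor) {
    int mod = (int) (dividend % divisor);
    return mod < 0 ? mod + divisor : mod;
}
// With partitionSwitchMs = 5 and 5 partitions:
// millis 0..4 -> partition 0, millis 5..9 -> partition 1, millis 10..14 -> partition 2, ...
for (long millis = 0; millis < 15; millis++) {
    int partition = signSafeModSketch(millis / 5, 5);
}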
|
@Override
public Void handleResponse(Response response) throws IOException, UnexpectedBlobDigestException {
blobSizeListener.accept(response.getContentLength());
try (OutputStream outputStream =
new NotifyingOutputStream(destinationOutputStream, writtenByteCountListener)) {
BlobDescriptor receivedBlobDescriptor =
Digests.computeDigest(response.getBody(), outputStream);
if (!blobDigest.equals(receivedBlobDescriptor.getDigest())) {
throw new UnexpectedBlobDigestException(
"The pulled BLOB has digest '"
+ receivedBlobDescriptor.getDigest()
+ "', but the request digest was '"
+ blobDigest
+ "'");
}
}
return null;
}
|
@Test
public void testHandleResponse_unexpectedDigest() throws IOException {
InputStream blobContent =
new ByteArrayInputStream("some BLOB content".getBytes(StandardCharsets.UTF_8));
DescriptorDigest testBlobDigest = Digests.computeDigest(blobContent).getDigest();
blobContent.reset();
Response mockResponse = Mockito.mock(Response.class);
Mockito.when(mockResponse.getBody()).thenReturn(blobContent);
try {
testBlobPuller.handleResponse(mockResponse);
Assert.fail("Receiving an unexpected digest should fail");
} catch (UnexpectedBlobDigestException ex) {
Assert.assertEquals(
"The pulled BLOB has digest '"
+ testBlobDigest
+ "', but the request digest was '"
+ fakeDigest
+ "'",
ex.getMessage());
}
}
|
public static Ip4Prefix valueOf(int address, int prefixLength) {
return new Ip4Prefix(Ip4Address.valueOf(address), prefixLength);
}
|
@Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfAddressTooLongPrefixLengthIPv4() {
Ip4Address ipAddress;
Ip4Prefix ipPrefix;
ipAddress = Ip4Address.valueOf("1.2.3.4");
ipPrefix = Ip4Prefix.valueOf(ipAddress, 33);
}
|
@Override
public int intValue() {
return value;
}
|
@Test
public void testIntValue() {
assertEquals(100, MilliPct.ofMilliPct(100).intValue());
assertEquals(-100, MilliPct.ofMilliPct(-100).intValue());
}
|
private ImageConverter() {}
|
@Test
public void testImageConverter() {
ImmutableFeatureMap fmap = constructFeatureMap();
Example<MockOutput> e = constructExample();
// 3,3,2
ImageConverter first = new ImageConverter("test",3,3,2);
FloatNdArray ndarray = (TFloat32) first.convert(e,fmap).getMap().get("test");
assertEquals( 0, ndarray.getFloat(0,0,0,0), 1e-10);
assertEquals( 1, ndarray.getFloat(0,0,0,1), 1e-10);
assertEquals( 2, ndarray.getFloat(0,0,1,0), 1e-10);
assertEquals( 3, ndarray.getFloat(0,0,1,1), 1e-10);
assertEquals( 0, ndarray.getFloat(0,0,2,0), 1e-10);
assertEquals( 5, ndarray.getFloat(0,0,2,1), 1e-10);
assertEquals( 6, ndarray.getFloat(0,1,0,0), 1e-10);
assertEquals( 7, ndarray.getFloat(0,1,0,1), 1e-10);
assertEquals( 8, ndarray.getFloat(0,1,1,0), 1e-10);
assertEquals( 9, ndarray.getFloat(0,1,1,1), 1e-10);
assertEquals(10, ndarray.getFloat(0,1,2,0), 1e-10);
assertEquals(11, ndarray.getFloat(0,1,2,1), 1e-10);
assertEquals(12, ndarray.getFloat(0,2,0,0), 1e-10);
assertEquals( 0, ndarray.getFloat(0,2,0,1), 1e-10);
assertEquals(14, ndarray.getFloat(0,2,1,0), 1e-10);
assertEquals(15, ndarray.getFloat(0,2,1,1), 1e-10);
assertEquals(16, ndarray.getFloat(0,2,2,0), 1e-10);
assertEquals(17, ndarray.getFloat(0,2,2,1), 1e-10);
// 3,2,3
ImageConverter second = new ImageConverter("test",3,2,3);
ndarray = (TFloat32) second.convert(e,fmap).getMap().get("test");
assertEquals( 0, ndarray.getFloat(0,0,0,0),1e-10);
assertEquals( 1, ndarray.getFloat(0,0,0,1),1e-10);
assertEquals( 2, ndarray.getFloat(0,0,0,2),1e-10);
assertEquals( 3, ndarray.getFloat(0,0,1,0),1e-10);
assertEquals( 0, ndarray.getFloat(0,0,1,1),1e-10);
assertEquals( 5, ndarray.getFloat(0,0,1,2),1e-10);
assertEquals( 6, ndarray.getFloat(0,1,0,0),1e-10);
assertEquals( 7, ndarray.getFloat(0,1,0,1),1e-10);
assertEquals( 8, ndarray.getFloat(0,1,0,2),1e-10);
assertEquals( 9, ndarray.getFloat(0,1,1,0),1e-10);
assertEquals(10, ndarray.getFloat(0,1,1,1),1e-10);
assertEquals(11, ndarray.getFloat(0,1,1,2),1e-10);
assertEquals(12, ndarray.getFloat(0,2,0,0),1e-10);
assertEquals( 0, ndarray.getFloat(0,2,0,1),1e-10);
assertEquals(14, ndarray.getFloat(0,2,0,2),1e-10);
assertEquals(15, ndarray.getFloat(0,2,1,0),1e-10);
assertEquals(16, ndarray.getFloat(0,2,1,1),1e-10);
assertEquals(17, ndarray.getFloat(0,2,1,2),1e-10);
// 2,3,3
ImageConverter third = new ImageConverter("test",2,3,3);
ndarray = (TFloat32) third.convert(e,fmap).getMap().get("test");
assertEquals( 0, ndarray.getFloat(0,0,0,0),1e-10);
assertEquals( 1, ndarray.getFloat(0,0,0,1),1e-10);
assertEquals( 2, ndarray.getFloat(0,0,0,2),1e-10);
assertEquals( 3, ndarray.getFloat(0,0,1,0),1e-10);
assertEquals( 0, ndarray.getFloat(0,0,1,1),1e-10);
assertEquals( 5, ndarray.getFloat(0,0,1,2),1e-10);
assertEquals( 6, ndarray.getFloat(0,0,2,0),1e-10);
assertEquals( 7, ndarray.getFloat(0,0,2,1),1e-10);
assertEquals( 8, ndarray.getFloat(0,0,2,2),1e-10);
assertEquals( 9, ndarray.getFloat(0,1,0,0),1e-10);
assertEquals(10, ndarray.getFloat(0,1,0,1),1e-10);
assertEquals(11, ndarray.getFloat(0,1,0,2),1e-10);
assertEquals(12, ndarray.getFloat(0,1,1,0),1e-10);
assertEquals( 0, ndarray.getFloat(0,1,1,1),1e-10);
assertEquals(14, ndarray.getFloat(0,1,1,2),1e-10);
assertEquals(15, ndarray.getFloat(0,1,2,0),1e-10);
assertEquals(16, ndarray.getFloat(0,1,2,1),1e-10);
assertEquals(17, ndarray.getFloat(0,1,2,2),1e-10);
}
|
public static KTableHolder<GenericKey> build(
final KGroupedStreamHolder groupedStream,
final StreamAggregate aggregate,
final RuntimeBuildContext buildContext,
final MaterializedFactory materializedFactory) {
return build(
groupedStream,
aggregate,
buildContext,
materializedFactory,
new AggregateParamsFactory()
);
}
|
@Test
@SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT")
public void shouldBuildTumblingWindowedAggregateCorrectly() {
// Given:
givenTumblingWindowedAggregate();
// When:
final KTableHolder<Windowed<GenericKey>> result = windowedAggregate.build(planBuilder, planInfo);
// Then:
assertThat(result.getTable(), is(windowedWithWindowBounds));
verify(gracePeriodClause).toDuration();
verify(retentionClause).toDuration();
final InOrder inOrder = Mockito.inOrder(
groupedStream,
timeWindowedStream,
windowed,
windowedWithResults,
windowedWithWindowBounds
);
inOrder.verify(groupedStream).windowedBy(TimeWindows.ofSizeAndGrace(WINDOW, gracePeriodClause.toDuration()));
inOrder.verify(timeWindowedStream).aggregate(initializer, aggregator, timeWindowMaterialized);
inOrder.verify(windowed).transformValues(any(), any(Named.class));
inOrder.verify(windowedWithResults).transformValues(any(), any(Named.class));
inOrder.verifyNoMoreInteractions();
assertThat(result.getTable(), is(windowedWithWindowBounds));
}
|
public static Builder builder() {
return new Builder();
}
|
@Test
// Test cases that are JSON that can be created via the Builder
public void testRoundTripSerDe() throws JsonProcessingException {
String fullJson =
"{\"namespace\":[\"accounting\",\"tax\"],\"properties\":{\"owner\":\"Hank\"}}";
CreateNamespaceRequest req =
CreateNamespaceRequest.builder().withNamespace(NAMESPACE).setProperties(PROPERTIES).build();
assertRoundTripSerializesEquallyFrom(fullJson, req);
String jsonEmptyProperties = "{\"namespace\":[\"accounting\",\"tax\"],\"properties\":{}}";
CreateNamespaceRequest reqWithExplicitEmptyProperties =
CreateNamespaceRequest.builder()
.withNamespace(NAMESPACE)
.setProperties(EMPTY_PROPERTIES)
.build();
assertRoundTripSerializesEquallyFrom(jsonEmptyProperties, reqWithExplicitEmptyProperties);
CreateNamespaceRequest reqWithImplicitEmptyProperties =
CreateNamespaceRequest.builder().withNamespace(NAMESPACE).build();
assertRoundTripSerializesEquallyFrom(jsonEmptyProperties, reqWithImplicitEmptyProperties);
String jsonWithEmptyNamespace = "{\"namespace\":[],\"properties\":{}}";
CreateNamespaceRequest reqUsingEmptyNamespace =
CreateNamespaceRequest.builder().withNamespace(Namespace.empty()).build();
assertRoundTripSerializesEquallyFrom(jsonWithEmptyNamespace, reqUsingEmptyNamespace);
}
|
@Override
public void onMessage(final String result) {
if (LOG.isDebugEnabled()) {
LOG.debug("onMessage server[{}] result({})", this.getURI().toString(), result);
}
Map<String, Object> jsonToMap = JsonUtils.jsonToMap(result);
Object eventType = jsonToMap.get(RunningModeConstants.EVENT_TYPE);
if (Objects.equals(DataEventTypeEnum.RUNNING_MODE.name(), eventType)) {
LOG.info("server[{}] handle running mode result({})", this.getURI().toString(), result);
this.runningMode = String.valueOf(jsonToMap.get(RunningModeConstants.RUNNING_MODE));
if (Objects.equals(RunningModeEnum.STANDALONE.name(), runningMode)) {
return;
}
this.masterUrl = String.valueOf(jsonToMap.get(RunningModeConstants.MASTER_URL));
this.isConnectedToMaster = Boolean.TRUE.equals(jsonToMap.get(RunningModeConstants.IS_MASTER));
} else {
handleResult(result);
}
}
|
@Test
public void testOnMessage() {
doNothing().when(pluginDataSubscriber).onSubscribe(any());
String json = GsonUtils.getInstance().toJson(websocketData);
shenyuWebsocketClient.onMessage(json);
verify(pluginDataSubscriber).onSubscribe(any());
}
|
public File getDataDirectory() {
return dataDirectory;
}
|
@Test
public void override_data_dir() throws Exception {
File sqHomeDir = temp.newFolder();
File tempDir = temp.newFolder();
File dataDir = temp.newFolder();
Props props = new Props(new Properties());
props.set(PATH_HOME.getKey(), sqHomeDir.getAbsolutePath());
props.set(PATH_TEMP.getKey(), tempDir.getAbsolutePath());
props.set(PATH_LOGS.getKey(), temp.newFolder().getAbsolutePath());
props.set(PATH_DATA.getKey(), dataDir.getAbsolutePath());
EsInstallation underTest = new EsInstallation(props);
assertThat(underTest.getDataDirectory()).isEqualTo(new File(dataDir, "es8"));
}
|
public static <KLeftT, KRightT> KTableHolder<KLeftT> build(
final KTableHolder<KLeftT> left,
final KTableHolder<KRightT> right,
final ForeignKeyTableTableJoin<KLeftT, KRightT> join,
final RuntimeBuildContext buildContext
) {
final LogicalSchema leftSchema = left.getSchema();
final LogicalSchema rightSchema = right.getSchema();
final ProcessingLogger logger = buildContext.getProcessingLogger(
join.getProperties().getQueryContext()
);
final ExpressionEvaluator expressionEvaluator;
final CodeGenRunner codeGenRunner = new CodeGenRunner(
leftSchema,
buildContext.getKsqlConfig(),
buildContext.getFunctionRegistry()
);
final Optional<ColumnName> leftColumnName = join.getLeftJoinColumnName();
final Optional<Expression> leftJoinExpression = join.getLeftJoinExpression();
if (leftColumnName.isPresent()) {
expressionEvaluator = codeGenRunner.buildCodeGenFromParseTree(
new UnqualifiedColumnReferenceExp(leftColumnName.get()),
"Left Join Expression"
);
} else if (leftJoinExpression.isPresent()) {
expressionEvaluator = codeGenRunner.buildCodeGenFromParseTree(
leftJoinExpression.get(),
"Left Join Expression"
);
} else {
throw new IllegalStateException("Both leftColumnName and leftJoinExpression are empty.");
}
final ForeignKeyJoinParams<KRightT> joinParams = ForeignKeyJoinParamsFactory
.create(expressionEvaluator, leftSchema, rightSchema, logger);
final Formats formats = join.getFormats();
final PhysicalSchema physicalSchema = PhysicalSchema.from(
joinParams.getSchema(),
formats.getKeyFeatures(),
formats.getValueFeatures()
);
final Serde<KLeftT> keySerde = left.getExecutionKeyFactory().buildKeySerde(
formats.getKeyFormat(),
physicalSchema,
join.getProperties().getQueryContext()
);
final Serde<GenericRow> valSerde = buildContext.buildValueSerde(
formats.getValueFormat(),
physicalSchema,
join.getProperties().getQueryContext()
);
final KTable<KLeftT, GenericRow> result;
switch (join.getJoinType()) {
case INNER:
result = left.getTable().join(
right.getTable(),
joinParams.getKeyExtractor(),
joinParams.getJoiner(),
buildContext.getMaterializedFactory().create(keySerde, valSerde)
);
break;
case LEFT:
result = left.getTable().leftJoin(
right.getTable(),
joinParams.getKeyExtractor(),
joinParams.getJoiner(),
buildContext.getMaterializedFactory().create(keySerde, valSerde)
);
break;
default:
throw new IllegalStateException("invalid join type: " + join.getJoinType());
}
return KTableHolder.unmaterialized(
result,
joinParams.getSchema(),
left.getExecutionKeyFactory()
);
}
|
@Test
@SuppressWarnings({"unchecked", "rawtypes"})
public void shouldDoLeftJoinOnKey() {
// Given:
givenLeftJoin(left, L_KEY);
// When:
final KTableHolder<Struct> result = join.build(planBuilder, planInfo);
// Then:
final ArgumentCaptor<KsqlKeyExtractor> ksqlKeyExtractor
= ArgumentCaptor.forClass(KsqlKeyExtractor.class);
verify(leftKTable).leftJoin(
same(rightKTable),
ksqlKeyExtractor.capture(),
eq(new KsqlValueJoiner(LEFT_SCHEMA.value().size(), RIGHT_SCHEMA.value().size(), 0)),
any(Materialized.class)
);
verifyNoMoreInteractions(leftKTable, rightKTable, resultKTable);
final GenericKey extractedKey = GenericKey.genericKey(LEFT_KEY);
assertThat(ksqlKeyExtractor.getValue().apply(LEFT_ROW), is(extractedKey));
assertThat(result.getTable(), is(resultKTable));
assertThat(result.getExecutionKeyFactory(), is(executionKeyFactory));
}
|
@Override
public int generate(final Properties props) {
int result = loadExistedWorkerId().orElseGet(this::generateNewWorkerId);
logWarning(result, props);
return result;
}
|
@Test
void assertGenerateWithoutExistedWorkerId() {
ClusterPersistRepository repository = mock(ClusterPersistRepository.class);
doAnswer((Answer<Object>) invocation -> Boolean.TRUE).when(repository).persistExclusiveEphemeral("/reservation/worker_id/0", "foo_id");
assertThat(new ClusterWorkerIdGenerator(repository, "foo_id").generate(new Properties()), is(0));
}
|
public static SchemaPairCompatibility checkReaderWriterCompatibility(final Schema reader, final Schema writer) {
final SchemaCompatibilityResult compatibility = new ReaderWriterCompatibilityChecker().getCompatibility(reader,
writer);
final String message;
switch (compatibility.getCompatibility()) {
case INCOMPATIBLE: {
message = String.format(
"Data encoded using writer schema:%n%s%n" + "will or may fail to decode using reader schema:%n%s%n",
writer.toString(true), reader.toString(true));
break;
}
case COMPATIBLE: {
message = READER_WRITER_COMPATIBLE_MESSAGE;
break;
}
default:
throw new AvroRuntimeException("Unknown compatibility: " + compatibility);
}
return new SchemaPairCompatibility(compatibility, reader, writer, message);
}
|
@Test
void validateArrayWriterSchema() {
final Schema validReader = Schema.createArray(STRING_SCHEMA);
final Schema invalidReader = Schema.createMap(STRING_SCHEMA);
final SchemaCompatibility.SchemaPairCompatibility validResult = new SchemaCompatibility.SchemaPairCompatibility(
SchemaCompatibility.SchemaCompatibilityResult.compatible(), validReader, STRING_ARRAY_SCHEMA,
SchemaCompatibility.READER_WRITER_COMPATIBLE_MESSAGE);
final SchemaCompatibility.SchemaPairCompatibility invalidResult = new SchemaCompatibility.SchemaPairCompatibility(
SchemaCompatibility.SchemaCompatibilityResult.incompatible(
SchemaIncompatibilityType.TYPE_MISMATCH, invalidReader, STRING_ARRAY_SCHEMA,
"reader type: MAP not compatible with writer type: ARRAY", Collections.singletonList("")),
invalidReader, STRING_ARRAY_SCHEMA,
String.format(
"Data encoded using writer schema:%n%s%n" + "will or may fail to decode using reader schema:%n%s%n",
STRING_ARRAY_SCHEMA.toString(true), invalidReader.toString(true)));
assertEquals(validResult, checkReaderWriterCompatibility(validReader, STRING_ARRAY_SCHEMA));
assertEquals(invalidResult, checkReaderWriterCompatibility(invalidReader, STRING_ARRAY_SCHEMA));
}
|
public synchronized void executeDdlStatement(String statement) throws IllegalStateException {
checkIsUsable();
maybeCreateInstance();
maybeCreateDatabase();
LOG.info("Executing DDL statement '{}' on database {}.", statement, databaseId);
try {
databaseAdminClient
.updateDatabaseDdl(
instanceId, databaseId, ImmutableList.of(statement), /* operationId= */ null)
.get();
LOG.info("Successfully executed DDL statement '{}' on database {}.", statement, databaseId);
} catch (ExecutionException | InterruptedException | SpannerException e) {
throw new SpannerResourceManagerException("Failed to execute statement.", e);
}
}
|
@Test
public void testExecuteDdlStatementShouldThrowExceptionWhenSpannerUpdateDatabaseFails()
throws ExecutionException, InterruptedException {
// arrange
prepareCreateInstanceMock();
prepareCreateDatabaseMock();
when(spanner.getDatabaseAdminClient().updateDatabaseDdl(any(), any(), any(), any()).get())
.thenThrow(InterruptedException.class);
String statement =
"CREATE TABLE Singers (\n"
+ " SingerId INT64 NOT NULL,\n"
+ " FirstName STRING(1024),\n"
+ " LastName STRING(1024),\n"
+ ") PRIMARY KEY (SingerId)";
// act & assert
assertThrows(
SpannerResourceManagerException.class, () -> testManager.executeDdlStatement(statement));
}
|
@VisibleForTesting
void recover() {
try (DbSession dbSession = dbClient.openSession(false)) {
Profiler profiler = Profiler.create(LOGGER).start();
long beforeDate = system2.now() - minAgeInMs;
IndexingResult result = new IndexingResult();
Collection<EsQueueDto> items = dbClient.esQueueDao().selectForRecovery(dbSession, beforeDate, loopLimit);
while (!items.isEmpty()) {
IndexingResult loopResult = new IndexingResult();
groupItemsByDocType(items).asMap().forEach((type, typeItems) -> loopResult.add(doIndex(dbSession, type, typeItems)));
result.add(loopResult);
if (loopResult.getSuccessRatio() <= CIRCUIT_BREAKER_IN_PERCENT) {
LOGGER.error(LOG_PREFIX + "too many failures [{}/{} documents], waiting for next run", loopResult.getFailures(), loopResult.getTotal());
break;
}
if (loopResult.getTotal() == 0L) {
break;
}
items = dbClient.esQueueDao().selectForRecovery(dbSession, beforeDate, loopLimit);
}
if (result.getTotal() > 0L) {
profiler.stopInfo(LOG_PREFIX + format("%d documents processed [%d failures]", result.getTotal(), result.getFailures()));
}
} catch (Throwable t) {
LOGGER.error(LOG_PREFIX + "fail to recover documents", t);
}
}
|
@Test
public void do_not_stop_run_if_success_rate_is_greater_than_circuit_breaker() {
IntStream.range(0, 10).forEach(i -> insertItem(FOO_TYPE, "" + i));
advanceInTime();
// 10 docs to process, by groups of 5.
// Each group successfully recovers 4 docs --> below 30% of failures --> continue run
PartiallyFailingIndexer indexer = new PartiallyFailingIndexer(FOO_TYPE, 4, 4, 2);
MapSettings settings = new MapSettings()
.setProperty("sonar.search.recovery.loopLimit", "5");
underTest = newRecoveryIndexer(settings.asConfig(), indexer);
underTest.recover();
assertThatLogsDoNotContain(ERROR, "too many failures");
assertThatQueueHasSize(0);
assertThat(indexer.indexed).hasSize(10);
assertThat(indexer.called).hasSize(10 + 2 /* retries */);
}
|
public static void main(String[] args) {
var filterManager = new FilterManager();
filterManager.addFilter(new NameFilter());
filterManager.addFilter(new ContactFilter());
filterManager.addFilter(new AddressFilter());
filterManager.addFilter(new DepositFilter());
filterManager.addFilter(new OrderFilter());
var client = new Client();
client.setFilterManager(filterManager);
}
|
@Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
}
|
@Override
public double dot(SGDVector other) {
if (other.size() != size) {
throw new IllegalArgumentException("Can't dot two vectors of different lengths, this = " + size + ", other = " + other.size());
} else if (other instanceof SparseVector) {
double score = 0.0;
// If there are elements, calculate the dot product.
if ((other.numActiveElements() != 0) && (indices.length != 0)) {
Iterator<VectorTuple> itr = iterator();
Iterator<VectorTuple> otherItr = other.iterator();
VectorTuple tuple = itr.next();
VectorTuple otherTuple = otherItr.next();
while (itr.hasNext() && otherItr.hasNext()) {
if (tuple.index == otherTuple.index) {
score += tuple.value * otherTuple.value;
tuple = itr.next();
otherTuple = otherItr.next();
} else if (tuple.index < otherTuple.index) {
tuple = itr.next();
} else {
otherTuple = otherItr.next();
}
}
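// One of the iterators is exhausted at this point; drain the other one while
// checking the last pulled tuples for a final matching index.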
while (itr.hasNext()) {
if (tuple.index == otherTuple.index) {
score += tuple.value * otherTuple.value;
}
tuple = itr.next();
}
while (otherItr.hasNext()) {
if (tuple.index == otherTuple.index) {
score += tuple.value * otherTuple.value;
}
otherTuple = otherItr.next();
}
if (tuple.index == otherTuple.index) {
score += tuple.value * otherTuple.value;
}
}
return score;
} else if (other instanceof DenseVector) {
double score = 0.0;
for (int i = 0; i < indices.length; i++) {
score += other.get(indices[i]) * values[i];
}
return score;
} else {
throw new IllegalArgumentException("Unknown vector subclass " + other.getClass().getCanonicalName() + " for input");
}
}
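The merge above relies on both vectors exposing their active indices in ascending order. A minimal standalone sketch of the same two-pointer idea over plain sorted arrays (a hypothetical helper, not part of SparseVector) could look like this:

static double sparseDot(int[] aIdx, double[] aVal, int[] bIdx, double[] bVal) {
    double score = 0.0;
    int i = 0;
    int j = 0;
    // Advance whichever pointer has the smaller index; accumulate when indices match.
    while (i < aIdx.length && j < bIdx.length) {
        if (aIdx[i] == bIdx[j]) {
            score += aVal[i] * bVal[j];
            i++;
            j++;
        } else if (aIdx[i] < bIdx[j]) {
            i++;
        } else {
            j++;
        }
    }
    return score;
}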
|
@Test
public void emptyDot() {
SparseVector a = generateVectorA();
SparseVector b = generateVectorB();
SparseVector c = generateVectorC();
SparseVector empty = generateEmptyVector();
assertEquals(a.dot(empty),empty.dot(a),1e-10);
assertEquals(0.0, a.dot(empty),1e-10);
assertEquals(b.dot(empty),empty.dot(b),1e-10);
assertEquals(0.0, b.dot(empty),1e-10);
assertEquals(c.dot(empty),empty.dot(c),1e-10);
assertEquals(0.0, c.dot(empty),1e-10);
}
|
private long getLastInsertId(final Collection<UpdateResult> updateResults, final Collection<Comparable<?>> autoIncrementGeneratedValues) {
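// Collect the positive last-insert ids reported by the update results together with any
// numeric auto-increment generated values, then return the smallest of them (0 when none).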
List<Long> lastInsertIds = new ArrayList<>(updateResults.size() + autoIncrementGeneratedValues.size());
for (UpdateResult each : updateResults) {
if (each.getLastInsertId() > 0L) {
lastInsertIds.add(each.getLastInsertId());
}
}
for (Comparable<?> each : autoIncrementGeneratedValues) {
if (each instanceof Number) {
lastInsertIds.add(((Number) each).longValue());
}
}
return lastInsertIds.isEmpty() ? 0L : getMinLastInsertId(lastInsertIds);
}
|
@Test
void assertGetLastInsertIdWhenExecuteResultIsNotEmpty() {
UpdateResponseHeader actual = new UpdateResponseHeader(mock(SQLStatement.class), createExecuteUpdateResults());
assertThat(actual.getLastInsertId(), is(2L));
}
|
@Override
public Stream<HoodieBaseFile> getLatestBaseFiles(String partitionPath) {
return execute(partitionPath, preferredView::getLatestBaseFiles, (path) -> getSecondaryView().getLatestBaseFiles(path));
}
|
@Test
public void testGetLatestBaseFiles() {
Stream<HoodieBaseFile> actual;
Stream<HoodieBaseFile> expected = testBaseFileStream;
when(primary.getLatestBaseFiles()).thenReturn(testBaseFileStream);
actual = fsView.getLatestBaseFiles();
assertEquals(expected, actual);
verify(secondaryViewSupplier, never()).get();
resetMocks();
when(secondaryViewSupplier.get()).thenReturn(secondary);
when(primary.getLatestBaseFiles()).thenThrow(new RuntimeException());
when(secondary.getLatestBaseFiles()).thenReturn(testBaseFileStream);
actual = fsView.getLatestBaseFiles();
assertEquals(expected, actual);
resetMocks();
when(secondary.getLatestBaseFiles()).thenReturn(testBaseFileStream);
actual = fsView.getLatestBaseFiles();
assertEquals(expected, actual);
resetMocks();
when(secondary.getLatestBaseFiles()).thenThrow(new RuntimeException());
assertThrows(RuntimeException.class, () -> {
fsView.getLatestBaseFiles();
});
}
|
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
}
|
@Test
void booleanFalseLiteral() {
String inputExpression = "false";
BaseNode bool = parse( inputExpression );
assertThat( bool).isInstanceOf(BooleanNode.class);
assertThat( bool.getResultType()).isEqualTo(BuiltInType.BOOLEAN);
assertLocation( inputExpression, bool );
}
|
public final void containsNoneOf(
@Nullable Object firstExcluded,
@Nullable Object secondExcluded,
@Nullable Object @Nullable ... restOfExcluded) {
containsNoneIn(accumulate(firstExcluded, secondExcluded, restOfExcluded));
}
|
@Test
public void iterableContainsNoneOfFailureWithDuplicateInExpected() {
expectFailureWhenTestingThat(asList(1, 2, 3)).containsNoneOf(1, 2, 2, 4);
assertFailureValue("but contained", "[1, 2]");
}
|
@Bean
public ShenyuPlugin resilience4JPlugin() {
return new Resilience4JPlugin(new CombinedExecutor(), new RateLimiterExecutor());
}
|
@Test
public void testResilience4JPlugin() {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(Resilience4JPluginConfiguration.class))
.withBean(Resilience4JPluginConfigurationTest.class)
.withPropertyValues("debug=true")
.run(context -> {
Resilience4JPlugin plugin = context.getBean("resilience4JPlugin", Resilience4JPlugin.class);
assertNotNull(plugin);
assertThat(plugin.named()).isEqualTo(PluginEnum.RESILIENCE4J.getName());
});
}
|
@Override
public Flux<BooleanResponse<RenameCommand>> rename(Publisher<RenameCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getNewKey(), "New name must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
byte[] newKeyBuf = toByteArray(command.getNewKey());
if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) {
return super.rename(commands);
}
return read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf)
.filter(Objects::nonNull)
.zipWith(
Mono.defer(() -> pTtl(command.getKey())
.filter(Objects::nonNull)
.map(ttl -> Math.max(0, ttl))
.switchIfEmpty(Mono.just(0L))
)
)
.flatMap(valueAndTtl -> {
return write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1());
})
.thenReturn(new BooleanResponse<>(command, true))
.doOnSuccess((ignored) -> del(command.getKey()));
});
}
|
@Test
public void testRename_keyNotExist() {
Integer originalSlot = getSlotForKey(originalKey);
newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot));
if (sameSlot) {
// This is a quirk of the implementation - since same-slot renames use the non-cluster version,
// the result is a Redis error. This behavior matches other spring-data-redis implementations
assertThatThrownBy(() -> connection.keyCommands().rename(originalKey, newKey).block())
.isInstanceOf(RedisSystemException.class);
} else {
Boolean response = connection.keyCommands().rename(originalKey, newKey).block();
assertThat(response).isTrue();
final ByteBuffer newKeyValue = connection.stringCommands().get(newKey).block();
assertThat(newKeyValue).isEqualTo(null);
}
}
|
@SuppressWarnings("unused") // Part of required API.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
}
|
@Test
public void shouldBuildCorrectSerde() {
// Given:
final ConfiguredStatement<InsertValues> statement = givenInsertValues(
valueColumnNames(SCHEMA),
ImmutableList.of(
new StringLiteral("str"),
new LongLiteral(2L)
)
);
// When:
executor.execute(statement, mock(SessionProperties.class), engine, serviceContext);
// Then:
verify(keySerdeFactory).create(
FormatInfo.of(FormatFactory.KAFKA.name()),
PersistenceSchema.from(SCHEMA.key(), SerdeFeatures.of()),
new KsqlConfig(ImmutableMap.of()),
srClientFactory,
"",
NoopProcessingLogContext.INSTANCE,
Optional.empty()
);
verify(valueSerdeFactory).create(
FormatInfo.of(FormatFactory.JSON.name()),
PersistenceSchema.from(SCHEMA.value(), SerdeFeatures.of()),
new KsqlConfig(ImmutableMap.of()),
srClientFactory,
"",
NoopProcessingLogContext.INSTANCE,
Optional.empty()
);
}
|
public void run() {
try {
InputStreamReader isr = new InputStreamReader( this.is );
BufferedReader br = new BufferedReader( isr );
String line = null;
while ( ( line = br.readLine() ) != null ) {
String logEntry = this.type + " " + line;
switch ( this.logLevel ) {
case MINIMAL:
log.logMinimal( logEntry );
break;
case BASIC:
log.logBasic( logEntry );
break;
case DETAILED:
log.logDetailed( logEntry );
break;
case DEBUG:
log.logDebug( logEntry );
break;
case ROWLEVEL:
log.logRowlevel( logEntry );
break;
case ERROR:
log.logError( logEntry );
break;
default: // NONE
break;
}
}
} catch ( IOException ioe ) {
if ( log.isError() ) {
log.logError( this.type + " " + Const.getStackTracker( ioe ) );
}
}
}
|
@Test
public void testLogBasic() {
streamLogger = new ConfigurableStreamLogger( log, is, LogLevel.BASIC, PREFIX );
streamLogger.run();
Mockito.verify( log ).logBasic( OUT1 );
Mockito.verify( log ).logBasic( OUT2 );
}
|
public void setDerbyOpsEnabled(boolean derbyOpsEnabled) {
this.derbyOpsEnabled = derbyOpsEnabled;
}
|
@Test
void testSetDerbyOpsEnabled() {
assertFalse(commonConfig.isDerbyOpsEnabled());
commonConfig.setDerbyOpsEnabled(true);
assertTrue(commonConfig.isDerbyOpsEnabled());
}
|
public static MockRepositoryImporter getMockRepositoryImporter(File mockRepository,
ReferenceResolver referenceResolver) throws IOException {
MockRepositoryImporter importer = null;
// Analyse first lines of file content to guess repository type.
String line = null;
try (BufferedReader reader = Files.newBufferedReader(mockRepository.toPath(), StandardCharsets.UTF_8)) {
while ((line = reader.readLine()) != null && importer == null) {
line = line.trim();
// Check with basic Postman formats...
importer = checkPostmanImporters(line, mockRepository);
// Then try OpenAPI related ones...
if (importer == null) {
importer = checkOpenAPIImporters(line, mockRepository, referenceResolver);
}
// Then try any other supported importers.
if (importer == null) {
importer = checkOtherImporters(line, mockRepository, referenceResolver);
}
}
}
// Otherwise, default to SoapUI project importer implementation.
if (importer == null) {
log.info("Have not found any explicit marker so applying the default SoapUI Project importer...");
importer = new SoapUIProjectImporter(mockRepository.getPath());
}
return importer;
}
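The factory above sniffs the first lines of the file for format-specific markers before falling back to the SoapUI importer. A simplified, self-contained sketch of that detection pattern (the marker strings below are illustrative assumptions, not the exact ones Microcks uses):

static String guessFormat(java.io.File file) throws java.io.IOException {
    try (java.io.BufferedReader reader = java.nio.file.Files.newBufferedReader(file.toPath())) {
        String line;
        while ((line = reader.readLine()) != null) {
            line = line.trim();
            // First marker wins; ordering matters when formats share keywords.
            if (line.contains("_postman_id")) { return "postman"; }
            if (line.contains("openapi")) { return "openapi"; }
            if (line.contains("asyncapi")) { return "asyncapi"; }
            if (line.contains("swagger")) { return "swagger"; }
        }
    }
    return "soapui"; // no marker found, fall back to the default importer
}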
|
@Test
void testGetMockRepositoryImporter() {
// Load a SoapUI file.
File soapUIProject = new File("../samples/HelloService-soapui-project.xml");
MockRepositoryImporter importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(soapUIProject, null);
} catch (Throwable t) {
fail("Getting importer for SoapUI should not fail!");
}
assertTrue(importer instanceof SoapUIProjectImporter);
// Load a Postman file.
File postmanCollection = new File("../samples/PetstoreAPI-collection.json");
importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(postmanCollection, null);
} catch (Throwable t) {
fail("Getting importer for Postman should not fail!");
}
assertTrue(importer instanceof PostmanCollectionImporter);
// Load a Postman Workspace file.
File postmanWorkspaceCollection = new File(
"target/test-classes/io/github/microcks/util/postman/Swagger Petstore.postman_workspace_collection-2.1.json");
importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(postmanWorkspaceCollection, null);
} catch (Throwable t) {
fail("Getting importer for Postman Workspace should not fail!");
}
assertTrue(importer instanceof PostmanWorkspaceCollectionImporter);
// Load an OpenAPI YAML file.
importer = null;
File openAPISpec = new File("target/test-classes/io/github/microcks/util/openapi/cars-openapi.yaml");
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(openAPISpec, null);
} catch (Throwable t) {
fail("Getting importer for OpenAPI YAML should not fail!");
}
assertTrue(importer instanceof OpenAPIImporter);
// Load an OpenAPI JSON file.
openAPISpec = new File("target/test-classes/io/github/microcks/util/openapi/cars-openapi.json");
importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(openAPISpec, null);
} catch (Throwable t) {
fail("Getting importer for OpenAPI JSON should not fail!");
}
assertTrue(importer instanceof OpenAPIImporter);
// Load an OpenAPI JSON oneliner file.
openAPISpec = new File("target/test-classes/io/github/microcks/util/openapi/openapi-oneliner.json");
importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(openAPISpec, null);
} catch (Throwable t) {
fail("Getting importer for OpenAPI JSON oneliner should not fail!");
}
assertTrue(importer instanceof OpenAPIImporter);
// Load an AsyncAPI JSON file.
File asyncAPISpec = new File("target/test-classes/io/github/microcks/util/asyncapi/user-signedup-asyncapi.json");
importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(asyncAPISpec, null);
} catch (Throwable t) {
fail("Getting importer for AsyncAPI JSON should not fail!");
}
assertTrue(importer instanceof AsyncAPIImporter);
// Load an AsyncAPI JSON oneliner file.
asyncAPISpec = new File(
"target/test-classes/io/github/microcks/util/asyncapi/user-signedup-asyncapi-oneliner.json");
importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(asyncAPISpec, null);
} catch (Throwable t) {
fail("Getting importer for AsyncAPI JSON oneliner should not fail!");
}
assertTrue(importer instanceof AsyncAPIImporter);
// Load an AsyncAPI YAML file.
asyncAPISpec = new File("target/test-classes/io/github/microcks/util/asyncapi/user-signedup-asyncapi.yaml");
importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(asyncAPISpec, null);
} catch (Throwable t) {
fail("Getting importer for AsyncAPI YAML should not fail!");
}
assertTrue(importer instanceof AsyncAPIImporter);
// Load an AsyncAPI v3 YAML file.
asyncAPISpec = new File("target/test-classes/io/github/microcks/util/asyncapi/user-signedup-asyncapi-3.0.yaml");
importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(asyncAPISpec, null);
} catch (Throwable t) {
fail("Getting importer for AsyncAPI v3 YAML should not fail!");
}
assertTrue(importer instanceof AsyncAPI3Importer);
// Load a Protobuf schema file.
File protobufSchema = new File("target/test-classes/io/github/microcks/util/grpc/hello-v1.proto");
importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(protobufSchema, null);
} catch (Throwable t) {
fail("Getting importer for Protobuf should not fail!");
}
assertTrue(importer instanceof ProtobufImporter);
// Load an APIMetadata file.
File apiMetadata = new File("target/test-classes/io/github/microcks/util/metadata/hello-grpc-v1-metadata.yml");
importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(apiMetadata, null);
} catch (Throwable t) {
fail("Getting importer for APIMetadata should not fail!");
}
assertTrue(importer instanceof MetadataImporter);
// Load a GraphQL schema file.
File graphQLSchema = new File("target/test-classes/io/github/microcks/util/graphql/films.graphql");
importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(graphQLSchema, null);
} catch (Throwable t) {
fail("Getting importer for GraphQL should not fail!");
}
assertTrue(importer instanceof GraphQLImporter);
// Load a Swagger v2 YAML file.
File swaggerSpec = new File("target/test-classes/io/github/microcks/util/openapi/beer-catalog-api-swagger.yaml");
importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(swaggerSpec, null);
} catch (Throwable t) {
fail("Getting importer for Swagger v2 YAML should not fail!");
}
assertTrue(importer instanceof SwaggerImporter);
// Load a Swagger v2 JSON file.
swaggerSpec = new File("target/test-classes/io/github/microcks/util/openapi/beer-catalog-api-swagger.json");
importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(swaggerSpec, null);
} catch (Throwable t) {
fail("Getting importer for Swagger v2 JSON should not fail!");
}
assertTrue(importer instanceof SwaggerImporter);
// Load a HAR JSON file.
File harFile = new File("target/test-classes/io/github/microcks/util/har/api-pastries-0.0.1.har");
importer = null;
try {
importer = MockRepositoryImporterFactory.getMockRepositoryImporter(harFile, null);
} catch (IOException ioe) {
fail("Getting importer for HAR JSON file should not fail!");
}
assertTrue(importer instanceof HARImporter);
}
|
void runOnce() {
if (transactionManager != null) {
try {
transactionManager.maybeResolveSequences();
RuntimeException lastError = transactionManager.lastError();
// do not continue sending if the transaction manager is in a failed state
if (transactionManager.hasFatalError()) {
if (lastError != null)
maybeAbortBatches(lastError);
client.poll(retryBackoffMs, time.milliseconds());
return;
}
if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
return;
}
// Check whether we need a new producerId. If so, we will enqueue an InitProducerId
// request which will be sent below
transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
if (maybeSendAndPollTransactionalRequest()) {
return;
}
} catch (AuthenticationException e) {
// This is already logged as error, but propagated here to perform any clean ups.
log.trace("Authentication exception while processing transactional request", e);
transactionManager.authenticationFailed(e);
}
}
long currentTimeMs = time.milliseconds();
long pollTimeout = sendProducerData(currentTimeMs);
client.poll(pollTimeout, currentTimeMs);
}
|
@Test
public void testEpochBumpOnOutOfOrderSequenceForNextBatchWhenThereIsNoBatchInFlight() throws Exception {
// Verify that partitions without in-flight batches have their sequence numbers
// reset correctly when the producer epoch is bumped.
final long producerId = 343434L;
TransactionManager transactionManager = createTransactionManager();
setupWithTransactionState(transactionManager);
// Init producer id/epoch
prepareAndReceiveInitProducerId(producerId, Errors.NONE);
assertEquals(producerId, transactionManager.producerIdAndEpoch().producerId);
assertEquals(0, transactionManager.producerIdAndEpoch().epoch);
// Partition 0 - Send first batch
appendToAccumulator(tp0);
sender.runOnce();
// Partition 0 - State is lazily initialized
assertPartitionState(transactionManager, tp0, producerId, (short) 0, 1, OptionalInt.empty());
// Partition 0 - Successful response
sendIdempotentProducerResponse(0, 0, tp0, Errors.NONE, 0, -1);
sender.runOnce();
// Partition 0 - Last ack is updated
assertPartitionState(transactionManager, tp0, producerId, (short) 0, 1, OptionalInt.of(0));
// Partition 1 - Send first batch
appendToAccumulator(tp1);
sender.runOnce();
// Partition 1 - State is lazily initialized
assertPartitionState(transactionManager, tp1, producerId, (short) 0, 1, OptionalInt.empty());
// Partition 1 - Successful response
sendIdempotentProducerResponse(0, 0, tp1, Errors.NONE, 0, -1);
sender.runOnce();
// Partition 1 - Last ack is updated
assertPartitionState(transactionManager, tp1, producerId, (short) 0, 1, OptionalInt.of(0));
// Partition 0 - Send second batch
appendToAccumulator(tp0);
sender.runOnce();
// Partition 0 - Sequence is incremented
assertPartitionState(transactionManager, tp0, producerId, (short) 0, 2, OptionalInt.of(0));
// Partition 0 - Failed response with OUT_OF_ORDER_SEQUENCE_NUMBER
sendIdempotentProducerResponse(0, 1, tp0, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, -1, -1);
sender.runOnce(); // Receive
sender.runOnce(); // Bump epoch & Retry
// Producer epoch is bumped
assertEquals(1, transactionManager.producerIdAndEpoch().epoch);
// Partition 0 - State is reset to current producer epoch
assertPartitionState(transactionManager, tp0, producerId, (short) 1, 1, OptionalInt.empty());
// Partition 1 - State is not changed
assertPartitionState(transactionManager, tp1, producerId, (short) 0, 1, OptionalInt.of(0));
assertTrue(transactionManager.hasStaleProducerIdAndEpoch(tp1));
// Partition 0 - Successful Response
sendIdempotentProducerResponse(1, 0, tp0, Errors.NONE, 1, -1);
sender.runOnce();
// Partition 0 - Last ack is updated
assertPartitionState(transactionManager, tp0, producerId, (short) 1, 1, OptionalInt.of(0));
// Partition 1 - Send second batch
appendToAccumulator(tp1);
sender.runOnce();
// Partition 1 - Epoch is bumped, sequence is reset and incremented
assertPartitionState(transactionManager, tp1, producerId, (short) 1, 1, OptionalInt.empty());
assertFalse(transactionManager.hasStaleProducerIdAndEpoch(tp1));
// Partition 1 - Successful Response
sendIdempotentProducerResponse(1, 0, tp1, Errors.NONE, 1, -1);
sender.runOnce();
// Partition 1 - Last ack is updated
assertPartitionState(transactionManager, tp1, producerId, (short) 1, 1, OptionalInt.of(0));
}
|
@Override
@CacheEvict(cacheNames = RedisKeyConstants.OAUTH_CLIENT,
allEntries = true) // allEntries evicts the whole cache, because the id is not the direct cache key and targeted eviction is impractical
public void deleteOAuth2Client(Long id) {
// Validate that the client exists
validateOAuth2ClientExists(id);
// Delete it
oauth2ClientMapper.deleteById(id);
}
|
@Test
public void testDeleteOAuth2Client_notExists() {
// Prepare parameters
Long id = randomLongId();
// Invoke and assert that the expected exception is thrown
assertServiceException(() -> oauth2ClientService.deleteOAuth2Client(id), OAUTH2_CLIENT_NOT_EXISTS);
}
|
public MessageReceiptHandle removeReceiptHandle(ProxyContext context, Channel channel, String group, String msgID, String receiptHandle) {
ReceiptHandleGroup handleGroup = receiptHandleGroupMap.get(new ReceiptHandleGroupKey(channel, group));
if (handleGroup == null) {
return null;
}
return handleGroup.remove(msgID, receiptHandle);
}
|
@Test
public void testRemoveReceiptHandle() {
Channel channel = PROXY_CONTEXT.getVal(ContextVariable.CHANNEL);
receiptHandleManager.addReceiptHandle(PROXY_CONTEXT, channel, GROUP, MSG_ID, messageReceiptHandle);
receiptHandleManager.removeReceiptHandle(PROXY_CONTEXT, channel, GROUP, MSG_ID, receiptHandle);
SubscriptionGroupConfig groupConfig = new SubscriptionGroupConfig();
Mockito.when(metadataService.getSubscriptionGroupConfig(Mockito.any(), Mockito.eq(GROUP))).thenReturn(groupConfig);
receiptHandleManager.scheduleRenewTask();
Mockito.verify(messagingProcessor, Mockito.timeout(1000).times(0))
.changeInvisibleTime(Mockito.any(ProxyContext.class), Mockito.any(ReceiptHandle.class), Mockito.anyString(),
Mockito.anyString(), Mockito.anyString(), Mockito.anyLong());
}
|
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof ListOptions)) {
return false;
}
ListOptions that = (ListOptions) o;
return Objects.equal(mRecursive, that.mRecursive);
}
|
@Test
public void equalsTest() throws Exception {
CommonUtils.testEquals(ListOptions.class);
}
|
public Grok cachedGrokForPattern(String pattern) {
return cachedGrokForPattern(pattern, false);
}
|
@Test
public void cachedGrokForPattern() {
final Grok grok = grokPatternRegistry.cachedGrokForPattern("%{TESTNUM}");
assertThat(grok.getPatterns()).containsEntry(GROK_PATTERN.name(), GROK_PATTERN.pattern());
}
|
public static byte zoomForBounds(Dimension dimension, BoundingBox boundingBox, int tileSize) {
long mapSize = MercatorProjection.getMapSize((byte) 0, tileSize);
double pixelXMax = MercatorProjection.longitudeToPixelX(boundingBox.maxLongitude, mapSize);
double pixelXMin = MercatorProjection.longitudeToPixelX(boundingBox.minLongitude, mapSize);
double zoomX = -Math.log(Math.abs(pixelXMax - pixelXMin) / dimension.width) / LOG_2;
double pixelYMax = MercatorProjection.latitudeToPixelY(boundingBox.maxLatitude, mapSize);
double pixelYMin = MercatorProjection.latitudeToPixelY(boundingBox.minLatitude, mapSize);
double zoomY = -Math.log(Math.abs(pixelYMax - pixelYMin) / dimension.height) / LOG_2;
double zoom = Math.floor(Math.min(zoomX, zoomY));
if (zoom < 0) {
return 0;
}
if (zoom > Byte.MAX_VALUE) {
return Byte.MAX_VALUE;
}
return (byte) zoom;
}
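For reference, the same computation can be written as a self-contained sketch using the standard Web Mercator pixel formulas (assumed here for illustration; the real code delegates to MercatorProjection): the pixel span of the bounding box at zoom 0 is compared to the viewport, each zoom level doubles the span, so the result is floor(min(zoomX, zoomY)) clamped to [0, Byte.MAX_VALUE].

static byte zoomForBoundsSketch(double minLat, double minLon, double maxLat, double maxLon,
                                int width, int height, int tileSize) {
    double mapSize = tileSize; // map size in pixels at zoom level 0
    double pixelXMin = (minLon + 180) / 360 * mapSize;
    double pixelXMax = (maxLon + 180) / 360 * mapSize;
    double pixelYMin = latitudeToPixelY(maxLat, mapSize);
    double pixelYMax = latitudeToPixelY(minLat, mapSize);
    double zoomX = -Math.log(Math.abs(pixelXMax - pixelXMin) / width) / Math.log(2);
    double zoomY = -Math.log(Math.abs(pixelYMax - pixelYMin) / height) / Math.log(2);
    double zoom = Math.floor(Math.min(zoomX, zoomY));
    if (zoom < 0) {
        return 0;
    }
    return (byte) Math.min(zoom, Byte.MAX_VALUE);
}

static double latitudeToPixelY(double latitude, double mapSize) {
    double sinLat = Math.sin(Math.toRadians(latitude));
    return (0.5 - Math.log((1 + sinLat) / (1 - sinLat)) / (4 * Math.PI)) * mapSize;
}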
|
@Test
public void zoomForBoundsTest() {
// TODO rewrite this unit test to make it easier to understand
Dimension[] dimensions = {new Dimension(200, 300), new Dimension(500, 400), new Dimension(1000, 600),
new Dimension(3280, 1780), new Dimension(100, 200), new Dimension(500, 200)};
BoundingBox[] boundingBoxes = {new BoundingBox(12.2, 0, 34.3, 120), new BoundingBox(-30, 20, 30, 30),
new BoundingBox(20.3, 100, 30.4, 120), new BoundingBox(4.4, 2, 4.5, 2.2),
new BoundingBox(50.43, 12.23, 50.44, 12.24), new BoundingBox(50.43, 12, 50.44, 40)};
int[] tileSizes = {256, 512, 500, 620, 451};
byte[] results = {1, 0, 0, 0, 0, 2, 1, 1, 1, 1, 3, 2, 2, 2, 2, 10, 9, 9, 9, 9, 14, 13, 13, 13, 13, 3, 2,
2, 2, 2, 2, 1, 1, 1, 1, 3, 2, 2, 1, 2, 5, 4, 4, 3, 4, 11, 10, 10, 10, 10, 15, 14, 14, 13, 14, 4, 3, 3,
3, 3, 3, 2, 2, 2, 2, 3, 2, 2, 2, 2, 6, 5, 5, 4, 5, 12, 11, 11, 11, 11, 15, 14, 14, 14, 14, 5, 4, 4, 4,
4, 5, 4, 4, 3, 4, 5, 4, 4, 4, 4, 7, 6, 6, 6, 6, 14, 13, 13, 13, 13, 17, 16, 16, 16, 16, 7, 6, 6, 6,
6, 0, 0, 0, 0, 0, 2, 1, 1, 0, 1, 2, 1, 1, 1, 1, 9, 8, 8, 8, 8, 13, 12, 12, 12, 12, 2, 1, 1, 1, 1,
2, 1, 1, 1, 1, 2, 1, 1, 0, 1, 4, 3, 3, 3, 3, 11, 10, 10, 10, 10, 14, 13, 13, 12, 13, 4, 3, 3, 3, 3};
int i = 0;
for (Dimension dimension : dimensions) {
for (BoundingBox boundingBox : boundingBoxes) {
for (int tileSize : tileSizes) {
Assert.assertEquals(results[i], LatLongUtils.zoomForBounds(dimension, boundingBox, tileSize));
++i;
}
}
}
}
|
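// Returns true when the metadata table is newly created, false when it already existed
// (in which case any missing column families are added to the existing table).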
public boolean createMetadataTable() {
GCRules.GCRule gcRules = GCRules.GCRULES.maxVersions(1);
if (tableAdminClient.exists(tableId)) {
Table table = tableAdminClient.getTable(tableId);
List<ColumnFamily> currentCFs = table.getColumnFamilies();
ModifyColumnFamiliesRequest request = ModifyColumnFamiliesRequest.of(tableId);
boolean needsNewColumnFamily = false;
for (String targetCF : COLUMN_FAMILIES) {
boolean exists = false;
for (ColumnFamily currentCF : currentCFs) {
if (targetCF.equals(currentCF.getId())) {
exists = true;
break;
}
}
if (!exists) {
needsNewColumnFamily = true;
request.addFamily(targetCF, gcRules);
}
}
if (needsNewColumnFamily) {
tableAdminClient.modifyFamilies(request);
}
return false;
}
CreateTableRequest createTableRequest = CreateTableRequest.of(tableId);
for (String cf : COLUMN_FAMILIES) {
createTableRequest.addFamily(cf, gcRules);
}
tableAdminClient.createTable(createTableRequest);
return true;
}
|
@Test
public void testNewColumnFamiliesAreAddedInExistingTable() {
CreateTableRequest createTableRequest = CreateTableRequest.of(tableId);
tableAdminClient.createTable(createTableRequest);
Table table = tableAdminClient.getTable(tableId);
assertEquals(0, table.getColumnFamilies().size());
assertFalse(metadataTableAdminDao.createMetadataTable());
table = tableAdminClient.getTable(tableId);
assertEquals(COLUMN_FAMILIES.size(), table.getColumnFamilies().size());
assertThat(
table.getColumnFamilies().stream().map(ColumnFamily::getId).collect(Collectors.toList()),
Matchers.containsInAnyOrder(COLUMN_FAMILIES.toArray()));
}
|
@Override
public void setParameters(Collection<CompoundVariable> parameters) throws InvalidVariableException {
checkParameterCount(parameters, MIN_PARAMETER_COUNT, MAX_PARAMETER_COUNT);
values = parameters.toArray(new CompoundVariable[parameters.size()]);
}
|
@Test
public void testChangeCaseError() throws Exception {
assertThrows(
InvalidVariableException.class,
() -> changeCase.setParameters(new ArrayList<>()));
}
|
@Override
public AccessPathStore<V> leastUpperBound(AccessPathStore<V> other) {
ImmutableMap.Builder<AccessPath, V> resultHeap = ImmutableMap.builder();
for (AccessPath aPath : intersection(heap().keySet(), other.heap().keySet())) {
resultHeap.put(aPath, heap().get(aPath).leastUpperBound(other.heap().get(aPath)));
}
return AccessPathStore.create(resultHeap.buildOrThrow());
}
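A generic sketch of the same pointwise least-upper-bound over maps (a hypothetical helper, written with fully qualified java.util types so it is self-contained): only keys present in both stores survive, and their two values are joined.

static <K, V> java.util.Map<K, V> leastUpperBound(java.util.Map<K, V> a,
                                                  java.util.Map<K, V> b,
                                                  java.util.function.BinaryOperator<V> join) {
    java.util.Map<K, V> result = new java.util.HashMap<>();
    for (java.util.Map.Entry<K, V> entry : a.entrySet()) {
        V other = b.get(entry.getKey());
        // Keep only keys known to both sides; join the two values.
        if (other != null) {
            result.put(entry.getKey(), join.apply(entry.getValue(), other));
        }
    }
    return result;
}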
|
@Test
public void leastUpperBoundEmpty() {
assertThat(newStore().leastUpperBound(newStore())).isEqualTo(newStore());
}
|
public static String getKey(String dataId, String group) {
return getKey(dataId, group, "");
}
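Based on the expectation in the test below, the composite key joins dataId and group with a '+' separator and appends the tenant only when it is non-empty. A simplified sketch of that assembly (the real implementation may additionally escape special characters such as '+' and '%'):

static String buildKeySketch(String dataId, String group, String tenant) {
    StringBuilder sb = new StringBuilder();
    sb.append(dataId).append('+').append(group);
    if (tenant != null && !tenant.isEmpty()) {
        sb.append('+').append(tenant);
    }
    return sb.toString();
}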
|
@Test
public void testGetKey() {
String dataId = "dataId";
String group = "group";
String expected = "dataId+group";
String key = GroupKey.getKey(dataId, group);
Assert.isTrue(key.equals(expected));
}
|
@GET
@Path("/{entityType}/{entityId}")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8
/* , MediaType.APPLICATION_XML */})
public TimelineEntity getEntity(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("entityType") String entityType,
@PathParam("entityId") String entityId,
@QueryParam("fields") String fields) {
init(res);
TimelineEntity entity = null;
try {
entity = timelineDataManager.getEntity(
parseStr(entityType),
parseStr(entityId),
parseFieldsStr(fields, ","),
getUser(req));
} catch (YarnException e) {
// The user doesn't have the access to override the existing domain.
LOG.info(e.getMessage(), e);
throw new ForbiddenException(e);
} catch (IllegalArgumentException e) {
throw new BadRequestException(e);
} catch (Exception e) {
LOG.error("Error getting entity", e);
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
}
if (entity == null) {
throw new NotFoundException("Timeline entity "
+ new EntityIdentifier(parseStr(entityId), parseStr(entityType))
+ " is not found");
}
return entity;
}
|
@Test
void testGetEventsWithYarnACLsEnabled() {
AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
// Put entity [5, 5] in domain 1
TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity();
entity.setEntityId("test id 5");
entity.setEntityType("test type 5");
entity.setStartTime(System.currentTimeMillis());
entity.setDomainId("domain_id_1");
TimelineEvent event = new TimelineEvent();
event.setEventType("event type 1");
event.setTimestamp(System.currentTimeMillis());
entity.addEvent(event);
entities.addEntity(entity);
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "writer_user_1")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
response.getType().toString());
TimelinePutResponse putResponse =
response.getEntity(TimelinePutResponse.class);
assertEquals(0, putResponse.getErrors().size());
// Put entity [5, 6] in domain 2
entities = new TimelineEntities();
entity = new TimelineEntity();
entity.setEntityId("test id 6");
entity.setEntityType("test type 5");
entity.setStartTime(System.currentTimeMillis());
entity.setDomainId("domain_id_2");
event = new TimelineEvent();
event.setEventType("event type 2");
event.setTimestamp(System.currentTimeMillis());
entity.addEvent(event);
entities.addEntity(entity);
r = resource();
response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "writer_user_3")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
response.getType().toString());
putResponse = response.getEntity(TimelinePutResponse.class);
assertEquals(0, putResponse.getErrors().size());
// Query events belonging to the entities of type 5
response = r.path("ws").path("v1").path("timeline")
.path("test type 5").path("events")
.queryParam("user.name", "reader_user_1")
.queryParam("entityId", "test id 5,test id 6")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
response.getType().toString());
TimelineEvents events = response.getEntity(TimelineEvents.class);
// Reader 1 should only have access to the events of entity [5, 5]
assertEquals(1, events.getAllEvents().size());
assertEquals("test id 5", events.getAllEvents().get(0).getEntityId());
} finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
|
@SuppressWarnings("unchecked")
void openDB(final Map<String, Object> configs, final File stateDir) {
// initialize the default rocksdb options
final DBOptions dbOptions = new DBOptions();
final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
userSpecifiedOptions = new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(dbOptions, columnFamilyOptions);
final BlockBasedTableConfigWithAccessibleCache tableConfig = new BlockBasedTableConfigWithAccessibleCache();
cache = new LRUCache(BLOCK_CACHE_SIZE);
tableConfig.setBlockCache(cache);
tableConfig.setBlockSize(BLOCK_SIZE);
filter = new BloomFilter();
tableConfig.setFilterPolicy(filter);
userSpecifiedOptions.optimizeFiltersForHits();
userSpecifiedOptions.setTableFormatConfig(tableConfig);
userSpecifiedOptions.setWriteBufferSize(WRITE_BUFFER_SIZE);
userSpecifiedOptions.setCompressionType(COMPRESSION_TYPE);
userSpecifiedOptions.setCompactionStyle(COMPACTION_STYLE);
userSpecifiedOptions.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
userSpecifiedOptions.setCreateIfMissing(true);
userSpecifiedOptions.setErrorIfExists(false);
userSpecifiedOptions.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
// this is the recommended way to increase parallelism in RocksDb
// note that the current implementation of setIncreaseParallelism affects the number
// of compaction threads but not flush threads (the latter remains one). Also,
// the parallelism value needs to be at least two because of the code in
// https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
// subtracts one from the value passed to determine the number of compaction threads
// (this could be a bug in the RocksDB code and their devs have been contacted).
userSpecifiedOptions.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2));
wOptions = new WriteOptions();
wOptions.setDisableWAL(true);
fOptions = new FlushOptions();
fOptions.setWaitForFlush(true);
final Class<RocksDBConfigSetter> configSetterClass =
(Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
if (configSetterClass != null) {
configSetter = Utils.newInstance(configSetterClass);
configSetter.setConfig(name, userSpecifiedOptions, configs);
}
dbDir = new File(new File(stateDir, parentDir), name);
try {
Files.createDirectories(dbDir.getParentFile().toPath());
Files.createDirectories(dbDir.getAbsoluteFile().toPath());
} catch (final IOException fatal) {
throw new ProcessorStateException(fatal);
}
// Setup statistics before the database is opened, otherwise the statistics are not updated
// with the measurements from Rocks DB
setupStatistics(configs, dbOptions);
openRocksDB(dbOptions, columnFamilyOptions);
dbAccessor = new DirectDBAccessor(db, fOptions, wOptions);
open = true;
addValueProvidersToMetricsRecorder();
}
|
@Test
public void shouldThrowProcessorStateExceptionOnOpeningReadOnlyDir() {
final File tmpDir = TestUtils.tempDirectory();
final InternalMockProcessorContext tmpContext = new InternalMockProcessorContext(tmpDir, new StreamsConfig(StreamsTestUtils.getStreamsConfig()));
assertTrue(tmpDir.setReadOnly());
assertThrows(ProcessorStateException.class, () -> rocksDBStore.openDB(tmpContext.appConfigs(), tmpContext.stateDir()));
}
|
@Override
public PollResult poll(long currentTimeMs) {
return pollInternal(
prepareFetchRequests(),
this::handleFetchSuccess,
this::handleFetchFailure
);
}
|
@Test
public void testMissingLeaderEpochInRecords() {
buildFetcher();
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
ByteBuffer buffer = ByteBuffer.allocate(1024);
MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V0,
Compression.NONE, TimestampType.CREATE_TIME, 0L, System.currentTimeMillis(),
RecordBatch.NO_PARTITION_LEADER_EPOCH);
builder.append(0L, "key".getBytes(), "1".getBytes());
builder.append(0L, "key".getBytes(), "2".getBytes());
MemoryRecords records = builder.build();
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
networkClientDelegate.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchRecords();
assertTrue(partitionRecords.containsKey(tp0));
assertEquals(2, partitionRecords.get(tp0).size());
for (ConsumerRecord<byte[], byte[]> record : partitionRecords.get(tp0)) {
assertEquals(Optional.empty(), record.leaderEpoch());
}
}
|
@Override
public BucketSpec apply(final Grouping grouping) {
return Values.builder()
.field(grouping.requestedField().name())
.type(Values.NAME)
.limit(grouping.limit())
.build();
}
|
@Test
void throwsNullPointerExceptionOnNullGrouping() {
assertThrows(NullPointerException.class, () -> toTest.apply(null));
}
|
@Override
public Committer closeForCommit() throws IOException {
lock();
try {
closeAndUploadPart();
return upload.snapshotAndGetCommitter();
} finally {
unlock();
}
}
|
@Test
public void noWritesShouldResolveInAnEmptyFile() throws IOException {
RecoverableFsDataOutputStream.Committer committer = streamUnderTest.closeForCommit();
committer.commit();
assertThat(multipartUploadUnderTest, hasContent(new byte[0]));
}
|
public String sendExport( String filename, String type, String load ) throws Exception {
// Request content will be retrieved directly from the input stream
try ( InputStream is = KettleVFS.getInputStream( KettleVFS.getFileObject( filename ) ) ) {
// Execute request
HttpPost method = buildSendExportMethod( type, load, is );
try {
return executeAuth( method );
} finally {
// Release current connection to the connection pool once you are done
method.releaseConnection();
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "SlaveServer.DETAILED_SentExportToService",
RegisterPackageServlet.CONTEXT_PATH, environmentSubstitute( hostname ) ) );
}
}
}
}
|
@Test( expected = NullPointerException.class )
public void testSendExport() throws Exception {
slaveServer.setHostname( "hostNameStub" );
slaveServer.setUsername( "userNAmeStub" );
HttpPost httpPostMock = mock( HttpPost.class );
URI uriMock = new URI( "fake" );
doReturn( uriMock ).when( httpPostMock ).getURI();
doReturn( httpPostMock ).when( slaveServer ).buildSendExportMethod( anyString(), anyString(), any(
InputStream.class ) );
File tempFile;
tempFile = File.createTempFile( "PDI-", "tmp" );
tempFile.deleteOnExit();
slaveServer.sendExport( tempFile.getAbsolutePath(), "", "" );
fail( "Incorrect connection details had been used, but no exception was thrown" );
}
|
public List<String> getHeadersAsList(String name) {
List<String> values = parent.headers().get(name);
        if (values == null) {
            return List.of();
        }
        return values;
}
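// Usage note (illustrative, not from the original source): because a missing header maps to
// List.of(), callers receive an empty immutable list instead of null and can iterate without a
// null check, e.g. for (String v : request.getHeadersAsList("x-absent-header")) { ... }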
|
@Test
void testGetHeadersAsList() {
URI uri = URI.create("http://localhost:8080/test");
HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1);
DiscFilterRequest request = new DiscFilterRequest(httpReq);
assertNotNull(request.getHeaderNamesAsList());
assertEquals(request.getHeaderNamesAsList().size(), 0);
httpReq.headers().add("header_1", "value1");
httpReq.headers().add("header_1", "value2");
assertEquals(request.getHeadersAsList("header_1").size(), 2);
}
|
@Override
public void fetchSegmentToLocal(URI downloadURI, File dest)
throws Exception {
    // Create a RoundRobinURIProvider to round-robin IP addresses when retrying the download. Otherwise we may always
    // retry against the same broken host because: 1) DNS may not round-robin the IP addresses 2) the OS caches the
    // DNS resolution result.
RoundRobinURIProvider uriProvider = new RoundRobinURIProvider(List.of(downloadURI), true);
int retryCount = getRetryCount(uriProvider);
_logger.info("Retry downloading for {} times. retryCount from pinot server config: {}, number of IP addresses for "
+ "download URI: {}", retryCount, _retryCount, uriProvider.numAddresses());
RetryPolicies.exponentialBackoffRetryPolicy(retryCount, _retryWaitMs, _retryDelayScaleFactor).attempt(() -> {
URI uri = uriProvider.next();
try {
String hostName = downloadURI.getHost();
int port = downloadURI.getPort();
        // If the original download address is specified as a host name, we need to add a "HOST" HTTP header to the
        // HTTP request. Otherwise, if the download address is a load-balancer address and the LB is configured to
        // disallow direct access by IP address, the download will fail.
List<Header> httpHeaders = new LinkedList<>();
if (!InetAddresses.isInetAddress(hostName)) {
httpHeaders.add(new BasicHeader(HttpHeaders.HOST, hostName + ":" + port));
}
int statusCode = _httpClient.downloadFile(uri, dest, _authProvider, httpHeaders);
_logger.info("Downloaded segment from: {} to: {} of size: {}; Response status code: {}", uri, dest,
dest.length(), statusCode);
return true;
} catch (HttpErrorStatusException e) {
int statusCode = e.getStatusCode();
if (statusCode == HttpStatus.SC_NOT_FOUND || statusCode >= 500) {
// Temporary exception
          // 404 is treated as a temporary exception, as the downloadURI may be backed by multiple hosts;
          // if a single host is down, we can retry with another host.
_logger.warn("Got temporary error status code: {} while downloading segment from: {} to: {}", statusCode, uri,
dest, e);
return false;
} else {
// Permanent exception
_logger.error("Got permanent error status code: {} while downloading segment from: {} to: {}, won't retry",
statusCode, uri, dest, e);
throw e;
}
} catch (Exception e) {
_logger.warn("Caught exception while downloading segment from: {} to: {}", uri, dest, e);
return false;
}
});
}
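  // Illustrative sketch (a hypothetical helper, not part of the code above): the retry loop
  // classifies 404 and all 5xx responses as temporary failures, because the download URI may be
  // backed by several hosts and the next attempt can hit a different one; every other HTTP error
  // status is treated as permanent and aborts the retries.
  static boolean isRetriableStatus(int statusCode) {
    return statusCode == HttpStatus.SC_NOT_FOUND || statusCode >= 500;
  }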
|
@Test
public void testFetchSegmentToLocalSuccessAfterRetry()
throws Exception {
FileUploadDownloadClient client = mock(FileUploadDownloadClient.class);
// The first two attempts failed and the last attempt succeeded
when(client.downloadFile(any(), any(), any())).thenReturn(300).thenReturn(300).thenReturn(200);
HttpSegmentFetcher segmentFetcher = getSegmentFetcher(client);
List<URI> uris = List.of(new URI("http://h1:8080"), new URI("http://h2:8080"));
segmentFetcher.fetchSegmentToLocal(SEGMENT_NAME, () -> uris, SEGMENT_FILE);
}
|
public static StatementExecutorResponse execute(
final ConfiguredStatement<DescribeFunction> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final DescribeFunction describeFunction = statement.getStatement();
final FunctionName functionName = FunctionName.of(describeFunction.getFunctionName());
if (executionContext.getMetaStore().isAggregate(functionName)) {
return StatementExecutorResponse.handled(Optional.of(
describeAggregateFunction(executionContext, functionName,
statement.getMaskedStatementText())));
}
if (executionContext.getMetaStore().isTableFunction(functionName)) {
return StatementExecutorResponse.handled(Optional.of(
describeTableFunction(executionContext, functionName,
statement.getMaskedStatementText())));
}
return StatementExecutorResponse.handled(Optional.of(
describeNonAggregateFunction(executionContext, functionName,
statement.getMaskedStatementText())));
}
|
@Test
public void shouldDescribeUDAFWithVarArgsInMiddle() {
// When:
final FunctionDescriptionList functionList = (FunctionDescriptionList)
CustomExecutors.DESCRIBE_FUNCTION.execute(
engine.configure("DESCRIBE FUNCTION MID_VAR_ARG;"),
mock(SessionProperties.class),
engine.getEngine(),
engine.getServiceContext()
).getEntity().orElseThrow(IllegalStateException::new);
// Then:
assertThat(functionList, new TypeSafeMatcher<FunctionDescriptionList>() {
@Override
protected boolean matchesSafely(final FunctionDescriptionList item) {
            return item.getName().equals("MID_VAR_ARG")
                && item.getType().equals(FunctionType.AGGREGATE);
}
@Override
public void describeTo(final Description description) {
description.appendText(functionList.getName());
}
});
}
|
@GetMapping("/hoteles/{hotelId}")
public ResponseEntity<List<Calificacion>> listarCalificacionesPorHotelId (@PathVariable String hotelId) {
return ResponseEntity.ok(calificacionService.getCalificacionesByHotelId(hotelId));
}
|
@Test
void testListarCalificacionesPorHotelId() throws Exception {
List<Calificacion> calificaciones = Arrays.asList(calificacion1, calificacion3);
when(calificacionService.getCalificacionesByHotelId("hotel1")).thenReturn(calificaciones);
mockMvc.perform(get("/calificaciones/hoteles/hotel1")
.contentType(MediaType.APPLICATION_JSON))
.andExpect(status().isOk())
.andExpect(jsonPath("$", hasSize(2)))
.andExpect(jsonPath("$[0].id", is(calificacion1.getId())))
.andExpect(jsonPath("$[1].id", is(calificacion3.getId())));
verify(calificacionService, times(1)).getCalificacionesByHotelId("hotel1");
}
|
@Override
public double[] smoothDerivative(double[] input) {
if (input.length < weights.length) {
return averageDerivativeForVeryShortTrack(input);
}
double[] smoothed = new double[input.length];
int halfWindowFloored = weights.length / 2; // we want to exclude the center point
for (int i = halfWindowFloored; i < input.length - halfWindowFloored; i++) {
for (int windowIndex = 0; windowIndex < smoothCoeff.length; windowIndex++) {
smoothed[i] += derivCoeff[windowIndex] * input[i + windowIndex + offsetFromWindowCenter];
}
smoothed[i] = smoothed[i] / timeStep;
}
fillSmoothDerivativeLeftSide(smoothed, halfWindowFloored);
fillSmoothDerivativeRightSide(smoothed, halfWindowFloored);
return smoothed;
}
|
@Test
public void Derivative_FromShortTrack_ReturnAverageDerivative() {
SavitzkyGolayFilter test = new SavitzkyGolayFilter(3.0); // note the time step
double[] input = new double[]{10.0, 13.0, 16.0, 18.0, 21.0};
double[] actual = test.smoothDerivative(input);
assertThat(actual.length, equalTo(input.length));
for (int i = 0; i < input.length; i++) {
assertThat(actual[i], closeTo(0.9167, 1E-3));
}
}
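// Worked check of the expected value above (assuming, as the assertion suggests, that the
// short-track fallback fills every index with the average derivative): the track rises from
// 10.0 to 21.0 over 4 steps, an average increment of (21.0 - 10.0) / 4 = 2.75 per sample, and
// dividing by the time step of 3.0 gives 2.75 / 3.0 ≈ 0.9167.
double avgIncrementPerSample = (21.0 - 10.0) / 4;        // 2.75
double expectedDerivative = avgIncrementPerSample / 3.0; // ≈ 0.9167, matching closeTo(0.9167, 1E-3)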
|
@Override
public MapSettings setProperty(String key, String value) {
return (MapSettings) super.setProperty(key, value);
}
|
@Test
public void getKeysStartingWith() {
Settings settings = new MapSettings();
settings.setProperty("sonar.jdbc.url", "foo");
settings.setProperty("sonar.jdbc.username", "bar");
settings.setProperty("sonar.security", "admin");
assertThat(settings.getKeysStartingWith("sonar")).containsOnly("sonar.jdbc.url", "sonar.jdbc.username", "sonar.security");
assertThat(settings.getKeysStartingWith("sonar.jdbc")).containsOnly("sonar.jdbc.url", "sonar.jdbc.username");
assertThat(settings.getKeysStartingWith("other")).isEmpty();
}
|
public static YamlProxyConfiguration load(final String path) throws IOException {
YamlProxyServerConfiguration serverConfig = loadServerConfiguration(getGlobalConfigFile(path));
File configPath = getResourceFile(path);
Collection<YamlProxyDatabaseConfiguration> databaseConfigs = loadDatabaseConfigurations(configPath);
YamlProxyConfigurationChecker.checkDataSources(serverConfig.getDataSources(), databaseConfigs);
return new YamlProxyConfiguration(serverConfig, databaseConfigs.stream().collect(Collectors.toMap(
YamlProxyDatabaseConfiguration::getDatabaseName, each -> each, (oldValue, currentValue) -> oldValue, LinkedHashMap::new)));
}
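    // Minimal standalone sketch (plain JDK; the values are illustrative, not ShardingSphere types)
    // of the collector used above: the LinkedHashMap supplier preserves insertion (file) order, and
    // the (oldValue, currentValue) -> oldValue merge function keeps the first entry when two
    // configurations declare the same database name.
    Map<String, Integer> byName = Stream.of("db_a", "db_b", "db_a")
            .collect(Collectors.toMap(name -> name, String::length,
                    (oldValue, currentValue) -> oldValue, LinkedHashMap::new));
    // byName = {db_a=4, db_b=4}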
|
@Test
void assertLoad() throws IOException {
YamlProxyConfiguration actual = ProxyConfigurationLoader.load("/conf/config_loader/");
Iterator<YamlRuleConfiguration> actualGlobalRules = actual.getServerConfiguration().getRules().iterator();
// TODO assert mode
// TODO assert authority rule
actualGlobalRules.next();
assertThat(actual.getDatabaseConfigurations().size(), is(3));
assertShardingRuleConfiguration(actual.getDatabaseConfigurations().get("sharding_db"));
assertReadwriteSplittingRuleConfiguration(actual.getDatabaseConfigurations().get("readwrite_splitting_db"));
assertEncryptRuleConfiguration(actual.getDatabaseConfigurations().get("encrypt_db"));
}
|