focal_method | test_case
---|---|
static Properties resolveConsumerProperties(Map<String, String> options, Object keySchema, Object valueSchema) {
Properties properties = from(options);
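// Apply key (isKey = true) and value (isKey = false) serde properties in turn.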
withSerdeConsumerProperties(true, options, keySchema, properties);
withSerdeConsumerProperties(false, options, valueSchema, properties);
return properties;
}
|
@Test
public void test_consumerProperties_json() {
// key
assertThat(resolveConsumerProperties(Map.of(OPTION_KEY_FORMAT, JSON_FLAT_FORMAT)))
.containsExactlyEntriesOf(Map.of(KEY_DESERIALIZER, ByteArrayDeserializer.class.getCanonicalName()));
// value
assertThat(resolveConsumerProperties(Map.of(
OPTION_KEY_FORMAT, UNKNOWN_FORMAT,
OPTION_VALUE_FORMAT, JSON_FLAT_FORMAT
))).containsExactlyEntriesOf(Map.of(VALUE_DESERIALIZER, ByteArrayDeserializer.class.getCanonicalName()));
}
|
boolean dropDatabase(@NotNull String dbName) throws TException {
client.drop_database(dbName, true, true);
return true;
}
|
@Test
public void dropNonExistingDb() {
Assertions.assertThrows(NoSuchObjectException.class,
() -> client.dropDatabase("WhatIsThisDatabase"));
}
|
public void runExtractor(Message msg) {
try(final Timer.Context ignored = completeTimer.time()) {
final String field;
try (final Timer.Context ignored2 = conditionTimer.time()) {
// We can only work on Strings.
if (!(msg.getField(sourceField) instanceof String)) {
conditionMissesCounter.inc();
return;
}
field = (String) msg.getField(sourceField);
// Decide whether to extract at all.
if (conditionType.equals(ConditionType.STRING)) {
if (field.contains(conditionValue)) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
} else if (conditionType.equals(ConditionType.REGEX)) {
if (regexConditionPattern.matcher(field).find()) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
}
}
try (final Timer.Context ignored2 = executionTimer.time()) {
Result[] results;
try {
results = run(field);
} catch (ExtractorException e) {
final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>";
msg.addProcessingError(new Message.ProcessingError(
ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e)));
return;
}
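// Nothing to add if the extractor produced no results or any null values.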
if (results == null || results.length == 0 || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) {
return;
} else if (results.length == 1 && results[0].target == null) {
// results[0].target is null if this extractor cannot produce multiple fields; use targetField in that case
msg.addField(targetField, results[0].getValue());
} else {
for (final Result result : results) {
msg.addField(result.getTarget(), result.getValue());
}
}
// Remove original from message?
if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField) && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) {
final StringBuilder sb = new StringBuilder(field);
final List<Result> reverseList = Arrays.stream(results)
.sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed())
.collect(Collectors.toList());
// remove all from reverse so that the indices still match
for (final Result result : reverseList) {
sb.delete(result.getBeginIndex(), result.getEndIndex());
}
final String builtString = sb.toString();
final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString;
msg.removeField(sourceField);
// TODO don't add an empty field back, or rather don't add fullyCutByExtractor
msg.addField(sourceField, finalResult);
}
runConverters(msg);
}
}
}
|
@Test
public void testRunExtractorCheckSourceValueIsString() throws Exception {
final TestExtractor extractor = new TestExtractor.Builder()
.sourceField("a_field")
.build();
// Extractor should not run for source field values that are not strings!
final Message msg1 = createMessage("the message");
msg1.addField("a_field", 1);
extractor.runExtractor(msg1);
assertThat(msg1.hasField("target")).isFalse();
// The extractor should run for a source field value of type string.
final Message msg2 = createMessage("the message");
msg2.addField("a_field", "the source");
extractor.runExtractor(msg2);
assertThat(msg2.hasField("target")).isTrue();
}
|
public static IpPrefix valueOf(int address, int prefixLength) {
return new IpPrefix(IpAddress.valueOf(address), prefixLength);
}
|
@Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfIntegerNegativePrefixLengthIPv4() {
IpPrefix ipPrefix;
ipPrefix = IpPrefix.valueOf(0x01020304, -1);
}
|
@Override
public StringBuffer format(Object obj, StringBuffer result, FieldPosition pos) {
return format.format(toArgs((Attributes) obj), result, pos);
}
|
@Test
public void testFormat() {
Attributes attrs = new Attributes();
attrs.setString(Tag.ImageType, VR.CS, "ORIGINAL", "PRIMARY", "AXIAL");
attrs.setString(Tag.StudyDate, VR.DA, "20111012");
attrs.setString(Tag.StudyTime, VR.TM, "0930");
attrs.setString(Tag.StudyInstanceUID, VR.UI, "1.2.3");
attrs.setString(Tag.SeriesInstanceUID, VR.UI, "1.2.3.4");
attrs.setString(Tag.SOPInstanceUID, VR.UI, "1.2.3.4.5");
assertEquals("2011/10/12/09/02C82A3A/71668980/PRIMARY/1.2.3.4.5.dcm",
new AttributesFormat(TEST_PATTERN).format(attrs));
}
|
@Override
public boolean validate(Path path, ResourceContext context) {
// explicitly call a method not depending on LinkResourceService
return validate(path);
}
|
@Test
public void testLessThanLatency() {
sut = new LatencyConstraint(Duration.of(10, ChronoUnit.NANOS));
assertThat(sut.validate(path, resourceContext), is(true));
}
|
public static Integer size(Object value) {
if (value != null) {
if (value instanceof Collection<?> collection) {
return collection.size();
} else if (value instanceof Map<?, ?> map) {
return map.size();
} else if (value instanceof Object[] array) {
return array.length;
} else if (value.getClass().isArray()) {
// Primitive arrays (e.g. int[]) don't match Object[], so use reflection instead.
return Array.getLength(value);
} else if (value instanceof NodeList nodeList) {
return nodeList.getLength();
}
}
return null;
}
|
@Test
public void testSize() {
Map<String, Object> map = new HashMap<>();
map.put("foo", 123);
map.put("bar", 456);
assertEquals(2, CollectionHelper.size(map).intValue());
String[] array = new String[] { "Claus", "Willem" };
assertEquals(2, CollectionHelper.size(array).intValue());
}
|
public RequestFuture requestFuture(Request request) throws NacosException {
int retryTimes = 0;
long start = System.currentTimeMillis();
Exception exceptionToThrow = null;
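// Retry until either the retry budget or the overall timeout is exhausted.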
while (retryTimes <= rpcClientConfig.retryTimes()
&& System.currentTimeMillis() < start + rpcClientConfig.timeOutMills()) {
boolean waitReconnect = false;
try {
if (this.currentConnection == null || !isRunning()) {
waitReconnect = true;
throw new NacosException(NacosException.CLIENT_DISCONNECT, "Client not connected.");
}
return this.currentConnection.requestFuture(request);
} catch (Exception e) {
if (waitReconnect) {
try {
// wait client to reconnect.
Thread.sleep(100L);
} catch (Exception exception) {
// Do nothing.
}
}
LoggerUtils.printIfErrorEnabled(LOGGER,
"[{}] Send request fail, request = {}, retryTimes = {}, errorMessage = {}",
rpcClientConfig.name(), request, retryTimes, e.getMessage());
exceptionToThrow = e;
}
retryTimes++;
}
if (rpcClientStatus.compareAndSet(RpcClientStatus.RUNNING, RpcClientStatus.UNHEALTHY)) {
switchServerAsyncOnRequestFail();
}
if (exceptionToThrow != null) {
throw (exceptionToThrow instanceof NacosException) ? (NacosException) exceptionToThrow
: new NacosException(SERVER_ERROR, exceptionToThrow);
} else {
throw new NacosException(SERVER_ERROR, "Request future fail, unknown error");
}
}
|
@Test
void testRequestFutureWhenClientAlreadyShutDownThenThrowException() throws NacosException {
assertThrows(NacosException.class, () -> {
rpcClient.rpcClientStatus.set(RpcClientStatus.SHUTDOWN);
rpcClient.currentConnection = connection;
rpcClient.requestFuture(null);
});
}
|
public SuperModelConfigProvider getSuperModel() {
return model;
}
|
@Test
public void test_lb_config_simple() {
LbServicesConfig.Builder lb = new LbServicesConfig.Builder();
handler.getSuperModel().getConfig(lb);
LbServicesConfig lbc = new LbServicesConfig(lb);
assertEquals(1, lbc.tenants().size());
assertEquals(1, lbc.tenants("a").applications().size());
Applications app = lbc.tenants("a").applications("foo:prod:default:default");
assertNotNull(app);
}
|
public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId application) {
try {
Optional<byte[]> data = curator.getData(endpointCertificateMetadataPathOf(application));
if (data.isEmpty() || data.get().length == 0) return Optional.empty();
Slime slime = SlimeUtils.jsonToSlime(data.get());
EndpointCertificateMetadata endpointCertificateMetadata = EndpointCertificateMetadataSerializer.fromSlime(slime.get());
return Optional.of(endpointCertificateMetadata);
} catch (Exception e) {
throw new RuntimeException("Error reading endpoint certificate metadata for " + application, e);
}
}
|
@Test
public void reads_object_format() {
curator.set(endpointCertificateMetadataPath,
"{\"keyName\": \"vespa.tlskeys.tenant1--app1-key\", \"certName\":\"vespa.tlskeys.tenant1--app1-cert\", \"version\": 0}"
.getBytes());
// Read from zk and verify cert and key are available
var secrets = endpointCertificateMetadataStore.readEndpointCertificateMetadata(applicationId)
.flatMap(endpointCertificateRetriever::readEndpointCertificateSecrets);
assertTrue(secrets.isPresent());
assertTrue(secrets.get().key().startsWith("-----BEGIN EC PRIVATE KEY"));
assertTrue(secrets.get().certificate().startsWith("-----BEGIN CERTIFICATE"));
}
|
@Override
public void dropRuleItemConfiguration(final DropRuleItemEvent event, final MaskRuleConfiguration currentRuleConfig) {
currentRuleConfig.getMaskAlgorithms().remove(((DropNamedRuleItemEvent) event).getItemName());
}
|
@Test
void assertDropRuleItemConfiguration() {
MaskRuleConfiguration currentRuleConfig = new MaskRuleConfiguration(Collections.emptyList(), new HashMap<>(Collections.singletonMap("type: TEST", mock(AlgorithmConfiguration.class))));
new MaskAlgorithmChangedProcessor().dropRuleItemConfiguration(new DropNamedRuleItemEvent("foo_db", "type: TEST", ""), currentRuleConfig);
assertTrue(currentRuleConfig.getMaskAlgorithms().isEmpty());
}
|
public void load() {
for (NextWordsContainer container : mStorage.loadStoredNextWords()) {
if (BuildConfig.DEBUG) Log.d(TAG, "Loaded " + container);
mNextWordMap.put(container.word, container);
}
}
|
@Test
public void testDoesNotLearnIfNotNotifying() throws Exception {
mNextWordDictionaryUnderTest.load();
assertHasNextWordsForWord(false, mNextWordDictionaryUnderTest, "hello");
assertHasNextWordsForWord(false, mNextWordDictionaryUnderTest, "menny");
assertHasNextWordsForWord(false, mNextWordDictionaryUnderTest, "hello");
assertHasNextWordsForWord(false, mNextWordDictionaryUnderTest, "menny");
}
|
public static void mergeMap(boolean decrypt, Map<String, Object> config) {
merge(decrypt, config);
}
|
@Test(expected = RuntimeException.class)
public void testMap_key_keyMustBeString() {
Map<String, Object> testMap = new HashMap<>();
testMap.put("${TEST.double: 1.1}", "value");
CentralizedManagement.mergeMap(true, testMap);
}
|
public BeamFnApi.InstructionResponse.Builder processBundle(BeamFnApi.InstructionRequest request)
throws Exception {
BeamFnApi.ProcessBundleResponse.Builder response = BeamFnApi.ProcessBundleResponse.newBuilder();
BundleProcessor bundleProcessor =
bundleProcessorCache.get(
request,
() -> {
try {
return createBundleProcessor(
request.getProcessBundle().getProcessBundleDescriptorId(),
request.getProcessBundle());
} catch (IOException e) {
throw new RuntimeException(e);
}
});
try {
PTransformFunctionRegistry startFunctionRegistry = bundleProcessor.getStartFunctionRegistry();
PTransformFunctionRegistry finishFunctionRegistry =
bundleProcessor.getFinishFunctionRegistry();
ExecutionStateTracker stateTracker = bundleProcessor.getStateTracker();
try (HandleStateCallsForBundle beamFnStateClient = bundleProcessor.getBeamFnStateClient()) {
stateTracker.start(request.getInstructionId());
try {
// Already in reverse topological order so we don't need to do anything.
for (ThrowingRunnable startFunction : startFunctionRegistry.getFunctions()) {
LOG.debug("Starting function {}", startFunction);
startFunction.run();
}
if (request.getProcessBundle().hasElements()) {
boolean inputFinished =
bundleProcessor
.getInboundObserver()
.multiplexElements(request.getProcessBundle().getElements());
if (!inputFinished) {
throw new RuntimeException(
"Elements embedded in ProcessBundleRequest do not contain stream terminators for "
+ "all data and timer inputs. Unterminated endpoints: "
+ bundleProcessor.getInboundObserver().getUnfinishedEndpoints());
}
} else if (!bundleProcessor.getInboundEndpointApiServiceDescriptors().isEmpty()) {
BeamFnDataInboundObserver observer = bundleProcessor.getInboundObserver();
beamFnDataClient.registerReceiver(
request.getInstructionId(),
bundleProcessor.getInboundEndpointApiServiceDescriptors(),
observer);
observer.awaitCompletion();
beamFnDataClient.unregisterReceiver(
request.getInstructionId(),
bundleProcessor.getInboundEndpointApiServiceDescriptors());
}
// Need to reverse this since we want to call finish in topological order.
for (ThrowingRunnable finishFunction :
Lists.reverse(finishFunctionRegistry.getFunctions())) {
LOG.debug("Finishing function {}", finishFunction);
finishFunction.run();
}
// If bundleProcessor has not flushed any elements, embed them in response.
embedOutboundElementsIfApplicable(response, bundleProcessor);
// Add all checkpointed residuals to the response.
response.addAllResidualRoots(bundleProcessor.getSplitListener().getResidualRoots());
// Add all metrics to the response.
bundleProcessor.getProgressRequestLock().lock();
Map<String, ByteString> monitoringData = finalMonitoringData(bundleProcessor);
if (runnerAcceptsShortIds) {
response.putAllMonitoringData(monitoringData);
} else {
for (Map.Entry<String, ByteString> metric : monitoringData.entrySet()) {
response.addMonitoringInfos(
shortIds.get(metric.getKey()).toBuilder().setPayload(metric.getValue()));
}
}
if (!bundleProcessor.getBundleFinalizationCallbackRegistrations().isEmpty()) {
finalizeBundleHandler.registerCallbacks(
bundleProcessor.getInstructionId(),
ImmutableList.copyOf(bundleProcessor.getBundleFinalizationCallbackRegistrations()));
response.setRequiresFinalization(true);
}
} finally {
// We specifically deactivate state tracking while we are holding the progress request and
// sampling locks.
stateTracker.reset();
}
}
// Mark the bundle processor as re-usable.
bundleProcessorCache.release(
request.getProcessBundle().getProcessBundleDescriptorId(), bundleProcessor);
return BeamFnApi.InstructionResponse.newBuilder().setProcessBundle(response);
} catch (Exception e) {
// Make sure we clean-up from the active set of bundle processors.
bundleProcessorCache.discard(bundleProcessor);
throw e;
}
}
|
@Test
public void testTimerRegistrationsFailIfNoTimerApiServiceDescriptorSpecified() throws Exception {
BeamFnApi.ProcessBundleDescriptor processBundleDescriptor =
BeamFnApi.ProcessBundleDescriptor.newBuilder()
.putTransforms(
"2L",
RunnerApi.PTransform.newBuilder()
.setSpec(RunnerApi.FunctionSpec.newBuilder().setUrn(DATA_INPUT_URN).build())
.build())
.build();
Map<String, BeamFnApi.ProcessBundleDescriptor> fnApiRegistry =
ImmutableMap.of("1L", processBundleDescriptor);
ProcessBundleHandler handler =
new ProcessBundleHandler(
PipelineOptionsFactory.create(),
Collections.emptySet(),
fnApiRegistry::get,
beamFnDataClient,
null /* beamFnStateGrpcClientCache */,
null /* finalizeBundleHandler */,
new ShortIdMap(),
executionStateSampler,
ImmutableMap.of(
DATA_INPUT_URN,
new PTransformRunnerFactory<Object>() {
@Override
public Object createRunnerForPTransform(Context context) throws IOException {
context.addOutgoingTimersEndpoint(
"timer", Timer.Coder.of(StringUtf8Coder.of(), GlobalWindow.Coder.INSTANCE));
return null;
}
}),
Caches.noop(),
new BundleProcessorCache(),
null /* dataSampler */);
assertThrows(
"Timers are unsupported",
IllegalStateException.class,
() ->
handler.processBundle(
BeamFnApi.InstructionRequest.newBuilder()
.setProcessBundle(
BeamFnApi.ProcessBundleRequest.newBuilder()
.setProcessBundleDescriptorId("1L"))
.build()));
}
|
@Operation(summary = "queryProjectListPaging", description = "QUERY_PROJECT_LIST_PAGING_NOTES")
@Parameters({
@Parameter(name = "searchVal", description = "SEARCH_VAL", schema = @Schema(implementation = String.class)),
@Parameter(name = "pageSize", description = "PAGE_SIZE", required = true, schema = @Schema(implementation = int.class, example = "10")),
@Parameter(name = "pageNo", description = "PAGE_NO", required = true, schema = @Schema(implementation = int.class, example = "1"))
})
@GetMapping()
@ResponseStatus(HttpStatus.OK)
@ApiException(LOGIN_USER_QUERY_PROJECT_LIST_PAGING_ERROR)
public Result queryProjectListPaging(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize,
@RequestParam("pageNo") Integer pageNo) {
checkPageParams(pageNo, pageSize);
searchVal = ParameterUtils.handleEscapes(searchVal);
return projectService.queryProjectListPaging(loginUser, pageSize, pageNo, searchVal);
}
|
@Test
public void testQueryProjectListPaging() {
int pageNo = 1;
int pageSize = 10;
String searchVal = "";
Result result = Result.success(new PageInfo<Project>(1, 10));
Mockito.when(projectService.queryProjectListPaging(user, pageSize, pageNo, searchVal)).thenReturn(result);
Result response = projectController.queryProjectListPaging(user, searchVal, pageSize, pageNo);
Assertions.assertTrue(response != null && response.isSuccess());
}
|
@Override
public <R> HoodieData<HoodieRecord<R>> tagLocation(
HoodieData<HoodieRecord<R>> records, HoodieEngineContext context,
HoodieTable hoodieTable) {
return HoodieJavaRDD.of(HoodieJavaRDD.getJavaRDD(records)
.mapPartitionsWithIndex(locationTagFunction(hoodieTable.getMetaClient()), true));
}
|
@Test
public void testSmallBatchSize() throws Exception {
final String newCommitTime = "001";
final int numRecords = 10;
List<HoodieRecord> records = dataGen.generateInserts(newCommitTime, numRecords);
JavaRDD<HoodieRecord> writeRecords = jsc().parallelize(records, 1);
// Load to memory
HoodieWriteConfig config = getConfig(2);
SparkHoodieHBaseIndex index = new SparkHoodieHBaseIndex(config);
try (SparkRDDWriteClient writeClient = getHoodieWriteClient(config);) {
metaClient = HoodieTableMetaClient.reload(metaClient);
HoodieTable hoodieTable = HoodieSparkTable.create(config, context, metaClient);
// Test tagLocation without any entries in index
JavaRDD<HoodieRecord> records1 = tagLocation(index, writeRecords, hoodieTable);
assertEquals(0, records1.filter(record -> record.isCurrentLocationKnown()).count());
// Insert the 10 records
writeClient.startCommitWithTime(newCommitTime);
JavaRDD<WriteStatus> writeStatues = writeClient.upsert(writeRecords, newCommitTime);
assertNoWriteErrors(writeStatues.collect());
// Now tagLocation for these records, hbaseIndex should not tag them since it was a failed
// commit
JavaRDD<HoodieRecord> records2 = tagLocation(index, writeRecords, hoodieTable);
assertEquals(0, records2.filter(record -> record.isCurrentLocationKnown()).count());
// Now commit this & update location of records inserted and validate no errors
writeClient.commit(newCommitTime, writeStatues);
// Now tagLocation for these records, hbaseIndex should tag them correctly
metaClient = HoodieTableMetaClient.reload(metaClient);
hoodieTable = HoodieSparkTable.create(config, context, metaClient);
List<HoodieRecord> records3 = tagLocation(index, writeRecords, hoodieTable).collect();
assertEquals(numRecords, records3.stream().filter(record -> record.isCurrentLocationKnown()).count());
assertEquals(numRecords, records3.stream().map(record -> record.getKey().getRecordKey()).distinct().count());
assertEquals(numRecords, records3.stream().filter(record -> (record.getCurrentLocation() != null
&& record.getCurrentLocation().getInstantTime().equals(newCommitTime))).distinct().count());
}
}
|
ClassicGroup getOrMaybeCreateClassicGroup(
String groupId,
boolean createIfNotExists
) throws GroupIdNotFoundException {
Group group = groups.get(groupId);
if (group == null && !createIfNotExists) {
throw new GroupIdNotFoundException(String.format("Classic group %s not found.", groupId));
}
if (group == null) {
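// No group exists and creation is allowed: create an empty classic group
// and record the state transition for metrics.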
ClassicGroup classicGroup = new ClassicGroup(logContext, groupId, ClassicGroupState.EMPTY, time, metrics);
groups.put(groupId, classicGroup);
metrics.onClassicGroupStateTransition(null, classicGroup.currentState());
return classicGroup;
} else {
if (group.type() == CLASSIC) {
return (ClassicGroup) group;
} else {
// We don't support upgrading/downgrading between protocols at the moment so
// we throw an exception if a group exists with the wrong type.
throw new GroupIdNotFoundException(String.format("Group %s is not a classic group.",
groupId));
}
}
}
|
@Test
public void testStaticMemberRejoinWithLeaderIdAndUnexpectedDeadGroup() throws Exception {
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.build();
GroupMetadataManagerTestContext.RebalanceResult rebalanceResult = context.staticMembersJoinAndRebalance(
"group-id",
"leader-instance-id",
"follower-instance-id"
);
ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false);
group.transitionTo(DEAD);
JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder()
.withGroupId("group-id")
.withGroupInstanceId("leader-instance-id")
.withMemberId(rebalanceResult.leaderId)
.withDefaultProtocolTypeAndProtocols()
.build();
GroupMetadataManagerTestContext.JoinResult joinResult = context.sendClassicGroupJoin(request, true, true);
assertTrue(joinResult.records.isEmpty());
assertTrue(joinResult.joinFuture.isDone());
assertEquals(Errors.COORDINATOR_NOT_AVAILABLE.code(), joinResult.joinFuture.get().errorCode());
}
|
@Override
public String create() {
return UuidFactoryImpl.INSTANCE.create();
}
|
@Test
public void create_different_uuids() {
// this test is not enough to ensure that generated strings are unique,
// but it is a simple sanity check
assertThat(underTest.create()).isNotEqualTo(underTest.create());
}
|
public Integer put(FileInStream is) {
int id = mCounter.incrementAndGet();
mInStreamCache.put(id, is);
return id;
}
|
@Test
public void expiration() throws Exception {
StreamCache streamCache = new StreamCache(0);
FileInStream is = mock(FileInStream.class);
FileOutStream os = mock(FileOutStream.class);
streamCache.put(is);
streamCache.put(os);
verify(is).close();
verify(os).close();
}
|
public static <T> List<T> emptyOnNull(List<T> list) {
return list == null ? new ArrayList<>() : list;
}
|
@Test
void emptyOnNull() {
var list = ListUtils.emptyOnNull(null);
assertThat(list, notNullValue());
assertThat(list, empty());
list = ListUtils.emptyOnNull(List.of("1"));
assertThat(list, notNullValue());
assertThat(list.size(), is(1));
}
|
public static JsonAsserter with(String json) {
return new JsonAsserterImpl(JsonPath.parse(json).json());
}
|
@Test
public void ends_with_evaluates() throws Exception {
with(JSON).assertThat("$.store.book[0].category", endsWith("nce"));
}
|
static boolean canUpdate(int transactionLogLayer, int transactionLogIndex, int entryInfoLayer, int entryInfoIndex) {
if (transactionLogLayer == entryInfoLayer) {
// Must make sure to not update beyond the current index
if (transactionLogIndex >= entryInfoIndex) {
return true;
}
}
if (transactionLogLayer > entryInfoLayer) {
if (transactionLogIndex < entryInfoIndex) {
return true;
}
}
return false;
}
|
@DisplayName("Tests that can update records on the same layer")
@Test
void testCanUpdateSameLayer() {
assertTrue(TransactionLog.canUpdate(0, 10, 0, 1));
assertTrue(TransactionLog.canUpdate(0, 10, 0, 10));
assertFalse(TransactionLog.canUpdate(0, 10, 0, 11));
}
|
@CanIgnoreReturnValue
public final Ordered containsAtLeast(
@Nullable Object k0, @Nullable Object v0, @Nullable Object... rest) {
return containsAtLeastEntriesIn(accumulateMap("containsAtLeast", k0, v0, rest));
}
|
@Test
public void containsAtLeastMissingKey() {
ImmutableMap<String, Integer> actual = ImmutableMap.of("jan", 1, "feb", 2);
expectFailureWhenTestingThat(actual).containsAtLeast("jan", 1, "march", 3);
assertFailureKeys(
"missing keys",
"for key",
"expected value",
"---",
"expected to contain at least",
"but was");
assertFailureValue("for key", "march");
assertFailureValue("expected value", "3");
assertFailureValue("expected to contain at least", "{jan=1, march=3}");
}
|
public static Schema select(Schema schema, Set<Integer> fieldIds) {
Preconditions.checkNotNull(schema, "Schema cannot be null");
Types.StructType result = select(schema.asStruct(), fieldIds);
if (Objects.equals(schema.asStruct(), result)) {
return schema;
} else if (result != null) {
if (schema.getAliases() != null) {
return new Schema(result.fields(), schema.getAliases());
} else {
return new Schema(result.fields());
}
}
return new Schema(ImmutableList.of(), schema.getAliases());
}
|
@Test
public void testSelect() {
Schema schema =
new Schema(
Lists.newArrayList(
required(10, "a", Types.IntegerType.get()),
required(11, "A", Types.IntegerType.get()),
required(
12,
"someStruct",
Types.StructType.of(
required(13, "b", Types.IntegerType.get()),
required(14, "B", Types.IntegerType.get()),
required(
15,
"anotherStruct",
Types.StructType.of(
required(16, "c", Types.IntegerType.get()),
required(17, "C", Types.IntegerType.get())))))));
Schema expectedTop = new Schema(Lists.newArrayList(required(11, "A", Types.IntegerType.get())));
Schema actualTop = TypeUtil.select(schema, Sets.newHashSet(11));
assertThat(actualTop.asStruct()).isEqualTo(expectedTop.asStruct());
Schema expectedDepthOne =
new Schema(
Lists.newArrayList(
required(10, "a", Types.IntegerType.get()),
required(
12,
"someStruct",
Types.StructType.of(
required(13, "b", Types.IntegerType.get()),
required(14, "B", Types.IntegerType.get()),
required(
15,
"anotherStruct",
Types.StructType.of(
required(16, "c", Types.IntegerType.get()),
required(17, "C", Types.IntegerType.get())))))));
Schema actualDepthOne = TypeUtil.select(schema, Sets.newHashSet(10, 12));
assertThat(actualDepthOne.asStruct()).isEqualTo(expectedDepthOne.asStruct());
Schema expectedDepthTwo =
new Schema(
Lists.newArrayList(
required(11, "A", Types.IntegerType.get()),
required(
12,
"someStruct",
Types.StructType.of(
required(
15,
"anotherStruct",
Types.StructType.of(required(17, "C", Types.IntegerType.get())))))));
Schema actualDepthTwo = TypeUtil.select(schema, Sets.newHashSet(11, 17));
assertThat(actualDepthTwo.asStruct()).isEqualTo(expectedDepthTwo.asStruct());
}
|
public synchronized static MetricRegistry setDefault(String name) {
final MetricRegistry registry = getOrCreate(name);
return setDefault(name, registry);
}
|
@Test
public void errorsWhenDefaultAlreadySet() {
SharedMetricRegistries.setDefault("foobah");
exception.expect(IllegalStateException.class);
exception.expectMessage("Default metric registry name is already set.");
SharedMetricRegistries.setDefault("borg");
}
|
@Override
public void execute(Exchange exchange) throws SmppException {
SubmitMulti[] submitMulties = createSubmitMulti(exchange);
List<SubmitMultiResult> results = new ArrayList<>(submitMulties.length);
for (SubmitMulti submitMulti : submitMulties) {
SubmitMultiResult result;
if (log.isDebugEnabled()) {
log.debug("Sending multiple short messages for exchange id '{}'...", exchange.getExchangeId());
}
try {
result = session.submitMultiple(
submitMulti.getServiceType(),
TypeOfNumber.valueOf(submitMulti.getSourceAddrTon()),
NumberingPlanIndicator.valueOf(submitMulti.getSourceAddrNpi()),
submitMulti.getSourceAddr(),
(Address[]) submitMulti.getDestAddresses(),
new ESMClass(submitMulti.getEsmClass()),
submitMulti.getProtocolId(),
submitMulti.getPriorityFlag(),
submitMulti.getScheduleDeliveryTime(),
submitMulti.getValidityPeriod(),
new RegisteredDelivery(submitMulti.getRegisteredDelivery()),
new ReplaceIfPresentFlag(submitMulti.getReplaceIfPresentFlag()),
DataCodings.newInstance(submitMulti.getDataCoding()),
submitMulti.getSmDefaultMsgId(),
submitMulti.getShortMessage(),
submitMulti.getOptionalParameters());
results.add(result);
} catch (Exception e) {
throw new SmppException(e);
}
}
if (log.isDebugEnabled()) {
log.debug("Sent multiple short messages for exchange id '{}' and received results '{}'", exchange.getExchangeId(),
results);
}
List<String> messageIDs = new ArrayList<>(results.size());
// {messageID : [{destAddr : address, error : errorCode}]}
Map<String, List<Map<String, Object>>> errors = new HashMap<>();
for (SubmitMultiResult result : results) {
UnsuccessDelivery[] deliveries = result.getUnsuccessDeliveries();
if (deliveries != null) {
List<Map<String, Object>> undelivered = new ArrayList<>();
for (UnsuccessDelivery delivery : deliveries) {
Map<String, Object> error = new HashMap<>();
error.put(SmppConstants.DEST_ADDR, delivery.getDestinationAddress().getAddress());
error.put(SmppConstants.ERROR, delivery.getErrorStatusCode());
undelivered.add(error);
}
if (!undelivered.isEmpty()) {
errors.put(result.getMessageId(), undelivered);
}
}
messageIDs.add(result.getMessageId());
}
Message message = ExchangeHelper.getResultMessage(exchange);
message.setHeader(SmppConstants.ID, messageIDs);
message.setHeader(SmppConstants.SENT_MESSAGE_COUNT, messageIDs.size());
if (!errors.isEmpty()) {
message.setHeader(SmppConstants.ERROR, errors);
}
}
|
@Test
public void bodyWithSMPP8bitDataCodingNotModified() throws Exception {
final byte dataCoding = (byte) 0x04; /* SMPP 8-bit */
byte[] body = { (byte) 0xFF, 'A', 'B', (byte) 0x00, (byte) 0xFF, (byte) 0x7F, 'C', (byte) 0xFF };
Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
exchange.getIn().setHeader(SmppConstants.COMMAND, "SubmitMulti");
exchange.getIn().setHeader(SmppConstants.DATA_CODING, dataCoding);
exchange.getIn().setBody(body);
Address[] destAddrs = new Address[] {
new Address(
TypeOfNumber.UNKNOWN,
NumberingPlanIndicator.UNKNOWN,
"1717")
};
when(session.submitMultiple(eq("CMT"),
eq(TypeOfNumber.UNKNOWN),
eq(NumberingPlanIndicator.UNKNOWN),
eq("1616"),
eq(destAddrs),
eq(new ESMClass()),
eq((byte) 0),
eq((byte) 1),
(String) isNull(),
(String) isNull(),
eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)),
eq(ReplaceIfPresentFlag.DEFAULT),
eq(DataCodings.newInstance(dataCoding)),
eq((byte) 0),
eq(body)))
.thenReturn(new SubmitMultiResult("1", null, null));
command.execute(exchange);
assertEquals(Collections.singletonList("1"), exchange.getMessage().getHeader(SmppConstants.ID));
}
|
private static ClientAuthenticationMethod getClientAuthenticationMethod(
List<com.nimbusds.oauth2.sdk.auth.ClientAuthenticationMethod> metadataAuthMethods) {
if (metadataAuthMethods == null || metadataAuthMethods
.contains(com.nimbusds.oauth2.sdk.auth.ClientAuthenticationMethod.CLIENT_SECRET_BASIC)) {
// If null, the default includes client_secret_basic
return ClientAuthenticationMethod.CLIENT_SECRET_BASIC;
}
if (metadataAuthMethods.contains(com.nimbusds.oauth2.sdk.auth.ClientAuthenticationMethod.CLIENT_SECRET_POST)) {
return ClientAuthenticationMethod.CLIENT_SECRET_POST;
}
if (metadataAuthMethods.contains(com.nimbusds.oauth2.sdk.auth.ClientAuthenticationMethod.NONE)) {
return ClientAuthenticationMethod.NONE;
}
return null;
}
|
@Test
public void buildWhenCustomGrantAllAttributesProvidedThenAllAttributesAreSet() {
AuthorizationGrantType customGrantType = new AuthorizationGrantType("CUSTOM");
// @formatter:off
ClientRegistration registration = ClientRegistration
.withRegistrationId(REGISTRATION_ID)
.clientId(CLIENT_ID)
.clientSecret(CLIENT_SECRET)
.clientAuthenticationMethod(ClientAuthenticationMethod.CLIENT_SECRET_BASIC)
.authorizationGrantType(customGrantType)
.scope(SCOPES.toArray(new String[0]))
.tokenUri(TOKEN_URI)
.clientName(CLIENT_NAME)
.build();
// @formatter:on
assertThat(registration.getRegistrationId()).isEqualTo(REGISTRATION_ID);
assertThat(registration.getClientId()).isEqualTo(CLIENT_ID);
assertThat(registration.getClientSecret()).isEqualTo(CLIENT_SECRET);
assertThat(registration.getClientAuthenticationMethod())
.isEqualTo(ClientAuthenticationMethod.CLIENT_SECRET_BASIC);
assertThat(registration.getAuthorizationGrantType()).isEqualTo(customGrantType);
assertThat(registration.getScopes()).isEqualTo(SCOPES);
assertThat(registration.getProviderDetails().getTokenUri()).isEqualTo(TOKEN_URI);
assertThat(registration.getClientName()).isEqualTo(CLIENT_NAME);
}
|
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public GpgInfo get() {
return new GpgInfo(this.gpgGenerator.getGPGContext());
}
|
@Test
public void testGetGPG() throws JSONException, Exception {
WebResource r = resource();
JSONObject json = r.path("ws").path("v1").path("gpg")
.accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
assertNotNull(json);
}
|
public static boolean isIPv4Address(final String input) {
return IPV4_PATTERN.matcher(input).matches();
}
|
@Test
public void isIPv4Address() {
assertThat(NetAddressValidatorUtil.isIPv4Address("192.168.1.2")).isTrue();
assertThat(NetAddressValidatorUtil.isIPv4Address("127.0.0.1")).isTrue();
assertThat(NetAddressValidatorUtil.isIPv4Address("999.999.999.999")).isFalse();
}
|
public Pattern toRegex() {
String prefix = (this.pattern.startsWith("*") ? ".*" : "");
String suffix = (this.pattern.endsWith("*") ? ".*" : "");
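// Quote each literal segment and join with ".*" so '*' acts as a wildcard.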
String regex = Arrays.stream(this.pattern.split("\\*"))
.filter(s -> !s.isEmpty())
.map(Pattern::quote)
.collect(Collectors.joining(".*", prefix, suffix));
return Pattern.compile(regex);
}
|
@Test
public void testToRegex() {
ResourcePatternDescriber describer = new ResourcePatternDescriber(
"META-INF/dubbo/internal/org.apache.dubbo.common.extension.ExtensionInjector", null);
Assertions.assertEquals(
"\\QMETA-INF/dubbo/internal/org.apache.dubbo.common.extension.ExtensionInjector\\E",
describer.toRegex().toString());
}
|
public void clear() {
channelCache.invalidateAll();
}
|
@Test
public void testClear() throws InterruptedException {
String channelName = "existingChannel";
CountDownLatch notifyWhenChannelClosed = new CountDownLatch(1);
cache =
ChannelCache.forTesting(
ignored -> newChannel(channelName), notifyWhenChannelClosed::countDown);
WindmillServiceAddress someAddress = mock(WindmillServiceAddress.class);
ManagedChannel cachedChannel = cache.get(someAddress);
cache.clear();
notifyWhenChannelClosed.await();
assertTrue(cache.isEmpty());
assertTrue(cachedChannel.isShutdown());
}
|
@Override
public boolean hasSameDbVendor() {
Optional<String> registeredDbVendor = metadataIndex.getDbVendor();
return registeredDbVendor.isPresent() && registeredDbVendor.get().equals(getDbVendor());
}
|
@Test
public void hasSameDbVendor_is_false_if_values_dont_match() {
prepareDb("mssql");
prepareEs("postgres");
assertThat(underTest.hasSameDbVendor()).isFalse();
}
|
protected boolean canSkipBadReplayedJournal(Throwable t) {
if (Config.metadata_enable_recovery_mode) {
LOG.warn("skip journal load failure because cluster is in recovery mode");
return true;
}
try {
for (String idStr : Config.metadata_journal_skip_bad_journal_ids.split(",")) {
if (!StringUtils.isEmpty(idStr) && Long.parseLong(idStr) == replayedJournalId.get() + 1) {
LOG.warn("skip bad replayed journal id {} because configured {}",
idStr, Config.metadata_journal_skip_bad_journal_ids);
return true;
}
}
} catch (Exception e) {
LOG.warn("failed to parse metadata_journal_skip_bad_journal_ids: {}",
Config.metadata_journal_skip_bad_journal_ids, e);
}
short opCode = OperationType.OP_INVALID;
if (t instanceof JournalException) {
opCode = ((JournalException) t).getOpCode();
}
if (t instanceof JournalInconsistentException) {
opCode = ((JournalInconsistentException) t).getOpCode();
}
if (opCode != OperationType.OP_INVALID
&& OperationType.IGNORABLE_OPERATIONS.contains(opCode)) {
if (Config.metadata_journal_ignore_replay_failure) {
LOG.warn("skip ignorable journal load failure, opCode: {}", opCode);
return true;
} else {
LOG.warn("the failure of opCode: {} is ignorable, " +
"you can set metadata_journal_ignore_replay_failure to true to ignore this failure", opCode);
return false;
}
}
return false;
}
|
@Test
public void testCanSkipBadReplayedJournal() {
boolean originVal = Config.metadata_journal_ignore_replay_failure;
Config.metadata_journal_ignore_replay_failure = false;
// when metadata_journal_ignore_replay_failure is false, no operation type's failure can be skipped.
Assert.assertFalse(GlobalStateMgr.getServingState().canSkipBadReplayedJournal(
new JournalException(OperationType.OP_ADD_ANALYZE_STATUS, "failed")));
Assert.assertFalse(GlobalStateMgr.getServingState().canSkipBadReplayedJournal(
new JournalException(OperationType.OP_CREATE_DB_V2, "failed")));
Assert.assertFalse(GlobalStateMgr.getServingState().canSkipBadReplayedJournal(
new JournalInconsistentException(OperationType.OP_ADD_ANALYZE_STATUS, "failed")));
Assert.assertFalse(GlobalStateMgr.getServingState().canSkipBadReplayedJournal(
new JournalInconsistentException(OperationType.OP_CREATE_DB_V2, "failed")));
Config.metadata_journal_ignore_replay_failure = true;
// when metadata_journal_ignore_replay_failure is true, failures of ignorable operation types can be skipped.
Assert.assertTrue(GlobalStateMgr.getServingState().canSkipBadReplayedJournal(
new JournalException(OperationType.OP_ADD_ANALYZE_STATUS, "failed")));
Assert.assertFalse(GlobalStateMgr.getServingState().canSkipBadReplayedJournal(
new JournalException(OperationType.OP_CREATE_DB_V2, "failed")));
Assert.assertTrue(GlobalStateMgr.getServingState().canSkipBadReplayedJournal(
new JournalInconsistentException(OperationType.OP_ADD_ANALYZE_STATUS, "failed")));
Assert.assertFalse(GlobalStateMgr.getServingState().canSkipBadReplayedJournal(
new JournalInconsistentException(OperationType.OP_CREATE_DB_V2, "failed")));
Config.metadata_journal_ignore_replay_failure = originVal;
// when metadata_enable_recovery_mode is true, all types of failure can be skipped.
originVal = Config.metadata_enable_recovery_mode;
Config.metadata_enable_recovery_mode = true;
Assert.assertTrue(GlobalStateMgr.getServingState().canSkipBadReplayedJournal(
new JournalException(OperationType.OP_ADD_ANALYZE_STATUS, "failed")));
Assert.assertTrue(GlobalStateMgr.getServingState().canSkipBadReplayedJournal(
new JournalException(OperationType.OP_CREATE_DB_V2, "failed")));
Assert.assertTrue(GlobalStateMgr.getServingState().canSkipBadReplayedJournal(
new JournalInconsistentException(OperationType.OP_ADD_ANALYZE_STATUS, "failed")));
Assert.assertTrue(GlobalStateMgr.getServingState().canSkipBadReplayedJournal(
new JournalInconsistentException(OperationType.OP_CREATE_DB_V2, "failed")));
Config.metadata_enable_recovery_mode = originVal;
}
|
public static String toJson(SnapshotRef ref) {
return toJson(ref, false);
}
|
@Test
public void testTagToJsonDefault() {
String json = "{\"snapshot-id\":1,\"type\":\"tag\"}";
SnapshotRef ref = SnapshotRef.tagBuilder(1L).build();
assertThat(SnapshotRefParser.toJson(ref))
.as("Should be able to serialize default tag")
.isEqualTo(json);
}
|
@Override
public boolean accept(ProcessingEnvironment processingEnv, DeclaredType type) {
Elements elements = processingEnv.getElementUtils();
TypeElement mapTypeElement = elements.getTypeElement(Map.class.getTypeName());
TypeMirror mapType = mapTypeElement.asType();
Types types = processingEnv.getTypeUtils();
TypeMirror erasedType = types.erasure(type);
return types.isAssignable(erasedType, mapType);
}
|
@Test
void testAccept() {
assertTrue(builder.accept(processingEnv, stringsField.asType()));
assertTrue(builder.accept(processingEnv, colorsField.asType()));
assertTrue(builder.accept(processingEnv, primitiveTypeModelsField.asType()));
assertTrue(builder.accept(processingEnv, modelsField.asType()));
assertTrue(builder.accept(processingEnv, modelArraysField.asType()));
}
|
private ConsumerToken generateConsumerToken(Consumer consumer, Date expires) {
long consumerId = consumer.getId();
String createdBy = userInfoHolder.getUser().getUserId();
Date createdTime = new Date();
ConsumerToken consumerToken = new ConsumerToken();
consumerToken.setConsumerId(consumerId);
consumerToken.setExpires(expires);
consumerToken.setDataChangeCreatedBy(createdBy);
consumerToken.setDataChangeCreatedTime(createdTime);
consumerToken.setDataChangeLastModifiedBy(createdBy);
consumerToken.setDataChangeLastModifiedTime(createdTime);
generateAndEnrichToken(consumer, consumerToken);
return consumerToken;
}
|
@Test
public void testGenerateConsumerToken() throws Exception {
String someConsumerAppId = "100003171";
Date generationTime = new GregorianCalendar(2016, Calendar.AUGUST, 9, 12, 10, 50).getTime();
String tokenSalt = "apollo";
String expectedToken = "151067a53d08d70de161fa06b455623741877ce2f019f6e3018844c1a16dd8c6";
String actualToken = consumerService.generateToken(someConsumerAppId, generationTime, tokenSalt);
assertEquals(expectedToken, actualToken);
}
|
public static TransformExecutorService parallel(ExecutorService executor) {
return new ParallelTransformExecutor(executor);
}
|
@Test
public void parallelRejectedShutdownSucceeds() {
@SuppressWarnings("unchecked")
DirectTransformExecutor<Object> first = mock(DirectTransformExecutor.class);
TransformExecutorService parallel = TransformExecutorServices.parallel(executorService);
executorService.shutdown();
parallel.shutdown();
parallel.schedule(first);
}
|
public static Builder newBuilder() {
return new Builder();
}
|
@Test
public void decreaseSmoothing() {
VegasLimit limit = VegasLimit.newBuilder()
.decrease(current -> current / 2)
.smoothing(0.5)
.initialLimit(100)
.maxConcurrency(200)
.build();
// Pick up first min-rtt
limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(10), 100, false);
Assert.assertEquals(100, limit.getLimit());
// First decrease
limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(20), 100, false);
Assert.assertEquals(75, limit.getLimit());
// Second decrease
limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(20), 100, false);
Assert.assertEquals(56, limit.getLimit());
}
|
public static <NodeT, EdgeT> Set<NodeT> reachableNodes(
Network<NodeT, EdgeT> network, Set<NodeT> startNodes, Set<NodeT> endNodes) {
Set<NodeT> visitedNodes = new HashSet<>();
Queue<NodeT> queuedNodes = new ArrayDeque<>();
queuedNodes.addAll(startNodes);
// Perform a breadth-first traversal rooted at the input node.
while (!queuedNodes.isEmpty()) {
NodeT currentNode = queuedNodes.remove();
// If we have already visited this node or it is a terminal node then do not add any
// successors.
if (!visitedNodes.add(currentNode) || endNodes.contains(currentNode)) {
continue;
}
queuedNodes.addAll(network.successors(currentNode));
}
return visitedNodes;
}
|
@Test
public void testReachableNodesWithEmptyNetwork() {
assertThat(
Networks.reachableNodes(
createEmptyNetwork(), Collections.emptySet(), Collections.emptySet()),
empty());
}
|
@Override
protected boolean isStepCompleted(@NonNull Context context) {
return isContactsPermComplete(context) && isNotificationPermComplete(context);
}
|
@Test
public void testKeyboardEnabledAndDefaultButDictionaryDisabled() {
final String flatASKComponent =
new ComponentName(BuildConfig.APPLICATION_ID, SoftKeyboard.class.getName())
.flattenToString();
Settings.Secure.putString(
getApplicationContext().getContentResolver(),
Settings.Secure.ENABLED_INPUT_METHODS,
flatASKComponent);
Settings.Secure.putString(
getApplicationContext().getContentResolver(),
Settings.Secure.DEFAULT_INPUT_METHOD,
flatASKComponent);
SharedPrefsHelper.setPrefsValue(R.string.settings_key_use_contacts_dictionary, false);
WizardPermissionsFragment fragment = startFragment();
Assert.assertTrue(fragment.isStepCompleted(getApplicationContext()));
var contacts = fragment.getView().findViewById(R.id.contacts_permission_group);
Assert.assertEquals(View.GONE, contacts.getVisibility());
}
|
static ColumnExtractor create(final Column column) {
final int index = column.index();
Preconditions.checkArgument(index >= 0, "negative index: " + index);
return column.namespace() == Namespace.KEY
? new KeyColumnExtractor(index)
: new ValueColumnExtractor(index);
}
|
@Test
public void shouldExtractWindowedKeyColumn() {
// Given:
when(column.namespace()).thenReturn(Namespace.KEY);
when(column.index()).thenReturn(0);
when(key.get(0)).thenReturn("some value");
final ColumnExtractor extractor = TimestampColumnExtractors.create(column);
// When:
final Object result = extractor.extract(windowedKey, value);
// Then:
assertThat(result, is("some value"));
}
|
public Result parse(final String string) throws DateNotParsableException {
return this.parse(string, new Date());
}
|
@Test
public void testUTCTZ() throws Exception {
NaturalDateParser.Result today = naturalDateParserUtc.parse("today");
assertThat(today.getFrom()).as("From should not be null").isNotNull();
assertThat(today.getTo()).as("To should not be null").isNotNull();
assertThat(today.getDateTimeZone().getID()).as("should have the Etc/UTC as Timezone").isEqualTo("Etc/UTC");
}
|
public ResourceMethodDescriptor process(final ServerResourceContext context)
{
String path = context.getRequestURI().getRawPath();
if (path.length() < 2)
{
throw new RoutingException(HttpStatus.S_404_NOT_FOUND.getCode());
}
if (path.charAt(0) == '/')
{
path = path.substring(1);
}
Queue<String> remainingPath = new LinkedList<>(Arrays.asList(SLASH_PATTERN.split(path)));
String rootPath = "/" + remainingPath.poll();
ResourceModel currentResource;
try
{
currentResource =
_pathRootResourceMap.get(URLDecoder.decode(rootPath,
RestConstants.DEFAULT_CHARSET_NAME));
}
catch (UnsupportedEncodingException e)
{
throw new RestLiInternalException("UnsupportedEncodingException while trying to decode the root path",
e);
}
if (currentResource == null)
{
throw new RoutingException(String.format("No root resource defined for path '%s'",
rootPath),
HttpStatus.S_404_NOT_FOUND.getCode());
}
return processResourceTree(currentResource, context, remainingPath);
}
|
@Test
public void failsOnRootResourceNotFound() throws URISyntaxException
{
final TestSetup setup = new TestSetup();
final RestLiRouter router = setup._router;
final ServerResourceContext context = setup._context;
doReturn(new URI("/root")).when(context).getRequestURI();
final RoutingException e = runAndCatch(() -> router.process(context), RoutingException.class);
Assert.assertEquals(e.getStatus(), HttpStatus.S_404_NOT_FOUND.getCode());
}
|
@Override
public void execute() {
DescribeTableResponse result
= ddbClient.describeTable(DescribeTableRequest.builder().tableName(determineTableName()).build());
Message msg = getMessageForResponse(exchange);
msg.setHeader(Ddb2Constants.TABLE_NAME, result.table().tableName());
msg.setHeader(Ddb2Constants.TABLE_STATUS, result.table().tableStatus());
msg.setHeader(Ddb2Constants.CREATION_DATE, result.table().creationDateTime());
msg.setHeader(Ddb2Constants.ITEM_COUNT, result.table().itemCount());
msg.setHeader(Ddb2Constants.KEY_SCHEMA, result.table().keySchema());
msg.setHeader(Ddb2Constants.READ_CAPACITY, result.table().provisionedThroughput().readCapacityUnits());
msg.setHeader(Ddb2Constants.WRITE_CAPACITY, result.table().provisionedThroughput().writeCapacityUnits());
msg.setHeader(Ddb2Constants.TABLE_SIZE, result.table().tableSizeBytes());
}
|
@Test
public void testExecute() {
command.execute();
List<KeySchemaElement> keySchema = new ArrayList<>();
keySchema.add(KeySchemaElement.builder().attributeName("name").build());
assertEquals("FULL_DESCRIBE_TABLE", ddbClient.describeTableRequest.tableName());
assertEquals("FULL_DESCRIBE_TABLE", exchange.getIn().getHeader(Ddb2Constants.TABLE_NAME));
assertEquals(TableStatus.ACTIVE, exchange.getIn().getHeader(Ddb2Constants.TABLE_STATUS));
assertEquals(100L, exchange.getIn().getHeader(Ddb2Constants.ITEM_COUNT));
assertEquals(keySchema, exchange.getIn().getHeader(Ddb2Constants.KEY_SCHEMA));
assertEquals(20L, exchange.getIn().getHeader(Ddb2Constants.READ_CAPACITY));
assertEquals(10L, exchange.getIn().getHeader(Ddb2Constants.WRITE_CAPACITY));
assertEquals(1000L, exchange.getIn().getHeader(Ddb2Constants.TABLE_SIZE));
}
|
public static CsvReader getReader(CsvReadConfig config) {
return new CsvReader(config);
}
|
@Test
public void readTest2() {
CsvReader reader = CsvUtil.getReader();
reader.read(FileUtil.getUtf8Reader("test.csv"), (csvRow) -> {
// Only one row, so assert the values directly.
assertEquals("sss,sss", csvRow.get(0));
assertEquals("姓名", csvRow.get(1));
assertEquals("性别", csvRow.get(2));
assertEquals("关注\"对象\"", csvRow.get(3));
assertEquals("年龄", csvRow.get(4));
assertEquals("", csvRow.get(5));
assertEquals("\"", csvRow.get(6));
});
}
|
public ScheduledExecutorService build() {
final InstrumentedThreadFactory instrumentedThreadFactory = new InstrumentedThreadFactory(threadFactory,
environment.getMetricRegistry(), nameFormat);
final ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(poolSize, instrumentedThreadFactory, handler);
executor.setRemoveOnCancelPolicy(removeOnCancel);
environment.manage(new ExecutorServiceManager(executor, shutdownTime, nameFormat));
return executor;
}
|
@Test
void testBasicInvocation() {
final String poolName = this.getClass().getSimpleName();
final ScheduledExecutorServiceBuilder test = new ScheduledExecutorServiceBuilder(this.le,
poolName,
false);
this.execTracker = test.build();
assertThat(this.execTracker)
.isInstanceOfSatisfying(ScheduledThreadPoolExecutor.class, exe -> assertThat(exe)
.satisfies(castedExec -> assertThat(castedExec.getRemoveOnCancelPolicy()).isFalse())
.satisfies(castedExec -> assertThat(castedExec.getThreadFactory()).isInstanceOf(InstrumentedThreadFactory.class)));
final ArgumentCaptor<ExecutorServiceManager> esmCaptor = ArgumentCaptor.forClass(ExecutorServiceManager.class);
verify(this.le).manage(esmCaptor.capture());
final ExecutorServiceManager esmCaptured = esmCaptor.getValue();
assertThat(esmCaptured.getExecutor()).isSameAs(this.execTracker);
assertThat(esmCaptured.getShutdownPeriod()).isEqualTo(DEFAULT_SHUTDOWN_PERIOD);
assertThat(esmCaptured.getPoolName()).isSameAs(poolName);
}
|
public static <T> AsList<T> asList() {
return new AsList<>(null, false);
}
|
@Test
@Category(ValidatesRunner.class)
public void testWindowedListSideInput() {
final PCollectionView<List<Integer>> view =
pipeline
.apply(
"CreateSideInput",
Create.timestamped(
TimestampedValue.of(11, new Instant(1)),
TimestampedValue.of(13, new Instant(1)),
TimestampedValue.of(17, new Instant(1)),
TimestampedValue.of(23, new Instant(1)),
TimestampedValue.of(31, new Instant(11)),
TimestampedValue.of(33, new Instant(11)),
TimestampedValue.of(37, new Instant(11)),
TimestampedValue.of(43, new Instant(11))))
.apply("SideWindowInto", Window.into(FixedWindows.of(Duration.millis(10))))
.apply(View.asList());
PCollection<Integer> output =
pipeline
.apply(
"CreateMainInput",
Create.timestamped(
TimestampedValue.of(29, new Instant(1)),
TimestampedValue.of(35, new Instant(11))))
.apply("MainWindowInto", Window.into(FixedWindows.of(Duration.millis(10))))
.apply(
"OutputSideInputs",
ParDo.of(
new DoFn<Integer, Integer>() {
@ProcessElement
public void processElement(ProcessContext c) {
checkArgument(c.sideInput(view).size() == 4);
checkArgument(
c.sideInput(view).get(0).equals(c.sideInput(view).get(0)));
for (Integer i : c.sideInput(view)) {
c.output(i);
}
}
})
.withSideInputs(view));
PAssert.that(output).containsInAnyOrder(11, 13, 17, 23, 31, 33, 37, 43);
pipeline.run();
}
|
public double getCenterY() {
return (this.top + this.bottom) / 2;
}
|
@Test
public void getCenterYTest() {
Rectangle rectangle = create(1, 2, 3, 4);
Assert.assertEquals(3, rectangle.getCenterY(), 0);
}
|
@Entrance
public final void combine(@SourceFrom int value, @Arg String name, @Arg boolean status) {
int t = DICT.lookup(name).intValue();
int t4 = t * 4;
totalNum++;
if (!status || value > t4) {
return;
}
if (value > t) {
tNum++;
} else {
sNum++;
}
}
|
@Test
public void testCombine() {
ApdexMetrics apdex1 = new ApdexMetricsImpl();
apdex1.combine(200, "foo", true);
apdex1.combine(300, "bar", true);
apdex1.combine(200, "foo", true);
apdex1.combine(1500, "bar", true);
ApdexMetrics apdex2 = new ApdexMetricsImpl();
apdex2.combine(200, "foo", true);
apdex2.combine(300, "bar", false);
apdex2.combine(200, "foo", true);
apdex2.combine(1500, "bar", false);
apdex2.combine(200, "foo", true);
apdex2.combine(5000, "bar", true);
apdex1.combine(apdex2);
apdex1.calculate();
assertThat(apdex1.getValue()).isEqualTo(6500);
}
|
@Override
public boolean match(Message msg, StreamRule rule) {
Object rawField = msg.getField(rule.getField());
if (rawField == null) {
return rule.getInverted();
}
if (rawField instanceof String) {
String field = (String) rawField;
// XOR: a non-empty field matches unless the rule is inverted.
return rule.getInverted() ^ !field.trim().isEmpty();
}
return !rule.getInverted();
}
|
@Test
public void testBasicNonMatch() throws Exception {
StreamRule rule = getSampleRule();
rule.setField("nonexistentField");
rule.setType(StreamRuleType.PRESENCE);
rule.setInverted(false);
Message message = getSampleMessage();
StreamRuleMatcher matcher = getMatcher(rule);
Boolean result = matcher.match(message, rule);
assertFalse(result);
}
|
public static ILogger getLogger(@Nonnull Class<?> clazz) {
checkNotNull(clazz, "class must not be null");
return getLoggerInternal(clazz.getName());
}
|
@Test
public void getLogger_whenJdk_thenReturnStandardLogger() {
isolatedLoggingRule.setLoggingType(LOGGING_TYPE_JDK);
assertInstanceOf(StandardLoggerFactory.StandardLogger.class, Logger.getLogger(getClass()));
}
|
synchronized void ensureTokenInitialized() throws IOException {
// we haven't inited yet, or we used to have a token but it expired
if (!hasInitedToken || (action != null && !action.isValid())) {
//since we don't already have a token, go get one
Token<?> token = fs.getDelegationToken(null);
// security might be disabled
if (token != null) {
fs.setDelegationToken(token);
addRenewAction(fs);
LOG.debug("Created new DT for {}", token.getService());
}
hasInitedToken = true;
}
}
|
@Test
public void testInitWithNoTokens() throws IOException, URISyntaxException {
Configuration conf = new Configuration();
DummyFs fs = spy(new DummyFs());
doReturn(null).when(fs).getDelegationToken(anyString());
fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
fs.tokenAspect.ensureTokenInitialized();
// No token will be selected.
verify(fs, never()).setDelegationToken(
Mockito.<Token<? extends TokenIdentifier>> any());
}
|
public File dumpHeap()
throws MalformedObjectNameException, InstanceNotFoundException, ReflectionException,
MBeanException, IOException {
return dumpHeap(localDumpFolder);
}
|
@Test
public void heapDumpTwice() throws Exception {
File folder = tempFolder.newFolder();
File dump1 = MemoryMonitor.dumpHeap(folder);
assertNotNull(dump1);
assertTrue(dump1.exists());
assertThat(dump1.getParentFile(), Matchers.equalTo(folder));
File dump2 = MemoryMonitor.dumpHeap(folder);
assertNotNull(dump2);
assertTrue(dump2.exists());
assertThat(dump2.getParentFile(), Matchers.equalTo(folder));
}
|
@Nonnull
public static String getAfter(@Nonnull String text, @Nonnull String after) {
int i = text.lastIndexOf(after);
if (i < 0) return text;
return text.substring(i + after.length());
}
|
@Test
void testGetAfter() {
assertEquals("", StringUtil.getAfter("", ""));
assertEquals("d", StringUtil.getAfter("a b c d", "c "));
assertEquals("a b c d", StringUtil.getAfter("a b c d", "z"));
}
|
public static List<String> finalDestination(List<String> elements) {
if (isMagicPath(elements)) {
List<String> destDir = magicPathParents(elements);
List<String> children = magicPathChildren(elements);
checkArgument(!children.isEmpty(), "No path found under the prefix " +
MAGIC_PATH_PREFIX);
ArrayList<String> dest = new ArrayList<>(destDir);
if (containsBasePath(children)) {
// there's a base marker in the path
List<String> baseChildren = basePathChildren(children);
checkArgument(!baseChildren.isEmpty(),
"No path found under " + BASE);
dest.addAll(baseChildren);
} else {
dest.add(filename(children));
}
return dest;
} else {
return elements;
}
}
|
@Test
public void testFinalDestinationMagic1() {
assertEquals(l("first", "2"),
finalDestination(l("first", MAGIC_PATH_PREFIX, "2")));
}
|
static IndexComponentFilter findBestComponentFilter(
IndexType type,
List<IndexComponentCandidate> candidates,
QueryDataType converterType
) {
// First look for equality filters, assuming that they are more selective than ranges
IndexComponentFilter equalityComponentFilter = searchForEquality(candidates, converterType);
if (equalityComponentFilter != null) {
return equalityComponentFilter;
}
// Look for range filters
return searchForRange(type, candidates, converterType);
}
|
@Test
public void when_upperBoundRangeFilterPresentAndNoBetterChoiceAndSortedIndex_then_itIsUsed() {
IndexComponentFilter bestFilter = IndexComponentFilterResolver.findBestComponentFilter(
indexType, WITH_UPPER_BOUND_RANGE_AS_BEST_CANDIDATES, QUERY_DATA_TYPE
);
if (indexType == IndexType.SORTED) {
assertEquals(UPPER_BOUND_RANGE_CANDIDATE.getFilter(), bestFilter.getFilter());
} else {
assertNull(bestFilter);
}
}
|
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
CommandFormat cf =
new CommandFormat(1, Integer.MAX_VALUE, OPTION_FOLLOW_LINK,
OPTION_FOLLOW_ARG_LINK, null);
cf.parse(args);
if (cf.getOpt(OPTION_FOLLOW_LINK)) {
getOptions().setFollowLink(true);
} else if (cf.getOpt(OPTION_FOLLOW_ARG_LINK)) {
getOptions().setFollowArgLink(true);
}
// search for the first non-path argument (i.e. one starting with "-"), then capture
// and remove the remaining arguments as expressions
LinkedList<String> expressionArgs = new LinkedList<String>();
Iterator<String> it = args.iterator();
boolean isPath = true;
while (it.hasNext()) {
String arg = it.next();
if (isPath) {
if (arg.startsWith("-")) {
isPath = false;
}
}
if (!isPath) {
expressionArgs.add(arg);
it.remove();
}
}
if (args.isEmpty()) {
args.add(Path.CUR_DIR);
}
Expression expression = parseExpression(expressionArgs);
if (!expression.isAction()) {
Expression and = getExpression(And.class);
Deque<Expression> children = new LinkedList<Expression>();
children.add(getExpression(Print.class));
children.add(expression);
and.addChildren(children);
expression = and;
}
setRootExpression(expression);
}
|
@Test
public void processOptionsExpression() throws IOException {
Find find = new Find();
find.setConf(conf);
String paths = "path1 path2 path3";
String args = "-L -H " + paths + " -print -name test";
LinkedList<String> argsList = getArgs(args);
find.processOptions(argsList);
LinkedList<String> pathList = getArgs(paths);
assertEquals(pathList, argsList);
}
|
@HighFrequencyInvocation
public boolean isEncryptColumn(final String logicColumnName) {
return columns.containsKey(logicColumnName);
}
|
@Test
void assertIsEncryptColumn() {
assertTrue(encryptTable.isEncryptColumn("logicColumn"));
}
|
public boolean parse(final String s) {
if (StringUtils.isBlank(s)) {
return false;
}
final String[] tmps = Utils.parsePeerId(s);
if (tmps.length < 2 || tmps.length > 4) {
return false;
}
try {
final int port = Integer.parseInt(tmps[1]);
this.endpoint = new Endpoint(tmps[0], port);
switch (tmps.length) {
case 3:
this.idx = Integer.parseInt(tmps[2]);
break;
case 4:
if (tmps[2].equals("")) {
this.idx = 0;
} else {
this.idx = Integer.parseInt(tmps[2]);
}
this.priority = Integer.parseInt(tmps[3]);
break;
default:
break;
}
this.str = null;
return true;
} catch (final Exception e) {
LOG.error("Parse peer from string failed: {}.", s, e);
return false;
}
}
|
@Test
public void testToStringParseFailed() {
final PeerId pp = new PeerId();
final String str1 = "";
final String str2 = "192.168.1.1";
final String str3 = "92.168.1.1:8081::1:2";
assertFalse(pp.parse(str1));
assertFalse(pp.parse(str2));
assertFalse(pp.parse(str3));
}
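For the happy path, parse accepts two- to four-part strings ("host:port", optionally followed by idx and priority). A minimal sketch, assuming the usual getIdx()/getPriority() accessors exist on PeerId (values are illustrative):
@Test
public void testParseWithIdxAndPriority() {
    final PeerId pp = new PeerId();
    // four parts: host, port, idx, priority
    assertTrue(pp.parse("192.168.1.1:8081:1:100"));
    assertEquals(1, pp.getIdx());
    assertEquals(100, pp.getPriority());
}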
|
public int asInteger() {
checkState(type == Type.INTEGER, "Value is not an integer");
return Integer.parseInt(value);
}
|
@Test
public void asInteger() {
ConfigProperty p = defineProperty("foo", INTEGER, "123", "Foo Prop");
validate(p, "foo", INTEGER, "123", "123");
assertEquals("incorrect value", 123, p.asInteger());
assertEquals("incorrect value", 123L, p.asLong());
}
|
@Override
public Mono<Void> removeProductFromFavourites(int productId, String userId) {
return this.favouriteProductRepository.deleteByProductIdAndUserId(productId, userId);
}
|
@Test
void removeProductFromFavourites_ReturnsEmptyMono() {
// given
doReturn(Mono.empty()).when(this.favouriteProductRepository)
.deleteByProductIdAndUserId(1, "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c");
// when
StepVerifier.create(this.service.removeProductFromFavourites(1, "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c"))
// then
.verifyComplete();
verify(this.favouriteProductRepository)
.deleteByProductIdAndUserId(1, "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c");
}
|
public static String calculateMemoryWithDefaultOverhead(String memory) {
long memoryMB = convertToBytes(memory) / M;
long memoryOverheadMB = Math.max((long) (memoryMB * 0.1f), MINIMUM_OVERHEAD);
return (memoryMB + memoryOverheadMB) + "Mi";
}
|
@Test
void testExceptionNoValidNumber() {
assertThrows(NumberFormatException.class, () -> {
K8sUtils.calculateMemoryWithDefaultOverhead("NoValidNumber10000000Tb");
});
}
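The overhead formula adds max(10% of the requested memory in MB, MINIMUM_OVERHEAD). A minimal sketch of the arithmetic, assuming convertToBytes accepts the Kubernetes-style "4Gi" quantity and that MINIMUM_OVERHEAD is no larger than 409, so the 10% branch wins:
@Test
void testOverheadForLargeRequest() {
    // 4Gi = 4096 MB; (long) (4096 * 0.1f) = 409, so the result is 4096 + 409 = 4505Mi
    assertEquals("4505Mi", K8sUtils.calculateMemoryWithDefaultOverhead("4Gi"));
}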
|
public static TypeInformation<?> readTypeInfo(String typeString) {
final List<Token> tokens = tokenize(typeString);
final TokenConverter converter = new TokenConverter(typeString, tokens);
return converter.convert();
}
|
@Test
void testSyntaxError1() {
assertThatThrownBy(() -> TypeStringUtils.readTypeInfo("ROW<<f0 DECIMAL, f1 TINYINT>"))
.isInstanceOf(ValidationException.class); // additional <
}
|
public static Set<String> findReferencedColumn(String columnName, ResolvedSchema schema) {
Column column =
schema.getColumn(columnName)
.orElseThrow(
() ->
new ValidationException(
String.format(
"The input column %s doesn't exist in the schema.",
columnName)));
if (!(column instanceof Column.ComputedColumn)) {
return Collections.emptySet();
}
ColumnReferenceVisitor visitor =
new ColumnReferenceVisitor(
// the input ref index is based on a projection of non-computed columns
schema.getColumns().stream()
.filter(c -> !(c instanceof Column.ComputedColumn))
.map(Column::getName)
.collect(Collectors.toList()));
return visitor.visit(((Column.ComputedColumn) column).getExpression());
}
|
@Test
void testFindReferencedColumn() {
assertThat(ColumnReferenceFinder.findReferencedColumn("b", resolvedSchema))
.isEqualTo(Collections.emptySet());
assertThat(ColumnReferenceFinder.findReferencedColumn("a", resolvedSchema))
.containsExactlyInAnyOrder("b");
assertThat(ColumnReferenceFinder.findReferencedColumn("c", resolvedSchema))
.containsExactlyInAnyOrder("d", "e");
assertThat(ColumnReferenceFinder.findReferencedColumn("ts", resolvedSchema))
.containsExactlyInAnyOrder("tuple");
assertThat(ColumnReferenceFinder.findWatermarkReferencedColumn(resolvedSchema))
.containsExactlyInAnyOrder("ts", "g");
}
|
@Override
public RexNode visit(CallExpression call) {
boolean isBatchMode = unwrapContext(relBuilder).isBatchMode();
for (CallExpressionConvertRule rule : getFunctionConvertChain(isBatchMode)) {
Optional<RexNode> converted = rule.convert(call, newFunctionContext());
if (converted.isPresent()) {
return converted.get();
}
}
throw new RuntimeException("Unknown call expression: " + call);
}
|
@Test
void testDecimalLiteral() {
BigDecimal value = new BigDecimal("12345678.999");
RexNode rex = converter.visit(valueLiteral(value));
assertThat(((RexLiteral) rex).getValueAs(BigDecimal.class)).isEqualTo(value);
assertThat(rex.getType().getSqlTypeName()).isEqualTo(SqlTypeName.DECIMAL);
assertThat(rex.getType().getPrecision()).isEqualTo(11);
assertThat(rex.getType().getScale()).isEqualTo(3);
}
|
static Optional<File> getOptionalFileFromURLFile(URL retrieved) {
File toReturn = new File(retrieved.getFile());
logger.debug(TO_RETURN_TEMPLATE, toReturn);
logger.debug(TO_RETURN_GETABSOLUTEPATH_TEMPLATE, toReturn.getAbsolutePath());
return Optional.of(toReturn);
}
|
@Test
void getOptionalFileFromURLFile() {
URL resourceUrl = getResourceUrl();
Optional<File> retrieved = MemoryFileUtils.getOptionalFileFromURLFile(resourceUrl);
assertThat(retrieved).isNotNull();
assertThat(retrieved.isPresent()).isTrue();
assertThat(retrieved).get().isInstanceOf(File.class);
}
|
@Override
public Publisher<Exchange> to(String uri, Object data) {
String streamName = requestedUriToStream.computeIfAbsent(uri, camelUri -> {
try {
String uuid = context.getUuidGenerator().generateUuid();
context.addRoutes(new RouteBuilder() {
@Override
public void configure() throws Exception {
from("reactive-streams:" + uuid)
.to(camelUri);
}
});
return uuid;
} catch (Exception e) {
throw new IllegalStateException("Unable to create requested reactive stream from direct URI: " + uri, e);
}
});
return toStream(streamName, data);
}
|
@Test
public void testTo() throws Exception {
context.start();
Set<String> values = Collections.synchronizedSet(new TreeSet<>());
CountDownLatch latch = new CountDownLatch(3);
Flux.just(1, 2, 3)
.flatMap(e -> crs.to("bean:hello", e, String.class))
.doOnNext(values::add)
.doOnNext(res -> latch.countDown())
.subscribe();
assertTrue(latch.await(2, TimeUnit.SECONDS));
assertEquals(new TreeSet<>(Arrays.asList("Hello 1", "Hello 2", "Hello 3")), values);
}
|
@Override
public void initTransactions() {
verifyNotClosed();
verifyNotFenced();
if (this.transactionInitialized) {
throw new IllegalStateException("MockProducer has already been initialized for transactions.");
}
if (this.initTransactionException != null) {
throw this.initTransactionException;
}
this.transactionInitialized = true;
this.transactionInFlight = false;
this.transactionCommitted = false;
this.transactionAborted = false;
this.sentOffsets = false;
}
|
@Test
public void shouldThrowOnAbortTransactionIfNoTransactionGotStarted() {
buildMockProducer(true);
producer.initTransactions();
assertThrows(IllegalStateException.class, producer::abortTransaction);
}
|
@Override
@Transactional(rollbackFor = Exception.class)
@CacheEvict(value = RedisKeyConstants.ROLE, key = "#id")
@LogRecord(type = SYSTEM_ROLE_TYPE, subType = SYSTEM_ROLE_DELETE_SUB_TYPE, bizNo = "{{#id}}",
success = SYSTEM_ROLE_DELETE_SUCCESS)
public void deleteRole(Long id) {
// 1. Validate that the role can be updated
RoleDO role = validateRoleForUpdate(id);
// 2.1 Mark the role as deleted
roleMapper.deleteById(id);
// 2.2 Delete associated data
permissionService.processRoleDeleted(id);
// 3. Record the operation log context
LogRecordContext.putVariable(DiffParseFunction.OLD_OBJECT, BeanUtils.toBean(role, RoleSaveReqVO.class));
LogRecordContext.putVariable("role", role);
}
|
@Test
public void testDeleteRole() {
// mock data
RoleDO roleDO = randomPojo(RoleDO.class, o -> o.setType(RoleTypeEnum.CUSTOM.getType()));
roleMapper.insert(roleDO);
// prepare parameters
Long id = roleDO.getId();
// invoke
roleService.deleteRole(id);
// assert
assertNull(roleMapper.selectById(id));
// verify associated data is deleted
verify(permissionService).processRoleDeleted(id);
}
|
public abstract byte[] encode(MutableSpan input);
|
@Test void errorSpan_JSON_V2() {
assertThat(new String(encoder.encode(errorSpan), UTF_8))
.isEqualTo(
"{\"traceId\":\"dc955a1d4768875d\",\"id\":\"dc955a1d4768875d\",\"kind\":\"CLIENT\",\"localEndpoint\":{\"serviceName\":\"isao01\"},\"tags\":{\"error\":\"\"}}");
}
|
@Override
public void pluginLoaded(GoPluginDescriptor descriptor) {
loadTaskConfigIntoPreferenceStore(descriptor);
}
|
@Test
public void shouldLoadPreferencesOnlyForTaskPlugins() {
final GoPluginDescriptor descriptor = mock(GoPluginDescriptor.class);
String pluginId = "test-plugin-id";
when(descriptor.id()).thenReturn(pluginId);
final Task task = mock(Task.class);
TaskConfig config = new TaskConfig();
TaskView taskView = mock(TaskView.class);
when(task.config()).thenReturn(config);
when(task.view()).thenReturn(taskView);
PluginManager pluginManager = mock(PluginManager.class);
final TaskExtension taskExtension = mock(TaskExtension.class);
when(taskExtension.canHandlePlugin(pluginId)).thenReturn(false);
doAnswer(invocationOnMock -> {
final Action<Task> action = (Action<Task>) invocationOnMock.getArguments()[1];
action.execute(task, descriptor);
return null;
}).when(taskExtension).doOnTask(eq(pluginId), any(Action.class));
PluggableTaskPreferenceLoader pluggableTaskPreferenceLoader = new PluggableTaskPreferenceLoader(pluginManager, taskExtension);
pluggableTaskPreferenceLoader.pluginLoaded(descriptor);
assertThat(PluggableTaskConfigStore.store().hasPreferenceFor(pluginId), is(false));
verify(pluginManager).addPluginChangeListener(pluggableTaskPreferenceLoader);
}
|
public Mono<Object> genericInvoker(final String body, final MetaData metaData, final ServerWebExchange exchange) throws ShenyuException {
String referenceKey = metaData.getPath();
String namespace = "";
if (CollectionUtils.isNotEmpty(exchange.getRequest().getHeaders().get(Constants.NAMESPACE))) {
namespace = exchange.getRequest().getHeaders().get(Constants.NAMESPACE).get(0);
referenceKey = namespace + ":" + referenceKey;
}
ReferenceConfig<GenericService> reference = ApacheDubboConfigCache.getInstance().get(referenceKey);
if (StringUtils.isEmpty(reference.getInterface())) {
ApacheDubboConfigCache.getInstance().invalidate(referenceKey);
reference = ApacheDubboConfigCache.getInstance().initRefN(metaData, namespace);
}
GenericService genericService = reference.get();
Pair<String[], Object[]> pair;
if (StringUtils.isBlank(metaData.getParameterTypes()) || ParamCheckUtils.bodyIsEmpty(body)) {
pair = new ImmutablePair<>(new String[]{}, new Object[]{});
} else {
pair = dubboParamResolveService.buildParameter(body, metaData.getParameterTypes());
}
return Mono.fromFuture(invokeAsync(genericService, metaData.getMethodName(), pair.getLeft(), pair.getRight()).thenApply(ret -> {
if (Objects.isNull(ret)) {
ret = Constants.DUBBO_RPC_RESULT_EMPTY;
}
exchange.getAttributes().put(Constants.RPC_RESULT, ret);
exchange.getAttributes().put(Constants.CLIENT_RESPONSE_RESULT_TYPE, ResultEnum.SUCCESS.getName());
return ret;
})).onErrorMap(exception -> exception instanceof GenericException ? new ShenyuException(((GenericException) exception).getExceptionMessage()) : new ShenyuException(exception));
}
|
@Test
@SuppressWarnings(value = "unchecked")
public void genericInvokerTest() throws IllegalAccessException, NoSuchFieldException {
GenericService genericService = mock(GenericService.class);
when(referenceConfig.get()).thenReturn(genericService);
when(referenceConfig.getInterface()).thenReturn(PATH);
CompletableFuture<Object> future = new CompletableFuture<>();
when(genericService.$invoke(METHOD_NAME, LEFT, RIGHT)).thenReturn(future);
ApacheDubboConfigCache apacheDubboConfigCache = ApacheDubboConfigCache.getInstance();
Field field = ApacheDubboConfigCache.class.getDeclaredField("cache");
field.setAccessible(true);
((LoadingCache<String, ReferenceConfig<GenericService>>) field.get(apacheDubboConfigCache)).put(PATH, referenceConfig);
ApacheDubboProxyService apacheDubboProxyService = new ApacheDubboProxyService(new BodyParamResolveServiceImpl());
apacheDubboProxyService.genericInvoker("", metaData, exchange);
future.complete("success");
}
|
public MethodBuilder serviceId(String serviceId) {
this.serviceId = serviceId;
return getThis();
}
|
@Test
void serviceId() {
MethodBuilder builder = MethodBuilder.newBuilder();
builder.serviceId("serviceId");
Assertions.assertEquals("serviceId", builder.build().getServiceId());
}
|
@Override
public boolean removeExistingSetCookie(String cookieName) {
String cookieNamePrefix = cookieName + "=";
boolean dirty = false;
Headers filtered = new Headers();
for (Header hdr : getHeaders().entries()) {
if (HttpHeaderNames.SET_COOKIE.equals(hdr.getName())) {
String value = hdr.getValue();
// Strip out this set-cookie as requested.
if (value.startsWith(cookieNamePrefix)) {
// Don't copy it.
dirty = true;
} else {
// Copy all other headers.
filtered.add(hdr.getName(), hdr.getValue());
}
} else {
// Copy all other headers.
filtered.add(hdr.getName(), hdr.getValue());
}
}
if (dirty) {
setHeaders(filtered);
}
return dirty;
}
|
@Test
void testRemoveExistingSetCookie() {
response.getHeaders()
.add(
"Set-Cookie",
"c1=1234; Max-Age=-1; Expires=Tue, 01 Sep 2015 22:49:57 GMT; Path=/; Domain=.netflix.com");
response.getHeaders()
.add(
"Set-Cookie",
"c2=4567; Max-Age=-1; Expires=Tue, 01 Sep 2015 22:49:57 GMT; Path=/; Domain=.netflix.com");
response.removeExistingSetCookie("c1");
assertEquals(1, response.getHeaders().size());
assertFalse(response.hasSetCookieWithName("c1"));
assertTrue(response.hasSetCookieWithName("c2"));
}
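Matching is by the literal "name=" prefix, so a cookie whose name merely starts with the requested name is preserved. A minimal sketch (cookie values are illustrative):
@Test
void testRemoveExistingSetCookieDoesNotMatchLongerNames() {
    response.getHeaders().add("Set-Cookie", "c1=1234; Path=/");
    response.getHeaders().add("Set-Cookie", "c11=5678; Path=/");
    response.removeExistingSetCookie("c1");
    // "c11=" does not start with "c1=", so it survives
    assertFalse(response.hasSetCookieWithName("c1"));
    assertTrue(response.hasSetCookieWithName("c11"));
}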
|
@Override
public int run(InputStream in, PrintStream out, PrintStream err, List<String> args) throws Exception {
OptionParser optParser = new OptionParser();
OptionSpec<String> codecOpt = Util.compressionCodecOptionWithDefault(optParser, DataFileConstants.NULL_CODEC);
OptionSpec<Integer> levelOpt = Util.compressionLevelOption(optParser);
OptionSet opts = optParser.parse(args.toArray(new String[0]));
List<String> nargs = (List<String>) opts.nonOptionArguments();
if (nargs.size() > 2) {
err.println("Expected at most an input file and output file.");
optParser.printHelpOn(err);
return 1;
}
InputStream input = in;
boolean inputNeedsClosing = false;
if (nargs.size() > 0 && !nargs.get(0).equals("-")) {
input = Util.openFromFS(nargs.get(0));
inputNeedsClosing = true;
}
OutputStream output = out;
boolean outputNeedsClosing = false;
if (nargs.size() > 1 && !nargs.get(1).equals("-")) {
output = Util.createFromFS(nargs.get(1));
outputNeedsClosing = true;
}
DataFileStream<GenericRecord> reader = new DataFileStream<>(input, new GenericDatumReader<>());
Schema schema = reader.getSchema();
DataFileWriter<GenericRecord> writer = new DataFileWriter<>(new GenericDatumWriter<>());
// unlike the other Avro tools, we default to a null codec, not deflate
CodecFactory codec = Util.codecFactory(opts, codecOpt, levelOpt, DataFileConstants.NULL_CODEC);
writer.setCodec(codec);
for (String key : reader.getMetaKeys()) {
if (!DataFileWriter.isReservedMeta(key)) {
writer.setMeta(key, reader.getMeta(key));
}
}
writer.create(schema, output);
writer.appendAllFrom(reader, true);
writer.flush();
if (inputNeedsClosing) {
input.close();
}
if (outputNeedsClosing) {
output.close();
}
writer.close();
return 0;
}
|
@Test
void recodec() throws Exception {
String metaKey = "myMetaKey";
String metaValue = "myMetaValue";
File inputFile = new File(DIR, "input.avro");
Schema schema = Schema.create(Type.STRING);
DataFileWriter<String> writer = new DataFileWriter<>(new GenericDatumWriter<String>(schema));
writer.setMeta(metaKey, metaValue).create(schema, inputFile);
// We write some garbage which should be quite compressible by deflate,
// but is complicated enough that deflate-9 will work better than deflate-1.
// These values were plucked from thin air and worked on the first try, so
// don't read too much into them.
for (int i = 0; i < 100000; i++) {
writer.append("" + i % 100);
}
writer.close();
File defaultOutputFile = new File(DIR, "default-output.avro");
File nullOutputFile = new File(DIR, "null-output.avro");
File deflateDefaultOutputFile = new File(DIR, "deflate-default-output.avro");
File deflate1OutputFile = new File(DIR, "deflate-1-output.avro");
File deflate9OutputFile = new File(DIR, "deflate-9-output.avro");
new RecodecTool().run(new FileInputStream(inputFile), new PrintStream(defaultOutputFile), null, new ArrayList<>());
new RecodecTool().run(new FileInputStream(inputFile), new PrintStream(nullOutputFile), null,
Collections.singletonList("--codec=null"));
new RecodecTool().run(new FileInputStream(inputFile), new PrintStream(deflateDefaultOutputFile), null,
Collections.singletonList("--codec=deflate"));
new RecodecTool().run(new FileInputStream(inputFile), new PrintStream(deflate1OutputFile), null,
asList("--codec=deflate", "--level=1"));
new RecodecTool().run(new FileInputStream(inputFile), new PrintStream(deflate9OutputFile), null,
asList("--codec=deflate", "--level=9"));
// We assume that metadata copying is orthogonal to codec selection, and
// so only test it for a single file.
try (DataFileReader<Void> reader = new DataFileReader<Void>(defaultOutputFile, new GenericDatumReader<>())) {
assertEquals(metaValue, reader.getMetaString(metaKey));
}
// The "default" codec should be the same as null.
assertEquals(defaultOutputFile.length(), nullOutputFile.length());
// All of the deflated files should be smaller than the null file.
assertLessThan(deflateDefaultOutputFile.length(), nullOutputFile.length());
assertLessThan(deflate1OutputFile.length(), nullOutputFile.length());
assertLessThan(deflate9OutputFile.length(), nullOutputFile.length());
// The "level 9" file should be smaller than the "level 1" file.
assertLessThan(deflate9OutputFile.length(), deflate1OutputFile.length());
}
|
@Override
public ElasticAgentPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
String pluginId = descriptor.id();
PluggableInstanceSettings pluggableInstanceSettings = null;
if (!extension.supportsClusterProfiles(pluginId)) {
pluggableInstanceSettings = getPluginSettingsAndView(descriptor, extension);
}
return new ElasticAgentPluginInfo(descriptor,
elasticElasticAgentProfileSettings(pluginId),
elasticClusterProfileSettings(pluginId),
image(pluginId),
pluggableInstanceSettings,
capabilities(pluginId));
}
|
@Test
public void shouldBuildPluginInfoWithoutClusterProfileSettingsForPluginsImplementedUsingv4Extension() {
GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build();
List<PluginConfiguration> elasticAgentProfileConfigurations = List.of(new PluginConfiguration("aws_password", new Metadata(true, false)));
PluginSettingsProperty property = new PluginSettingsProperty("ami-id", "ami-123");
PluginSettingsConfiguration pluginSettingsConfiguration = new PluginSettingsConfiguration();
pluginSettingsConfiguration.add(property);
Image icon = new Image("content_type", "data", "hash");
when(pluginManager.resolveExtensionVersion("plugin1", ELASTIC_AGENT_EXTENSION, SUPPORTED_VERSIONS)).thenReturn("1.0");
when(extension.getPluginSettingsConfiguration(descriptor.id())).thenReturn(pluginSettingsConfiguration);
when(extension.getPluginSettingsView(descriptor.id())).thenReturn("some html");
when(extension.getIcon(descriptor.id())).thenReturn(icon);
when(extension.getProfileMetadata(descriptor.id())).thenReturn(elasticAgentProfileConfigurations);
when(extension.getProfileView(descriptor.id())).thenReturn("elastic_agent_profile_view");
when(extension.supportsClusterProfiles("plugin1")).thenReturn(false);
ElasticAgentPluginInfoBuilder builder = new ElasticAgentPluginInfoBuilder(extension);
ElasticAgentPluginInfo pluginInfo = builder.pluginInfoFor(descriptor);
assertThat(pluginInfo.getDescriptor(), is(descriptor));
assertThat(pluginInfo.getExtensionName(), is("elastic-agent"));
assertThat(pluginInfo.getImage(), is(icon));
assertThat(pluginInfo.getElasticAgentProfileSettings(), is(new PluggableInstanceSettings(elasticAgentProfileConfigurations, new PluginView("elastic_agent_profile_view"))));
assertThat(pluginInfo.getClusterProfileSettings(), is(new PluggableInstanceSettings(null, null)));
assertThat(pluginInfo.getPluginSettings(), is(new PluggableInstanceSettings(builder.configurations(pluginSettingsConfiguration), new PluginView("some html"))));
assertFalse(pluginInfo.supportsStatusReport());
verify(extension, never()).getClusterProfileMetadata(any());
verify(extension, never()).getClusterProfileView(any());
}
|
@Override
public Map<String, Metric> getMetrics() {
final Map<String, Metric> gauges = new HashMap<>();
gauges.put("name", (Gauge<String>) runtime::getName);
gauges.put("vendor", (Gauge<String>) () -> String.format(Locale.US,
"%s %s %s (%s)",
runtime.getVmVendor(),
runtime.getVmName(),
runtime.getVmVersion(),
runtime.getSpecVersion()));
gauges.put("uptime", (Gauge<Long>) runtime::getUptime);
return Collections.unmodifiableMap(gauges);
}
|
@Test
public void hasAGaugeForTheJVMVendor() {
final Gauge<String> gauge = (Gauge<String>) gauges.getMetrics().get("vendor");
assertThat(gauge.getValue())
.isEqualTo("Oracle Corporation Java HotSpot(TM) 64-Bit Server VM 23.7-b01 (1.7)");
}
|
public LimitKey getKey() {
return key;
}
|
@Test
public void testLimitKey() {
Assert.assertEquals(LimitKey.SERVER, limitConfig.getKey());
}
|
public static FromMatchesFilter create(Jid address) {
return new FromMatchesFilter(address, address != null && address.hasNoResource());
}
|
@Test
public void autoCompareMatchingServiceJid() {
FromMatchesFilter filter = FromMatchesFilter.create(SERVICE_JID1);
Stanza packet = StanzaBuilder.buildMessage().build();
packet.setFrom(SERVICE_JID1);
assertTrue(filter.accept(packet));
packet.setFrom(SERVICE_JID2);
assertFalse(filter.accept(packet));
packet.setFrom(BASE_JID1);
assertFalse(filter.accept(packet));
packet.setFrom(FULL_JID1_R1);
assertFalse(filter.accept(packet));
packet.setFrom(BASE_JID3);
assertFalse(filter.accept(packet));
}
|
@Override
public boolean hasAttribute(String key) {
return channel.hasAttribute(key);
}
|
@Test
void hasAttributeTest() {
Assertions.assertFalse(header.hasAttribute("test"));
header.setAttribute("test", "test");
Assertions.assertTrue(header.hasAttribute("test"));
}
|
public DockerImagesCommand getSingleImageStatus(String imageName) {
Preconditions.checkNotNull(imageName, "imageName");
super.addCommandArguments("image", imageName);
return this;
}
|
@Test
public void testSingleImage() {
dockerImagesCommand = dockerImagesCommand.getSingleImageStatus(IMAGE_NAME);
assertEquals("images", StringUtils.join(",",
dockerImagesCommand.getDockerCommandWithArguments()
.get("docker-command")));
assertEquals("image name", "foo", StringUtils.join(",",
dockerImagesCommand.getDockerCommandWithArguments().get("image")));
assertEquals(2, dockerImagesCommand.getDockerCommandWithArguments().size());
}
|
@Override
/**
* {@inheritDoc} Handles the bundle's completion report. Parses the monitoringInfos in the
* response, then updates the MetricsRegistry.
*/
public void onCompleted(BeamFnApi.ProcessBundleResponse response) {
response.getMonitoringInfosList().stream()
.filter(monitoringInfo -> !monitoringInfo.getPayload().isEmpty())
.map(this::parseAndUpdateMetric)
.distinct()
.forEach(samzaMetricsContainer::updateMetrics);
}
|
@Test
public void testEmptyPayload() {
byte[] emptyPayload = "".getBytes(Charset.defaultCharset());
MetricsApi.MonitoringInfo emptyMonitoringInfo =
MetricsApi.MonitoringInfo.newBuilder()
.setType(SUM_INT64_TYPE)
.setPayload(ByteString.copyFrom(emptyPayload))
.putLabels(MonitoringInfoConstants.Labels.NAMESPACE, EXPECTED_NAMESPACE)
.putLabels(MonitoringInfoConstants.Labels.NAME, EXPECTED_COUNTER_NAME)
.build();
// "\173" is an octal escape for decimal 123
byte[] payload = "\173".getBytes(Charset.defaultCharset());
MetricsApi.MonitoringInfo monitoringInfo =
MetricsApi.MonitoringInfo.newBuilder()
.setType(SUM_INT64_TYPE)
.setPayload(ByteString.copyFrom(payload))
.putLabels(MonitoringInfoConstants.Labels.NAMESPACE, EXPECTED_NAMESPACE)
.putLabels(MonitoringInfoConstants.Labels.NAME, EXPECTED_COUNTER_NAME)
.build();
BeamFnApi.ProcessBundleResponse response =
BeamFnApi.ProcessBundleResponse.newBuilder()
.addMonitoringInfos(emptyMonitoringInfo)
.addMonitoringInfos(monitoringInfo)
.addMonitoringInfos(emptyMonitoringInfo)
.build();
// Execute
samzaMetricsBundleProgressHandler.onCompleted(response);
// Verify
MetricName metricName = MetricName.named(EXPECTED_NAMESPACE, EXPECTED_COUNTER_NAME);
CounterCell counter =
(CounterCell) samzaMetricsContainer.getContainer(stepName).getCounter(metricName);
assertEquals((Long) 123L, counter.getCumulative());
}
|
public StreamObserver<BeamFnApi.Elements> getOutboundObserver() {
return outboundObserver;
}
|
@Test
public void testOutboundObserver() {
Collection<BeamFnApi.Elements> values = new ArrayList<>();
BeamFnDataGrpcMultiplexer multiplexer =
new BeamFnDataGrpcMultiplexer(
DESCRIPTOR,
OutboundObserverFactory.clientDirect(),
inboundObserver -> TestStreams.withOnNext(values::add).build());
multiplexer.getOutboundObserver().onNext(ELEMENTS);
MatcherAssert.assertThat(values, Matchers.contains(ELEMENTS));
}
|
private void handleTokenCallback(OAuthBearerTokenCallback callback) throws IOException {
checkInitialized();
String accessToken = accessTokenRetriever.retrieve();
try {
OAuthBearerToken token = accessTokenValidator.validate(accessToken);
callback.token(token);
} catch (ValidateException e) {
log.warn(e.getMessage(), e);
callback.error("invalid_token", e.getMessage(), null);
}
}
|
@Test
public void testHandleTokenCallback() throws Exception {
Map<String, ?> configs = getSaslConfigs();
AccessTokenBuilder builder = new AccessTokenBuilder()
.jwk(createRsaJwk())
.alg(AlgorithmIdentifiers.RSA_USING_SHA256);
String accessToken = builder.build();
AccessTokenRetriever accessTokenRetriever = () -> accessToken;
OAuthBearerLoginCallbackHandler handler = createHandler(accessTokenRetriever, configs);
try {
OAuthBearerTokenCallback callback = new OAuthBearerTokenCallback();
handler.handle(new Callback[] {callback});
assertNotNull(callback.token());
OAuthBearerToken token = callback.token();
assertEquals(accessToken, token.value());
assertEquals(builder.subject(), token.principalName());
assertEquals(builder.expirationSeconds() * 1000, token.lifetimeMs());
assertEquals(builder.issuedAtSeconds() * 1000, token.startTimeMs());
} finally {
handler.close();
}
}
|
public static String formatExpression(final Expression expression) {
return formatExpression(expression, FormatOptions.of(s -> false));
}
|
@Test
public void shouldFormatWhen() {
assertThat(ExpressionFormatter.formatExpression(new WhenClause(new LongLiteral(1), new LongLiteral(2))), equalTo("WHEN 1 THEN 2"));
}
|
@Timed
@Path("/{destination}")
@PUT
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@ManagedAsync
@Operation(
summary = "Send a message",
description = """
Deliver a message to a single recipient. May be authenticated or unauthenticated; if unauthenticated,
an unidentified-access key or group-send endorsement token must be provided, unless the message is a story.
""")
@ApiResponse(responseCode="200", description="Message was successfully sent", useReturnTypeSchema=true)
@ApiResponse(
responseCode="401",
description="The message is not a story and the authorization, unauthorized access key, or group send endorsement token is missing or incorrect")
@ApiResponse(
responseCode="404",
description="The message is not a story and some the recipient service ID does not correspond to a registered Signal user")
@ApiResponse(
responseCode = "409", description = "Incorrect set of devices supplied for recipient",
content = @Content(schema = @Schema(implementation = AccountMismatchedDevices[].class)))
@ApiResponse(
responseCode = "410", description = "Mismatched registration ids supplied for some recipient devices",
content = @Content(schema = @Schema(implementation = AccountStaleDevices[].class)))
public Response sendMessage(@ReadOnly @Auth Optional<AuthenticatedDevice> source,
@Parameter(description="The recipient's unidentified access key")
@HeaderParam(HeaderUtils.UNIDENTIFIED_ACCESS_KEY) Optional<Anonymous> accessKey,
@Parameter(description="A group send endorsement token covering the recipient. Must not be combined with `Unidentified-Access-Key` or set on a story message.")
@HeaderParam(HeaderUtils.GROUP_SEND_TOKEN)
@Nullable GroupSendTokenHeader groupSendToken,
@HeaderParam(HttpHeaders.USER_AGENT) String userAgent,
@Parameter(description="If true, deliver the message only to recipients that are online when it is sent")
@PathParam("destination") ServiceIdentifier destinationIdentifier,
@Parameter(description="If true, the message is a story; access tokens are not checked and sending to nonexistent recipients is permitted")
@QueryParam("story") boolean isStory,
@Parameter(description="The encrypted message payloads for each recipient device")
@NotNull @Valid IncomingMessageList messages,
@Context ContainerRequestContext context) throws RateLimitExceededException {
final Sample sample = Timer.start();
try {
if (source.isEmpty() && accessKey.isEmpty() && groupSendToken == null && !isStory) {
throw new WebApplicationException(Response.Status.UNAUTHORIZED);
}
if (groupSendToken != null) {
if (!source.isEmpty() || !accessKey.isEmpty()) {
throw new BadRequestException("Group send endorsement tokens should not be combined with other authentication");
} else if (isStory) {
throw new BadRequestException("Group send endorsement tokens should not be sent for story messages");
}
}
final String senderType;
if (source.isPresent()) {
if (source.get().getAccount().isIdentifiedBy(destinationIdentifier)) {
senderType = SENDER_TYPE_SELF;
} else {
senderType = SENDER_TYPE_IDENTIFIED;
}
} else {
senderType = SENDER_TYPE_UNIDENTIFIED;
}
boolean isSyncMessage = source.isPresent() && source.get().getAccount().isIdentifiedBy(destinationIdentifier);
if (isSyncMessage && destinationIdentifier.identityType() == IdentityType.PNI) {
throw new WebApplicationException(Status.FORBIDDEN);
}
Optional<Account> destination;
if (!isSyncMessage) {
destination = accountsManager.getByServiceIdentifier(destinationIdentifier);
} else {
destination = source.map(AuthenticatedDevice::getAccount);
}
final Optional<Response> spamCheck = spamChecker.checkForSpam(
context, source.map(AuthenticatedDevice::getAccount), destination);
if (spamCheck.isPresent()) {
return spamCheck.get();
}
final Optional<byte[]> spamReportToken = switch (senderType) {
case SENDER_TYPE_IDENTIFIED ->
reportSpamTokenProvider.makeReportSpamToken(context, source.get(), destination);
default -> Optional.empty();
};
int totalContentLength = 0;
for (final IncomingMessage message : messages.messages()) {
int contentLength = 0;
if (StringUtils.isNotEmpty(message.content())) {
contentLength += message.content().length();
}
validateContentLength(contentLength, false, userAgent);
validateEnvelopeType(message.type(), userAgent);
totalContentLength += contentLength;
}
try {
rateLimiters.getInboundMessageBytes().validate(destinationIdentifier.uuid(), totalContentLength);
} catch (final RateLimitExceededException e) {
if (dynamicConfigurationManager.getConfiguration().getInboundMessageByteLimitConfiguration().enforceInboundLimit()) {
messageByteLimitEstimator.add(destinationIdentifier.uuid().toString());
throw e;
}
}
try {
if (isStory) {
// Stories will be checked by the client; we bypass access checks here for stories.
} else if (groupSendToken != null) {
checkGroupSendToken(List.of(destinationIdentifier.toLibsignal()), groupSendToken);
if (destination.isEmpty()) {
throw new NotFoundException();
}
} else {
OptionalAccess.verify(source.map(AuthenticatedDevice::getAccount), accessKey, destination,
destinationIdentifier);
}
boolean needsSync = !isSyncMessage && source.isPresent() && source.get().getAccount().getDevices().size() > 1;
// We return 200 when stories are sent to a non-existent account. Since story sends bypass OptionalAccess.verify
// we leak information about whether a destination UUID exists if we return any other code (e.g. 404) from
// these requests.
if (isStory && destination.isEmpty()) {
return Response.ok(new SendMessageResponse(needsSync)).build();
}
// if destination is empty we would either throw an exception in OptionalAccess.verify when isStory is false
// or else return a 200 response when isStory is true.
assert destination.isPresent();
if (source.isPresent() && !isSyncMessage) {
checkMessageRateLimit(source.get(), destination.get(), userAgent);
}
if (isStory) {
rateLimiters.getStoriesLimiter().validate(destination.get().getUuid());
}
final Set<Byte> excludedDeviceIds;
if (isSyncMessage) {
excludedDeviceIds = Set.of(source.get().getAuthenticatedDevice().getId());
} else {
excludedDeviceIds = Collections.emptySet();
}
DestinationDeviceValidator.validateCompleteDeviceList(destination.get(),
messages.messages().stream().map(IncomingMessage::destinationDeviceId).collect(Collectors.toSet()),
excludedDeviceIds);
DestinationDeviceValidator.validateRegistrationIds(destination.get(),
messages.messages(),
IncomingMessage::destinationDeviceId,
IncomingMessage::destinationRegistrationId,
destination.get().getPhoneNumberIdentifier().equals(destinationIdentifier.uuid()));
final String authType;
if (SENDER_TYPE_IDENTIFIED.equals(senderType)) {
authType = AUTH_TYPE_IDENTIFIED;
} else if (isStory) {
authType = AUTH_TYPE_STORY;
} else if (groupSendToken != null) {
authType = AUTH_TYPE_GROUP_SEND_TOKEN;
} else {
authType = AUTH_TYPE_ACCESS_KEY;
}
final List<Tag> tags = List.of(UserAgentTagUtil.getPlatformTag(userAgent),
Tag.of(ENDPOINT_TYPE_TAG_NAME, ENDPOINT_TYPE_SINGLE),
Tag.of(EPHEMERAL_TAG_NAME, String.valueOf(messages.online())),
Tag.of(SENDER_TYPE_TAG_NAME, senderType),
Tag.of(AUTH_TYPE_TAG_NAME, authType),
Tag.of(IDENTITY_TYPE_TAG_NAME, destinationIdentifier.identityType().name()));
for (IncomingMessage incomingMessage : messages.messages()) {
Optional<Device> destinationDevice = destination.get().getDevice(incomingMessage.destinationDeviceId());
if (destinationDevice.isPresent()) {
Metrics.counter(SENT_MESSAGE_COUNTER_NAME, tags).increment();
sendIndividualMessage(
source,
destination.get(),
destinationDevice.get(),
destinationIdentifier,
messages.timestamp(),
messages.online(),
isStory,
messages.urgent(),
incomingMessage,
userAgent,
spamReportToken);
}
}
return Response.ok(new SendMessageResponse(needsSync)).build();
} catch (MismatchedDevicesException e) {
throw new WebApplicationException(Response.status(409)
.type(MediaType.APPLICATION_JSON_TYPE)
.entity(new MismatchedDevices(e.getMissingDevices(),
e.getExtraDevices()))
.build());
} catch (StaleDevicesException e) {
throw new WebApplicationException(Response.status(410)
.type(MediaType.APPLICATION_JSON)
.entity(new StaleDevices(e.getStaleDevices()))
.build());
}
} finally {
sample.stop(SEND_MESSAGE_LATENCY_TIMER);
}
}
|
@Test
void testSingleDeviceCurrentUnidentified() throws Exception {
try (final Response response =
resources.getJerseyTest()
.target(String.format("/v1/messages/%s", SINGLE_DEVICE_UUID))
.request()
.header(HeaderUtils.UNIDENTIFIED_ACCESS_KEY, Base64.getEncoder().encodeToString(UNIDENTIFIED_ACCESS_BYTES))
.put(Entity.entity(SystemMapper.jsonMapper().readValue(jsonFixture("fixtures/current_message_single_device.json"),
IncomingMessageList.class),
MediaType.APPLICATION_JSON_TYPE))) {
assertThat("Good Response", response.getStatus(), is(equalTo(200)));
ArgumentCaptor<Envelope> captor = ArgumentCaptor.forClass(Envelope.class);
verify(messageSender, times(1)).sendMessage(any(Account.class), any(Device.class), captor.capture(), eq(false));
assertFalse(captor.getValue().hasSourceUuid());
assertFalse(captor.getValue().hasSourceDevice());
}
}
|
public Collection<ServerAbilityInitializer> getInitializers() {
return initializers;
}
|
@Test
void testGetInitializers() {
assertEquals(1, ServerAbilityInitializerHolder.getInstance().getInitializers().size());
}
|
public int getClusterID() {
return clusterID;
}
|
@Test
public void testConstruct() {
Storage storage1 = new Storage(1, "token", "test");
Assert.assertEquals(1, storage1.getClusterID());
}
|
public static DatabaseType getProtocolType(final DatabaseConfiguration databaseConfig, final ConfigurationProperties props) {
Optional<DatabaseType> configuredDatabaseType = findConfiguredDatabaseType(props);
if (configuredDatabaseType.isPresent()) {
return configuredDatabaseType.get();
}
Collection<DataSource> dataSources = getDataSources(databaseConfig).values();
return dataSources.isEmpty() ? getDefaultStorageType() : getStorageType(dataSources.iterator().next());
}
|
@Test
void assertGetProtocolTypeFromConfiguredProperties() {
Properties props = PropertiesBuilder.build(new Property(ConfigurationPropertyKey.PROXY_FRONTEND_DATABASE_PROTOCOL_TYPE.getKey(), "MySQL"));
DatabaseConfiguration databaseConfig = new DataSourceProvidedDatabaseConfiguration(Collections.emptyMap(), Collections.singleton(new FixtureRuleConfiguration()));
assertThat(DatabaseTypeEngine.getProtocolType(databaseConfig, new ConfigurationProperties(props)), instanceOf(MySQLDatabaseType.class));
assertThat(DatabaseTypeEngine.getProtocolType(Collections.singletonMap("foo_db", databaseConfig), new ConfigurationProperties(props)), instanceOf(MySQLDatabaseType.class));
}
|
public void applyPluginsForContainer( final String category, final XulDomContainer container ) throws XulException {
List<SpoonPluginInterface> plugins = pluginCategoryMap.get( category );
if ( plugins != null ) {
for ( SpoonPluginInterface sp : plugins ) {
sp.applyToContainer( category, container );
}
}
}
|
@Test
public void testApplyPluginsForContainer() throws Exception {
spoonPluginManager.pluginAdded( plugin1 );
spoonPluginManager.pluginAdded( plugin2 );
spoonPluginManager.applyPluginsForContainer( "trans-graph", xulDomContainer );
assertEquals( 2, applies.size() );
assertEquals( 1, (int) applies.get( spoonPluginInterface1 ) );
assertEquals( 1, (int) applies.get( spoonPluginInterface2 ) );
}
|
public static InstanceMetaData create(final String instanceId, final InstanceType instanceType, final String attributes, final String version) {
return InstanceType.JDBC == instanceType ? new JDBCInstanceMetaData(instanceId, attributes, version) : new ProxyInstanceMetaData(instanceId, attributes, version);
}
|
@Test
void assertCreateJDBCInstanceMetaDataWithInstanceId() {
InstanceMetaData actual = InstanceMetaDataFactory.create("foo_id", InstanceType.JDBC, "127.0.0.1", "foo_version");
assertThat(actual.getId(), is("foo_id"));
assertNotNull(actual.getIp());
assertThat(actual.getAttributes(), is("127.0.0.1"));
assertThat(actual.getVersion(), is("foo_version"));
assertThat(actual.getType(), is(InstanceType.JDBC));
}
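The factory returns the proxy implementation for any non-JDBC instance type. A minimal sketch mirroring the JDBC case; the attribute format used for proxy instances is an assumption here:
@Test
void assertCreateProxyInstanceMetaData() {
    InstanceMetaData actual = InstanceMetaDataFactory.create("foo_id", InstanceType.PROXY, "127.0.0.1@3307", "foo_version");
    assertThat(actual, instanceOf(ProxyInstanceMetaData.class));
    assertThat(actual.getType(), is(InstanceType.PROXY));
}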
|
public int get(final int key)
{
final int missingValue = this.missingValue;
final int[] entries = this.entries;
@DoNotSub final int mask = entries.length - 1;
@DoNotSub int index = Hashing.evenHash(key, mask);
int value;
while (missingValue != (value = entries[index + 1]))
{
if (key == entries[index])
{
break;
}
index = next(index, mask);
}
return value;
}
|
@Test
void boxedGetShouldReturnNull()
{
assertNull(map.get((Integer)1));
}
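The primitive get returns the map's configured missingValue rather than null for absent keys. A minimal sketch, assuming a map constructed with -1 as its missing value (Agrona's Int2IntHashMap takes the missing value in its constructor):
@Test
void getShouldReturnMissingValueWhenAbsent() {
    final Int2IntHashMap map = new Int2IntHashMap(-1);
    assertEquals(-1, map.get(1));
    map.put(1, 42);
    assertEquals(42, map.get(1));
}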
|
@Override
public byte[] evaluateResponse( @Nonnull final byte[] response ) throws SaslException
{
if ( isComplete() )
{
throw new IllegalStateException( "Authentication exchange already completed." );
}
// The value as sent to us in the 'from' attribute of the stream element sent by the remote server.
final String defaultIdentity = session.getDefaultIdentity();
// RFC 6120 Section 4.7.1:
// "Because a server is a "public entity" on the XMPP network, it MUST include the 'from' attribute after the
// confidentiality and integrity of the stream are protected via TLS or an equivalent security layer."
//
// When doing SASL EXTERNAL, TLS must already have been negotiated, which means that the 'from' attribute must have been set.
if (defaultIdentity == null || defaultIdentity.isEmpty()) {
throw new SaslFailureException(Failure.NOT_AUTHORIZED, "Peer does not provide 'from' attribute value on stream.");
}
final String requestedId;
if (response.length == 0 && session.getSessionData(SASLAuthentication.SASL_LAST_RESPONSE_WAS_PROVIDED_BUT_EMPTY) == null) {
if (PROPERTY_SASL_EXTERNAL_SERVER_REQUIRE_AUTHZID.getValue()) {
// No initial response. Send a challenge to get one, per RFC 4422 appendix-A.
return new byte[0];
} else {
requestedId = defaultIdentity;
}
}
else
{
requestedId = new String( response, StandardCharsets.UTF_8 );
}
complete = true;
Log.trace("Completing handshake with '{}' using authzid value: '{}'", defaultIdentity, requestedId);
// Added for backwards compatibility. Not required by XMPP, but versions of Openfire prior to 4.8.0 did require the authzid to be present.
if (SASLAuthentication.EXTERNAL_S2S_REQUIRE_AUTHZID.getValue() && requestedId.isEmpty()) {
throw new SaslFailureException(Failure.INVALID_AUTHZID, "Peer does not provide authzid, which is required by configuration.");
}
// When an authorization identity is provided, make sure that it matches the 'from' value from the session stream.
if (!requestedId.isEmpty() && !requestedId.equals(defaultIdentity)) {
throw new SaslFailureException(Failure.INVALID_AUTHZID, "Stream 'from' attribute value '" + defaultIdentity + "' does not equal SASL authzid '" + requestedId + "'");
}
if (!SASLAuthentication.verifyCertificates(session.getConnection().getPeerCertificates(), defaultIdentity, true)) {
throw new SaslFailureException(Failure.NOT_AUTHORIZED, "Server-to-Server certificate verification failed.");
}
authorizationID = defaultIdentity;
Log.trace("Successfully authenticated '{}'", authorizationID);
return null; // Success!
}
|
@Test(expected = SaslFailureException.class)
public void testInitialResponseDifferentFromStreamID() throws Exception
{
// Setup test fixture.
final String authzID = "foo.example.org";
final String certID = "bar.example.com";
when(session.getDefaultIdentity()).thenReturn(authzID);
when(session.getConnection()).thenReturn(connection);
saslAuthentication.when(() -> SASLAuthentication.verifyCertificates(any(), eq(certID), anyBoolean())).thenReturn(true);
final ExternalServerSaslServer server = new ExternalServerSaslServer(session);
final byte[] input = authzID.getBytes(StandardCharsets.UTF_8);
// Execute system under test.
server.evaluateResponse(input);
}
|
public static <T> NullableCoder<T> of(Coder<T> valueCoder) {
if (valueCoder instanceof NullableCoder) {
return (NullableCoder<T>) valueCoder;
}
return new NullableCoder<>(valueCoder);
}
|
@Test
public void testSubcoderReceivesEntireStream() throws Exception {
NullableCoder<String> coder = NullableCoder.of(new EntireStreamExpectingCoder());
CoderProperties.coderDecodeEncodeEqualInContext(coder, Context.OUTER, null);
CoderProperties.coderDecodeEncodeEqualInContext(coder, Context.OUTER, "foo");
}
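Because of() short-circuits when handed an already-nullable coder, wrapping is idempotent. A minimal sketch using Beam's StringUtf8Coder:
@Test
public void testOfIsIdempotent() {
    NullableCoder<String> coder = NullableCoder.of(StringUtf8Coder.of());
    assertSame(coder, NullableCoder.of(coder));
}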
|
@Override
public void invoke(IN value, Context context) throws Exception {
bufferLock.lock();
try {
// TODO this implementation is not very effective,
// optimize this with MemorySegment if needed
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputViewStreamWrapper wrapper = new DataOutputViewStreamWrapper(baos);
serializer.serialize(value, wrapper);
invokingRecordBytes = baos.size();
if (invokingRecordBytes > maxBytesPerBatch) {
throw new RuntimeException(
"Record size is too large for CollectSinkFunction. Record size is "
+ invokingRecordBytes
+ " bytes, "
+ "but max bytes per batch is only "
+ maxBytesPerBatch
+ " bytes. "
+ "Please consider increasing max bytes per batch value by setting "
+ CollectSinkOperatorFactory.MAX_BATCH_SIZE.key());
}
if (currentBufferBytes + invokingRecordBytes > bufferSizeLimitBytes) {
bufferCanAddNextResultCondition.await();
}
buffer.add(baos.toByteArray());
currentBufferBytes += baos.size();
} finally {
bufferLock.unlock();
}
}
|
@Test
void testAccumulatorResultWithCheckpoint() throws Exception {
functionWrapper.openFunctionWithState();
for (int i = 0; i < 6; i++) {
functionWrapper.invoke(i);
}
String version = initializeVersion();
functionWrapper.sendRequestAndGetResponse(version, 3);
functionWrapper.checkpointFunction(1);
functionWrapper.checkpointComplete(1);
for (int i = 6; i < 9; i++) {
functionWrapper.invoke(i);
}
functionWrapper.sendRequestAndGetResponse(version, 5);
functionWrapper.closeFunctionNormally();
CollectTestUtils.assertAccumulatorResult(
functionWrapper.getAccumulatorResults(),
5,
version,
3,
Arrays.asList(5, 6, 7, 8),
serializer);
}
|
public void isNull() {
standardIsEqualTo(null);
}
|
@Test
public void isNullWhenSubjectForbidsIsEqualToFail() {
expectFailure.whenTesting().about(objectsForbiddingEqualityCheck()).that(new Object()).isNull();
}
|
@Override
public ExtensionFactory getExtensionFactory() {
return original.getExtensionFactory();
}
|
@Test
public void getExtensionFactory() {
pluginManager.loadPlugins();
assertEquals(pluginManager.getExtensionFactory(), wrappedPluginManager.getExtensionFactory());
}
|
private Mute() {
}
|
@Test
void loggedMuteShouldRunTheCheckedRunnableAndNotThrowAnyExceptionIfCheckedRunnableDoesNotThrowAnyException() {
assertDoesNotThrow(() -> Mute.mute(this::methodNotThrowingAnyException));
}
|
@Override
public synchronized void destroy() {
if (id < 0) {
throw new IllegalStateException("Negative segment ID indicates a reserved segment, "
+ "which should not be destroyed. Reserved segments are cleaned up only when "
+ "an entire store is closed, via the close() method rather than destroy().");
}
final Bytes keyPrefix = prefixKeyFormatter.getPrefix();
// this deleteRange() call deletes all entries with the given prefix, because the
// deleteRange() implementation calls Bytes.increment() in order to make keyTo inclusive
physicalStore.deleteRange(keyPrefix, keyPrefix);
}
|
@Test
public void shouldNotDestroySegmentWithNegativeId() {
// reserved segments should not be destroyed. they are cleaned up only when
// an entire store is closed, via the close() method rather than destroy()
assertThrows(IllegalStateException.class, () -> negativeIdSegment.destroy());
}
|
@Override
public DirectoryTimestamp getDirectoryTimestamp() {
return DirectoryTimestamp.explicit;
}
|
@Test
public void testFeatures() {
assertEquals(Protocol.Case.sensitive, new CteraProtocol().getCaseSensitivity());
assertEquals(Protocol.DirectoryTimestamp.explicit, new CteraProtocol().getDirectoryTimestamp());
}
|