focal_method (string, 13–60.9k chars) | test_case (string, 25–109k chars)
---|---
public static ServiceBusMessage createServiceBusMessage(
final Object data, final Map<String, Object> applicationProperties, final String correlationId) {
ServiceBusMessage serviceBusMessage;
if (data instanceof String) {
serviceBusMessage = new ServiceBusMessage((String) data);
} else if (data instanceof byte[]) {
serviceBusMessage = new ServiceBusMessage((byte[]) data);
} else if (data instanceof BinaryData) {
serviceBusMessage = new ServiceBusMessage((BinaryData) data);
} else {
throw new IllegalArgumentException("Make sure your message data is of type String, byte[] or BinaryData");
}
if (applicationProperties != null) {
serviceBusMessage.getRawAmqpMessage().getApplicationProperties().putAll(applicationProperties);
}
if (ObjectHelper.isNotEmpty(correlationId)) {
serviceBusMessage.setCorrelationId(correlationId);
}
return serviceBusMessage;
}
|
@Test
void testCreateServiceBusMessage() {
// test string
final ServiceBusMessage message1 = ServiceBusUtils.createServiceBusMessage("test string", null, null);
assertEquals("test string", message1.getBody().toString());
// test a stringified int
final ServiceBusMessage message2 = ServiceBusUtils.createServiceBusMessage(String.valueOf(12345), null, null);
assertEquals("12345", message2.getBody().toString());
// test bytes
byte[] testByteBody = "test string".getBytes(StandardCharsets.UTF_8);
final ServiceBusMessage message3 = ServiceBusUtils.createServiceBusMessage(testByteBody, null, null);
assertArrayEquals(testByteBody, message3.getBody().toBytes());
}
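The test above covers only the String and byte[] branches. A minimal sketch of the remaining branches, assuming the Azure SDK accessors used by the focal method and the usual JUnit 5 imports:
@Test
void testCreateServiceBusMessageWithPropertiesAndCorrelationId() {
    final Map<String, Object> props = Collections.singletonMap("key", "value");
    final ServiceBusMessage message = ServiceBusUtils.createServiceBusMessage("body", props, "corr-1");
    // application properties are copied onto the raw AMQP message
    assertEquals("value", message.getRawAmqpMessage().getApplicationProperties().get("key"));
    assertEquals("corr-1", message.getCorrelationId());
    // any payload type other than String, byte[] or BinaryData is rejected
    assertThrows(IllegalArgumentException.class,
            () -> ServiceBusUtils.createServiceBusMessage(12345, null, null));
}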
|
@Override
public Processor<K, Change<V>, K, Change<VOut>> get() {
return new KTableTransformValuesProcessor(transformerSupplier.get());
}
|
@Test
public void shouldNotSendOldValuesByDefault() {
final KTableTransformValues<String, String, String> transformValues =
new KTableTransformValues<>(parent, new ExclamationValueTransformerSupplier(), null);
final Processor<String, Change<String>, String, Change<String>> processor = transformValues.get();
processor.init(context);
doNothing().when(context).forward(new Record<>("Key", new Change<>("Key->newValue!", null), 0));
processor.process(new Record<>("Key", new Change<>("newValue", "oldValue"), 0));
}
|
@Override
public byte[] serialize() {
byte[] optionsData = null;
if (this.options.hasOptions()) {
optionsData = this.options.serialize();
}
int optionsLength = 0;
if (optionsData != null) {
optionsLength = optionsData.length;
}
final byte[] data = new byte[HEADER_LENGTH + optionsLength];
final ByteBuffer bb = ByteBuffer.wrap(data);
bb.putInt(0);
bb.put(this.targetAddress, 0, Ip6Address.BYTE_LENGTH);
bb.put(this.destinationAddress, 0, Ip6Address.BYTE_LENGTH);
if (optionsData != null) {
bb.put(optionsData);
}
return data;
}
|
@Test
public void testSerialize() {
Redirect rd = new Redirect();
rd.setTargetAddress(TARGET_ADDRESS);
rd.setDestinationAddress(DESTINATION_ADDRESS);
rd.addOption(NeighborDiscoveryOptions.TYPE_TARGET_LL_ADDRESS,
MAC_ADDRESS.toBytes());
assertArrayEquals(rd.serialize(), bytePacket);
}
|
@Override // To express more specific javadoc
public void isEqualTo(@Nullable Object expected) {
super.isEqualTo(expected);
}
|
@SuppressWarnings("TruthSelfEquals")
@Test
public void isEqualTo() {
// make sure this still works
assertThat(TEN).isEqualTo(TEN);
}
|
@Override
public boolean test(Pickle pickle) {
URI picklePath = pickle.getUri();
if (!lineFilters.containsKey(picklePath)) {
return true;
}
for (Integer line : lineFilters.get(picklePath)) {
if (Objects.equals(line, pickle.getLocation().getLine())
|| Objects.equals(line, pickle.getScenarioLocation().getLine())
|| pickle.getExamplesLocation().map(Location::getLine).map(line::equals).orElse(false)
|| pickle.getRuleLocation().map(Location::getLine).map(line::equals).orElse(false)
|| pickle.getFeatureLocation().map(Location::getLine).map(line::equals).orElse(false)) {
return true;
}
}
return false;
}
|
@Test
void matches_first_examples() {
LinePredicate predicate = new LinePredicate(singletonMap(
featurePath,
singletonList(5)));
assertTrue(predicate.test(firstPickle));
assertTrue(predicate.test(secondPickle));
assertFalse(predicate.test(thirdPickle));
assertFalse(predicate.test(fourthPickle));
}
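A sketch of the complementary case, assuming a hypothetical pickleFromOtherFile fixture whose URI has no entry in the line filter map:
@Test
void matches_pickles_from_unfiltered_files() {
    LinePredicate predicate = new LinePredicate(singletonMap(
        featurePath,
        singletonList(5)));
    // pickles from a URI without a line filter always pass (hypothetical fixture)
    assertTrue(predicate.test(pickleFromOtherFile));
}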
|
public static List<String> finalDestination(List<String> elements) {
if (isMagicPath(elements)) {
List<String> destDir = magicPathParents(elements);
List<String> children = magicPathChildren(elements);
checkArgument(!children.isEmpty(), "No path found under the prefix " +
MAGIC_PATH_PREFIX);
ArrayList<String> dest = new ArrayList<>(destDir);
if (containsBasePath(children)) {
// there's a base marker in the path
List<String> baseChildren = basePathChildren(children);
checkArgument(!baseChildren.isEmpty(),
"No path found under " + BASE);
dest.addAll(baseChildren);
} else {
dest.add(filename(children));
}
return dest;
} else {
return elements;
}
}
|
@Test
public void testFinalDestinationBaseSubdirsChild() {
assertEquals(l("2", "3.txt"),
finalDestination(l(MAGIC_PATH_PREFIX, "4", BASE, "2", "3.txt")));
}
|
@Override
@MethodNotAvailable
public <T> T invoke(K key, EntryProcessor<K, V, T> entryProcessor, Object... arguments) throws EntryProcessorException {
throw new MethodNotAvailableException();
}
|
@Test(expected = MethodNotAvailableException.class)
public void testInvoke() {
adapter.invoke(23, new ICacheReplaceEntryProcessor(), "value", "newValue");
}
|
@Override
public MetricsRepository load() {
List<Metric> metrics = new ArrayList<>();
try {
loadFromPaginatedWs(metrics);
} catch (Exception e) {
throw new IllegalStateException("Unable to load metrics", e);
}
return new MetricsRepository(metrics);
}
|
@Test
public void testIOError() throws IOException {
Reader reader = mock(Reader.class);
when(reader.read(any(char[].class), anyInt(), anyInt())).thenThrow(new IOException());
WsTestUtil.mockReader(wsClient, reader);
assertThatThrownBy(() -> metricsRepositoryLoader.load())
.isInstanceOf(IllegalStateException.class);
}
|
public Collection<ResultPartitionID> stopTrackingPartitions(K key) {
Preconditions.checkNotNull(key);
Set<ResultPartitionID> storedPartitions = trackedPartitionsPerKey.remove(key);
return storedPartitions == null ? Collections.emptyList() : storedPartitions;
}
|
@Test
void testStopTrackingPartitions() {
final ResultPartitionID partitionId2 = new ResultPartitionID();
final PartitionTable<JobID> table = new PartitionTable<>();
table.startTrackingPartitions(JOB_ID, Collections.singletonList(PARTITION_ID));
table.startTrackingPartitions(JOB_ID, Collections.singletonList(partitionId2));
table.stopTrackingPartitions(JOB_ID, Collections.singletonList(partitionId2));
assertThat(table.hasTrackedPartitions(JOB_ID)).isTrue();
Collection<ResultPartitionID> storedPartitions = table.stopTrackingPartitions(JOB_ID);
assertThat(storedPartitions).contains(PARTITION_ID);
assertThat(table.hasTrackedPartitions(JOB_ID)).isFalse();
}
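The empty case follows directly from the null check in the focal method; a minimal sketch:
@Test
void testStopTrackingPartitionsForUnknownKey() {
    final PartitionTable<JobID> table = new PartitionTable<>();
    // nothing was ever tracked for this key, so an empty collection comes back
    assertThat(table.stopTrackingPartitions(new JobID())).isEmpty();
}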
|
public static String toString(InputStream input, String encoding) throws IOException {
    return (null == encoding)
        ? toString(new InputStreamReader(input, StandardCharsets.UTF_8))
        : toString(new InputStreamReader(input, encoding));
}
|
@Test
public void testToString() throws Exception {
byte[] b = "testToString".getBytes(StandardCharsets.UTF_8);
InputStream is = new ByteArrayInputStream(b);
String str = IOTinyUtils.toString(is, null);
assertEquals("testToString", str);
is = new ByteArrayInputStream(b);
str = IOTinyUtils.toString(is, StandardCharsets.UTF_8.name());
assertEquals("testToString", str);
is = new ByteArrayInputStream(b);
Reader isr = new InputStreamReader(is, StandardCharsets.UTF_8);
str = IOTinyUtils.toString(isr);
assertEquals("testToString", str);
}
|
public DeleteObjectsResult deleteObjects(List<String> keyList) throws CosClientException {
DeleteObjectsRequest deleteObjectsRequest = new DeleteObjectsRequest(cosClientConfig.getBucket());
// Set the list of keys to delete; at most 1000 keys can be deleted per request
ArrayList<DeleteObjectsRequest.KeyVersion> keyVersions = new ArrayList<>();
// Pass in the file names to delete.
// Note that file names must not start with a forward slash / or a backslash \. For example:
// if the bucket contains a file a/b/c.txt, delete it with keyList.add(new KeyVersion("a/b/c.txt")); using keyList.add(new KeyVersion("/a/b/c.txt")) would fail to delete it
for (String key : keyList) {
keyVersions.add(new DeleteObjectsRequest.KeyVersion(key));
}
deleteObjectsRequest.setKeys(keyVersions);
return cosClient.deleteObjects(deleteObjectsRequest);
}
|
@Test
void deleteObjects() {
cosManager.deleteObjects(Arrays.asList("test/2.jpg",
"test/7.jpg"
));
}
|
@Override
public String getName() {
return TransformFunctionType.OR.getName();
}
|
@Test
public void testOrNullColumn() {
ExpressionContext expression =
RequestContextUtils.getExpression(String.format("or(%s,%s)", INT_SV_COLUMN, INT_SV_NULL_COLUMN));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertTrue(transformFunction instanceof OrOperatorTransformFunction);
Assert.assertEquals(transformFunction.getName(), TransformFunctionType.OR.getName());
int[] expectedValues = new int[NUM_ROWS];
RoaringBitmap roaringBitmap = new RoaringBitmap();
for (int i = 0; i < NUM_ROWS; i++) {
if (_intSVValues[i] != 0) {
expectedValues[i] = 1;
} else if (isNullRow(i)) {
roaringBitmap.add(i);
} else {
expectedValues[i] = 0;
}
}
testTransformFunctionWithNull(transformFunction, expectedValues, roaringBitmap);
}
|
public static Object newInstance(String name) {
try {
return forName(name).getDeclaredConstructor().newInstance();
} catch (InstantiationException
| IllegalAccessException
| InvocationTargetException
| NoSuchMethodException e) {
throw new IllegalStateException(e.getMessage(), e);
}
}
|
@Test
void testNewInstance1() {
Assertions.assertThrows(
IllegalStateException.class,
() -> ClassUtils.newInstance(
"org.apache.dubbo.common.compiler.support.internal.HelloServiceInternalImpl"));
}
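For contrast, a happy-path sketch, assuming forName resolves classes from the default classpath:
@Test
void testNewInstanceWithAccessibleClass() {
    // java.util.ArrayList has a public no-arg constructor, so instantiation succeeds
    Object instance = ClassUtils.newInstance("java.util.ArrayList");
    Assertions.assertTrue(instance instanceof java.util.ArrayList);
}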
|
@Override
public boolean filterPath(Path filePath) {
if (getIncludeMatchers().isEmpty() && getExcludeMatchers().isEmpty()) {
return false;
}
// compensate for the fact that Flink paths are slashed
final String path =
filePath.hasWindowsDrive() ? filePath.getPath().substring(1) : filePath.getPath();
final java.nio.file.Path nioPath = Paths.get(path);
for (PathMatcher matcher : getIncludeMatchers()) {
if (matcher.matches(nioPath)) {
return shouldExclude(nioPath);
}
}
return true;
}
|
@Test
void testExcludeFilenameWithStart() {
assumeThat(OperatingSystem.isWindows())
.as("Windows does not allow asterisks in file names.")
.isFalse();
GlobFilePathFilter matcher =
new GlobFilePathFilter(
Collections.singletonList("**"), Collections.singletonList("\\*"));
assertThat(matcher.filterPath(new Path("*"))).isTrue();
assertThat(matcher.filterPath(new Path("**"))).isFalse();
assertThat(matcher.filterPath(new Path("other.txt"))).isFalse();
}
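A sketch of the short-circuit branch at the top of filterPath, assuming the same two-argument constructor:
@Test
void testNoMatchersFiltersNothing() {
    GlobFilePathFilter matcher =
            new GlobFilePathFilter(Collections.emptyList(), Collections.emptyList());
    // with no include or exclude patterns, no path is filtered out
    assertThat(matcher.filterPath(new Path("any/file.txt"))).isFalse();
}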
|
public Method getMethod(Class<?> clazz, String name) {
Method m = null;
try {
for (Method tempMethod : clazz.getMethods()) {
if (tempMethod.getName().equals(name)) {
m = tempMethod;
break;
}
}
} catch (Exception e) {
m = null;
}
return m;
}
|
@Test
public void testGetMethodDeepInheritance() {
try {
ReflectionEngine engine = new ReflectionEngine();
MethodInvoker mInvoker;
TestEngine2 tEngine2 = new TestEngine2();
mInvoker = engine.getMethod(tEngine2, "method3", new Object[] { tEngine2 });
assertArrayEquals(mInvoker.getMethod().getParameterTypes(), new Class[] { TestInterface.class });
TestEngine3 tEngine3 = new TestEngine3();
mInvoker = engine.getMethod(tEngine3, "method3", new Object[] { tEngine3 });
assertArrayEquals(mInvoker.getMethod().getParameterTypes(), new Class[] { TestInterface.class });
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
|
public DirectGraph getGraph() {
checkState(finalized, "Can't get a graph before the Pipeline has been completely traversed");
return DirectGraph.create(
producers, viewWriters, perElementConsumers, rootTransforms, stepNames);
}
|
@Test
public void getRootTransformsContainsRootTransforms() {
PCollection<byte[]> impulse = p.apply(Impulse.create());
impulse.apply(WithKeys.of("abc"));
p.traverseTopologically(visitor);
DirectGraph graph = visitor.getGraph();
assertThat(graph.getRootTransforms(), hasSize(1));
assertThat(
graph.getRootTransforms(),
Matchers.containsInAnyOrder(new Object[] {graph.getProducer(impulse)}));
for (AppliedPTransform<?, ?, ?> root : graph.getRootTransforms()) {
// Root transforms will have no inputs
assertThat(root.getInputs().entrySet(), emptyIterable());
assertThat(
Iterables.getOnlyElement(root.getOutputs().values()), Matchers.<POutput>isOneOf(impulse));
}
}
|
@Override
public Option<IndexedRecord> combineAndGetUpdateValue(IndexedRecord currentValue, Schema schema, Properties properties)
throws IOException {
return combineAndGetUpdateValue(currentValue, schema);
}
|
@Test
public void testDeleteWithEmptyPayLoad() {
Schema avroSchema = new Schema.Parser().parse(AVRO_SCHEMA_STRING);
Properties properties = new Properties();
GenericRecord oldRecord = new GenericData.Record(avroSchema);
oldRecord.put("field1", 2);
oldRecord.put("Op", "U");
AWSDmsAvroPayload payload = new AWSDmsAvroPayload(Option.empty());
try {
Option<IndexedRecord> outputPayload = payload.combineAndGetUpdateValue(oldRecord, avroSchema, properties);
// expect nothing to be committed to table
assertFalse(outputPayload.isPresent());
} catch (Exception e) {
e.printStackTrace();
fail("Unexpected exception");
}
}
|
public static <P> Matcher<P> or(Iterable<? extends Matcher<P>> matchers) {
return or(toArray(matchers));
}
|
@Test void or_multiple_matched() {
Matcher<Void> one = b -> true;
Matcher<Void> two = b -> false;
Matcher<Void> three = b -> true;
assertThat(or(one, two, three).matches(null)).isTrue();
}
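The complementary none-matched case, a minimal sketch over the same functional-interface matchers:
@Test void or_none_matched() {
    Matcher<Void> one = b -> false;
    Matcher<Void> two = b -> false;
    assertThat(or(one, two).matches(null)).isFalse();
}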
|
public String getDataId() {
return dataId;
}
|
@Test
void getDataId() {
ConfigurationChangeEvent event = new ConfigurationChangeEvent();
event.setDataId("dataId");
Assertions.assertEquals("dataId", event.getDataId());
}
|
@Override
public AppResponse process(Flow flow, SessionDataRequest request) throws SharedServiceClientException {
return validateAmountOfApps(flow, appSession.getAccountId(), request)
.orElseGet(() -> validateSms(flow, appSession.getAccountId(), request.getSmscode())
.orElseGet(() -> confirmSession(flow, request)));
}
|
@Test
void processActivateAppWithPasswordLetterFlow() throws SharedServiceClientException {
when(appAuthenticatorService.countByAccountIdAndInstanceIdNot(ACCOUNT_ID, SESSION_DATA_REQUEST_INSTANCE_ID)).thenReturn(3);
AppResponse appResponse = sessionConfirmed.process(mockedActivateAppWithPasswordLetterFlow, mockedSessionDataRequest);
AppAuthenticator appAuthenticator = sessionConfirmed.getAppAuthenticator();
assertEquals(DEVICE_NAME, appAuthenticator.getDeviceName());
assertEquals(SESSION_DATA_REQUEST_INSTANCE_ID, appAuthenticator.getInstanceId());
assertEquals(VALID_RESPONSE_CODE, ((SessionDataResponse)appResponse).getStatus());
assertEquals(APP_AUTHENTICATOR_USER_APP_ID, ((SessionDataResponse)appResponse).getUserAppId());
}
|
public byte[] createChildKeyStore(ApplicationId appId, String ksPassword)
throws Exception {
// We don't check the expiration date, and this will provide further reason
// for outside users to not accept these certificates
Date from = new Date();
Date to = from;
KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
keyGen.initialize(2048);
KeyPair keyPair = keyGen.genKeyPair();
String issuer = caCert.getSubjectX500Principal().getName();
String subject = "CN=" + appId;
X509Certificate cert = createCert(false, issuer, subject, from, to,
keyPair.getPublic(), caKeyPair.getPrivate());
if (LOG.isTraceEnabled()) {
LOG.trace("Certificate for {}: \n{}", appId, cert);
}
KeyStore keyStore = createChildKeyStore(ksPassword, "server",
keyPair.getPrivate(), cert);
return keyStoreToBytes(keyStore, ksPassword);
}
|
@Test
void testCreateChildKeyStore() throws Exception {
ProxyCA proxyCA = new ProxyCA();
proxyCA.init();
ApplicationId appId =
ApplicationId.newInstance(System.currentTimeMillis(), 1);
byte[] keystoreBytes = proxyCA.createChildKeyStore(appId,
"password");
KeyStore keyStore = KeyStoreTestUtil.bytesToKeyStore(keystoreBytes,
"password");
assertEquals(1, keyStore.size());
Certificate[] certChain = keyStore.getCertificateChain("server");
assertEquals(2, certChain.length);
X509Certificate caCert = (X509Certificate) certChain[1];
X509Certificate cert = (X509Certificate) certChain[0];
// check child cert
assertEquals(caCert.getSubjectX500Principal().toString(),
cert.getIssuerDN().toString());
assertEquals(new X500Principal("CN=" + appId),
cert.getSubjectX500Principal());
assertFalse(cert.getSubjectX500Principal().toString().contains(","),
"Found multiple fields in X500 Principal, when there " +
"should have only been one: " + cert.getSubjectX500Principal());
assertEquals("SHA512withRSA", cert.getSigAlgName());
assertEquals(cert.getNotBefore(), cert.getNotAfter());
assertTrue(cert.getNotAfter().before(new Date()),
"Expected certificate to be expired but was not: " + cert.getNotAfter());
assertEquals(new X500Principal("CN=" + appId).toString(),
cert.getSubjectDN().toString());
Key privateKey = keyStore.getKey("server", "password".toCharArray());
assertEquals("RSA", privateKey.getAlgorithm());
assertEquals(-1, cert.getBasicConstraints());
// verify signature on child cert
PublicKey caPublicKey = caCert.getPublicKey();
cert.verify(caPublicKey);
// check CA cert
checkCACert(caCert);
assertEquals(proxyCA.getCaCert(), caCert);
// verify signature on CA cert
caCert.verify(caPublicKey);
// verify CA public key matches private key
PrivateKey caPrivateKey =
proxyCA.getX509KeyManager().getPrivateKey(null);
checkPrivatePublicKeys(caPrivateKey, caPublicKey);
assertEquals(proxyCA.getCaKeyPair().getPublic(), caPublicKey);
assertEquals(proxyCA.getCaKeyPair().getPrivate(), caPrivateKey);
}
|
@Override
public @Nullable Instant currentOutputWatermarkTime() {
return watermarks.getOutputWatermark();
}
|
@Test
public void getOutputWatermarkTimeUsesWatermarkTime() {
when(watermarks.getOutputWatermark()).thenReturn(new Instant(25525L));
assertThat(internals.currentOutputWatermarkTime(), equalTo(new Instant(25525L)));
}
|
public static synchronized void registerProvider(ZuulBlockFallbackProvider provider) {
AssertUtil.notNull(provider, "fallback provider cannot be null");
String route = provider.getRoute();
if ("*".equals(route) || route == null) {
defaultFallbackProvider = provider;
} else {
fallbackProviderCache.put(route, provider);
}
}
|
@Test
public void testRegisterProvider() throws Exception {
MyNullResponseFallBackProvider myNullResponseFallBackProvider = new MyNullResponseFallBackProvider();
ZuulBlockFallbackManager.registerProvider(myNullResponseFallBackProvider);
Assert.assertEquals(myNullResponseFallBackProvider.getRoute(), ROUTE);
Assert.assertNull(myNullResponseFallBackProvider.fallbackResponse(ROUTE, new FlowException("flow ex")));
}
|
public static String[][] assignExecutors(
List<? extends ScanTaskGroup<?>> taskGroups, List<String> executorLocations) {
Map<Integer, JavaHash<StructLike>> partitionHashes = Maps.newHashMap();
String[][] locations = new String[taskGroups.size()][];
for (int index = 0; index < taskGroups.size(); index++) {
locations[index] = assign(taskGroups.get(index), executorLocations, partitionHashes);
}
return locations;
}
|
@Test
public void testFileScanTaskWithDeletes() {
StructLike partition1 = Row.of("k2", null);
StructLike partition2 = Row.of("k1");
List<ScanTask> tasks =
ImmutableList.of(
new MockFileScanTask(
mockDataFile(partition1), mockDeleteFiles(1, partition1), SCHEMA, SPEC_1),
new MockFileScanTask(
mockDataFile(partition2), mockDeleteFiles(3, partition2), SCHEMA, SPEC_2),
new MockFileScanTask(
mockDataFile(partition1), mockDeleteFiles(2, partition1), SCHEMA, SPEC_1));
ScanTaskGroup<ScanTask> taskGroup = new BaseScanTaskGroup<>(tasks);
List<ScanTaskGroup<ScanTask>> taskGroups = ImmutableList.of(taskGroup);
String[][] locations = SparkPlanningUtil.assignExecutors(taskGroups, EXECUTOR_LOCATIONS);
// should assign executors and handle different size of partitions
assertThat(locations.length).isEqualTo(1);
assertThat(locations[0].length).isGreaterThanOrEqualTo(1);
}
|
public String removeFileExtension(String filename) {
if (filename == null || filename.isEmpty()) {
return filename;
}
String extPattern = "(?<!^)[.][^.]*$";
return filename.replaceAll(extPattern, "");
}
|
@Test
public void testRemoveFileExtension() {
String result = fileUtil.removeFileExtension(FOOBAR_HTML);
assertEquals(FOOBAR, result);
result = fileUtil.removeFileExtension(null);
assertNull(result);
result = fileUtil.removeFileExtension("");
assertEquals(0, result.length());
}
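The (?<!^) lookbehind keeps leading-dot names intact and the $ anchor strips only the final extension; a sketch of those edge cases:
@Test
public void testRemoveFileExtensionEdgeCases() {
    // a lone leading dot is not treated as an extension separator
    assertEquals(".gitignore", fileUtil.removeFileExtension(".gitignore"));
    // only the last extension is removed
    assertEquals("archive.tar", fileUtil.removeFileExtension("archive.tar.gz"));
}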
|
public Optional<ContentPackInstallation> findById(ObjectId id) {
final ContentPackInstallation installation = dbCollection.findOneById(id);
return Optional.ofNullable(installation);
}
|
@Test
@MongoDBFixtures("ContentPackInstallationPersistenceServiceTest.json")
public void findByIdWithInvalidId() {
final Optional<ContentPackInstallation> contentPacks = persistenceService.findById(new ObjectId("0000000000000000deadbeef"));
assertThat(contentPacks).isEmpty();
}
|
@Override
public CompletableFuture<Long> getMessageStoreTimeStampAsync(String topic, int queueId,
long consumeQueueOffset) {
if (fetchFromCurrentStore(topic, queueId, consumeQueueOffset)) {
Stopwatch stopwatch = Stopwatch.createStarted();
return fetcher.getMessageStoreTimeStampAsync(topic, queueId, consumeQueueOffset)
.thenApply(time -> {
Attributes latencyAttributes = TieredStoreMetricsManager.newAttributesBuilder()
.put(TieredStoreMetricsConstant.LABEL_OPERATION,
TieredStoreMetricsConstant.OPERATION_API_GET_TIME_BY_OFFSET)
.put(TieredStoreMetricsConstant.LABEL_TOPIC, topic)
.build();
TieredStoreMetricsManager.apiLatency.record(stopwatch.elapsed(TimeUnit.MILLISECONDS), latencyAttributes);
if (time == -1) {
log.debug("GetEarliestMessageTimeAsync failed, try to get message time from next store, topic: {}, queue: {}, queue offset: {}",
topic, queueId, consumeQueueOffset);
return next.getMessageStoreTimeStamp(topic, queueId, consumeQueueOffset);
}
return time;
});
}
return next.getMessageStoreTimeStampAsync(topic, queueId, consumeQueueOffset);
}
|
@Test
public void testGetMessageStoreTimeStampAsync() {
// TieredStorageLevel.DISABLE
Properties properties = new Properties();
properties.setProperty("tieredStorageLevel", "DISABLE");
configuration.update(properties);
when(fetcher.getMessageStoreTimeStampAsync(anyString(), anyInt(), anyLong())).thenReturn(CompletableFuture.completedFuture(1L));
when(defaultStore.getMessageStoreTimeStampAsync(anyString(), anyInt(), anyLong())).thenReturn(CompletableFuture.completedFuture(2L));
when(defaultStore.getMessageStoreTimeStamp(anyString(), anyInt(), anyLong())).thenReturn(3L);
Assert.assertEquals(2, (long) currentStore.getMessageStoreTimeStampAsync(mq.getTopic(), mq.getQueueId(), 0).join());
// TieredStorageLevel.FORCE
properties.setProperty("tieredStorageLevel", "FORCE");
configuration.update(properties);
Assert.assertEquals(1, (long) currentStore.getMessageStoreTimeStampAsync(mq.getTopic(), mq.getQueueId(), 0).join());
Mockito.when(fetcher.getMessageStoreTimeStampAsync(anyString(), anyInt(), anyLong())).thenReturn(CompletableFuture.completedFuture(-1L));
Assert.assertEquals(3, (long) currentStore.getMessageStoreTimeStampAsync(mq.getTopic(), mq.getQueueId(), 0).join());
}
|
public List<Entity> insert(String kind, Map<Long, FullEntity<?>> entities) {
List<Entity> created = new ArrayList<>();
try {
for (Map.Entry<Long, FullEntity<?>> entry : entities.entrySet()) {
Key entityKey =
datastore.newKeyFactory().setKind(kind).setNamespace(namespace).newKey(entry.getKey());
Entity entity = Entity.newBuilder(entityKey, entry.getValue()).build();
created.add(datastore.put(entity));
keys.add(entityKey);
}
} catch (Exception e) {
throw new DatastoreResourceManagerException("Error inserting Datastore entity", e);
}
return created;
}
|
@Test
public void testInsert() {
// Prepare test data
Map<Long, FullEntity<?>> entities = new HashMap<>();
Entity entity = Entity.newBuilder(datastoreMock.newKeyFactory().newKey(1L)).build();
entities.put(1L, entity);
// Mock the Datastore put method
when(datastoreMock.put(any(FullEntity.class))).thenReturn(entity);
// Execute the method under test
List<Entity> result = resourceManager.insert("test_kind", entities);
// Verify the result
assertThat(result).hasSize(1);
assertThat(result).contains(entity);
}
|
@Override
public void run(Namespace namespace, Liquibase liquibase) throws Exception {
String scopeId = null;
if (scopedObjects != null) {
scopeId = Scope.enter(scopedObjects);
}
final AbstractLiquibaseCommand<T> subcommand =
requireNonNull(subcommands.get(namespace.getString(COMMAND_NAME_ATTR)), "Unable find the command");
try {
subcommand.run(namespace, liquibase);
} finally {
if (scopeId != null) {
Scope.exit(scopeId);
}
}
}
|
@Test
void testRun() throws Exception {
// Apply and rollback some DDL changes
final TestMigrationConfiguration conf = MigrationTestSupport.createConfiguration();
assertThatNoException()
.isThrownBy(() -> dbTestCommand.run(null, new Namespace(Collections.emptyMap()), conf));
}
|
@Override
public Collection<String> connectors() {
FutureCallback<Collection<String>> connectorsCallback = new FutureCallback<>();
herder.connectors(connectorsCallback);
try {
return connectorsCallback.get(herderRequestTimeoutMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
throw new ConnectException("Failed to retrieve list of connectors", e);
}
}
|
@Test
public void connectors() {
@SuppressWarnings("unchecked")
ArgumentCaptor<Callback<Collection<String>>> callback = ArgumentCaptor.forClass(Callback.class);
doAnswer(invocation -> {
callback.getValue().onCompletion(null, expectedConnectors);
return null;
}).when(herder).connectors(callback.capture());
assertEquals(expectedConnectors, connectClusterState.connectors());
}
|
static List<String> parseYarn(String[] args) {
String[] params = new String[args.length - 1];
System.arraycopy(args, 1, params, 0, params.length);
CommandLine commandLine = parse(YARN_OPTIONS, params);
if (commandLine.hasOption(OPTION_HELP.getOpt())) {
printYarnHelp();
System.exit(0);
}
List<String> options = new ArrayList<>();
options.add(args[0]);
options.add("-m");
options.add("yarn-cluster");
constructYarnOption(options, OPTION_JM_MEMORY, commandLine);
constructYarnOption(options, OPTION_NAME, commandLine);
constructYarnOption(options, OPTION_QUEUE, commandLine);
constructYarnOption(options, OPTION_SLOTS, commandLine);
constructYarnOption(options, OPTION_TM_MEMORY, commandLine);
return options;
}
|
@Test
void testParseYarnWithoutOptions() {
String[] args = {"yarn"};
List<String> commandOptions = PythonShellParser.parseYarn(args);
String[] expectedCommandOptions = {"yarn", "-m", "yarn-cluster"};
assertThat(commandOptions.toArray()).isEqualTo(expectedCommandOptions);
}
|
@Override
public E set(int index, E element) {
return underlying.set(index, element);
}
|
@Test
public void testSet() {
BoundedList<Integer> list = BoundedList.newArrayBacked(3);
list.add(1);
list.add(200);
list.add(3);
assertEquals(Arrays.asList(1, 200, 3), list);
list.set(0, 100);
list.set(1, 200);
list.set(2, 300);
assertEquals(Arrays.asList(100, 200, 300), list);
}
|
public boolean isEmptyValue( String valueName ) throws KettleValueException {
int idx = rowMeta.indexOfValue( valueName );
if ( idx < 0 ) {
throw new KettleValueException( "Unknown column '" + valueName + "'" );
}
ValueMetaInterface metaType = rowMeta.getValueMeta( idx );
// find by source value type
switch ( metaType.getType() ) {
case ValueMetaInterface.TYPE_STRING:
return rowMeta.getString( data, idx ) == null;
case ValueMetaInterface.TYPE_BOOLEAN:
return rowMeta.getBoolean( data, idx ) == null;
case ValueMetaInterface.TYPE_INTEGER:
return rowMeta.getInteger( data, idx ) == null;
case ValueMetaInterface.TYPE_NUMBER:
return rowMeta.getNumber( data, idx ) == null;
case ValueMetaInterface.TYPE_BIGNUMBER:
return rowMeta.getBigNumber( data, idx ) == null;
case ValueMetaInterface.TYPE_BINARY:
return rowMeta.getBinary( data, idx ) == null;
case ValueMetaInterface.TYPE_DATE:
case ValueMetaInterface.TYPE_TIMESTAMP:
return rowMeta.getDate( data, idx ) == null;
case ValueMetaInterface.TYPE_INET:
return rowMeta.getString( data, idx ) == null;
}
throw new KettleValueException( "Unknown source type: " + metaType.getTypeDesc() );
}
|
@Test
public void testEmptyValues() throws Exception {
RowMeta rowsMetaEmpty = new RowMeta();
rowsMetaEmpty.addValueMeta( new ValueMetaString( "str" ) );
rowsMetaEmpty.addValueMeta( new ValueMetaBoolean( "bool" ) );
rowsMetaEmpty.addValueMeta( new ValueMetaInteger( "int" ) );
rowsMetaEmpty.addValueMeta( new ValueMetaNumber( "num" ) );
rowsMetaEmpty.addValueMeta( new ValueMetaBigNumber( "bignum" ) );
rowsMetaEmpty.addValueMeta( new ValueMetaBinary( "bin" ) );
rowsMetaEmpty.addValueMeta( new ValueMetaDate( "date" ) );
rowsMetaEmpty.addValueMeta( new ValueMetaTimestamp( "timestamp" ) );
rowsMetaEmpty.addValueMeta( new ValueMetaInternetAddress( "inet" ) );
row = new RowMetaAndData( rowsMetaEmpty, null, null, null, null, null, null, null, null, null );
assertTrue( row.isEmptyValue( "str" ) );
assertTrue( row.isEmptyValue( "bool" ) );
assertTrue( row.isEmptyValue( "int" ) );
assertTrue( row.isEmptyValue( "num" ) );
assertTrue( row.isEmptyValue( "bignum" ) );
assertTrue( row.isEmptyValue( "bin" ) );
assertTrue( row.isEmptyValue( "date" ) );
assertTrue( row.isEmptyValue( "timestamp" ) );
assertTrue( row.isEmptyValue( "inet" ) );
}
|
@Nonnull
public SerializedCheckpointException getSerializedCheckpointException() {
return serializedCheckpointException;
}
|
@Test
void testDeclineCheckpointWithUserExceptionCanBeDeserializedWithoutUserClass()
throws Exception {
final String className = "UserException";
final URLClassLoader userClassLoader =
ClassLoaderUtils.compileAndLoadJava(
TempDirUtils.newFolder(tempDir),
className + ".java",
String.format(
"public class %s extends RuntimeException { public %s() {super(\"UserMessage\");} }",
className, className));
Throwable userException =
(Throwable) Class.forName(className, false, userClassLoader).newInstance();
CheckpointException checkpointException =
new CheckpointException(CheckpointFailureReason.CHECKPOINT_DECLINED, userException);
final byte[] serializedCheckpointMessage =
InstantiationUtil.serializeObject(
new DeclineCheckpoint(
new JobID(), createExecutionAttemptId(), 1, checkpointException));
final DeclineCheckpoint deserializedCheckpointMessage =
InstantiationUtil.deserializeObject(
serializedCheckpointMessage, ClassLoader.getSystemClassLoader());
Throwable throwable =
deserializedCheckpointMessage.getSerializedCheckpointException().unwrap();
assertThat(throwable).isInstanceOf(CheckpointException.class);
Optional<Throwable> throwableWithMessage =
ExceptionUtils.findThrowableWithMessage(throwable, userException.getMessage());
assertThat(throwableWithMessage)
.isPresent()
.hasValueSatisfying(
throwable1 ->
assertThat(throwable1)
.hasMessage(
String.format(
"%s: %s",
userException.getClass().getName(),
userException.getMessage())));
}
|
public static byte[] serialize(Descriptors.Descriptor descriptor) {
byte[] schemaDataBytes;
try {
Map<String, FileDescriptorProto> fileDescriptorProtoCache = new HashMap<>();
//recursively cache all FileDescriptorProto
serializeFileDescriptor(descriptor.getFile(), fileDescriptorProtoCache);
//extract root message path
String rootMessageTypeName = descriptor.getFullName();
String rootFileDescriptorName = descriptor.getFile().getFullName();
//build FileDescriptorSet, this is equal to < protoc --include_imports --descriptor_set_out >
byte[] fileDescriptorSet = FileDescriptorSet.newBuilder().addAllFile(fileDescriptorProtoCache.values())
.build().toByteArray();
//serialize to bytes
ProtobufNativeSchemaData schemaData = ProtobufNativeSchemaData.builder()
.fileDescriptorSet(fileDescriptorSet)
.rootFileDescriptorName(rootFileDescriptorName).rootMessageTypeName(rootMessageTypeName).build();
schemaDataBytes = ObjectMapperFactory.getMapperWithIncludeAlways().writer().writeValueAsBytes(schemaData);
logger.debug("descriptor '{}' serialized to '{}'.", descriptor.getFullName(), schemaDataBytes);
} catch (Exception e) {
e.printStackTrace();
throw new SchemaSerializationException(e);
}
return schemaDataBytes;
}
|
@Test
public static void testSerialize() {
byte[] data = ProtobufNativeSchemaUtils.serialize(org.apache.pulsar.client.schema.proto.Test.TestMessage.getDescriptor());
Descriptors.Descriptor descriptor = ProtobufNativeSchemaUtils.deserialize(data);
Assert.assertNotNull(descriptor);
Assert.assertNotNull(descriptor.findFieldByName("nestedField").getMessageType());
Assert.assertNotNull(descriptor.findFieldByName("externalMessage").getMessageType());
}
|
@Override
public boolean trySetCount(long count) {
return get(trySetCountAsync(count));
}
|
@Test
public void testTrySetCount() throws Exception {
RCountDownLatch latch = redisson.getCountDownLatch("latch");
assertThat(latch.trySetCount(1)).isTrue();
assertThat(latch.trySetCount(2)).isFalse();
}
|
public static <T> T[] getBeans(Class<T> interfaceClass) {
Object object = serviceMap.get(interfaceClass.getName());
if(object == null) return null;
if(object instanceof Object[]) {
return (T[])object;
} else {
Object array = Array.newInstance(interfaceClass, 1);
Array.set(array, 0, object);
return (T[])array;
}
}
|
@Test
@Ignore
public void testMultipleWithProperties() {
J[] j = SingletonServiceFactory.getBeans(J.class);
Arrays.stream(j).forEach(o -> logger.debug(o.getJack()));
K[] k = SingletonServiceFactory.getBeans(K.class);
Arrays.stream(k).forEach(o -> logger.debug(o.getKing()));
}
|
public static MemberVersion of(int major, int minor, int patch) {
if (major == 0 && minor == 0 && patch == 0) {
return MemberVersion.UNKNOWN;
} else {
return new MemberVersion(major, minor, patch);
}
}
|
@Test
public void testVersionOf_whenVersionStringIsRelease() {
MemberVersion expected = MemberVersion.of(3, 8, 2);
assertEquals(expected, MemberVersion.of(VERSION_3_8_2_STRING));
}
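The all-zero branch of the factory method, as a minimal sketch:
@Test
public void testVersionOf_whenAllComponentsAreZero() {
    // 0.0.0 maps to the UNKNOWN sentinel rather than a concrete version
    assertEquals(MemberVersion.UNKNOWN, MemberVersion.of(0, 0, 0));
}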
|
@Override
public Optional<FileHashesDto> getDbFile(Component component) {
checkState(previousFileHashesByUuid != null, "Repository not initialized");
return Optional.ofNullable(previousFileHashesByUuid.get(component.getUuid()));
}
|
@Test
public void fail_if_not_set() {
assertThatThrownBy(() -> previousFileHashesRepository.getDbFile(mock(Component.class))).isInstanceOf(IllegalStateException.class);
}
|
public void refer(OperationProgress other) {
// ensure the integrity and avoid dead lock.
List<OperationStep> steps;
List<Long> startTimes;
synchronized (other) {
steps = other._steps;
startTimes = other._startTimes;
}
synchronized (this) {
ensureMutable();
this._steps = steps;
this._startTimes = startTimes;
this._mutable = false;
}
}
|
@Test
public void testRefer() {
OperationProgress progress1 = new OperationProgress();
progress1.addStep(new Pending());
OperationProgress progress2 = new OperationProgress();
progress2.addStep(new WaitingForClusterModel());
assertThat(progress1.progress().get(0), instanceOf(Pending.class));
progress1.refer(progress2);
assertThat(progress1.progress().get(0), instanceOf(WaitingForClusterModel.class));
assertEquals(progress1.progress(), progress2.progress());
}
|
public MetricsBuilder enableThreadPool(Boolean enableThreadPool) {
this.enableThreadpool = enableThreadPool;
return getThis();
}
|
@Test
void enableThreadPool() {
MetricsBuilder builder = MetricsBuilder.newBuilder();
builder.enableThreadPool(false);
Assertions.assertFalse(builder.build().getEnableThreadpool());
}
|
public int findSubscriptionDataCount(final String group) {
ConsumerGroupInfo consumerGroupInfo = this.getConsumerGroupInfo(group);
if (consumerGroupInfo != null) {
return consumerGroupInfo.getSubscriptionTable().size();
}
return 0;
}
|
@Test
public void findSubscriptionDataCountTest() {
register();
final int count = consumerManager.findSubscriptionDataCount(GROUP);
Assert.assertTrue(count > 0);
}
|
public AgentBootstrapperArgs parse(String... args) {
AgentBootstrapperArgs result = new AgentBootstrapperArgs();
try {
new JCommander(result).parse(args);
if (result.help) {
printUsageAndExit(0);
}
return result;
} catch (ParameterException e) {
stderr.println(e.getMessage());
printUsageAndExit(1);
}
return null;
}
|
@Test
public void shouldRaiseExceptionWhenSSLPrivateKeyPassphraseFileIsNotPresent() {
assertThatCode(() -> agentCLI.parse("-serverUrl", "http://example.com/go", "-sslPrivateKeyPassphraseFile", UUID.randomUUID().toString()))
.isInstanceOf(ExitException.class)
.satisfies(o -> assertThat(((ExitException) o).getStatus()).isEqualTo(1));
assertThat(errorStream.toString()).contains("-sslPrivateKeyPassphraseFile must be a file that is readable.");
}
|
@Override
public Optional<ServiceInstance> findById(final String id) {
return jdbcRepository.getDslContextWrapper().transactionResult(
configuration -> findById(id, configuration, false)
);
}
|
@Test
protected void shouldFindByServiceId() {
// Given
AbstractJdbcServiceInstanceRepositoryTest.Fixtures.all().forEach(repository::save);
String uuid = AbstractJdbcServiceInstanceRepositoryTest.Fixtures.EmptyServiceInstance.id();
// When
Optional<ServiceInstance> result = repository.findById(uuid);
// Then
Assertions.assertEquals(Optional.of(AbstractJdbcServiceInstanceRepositoryTest.Fixtures.EmptyServiceInstance), result);
}
|
public void processIssuesByBatch(DbSession dbSession, Set<String> issueKeysSnapshot, Consumer<List<IssueDto>> listConsumer, Predicate<? super IssueDto> filter) {
boolean hasMoreIssues = !issueKeysSnapshot.isEmpty();
long offset = 0;
List<IssueDto> issueDtos = new ArrayList<>();
while (hasMoreIssues) {
Set<String> page = paginate(issueKeysSnapshot, offset);
List<IssueDto> nextOpenIssues = nextOpenIssues(dbSession, page)
.stream()
.filter(filter)
.toList();
issueDtos.addAll(nextOpenIssues);
offset += page.size();
hasMoreIssues = offset < issueKeysSnapshot.size();
}
listConsumer.accept(issueDtos);
}
|
@Test
public void processIssuesByBatch_correctly_processes_all_issues_regardless_of_creation_timestamp() {
var pullActionIssuesRetriever = new PullActionIssuesRetriever(dbClient, queryParams);
List<IssueDto> issuesWithSameCreationTimestamp = IntStream.rangeClosed(1, 100).mapToObj(i -> new IssueDto()
.setKee(Integer.toString(i)).setCreatedAt(100L)).toList();
when(issueDao.selectByBranch(any(), any(), any()))
.thenReturn(issuesWithSameCreationTimestamp);
List<IssueDto> returnedDtos = new ArrayList<>();
Consumer<List<IssueDto>> listConsumer = returnedDtos::addAll;
Set<String> issueKeysSnapshot = issuesWithSameCreationTimestamp.stream().map(IssueDto::getKee).collect(Collectors.toSet());
pullActionIssuesRetriever.processIssuesByBatch(dbClient.openSession(true), issueKeysSnapshot, listConsumer, issueDto -> true);
assertThat(returnedDtos).hasSize(100);
}
|
public void command(String primaryCommand, SecureConfig config, String... allArguments) {
terminal.writeLine("");
final Optional<CommandLine> commandParseResult;
try {
commandParseResult = Command.parse(primaryCommand, allArguments);
} catch (InvalidCommandException e) {
terminal.writeLine(String.format("ERROR: %s", e.getMessage()));
return;
}
if (commandParseResult.isEmpty()) {
printHelp();
return;
}
final CommandLine commandLine = commandParseResult.get();
switch (commandLine.getCommand()) {
case CREATE: {
if (commandLine.hasOption(CommandOptions.HELP)){
terminal.writeLine("Creates a new keystore. For example: 'bin/logstash-keystore create'");
return;
}
if (secretStoreFactory.exists(config.clone())) {
terminal.write("An Logstash keystore already exists. Overwrite ? [y/N] ");
if (isYes(terminal.readLine())) {
create(config);
}
} else {
create(config);
}
break;
}
case LIST: {
if (commandLine.hasOption(CommandOptions.HELP)){
terminal.writeLine("List all secret identifiers from the keystore. For example: " +
"`bin/logstash-keystore list`. Note - only the identifiers will be listed, not the secrets.");
return;
}
Collection<SecretIdentifier> ids = secretStoreFactory.load(config).list();
List<String> keys = ids.stream().filter(id -> !id.equals(LOGSTASH_MARKER)).map(id -> id.getKey()).collect(Collectors.toList());
Collections.sort(keys);
keys.forEach(terminal::writeLine);
break;
}
case ADD: {
if (commandLine.hasOption(CommandOptions.HELP)){
terminal.writeLine("Add secrets to the keystore. For example: " +
"`bin/logstash-keystore add my-secret`, at the prompt enter your secret. You will use the identifier ${my-secret} in your Logstash configuration.");
return;
}
if (commandLine.getArguments().isEmpty()) {
terminal.writeLine("ERROR: You must supply an identifier to add. (e.g. bin/logstash-keystore add my-secret)");
return;
}
if (secretStoreFactory.exists(config.clone())) {
final SecretStore secretStore = secretStoreFactory.load(config);
for (String argument : commandLine.getArguments()) {
final SecretIdentifier id = new SecretIdentifier(argument);
final byte[] existingValue = secretStore.retrieveSecret(id);
if (existingValue != null) {
SecretStoreUtil.clearBytes(existingValue);
terminal.write(String.format("%s already exists. Overwrite? [y/N] ", argument));
if (!isYes(terminal.readLine())) {
continue;
}
}
final String enterValueMessage = String.format("Enter value for %s: ", argument);
char[] secret = null;
while(secret == null) {
terminal.write(enterValueMessage);
final char[] readSecret = terminal.readSecret();
if (readSecret == null || readSecret.length == 0) {
terminal.writeLine("ERROR: Value cannot be empty");
continue;
}
if (!ASCII_ENCODER.canEncode(CharBuffer.wrap(readSecret))) {
terminal.writeLine("ERROR: Value must contain only ASCII characters");
continue;
}
secret = readSecret;
}
add(secretStore, id, SecretStoreUtil.asciiCharToBytes(secret));
}
} else {
terminal.writeLine("ERROR: Logstash keystore not found. Use 'create' command to create one.");
}
break;
}
case REMOVE: {
if (commandLine.hasOption(CommandOptions.HELP)){
terminal.writeLine("Remove secrets from the keystore. For example: " +
"`bin/logstash-keystore remove my-secret`");
return;
}
if (commandLine.getArguments().isEmpty()) {
terminal.writeLine("ERROR: You must supply a value to remove. (e.g. bin/logstash-keystore remove my-secret)");
return;
}
final SecretStore secretStore = secretStoreFactory.load(config);
for (String argument : commandLine.getArguments()) {
SecretIdentifier id = new SecretIdentifier(argument);
if (secretStore.containsSecret(id)) {
secretStore.purgeSecret(id);
terminal.writeLine(String.format("Removed '%s' from the Logstash keystore.", id.getKey()));
} else {
terminal.writeLine(String.format("ERROR: '%s' does not exist in the Logstash keystore.", argument));
}
}
break;
}
}
}
|
@Test
public void testHelpAdd() {
cli.command("add", null, "--help");
assertThat(terminal.out).containsIgnoringCase("Add secrets to the keystore");
}
|
@Override
public void onStreamRequest(StreamRequest req,
RequestContext requestContext,
Map<String, String> wireAttrs,
NextFilter<StreamRequest, StreamResponse> nextFilter)
{
disruptRequest(req, requestContext, wireAttrs, nextFilter);
}
|
@Test
public void testStreamTimeoutDisrupt() throws Exception
{
final RequestContext requestContext = new RequestContext();
requestContext.putLocalAttr(DISRUPT_CONTEXT_KEY, DisruptContexts.timeout());
final DisruptFilter filter = new DisruptFilter(_scheduler, _executor, REQUEST_TIMEOUT, _clock);
final CountDownLatch latch = new CountDownLatch(1);
final AtomicBoolean success = new AtomicBoolean(false);
final NextFilter<StreamRequest, StreamResponse> next = new NextFilter<StreamRequest, StreamResponse>()
{
@Override
public void onRequest(StreamRequest restRequest, RequestContext requestContext, Map<String, String> wireAttrs)
{
latch.countDown();
}
@Override
public void onResponse(StreamResponse restResponse, RequestContext requestContext, Map<String, String> wireAttrs)
{
latch.countDown();
}
@Override
public void onError(Throwable ex, RequestContext requestContext, Map<String, String> wireAttrs)
{
success.set(ex instanceof TimeoutException);
latch.countDown();
}
};
filter.onStreamRequest(new StreamRequestBuilder(new URI(URI)).build(EntityStreams.emptyStream()), requestContext,
Collections.emptyMap(), next);
Assert.assertTrue(latch.await(TEST_TIMEOUT, TimeUnit.MILLISECONDS), "Missing NextFilter invocation");
Assert.assertTrue(success.get(), "Unexpected method invocation");
}
|
static EndTransactionMarker deserializeValue(ControlRecordType type, ByteBuffer value) {
ensureTransactionMarkerControlType(type);
if (value.remaining() < CURRENT_END_TXN_MARKER_VALUE_SIZE)
throw new InvalidRecordException("Invalid value size found for end transaction marker. Must have " +
"at least " + CURRENT_END_TXN_MARKER_VALUE_SIZE + " bytes, but found only " + value.remaining());
short version = value.getShort(0);
if (version < 0)
throw new InvalidRecordException("Invalid version found for end transaction marker: " + version +
". May indicate data corruption");
if (version > CURRENT_END_TXN_MARKER_VERSION)
log.debug("Received end transaction marker value version {}. Parsing as version {}", version,
CURRENT_END_TXN_MARKER_VERSION);
int coordinatorEpoch = value.getInt(2);
return new EndTransactionMarker(type, coordinatorEpoch);
}
|
@Test
public void testCannotDeserializeUnknownControlType() {
assertThrows(IllegalArgumentException.class,
() -> EndTransactionMarker.deserializeValue(ControlRecordType.UNKNOWN, ByteBuffer.wrap(new byte[0])));
}
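A sketch of the undersized-buffer branch, assuming COMMIT passes the control-type check as in the focal method:
@Test
public void testCannotDeserializeTruncatedValue() {
    // fewer than CURRENT_END_TXN_MARKER_VALUE_SIZE bytes must raise InvalidRecordException
    assertThrows(InvalidRecordException.class,
        () -> EndTransactionMarker.deserializeValue(ControlRecordType.COMMIT, ByteBuffer.wrap(new byte[2])));
}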
|
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = models.length;
double[][] prediction = new double[ntrees][n];
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = 0;
for (int i = 0; i < ntrees; i++) {
base = base + models[i].tree.predict(xj);
prediction[i][j] = base / (i+1);
}
}
return prediction;
}
|
@Test
public void testBank32nh() {
test("bank32nh", Bank32nh.formula, Bank32nh.data, 0.0978);
}
|
@Bean
public ShenyuPlugin sofaPlugin(final ObjectProvider<SofaParamResolveService> sofaParamResolveService) {
return new SofaPlugin(new SofaProxyService(sofaParamResolveService.getIfAvailable()));
}
|
@Test
public void testSofaPlugin() {
applicationContextRunner.run(context -> {
ShenyuPlugin plugin = context.getBean("sofaPlugin", ShenyuPlugin.class);
assertNotNull(plugin);
assertThat(plugin.named()).isEqualTo(PluginEnum.SOFA.getName());
}
);
}
|
public String toString() {
// ensure minimum precision of 3 decimal places by using our own 3-decimal-place formatter when we have no nanos.
final DateTimeFormatter formatter = (instant.getNano() == 0 ? ISO_INSTANT_MILLIS : DateTimeFormatter.ISO_INSTANT);
return formatter.format(instant);
}
|
@Test
public void testParsingDateWithOffset() throws Exception {
final Timestamp t = new Timestamp("2014-09-23-08:00", OFFSET_CLOCK, LOCALE);
assertEquals("2014-09-23T08:00:00.000Z", t.toString());
}
|
public static Statement sanitize(
final Statement node,
final MetaStore metaStore) {
return sanitize(node, metaStore, true);
}
|
@Test
public void shouldThrowIfInsertIntoSourceWithHeader() {
// Given:
final Statement stmt = givenQuery("INSERT INTO TEST1 SELECT * FROM TEST0;");
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> AstSanitizer.sanitize(stmt, META_STORE)
);
// Then:
assertThat(e.getMessage(), containsString(
"Cannot insert into TEST1 because it has header columns"));
}
|
@Override
public void open() throws Exception {
windowSerializer = windowAssigner.getWindowSerializer(new ExecutionConfig());
internalTimerService = getInternalTimerService("window-timers", windowSerializer, this);
// The structure is: [type]|[normal record]|[timestamp]|[current watermark]|[timer data]
// If the type is 'NORMAL_RECORD', store the RowData object in the 2nd column.
// If the type is 'TRIGGER_TIMER', store the timestamp in 3rd column and the timer
// data in 5th column.
reuseRowData =
new UpdatableRowData(GenericRowData.of(NORMAL_RECORD, null, null, null, null), 5);
reuseTimerRowData =
new UpdatableRowData(GenericRowData.of(TRIGGER_TIMER, null, null, null, null), 5);
// The structure is: [timer_type]|[row key]|[encoded namespace]
reuseTimerData = new UpdatableRowData(GenericRowData.of(0, null, 0), 3);
reuseTimerRowData.setField(4, reuseTimerData);
keyLength = getKeyType().getFieldCount();
keySerializer = (RowDataSerializer) getKeySerializer();
super.open();
}
|
@Test
void testFinishBundleTriggeredByTime() throws Exception {
Configuration conf = new Configuration();
conf.set(PythonOptions.MAX_BUNDLE_SIZE, 10);
conf.set(PythonOptions.MAX_BUNDLE_TIME_MILLS, 1000L);
OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = getTestHarness(conf);
long initialTime = 0L;
ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
testHarness.open();
testHarness.processElement(newRecord(true, initialTime + 1, "c1", "c2", 0L, 0L));
testHarness.processElement(newRecord(true, initialTime + 2, "c1", "c4", 1L, 6000L));
testHarness.processElement(newRecord(true, initialTime + 3, "c1", "c6", 2L, 10000L));
testHarness.processElement(newRecord(true, initialTime + 4, "c2", "c8", 3L, 0L));
testHarness.processWatermark(new Watermark(20000L));
testHarness.setProcessingTime(1000L);
expectedOutput.add(newWindowRecord(-5000L, 5000L, "c1", 0L));
expectedOutput.add(newStateCleanupRecord(-5000L, 5000L, "c1"));
expectedOutput.add(newWindowRecord(-5000L, 5000L, "c2", 3L));
expectedOutput.add(newStateCleanupRecord(-5000L, 5000L, "c2"));
expectedOutput.add(newWindowRecord(0, 10000L, "c2", 3L));
expectedOutput.add(newStateCleanupRecord(0L, 10000L, "c2"));
expectedOutput.add(newWindowRecord(0, 10000L, "c1", 0L));
expectedOutput.add(newStateCleanupRecord(0L, 10000L, "c1"));
expectedOutput.add(newWindowRecord(5000L, 15000L, "c1", 1L));
expectedOutput.add(newStateCleanupRecord(5000L, 15000L, "c1"));
expectedOutput.add(newWindowRecord(10000L, 20000L, "c1", 2L));
expectedOutput.add(newStateCleanupRecord(10000L, 20000L, "c1"));
expectedOutput.add(new Watermark(20000L));
assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
testHarness.close();
}
|
public void deleteFavorite(long favoriteId) {
Favorite favorite = favoriteRepository.findById(favoriteId).orElse(null);
checkUserOperatePermission(favorite);
favoriteRepository.delete(favorite);
}
|
@Test
@Sql(scripts = "/sql/favorites/favorites.sql", executionPhase = Sql.ExecutionPhase.BEFORE_TEST_METHOD)
@Sql(scripts = "/sql/cleanup.sql", executionPhase = Sql.ExecutionPhase.AFTER_TEST_METHOD)
public void testDeleteFavorite() {
long legalFavoriteId = 21L;
favoriteService.deleteFavorite(legalFavoriteId);
Assert.assertNull(favoriteRepository.findById(legalFavoriteId).orElse(null));
}
|
public Type getType() {
return this.type;
}
|
@Test
public void testGetType() {
AppPermission appPermission = new AppPermission(Type.APP_WRITE);
assertEquals(Type.APP_WRITE, appPermission.getType());
}
|
@Override
public List<String> getChildren(String path) {
try {
return client.getChildren().forPath(path);
} catch (NoNodeException e) {
return null;
} catch (Exception e) {
throw new IllegalStateException(e.getMessage(), e);
}
}
|
@Test
void testChildrenPath() {
String path = "/dubbo/org.apache.dubbo.demo.DemoService/providers";
curatorClient.create(path, false, true);
curatorClient.create(path + "/provider1", false, true);
curatorClient.create(path + "/provider2", false, true);
List<String> children = curatorClient.getChildren(path);
assertThat(children.size(), is(2));
}
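The NoNodeException branch maps a missing node to null rather than an exception; a minimal sketch, assuming the path was never created:
@Test
void testChildrenOfMissingPathReturnsNull() {
    // ZooKeeper's NoNodeException is translated into a null result
    assertNull(curatorClient.getChildren("/dubbo/does-not-exist"));
}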
|
@Override
public void configure(Map<String, ?> configs) {
super.configure(configs);
configureSamplingInterval(configs);
configurePrometheusAdapter(configs);
configureQueryMap(configs);
}
|
@Test(expected = ConfigException.class)
public void testConfigureWithNoPrometheusEndpointFails() throws Exception {
Map<String, Object> config = new HashMap<>();
addCapacityConfig(config);
_prometheusMetricSampler.configure(config);
}
|
@Override
protected KafkaLogCollectClient getLogConsumeClient() {
return LoggingKafkaPluginDataHandler.getKafkaLogCollectClient();
}
|
@Test
public void testGetLogConsumeClient() {
LogConsumeClient logConsumeClient = new KafkaLogCollector().getLogConsumeClient();
Assertions.assertEquals(KafkaLogCollectClient.class, logConsumeClient.getClass());
}
|
@PostMapping(
path = "/admin/extension/{namespaceName}/{extensionName}/delete",
produces = MediaType.APPLICATION_JSON_VALUE
)
public ResponseEntity<ResultJson> deleteExtension(@PathVariable String namespaceName,
@PathVariable String extensionName,
@RequestBody(required = false) List<TargetPlatformVersionJson> targetVersions) {
try {
ResultJson result;
var adminUser = admins.checkAdminUser();
if(targetVersions == null) {
result = admins.deleteExtension(namespaceName, extensionName, adminUser);
} else {
var results = new ArrayList<ResultJson>();
for(var targetVersion : targetVersions) {
results.add(admins.deleteExtension(namespaceName, extensionName, targetVersion.targetPlatform, targetVersion.version, adminUser));
}
result = new ResultJson();
result.error = results.stream().map(r -> r.error).filter(Objects::nonNull).collect(Collectors.joining("\n"));
result.success = results.stream().map(r -> r.success).filter(Objects::nonNull).collect(Collectors.joining("\n"));
}
return ResponseEntity.ok(result);
} catch (ErrorResultException exc) {
return exc.toResponseEntity();
}
}
|
@Test
public void testDeleteExtension() throws Exception {
mockAdminUser();
mockExtension(2, 0, 0);
mockMvc.perform(post("/admin/extension/{namespace}/{extension}/delete", "foobar", "baz")
.with(user("admin_user").authorities(new SimpleGrantedAuthority(("ROLE_ADMIN"))))
.with(csrf().asHeader()))
.andExpect(status().isOk())
.andExpect(content().json(successJson("Deleted foobar.baz")));
}
|
@Override
public PackageConfiguration responseMessageForPackageConfiguration(String responseBody) {
try {
PackageConfiguration packageConfiguration = new PackageConfiguration();
Map<String, Map> configurations;
try {
configurations = parseResponseToMap(responseBody);
} catch (Exception e) {
throw new RuntimeException("Package configuration should be returned as a map");
}
if (configurations == null || configurations.isEmpty()) {
throw new RuntimeException("Empty response body");
}
for (String key : configurations.keySet()) {
if (isEmpty(key)) {
throw new RuntimeException("Package configuration key cannot be empty");
}
if (!(configurations.get(key) instanceof Map)) {
throw new RuntimeException(format("Package configuration properties for key '%s' should be represented as a Map", key));
}
packageConfiguration.add(toPackageMaterialProperty(key, configurations.get(key)));
}
return packageConfiguration;
} catch (RuntimeException e) {
throw new RuntimeException(format("Unable to de-serialize json response. %s", e.getMessage()));
}
}
|
@Test
public void shouldBuildPackageConfigurationFromResponseBody() throws Exception {
String responseBody = "{" +
"\"key-one\":{}," +
"\"key-two\":{\"default-value\":\"two\",\"part-of-identity\":true,\"secure\":true,\"required\":true,\"display-name\":\"display-two\",\"display-order\":\"1\"}," +
"\"key-three\":{\"default-value\":\"three\",\"part-of-identity\":false,\"secure\":false,\"required\":false,\"display-name\":\"display-three\",\"display-order\":\"2\"}" +
"}";
com.thoughtworks.go.plugin.api.material.packagerepository.PackageConfiguration packageConfiguration = messageHandler.responseMessageForPackageConfiguration(responseBody);
assertPropertyConfiguration((PackageMaterialProperty) packageConfiguration.get("key-one"), "key-one", "", true, true, false, "", 0);
assertPropertyConfiguration((PackageMaterialProperty) packageConfiguration.get("key-two"), "key-two", "two", true, true, true, "display-two", 1);
assertPropertyConfiguration((PackageMaterialProperty) packageConfiguration.get("key-three"), "key-three", "three", false, false, false, "display-three", 2);
}
|
@Override
public boolean isGeneric() {
return generic;
}
|
@Test
void isGeneric() {
Assertions.assertFalse(method.isGeneric());
}
|
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
}
|
@Test
public void setStickerPositionInSet() {
GetStickerSetResponse setResponse = bot.execute(new GetStickerSet(stickerSet));
Sticker sticker = setResponse.stickerSet().stickers()[0];
BaseResponse response = bot.execute(new SetStickerPositionInSet(sticker.fileId(), 0));
assertTrue(response.isOk());
}
|
@Override
public int size() {
return contents.size();
}
|
@Test
public void testGetSetByType3() throws HCatException {
HCatRecord inpRec = getHCat13TypesRecord();
HCatRecord newRec = new DefaultHCatRecord(inpRec.size());
HCatSchema hsch = HCatSchemaUtils.getHCatSchema(
"a:decimal(5,2),b:char(10),c:varchar(20),d:date,e:timestamp");
newRec.setDecimal("a", hsch, inpRec.getDecimal("a", hsch));
newRec.setChar("b", hsch, inpRec.getChar("b", hsch));
newRec.setVarchar("c", hsch, inpRec.getVarchar("c", hsch));
newRec.setDate("d", hsch, inpRec.getDate("d", hsch));
newRec.setTimestamp("e", hsch, inpRec.getTimestamp("e", hsch));
}
|
public static void updateKeyForBlobStore(Map<String, Object> conf, BlobStore blobStore, CuratorFramework zkClient, String key,
NimbusInfo nimbusDetails) {
try {
            // Most of the Clojure tests currently access blobs through getBlob. Because updateKeyForBlobStore
            // verifies that the correct version of the blob is updated (as part of nimbus HA) before performing
            // any operation on it, several test cases would otherwise need to be stubbed to ignore this method.
            // Returning early when nimbusDetails (the current nimbus host/port data) has not been initialized
            // by the test is therefore a valid trade-off. This applies only to the local blobstore when it is
            // used together with nimbus HA.
if (nimbusDetails == null) {
return;
}
boolean isListContainsCurrentNimbusInfo = false;
List<String> stateInfo;
if (zkClient.checkExists().forPath(BLOBSTORE_SUBTREE + "/" + key) == null) {
return;
}
stateInfo = zkClient.getChildren().forPath(BLOBSTORE_SUBTREE + "/" + key);
if (stateInfo == null || stateInfo.isEmpty()) {
return;
}
LOG.debug("StateInfo for update {}", stateInfo);
Set<NimbusInfo> nimbusInfoList = getNimbodesWithLatestSequenceNumberOfBlob(zkClient, key);
for (NimbusInfo nimbusInfo : nimbusInfoList) {
if (nimbusInfo.getHost().equals(nimbusDetails.getHost())) {
isListContainsCurrentNimbusInfo = true;
break;
}
}
if (!isListContainsCurrentNimbusInfo && downloadUpdatedBlob(conf, blobStore, key, nimbusInfoList)) {
LOG.debug("Updating state inside zookeeper for an update");
createStateInZookeeper(conf, key, nimbusDetails);
}
} catch (KeeperException.NoNodeException | KeyNotFoundException e) {
            // Race condition with a concurrent delete; nothing left to update.
return;
} catch (Exception exp) {
throw new RuntimeException(exp);
}
}
|
@Test
public void testUpdateKeyForBlobStore_nullNimbusInfo() {
BlobStoreUtils.updateKeyForBlobStore(conf, blobStore, zkClientBuilder.build(), KEY, null);
zkClientBuilder.verifyExists(false);
zkClientBuilder.verifyGetChildren(false);
verify(nimbusDetails, never()).getHost();
verify(conf, never()).get(anyString());
}
|
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter,
MetricsRecorder metricsRecorder,
BufferSupplier bufferSupplier) {
if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) {
// check the magic value
if (!records.hasMatchingMagic(toMagic))
return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder);
else
// Do in-place validation, offset assignment and maybe set timestamp
return assignOffsetsNonCompressed(offsetCounter, metricsRecorder);
} else
return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier);
}
|
@Test
public void testControlRecordsNotCompressed() {
long offset = 1234567;
EndTransactionMarker endTxnMarker = new EndTransactionMarker(ControlRecordType.COMMIT, 0);
MemoryRecords records = MemoryRecords.withEndTransactionMarker(23423L, (short) 5, endTxnMarker);
LogValidator.ValidationResult result = new LogValidator(
records,
new TopicPartition("topic", 0),
time,
CompressionType.NONE,
Compression.snappy().build(),
false,
RecordBatch.CURRENT_MAGIC_VALUE,
TimestampType.CREATE_TIME,
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.COORDINATOR,
MetadataVersion.latestTesting()
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
RequestLocal.withThreadConfinedCaching().bufferSupplier()
);
MemoryRecords validatedRecords = result.validatedRecords;
assertEquals(1, TestUtils.toList(validatedRecords.batches()).size());
assertFalse(TestUtils.toList(validatedRecords.batches()).get(0).isCompressed());
}
|
public Subscription addSubscriber(Function<? super E, Subscription> subscriber) {
Objects.requireNonNull(subscriber);
subscribers.add(subscriber);
List<E> keys = new ArrayList<>(map.keySet());
keys.forEach(key -> {
List<Subscription> otherSubs = map.get(key);
Subscription sub = subscriber.apply(key);
otherSubs.add(sub);
map.put(key, otherSubs);
});
return () -> removeSubscriber(subscriber);
}
|
@Test
public void adding_subscriber_and_removing_it_will_not_throw_exception() {
SubscribeableContentsObsSet<Integer> set = new SubscribeableContentsObsSet<>();
Subscription removeSubscriber = set.addSubscriber(i -> Subscription.EMPTY);
removeSubscriber.unsubscribe();
}
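
The contract worth noting here is that a subscriber added late is also applied to elements already in the set, because addSubscriber walks the current key set before returning. A minimal sketch of that behavior, assuming the set exposes a standard add(E) alongside the API shown above:

SubscribeableContentsObsSet<Integer> set = new SubscribeableContentsObsSet<>();
set.add(1);
set.add(2);
List<Integer> seen = new ArrayList<>();
Subscription subscription = set.addSubscriber(i -> {
    seen.add(i);               // invoked immediately for the existing elements 1 and 2
    return Subscription.EMPTY; // per-element cleanup hook; nothing to release here
});
// seen is now [1, 2]; elements added later are delivered to the subscriber as well
subscription.unsubscribe();    // removes the subscriber via the returned handle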
|
public void stop() {
if (shutdownExecutorOnStop) {
executor.shutdown(); // Disable new tasks from being submitted
}
try {
report(); // Report metrics one last time
} catch (Exception e) {
LOG.warn("Final reporting of metrics failed.", e);
}
if (shutdownExecutorOnStop) {
try {
// Wait a while for existing tasks to terminate
if (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
executor.shutdownNow(); // Cancel currently executing tasks
// Wait a while for tasks to respond to being cancelled
if (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
LOG.warn("ScheduledExecutorService did not terminate.");
}
}
} catch (InterruptedException ie) {
// (Re-)Cancel if current thread also interrupted
executor.shutdownNow();
// Preserve interrupt status
Thread.currentThread().interrupt();
}
} else {
// The external manager (like JEE container) responsible for lifecycle of executor
cancelScheduledFuture();
}
}
|
@Test
public void shouldNotFailOnStopIfReporterWasNotStarted() {
for (ScheduledReporter reporter : reporters) {
reporter.stop();
}
}
|
public void setDefault() {
replaceAllByValue = null;
replaceAllMask = null;
selectFields = false;
selectValuesType = false;
setEmptyStringAll = false;
int nrfields = 0;
int nrtypes = 0;
allocate( nrtypes, nrfields );
/*
* Code will never execute. nrfields and nrtypes
* are both zero above. so for-next is skipped on both.
*
* MB - 5/2016
*
for ( int i = 0; i < nrtypes; i++ ) {
typeName[i] = "typename" + i;
typereplaceValue[i] = "typevalue" + i;
typereplaceMask[i] = "typemask" + i;
setTypeEmptyString[i] = false;
}
for ( int i = 0; i < nrfields; i++ ) {
fieldName[i] = "field" + i;
replaceValue[i] = "value" + i;
replaceMask[i] = "mask" + i;
setEmptyString[i] = false;
}
*/
}
|
@Test
public void testSetDefault() throws Exception {
IfNullMeta inm = new IfNullMeta();
inm.setDefault();
assertTrue( ( inm.getValueTypes() != null ) && ( inm.getValueTypes().length == 0 ) );
assertTrue( ( inm.getFields() != null ) && ( inm.getFields().length == 0 ) );
assertFalse( inm.isSelectFields() );
assertFalse( inm.isSelectValuesType() );
}
|
public RemotingDesc getRemotingBeanDesc(Object bean) {
return remotingServiceMap.get(bean);
}
|
@Test
public void testGetRemotingDeanDescFail() {
SimpleBean simpleBean = new SimpleBean();
assertNull(remotingParser.getRemotingBeanDesc(simpleBean));
}
|
public boolean finish() {
return finish(false);
}
|
@Test
@Timeout(value = 3000, unit = TimeUnit.MILLISECONDS)
public void testHandlerAddedExecutedInEventLoop() throws Throwable {
final CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
final ChannelHandler handler = new ChannelHandlerAdapter() {
@Override
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
try {
assertTrue(ctx.executor().inEventLoop());
} catch (Throwable cause) {
error.set(cause);
} finally {
latch.countDown();
}
}
};
EmbeddedChannel channel = new EmbeddedChannel(handler);
assertFalse(channel.finish());
latch.await();
Throwable cause = error.get();
if (cause != null) {
throw cause;
}
}
|
public String render(String inline, Map<String, Object> variables) throws IllegalVariableEvaluationException {
return this.render(inline, variables, this.variableConfiguration.getRecursiveRendering());
}
|
@Test
void shouldRenderUsingAlternativeRendering() throws IllegalVariableEvaluationException {
TestVariableRenderer renderer = new TestVariableRenderer(applicationContext, variableConfiguration);
String render = renderer.render("{{ dummy }}", Map.of());
Assertions.assertEquals("result", render);
}
|
public ContentInfo verify(ContentInfo signedMessage, Date date) {
final SignedData signedData = SignedData.getInstance(signedMessage.getContent());
final X509Certificate cert = certificate(signedData);
certificateVerifier.verify(cert, date);
final X500Name name = X500Name.getInstance(cert.getIssuerX500Principal().getEncoded());
try {
final CMSSignedData cms = new CMSSignedData(signedMessage);
cms.verifySignatures(signerId -> {
if (!name.equals(signerId.getIssuer())) {
throw new VerificationException("Issuer does not match certificate");
}
if (!cert.getSerialNumber().equals(signerId.getSerialNumber())) {
throw new VerificationException("Serial number does not match certificate");
}
return new JcaSignerInfoVerifierBuilder(digestProvider).setProvider(bcProvider).build(cert);
});
} catch (CMSException e) {
throw new VerificationException("Could not verify CMS", e);
}
return signedData.getEncapContentInfo();
}
|
@Test
public void shouldThrowExceptionIfSignatureIsIncorrect() throws Exception {
final byte[] data = fixture();
data[2183]++;
final ContentInfo signedMessage = ContentInfo.getInstance(data);
thrown.expect(VerificationException.class);
thrown.expectMessage("Could not verify CMS");
new CmsVerifier(new CertificateVerifier.None()).verify(signedMessage);
}
|
@Override
public Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents) {
if (input.isEmpty()) return List.of();
List<Token> tokens = textToTokens(input, analyzerFactory.getAnalyzer(language, stemMode, removeAccents));
log.log(Level.FINEST, () -> "Tokenized '" + language + "' text='" + input + "' into: n=" + tokens.size() + ", tokens=" + tokens);
return tokens;
}
|
@Test
public void testTokenizer() {
String text = "This is my Text";
Iterable<Token> tokens = luceneLinguistics().getTokenizer()
.tokenize(text, Language.ENGLISH, StemMode.ALL, true);
assertEquals(List.of("my", "text"), tokenStrings(tokens));
}
|
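// Computes the SHA-1 digest of the DER-encoded certificate and renders it as a 40-character lowercase hex fingerprint.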
public static String getCertFingerPrint(Certificate cert) {
byte [] digest = null;
try {
byte[] encCertInfo = cert.getEncoded();
MessageDigest md = MessageDigest.getInstance("SHA-1");
digest = md.digest(encCertInfo);
} catch (Exception e) {
logger.error("Exception:", e);
}
if (digest != null) {
return bytesToHex(digest).toLowerCase();
}
return null;
}
|
@Test
public void testGetCertFingerPrintBob() throws Exception {
X509Certificate cert = null;
try (InputStream is = Config.getInstance().getInputStreamFromFile("bob.crt")){
CertificateFactory cf = CertificateFactory.getInstance("X.509");
cert = (X509Certificate) cf.generateCertificate(is);
} catch (Exception e) {
e.printStackTrace();
}
String fp = FingerPrintUtil.getCertFingerPrint(cert);
Assert.assertEquals("921b97842f23474d8961bfd54911c298316aa558", fp);
}
|
Flux<DataEntityList> export(KafkaCluster cluster) {
String clusterOddrn = Oddrn.clusterOddrn(cluster);
Statistics stats = statisticsCache.get(cluster);
return Flux.fromIterable(stats.getTopicDescriptions().keySet())
.filter(topicFilter)
.flatMap(topic -> createTopicDataEntity(cluster, topic, stats))
.onErrorContinue(
(th, topic) -> log.warn("Error exporting data for topic {}, cluster {}", topic, cluster.getName(), th))
.buffer(100)
.map(topicsEntities ->
new DataEntityList()
.dataSourceOddrn(clusterOddrn)
.items(topicsEntities));
}
|
@Test
void doesExportTopicData() {
when(schemaRegistryClientMock.getSubjectVersion("testTopic-value", "latest", false))
.thenReturn(Mono.just(
new SchemaSubject()
.schema("\"string\"")
.schemaType(SchemaType.AVRO)
));
when(schemaRegistryClientMock.getSubjectVersion("testTopic-key", "latest", false))
.thenReturn(Mono.just(
new SchemaSubject()
.schema("\"int\"")
.schemaType(SchemaType.AVRO)
));
stats = Statistics.empty()
.toBuilder()
.topicDescriptions(
Map.of(
"testTopic",
new TopicDescription(
"testTopic",
false,
List.of(
new TopicPartitionInfo(
0,
null,
List.of(
new Node(1, "host1", 9092),
new Node(2, "host2", 9092)
),
List.of())
))
)
)
.topicConfigs(
Map.of(
"testTopic", List.of(
new ConfigEntry(
"custom.config",
"100500",
ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG,
false,
false,
List.of(),
ConfigEntry.ConfigType.INT,
null
)
)
)
)
.build();
StepVerifier.create(topicsExporter.export(cluster))
.assertNext(entityList -> {
assertThat(entityList.getItems())
.hasSize(1);
DataEntity topicEntity = entityList.getItems().get(0);
assertThat(topicEntity.getName()).isNotEmpty();
assertThat(topicEntity.getOddrn())
.isEqualTo("//kafka/cluster/localhost:19092,localhost:9092/topics/testTopic");
assertThat(topicEntity.getType()).isEqualTo(DataEntityType.KAFKA_TOPIC);
assertThat(topicEntity.getMetadata())
.hasSize(1)
.singleElement()
.satisfies(e ->
assertThat(e.getMetadata())
.containsExactlyInAnyOrderEntriesOf(
Map.of(
"partitions", 1,
"replication_factor", 2,
"custom.config", "100500")));
assertThat(topicEntity.getDataset()).isNotNull();
assertThat(topicEntity.getDataset().getFieldList())
.hasSize(4); // 2 field for key, 2 for value
})
.verifyComplete();
}
|
@Operation(summary = "grantUDFFunc", description = "GRANT_UDF_FUNC_NOTES")
@Parameters({
@Parameter(name = "userId", description = "USER_ID", required = true, schema = @Schema(implementation = int.class, example = "100")),
@Parameter(name = "udfIds", description = "UDF_IDS", required = true, schema = @Schema(implementation = String.class))
})
@PostMapping(value = "/grant-udf-func")
@ResponseStatus(HttpStatus.OK)
@ApiException(GRANT_UDF_FUNCTION_ERROR)
public Result grantUDFFunc(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "userId") int userId,
@RequestParam(value = "udfIds") String udfIds) {
Map<String, Object> result = usersService.grantUDFFunction(loginUser, userId, udfIds);
return returnDataList(result);
}
|
@Test
public void testGrantUDFFunc() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("userId", "32");
paramsMap.add("udfIds", "5");
MvcResult mvcResult = mockMvc.perform(post("/users/grant-udf-func")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assertions.assertEquals(Status.USER_NOT_EXIST.getCode(), result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
|
public static List<String> loadAndModifyConfiguration(String[] args) throws FlinkException {
return ConfigurationParserUtils.loadAndModifyConfiguration(
filterCmdArgs(args, ModifiableClusterConfigurationParserFactory.options()),
BashJavaUtils.class.getSimpleName());
}
|
@TestTemplate
void testLoadAndModifyConfigurationRemoveKeysMatched() throws Exception {
String key = "key";
String[] args = {
"--configDir",
confDir.toFile().getAbsolutePath(),
String.format("-D%s=value", key),
"--removeKey",
key
};
List<String> list = FlinkConfigLoader.loadAndModifyConfiguration(args);
if (standardYaml) {
assertThat(list).containsExactly("test:", " key: " + TEST_CONFIG_VALUE);
} else {
assertThat(list).containsExactlyInAnyOrder(TEST_CONFIG_KEY + ": " + TEST_CONFIG_VALUE);
}
}
|
@CanIgnoreReturnValue
public final Ordered containsAtLeastEntriesIn(Multimap<?, ?> expectedMultimap) {
checkNotNull(expectedMultimap, "expectedMultimap");
checkNotNull(actual);
ListMultimap<?, ?> missing = difference(expectedMultimap, actual);
// TODO(kak): Possible enhancement: Include "[1 copy]" if the element does appear in
// the subject but not enough times. Similarly for unexpected extra items.
if (!missing.isEmpty()) {
failWithActual(
fact("missing", countDuplicatesMultimap(annotateEmptyStringsMultimap(missing))),
simpleFact("---"),
fact("expected to contain at least", annotateEmptyStringsMultimap(expectedMultimap)));
return ALREADY_FAILED;
}
return new MultimapInOrder(/* allowUnexpected = */ true, expectedMultimap);
}
|
@Test
public void containsAtLeastRespectsDuplicates() {
ImmutableListMultimap<Integer, String> actual =
ImmutableListMultimap.of(3, "one", 3, "two", 3, "one", 4, "five", 4, "five");
ImmutableListMultimap<Integer, String> expected =
ImmutableListMultimap.of(3, "two", 4, "five", 3, "one", 4, "five", 3, "one");
assertThat(actual).containsAtLeastEntriesIn(expected);
}
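
The duplicate-awareness also works in the failing direction: if the actual multimap holds fewer copies of an entry than the expectation lists, the assertion fails. A minimal sketch with hypothetical values:

ImmutableListMultimap<Integer, String> actual =
    ImmutableListMultimap.of(3, "one", 4, "five");
ImmutableListMultimap<Integer, String> expected =
    ImmutableListMultimap.of(3, "one", 3, "one", 4, "five");
// Fails: actual contains {3=one} once, but the expectation requires at least two copies.
assertThat(actual).containsAtLeastEntriesIn(expected);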
|
@Override
public void init(TbContext ctx, TbNodeConfiguration configuration) throws TbNodeException {
this.config = TbNodeUtils.convert(configuration, TbMsgDeleteAttributesNodeConfiguration.class);
this.keys = config.getKeys();
}
|
@Test
void givenMsg_whenOnMsg_thenVerifyOutput_SendAttributesDeletedNotification_NotifyDevice() throws Exception {
config.setSendAttributesDeletedNotification(true);
config.setNotifyDevice(true);
config.setScope(DataConstants.SHARED_SCOPE);
nodeConfiguration = new TbNodeConfiguration(JacksonUtil.valueToTree(config));
node.init(ctx, nodeConfiguration);
onMsg_thenVerifyOutput(true, true, false);
}
|
public HoodieConfig() {
this.props = new TypedProperties();
}
|
@Test
public void testHoodieConfig() {
// Case 1: defaults and infer function are used
HoodieTestFakeConfig config1 = HoodieTestFakeConfig.newBuilder().build();
assertEquals("1", config1.getFakeString());
assertEquals(0, config1.getFakeInteger());
assertEquals("value3", config1.getFakeStringNoDefaultWithInfer());
assertEquals(null, config1.getFakeStringNoDefaultWithInferEmpty());
// Case 2: FAKE_STRING_CONFIG is set. FAKE_INTEGER_CONFIG,
// FAKE_STRING_CONFIG_NO_DEFAULT_WITH_INFER, and
// FAKE_STRING_CONFIG_NO_DEFAULT_WITH_INFER_EMPTY are inferred
HoodieTestFakeConfig config2 = HoodieTestFakeConfig.newBuilder()
.withFakeString("value1").build();
assertEquals("value1", config2.getFakeString());
assertEquals(0, config2.getFakeInteger());
assertEquals("value2", config2.getFakeStringNoDefaultWithInfer());
assertEquals("value10", config2.getFakeStringNoDefaultWithInferEmpty());
// Case 3: FAKE_STRING_CONFIG is set to a different value. FAKE_INTEGER_CONFIG,
// FAKE_STRING_CONFIG_NO_DEFAULT_WITH_INFER, and
// FAKE_STRING_CONFIG_NO_DEFAULT_WITH_INFER_EMPTY are inferred
HoodieTestFakeConfig config3 = HoodieTestFakeConfig.newBuilder()
.withFakeString("5").build();
assertEquals("5", config3.getFakeString());
assertEquals(100, config3.getFakeInteger());
assertEquals("value3", config3.getFakeStringNoDefaultWithInfer());
assertEquals(null, config3.getFakeStringNoDefaultWithInferEmpty());
// Case 4: all configs are set. No default or infer function should be used
HoodieTestFakeConfig config4 = HoodieTestFakeConfig.newBuilder()
.withFakeString("5")
.withFakeInteger(200)
.withFakeStringNoDefaultWithInfer("xyz")
.withFakeStringNoDefaultWithInferEmpty("uvw").build();
assertEquals("5", config4.getFakeString());
assertEquals(200, config4.getFakeInteger());
assertEquals("xyz", config4.getFakeStringNoDefaultWithInfer());
assertEquals("uvw", config4.getFakeStringNoDefaultWithInferEmpty());
}
|
Schema getAvroCompatibleSchema() {
return avroCompatibleSchema;
}
|
@Test
public void shouldDropOptionalFromRootPrimitiveSchema() {
// Given:
final Schema schema = Schema.OPTIONAL_INT64_SCHEMA;
// When:
final AvroDataTranslator translator =
new AvroDataTranslator(schema, AvroProperties.DEFAULT_AVRO_SCHEMA_FULL_NAME);
// Then:
assertThat("Root required", translator.getAvroCompatibleSchema().isOptional(), is(false));
}
|
@VisibleForTesting
void submit(long requestId, DispatchableSubPlan dispatchableSubPlan, long timeoutMs, Map<String, String> queryOptions)
throws Exception {
Deadline deadline = Deadline.after(timeoutMs, TimeUnit.MILLISECONDS);
// Serialize the stage plans in parallel
List<DispatchablePlanFragment> stagePlans = dispatchableSubPlan.getQueryStageList();
Set<QueryServerInstance> serverInstances = new HashSet<>();
// Ignore the reduce stage (stage 0)
int numStages = stagePlans.size() - 1;
List<CompletableFuture<StageInfo>> stageInfoFutures = new ArrayList<>(numStages);
for (int i = 0; i < numStages; i++) {
DispatchablePlanFragment stagePlan = stagePlans.get(i + 1);
serverInstances.addAll(stagePlan.getServerInstanceToWorkerIdMap().keySet());
stageInfoFutures.add(CompletableFuture.supplyAsync(() -> {
ByteString rootNode = PlanNodeSerializer.process(stagePlan.getPlanFragment().getFragmentRoot()).toByteString();
ByteString customProperty = QueryPlanSerDeUtils.toProtoProperties(stagePlan.getCustomProperties());
return new StageInfo(rootNode, customProperty);
}, _executorService));
}
List<StageInfo> stageInfos = new ArrayList<>(numStages);
try {
for (CompletableFuture<StageInfo> future : stageInfoFutures) {
stageInfos.add(future.get(deadline.timeRemaining(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS));
}
} finally {
for (CompletableFuture<?> future : stageInfoFutures) {
if (!future.isDone()) {
future.cancel(true);
}
}
}
Map<String, String> requestMetadata = new HashMap<>();
requestMetadata.put(CommonConstants.Query.Request.MetadataKeys.REQUEST_ID, Long.toString(requestId));
requestMetadata.put(CommonConstants.Broker.Request.QueryOptionKey.TIMEOUT_MS,
Long.toString(deadline.timeRemaining(TimeUnit.MILLISECONDS)));
requestMetadata.putAll(queryOptions);
ByteString protoRequestMetadata = QueryPlanSerDeUtils.toProtoProperties(requestMetadata);
// Submit the query plan to all servers in parallel
int numServers = serverInstances.size();
BlockingQueue<AsyncQueryDispatchResponse> dispatchCallbacks = new ArrayBlockingQueue<>(numServers);
for (QueryServerInstance serverInstance : serverInstances) {
_executorService.submit(() -> {
try {
Worker.QueryRequest.Builder requestBuilder = Worker.QueryRequest.newBuilder();
requestBuilder.setVersion(CommonConstants.MultiStageQueryRunner.PlanVersions.V1);
for (int i = 0; i < numStages; i++) {
int stageId = i + 1;
DispatchablePlanFragment stagePlan = stagePlans.get(stageId);
List<Integer> workerIds = stagePlan.getServerInstanceToWorkerIdMap().get(serverInstance);
if (workerIds != null) {
List<WorkerMetadata> stageWorkerMetadataList = stagePlan.getWorkerMetadataList();
List<WorkerMetadata> workerMetadataList = new ArrayList<>(workerIds.size());
for (int workerId : workerIds) {
workerMetadataList.add(stageWorkerMetadataList.get(workerId));
}
List<Worker.WorkerMetadata> protoWorkerMetadataList =
QueryPlanSerDeUtils.toProtoWorkerMetadataList(workerMetadataList);
StageInfo stageInfo = stageInfos.get(i);
Worker.StageMetadata stageMetadata =
Worker.StageMetadata.newBuilder().setStageId(stageId).addAllWorkerMetadata(protoWorkerMetadataList)
.setCustomProperty(stageInfo._customProperty).build();
requestBuilder.addStagePlan(
Worker.StagePlan.newBuilder().setRootNode(stageInfo._rootNode).setStageMetadata(stageMetadata)
.build());
}
}
requestBuilder.setMetadata(protoRequestMetadata);
getOrCreateDispatchClient(serverInstance).submit(requestBuilder.build(), serverInstance, deadline,
dispatchCallbacks::offer);
} catch (Throwable t) {
LOGGER.warn("Caught exception while dispatching query: {} to server: {}", requestId, serverInstance, t);
dispatchCallbacks.offer(new AsyncQueryDispatchResponse(serverInstance, null, t));
}
});
}
int numSuccessCalls = 0;
// TODO: Cancel all dispatched requests if one of the dispatch errors out or deadline is breached.
while (!deadline.isExpired() && numSuccessCalls < numServers) {
AsyncQueryDispatchResponse resp =
dispatchCallbacks.poll(deadline.timeRemaining(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
if (resp != null) {
if (resp.getThrowable() != null) {
throw new RuntimeException(
String.format("Error dispatching query: %d to server: %s", requestId, resp.getServerInstance()),
resp.getThrowable());
} else {
Worker.QueryResponse response = resp.getQueryResponse();
assert response != null;
if (response.containsMetadata(CommonConstants.Query.Response.ServerResponseStatus.STATUS_ERROR)) {
throw new RuntimeException(
String.format("Unable to execute query plan for request: %d on server: %s, ERROR: %s", requestId,
resp.getServerInstance(),
response.getMetadataOrDefault(CommonConstants.Query.Response.ServerResponseStatus.STATUS_ERROR,
"null")));
}
numSuccessCalls++;
}
}
}
if (deadline.isExpired()) {
throw new TimeoutException("Timed out waiting for response of async query-dispatch");
}
}
|
@Test
public void testQueryDispatcherThrowsWhenQueryServerThrows() {
String sql = "SELECT * FROM a WHERE col1 = 'foo'";
QueryServer failingQueryServer = _queryServerMap.values().iterator().next();
Mockito.doThrow(new RuntimeException("foo")).when(failingQueryServer).submit(Mockito.any(), Mockito.any());
DispatchableSubPlan dispatchableSubPlan = _queryEnvironment.planQuery(sql);
try {
_queryDispatcher.submit(REQUEST_ID_GEN.getAndIncrement(), dispatchableSubPlan, 10_000L, Collections.emptyMap());
Assert.fail("Method call above should have failed");
} catch (Exception e) {
Assert.assertTrue(e.getMessage().contains("Error dispatching query"));
}
Mockito.reset(failingQueryServer);
}
|
@Override
public double variance() {
return (1 - p) / (p * p);
}
|
@Test
public void testVariance() {
System.out.println("variance");
ShiftedGeometricDistribution instance = new ShiftedGeometricDistribution(0.3);
instance.rand();
assertEquals(0.7/0.09, instance.variance(), 1E-7);
}
|
public static List<AclEntry> replaceAclEntries(List<AclEntry> existingAcl,
List<AclEntry> inAclSpec) throws AclException {
ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
// Replacement is done separately for each scope: access and default.
EnumMap<AclEntryScope, AclEntry> providedMask =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
for (AclEntry aclSpecEntry: aclSpec) {
scopeDirty.add(aclSpecEntry.getScope());
if (aclSpecEntry.getType() == MASK) {
providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
maskDirty.add(aclSpecEntry.getScope());
} else {
aclBuilder.add(aclSpecEntry);
}
}
// Copy existing entries if the scope was not replaced.
for (AclEntry existingEntry: existingAcl) {
if (!scopeDirty.contains(existingEntry.getScope())) {
if (existingEntry.getType() == MASK) {
providedMask.put(existingEntry.getScope(), existingEntry);
} else {
aclBuilder.add(existingEntry);
}
}
}
copyDefaultsIfNeeded(aclBuilder);
calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
return buildAndValidateAcl(aclBuilder);
}
|
@Test
public void testReplaceAclEntriesAccessMaskPreserved() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, USER, "diana", READ_WRITE))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ_WRITE))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bruce", READ),
aclEntry(DEFAULT, GROUP, READ),
aclEntry(DEFAULT, OTHER, NONE));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
|
public <T> T parse(String input, Class<T> cls) {
return readFlow(input, cls, type(cls));
}
|
@Test
void invalidProperty() {
ConstraintViolationException exception = assertThrows(
ConstraintViolationException.class,
() -> this.parse("flows/invalids/invalid-property.yaml")
);
assertThat(exception.getMessage(), is("Unrecognized field \"invalid\" (class io.kestra.plugin.core.debug.Return), not marked as ignorable (10 known properties: \"logLevel\", \"timeout\", \"format\", \"retry\", \"type\", \"id\", \"description\", \"workerGroup\", \"disabled\", \"allowFailure\"])"));
assertThat(exception.getConstraintViolations().size(), is(1));
assertThat(exception.getConstraintViolations().iterator().next().getPropertyPath().toString(), is("io.kestra.core.models.flows.Flow[\"tasks\"]->java.util.ArrayList[0]->io.kestra.plugin.core.debug.Return[\"invalid\"]"));
}
|
public void onPeriodicEmit() {
updateCombinedWatermark();
}
|
@Test
void singleDeferredWatermarkAfterIdleness() {
TestingWatermarkOutput underlyingWatermarkOutput = createTestingWatermarkOutput();
WatermarkOutputMultiplexer multiplexer =
new WatermarkOutputMultiplexer(underlyingWatermarkOutput);
WatermarkOutput watermarkOutput = createDeferredOutput(multiplexer);
watermarkOutput.markIdle();
multiplexer.onPeriodicEmit();
assertThat(underlyingWatermarkOutput.isIdle()).isTrue();
watermarkOutput.emitWatermark(new Watermark(0));
multiplexer.onPeriodicEmit();
assertThat(underlyingWatermarkOutput.lastWatermark()).isEqualTo(new Watermark(0));
assertThat(underlyingWatermarkOutput.isIdle()).isFalse();
}
|
public static AggregationUnit create(final AggregationType type, final boolean isDistinct) {
switch (type) {
case MAX:
return new ComparableAggregationUnit(false);
case MIN:
return new ComparableAggregationUnit(true);
case SUM:
return isDistinct ? new DistinctSumAggregationUnit() : new AccumulationAggregationUnit();
case COUNT:
return isDistinct ? new DistinctCountAggregationUnit() : new AccumulationAggregationUnit();
case AVG:
return isDistinct ? new DistinctAverageAggregationUnit() : new AverageAggregationUnit();
case BIT_XOR:
return new BitXorAggregationUnit();
default:
throw new UnsupportedSQLOperationException(type.name());
}
}
|
@Test
void assertCreateDistinctAverageAggregationUnit() {
assertThat(AggregationUnitFactory.create(AggregationType.AVG, true), instanceOf(DistinctAverageAggregationUnit.class));
}
|
Map<Class, Object> getSerializers() {
return serializers;
}
|
@Test
public void testLoad_withParametrizedConstructor() {
SerializerConfig serializerConfig = new SerializerConfig();
serializerConfig.setClassName("com.hazelcast.internal.serialization.impl.TestSerializerHook$TestSerializerWithTypeConstructor");
serializerConfig.setTypeClassName("com.hazelcast.internal.serialization.impl.SampleIdentifiedDataSerializable");
SerializationConfig serializationConfig = getConfig().getSerializationConfig();
serializationConfig.addSerializerConfig(serializerConfig);
SerializerHookLoader hook = new SerializerHookLoader(serializationConfig, classLoader);
Map<Class, Object> serializers = hook.getSerializers();
TestSerializerHook.TestSerializerWithTypeConstructor serializer = (TestSerializerHook.TestSerializerWithTypeConstructor)
serializers.get(SampleIdentifiedDataSerializable.class);
assertEquals(SampleIdentifiedDataSerializable.class, serializer.getClazz());
}
|
public static final String getLine( LogChannelInterface log, BufferedInputStreamReader reader, int formatNr,
StringBuilder line ) throws KettleFileException {
EncodingType type = EncodingType.guessEncodingType( reader.getEncoding() );
return getLine( log, reader, type, formatNr, line );
}
|
@Test
public void getLineWithEnclosureTest() throws Exception {
String text = "\"firstLine\"\n\"secondLine\"";
StringBuilder linebuilder = new StringBuilder( "" );
InputStream is = new ByteArrayInputStream( text.getBytes() );
BufferedInputStreamReader isr = new BufferedInputStreamReader( new InputStreamReader( is ) );
TextFileLine line = TextFileInputUtils.getLine( Mockito.mock( LogChannelInterface.class ), isr, EncodingType.SINGLE, 1, linebuilder, "\"", "", 0 );
Assert.assertEquals( "\"firstLine\"", line.getLine() );
}
|
@Override
public String toString() {
return "ByteArrayObjectDataInput{"
+ "size=" + size
+ ", pos=" + pos
+ ", mark=" + mark
+ '}';
}
|
@Test
public void testToString() {
assertNotNull(in.toString());
}
|
public static void mergeParams(
Map<String, ParamDefinition> params,
Map<String, ParamDefinition> paramsToMerge,
MergeContext context) {
if (paramsToMerge == null) {
return;
}
Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
.forEach(
name -> {
ParamDefinition paramToMerge = paramsToMerge.get(name);
if (paramToMerge == null) {
return;
}
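              // Literal MAP params merge recursively; the nested merge context carries the parent param's mode.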
if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
mergeParams(
baseMap,
toMergeMap,
MergeContext.copyWithParentMode(
context, params.getOrDefault(name, paramToMerge).getMode()));
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else if (paramToMerge.getType() == ParamType.STRING_MAP
&& paramToMerge.isLiteral()) {
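                // Literal STRING_MAP params merge shallowly: incoming entries overwrite base entries key by key.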
Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
baseMap.putAll(toMergeMap);
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else {
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
}
});
}
|
@Test
public void testMergeWithParamMismatchDefinedBySEL() throws JsonProcessingException {
Map<String, ParamDefinition> allParams =
parseParamDefMap("{'tomerge': {'type': 'STRING','expression': 'stringValue'}}");
Map<String, ParamDefinition> paramsToMerge =
parseParamDefMap(
"{'tomerge': {'type': 'LONG','expression': 'Long data = 123; return data;'}}");
ParamsMergeHelper.mergeParams(allParams, paramsToMerge, definitionContext);
assertEquals(1, allParams.size());
assertEquals(
"Long data = 123; return data;",
allParams.get("tomerge").asStringParamDef().getExpression());
}
|
void storeEdits(byte[] inputData, long newStartTxn, long newEndTxn,
int newLayoutVersion) {
if (newStartTxn < 0 || newEndTxn < newStartTxn) {
Journal.LOG.error(String.format("Attempted to cache data of length %d " +
"with newStartTxn %d and newEndTxn %d",
inputData.length, newStartTxn, newEndTxn));
return;
}
try (AutoCloseableLock l = writeLock.acquire()) {
if (newLayoutVersion != layoutVersion) {
try {
updateLayoutVersion(newLayoutVersion, newStartTxn);
} catch (IOException ioe) {
Journal.LOG.error(String.format("Unable to save new edits [%d, %d] " +
"due to exception when updating to new layout version %d",
newStartTxn, newEndTxn, newLayoutVersion), ioe);
return;
}
} else if (lowestTxnId == INVALID_TXN_ID) {
Journal.LOG.info("Initializing edits cache starting from txn ID " +
newStartTxn);
initialize(newStartTxn);
} else if (highestTxnId + 1 != newStartTxn) {
// Cache is out of sync; clear to avoid storing noncontiguous regions
Journal.LOG.error(String.format("Edits cache is out of sync; " +
"looked for next txn id at %d but got start txn id for " +
"cache put request at %d. Reinitializing at new request.",
highestTxnId + 1, newStartTxn));
initialize(newStartTxn);
}
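      // Evict the oldest cached segments until the incoming batch fits within the configured capacity.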
while ((totalSize + inputData.length) > capacity && !dataMap.isEmpty()) {
Map.Entry<Long, byte[]> lowest = dataMap.firstEntry();
dataMap.remove(lowest.getKey());
totalSize -= lowest.getValue().length;
}
if (inputData.length > capacity) {
initialize(INVALID_TXN_ID);
Journal.LOG.warn(String.format("A single batch of edits was too " +
"large to fit into the cache: startTxn = %d, endTxn = %d, " +
"input length = %d. The cache size (%s) or cache fraction (%s) must be " +
"increased for it to work properly (current capacity %d)." +
"Cache is now empty.",
newStartTxn, newEndTxn, inputData.length,
DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY,
DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_FRACTION_KEY, capacity));
return;
}
if (dataMap.isEmpty()) {
lowestTxnId = newStartTxn;
} else {
lowestTxnId = dataMap.firstKey();
}
dataMap.put(newStartTxn, inputData);
highestTxnId = newEndTxn;
totalSize += inputData.length;
}
}
|
@Test
public void testCacheBelowCapacityRequestOnBoundary() throws Exception {
storeEdits(1, 5);
storeEdits(6, 20);
storeEdits(21, 30);
// First segment only
assertTxnCountAndContents(1, 3, 3);
// Second segment only
assertTxnCountAndContents(6, 10, 15);
// First and second segment
assertTxnCountAndContents(1, 7, 7);
// All three segments
assertTxnCountAndContents(1, 25, 25);
// Second and third segment
assertTxnCountAndContents(6, 20, 25);
// Second and third segment; request past the end
assertTxnCountAndContents(6, 50, 30);
// Third segment only; request past the end
assertTxnCountAndContents(21, 20, 30);
}
|
@Override
public int hashCode() {
return Objects.hash(targetImage, imageDigest, imageId, tags, imagePushed);
}
|
@Test
public void testEquality_differentImageId() {
JibContainer container1 = new JibContainer(targetImage1, digest1, digest1, tags1, true);
JibContainer container2 = new JibContainer(targetImage1, digest1, digest2, tags1, true);
Assert.assertNotEquals(container1, container2);
Assert.assertNotEquals(container1.hashCode(), container2.hashCode());
}
|
@VisibleForTesting
static Optional<String> buildGlueExpressionForSingleDomain(String columnName, Domain domain)
{
checkState(!domain.isAll());
ValueSet valueSet = domain.getValues();
if (!canConvertSqlTypeToStringForGlue(domain.getType())) {
return Optional.empty();
}
if (domain.getValues().isAll()) {
return Optional.of(format("(%s <> '%s')", columnName, HIVE_DEFAULT_DYNAMIC_PARTITION));
}
// null must be allowed for this case since callers must filter Domain.none() out
if (domain.getValues().isNone()) {
return Optional.of(format("(%s = '%s')", columnName, HIVE_DEFAULT_DYNAMIC_PARTITION));
}
List<String> disjuncts = new ArrayList<>();
List<String> singleValues = new ArrayList<>();
for (Range range : valueSet.getRanges().getOrderedRanges()) {
checkState(!range.isAll()); // Already checked
if (range.isSingleValue()) {
singleValues.add(valueToString(range.getSingleValue(), range.getType()));
}
else {
List<String> rangeConjuncts = new ArrayList<>();
if (!range.isLowUnbounded()) {
rangeConjuncts.add(format(
"%s %s %s",
columnName,
range.isLowInclusive() ? ">=" : ">",
valueToString(range.getLowBoundedValue(), range.getType())));
}
if (!range.isHighUnbounded()) {
rangeConjuncts.add(format(
"%s %s %s",
columnName,
range.isHighInclusive() ? "<=" : "<",
valueToString(range.getHighBoundedValue(), range.getType())));
}
// If rangeConjuncts is null, then the range was ALL, which should already have been checked for by callers
checkState(!rangeConjuncts.isEmpty());
disjuncts.add("(" + CONJUNCT_JOINER.join(rangeConjuncts) + ")");
}
}
if (singleValues.size() == 1) {
String equalsTest = format("(%s = %s)", columnName, getOnlyElement(singleValues.listIterator()));
disjuncts.add(equalsTest);
}
else if (singleValues.size() > 1) {
String values = Joiner.on(", ").join(singleValues);
String inClause = format("(%s in (%s))", columnName, values);
disjuncts.add(inClause);
}
return Optional.of("(" + DISJUNCT_JOINER.join(disjuncts) + ")");
}
|
@Test
public void testBuildGlueExpressionDomainEqualsSingleValue()
{
Domain domain = Domain.singleValue(VarcharType.VARCHAR, utf8Slice("2020-01-01"));
Optional<String> foo = buildGlueExpressionForSingleDomain("foo", domain);
assertEquals(foo.get(), "((foo = '2020-01-01'))");
}
|
@Override
public ProxyInvocationHandler parserInterfaceToProxy(Object target, String objectName) {
// eliminate the bean without two phase annotation.
Set<String> methodsToProxy = this.tccProxyTargetMethod(target);
if (methodsToProxy.isEmpty()) {
return null;
}
// register resource and enhance with interceptor
DefaultResourceRegisterParser.get().registerResource(target, objectName);
return new TccActionInterceptorHandler(target, methodsToProxy);
}
|
@Test
public void testNestTcc_should_rollback() throws Exception {
TccActionImpl tccAction = new TccActionImpl();
TccAction tccActionProxy = ProxyUtil.createProxy(tccAction, "oldtccAction");
Assertions.assertNotNull(tccActionProxy);
NestTccActionImpl nestTccAction = new NestTccActionImpl();
nestTccAction.setTccAction(tccActionProxy);
//when
ProxyInvocationHandler proxyInvocationHandler = DefaultInterfaceParser.get().parserInterfaceToProxy(nestTccAction, nestTccAction.getClass().getName());
//then
Assertions.assertNotNull(proxyInvocationHandler);
//when
NestTccAction nestTccActionProxy = ProxyUtil.createProxy(nestTccAction, "oldnestTccAction");
//then
Assertions.assertNotNull(nestTccActionProxy);
// transaction commit test
GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
try {
tx.begin(60000, "testBiz");
boolean result = nestTccActionProxy.prepare(null, 1);
Assertions.assertFalse(result);
if (result) {
tx.commit();
} else {
tx.rollback();
}
} catch (Exception exx) {
tx.rollback();
throw exx;
}
Assertions.assertFalse(nestTccAction.isCommit());
Assertions.assertFalse(tccAction.isCommit());
}
|
@VisibleForTesting
public void validateTemplateParams(NotifyTemplateDO template, Map<String, Object> templateParams) {
template.getParams().forEach(key -> {
Object value = templateParams.get(key);
if (value == null) {
throw exception(NOTIFY_SEND_TEMPLATE_PARAM_MISS, key);
}
});
}
|
@Test
public void testCheckTemplateParams_paramMiss() {
        // prepare the parameters
NotifyTemplateDO template = randomPojo(NotifyTemplateDO.class,
o -> o.setParams(Lists.newArrayList("code")));
Map<String, Object> templateParams = new HashMap<>();
        // mock methods (none needed)
        // invoke and assert the expected exception
assertServiceException(() -> notifySendService.validateTemplateParams(template, templateParams),
NOTIFY_SEND_TEMPLATE_PARAM_MISS, "code");
}
|
@Override
protected void write(final PostgreSQLPacketPayload payload) {
payload.writeInt2(columnDescriptions.size());
for (PostgreSQLColumnDescription each : columnDescriptions) {
payload.writeStringNul(each.getColumnName());
payload.writeInt4(each.getTableOID());
payload.writeInt2(each.getColumnIndex());
payload.writeInt4(each.getTypeOID());
payload.writeInt2(each.getColumnLength());
payload.writeInt4(each.getTypeModifier());
payload.writeInt2(each.getDataFormat());
}
}
|
@Test
void assertWrite() {
PostgreSQLColumnDescription description = new PostgreSQLColumnDescription("name", 1, Types.VARCHAR, 4, null);
PostgreSQLRowDescriptionPacket packet = new PostgreSQLRowDescriptionPacket(Collections.singleton(description));
packet.write(payload);
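        // Column count and columnIndex are both 1 (hence writeInt2(1) twice); 1043 is the VARCHAR type OID.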
verify(payload, times(2)).writeInt2(1);
verify(payload).writeStringNul("name");
verify(payload).writeInt4(0);
verify(payload).writeInt4(1043);
verify(payload).writeInt2(4);
verify(payload).writeInt4(-1);
verify(payload).writeInt2(0);
}
|
static Object parseCell(String cell, Schema.Field field) {
Schema.FieldType fieldType = field.getType();
try {
switch (fieldType.getTypeName()) {
case STRING:
return cell;
case INT16:
return Short.parseShort(cell);
case INT32:
return Integer.parseInt(cell);
case INT64:
return Long.parseLong(cell);
case BOOLEAN:
return Boolean.parseBoolean(cell);
case BYTE:
return Byte.parseByte(cell);
case DECIMAL:
return new BigDecimal(cell);
case DOUBLE:
return Double.parseDouble(cell);
case FLOAT:
return Float.parseFloat(cell);
case DATETIME:
return Instant.parse(cell);
default:
throw new UnsupportedOperationException(
"Unsupported type: " + fieldType + ", consider using withCustomRecordParsing");
}
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(
e.getMessage() + " field " + field.getName() + " was received -- type mismatch");
}
}
|
@Test
public void givenValidBooleanCell_parses() {
DefaultMapEntry cellToExpectedValue = new DefaultMapEntry("false", false);
Schema schema =
Schema.builder().addBooleanField("a_boolean").addStringField("a_string").build();
assertEquals(
cellToExpectedValue.getValue(),
CsvIOParseHelpers.parseCell(
cellToExpectedValue.getKey().toString(), schema.getField("a_boolean")));
}
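
For the failure path, a minimal sketch with a hypothetical field name: a non-numeric cell against an INT32 field makes Integer.parseInt throw, and the catch block rethrows with the field name appended:

Schema schema = Schema.builder().addInt32Field("an_int").build();
try {
    CsvIOParseHelpers.parseCell("not-a-number", schema.getField("an_int"));
} catch (IllegalArgumentException e) {
    // NumberFormatException is an IllegalArgumentException, so the message reads:
    // For input string: "not-a-number" field an_int was received -- type mismatch
}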
|
public ClientTelemetrySender telemetrySender() {
return clientTelemetrySender;
}
|
@Test
public void testHandleResponsePushTelemetryTerminating() {
ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender();
telemetrySender.updateSubscriptionResult(subscription, time.milliseconds());
assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS));
assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED));
assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.TERMINATING_PUSH_NEEDED));
assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.TERMINATING_PUSH_IN_PROGRESS));
PushTelemetryResponse response = new PushTelemetryResponse(new PushTelemetryResponseData());
telemetrySender.handleResponse(response);
// The telemetry sender remains in TERMINATING_PUSH_IN_PROGRESS so that a subsequent close() finishes the job
assertEquals(ClientTelemetryState.TERMINATING_PUSH_IN_PROGRESS, telemetrySender.state());
assertEquals(subscription.pushIntervalMs(), telemetrySender.intervalMs());
assertTrue(telemetrySender.enabled());
assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.TERMINATED));
}
|
public static void updateDetailMessage(
@Nullable Throwable root, @Nullable Function<Throwable, String> throwableToMessage) {
if (throwableToMessage == null) {
return;
}
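        // Walk the cause chain from the root, rewriting each message for which the function returns non-null.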
Throwable it = root;
while (it != null) {
String newMessage = throwableToMessage.apply(it);
if (newMessage != null) {
updateDetailMessageOfThrowable(it, newMessage);
}
it = it.getCause();
}
}
|
@Test
void testUpdateDetailMessageOfRelevantThrowableAsCause() {
Throwable oomCause =
new IllegalArgumentException("another message deep down in the cause tree");
Throwable oom = new OutOfMemoryError("old message").initCause(oomCause);
oom.setStackTrace(
new StackTraceElement[] {new StackTraceElement("class", "method", "file", 1)});
oom.addSuppressed(new NullPointerException());
Throwable rootThrowable = new IllegalStateException("another message", oom);
ExceptionUtils.updateDetailMessage(
rootThrowable,
t -> t.getClass().equals(OutOfMemoryError.class) ? "new message" : null);
assertThat(rootThrowable.getCause()).isSameAs(oom);
assertThat(rootThrowable.getCause().getMessage()).isEqualTo("new message");
assertThat(rootThrowable.getCause().getStackTrace()).isEqualTo(oom.getStackTrace());
assertThat(rootThrowable.getCause().getSuppressed()).isEqualTo(oom.getSuppressed());
assertThat(rootThrowable.getCause().getCause()).isSameAs(oomCause);
}
|