focal_method | test_case
---|---
public void removeSeckill(long seckillId) {
String key = "seckill:" + seckillId;
redisTemplate.delete(key);
redisTemplate.delete(String.valueOf(seckillId));
}
|
@Test
void removeSeckill() {
assertDoesNotThrow(() -> redisService.removeSeckill(1001L));
}
|
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = trees.length;
double[][] prediction = new double[ntrees][n];
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = b;
for (int i = 0; i < ntrees; i++) {
base += shrinkage * trees[i].predict(xj);
prediction[i][j] = base;
}
}
return prediction;
}
|
@Test
public void testCpuHuber() {
test(Loss.huber(0.9), "CPU", CPU.formula, CPU.data, 65.4128);
}
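The matrix returned by test holds staged predictions: prediction[i][j] is the ensemble's output for sample j after the first i + 1 trees, i.e. b plus shrinkage times the running sum of tree outputs. A minimal standalone sketch of the same accumulation over plain arrays (illustrative types, not Smile's DataFrame/Tuple API):

// Staged predictions for a toy additive ensemble: treeOutputs[i][j] is
// tree i's raw output for sample j, and b is the base score.
static double[][] stagedPredictions(double[][] treeOutputs, double b, double shrinkage) {
    int ntrees = treeOutputs.length;
    int n = treeOutputs[0].length;
    double[][] prediction = new double[ntrees][n];
    for (int j = 0; j < n; j++) {
        double base = b;
        for (int i = 0; i < ntrees; i++) {
            base += shrinkage * treeOutputs[i][j]; // add tree i's shrunken contribution
            prediction[i][j] = base;               // prediction after i + 1 trees
        }
    }
    return prediction;
}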
|
@Override
public void isEqualTo(@Nullable Object expected) {
super.isEqualTo(expected);
}
|
@Test
public void isEqualTo_WithoutToleranceParameter_Fail_NotAnArray() {
expectFailureWhenTestingThat(array(2.2d, 3.3d, 4.4d)).isEqualTo(new Object());
}
|
@Override
public void verify(byte[] data, byte[] signature, MessageDigest digest) {
final byte[] decrypted = engine.processBlock(signature, 0, signature.length);
final int delta = checkSignature(decrypted, digest);
final int offset = decrypted.length - digest.getDigestLength() - delta;
digest.update(decrypted, 1, offset - 1);
digest.update(data);
if (!CryptoUtils.compare(digest.digest(), decrypted, offset)) {
throw new VerificationException("Invalid signature");
}
}
|
@Test
public void shouldThrowCryptoExceptionIfTrailerIsUnknown() {
final byte[] challenge = CryptoUtils.random(40);
final byte[] signature = sign(0x54, challenge, ISOTrailers.TRAILER_SHA1, "MD5");
thrown.expect(CryptoException.class);
thrown.expectMessage("Unknown trailer for digest MD5");
new DssRsaSignatureVerifier(PUBLIC).verify(challenge, signature, "MD5");
}
|
public short[] decodeInt2Array(final byte[] parameterBytes, final boolean isBinary) {
ShardingSpherePreconditions.checkState(!isBinary, () -> new UnsupportedSQLOperationException("binary mode"));
String parameterValue = new String(parameterBytes, StandardCharsets.UTF_8);
Collection<String> parameterElements = decodeText(parameterValue);
short[] result = new short[parameterElements.size()];
int index = 0;
for (String each : parameterElements) {
result[index++] = Short.parseShort(each);
}
return result;
}
|
@Test
void assertParseInt2ArrayNormalTextMode() {
short[] actual = DECODER.decodeInt2Array(INT_ARRAY_STR.getBytes(), false);
assertThat(actual.length, is(2));
assertThat(actual[0], is((short) 11));
assertThat(actual[1], is((short) 12));
}
|
static Optional<String> globalResponseError(Optional<ClientResponse> response) {
if (!response.isPresent()) {
return Optional.of("Timeout");
}
if (response.get().authenticationException() != null) {
return Optional.of("AuthenticationException");
}
if (response.get().wasTimedOut()) {
return Optional.of("Disonnected[Timeout]");
}
if (response.get().wasDisconnected()) {
return Optional.of("Disconnected");
}
if (response.get().versionMismatch() != null) {
return Optional.of("UnsupportedVersionException");
}
if (response.get().responseBody() == null) {
return Optional.of("EmptyResponse");
}
if (!(response.get().responseBody() instanceof AssignReplicasToDirsResponse)) {
return Optional.of("ClassCastException");
}
AssignReplicasToDirsResponseData data = ((AssignReplicasToDirsResponse)
response.get().responseBody()).data();
Errors error = Errors.forCode(data.errorCode());
if (error != Errors.NONE) {
return Optional.of("Response-level error: " + error.name());
}
return Optional.empty();
}
|
@Test
public void testGlobalResponseErrorUnsupportedVersionException() {
assertEquals(Optional.of("UnsupportedVersionException"),
AssignmentsManager.globalResponseError(Optional.of(
new ClientResponse(null, null, "", 0, 0, false,
new UnsupportedVersionException("failed"), null, null))));
}
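Each failure mode maps to a fixed string, so the remaining branches can be exercised the same way; a sketch for the absent-response branch, assuming the same fixture as the test above:

@Test
public void testGlobalResponseErrorTimeout() {
    // An absent response is reported as a timeout.
    assertEquals(Optional.of("Timeout"),
        AssignmentsManager.globalResponseError(Optional.empty()));
}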
|
public static SolrSinkConfig load(String yamlFile) throws IOException {
ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
return mapper.readValue(new File(yamlFile), SolrSinkConfig.class);
}
|
@Test
public final void loadFromMapTest() throws IOException {
Map<String, Object> map = new HashMap<>();
map.put("solrUrl", "localhost:2181,localhost:2182/chroot");
map.put("solrMode", "SolrCloud");
map.put("solrCollection", "techproducts");
map.put("solrCommitWithinMs", "100");
map.put("username", "fakeuser");
map.put("password", "fake@123");
SinkContext sinkContext = Mockito.mock(SinkContext.class);
SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext);
assertNotNull(config);
assertEquals(config.getSolrUrl(), "localhost:2181,localhost:2182/chroot");
assertEquals(config.getSolrMode(), "SolrCloud");
assertEquals(config.getSolrCollection(), "techproducts");
assertEquals(config.getSolrCommitWithinMs(), Integer.parseInt("100"));
assertEquals(config.getUsername(), "fakeuser");
assertEquals(config.getPassword(), "fake@123");
}
|
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException,
NoSuchPaddingException, InvalidKeySpecException,
InvalidAlgorithmParameterException,
KeyException, IOException {
return toPrivateKey(keyFile, keyPassword, true);
}
|
@Test
public void testPkcs1AesEncryptedDsaEmptyPassword() throws Exception {
assertThrows(IOException.class, new Executable() {
@Override
public void execute() throws Throwable {
SslContext.toPrivateKey(new File(getClass().getResource("dsa_pkcs1_aes_encrypted.key")
.getFile()), "");
}
});
}
|
public static <T> T retry(Callable<T> callable, int retries) {
return retry(callable, retries, Collections.emptyList());
}
|
@Test
public void retryNoRetries()
throws Exception {
// given
given(callable.call()).willReturn(RESULT);
// when
String result = RetryUtils.retry(callable, RETRIES);
// then
assertEquals(RESULT, result);
verify(callable).call();
}
|
@Override
public Local create(final Path file) {
return this.create(String.format("%s-%s", new AlphanumericRandomStringService().random(), file.getName()));
}
|
@Test
public void testTemporaryPathCustomPrefix() {
final Path file = new Path("/f1/f2/t.txt", EnumSet.of(Path.Type.file));
file.attributes().setDuplicate(true);
file.attributes().setVersionId("1");
final Local local = new FlatTemporaryFileService().create("u", file);
assertTrue(local.getParent().exists());
assertEquals("t.txt", file.getName());
assertTrue(local.getName().endsWith("-t.txt"));
assertEquals(LocalFactory.get(PreferencesFactory.get().getProperty("tmp.dir"), "u"), LocalFactory.get(local.getParent().getAbsolute()));
}
|
public static void bindEnvironment(ScriptEngine engine, String requestContent, Map<String, Object> requestContext,
StateStore stateStore) {
// Build a map of header values.
bindEnvironment(engine, requestContent, requestContext, stateStore, null);
}
|
@Test
void testRequestContentHeadersAreBound() {
String script = """
def headers = mockRequest.getRequestHeaders()
log.info("headers: " + headers)
return headers.get("foo", "null");
""";
ScriptEngineManager sem = new ScriptEngineManager();
MockHttpServletRequest request = new MockHttpServletRequest();
request.addHeader("foo", "bar");
String body = "content";
try {
// Evaluating request with script coming from operation dispatcher rules.
ScriptEngine se = sem.getEngineByExtension("groovy");
ScriptEngineBinder.bindEnvironment(se, body, null, null, request);
String result = (String) se.eval(script);
assertEquals("bar", result);
} catch (Exception e) {
fail("Exception should no be thrown");
}
}
|
public static ImmutableSet<HttpUrl> allSubPaths(String url) {
return allSubPaths(HttpUrl.parse(url));
}
|
@Test
public void allSubPaths_whenNoSubPathNoTrailingSlash_returnsSingleUrl() {
assertThat(allSubPaths("http://localhost")).containsExactly(HttpUrl.parse("http://localhost/"));
}
|
@Override
public boolean next() throws SQLException {
if (isExecutedAllDirection) {
return false;
}
if (orderByValuesQueue.isEmpty()) {
return false;
}
if (isFirstNext) {
isFirstNext = false;
fetchCount--;
return true;
}
OrderByValue firstOrderByValue = orderByValuesQueue.poll();
if (firstOrderByValue.next()) {
orderByValuesQueue.offer(firstOrderByValue);
}
if (orderByValuesQueue.isEmpty()) {
return false;
}
setCurrentQueryResult(orderByValuesQueue.peek().getQueryResult());
return DirectionType.isAllDirectionType(directionType) || fetchCount-- > 0L;
}
|
@Test
void assertNextForResultSetsAllNotEmptyWhenConfigAllDirectionType() throws SQLException {
List<QueryResult> queryResults = Arrays.asList(mock(QueryResult.class, RETURNS_DEEP_STUBS), mock(QueryResult.class, RETURNS_DEEP_STUBS), mock(QueryResult.class, RETURNS_DEEP_STUBS));
for (QueryResult each : queryResults) {
when(each.next()).thenReturn(true, false);
}
MergedResult actual = resultMerger.merge(queryResults, fetchAllStatementContext, database, connectionContext);
assertTrue(actual.next());
assertTrue(actual.next());
assertTrue(actual.next());
assertFalse(actual.next());
}
|
@Override
public void sent(final long bytes) {
this.increment();
}
|
@Test
public void testSent() {
final DownloadTransfer transfer = new DownloadTransfer(new Host(new TestProtocol()), Collections.<TransferItem>emptyList());
final TerminalStreamListener l = new TerminalStreamListener(new TransferSpeedometer(transfer));
l.sent(1L);
transfer.addSize(1L);
l.sent(1L);
}
|
public void validatePositionsIfNeeded() {
Map<TopicPartition, SubscriptionState.FetchPosition> partitionsToValidate =
offsetFetcherUtils.getPartitionsToValidate();
validatePositionsAsync(partitionsToValidate);
}
|
@Test
public void testOffsetValidationRequestGrouping() {
buildFetcher();
assignFromUser(mkSet(tp0, tp1, tp2, tp3));
metadata.updateWithCurrentRequestVersion(RequestTestUtils.metadataUpdateWithIds("dummy", 3,
Collections.emptyMap(), singletonMap(topicName, 4),
tp -> 5, topicIds), false, 0L);
for (TopicPartition tp : subscriptions.assignedPartitions()) {
Metadata.LeaderAndEpoch leaderAndEpoch = new Metadata.LeaderAndEpoch(
metadata.currentLeader(tp).leader, Optional.of(4));
subscriptions.seekUnvalidated(tp,
new SubscriptionState.FetchPosition(0, Optional.of(4), leaderAndEpoch));
}
Set<TopicPartition> allRequestedPartitions = new HashSet<>();
for (Node node : metadata.fetch().nodes()) {
apiVersions.update(node.idString(), NodeApiVersions.create());
Set<TopicPartition> expectedPartitions = subscriptions.assignedPartitions().stream()
.filter(tp ->
metadata.currentLeader(tp).leader.equals(Optional.of(node)))
.collect(Collectors.toSet());
assertTrue(expectedPartitions.stream().noneMatch(allRequestedPartitions::contains));
assertFalse(expectedPartitions.isEmpty());
allRequestedPartitions.addAll(expectedPartitions);
OffsetForLeaderEpochResponseData data = new OffsetForLeaderEpochResponseData();
expectedPartitions.forEach(tp -> {
OffsetForLeaderTopicResult topic = data.topics().find(tp.topic());
if (topic == null) {
topic = new OffsetForLeaderTopicResult().setTopic(tp.topic());
data.topics().add(topic);
}
topic.partitions().add(new EpochEndOffset()
.setPartition(tp.partition())
.setErrorCode(Errors.NONE.code())
.setLeaderEpoch(4)
.setEndOffset(0));
});
OffsetsForLeaderEpochResponse response = new OffsetsForLeaderEpochResponse(data);
client.prepareResponseFrom(body -> {
OffsetsForLeaderEpochRequest request = (OffsetsForLeaderEpochRequest) body;
return expectedPartitions.equals(offsetForLeaderPartitionMap(request.data()).keySet());
}, response, node);
}
assertEquals(subscriptions.assignedPartitions(), allRequestedPartitions);
offsetFetcher.validatePositionsIfNeeded();
consumerClient.pollNoWakeup();
assertTrue(subscriptions.assignedPartitions()
.stream().noneMatch(subscriptions::awaitingValidation));
}
|
public static boolean canDrop(
FilterPredicate pred, List<ColumnChunkMetaData> columns, DictionaryPageReadStore dictionaries) {
Objects.requireNonNull(pred, "pred cannot be null");
Objects.requireNonNull(columns, "columns cannot be null");
return pred.accept(new DictionaryFilter(columns, dictionaries));
}
|
@Test
public void testGtEqMissingColumn() throws Exception {
BinaryColumn b = binaryColumn("missing_column");
assertTrue(
"Should drop block for any non-null query",
canDrop(gtEq(b, Binary.fromString("any")), ccmd, dictionaries));
}
|
public String convert(ILoggingEvent le) {
long timestamp = le.getTimeStamp();
return cachingDateFormatter.format(timestamp);
}
|
@Test
public void convertsDateInSpecifiedTimeZoneAsRawOffset() {
assertEquals(formatDate("-0800"), convert(_timestamp, DATETIME_PATTERN, "-0800"));
}
|
@Override
public boolean tryClaim(Long i) {
checkArgument(
lastAttemptedOffset == null || i > lastAttemptedOffset,
"Trying to claim offset %s while last attempted was %s",
i,
lastAttemptedOffset);
checkArgument(
i >= range.getFrom(), "Trying to claim offset %s before start of the range %s", i, range);
lastAttemptedOffset = i;
// No respective checkArgument for i < range.to() - it's ok to try claiming offsets beyond it.
if (i >= range.getTo()) {
return false;
}
lastClaimedOffset = i;
return true;
}
|
@Test
public void testNonMonotonicClaim() throws Exception {
expected.expectMessage("Trying to claim offset 103 while last attempted was 110");
OffsetRangeTracker tracker = new OffsetRangeTracker(new OffsetRange(100, 200));
assertTrue(tracker.tryClaim(105L));
assertTrue(tracker.tryClaim(110L));
tracker.tryClaim(103L);
}
|
@Override
protected int command() {
if (!validateConfigFilePresent()) {
return 1;
}
final MigrationConfig config;
try {
config = MigrationConfig.load(getConfigFile());
} catch (KsqlException | MigrationException e) {
LOGGER.error(e.getMessage());
return 1;
}
return command(
config,
MigrationsUtil::getKsqlClient,
getMigrationsDir(getConfigFile(), config),
Clock.systemDefaultZone()
);
}
|
@Test
public void shouldApplySpecificMigration() throws Exception {
// Given:
command = PARSER.parse("-v", "3");
createMigrationFile(1, NAME, migrationsDir, COMMAND);
createMigrationFile(3, NAME, migrationsDir, COMMAND);
givenCurrentMigrationVersion("1");
givenAppliedMigration(1, NAME, MigrationState.MIGRATED);
// When:
final int result = command.command(config, (cfg, headers) -> ksqlClient, migrationsDir, Clock.fixed(
Instant.ofEpochMilli(1000), ZoneId.systemDefault()));
// Then:
assertThat(result, is(0));
final InOrder inOrder = inOrder(ksqlClient);
verifyMigratedVersion(inOrder, 3, "1", MigrationState.MIGRATED);
inOrder.verify(ksqlClient).close();
inOrder.verifyNoMoreInteractions();
}
|
public static HashingAlgorithm getHashingAlgorithm(String password)
{
if (password.startsWith("$2y")) {
if (getBCryptCost(password) < BCRYPT_MIN_COST) {
throw new HashedPasswordException("Minimum cost of BCrypt password must be " + BCRYPT_MIN_COST);
}
return HashingAlgorithm.BCRYPT;
}
if (password.contains(":")) {
if (getPBKDF2Iterations(password) < PBKDF2_MIN_ITERATIONS) {
throw new HashedPasswordException("Minimum iterations of PBKDF2 password must be " + PBKDF2_MIN_ITERATIONS);
}
return HashingAlgorithm.PBKDF2;
}
throw new HashedPasswordException("Password hashing algorithm cannot be determined");
}
|
@Test
public void testMinBCryptCost()
{
// BCrypt password created with cost of 7 --> "htpasswd -n -B -C 7 test"
String password = "$2y$07$XxMSjoWesbX9s9LCD5Kp1OaFD/bcLUq0zoRCTsTNwjF6N/nwHVCVm";
assertThatThrownBy(() -> getHashingAlgorithm(password))
.isInstanceOf(HashedPasswordException.class)
.hasMessage("Minimum cost of BCrypt password must be 8");
}
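Detection is purely syntactic: a "$2y" prefix selects BCrypt and a colon-separated value selects PBKDF2, each gated by a minimum cost or iteration count. A sketch of the happy paths; the hash bodies below are illustrative, and the "iterations:salt:hash" layout for PBKDF2 is an assumption:

@Test
public void testDetectsAlgorithmFromFormat()
{
    // BCrypt with cost 10, above the minimum of 8 (hash body is made up).
    assertEquals(HashingAlgorithm.BCRYPT,
            getHashingAlgorithm("$2y$10$XxMSjoWesbX9s9LCD5Kp1OaFD/bcLUq0zoRCTsTNwjF6N/nwHVCVm"));
    // PBKDF2 with 100000 iterations (assumed layout: iterations before the first colon).
    assertEquals(HashingAlgorithm.PBKDF2,
            getHashingAlgorithm("100000:5b4240333032306164:f38d165fce8ce42f59d36613"));
}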
|
@Override
public void setNumBuffers(int numBuffers) {
CompletableFuture<?> toNotify;
synchronized (availableMemorySegments) {
checkArgument(
numBuffers >= numberOfRequiredMemorySegments,
"Buffer pool needs at least %s buffers, but tried to set to %s",
numberOfRequiredMemorySegments,
numBuffers);
currentPoolSize = Math.min(numBuffers, maxNumberOfMemorySegments);
returnExcessMemorySegments();
if (isDestroyed) {
// FLINK-19964: when two local buffer pools are released concurrently, one of them
// gets buffers assigned
// make sure that checkAndUpdateAvailability is not called as it would proactively
// acquire one buffer from NetworkBufferPool.
return;
}
toNotify = checkAndUpdateAvailability();
}
mayNotifyAvailable(toNotify);
}
|
@Test
void testSetLessThanRequiredNumBuffers() {
localBufferPool.setNumBuffers(1);
assertThatThrownBy(() -> localBufferPool.setNumBuffers(0))
.isInstanceOf(IllegalArgumentException.class);
}
|
public void start() {
Preconditions.checkState(state.compareAndSet(State.LATENT, State.STARTED), "Already started");
client.getConnectionStateListenable().addListener(connectionStateListener);
createNode();
}
|
@Test
public void testEphemeralSequentialWithProtectionReconnection() throws Exception {
Timing timing = new Timing();
PersistentNode pen = null;
CuratorFramework client = CuratorFrameworkFactory.newClient(
server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1));
try {
client.start();
client.create().creatingParentsIfNeeded().forPath("/test/one");
pen = new PersistentNode(client, CreateMode.EPHEMERAL_SEQUENTIAL, true, "/test/one/two", new byte[0]);
pen.start();
List<String> children = client.getChildren().forPath("/test/one");
System.out.println("children before restart: " + children);
assertEquals(1, children.size());
server.stop();
timing.sleepABit();
server.restart();
timing.sleepABit();
List<String> childrenAfter = client.getChildren().forPath("/test/one");
System.out.println("children after restart: " + childrenAfter);
assertEquals(children, childrenAfter, "unexpected znodes: " + childrenAfter);
} finally {
CloseableUtils.closeQuietly(pen);
CloseableUtils.closeQuietly(client);
}
}
|
public static List<Permission> convertToJavaPermissions(Set<org.onosproject.security.Permission> permissions) {
List<Permission> result = Lists.newArrayList();
for (org.onosproject.security.Permission perm : permissions) {
Permission javaPerm = getPermission(perm);
if (javaPerm != null) {
if (javaPerm instanceof AppPermission) {
if (((AppPermission) javaPerm).getType() != null) {
AppPermission ap = (AppPermission) javaPerm;
result.add(ap);
if (serviceDirectory.containsKey(ap.getType())) {
for (String service : serviceDirectory.get(ap.getType())) {
result.add(new ServicePermission(service, ServicePermission.GET));
}
}
}
} else if (javaPerm instanceof ServicePermission) {
if (!javaPerm.getName().contains(SecurityAdminService.class.getName())) {
result.add(javaPerm);
}
} else {
result.add(javaPerm);
}
}
}
return result;
}
|
@Test
public void testConvertToJavaPermissions() {
List<Permission> result = Lists.newArrayList();
for (org.onosproject.security.Permission perm : testPermissions) {
Permission javaPerm = new AppPermission(perm.getName());
if (javaPerm != null) {
if (javaPerm instanceof AppPermission) {
if (((AppPermission) javaPerm).getType() != null) {
AppPermission ap = (AppPermission) javaPerm;
result.add(ap);
}
} else if (javaPerm instanceof ServicePermission) {
if (!javaPerm.getName().contains(SecurityAdminService.class.getName())) {
result.add(javaPerm);
}
} else {
result.add(javaPerm);
}
}
}
assertTrue(!result.isEmpty());
assertEquals("APP_READ", result.get(0).getName());
}
|
public CompositeFileEntryParser getParser(final String system) {
return this.getParser(system, TimeZone.getDefault());
}
|
@Test
public void testGetParser() {
assertNotNull(new FTPParserSelector().getParser(null));
}
|
public synchronized <K, V> GlobalKTable<K, V> globalTable(final String topic,
final Consumed<K, V> consumed) {
Objects.requireNonNull(topic, "topic can't be null");
Objects.requireNonNull(consumed, "consumed can't be null");
final ConsumedInternal<K, V> consumedInternal = new ConsumedInternal<>(consumed);
final MaterializedInternal<K, V, KeyValueStore<Bytes, byte[]>> materializedInternal =
new MaterializedInternal<>(
Materialized.with(consumedInternal.keySerde(), consumedInternal.valueSerde()),
internalStreamsBuilder, topic + "-");
return internalStreamsBuilder.globalTable(topic, consumedInternal, materializedInternal);
}
|
@Test
public void shouldThrowOnVersionedStoreSupplierForGlobalTable() {
final String topic = "topic";
assertThrows(
TopologyException.class,
() -> builder.globalTable(
topic,
Materialized.<Long, String>as(Stores.persistentVersionedKeyValueStore("store", Duration.ZERO))
.withKeySerde(Serdes.Long())
.withValueSerde(Serdes.String()
)
)
);
}
|
static Properties loadPropertiesFile(File homeDir) {
Properties p = new Properties();
File propsFile = new File(new File(homeDir, "conf"), "sonar.properties");
if (propsFile.exists()) {
try (Reader reader = new InputStreamReader(new FileInputStream(propsFile), UTF_8)) {
p.load(reader);
return p;
} catch (IOException e) {
throw new IllegalStateException("Cannot open file " + propsFile, e);
}
} else {
throw new IllegalStateException("Configuration file not found: " + propsFile);
}
}
|
@Test
public void loadPropertiesFile_reads_sonar_properties_content() throws IOException {
File homeDir = temporaryFolder.newFolder();
File confDir = new File(homeDir, "conf");
confDir.mkdirs();
File sonarProperties = new File(confDir, "sonar.properties");
sonarProperties.createNewFile();
Files.write(sonarProperties.toPath(), Arrays.asList("foo=bar"));
Properties properties = Shutdowner.loadPropertiesFile(homeDir);
assertThat(properties.getProperty("foo")).isEqualTo("bar");
}
|
@Override
public void serialize(Asn1OutputStream out, Class<? extends Object> type, Object instance, Asn1ObjectMapper mapper)
throws IOException {
for (final Map.Entry<String, List<Asn1Field>> entry : fieldsMap(mapper.getFields(type)).entrySet()) {
final List<Asn1Field> fields = entry.getValue();
final Object[] values = new Object[fields.size()];
if (!fetchValues(fields, instance, values)) continue;
try (final Asn1OutputStream seqOut = new Asn1OutputStream(out, getNestedTagNo(type))) {
try (final Asn1OutputStream oidOut = new Asn1OutputStream(seqOut, 0x06)) {
Asn1Utils.encodeObjectIdentifier(entry.getKey(), oidOut);
}
int i = 0;
for (final Asn1Field field : fields) {
try (final Asn1OutputStream propOut = new Asn1OutputStream(seqOut, field.tagNo)) {
mapper.writeValue(propOut, field.converter(), field.type(), values[i++]);
}
}
}
}
}
|
@Test
public void shouldSerialize() {
assertArrayEquals(
new byte[] { 0x30, 6, 0x06, 1, 83, 0x02, 1, 3, 0x30, 9, 0x06, 1, 84, 0x02, 1, 1, 0x02, 1, 2 },
serialize(new SetOfIdentifiedConverter(), Set.class, new Set(1, 2, 3))
);
}
|
@Override
public CompletableFuture<ExecutionGraphInfo> getExecutionGraphInfo(
JobID jobId, RestfulGateway restfulGateway) {
return getExecutionGraphInternal(jobId, restfulGateway).thenApply(Function.identity());
}
|
@Test
void testConcurrentAccess() throws Exception {
final Time timeout = Time.milliseconds(100L);
final Time timeToLive = Time.hours(1L);
final CountingRestfulGateway restfulGateway =
createCountingRestfulGateway(
expectedJobId,
CompletableFuture.completedFuture(expectedExecutionGraphInfo));
final int numConcurrentAccesses = 10;
final ArrayList<CompletableFuture<ExecutionGraphInfo>> executionGraphFutures =
new ArrayList<>(numConcurrentAccesses);
final ExecutorService executor =
java.util.concurrent.Executors.newFixedThreadPool(numConcurrentAccesses);
try (ExecutionGraphCache executionGraphCache =
new DefaultExecutionGraphCache(timeout, timeToLive)) {
for (int i = 0; i < numConcurrentAccesses; i++) {
CompletableFuture<ExecutionGraphInfo> executionGraphFuture =
CompletableFuture.supplyAsync(
() ->
executionGraphCache.getExecutionGraphInfo(
expectedJobId, restfulGateway),
executor)
.thenCompose(Function.identity());
executionGraphFutures.add(executionGraphFuture);
}
final CompletableFuture<Collection<ExecutionGraphInfo>> allExecutionGraphFutures =
FutureUtils.combineAll(executionGraphFutures);
Collection<ExecutionGraphInfo> allExecutionGraphs = allExecutionGraphFutures.get();
for (ExecutionGraphInfo executionGraph : allExecutionGraphs) {
assertThat(executionGraph).isEqualTo(expectedExecutionGraphInfo);
}
assertThat(restfulGateway.getNumRequestJobCalls()).isOne();
} finally {
ExecutorUtils.gracefulShutdown(5000L, TimeUnit.MILLISECONDS, executor);
}
}
|
@Override
protected License create() {
return new DefaultLicenseFactory(this).create();
}
|
@Test
public void testCreate() {
assertEquals(new Receipt(null, "b8e85600dffe"), new ReceiptFactory(new Local("src/test/resources")).create());
}
|
public static boolean passCheck(ResourceWrapper resourceWrapper, /*@Valid*/ ParamFlowRule rule, /*@Valid*/ int count,
Object... args) {
if (args == null) {
return true;
}
int paramIdx = rule.getParamIdx();
if (args.length <= paramIdx) {
return true;
}
// Get parameter value.
Object value = args[paramIdx];
// Assign value with the result of paramFlowKey method
if (value instanceof ParamFlowArgument) {
value = ((ParamFlowArgument) value).paramFlowKey();
}
// If value is null, then pass
if (value == null) {
return true;
}
if (rule.isClusterMode() && rule.getGrade() == RuleConstant.FLOW_GRADE_QPS) {
return passClusterCheck(resourceWrapper, rule, count, value);
}
return passLocalCheck(resourceWrapper, rule, count, value);
}
|
@Test
public void testPassLocalCheckForComplexParam() throws InterruptedException {
class User implements ParamFlowArgument {
Integer id;
String name;
String address;
public User(Integer id, String name, String address) {
this.id = id;
this.name = name;
this.address = address;
}
@Override
public Object paramFlowKey() {
return name;
}
}
final String resourceName = "testPassLocalCheckForComplexParam";
final ResourceWrapper resourceWrapper = new StringResourceWrapper(resourceName, EntryType.IN);
int paramIdx = 0;
double globalThreshold = 1;
ParamFlowRule rule = new ParamFlowRule(resourceName).setParamIdx(paramIdx).setCount(globalThreshold);
Object[] args = new Object[]{new User(1, "Bob", "Hangzhou"), 10, "Demo"};
ParameterMetric metric = new ParameterMetric();
ParameterMetricStorage.getMetricsMap().put(resourceWrapper.getName(), metric);
metric.getRuleTimeCounterMap().put(rule, new ConcurrentLinkedHashMapWrapper<Object, AtomicLong>(4000));
metric.getRuleTokenCounterMap().put(rule, new ConcurrentLinkedHashMapWrapper<>(4000));
assertTrue(ParamFlowChecker.passCheck(resourceWrapper, rule, 1, args));
assertFalse(ParamFlowChecker.passCheck(resourceWrapper, rule, 1, args));
}
|
@Override
public RemotingCommand processRequest(ChannelHandlerContext ctx, RemotingCommand request) throws
RemotingCommandException {
final RemotingCommand response = RemotingCommand.createResponseCommand(null);
final EndTransactionRequestHeader requestHeader =
(EndTransactionRequestHeader) request.decodeCommandCustomHeader(EndTransactionRequestHeader.class);
LOGGER.debug("Transaction request:{}", requestHeader);
if (BrokerRole.SLAVE == brokerController.getMessageStoreConfig().getBrokerRole()) {
response.setCode(ResponseCode.SLAVE_NOT_AVAILABLE);
LOGGER.warn("Message store is slave mode, so end transaction is forbidden. ");
return response;
}
if (requestHeader.getFromTransactionCheck()) {
switch (requestHeader.getCommitOrRollback()) {
case MessageSysFlag.TRANSACTION_NOT_TYPE: {
LOGGER.warn("Check producer[{}] transaction state, but it's pending status."
+ "RequestHeader: {} Remark: {}",
RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
requestHeader.toString(),
request.getRemark());
return null;
}
case MessageSysFlag.TRANSACTION_COMMIT_TYPE: {
LOGGER.warn("Check producer[{}] transaction state, the producer commit the message."
+ "RequestHeader: {} Remark: {}",
RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
requestHeader.toString(),
request.getRemark());
break;
}
case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE: {
LOGGER.warn("Check producer[{}] transaction state, the producer rollback the message."
+ "RequestHeader: {} Remark: {}",
RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
requestHeader.toString(),
request.getRemark());
break;
}
default:
return null;
}
} else {
switch (requestHeader.getCommitOrRollback()) {
case MessageSysFlag.TRANSACTION_NOT_TYPE: {
LOGGER.warn("The producer[{}] end transaction in sending message, and it's pending status."
+ "RequestHeader: {} Remark: {}",
RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
requestHeader.toString(),
request.getRemark());
return null;
}
case MessageSysFlag.TRANSACTION_COMMIT_TYPE: {
break;
}
case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE: {
LOGGER.warn("The producer[{}] end transaction in sending message, rollback the message."
+ "RequestHeader: {} Remark: {}",
RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
requestHeader.toString(),
request.getRemark());
break;
}
default:
return null;
}
}
OperationResult result = new OperationResult();
if (MessageSysFlag.TRANSACTION_COMMIT_TYPE == requestHeader.getCommitOrRollback()) {
result = this.brokerController.getTransactionalMessageService().commitMessage(requestHeader);
if (result.getResponseCode() == ResponseCode.SUCCESS) {
if (rejectCommitOrRollback(requestHeader, result.getPrepareMessage())) {
response.setCode(ResponseCode.ILLEGAL_OPERATION);
LOGGER.warn("Message commit fail [producer end]. currentTimeMillis - bornTime > checkImmunityTime, msgId={},commitLogOffset={}, wait check",
requestHeader.getMsgId(), requestHeader.getCommitLogOffset());
return response;
}
RemotingCommand res = checkPrepareMessage(result.getPrepareMessage(), requestHeader);
if (res.getCode() == ResponseCode.SUCCESS) {
MessageExtBrokerInner msgInner = endMessageTransaction(result.getPrepareMessage());
msgInner.setSysFlag(MessageSysFlag.resetTransactionValue(msgInner.getSysFlag(), requestHeader.getCommitOrRollback()));
msgInner.setQueueOffset(requestHeader.getTranStateTableOffset());
msgInner.setPreparedTransactionOffset(requestHeader.getCommitLogOffset());
msgInner.setStoreTimestamp(result.getPrepareMessage().getStoreTimestamp());
MessageAccessor.clearProperty(msgInner, MessageConst.PROPERTY_TRANSACTION_PREPARED);
RemotingCommand sendResult = sendFinalMessage(msgInner);
if (sendResult.getCode() == ResponseCode.SUCCESS) {
this.brokerController.getTransactionalMessageService().deletePrepareMessage(result.getPrepareMessage());
// successfully committed, so decrement the total number of half-messages by 1
this.brokerController.getTransactionalMessageService().getTransactionMetrics().addAndGet(msgInner.getTopic(), -1);
BrokerMetricsManager.commitMessagesTotal.add(1, BrokerMetricsManager.newAttributesBuilder()
.put(LABEL_TOPIC, msgInner.getTopic())
.build());
// record the commit latency.
Long commitLatency = (System.currentTimeMillis() - result.getPrepareMessage().getBornTimestamp()) / 1000;
BrokerMetricsManager.transactionFinishLatency.record(commitLatency, BrokerMetricsManager.newAttributesBuilder()
.put(LABEL_TOPIC, msgInner.getTopic())
.build());
}
return sendResult;
}
return res;
}
} else if (MessageSysFlag.TRANSACTION_ROLLBACK_TYPE == requestHeader.getCommitOrRollback()) {
result = this.brokerController.getTransactionalMessageService().rollbackMessage(requestHeader);
if (result.getResponseCode() == ResponseCode.SUCCESS) {
if (rejectCommitOrRollback(requestHeader, result.getPrepareMessage())) {
response.setCode(ResponseCode.ILLEGAL_OPERATION);
LOGGER.warn("Message rollback fail [producer end]. currentTimeMillis - bornTime > checkImmunityTime, msgId={},commitLogOffset={}, wait check",
requestHeader.getMsgId(), requestHeader.getCommitLogOffset());
return response;
}
RemotingCommand res = checkPrepareMessage(result.getPrepareMessage(), requestHeader);
if (res.getCode() == ResponseCode.SUCCESS) {
this.brokerController.getTransactionalMessageService().deletePrepareMessage(result.getPrepareMessage());
// rolled back, so decrement the total number of half-messages by 1
this.brokerController.getTransactionalMessageService().getTransactionMetrics().addAndGet(result.getPrepareMessage().getProperty(MessageConst.PROPERTY_REAL_TOPIC), -1);
BrokerMetricsManager.rollBackMessagesTotal.add(1, BrokerMetricsManager.newAttributesBuilder()
.put(LABEL_TOPIC, result.getPrepareMessage().getProperty(MessageConst.PROPERTY_REAL_TOPIC))
.build());
}
return res;
}
}
response.setCode(result.getResponseCode());
response.setRemark(result.getResponseRemark());
return response;
}
|
@Test
public void testProcessRequest_RollBack() throws RemotingCommandException {
when(transactionMsgService.rollbackMessage(any(EndTransactionRequestHeader.class))).thenReturn(createResponse(ResponseCode.SUCCESS));
RemotingCommand request = createEndTransactionMsgCommand(MessageSysFlag.TRANSACTION_ROLLBACK_TYPE, true);
RemotingCommand response = endTransactionProcessor.processRequest(handlerContext, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
|
@Override
public EquatableValueSet union(ValueSet other)
{
EquatableValueSet otherValueSet = checkCompatibility(other);
if (whiteList && otherValueSet.isWhiteList()) {
return new EquatableValueSet(type, true, union(entries, otherValueSet.entries));
}
else if (whiteList) {
return new EquatableValueSet(type, false, subtract(otherValueSet.entries, entries));
}
else if (otherValueSet.isWhiteList()) {
return new EquatableValueSet(type, false, subtract(entries, otherValueSet.entries));
}
else {
return new EquatableValueSet(type, false, intersect(otherValueSet.entries, entries));
}
}
|
@Test
public void testUnion()
{
assertEquals(EquatableValueSet.none(TestingIdType.ID).union(EquatableValueSet.none(TestingIdType.ID)), EquatableValueSet.none(TestingIdType.ID));
assertEquals(EquatableValueSet.all(TestingIdType.ID).union(EquatableValueSet.all(TestingIdType.ID)), EquatableValueSet.all(TestingIdType.ID));
assertEquals(EquatableValueSet.none(TestingIdType.ID).union(EquatableValueSet.all(TestingIdType.ID)), EquatableValueSet.all(TestingIdType.ID));
assertEquals(EquatableValueSet.none(TestingIdType.ID).union(EquatableValueSet.of(TestingIdType.ID, 0L)), EquatableValueSet.of(TestingIdType.ID, 0L));
assertEquals(EquatableValueSet.all(TestingIdType.ID).union(EquatableValueSet.of(TestingIdType.ID, 0L)), EquatableValueSet.all(TestingIdType.ID));
assertEquals(EquatableValueSet.of(TestingIdType.ID, 0L).union(EquatableValueSet.of(TestingIdType.ID, 0L)), EquatableValueSet.of(TestingIdType.ID, 0L));
assertEquals(EquatableValueSet.of(TestingIdType.ID, 0L, 1L).union(EquatableValueSet.of(TestingIdType.ID, 0L)), EquatableValueSet.of(TestingIdType.ID, 0L, 1L));
assertEquals(EquatableValueSet.of(TestingIdType.ID, 0L).complement().union(EquatableValueSet.of(TestingIdType.ID, 0L)), EquatableValueSet.all(TestingIdType.ID));
assertEquals(EquatableValueSet.of(TestingIdType.ID, 0L).complement().union(EquatableValueSet.of(TestingIdType.ID, 1L)), EquatableValueSet.of(TestingIdType.ID, 0L).complement());
assertEquals(EquatableValueSet.of(TestingIdType.ID, 0L).union(EquatableValueSet.of(TestingIdType.ID, 1L).complement()), EquatableValueSet.of(TestingIdType.ID, 1L).complement());
assertEquals(EquatableValueSet.of(TestingIdType.ID, 0L, 1L).union(EquatableValueSet.of(TestingIdType.ID, 0L, 2L)), EquatableValueSet.of(TestingIdType.ID, 0L, 1L, 2L));
assertEquals(EquatableValueSet.of(TestingIdType.ID, 0L, 1L).complement().union(EquatableValueSet.of(TestingIdType.ID, 0L, 2L)), EquatableValueSet.of(TestingIdType.ID, 1L).complement());
assertEquals(EquatableValueSet.of(TestingIdType.ID, 0L, 1L).complement().union(EquatableValueSet.of(TestingIdType.ID, 0L, 2L).complement()), EquatableValueSet.of(TestingIdType.ID, 0L).complement());
}
|
public static boolean matchIpRange(String pattern, String host, int port) throws UnknownHostException {
if (pattern == null || host == null) {
throw new IllegalArgumentException(
"Illegal Argument pattern or hostName. Pattern:" + pattern + ", Host:" + host);
}
pattern = pattern.trim();
if ("*.*.*.*".equals(pattern) || "*".equals(pattern)) {
return true;
}
InetAddress inetAddress = InetAddress.getByName(host);
boolean isIpv4 = isValidV4Address(inetAddress);
String[] hostAndPort = getPatternHostAndPort(pattern, isIpv4);
if (hostAndPort[1] != null && !hostAndPort[1].equals(String.valueOf(port))) {
return false;
}
pattern = hostAndPort[0];
String splitCharacter = SPLIT_IPV4_CHARACTER;
if (!isIpv4) {
splitCharacter = SPLIT_IPV6_CHARACTER;
}
String[] mask = pattern.split(splitCharacter);
// check format of pattern
checkHostPattern(pattern, mask, isIpv4);
host = inetAddress.getHostAddress();
if (pattern.equals(host)) {
return true;
}
// short name condition
if (!ipPatternContainExpression(pattern)) {
InetAddress patternAddress = InetAddress.getByName(pattern);
return patternAddress.getHostAddress().equals(host);
}
String[] ipAddress = host.split(splitCharacter);
for (int i = 0; i < mask.length; i++) {
if ("*".equals(mask[i]) || mask[i].equals(ipAddress[i])) {
continue;
} else if (mask[i].contains("-")) {
String[] rangeNumStrs = StringUtils.split(mask[i], '-');
if (rangeNumStrs.length != 2) {
throw new IllegalArgumentException("There is wrong format of ip Address: " + mask[i]);
}
Integer min = getNumOfIpSegment(rangeNumStrs[0], isIpv4);
Integer max = getNumOfIpSegment(rangeNumStrs[1], isIpv4);
Integer ip = getNumOfIpSegment(ipAddress[i], isIpv4);
if (ip < min || ip > max) {
return false;
}
} else if ("0".equals(ipAddress[i])
&& ("0".equals(mask[i])
|| "00".equals(mask[i])
|| "000".equals(mask[i])
|| "0000".equals(mask[i]))) {
continue;
} else if (!mask[i].equals(ipAddress[i])) {
return false;
}
}
return true;
}
|
@Test
void testMatchIpRangeMatchWhenIpWrongException() {
UnknownHostException thrown = assertThrows(
UnknownHostException.class, () -> NetUtils.matchIpRange("192.168.1.63", "192.168.1.ff", 90));
assertTrue(thrown.getMessage().contains("192.168.1.ff"));
}
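The pattern grammar visible in matchIpRange covers a universal wildcard, per-segment wildcards, and dash ranges. A sketch of positive and negative matches built only from the branches shown above:

@Test
void testMatchIpRangeSegmentPatterns() throws UnknownHostException {
    // Universal wildcard short-circuits to true.
    assertTrue(NetUtils.matchIpRange("*.*.*.*", "192.168.1.63", 90));
    // Per-segment wildcard and inclusive dash range.
    assertTrue(NetUtils.matchIpRange("192.168.1.*", "192.168.1.63", 90));
    assertTrue(NetUtils.matchIpRange("192.168.1.1-63", "192.168.1.63", 90));
    assertFalse(NetUtils.matchIpRange("192.168.1.1-62", "192.168.1.63", 90));
}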
|
public Class<?> getSerializedClass() {
return serializedClass;
}
|
@Test
void testConstructorWithSerializedClassAndCause() {
NacosSerializationException exception = new NacosSerializationException(NacosSerializationExceptionTest.class,
new RuntimeException("test"));
assertEquals(Constants.Exception.SERIALIZE_ERROR_CODE, exception.getErrCode());
assertEquals(String.format("errCode: 100, errMsg: Nacos serialize for class [%s] failed. ",
NacosSerializationExceptionTest.class.getName(), "test"), exception.getMessage());
assertEquals(NacosSerializationExceptionTest.class, exception.getSerializedClass());
}
|
public boolean identical(Credentials other) {
return this == other || (other != null && (accessKey == null && other.accessKey == null
|| accessKey != null && accessKey.equals(other.accessKey)) && (
secretKey == null && other.secretKey == null || secretKey != null && secretKey
.equals(other.secretKey)));
}
|
@Test
void testIdentical() {
//given
String ak = "ak";
String sk = "sk";
String tenantId = "100";
Credentials credentials1 = new Credentials(ak, sk, "101");
Credentials credentials2 = new Credentials(ak, sk, tenantId);
//when
boolean actual = credentials1.identical(credentials2);
//then
assertTrue(actual);
}
|
static ApiError validateQuotaKeyValue(
Map<String, ConfigDef.ConfigKey> validKeys,
String key,
double value
) {
// Ensure we have an allowed quota key
ConfigDef.ConfigKey configKey = validKeys.get(key);
if (configKey == null) {
return new ApiError(Errors.INVALID_REQUEST, "Invalid configuration key " + key);
}
if (value <= 0.0) {
return new ApiError(Errors.INVALID_REQUEST, "Quota " + key + " must be greater than 0");
}
// Ensure the quota value is valid
switch (configKey.type()) {
case DOUBLE:
return ApiError.NONE;
case SHORT:
if (value > Short.MAX_VALUE) {
return new ApiError(Errors.INVALID_REQUEST,
"Proposed value for " + key + " is too large for a SHORT.");
}
return getErrorForIntegralQuotaValue(value, key);
case INT:
if (value > Integer.MAX_VALUE) {
return new ApiError(Errors.INVALID_REQUEST,
"Proposed value for " + key + " is too large for an INT.");
}
return getErrorForIntegralQuotaValue(value, key);
case LONG: {
if (value > Long.MAX_VALUE) {
return new ApiError(Errors.INVALID_REQUEST,
"Proposed value for " + key + " is too large for a LONG.");
}
return getErrorForIntegralQuotaValue(value, key);
}
default:
return new ApiError(Errors.UNKNOWN_SERVER_ERROR,
"Unexpected config type " + configKey.type() + " should be Long or Double");
}
}
|
@Test
public void testValidateQuotaKeyValueForValidConsumerByteRate() {
assertEquals(ApiError.NONE, ClientQuotaControlManager.validateQuotaKeyValue(
VALID_CLIENT_ID_QUOTA_KEYS, "consumer_byte_rate", 1234.0));
}
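Because any non-positive value is rejected before the per-type range checks, the guard can be pinned with a sketch like this (same fixture assumed; ApiError is compared by value):

@Test
public void testValidateQuotaKeyValueRejectsNonPositiveValue() {
    assertEquals(new ApiError(Errors.INVALID_REQUEST,
            "Quota consumer_byte_rate must be greater than 0"),
        ClientQuotaControlManager.validateQuotaKeyValue(
            VALID_CLIENT_ID_QUOTA_KEYS, "consumer_byte_rate", 0.0));
}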
|
@Override
public CloseableIterator<ColumnarBatch> readJsonFiles(
CloseableIterator<FileStatus> scanFileIter,
StructType physicalSchema,
Optional<Predicate> predicate) {
return new CloseableIterator<>() {
private String currentFile;
// index of the current line being read from the current read json list, -1 means no line is read yet
private int currentReadLine = -1;
private List<JsonNode> currentReadJsonList = Lists.newArrayList();
@Override
public void close() {
Utils.closeCloseables(scanFileIter);
currentReadLine = -1;
currentReadJsonList = null;
}
@Override
public boolean hasNext() {
if (hasNextToConsume()) {
return true; // we have un-consumed last read line
}
// Either no file is currently being read or the current file has no more
// data; initialize the next file reader, or return false if there are no
// more files to read.
try {
tryGetNextFileJson();
} catch (Exception ex) {
throw new KernelEngineException(
format("Error reading JSON file: %s", currentFile), ex);
}
return hasNextToConsume();
}
private boolean hasNextToConsume() {
return currentReadLine != -1 && !currentReadJsonList.isEmpty() && currentReadLine < currentReadJsonList.size();
}
@Override
public ColumnarBatch next() {
try (Timer ignored = Tracers.watchScope(Tracers.get(), EXTERNAL, "DeltaLakeJsonHandler.JsonToColumnarBatch")) {
if (!hasNextToConsume()) {
throw new NoSuchElementException();
}
List<Row> rows = new ArrayList<>();
int currentBatchSize = 0;
do {
// hasNext() has already loaded the next file's lines into currentReadJsonList
JsonNode jsonNode = currentReadJsonList.get(currentReadLine);
Row row = new io.delta.kernel.defaults.internal.data.DefaultJsonRow(
(ObjectNode) jsonNode, physicalSchema);
rows.add(row);
currentBatchSize++;
currentReadLine++;
} while (currentBatchSize < maxBatchSize && hasNext());
return new io.delta.kernel.defaults.internal.data.DefaultRowBasedColumnarBatch(
physicalSchema, rows);
}
}
private void tryGetNextFileJson() throws ExecutionException, IOException {
if (scanFileIter.hasNext()) {
currentFile = scanFileIter.next().getPath();
Path filePath = new Path(currentFile);
if (filePath.getName().equals(LAST_CHECKPOINT_FILE_NAME)) {
// the last_checkpoint file cannot be served from the cache
currentReadJsonList = readJsonFile(currentFile, hadoopConf);
} else {
currentReadJsonList = jsonCache.get(currentFile);
}
currentReadLine = 0;
}
}
};
}
|
@Test
public void testReadJsonMetadata() {
String path = deltaLakePath + "/00000000000000000031.json";
DeltaLakeJsonHandler deltaLakeJsonHandler = new DeltaLakeJsonHandler(hdfsConfiguration, jsonCache);
StructType readSchema = LogReplay.getAddRemoveReadSchema(true);
FileStatus fileStatus = FileStatus.of(path, 0, 0);
List<Row> addRows = Lists.newArrayList();
try (CloseableIterator<ColumnarBatch> jsonIter = deltaLakeJsonHandler.readJsonFiles(
Utils.singletonCloseableIterator(fileStatus), readSchema, Optional.empty())) {
while (jsonIter.hasNext()) {
ColumnarBatch columnarBatch = jsonIter.next();
ColumnVector addsVector = columnarBatch.getColumnVector(ADD_FILE_ORDINAL);
for (int rowId = 0; rowId < addsVector.getSize(); rowId++) {
if (addsVector.isNullAt(rowId)) {
continue;
}
getAddFilePath(addsVector, rowId);
}
try (CloseableIterator<Row> rows = columnarBatch.getRows()) {
while (rows.hasNext()) {
Row row = rows.next();
addRows.add(row);
}
}
}
} catch (IOException e) {
Assert.fail();
}
Assert.assertEquals(2, addRows.size());
Row scanRow = addRows.get(1);
Row addFile = getAddFileEntry(scanRow);
Assert.assertEquals("col_date=2024-01-06/part-00000-3c9a556a-d185-4963-869d-b059d4c9b482.c000.snappy.parquet",
addFile.getString(ADD_FILE_PATH_ORDINAL));
Assert.assertEquals(724, addFile.getLong(ADD_FILE_SIZE_ORDINAL));
Assert.assertEquals(1721830614469L, addFile.getLong(ADD_FILE_MOD_TIME_ORDINAL));
Map<String, String> partitionValues = InternalScanFileUtils.getPartitionValues(scanRow);
Assert.assertTrue(partitionValues.containsKey("col_date"));
Assert.assertEquals("2024-01-06", partitionValues.get("col_date"));
Assert.assertFalse(jsonCache.asMap().isEmpty());
Assert.assertTrue(jsonCache.asMap().containsKey(path));
}
|
@Override
public OpenstackNode node(String hostname) {
return osNodeStore.node(hostname);
}
|
@Test
public void testGetNodeByDeviceId() {
assertTrue(ERR_NOT_FOUND, Objects.equals(
target.node(GATEWAY_1_INTG_DEVICE.id()), GATEWAY_1));
assertTrue(ERR_NOT_FOUND, Objects.equals(
target.node(GATEWAY_1.ovsdb()), GATEWAY_1));
}
|
public static InstrumentedExecutorService newSingleThreadExecutor(MetricRegistry registry, String name) {
return new InstrumentedExecutorService(Executors.newSingleThreadExecutor(), registry, name);
}
|
@Test
public void testNewSingleThreadExecutorWithThreadFactory() throws Exception {
final ExecutorService executorService = InstrumentedExecutors.newSingleThreadExecutor(defaultThreadFactory, registry);
executorService.submit(new NoopRunnable());
executorService.shutdown();
}
|
public List<DataRecord> merge(final List<DataRecord> dataRecords) {
Map<DataRecord.Key, DataRecord> result = new HashMap<>();
dataRecords.forEach(each -> {
if (PipelineSQLOperationType.INSERT == each.getType()) {
mergeInsert(each, result);
} else if (PipelineSQLOperationType.UPDATE == each.getType()) {
mergeUpdate(each, result);
} else if (PipelineSQLOperationType.DELETE == each.getType()) {
mergeDelete(each, result);
}
});
return new ArrayList<>(result.values());
}
|
@Test
void assertUpdatePrimaryKeyBeforeUpdate() {
DataRecord beforeDataRecord = mockUpdateDataRecord(1, 2, 10, 50);
DataRecord afterDataRecord = mockUpdateDataRecord(2, 10, 200);
Collection<DataRecord> actual = groupEngine.merge(Arrays.asList(beforeDataRecord, afterDataRecord));
assertThat(actual.size(), is(1));
DataRecord dataRecord = actual.iterator().next();
assertThat(dataRecord.getType(), is(PipelineSQLOperationType.UPDATE));
assertThat(dataRecord.getTableName(), is("order"));
assertThat(dataRecord.getActualTableName(), is("order_0"));
assertThat(dataRecord.getCommitTime(), is(456L));
assertColumnsMatched(dataRecord.getColumn(0), new Column("id", 1, 2, true, true));
assertColumnsMatched(dataRecord.getColumn(1), new Column("user_id", 10, 10, false, false));
assertColumnsMatched(dataRecord.getColumn(2), new Column("total_price", 50, 200, true, false));
}
|
public static List<Vertex> sortVertices(Graph g) throws UnexpectedGraphCycleError {
if (g.getEdges().size() == 0) return new ArrayList<>(g.getVertices());
List<Vertex> sorted = new ArrayList<>(g.getVertices().size());
Deque<Vertex> pending = new LinkedList<>();
pending.addAll(g.getRoots());
Set<Edge> traversedEdges = new HashSet<>();
while (!pending.isEmpty()) {
Vertex currentVertex = pending.removeFirst();
sorted.add(currentVertex);
currentVertex.getOutgoingEdges().forEach(edge -> {
traversedEdges.add(edge);
Vertex toVertex = edge.getTo();
if (toVertex.getIncomingEdges().stream().allMatch(traversedEdges::contains)) {
pending.add(toVertex);
}
});
}
// Check for cycles
if (g.edges().anyMatch(edge -> !traversedEdges.contains(edge))) {
throw new UnexpectedGraphCycleError(g);
}
return sorted;
}
|
@Test
public void testSortOrder() throws InvalidIRException, TopologicalSort.UnexpectedGraphCycleError {
Graph g = Graph.empty();
Vertex v1 = IRHelpers.createTestVertex();
Vertex v2 = IRHelpers.createTestVertex();
Vertex v3 = IRHelpers.createTestVertex();
Vertex v4 = IRHelpers.createTestVertex();
g.chainVertices(v3, v1, v2);
g.chainVertices(v4, v1, v2);
assertThat(TopologicalSort.sortVertices(g),
anyOf(
is(Arrays.asList(v3,v4,v1,v2)),
is(Arrays.asList(v4,v3,v1,v2))
));
}
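The traversal is Kahn's algorithm: release a vertex once every incoming edge has been traversed, and treat any never-traversed edge as evidence of a cycle. A self-contained sketch of the same idea over integer adjacency lists (java.util only; illustrative types, not Logstash's Graph API):

// Kahn's algorithm: indegree[v] counts not-yet-traversed incoming edges.
static List<Integer> kahnSort(List<List<Integer>> adj) {
    int n = adj.size();
    int[] indegree = new int[n];
    for (List<Integer> outs : adj) {
        for (int v : outs) {
            indegree[v]++;
        }
    }
    Deque<Integer> pending = new ArrayDeque<>();
    for (int v = 0; v < n; v++) {
        if (indegree[v] == 0) {
            pending.add(v); // roots: vertices with no incoming edges
        }
    }
    List<Integer> sorted = new ArrayList<>();
    while (!pending.isEmpty()) {
        int u = pending.removeFirst();
        sorted.add(u);
        for (int v : adj.get(u)) {
            if (--indegree[v] == 0) {
                pending.add(v); // all incoming edges of v now traversed
            }
        }
    }
    if (sorted.size() != n) {
        throw new IllegalStateException("graph has a cycle");
    }
    return sorted;
}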
|
@Override
public ConfigData get(String path) {
return get(path, null);
}
|
@Test
void testGetAllEnvVarsNotEmpty() {
ConfigData properties = envVarConfigProvider.get("");
assertNotEquals(0, properties.data().size());
}
|
@Override
public void addValue(Type type, Block block, int position)
{
if (!block.isNull(position)) {
nonNullValueCount++;
}
}
|
@Test
public void testAddValue()
{
CountStatisticsBuilder statisticsBuilder = new CountStatisticsBuilder();
statisticsBuilder.addValue();
statisticsBuilder.addValue();
ColumnStatistics columnStatistics = statisticsBuilder.buildColumnStatistics();
assertEquals(columnStatistics.getNumberOfValues(), 2);
}
|
public void init() {
loadExtend();
}
|
@Test
void init() {
healthCheckExtendProvider.init();
}
|
@Override
public Server build(Environment environment) {
printBanner(environment.getName());
final ThreadPool threadPool = createThreadPool(environment.metrics());
final Server server = buildServer(environment.lifecycle(), threadPool);
final Handler applicationHandler = createAppServlet(server,
environment.jersey(),
environment.getObjectMapper(),
environment.getValidator(),
environment.getApplicationContext(),
environment.getJerseyServletContainer(),
environment.metrics());
final Handler adminHandler = createAdminServlet(server,
environment.getAdminContext(),
environment.metrics(),
environment.healthChecks(),
environment.admin());
final RoutingHandler routingHandler = buildRoutingHandler(environment.metrics(),
server,
applicationHandler,
adminHandler);
final Handler gzipHandler = buildGzipHandler(routingHandler);
server.setHandler(addStatsHandler(addRequestLog(server, gzipHandler, environment.getName())));
return server;
}
|
@Test
void defaultsDetailedJsonProcessingExceptionToFalse() {
http.build(environment);
assertThat(environment.jersey().getResourceConfig().getSingletons())
.filteredOn(x -> x instanceof ExceptionMapperBinder)
.map(x -> (ExceptionMapperBinder) x)
.singleElement()
.satisfies(x -> assertThat(x.isShowDetails()).isFalse());
}
|
@Override
public NodeHealth get() {
NodeHealth.Builder builder = NodeHealth.newNodeHealthBuilder();
if (clusterAppState.isOperational(ProcessId.ELASTICSEARCH, true)) {
builder.setStatus(NodeHealth.Status.GREEN);
} else {
builder.setStatus(NodeHealth.Status.RED)
.addCause("Elasticsearch is not operational");
}
return builder
.setDetails(nodeDetails)
.build();
}
|
@Test
public void get_returns_status_RED_with_cause_if_elasticsearch_process_is_not_operational_in_ClusterAppState() {
Properties properties = new Properties();
setRequiredPropertiesAndMocks(properties);
when(clusterAppState.isOperational(ProcessId.ELASTICSEARCH, true)).thenReturn(false);
SearchNodeHealthProvider underTest = new SearchNodeHealthProvider(new Props(properties), clusterAppState, networkUtils, clock);
NodeHealth nodeHealth = underTest.get();
assertThat(nodeHealth.getStatus()).isEqualTo(NodeHealth.Status.RED);
assertThat(nodeHealth.getCauses()).containsOnly("Elasticsearch is not operational");
}
|
public List<String> splitSql(String text) {
List<String> queries = new ArrayList<>();
StringBuilder query = new StringBuilder();
char character;
boolean multiLineComment = false;
boolean singleLineComment = false;
boolean singleQuoteString = false;
boolean doubleQuoteString = false;
for (int index = 0; index < text.length(); index++) {
character = text.charAt(index);
// end of single line comment
if (singleLineComment && (character == '\n')) {
singleLineComment = false;
query.append(character);
if (index == (text.length() - 1) && !query.toString().trim().isEmpty()) {
// add query when it is the end of sql.
queries.add(query.toString());
}
continue;
}
// end of multiple line comment
if (multiLineComment && (index - 1) >= 0 && text.charAt(index - 1) == '/'
&& (index - 2) >= 0 && text.charAt(index - 2) == '*') {
multiLineComment = false;
}
if (character == '\'' && !(singleLineComment || multiLineComment)) {
if (singleQuoteString) {
singleQuoteString = false;
} else if (!doubleQuoteString) {
singleQuoteString = true;
}
}
if (character == '"' && !(singleLineComment || multiLineComment)) {
if (doubleQuoteString && index > 0) {
doubleQuoteString = false;
} else if (!singleQuoteString) {
doubleQuoteString = true;
}
}
if (!singleQuoteString && !doubleQuoteString && !multiLineComment && !singleLineComment
&& text.length() > (index + 1)) {
if (isSingleLineComment(text.charAt(index), text.charAt(index + 1))) {
singleLineComment = true;
} else if (text.charAt(index) == '/' && text.length() > (index + 2)
&& text.charAt(index + 1) == '*' && text.charAt(index + 2) != '+') {
multiLineComment = true;
}
}
if (character == ';' && !singleQuoteString && !doubleQuoteString && !multiLineComment && !singleLineComment) {
// reached a statement-terminating semicolon
if (!query.toString().trim().isEmpty()) {
queries.add(query.toString());
query = new StringBuilder();
}
} else if (index == (text.length() - 1)) {
// reached the last character
if ((!singleLineComment && !multiLineComment)) {
query.append(character);
}
if (!query.toString().trim().isEmpty()) {
queries.add(query.toString());
query = new StringBuilder();
}
} else if (!singleLineComment && !multiLineComment) {
// normal case, not in single line comment and not in multiple line comment
query.append(character);
} else if (character == '\n') {
query.append(character);
}
}
List<String> refinedQueries = new ArrayList<>();
for (int i = 0; i < queries.size(); ++i) {
String emptyLine = "";
if (i > 0) {
emptyLine = createEmptyLine(refinedQueries.get(i-1));
}
if (isSingleLineComment(queries.get(i)) || isMultipleLineComment(queries.get(i))) {
// refine the last refinedQuery
if (refinedQueries.size() > 0) {
String lastRefinedQuery = refinedQueries.get(refinedQueries.size() - 1);
refinedQueries.set(refinedQueries.size() - 1,
lastRefinedQuery + createEmptyLine(queries.get(i)));
}
} else {
String refinedQuery = emptyLine + queries.get(i);
refinedQueries.add(refinedQuery);
}
}
return refinedQueries;
}
|
@Test
void testQuoteInComment() {
SqlSplitter sqlSplitter = new SqlSplitter();
List<String> sqls = sqlSplitter.splitSql("show tables;-- comment_1'\ndescribe table_1");
assertEquals(2, sqls.size());
assertEquals("show tables", sqls.get(0));
assertEquals("\ndescribe table_1", sqls.get(1));
sqls = sqlSplitter.splitSql("show tables;-- comment_1\"\ndescribe table_1");
assertEquals(2, sqls.size());
assertEquals("show tables", sqls.get(0));
assertEquals("\ndescribe table_1", sqls.get(1));
sqls = sqlSplitter.splitSql("show tables;/* comment_1' */\ndescribe table_1");
assertEquals(2, sqls.size());
assertEquals("show tables", sqls.get(0));
assertEquals("\ndescribe table_1", sqls.get(1));
sqls = sqlSplitter.splitSql("show tables;/* comment_1\" */\ndescribe table_1");
assertEquals(2, sqls.size());
assertEquals("show tables", sqls.get(0));
assertEquals("\ndescribe table_1", sqls.get(1));
}
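The splitter treats a semicolon as a boundary only when every string and comment flag is clear, so a semicolon inside a quoted literal must not split. A sketch of that case, grounded in the string-state flags above (and assuming, as the expectations in testQuoteInComment imply, that createEmptyLine contributes nothing for single-line statements):

@Test
void testSemicolonInsideStringLiteral() {
    SqlSplitter sqlSplitter = new SqlSplitter();
    List<String> sqls = sqlSplitter.splitSql("select 'a;b' as col;select 2");
    assertEquals(2, sqls.size());
    assertEquals("select 'a;b' as col", sqls.get(0));
    assertEquals("select 2", sqls.get(1));
}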
|
public RuntimeOptionsBuilder parse(Map<String, String> properties) {
return parse(properties::get);
}
|
@Test
void should_warn_about_cucumber_options(LogRecordListener logRecordListener) {
properties.put(Constants.OPTIONS_PROPERTY_NAME, "--help");
cucumberPropertiesParser.parse(properties).build();
assertThat(logRecordListener.getLogRecords().get(0).getMessage(), equalTo("" +
"Passing commandline options via the property 'cucumber.options' is no longer supported. " +
"Please use individual properties instead. " +
"See the java doc on io.cucumber.core.options.Constants for details."));
}
|
public JmxCollector register() {
return register(PrometheusRegistry.defaultRegistry);
}
|
@Test
public void testLowercaseOutputName() throws Exception {
new JmxCollector(
"\n---\nlowercaseOutputName: true\nrules:\n- pattern: `^hadoop<service=DataNode, name=DataNodeActivity-ams-hdd001-50010><>replaceBlockOpMinTime:`\n name: Foo"
.replace('`', '"'))
.register(prometheusRegistry);
assertEquals(200, getSampleValue("foo", new String[] {}, new String[] {}), .001);
}
|
public void correctlySpends(Transaction txContainingThis, int scriptSigIndex, @Nullable TransactionWitness witness, @Nullable Coin value,
Script scriptPubKey, Set<VerifyFlag> verifyFlags) throws ScriptException {
if (ScriptPattern.isP2WPKH(scriptPubKey)) {
// For segwit, full validation isn't implemented. So we simply check the signature. P2SH_P2WPKH is handled
// by the P2SH code for now.
if (witness.getPushCount() < 2)
throw new ScriptException(ScriptError.SCRIPT_ERR_WITNESS_PROGRAM_WITNESS_EMPTY, witness.toString());
TransactionSignature signature;
try {
signature = TransactionSignature.decodeFromBitcoin(witness.getPush(0), true, true);
} catch (SignatureDecodeException x) {
throw new ScriptException(ScriptError.SCRIPT_ERR_SIG_DER, "Cannot decode", x);
}
ECKey pubkey = ECKey.fromPublicOnly(witness.getPush(1));
Script scriptCode = ScriptBuilder.createP2PKHOutputScript(pubkey);
Sha256Hash sigHash = txContainingThis.hashForWitnessSignature(scriptSigIndex, scriptCode, value,
signature.sigHashMode(), false);
boolean validSig = pubkey.verify(sigHash, signature);
if (!validSig)
throw new ScriptException(ScriptError.SCRIPT_ERR_CHECKSIGVERIFY, "Invalid signature");
} else if (ScriptPattern.isP2PKH(scriptPubKey)) {
if (chunks.size() != 2)
throw new ScriptException(ScriptError.SCRIPT_ERR_SCRIPT_SIZE, "Invalid size: " + chunks.size());
TransactionSignature signature;
try {
signature = TransactionSignature.decodeFromBitcoin(chunks.get(0).data, true, true);
} catch (SignatureDecodeException x) {
throw new ScriptException(ScriptError.SCRIPT_ERR_SIG_DER, "Cannot decode", x);
}
ECKey pubkey = ECKey.fromPublicOnly(chunks.get(1).data);
Sha256Hash sigHash = txContainingThis.hashForSignature(scriptSigIndex, scriptPubKey,
signature.sigHashMode(), false);
boolean validSig = pubkey.verify(sigHash, signature);
if (!validSig)
throw new ScriptException(ScriptError.SCRIPT_ERR_CHECKSIGVERIFY, "Invalid signature");
} else if (ScriptPattern.isP2PK(scriptPubKey)) {
if (chunks.size() != 1)
throw new ScriptException(ScriptError.SCRIPT_ERR_SCRIPT_SIZE, "Invalid size: " + chunks.size());
TransactionSignature signature;
try {
signature = TransactionSignature.decodeFromBitcoin(chunks.get(0).data, false, false);
} catch (SignatureDecodeException x) {
throw new ScriptException(ScriptError.SCRIPT_ERR_SIG_DER, "Cannot decode", x);
}
ECKey pubkey = ECKey.fromPublicOnly(ScriptPattern.extractKeyFromP2PK(scriptPubKey));
Sha256Hash sigHash = txContainingThis.hashForSignature(scriptSigIndex, scriptPubKey,
signature.sigHashMode(), false);
boolean validSig = pubkey.verify(sigHash, signature);
if (!validSig)
throw new ScriptException(ScriptError.SCRIPT_ERR_CHECKSIGVERIFY, "Invalid signature");
} else {
correctlySpends(txContainingThis, scriptSigIndex, scriptPubKey, verifyFlags);
}
}
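// A minimal usage sketch (not part of the method above): verifying input 0 of a
// transaction against the output it spends. 'tx' and 'connectedOutput' are
// hypothetical variables; the flag set is bitcoinj's Script.ALL_VERIFY_FLAGS.
Script scriptSig = tx.getInput(0).getScriptSig();
scriptSig.correctlySpends(tx, 0, tx.getInput(0).getWitness(), connectedOutput.getValue(),
        connectedOutput.getScriptPubKey(), Script.ALL_VERIFY_FLAGS);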
|
@Test
public void dataDrivenInvalidTransactions() throws Exception {
JsonNode json = new ObjectMapper().readTree(new InputStreamReader(getClass().getResourceAsStream(
"tx_invalid.json"), StandardCharsets.UTF_8));
for (JsonNode test : json) {
if (test.isArray() && test.size() == 1 && test.get(0).isTextual())
continue; // This is a comment.
Map<TransactionOutPoint, Script> scriptPubKeys = parseScriptPubKeys(test.get(0));
byte[] txBytes = ByteUtils.parseHex(test.get(1).asText().toLowerCase());
MessageSerializer serializer = TESTNET.getDefaultSerializer();
Transaction transaction;
try {
transaction = serializer.makeTransaction(ByteBuffer.wrap(txBytes));
} catch (ProtocolException ignore) {
// Try to parse as a no-witness transaction because some vectors are 0-input, 1-output txs that fail
// to correctly parse as witness transactions.
int protoVersionNoWitness = serializer.getProtocolVersion() | SERIALIZE_TRANSACTION_NO_WITNESS;
transaction = serializer.withProtocolVersion(protoVersionNoWitness).makeTransaction(ByteBuffer.wrap(txBytes));
}
Set<VerifyFlag> verifyFlags = parseVerifyFlags(test.get(2).asText());
boolean valid = true;
try {
Transaction.verify(TESTNET.network(), transaction);
} catch (VerificationException e) {
valid = false;
}
// Bitcoin Core checks this case in CheckTransaction, but we leave it to
// later where we will see an attempt to double-spend, so we explicitly check here
HashSet<TransactionOutPoint> set = new HashSet<>();
for (TransactionInput input : transaction.getInputs()) {
if (set.contains(input.getOutpoint()))
valid = false;
set.add(input.getOutpoint());
}
for (int i = 0; i < transaction.getInputs().size() && valid; i++) {
TransactionInput input = transaction.getInput(i);
assertTrue(scriptPubKeys.containsKey(input.getOutpoint()));
try {
input.getScriptSig().correctlySpends(transaction, i, null, null,
scriptPubKeys.get(input.getOutpoint()), verifyFlags);
} catch (VerificationException e) {
valid = false;
}
}
if (valid) {
System.out.println(test);
fail();
}
}
}
|
static String strip(final String line) {
return new Parser(line).parse();
}
|
@Test
public void shouldStripCommentFromStatementContainingQuoteCharactersInStrings() {
// Given:
final String line = "\"````````\" '\"\"\"\"\"\"' '`````' -- this is a comment -- with other dashes";
// Then:
assertThat(CommentStripper.strip(line), is("\"````````\" '\"\"\"\"\"\"' '`````'"));
}
|
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
} else if (!(obj instanceof Tile)) {
return false;
}
Tile other = (Tile) obj;
if (this.tileX != other.tileX) {
return false;
} else if (this.tileY != other.tileY) {
return false;
} else if (this.zoomLevel != other.zoomLevel) {
return false;
} else if (this.tileSize != other.tileSize) {
return false;
}
return true;
}
|
@Test
public void equalsTest() {
Tile tile1 = new Tile(1, 2, (byte) 3, TILE_SIZE);
Tile tile2 = new Tile(1, 2, (byte) 3, TILE_SIZE);
Tile tile3 = new Tile(1, 1, (byte) 3, TILE_SIZE);
Tile tile4 = new Tile(2, 2, (byte) 3, TILE_SIZE);
Tile tile5 = new Tile(1, 2, (byte) 4, TILE_SIZE);
TestUtils.equalsTest(tile1, tile2);
TestUtils.notEqualsTest(tile1, tile3);
TestUtils.notEqualsTest(tile1, tile4);
TestUtils.notEqualsTest(tile1, tile5);
TestUtils.notEqualsTest(tile1, new Object());
TestUtils.notEqualsTest(tile1, null);
}
|
@VisibleForTesting
static JibContainerBuilder processCommonConfiguration(
RawConfiguration rawConfiguration,
InferredAuthProvider inferredAuthProvider,
ProjectProperties projectProperties)
throws InvalidFilesModificationTimeException, InvalidAppRootException,
IncompatibleBaseImageJavaVersionException, IOException, InvalidImageReferenceException,
InvalidContainerizingModeException, MainClassInferenceException, InvalidPlatformException,
InvalidContainerVolumeException, InvalidWorkingDirectoryException,
InvalidCreationTimeException, ExtraDirectoryNotFoundException {
// Create and configure JibContainerBuilder
ModificationTimeProvider modificationTimeProvider =
createModificationTimeProvider(rawConfiguration.getFilesModificationTime());
JavaContainerBuilder javaContainerBuilder =
getJavaContainerBuilderWithBaseImage(
rawConfiguration, projectProperties, inferredAuthProvider)
.setAppRoot(getAppRootChecked(rawConfiguration, projectProperties))
.setModificationTimeProvider(modificationTimeProvider);
JibContainerBuilder jibContainerBuilder =
projectProperties.createJibContainerBuilder(
javaContainerBuilder,
getContainerizingModeChecked(rawConfiguration, projectProperties));
jibContainerBuilder
.setFormat(rawConfiguration.getImageFormat())
.setPlatforms(getPlatformsSet(rawConfiguration))
.setEntrypoint(computeEntrypoint(rawConfiguration, projectProperties, jibContainerBuilder))
.setProgramArguments(rawConfiguration.getProgramArguments().orElse(null))
.setEnvironment(rawConfiguration.getEnvironment())
.setExposedPorts(Ports.parse(rawConfiguration.getPorts()))
.setVolumes(getVolumesSet(rawConfiguration))
.setLabels(rawConfiguration.getLabels())
.setUser(rawConfiguration.getUser().orElse(null))
.setCreationTime(getCreationTime(rawConfiguration.getCreationTime(), projectProperties));
getWorkingDirectoryChecked(rawConfiguration)
.ifPresent(jibContainerBuilder::setWorkingDirectory);
// Adds all the extra files.
for (ExtraDirectoriesConfiguration extraDirectory : rawConfiguration.getExtraDirectories()) {
Path from = extraDirectory.getFrom();
if (Files.exists(from)) {
jibContainerBuilder.addFileEntriesLayer(
JavaContainerBuilderHelper.extraDirectoryLayerConfiguration(
from,
AbsoluteUnixPath.get(extraDirectory.getInto()),
extraDirectory.getIncludesList(),
extraDirectory.getExcludesList(),
rawConfiguration.getExtraDirectoryPermissions(),
modificationTimeProvider));
} else if (!from.endsWith(DEFAULT_JIB_DIR)) {
throw new ExtraDirectoryNotFoundException(from.toString(), from.toString());
}
}
return jibContainerBuilder;
}
|
@Test
public void testEntrypoint_extraClasspathNonWarPackaging()
throws IOException, InvalidImageReferenceException, MainClassInferenceException,
InvalidAppRootException, InvalidWorkingDirectoryException, InvalidPlatformException,
InvalidContainerVolumeException, IncompatibleBaseImageJavaVersionException,
NumberFormatException, InvalidContainerizingModeException,
InvalidFilesModificationTimeException, InvalidCreationTimeException,
ExtraDirectoryNotFoundException {
when(rawConfiguration.getExtraClasspath()).thenReturn(Collections.singletonList("/foo"));
when(projectProperties.isWarProject()).thenReturn(false);
ContainerBuildPlan buildPlan = processCommonConfiguration();
assertThat(buildPlan.getEntrypoint())
.containsExactly(
"java", "-cp", "/foo:/app/resources:/app/classes:/app/libs/*", "java.lang.Object")
.inOrder();
ArgumentMatcher<LogEvent> isLogWarn = logEvent -> logEvent.getLevel() == LogEvent.Level.WARN;
verify(logger, never()).accept(argThat(isLogWarn));
}
|
public static ShardingRouteEngine newInstance(final ShardingRule shardingRule, final ShardingSphereDatabase database, final QueryContext queryContext,
final ShardingConditions shardingConditions, final ConfigurationProperties props, final ConnectionContext connectionContext) {
SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext();
SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
if (sqlStatement instanceof TCLStatement) {
return new ShardingDatabaseBroadcastRoutingEngine();
}
if (sqlStatement instanceof DDLStatement) {
if (sqlStatementContext instanceof CursorAvailable) {
return getCursorRouteEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props);
}
return getDDLRoutingEngine(shardingRule, database, sqlStatementContext);
}
if (sqlStatement instanceof DALStatement) {
return getDALRoutingEngine(shardingRule, database, sqlStatementContext, connectionContext);
}
if (sqlStatement instanceof DCLStatement) {
return getDCLRoutingEngine(shardingRule, database, sqlStatementContext);
}
return getDQLRoutingEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props, connectionContext);
}
|
@Test
void assertNewInstanceForSelectBroadcastTable() {
SQLStatement sqlStatement = mock(MySQLSelectStatement.class);
when(sqlStatementContext.getSqlStatement()).thenReturn(sqlStatement);
QueryContext queryContext = new QueryContext(sqlStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class));
ShardingRouteEngine actual =
ShardingRouteEngineFactory.newInstance(shardingRule, database, queryContext, shardingConditions, props, new ConnectionContext(Collections::emptySet));
assertThat(actual, instanceOf(ShardingUnicastRoutingEngine.class));
}
|
public List<String> getMatchers() {
List<String> matchers = new Matcher(matcher).toCollection();
Collections.sort(matchers);
return matchers;
}
|
@Test
void shouldUnderstandSplittingMatcherString() {
User user = new User("UserName", new String[]{"A", "b"}, "[email protected]", true);
assertThat(user.getMatchers()).isEqualTo(List.of("A", "b"));
user = new User("UserName", new String[]{"A,b"}, "[email protected]", true);
assertThat(user.getMatchers()).isEqualTo(List.of("A", "b"));
user = new User("UserName", new String[]{""}, "[email protected]", true);
List<String> matchers = Collections.emptyList();
assertThat(user.getMatchers()).isEqualTo(matchers);
user = new User("UserName", new String[]{"b,A"}, "[email protected]", true);
assertThat(user.getMatchers()).isEqualTo(List.of("A", "b"));
}
|
@Override
public boolean add(Object e) {
return addRaw(JSONUtil.wrap(e, this.config), null);
}
|
@Test
public void addTest() {
// Approach 1
JSONArray array = JSONUtil.createArray();
// Approach 2
// JSONArray array = new JSONArray();
array.add("value1");
array.add("value2");
array.add("value3");
assertEquals(array.get(0), "value1");
}
|
public void formatSource(CharSource input, CharSink output)
throws FormatterException, IOException {
// TODO(cushon): proper support for streaming input/output. Input may
// not be feasible (parsing) but output should be easier.
output.write(formatSource(input.read()));
}
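// Sketch: the in-memory String overload is the simplest way to exercise the formatter;
// the output shown is what google-java-format is expected to produce (illustrative).
String formatted = new Formatter().formatSource("class T{int x;}");
// formatted -> "class T {\n  int x;\n}\n"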
|
@Test
public void lineCommentTrailingBlank() throws FormatterException {
String input = "class T {\n// asd \n\nint x;\n}";
String output = new Formatter().formatSource(input);
String expect = "class T {\n // asd\n\n int x;\n}\n";
assertThat(output).isEqualTo(expect);
}
|
static InetSocketAddress parse(
final String value, final String uriParamName, final boolean isReResolution, final NameResolver nameResolver)
{
if (Strings.isEmpty(value))
{
throw new NullPointerException("input string must not be null or empty");
}
final String nameAndPort = nameResolver.lookup(value, uriParamName, isReResolution);
ParseResult result = tryParseIpV4(nameAndPort);
if (null == result)
{
result = tryParseIpV6(nameAndPort);
}
if (null == result)
{
throw new IllegalArgumentException("invalid format: " + nameAndPort);
}
final InetAddress inetAddress = nameResolver.resolve(result.host, uriParamName, isReResolution);
return null == inetAddress ? InetSocketAddress.createUnresolved(result.host, result.port) :
new InetSocketAddress(inetAddress, result.port);
}
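// Hypothetical examples of accepted endpoint formats, using the same resolver and
// parameter name as the test below: plain IPv4 and bracketed IPv6, each with a port.
InetSocketAddress v4 = SocketAddressParser.parse("192.168.1.20:40123", ENDPOINT_PARAM_NAME, false, DEFAULT_RESOLVER);
InetSocketAddress v6 = SocketAddressParser.parse("[::1]:40123", ENDPOINT_PARAM_NAME, false, DEFAULT_RESOLVER);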
|
@Test
void shouldRejectOnInvalidPort2()
{
assertThrows(IllegalArgumentException.class, () -> SocketAddressParser.parse(
"192.168.1.20::123", ENDPOINT_PARAM_NAME, false, DEFAULT_RESOLVER));
}
|
static public Entry buildMenuStructure(String xml) {
final Reader reader = new StringReader(xml);
return buildMenuStructure(reader);
}
|
@Test
public void givenXmlWithChildEntryWithOneBuilder_createsStructureWithChildEntry() {
String xmlWithoutContent = "<FreeplaneUIEntries><Entry builder='builder'/></FreeplaneUIEntries>";
Entry builtMenuStructure = XmlEntryStructureBuilder.buildMenuStructure(xmlWithoutContent);
Entry menuStructureWithChildEntry = new Entry();
final Entry childEntry = new Entry();
childEntry.setBuilders(asList("builder"));
menuStructureWithChildEntry.addChild(childEntry);
assertThat(builtMenuStructure, equalTo(menuStructureWithChildEntry));
}
|
@Override
protected XmppWebSocketTransport getTransport() {
return websocketTransport;
}
|
@Test
public void lookupConnectionEndpointsTest() throws URISyntaxException {
XmppWebSocketTransportModuleDescriptor websocketTransportModuleDescriptor = getWebSocketDescriptor();
ModularXmppClientToServerConnectionInternal connectionInternal = mock(ModularXmppClientToServerConnectionInternal.class);
XmppWebSocketTransportModule transportModule
= new XmppWebSocketTransportModule(websocketTransportModuleDescriptor, connectionInternal);
XmppWebSocketTransportModule.XmppWebSocketTransport transport = transportModule.getTransport();
assertNotNull(transport.lookupConnectionEndpoints());
}
|
@Override
public CEFParserResult evaluate(FunctionArgs args, EvaluationContext context) {
final String cef = valueParam.required(args, context);
final boolean useFullNames = useFullNamesParam.optional(args, context).orElse(false);
final CEFParser parser = CEFParserFactory.create();
if (cef == null || cef.isEmpty()) {
LOG.debug("NULL or empty parameter passed to CEF parser function. Not evaluating.");
return null;
}
LOG.debug("Running CEF parser for [{}].", cef);
final MappedMessage message;
try (Timer.Context timer = parseTime.time()) {
message = new MappedMessage(parser.parse(cef.trim()), useFullNames);
} catch (Exception e) {
LOG.error("Error while parsing CEF message: {}", cef, e);
return null;
}
final Map<String, Object> fields = new HashMap<>();
/*
* Add all CEF standard fields. We are prefixing with cef_ to avoid overwriting existing fields or to be
* overwritten ourselves later in the processing. The user is encouraged to run another pipeline function
* to clean up field names if desired.
*/
fields.put("cef_version", message.cefVersion());
fields.put("device_vendor", message.deviceVendor());
fields.put("device_product", message.deviceProduct());
fields.put("device_version", message.deviceVersion());
fields.put("device_event_class_id", message.deviceEventClassId());
fields.put("name", message.name());
fields.put("severity", message.severity());
// Add all custom CEF fields.
fields.putAll(message.mappedExtensions());
return new CEFParserResult(fields);
}
|
@Test
public void evaluate_returns_result_without_message_field() throws Exception {
final Map<String, Expression> arguments = ImmutableMap.of(
CEFParserFunction.VALUE, new StringExpression(new CommonToken(0), "CEF:0|vendor|product|1.0|id|name|low|dvc=example.com"),
CEFParserFunction.USE_FULL_NAMES, new BooleanExpression(new CommonToken(0), false)
);
final FunctionArgs functionArgs = new FunctionArgs(function, arguments);
final Message message = messageFactory.createMessage("__dummy", "__dummy", DateTime.parse("2010-07-30T16:03:25Z"));
final EvaluationContext evaluationContext = new EvaluationContext(message);
final CEFParserResult result = function.evaluate(functionArgs, evaluationContext);
assertNotNull(result);
assertEquals(0, result.get("cef_version"));
assertEquals("vendor", result.get("device_vendor"));
assertEquals("product", result.get("device_product"));
assertEquals("1.0", result.get("device_version"));
assertEquals("id", result.get("device_event_class_id"));
assertEquals("low", result.get("severity"));
assertEquals("example.com", result.get("dvc"));
assertFalse(result.containsKey("message"));
}
|
@Override
public Path touch(final Path file, final TransferStatus status) throws BackgroundException {
return super.touch(file, status.withChecksum(write.checksum(file, status).compute(new NullInputStream(0L), status)));
}
|
@Test
public void testTouch() throws Exception {
final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory));
final S3TouchFeature feature = new S3TouchFeature(session, new S3AccessControlListFeature(session));
final String filename = new AsciiRandomStringService().random();
assertFalse(feature.isSupported(new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)), filename));
assertTrue(feature.isSupported(container, filename));
final Path test = feature.touch(new Path(container, filename, EnumSet.of(Path.Type.file)), new TransferStatus());
assertNull(test.attributes().getVersionId());
assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(test));
assertEquals(test.attributes(), new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(test));
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(test));
}
|
@NonNull
public String processShownotes() {
String shownotes = rawShownotes;
if (TextUtils.isEmpty(shownotes)) {
Log.d(TAG, "shownotesProvider contained no shownotes. Returning 'no shownotes' message");
shownotes = "<html><head></head><body><p id='apNoShownotes'>" + noShownotesLabel + "</p></body></html>";
}
// replace ASCII line breaks with HTML ones if shownotes don't contain HTML line breaks already
if (!LINE_BREAK_REGEX.matcher(shownotes).find() && !shownotes.contains("<p>")) {
shownotes = shownotes.replace("\n", "<br />");
}
Document document = Jsoup.parse(shownotes);
cleanCss(document);
document.head().appendElement("style").attr("type", "text/css").text(webviewStyle);
addTimecodes(document);
return document.toString();
}
|
@Test
public void testProcessShownotesAddNoTimecodeDuration() {
final String timeStr = "2:11:12";
final int time = 2 * 60 * 60 * 1000 + 11 * 60 * 1000 + 12 * 1000;
String shownotes = "<p> Some test text with a timecode " + timeStr + " here.</p>";
ShownotesCleaner t = new ShownotesCleaner(context, shownotes, time);
String res = t.processShownotes();
Document d = Jsoup.parse(res);
assertEquals("Should not parse time codes that equal duration", 0, d.body().getElementsByTag("a").size());
}
|
public TaskRunHistory getTaskRunHistory() {
return taskRunManager.getTaskRunHistory();
}
|
@Test
public void testForceGC() {
Config.enable_task_history_archive = false;
TaskRunManager taskRunManager = new TaskRunManager();
for (int i = 0; i < 100; i++) {
TaskRunStatus taskRunStatus = new TaskRunStatus();
taskRunStatus.setQueryId("test" + i);
taskRunStatus.setTaskName("test" + i);
taskRunManager.getTaskRunHistory().addHistory(taskRunStatus);
}
Config.task_runs_max_history_number = 20;
taskRunManager.getTaskRunHistory().forceGC();
Assert.assertEquals(20, taskRunManager.getTaskRunHistory().getInMemoryHistory().size());
Config.task_runs_max_history_number = 10000;
Config.enable_task_history_archive = true;
}
|
@Override
public void upgrade() {
if (clusterConfigService.get(MigrationCompleted.class) != null) {
LOG.debug("Migration already completed!");
return;
}
final List<ViewWidgetLimitMigration> widgetLimitMigrations = StreamSupport.stream(this.views.find().spliterator(), false)
.flatMap(document -> {
final String viewId = document.get("_id", ObjectId.class).toHexString();
final Map<String, Document> state = document.get("state", Collections.emptyMap());
return state.entrySet().stream()
.flatMap(entry -> {
final String queryId = entry.getKey();
final List<Document> widgets = entry.getValue().get("widgets", Collections.emptyList());
return EntryStream.of(widgets)
.filter(widget -> "aggregation".equals(widget.getValue().getString("type")))
.flatMap(widgetEntry -> {
final Document widget = widgetEntry.getValue();
final Integer widgetIndex = widgetEntry.getKey();
final Document config = widget.get("config", new Document());
final boolean hasRowLimit = config.containsKey("row_limit");
final boolean hasColumnLimit = config.containsKey("column_limit");
final Optional<Integer> rowLimit = Optional.ofNullable(config.getInteger("row_limit"));
final Optional<Integer> columnLimit = Optional.ofNullable(config.getInteger("column_limit"));
if (widgetIndex != null && (hasRowLimit || hasColumnLimit)) {
return Stream.of(new ViewWidgetLimitMigration(viewId, queryId, widgetIndex, rowLimit, columnLimit));
}
return Stream.empty();
});
});
})
.collect(Collectors.toList());
final List<WriteModel<Document>> operations = widgetLimitMigrations.stream()
.flatMap(widgetMigration -> {
final ImmutableList.Builder<WriteModel<Document>> builder = ImmutableList.builder();
builder.add(
updateView(
widgetMigration.viewId,
doc("$unset", doc(widgetConfigPath(widgetMigration) + ".row_limit", 1))
)
);
builder.add(
updateView(
widgetMigration.viewId,
doc("$set", doc(widgetConfigPath(widgetMigration) + ".row_pivots.$[config].config.limit", widgetMigration.rowLimit.orElse(DEFAULT_LIMIT))),
matchValuePivots
)
);
builder.add(
updateView(
widgetMigration.viewId,
doc("$unset", doc(widgetConfigPath(widgetMigration) + ".column_limit", 1))
)
);
builder.add(
updateView(
widgetMigration.viewId,
doc("$set", doc(widgetConfigPath(widgetMigration) + ".column_pivots.$[config].config.limit", widgetMigration.columnLimit.orElse(DEFAULT_LIMIT))),
matchValuePivots
)
);
return builder.build().stream();
})
.collect(Collectors.toList());
if (!operations.isEmpty()) {
LOG.debug("Updating {} widgets ...", widgetLimitMigrations.size());
this.views.bulkWrite(operations);
}
clusterConfigService.write(new MigrationCompleted(widgetLimitMigrations.size()));
}
|
@Test
@MongoDBFixtures("V20230113095300_MigrateGlobalPivotLimitsToGroupingsInViewsTest_multiplePivots.json")
void migratingMultiplePivots() {
this.migration.upgrade();
assertThat(migrationCompleted().migratedViews()).isEqualTo(4);
final Document document = this.collection.find().first();
final List<Document> widgets = getWidgets(document);
assertThat(rowPivotLimits(widgets.get(0))).containsExactly(3);
assertThat(columnPivotLimits(widgets.get(0))).containsExactly(10);
assertThat(rowPivotLimits(widgets.get(1))).containsExactly(20, null, 20);
assertThat(columnPivotLimits(widgets.get(1))).isEmpty();
assertThat(rowPivotLimits(widgets.get(2))).containsExactly(15, 15, 15);
assertThat(columnPivotLimits(widgets.get(2))).isEmpty();
assertThat(rowPivotLimits(widgets.get(3))).isEmpty();
assertThat(columnPivotLimits(widgets.get(3))).containsExactly(null, 15, 15);
assertThat(migrationCompleted().migratedViews()).isEqualTo(4);
for (Document widget : widgets) {
assertThatFieldsAreUnset(widget);
}
}
|
public static void validate(
FederationPolicyInitializationContext policyContext, String myType)
throws FederationPolicyInitializationException {
if (myType == null) {
throw new FederationPolicyInitializationException(
"The myType parameter" + " should not be null.");
}
if (policyContext == null) {
throw new FederationPolicyInitializationException(
"The FederationPolicyInitializationContext provided is null. Cannot"
+ " reinitialize " + "successfully.");
}
if (policyContext.getFederationStateStoreFacade() == null) {
throw new FederationPolicyInitializationException(
"The FederationStateStoreFacade provided is null. Cannot"
+ " reinitialize successfully.");
}
if (policyContext.getFederationSubclusterResolver() == null) {
throw new FederationPolicyInitializationException(
"The FederationSubclusterResolver provided is null. Cannot"
+ " reinitialize successfully.");
}
if (policyContext.getSubClusterPolicyConfiguration() == null) {
throw new FederationPolicyInitializationException(
"The SubClusterPolicyConfiguration provided is null. Cannot "
+ "reinitialize successfully.");
}
String intendedType =
policyContext.getSubClusterPolicyConfiguration().getType();
if (!myType.equals(intendedType)) {
throw new FederationPolicyInitializationException(
"The FederationPolicyConfiguration carries a type (" + intendedType
+ ") different then mine (" + myType
+ "). Cannot reinitialize successfully.");
}
}
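// Hypothetical call site: a policy implementation validating the supplied context
// against its own type before reinitializing (mirrors the test below).
FederationPolicyInitializationContextValidator.validate(policyContext, this.getClass().getCanonicalName());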
|
@Test(expected = FederationPolicyInitializationException.class)
public void nullResolver() throws Exception {
context.setFederationSubclusterResolver(null);
FederationPolicyInitializationContextValidator.validate(context,
MockPolicyManager.class.getCanonicalName());
}
|
@Transactional
@Cacheable(CACHE_DATABASE_SEARCH)
@CacheEvict(value = CACHE_AVERAGE_REVIEW_RATING, allEntries = true)
public SearchHits<ExtensionSearch> search(ISearchService.Options options) {
// grab all extensions
var matchingExtensions = repositories.findAllActiveExtensions();
// no extensions in the database
if (matchingExtensions.isEmpty()) {
return new SearchHitsImpl<>(0, TotalHitsRelation.OFF, 0f, null, null, Collections.emptyList(), null, null);
}
// exclude namespaces
if(options.namespacesToExclude != null) {
for(var namespaceToExclude : options.namespacesToExclude) {
matchingExtensions = matchingExtensions.filter(extension -> !extension.getNamespace().getName().equals(namespaceToExclude));
}
}
// filter target platform
if(TargetPlatform.isValid(options.targetPlatform)) {
matchingExtensions = matchingExtensions.filter(extension -> extension.getVersions().stream().anyMatch(ev -> ev.getTargetPlatform().equals(options.targetPlatform)));
}
// filter category
if (options.category != null) {
matchingExtensions = matchingExtensions.filter(extension -> {
var latest = repositories.findLatestVersion(extension, null, false, true);
return latest.getCategories().stream().anyMatch(category -> category.equalsIgnoreCase(options.category));
});
}
// filter text
if (options.queryString != null) {
matchingExtensions = matchingExtensions.filter(extension -> {
var latest = repositories.findLatestVersion(extension, null, false, true);
return extension.getName().toLowerCase().contains(options.queryString.toLowerCase())
|| extension.getNamespace().getName().toLowerCase().contains(options.queryString.toLowerCase())
|| (latest.getDescription() != null && latest.getDescription()
.toLowerCase().contains(options.queryString.toLowerCase()))
|| (latest.getDisplayName() != null && latest.getDisplayName()
.toLowerCase().contains(options.queryString.toLowerCase()));
});
}
// sort the results; sortBy is one of:
// 'relevance' | 'timestamp' | 'rating' | 'downloadCount'
Stream<ExtensionSearch> searchEntries;
if("relevance".equals(options.sortBy) || "rating".equals(options.sortBy)) {
var searchStats = new SearchStats(repositories);
searchEntries = matchingExtensions.stream().map(extension -> relevanceService.toSearchEntry(extension, searchStats));
} else {
searchEntries = matchingExtensions.stream().map(extension -> {
var latest = repositories.findLatestVersion(extension, null, false, true);
var targetPlatforms = repositories.findExtensionTargetPlatforms(extension);
return extension.toSearch(latest, targetPlatforms);
});
}
var comparators = new HashMap<>(Map.of(
"relevance", new RelevanceComparator(),
"timestamp", new TimestampComparator(),
"rating", new RatingComparator(),
"downloadCount", new DownloadedCountComparator()
));
var comparator = comparators.get(options.sortBy);
if(comparator != null) {
searchEntries = searchEntries.sorted(comparator);
}
var sortedExtensions = searchEntries.collect(Collectors.toList());
// apply the sort order; sortOrder is one of:
// 'asc' | 'desc'
if ("desc".equals(options.sortOrder)) {
// reverse the order
Collections.reverse(sortedExtensions);
}
// Paging
var totalHits = sortedExtensions.size();
var endIndex = Math.min(sortedExtensions.size(), options.requestedOffset + options.requestedSize);
var startIndex = Math.min(endIndex, options.requestedOffset);
sortedExtensions = sortedExtensions.subList(startIndex, endIndex);
List<SearchHit<ExtensionSearch>> searchHits;
if (sortedExtensions.isEmpty()) {
searchHits = Collections.emptyList();
} else {
// client is interested only in the extension IDs
searchHits = sortedExtensions.stream().map(extensionSearch -> new SearchHit<>(null, null, null, 0.0f, null, null, null, null, null, null, extensionSearch)).collect(Collectors.toList());
}
return new SearchHitsImpl<>(totalHits, TotalHitsRelation.OFF, 0f, null, null, searchHits, null, null);
}
|
@Test
public void testSortByTimeStamp() {
var ext1 = mockExtension("yaml", 3.0, 100, 0, "redhat", List.of("Snippets", "Programming Languages"));
ext1.getVersions().get(0).setTimestamp(LocalDateTime.parse("2021-10-10T00:00"));
var ext2 = mockExtension("java", 4.0, 100, 0, "redhat", List.of("Snippets", "Programming Languages"));
ext2.getVersions().get(0).setTimestamp(LocalDateTime.parse("2021-10-07T00:00"));
var ext3 = mockExtension("openshift", 4.0, 100, 0, "redhat", List.of("Snippets", "Other"));
ext3.getVersions().get(0).setTimestamp(LocalDateTime.parse("2021-10-11T00:00"));
var ext4 = mockExtension("foo", 4.0, 100, 0, "bar", List.of("Other"));
ext4.getVersions().get(0).setTimestamp(LocalDateTime.parse("2021-10-06T00:00"));
Mockito.when(repositories.findAllActiveExtensions()).thenReturn(Streamable.of(List.of(ext1, ext2, ext3, ext4)));
var searchOptions = new ISearchService.Options(null, null, TargetPlatform.NAME_UNIVERSAL, 50, 0, null, "timestamp", false);
var result = search.search(searchOptions);
// all extensions should be there
assertThat(result.getTotalHits()).isEqualTo(4);
// test now the order
var hits = result.getSearchHits();
assertThat(getIdFromExtensionHits(hits, 0)).isEqualTo(getIdFromExtensionName("foo"));
assertThat(getIdFromExtensionHits(hits, 1)).isEqualTo(getIdFromExtensionName("java"));
assertThat(getIdFromExtensionHits(hits, 2)).isEqualTo(getIdFromExtensionName("yaml"));
assertThat(getIdFromExtensionHits(hits, 3)).isEqualTo(getIdFromExtensionName("openshift"));
}
|
public static String getDistrictCodeByIdCard(String idcard) {
int len = idcard.length();
if (len == CHINA_ID_MIN_LENGTH || len == CHINA_ID_MAX_LENGTH) {
return idcard.substring(0, 6);
}
return null;
}
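// Example with a made-up (hypothetical) 18-digit number: the leading six digits
// encode the district.
String district = IdcardUtil.getDistrictCodeByIdCard("110101199003074258"); // "110101"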
|
@Test
public void getDistrictCodeByIdCardTest() {
String codeByIdCard = IdcardUtil.getDistrictCodeByIdCard(ID_18);
assertEquals("321083", codeByIdCard);
}
|
public static Schema getOutputSchema(
Schema inputSchema, FieldAccessDescriptor fieldAccessDescriptor) {
return getOutputSchemaTrackingNullable(inputSchema, fieldAccessDescriptor, false);
}
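// Sketch: projecting a single nested field; the name "nested.field1" and 'inputSchema'
// are assumed for illustration.
FieldAccessDescriptor fad = FieldAccessDescriptor.withFieldNames("nested.field1").resolve(inputSchema);
Schema projected = SelectHelpers.getOutputSchema(inputSchema, fad);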
|
@Test
public void testNullableSchemaArray() {
FieldAccessDescriptor fieldAccessDescriptor1 =
FieldAccessDescriptor.withFieldNames("nestedArray.field1").resolve(NESTED_NULLABLE_SCHEMA);
Schema schema1 = SelectHelpers.getOutputSchema(NESTED_NULLABLE_SCHEMA, fieldAccessDescriptor1);
Schema expectedSchema1 =
Schema.builder().addNullableField("field1", FieldType.array(FieldType.STRING)).build();
assertEquals(expectedSchema1, schema1);
FieldAccessDescriptor fieldAccessDescriptor2 =
FieldAccessDescriptor.withFieldNames("nestedArray.*").resolve(NESTED_NULLABLE_SCHEMA);
Schema schema2 = SelectHelpers.getOutputSchema(NESTED_NULLABLE_SCHEMA, fieldAccessDescriptor2);
Schema expectedSchema2 =
Schema.builder()
.addNullableField("field1", FieldType.array(FieldType.STRING))
.addNullableField("field2", FieldType.array(FieldType.INT32))
.addNullableField("field3", FieldType.array(FieldType.DOUBLE))
.addNullableField("field_extra", FieldType.array(FieldType.STRING))
.build();
assertEquals(expectedSchema2, schema2);
}
|
public int read(final MessageHandler handler)
{
return read(handler, Integer.MAX_VALUE);
}
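// Sketch: bounding the per-poll batch size instead of draining everything with
// Integer.MAX_VALUE; 'process' is a hypothetical consumer method.
int messagesRead = ringBuffer.read(
    (msgTypeId, buffer, index, length) -> process(msgTypeId, buffer, index, length), 10);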
|
@Test
void shouldLimitReadOfMessages()
{
final int msgLength = 16;
final int recordLength = HEADER_LENGTH + msgLength;
final int alignedRecordLength = align(recordLength, ALIGNMENT);
final long head = 0L;
final int headIndex = (int)head;
when(buffer.getLong(HEAD_COUNTER_INDEX)).thenReturn(head);
when(buffer.getInt(typeOffset(headIndex))).thenReturn(MSG_TYPE_ID);
when(buffer.getIntVolatile(lengthOffset(headIndex))).thenReturn(recordLength);
final MutableInteger times = new MutableInteger();
final MessageHandler handler = (msgTypeId, buffer, index, length) -> times.increment();
final int limit = 1;
final int messagesRead = ringBuffer.read(handler, limit);
assertThat(messagesRead, is(1));
assertThat(times.get(), is(1));
final InOrder inOrder = inOrder(buffer);
inOrder.verify(buffer, times(1)).setMemory(headIndex, alignedRecordLength, (byte)0);
inOrder.verify(buffer, times(1)).putLongOrdered(HEAD_COUNTER_INDEX, head + alignedRecordLength);
}
|
int run() {
final Map<String, String> configProps = options.getConfigFile()
.map(Ksql::loadProperties)
.orElseGet(Collections::emptyMap);
final Map<String, String> sessionVariables = options.getVariables();
try (KsqlRestClient restClient = buildClient(configProps)) {
try (Cli cli = cliBuilder.build(
options.getStreamedQueryRowLimit(),
options.getStreamedQueryTimeoutMs(),
options.getOutputFormat(),
restClient)
) {
// Add CLI variables If defined by parameters
cli.addSessionVariables(sessionVariables);
if (options.getExecute().isPresent()) {
return cli.runCommand(options.getExecute().get());
} else if (options.getScriptFile().isPresent()) {
final File scriptFile = new File(options.getScriptFile().get());
if (scriptFile.exists() && scriptFile.isFile()) {
return cli.runScript(scriptFile.getPath());
} else {
throw new KsqlException("No such script file: " + scriptFile.getPath());
}
} else {
return cli.runInteractively();
}
}
}
}
|
@Test
public void shouldSupportSslConfigInConfigFile() throws Exception {
// Given:
givenConfigFile(
"ssl.truststore.location=some/path" + System.lineSeparator()
+ "ssl.truststore.password=letmein"
);
// When:
ksql.run();
// Then:
verify(clientBuilder).build(any(), any(), eq(ImmutableMap.of(
"ssl.truststore.location", "some/path",
"ssl.truststore.password", "letmein"
)), any(), any());
}
|
@Override
public int length() {
return 2;
}
|
@Test
public void testLength() {
System.out.println("length");
GammaDistribution instance = new GammaDistribution(3, 2.1);
instance.rand();
assertEquals(2, instance.length());
}
|
public UniVocityFixedDataFormat setFieldLengths(int[] fieldLengths) {
this.fieldLengths = fieldLengths;
return this;
}
|
@Test
public void shouldConfigureNormalizedLineSeparator() {
UniVocityFixedDataFormat dataFormat = new UniVocityFixedDataFormat()
.setFieldLengths(new int[] { 1, 2, 3 })
.setNormalizedLineSeparator('n');
assertEquals(Character.valueOf('n'), dataFormat.getNormalizedLineSeparator());
assertEquals('n', dataFormat.createAndConfigureWriterSettings().getFormat().getNormalizedNewline());
assertEquals('n', dataFormat.createAndConfigureParserSettings().getFormat().getNormalizedNewline());
}
|
@Override
public URL getApiRoute(String apiRouteBase) throws MalformedURLException {
return new URL(
apiRouteBase + registryEndpointRequestProperties.getImageName() + "/manifests/" + imageTag);
}
|
@Test
public void testApiRoute() throws MalformedURLException {
Assert.assertEquals(
new URL("http://someApiBase/someImageName/manifests/test-image-tag"),
testManifestPusher.getApiRoute("http://someApiBase/"));
}
|
public static BigDecimal jsToBigNumber( Object value, String classType ) {
if ( classType.equalsIgnoreCase( JS_UNDEFINED ) ) {
return null;
} else if ( classType.equalsIgnoreCase( JS_NATIVE_NUM ) ) {
Number nb = Context.toNumber( value );
return BigDecimal.valueOf( nb.doubleValue() );
} else if ( classType.equalsIgnoreCase( JS_NATIVE_JAVA_OBJ ) ) {
// Is it a BigDecimal class?
return convertNativeJavaToBigDecimal( value );
} else if ( classType.equalsIgnoreCase( "java.lang.Byte" ) ) {
return BigDecimal.valueOf( ( (Byte) value ).longValue() );
} else if ( classType.equalsIgnoreCase( "java.lang.Short" ) ) {
return BigDecimal.valueOf( ( (Short) value ).longValue() );
} else if ( classType.equalsIgnoreCase( "java.lang.Integer" ) ) {
return BigDecimal.valueOf( ( (Integer) value ).longValue() );
} else if ( classType.equalsIgnoreCase( "java.lang.Long" ) ) {
return BigDecimal.valueOf( ( (Long) value ).longValue() );
} else if ( classType.equalsIgnoreCase( "java.lang.Double" ) ) {
return BigDecimal.valueOf( ( (Double) value ).doubleValue() );
} else if ( classType.equalsIgnoreCase( "java.lang.String" ) ) {
return BigDecimal.valueOf( Long.parseLong( (String) value ) );
} else {
throw new UnsupportedOperationException( "JavaScript conversion to BigNumber not implemented for " + classType );
}
}
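// Hypothetical examples of the dispatch above:
BigDecimal fromDouble = JavaScriptUtils.jsToBigNumber( 1.0d, "java.lang.Double" ); // 1.0
BigDecimal fromString = JavaScriptUtils.jsToBigNumber( "42", "java.lang.String" ); // 42 (parsed as a long)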
|
@Test
public void jsToBigNumber_Double() throws Exception {
assertEquals( BIG_DECIMAL_ONE_DOT_ZERO, JavaScriptUtils.jsToBigNumber( 1.0, Double.class.getName() ) );
}
|
@Override
public void apply(IntentOperationContext<FlowRuleIntent> context) {
Optional<IntentData> toUninstall = context.toUninstall();
Optional<IntentData> toInstall = context.toInstall();
if (toInstall.isPresent() && toUninstall.isPresent()) {
Intent intentToInstall = toInstall.get().intent();
if (requireNonDisruptive(intentToInstall) && INSTALLED.equals(toUninstall.get().state())) {
reallocate(context);
return;
}
}
if (!toInstall.isPresent() && !toUninstall.isPresent()) {
// Nothing to do.
intentInstallCoordinator.intentInstallSuccess(context);
return;
}
List<FlowRuleIntent> uninstallIntents = context.intentsToUninstall();
List<FlowRuleIntent> installIntents = context.intentsToInstall();
List<FlowRule> flowRulesToUninstall;
List<FlowRule> flowRulesToInstall;
if (toUninstall.isPresent()) {
// Remove tracked resource from both Intent and installable Intents.
trackIntentResources(toUninstall.get(), uninstallIntents, REMOVE);
// Retrieves all flow rules from all flow rule Intents.
flowRulesToUninstall = uninstallIntents.stream()
.map(FlowRuleIntent::flowRules)
.flatMap(Collection::stream)
.filter(flowRule -> flowRuleService.getFlowEntry(flowRule) != null)
.collect(Collectors.toList());
} else {
// No flow rules to be uninstalled.
flowRulesToUninstall = Collections.emptyList();
}
if (toInstall.isPresent()) {
// Track resource from both Intent and installable Intents.
trackIntentResources(toInstall.get(), installIntents, ADD);
// Retrieves all flow rules from all flow rule Intents.
flowRulesToInstall = installIntents.stream()
.map(FlowRuleIntent::flowRules)
.flatMap(Collection::stream)
.collect(Collectors.toList());
} else {
// No flow rules to be installed.
flowRulesToInstall = Collections.emptyList();
}
List<FlowRule> flowRuleToModify;
List<FlowRule> dontTouch;
// If both uninstall/install list contained equal (=match conditions are equal) FlowRules,
// omit it from remove list, since it will/should be overwritten by install
flowRuleToModify = flowRulesToInstall.stream()
.filter(flowRule -> flowRulesToUninstall.stream().anyMatch(flowRule::equals))
.collect(Collectors.toList());
// If both contained exactMatch-ing FlowRules, remove from both list,
// since it will result in no-op.
dontTouch = flowRulesToInstall.stream()
.filter(flowRule -> flowRulesToUninstall.stream().anyMatch(flowRule::exactMatch))
.collect(Collectors.toList());
flowRulesToUninstall.removeAll(flowRuleToModify);
flowRulesToUninstall.removeAll(dontTouch);
flowRulesToInstall.removeAll(flowRuleToModify);
flowRulesToInstall.removeAll(dontTouch);
flowRuleToModify.removeAll(dontTouch);
if (flowRulesToInstall.isEmpty() && flowRulesToUninstall.isEmpty() && flowRuleToModify.isEmpty()) {
// There is no flow rules to install/uninstall
intentInstallCoordinator.intentInstallSuccess(context);
return;
}
FlowRuleOperations.Builder builder = FlowRuleOperations.builder();
// Add flows
flowRulesToInstall.forEach(builder::add);
// Modify flows
flowRuleToModify.forEach(builder::modify);
// Remove flows
flowRulesToUninstall.forEach(builder::remove);
FlowRuleOperationsContext flowRuleOperationsContext = new FlowRuleOperationsContext() {
@Override
public void onSuccess(FlowRuleOperations ops) {
intentInstallCoordinator.intentInstallSuccess(context);
}
@Override
public void onError(FlowRuleOperations ops) {
intentInstallCoordinator.intentInstallFailed(context);
}
};
FlowRuleOperations operations = builder.build(flowRuleOperationsContext);
log.debug("applying intent {} -> {} with {} rules: {}",
toUninstall.map(x -> x.key().toString()).orElse("<empty>"),
toInstall.map(x -> x.key().toString()).orElse("<empty>"),
operations.stages().stream().mapToLong(Set::size).sum(),
operations.stages());
flowRuleService.apply(operations);
}
|
@Test
public void testUninstallAndInstallSame() {
List<Intent> intentsToInstall = createFlowRuleIntents();
List<Intent> intentsToUninstall = intentsToInstall;
IntentData toInstall = new IntentData(createP2PIntent(),
IntentState.INSTALLING,
new WallClockTimestamp());
toInstall = IntentData.compiled(toInstall, intentsToInstall);
IntentData toUninstall = new IntentData(createP2PIntent(),
IntentState.INSTALLED,
new WallClockTimestamp());
toUninstall = IntentData.compiled(toUninstall, intentsToUninstall);
IntentOperationContext<FlowRuleIntent> operationContext;
IntentInstallationContext context = new IntentInstallationContext(toUninstall, toInstall);
operationContext = new IntentOperationContext(intentsToUninstall, intentsToInstall, context);
flowRuleService.load(operationContext.intentsToUninstall());
installer.apply(operationContext);
IntentOperationContext successContext = intentInstallCoordinator.successContext;
assertEquals(successContext, operationContext);
assertEquals(0, flowRuleService.flowRulesRemove.size());
assertEquals(0, flowRuleService.flowRulesAdd.size());
assertEquals(0, flowRuleService.flowRulesModify.size());
}
|
public ConsumerBuilder queues(Integer queues) {
this.queues = queues;
return getThis();
}
|
@Test
void queues() {
ConsumerBuilder builder = ConsumerBuilder.newBuilder();
builder.queues(200);
Assertions.assertEquals(200, builder.build().getQueues());
}
|
static CatalogLoader createCatalogLoader(
String name, Map<String, String> properties, Configuration hadoopConf) {
String catalogImpl = properties.get(CatalogProperties.CATALOG_IMPL);
if (catalogImpl != null) {
String catalogType = properties.get(ICEBERG_CATALOG_TYPE);
Preconditions.checkArgument(
catalogType == null,
"Cannot create catalog %s, both catalog-type and catalog-impl are set: catalog-type=%s, catalog-impl=%s",
name,
catalogType,
catalogImpl);
return CatalogLoader.custom(name, properties, hadoopConf, catalogImpl);
}
String catalogType = properties.getOrDefault(ICEBERG_CATALOG_TYPE, ICEBERG_CATALOG_TYPE_HIVE);
switch (catalogType.toLowerCase(Locale.ENGLISH)) {
case ICEBERG_CATALOG_TYPE_HIVE:
// The values of properties 'uri', 'warehouse' and 'hive-conf-dir' are allowed to be null;
// in that case they fall back to the values parsed from the hadoop configuration
// loaded from the classpath.
String hiveConfDir = properties.get(HIVE_CONF_DIR);
String hadoopConfDir = properties.get(HADOOP_CONF_DIR);
Configuration newHadoopConf = mergeHiveConf(hadoopConf, hiveConfDir, hadoopConfDir);
return CatalogLoader.hive(name, newHadoopConf, properties);
case ICEBERG_CATALOG_TYPE_HADOOP:
return CatalogLoader.hadoop(name, hadoopConf, properties);
case ICEBERG_CATALOG_TYPE_REST:
return CatalogLoader.rest(name, hadoopConf, properties);
default:
throw new UnsupportedOperationException(
"Unknown catalog-type: " + catalogType + " (Must be 'hive', 'hadoop' or 'rest')");
}
}
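// Sketch: selecting the Hadoop catalog via properties (mirrors the test below); the
// warehouse location is a placeholder.
Map<String, String> props = new HashMap<>();
props.put(ICEBERG_CATALOG_TYPE, ICEBERG_CATALOG_TYPE_HADOOP);
props.put(CatalogProperties.WAREHOUSE_LOCATION, "hdfs://nn:8020/warehouse/path");
CatalogLoader loader = createCatalogLoader("my_catalog", props, new Configuration());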
|
@Test
public void testCreateCatalogHadoop() {
String catalogName = "hadoopCatalog";
props.put(
FlinkCatalogFactory.ICEBERG_CATALOG_TYPE, FlinkCatalogFactory.ICEBERG_CATALOG_TYPE_HADOOP);
Catalog catalog =
FlinkCatalogFactory.createCatalogLoader(catalogName, props, new Configuration())
.loadCatalog();
assertThat(catalog).isNotNull().isInstanceOf(HadoopCatalog.class);
}
|
@Override
public KsMaterializedQueryResult<WindowedRow> get(
final GenericKey key,
final int partition,
final Range<Instant> windowStartBounds,
final Range<Instant> windowEndBounds,
final Optional<Position> position
) {
try {
final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store = stateStore
.store(QueryableStoreTypes.timestampedWindowStore(), partition);
final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it
= cacheBypassFetcher.fetch(store, key, lower, upper)) {
final Builder<WindowedRow> builder = ImmutableList.builder();
while (it.hasNext()) {
final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
final Instant windowStart = Instant.ofEpochMilli(next.key);
if (!windowStartBounds.contains(windowStart)) {
continue;
}
final Instant windowEnd = windowStart.plus(windowSize);
if (!windowEndBounds.contains(windowEnd)) {
continue;
}
final TimeWindow window =
new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
final WindowedRow row = WindowedRow.of(
stateStore.schema(),
new Windowed<>(key, window),
next.value.value(),
next.value.timestamp()
);
builder.add(row);
}
return KsMaterializedQueryResult.rowIterator(builder.build().iterator());
}
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
}
|
@Test
public void shouldReturnEmptyIfKeyNotPresent() {
// When:
final Iterator<WindowedRow> rowIterator =
table.get(A_KEY, PARTITION, WINDOW_START_BOUNDS, WINDOW_END_BOUNDS).rowIterator;
// Then:
assertThat(rowIterator.hasNext(), is(false));
}
|
@Override
public ObjectNode encode(MappingValue value, CodecContext context) {
checkNotNull(value, "Mapping value cannot be null");
final ObjectNode result = context.mapper().createObjectNode();
final ArrayNode jsonTreatments = result.putArray(TREATMENTS);
final JsonCodec<MappingTreatment> treatmentCodec =
context.codec(MappingTreatment.class);
final JsonCodec<MappingAction> actionCodec =
context.codec(MappingAction.class);
for (final MappingTreatment treatment : value.treatments()) {
jsonTreatments.add(treatmentCodec.encode(treatment, context));
}
result.set(ACTION, actionCodec.encode(value.action(), context));
return result;
}
|
@Test
public void testMappingValueEncode() {
MappingInstruction unicastWeight = MappingInstructions.unicastWeight(UNICAST_WEIGHT);
MappingInstruction unicastPriority = MappingInstructions.unicastPriority(UNICAST_PRIORITY);
MappingInstruction multicastWeight = MappingInstructions.multicastWeight(MULTICAST_WEIGHT);
MappingInstruction multicastPriority = MappingInstructions.multicastPriority(MULTICAST_PRIORITY);
MappingAddress address = MappingAddresses.ipv4MappingAddress(IPV4_PREFIX);
MappingTreatment treatment = DefaultMappingTreatment.builder()
.add(unicastWeight)
.add(unicastPriority)
.add(multicastWeight)
.add(multicastPriority)
.withAddress(address)
.build();
MappingAction action = MappingActions.noAction();
MappingValue value = DefaultMappingValue.builder()
.add(treatment)
.withAction(action)
.build();
ObjectNode valueJson = valueCodec.encode(value, context);
assertThat(valueJson, MappingValueJsonMatcher.matchesMappingValue(value));
}
|
@Override
public ComponentCreationData createProjectAndBindToDevOpsPlatform(DbSession dbSession, CreationMethod creationMethod, Boolean monorepo, @Nullable String projectKey,
@Nullable String projectName) {
String pat = findPersonalAccessTokenOrThrow(dbSession, almSettingDto);
String url = requireNonNull(almSettingDto.getUrl(), "DevOps Platform url cannot be null");
checkArgument(devOpsProjectDescriptor.projectIdentifier() != null, "DevOps Project Identifier cannot be null for Azure DevOps");
GsonAzureRepo repo = fetchAzureDevOpsProject(url, pat, devOpsProjectDescriptor.projectIdentifier(), devOpsProjectDescriptor.repositoryIdentifier());
ComponentCreationData componentCreationData = projectCreator.createProject(
dbSession,
getProjectKey(projectKey, repo),
getProjectName(projectName, repo),
repo.getDefaultBranchName(),
creationMethod);
ProjectDto projectDto = Optional.ofNullable(componentCreationData.projectDto()).orElseThrow();
createProjectAlmSettingDto(dbSession, repo, projectDto, almSettingDto, monorepo);
return componentCreationData;
}
|
@Test
void createProjectAndBindToDevOpsPlatform_projectIdentifierIsNull_shouldThrow() {
mockPatForUser();
lenient().when(devOpsProjectDescriptor.projectIdentifier()).thenReturn(null);
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> underTest.createProjectAndBindToDevOpsPlatform(mock(DbSession.class), CreationMethod.ALM_IMPORT_API, false, null, null))
.withMessage("DevOps Project Identifier cannot be null for Azure DevOps");
}
|
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
if (chatMessage.getType() != ChatMessageType.TRADE
&& chatMessage.getType() != ChatMessageType.GAMEMESSAGE
&& chatMessage.getType() != ChatMessageType.SPAM
&& chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION)
{
return;
}
String message = chatMessage.getMessage();
Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
if (matcher.find())
{
final String boss = matcher.group("boss");
final int kc = Integer.parseInt(matcher.group("kc"));
final String pre = matcher.group("pre");
final String post = matcher.group("post");
if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post))
{
unsetKc(boss);
return;
}
String renamedBoss = KILLCOUNT_RENAMES
.getOrDefault(boss, boss)
// The config service doesn't support keys with colons in them
.replace(":", "");
if (boss != renamedBoss)
{
// Unset old TOB kc
unsetKc(boss);
unsetPb(boss);
unsetKc(boss.replace(":", "."));
unsetPb(boss.replace(":", "."));
// Unset old story mode
unsetKc("Theatre of Blood Story Mode");
unsetPb("Theatre of Blood Story Mode");
}
setKc(renamedBoss, kc);
// We either already have the pb, or need to remember the boss for the upcoming pb
if (lastPb > -1)
{
log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);
if (renamedBoss.contains("Theatre of Blood"))
{
// TOB team size isn't sent in the kill message, but can be computed from varbits
int tobTeamSize = tobTeamSize();
lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
}
else if (renamedBoss.contains("Tombs of Amascut"))
{
// TOA team size isn't sent in the kill message, but can be computed from varbits
int toaTeamSize = toaTeamSize();
lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
}
final double pb = getPb(renamedBoss);
// If a raid with a team size, only update the pb if it is lower than the existing pb
// so that the pb is the overall lowest of any team size
if (lastTeamSize == null || pb == 0 || lastPb < pb)
{
log.debug("Setting overall pb (old: {})", pb);
setPb(renamedBoss, lastPb);
}
if (lastTeamSize != null)
{
log.debug("Setting team size pb: {}", lastTeamSize);
setPb(renamedBoss + " " + lastTeamSize, lastPb);
}
lastPb = -1;
lastTeamSize = null;
}
else
{
lastBossKill = renamedBoss;
lastBossTime = client.getTickCount();
}
return;
}
matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
if (matcher.find())
{
final int oldWins = getKc("Duel Arena Wins");
final int wins = matcher.group(2).equals("one") ? 1 :
Integer.parseInt(matcher.group(2).replace(",", ""));
final String result = matcher.group(1);
int winningStreak = getKc("Duel Arena Win Streak");
int losingStreak = getKc("Duel Arena Lose Streak");
if (result.equals("won") && wins > oldWins)
{
losingStreak = 0;
winningStreak += 1;
}
else if (result.equals("were defeated"))
{
losingStreak += 1;
winningStreak = 0;
}
else
{
log.warn("unrecognized duel streak chat message: {}", message);
}
setKc("Duel Arena Wins", wins);
setKc("Duel Arena Win Streak", winningStreak);
setKc("Duel Arena Lose Streak", losingStreak);
}
matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
if (matcher.find())
{
int losses = matcher.group(1).equals("one") ? 1 :
Integer.parseInt(matcher.group(1).replace(",", ""));
setKc("Duel Arena Losses", losses);
}
matcher = KILL_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = NEW_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = HS_PB_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group("floor"));
String floortime = matcher.group("floortime");
String floorpb = matcher.group("floorpb");
String otime = matcher.group("otime");
String opb = matcher.group("opb");
String pb = MoreObjects.firstNonNull(floorpb, floortime);
setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
if (otime != null)
{
pb = MoreObjects.firstNonNull(opb, otime);
setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
}
}
matcher = HS_KC_FLOOR_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group(1));
int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
setKc("Hallowed Sepulchre Floor " + floor, kc);
}
matcher = HS_KC_GHC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hallowed Sepulchre", kc);
}
matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hunter Rumours", kc);
}
if (lastBossKill != null && lastBossTime != client.getTickCount())
{
lastBossKill = null;
lastBossTime = -1;
}
matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
if (matcher.find())
{
String item = matcher.group(1);
int petId = findPet(item);
if (petId != -1)
{
final List<Integer> petList = new ArrayList<>(getPetList());
if (!petList.contains(petId))
{
log.debug("New pet added: {}/{}", item, petId);
petList.add(petId);
setPetList(petList);
}
}
}
matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1));
setKc("Guardians of the Rift", kc);
}
}
|
@Test
public void testHsGhcKc()
{
ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "You have opened the Grand Hallowed Coffin <col=ff0000>1,542</col> times!", null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration("killcount", "hallowed sepulchre", 1542);
}
|
String loadAll(int n) {
return loadAllQueries.computeIfAbsent(n, loadAllFactory);
}
|
@Test
public void testLoadAllIsQuoted() {
Queries queries = new Queries(mapping, idColumn, columnMetadata);
String result = queries.loadAll(2);
assertEquals("SELECT * FROM \"mymapping\" WHERE \"id\" IN (?, ?)", result);
}
|
@Udf(description = "Returns a new string with all matches of regexp in str replaced with newStr")
public String regexpReplace(
@UdfParameter(
description = "The source string. If null, then function returns null.") final String str,
@UdfParameter(
description = "The regexp to match."
+ " If null, then function returns null.") final String regexp,
@UdfParameter(
description = "The string to replace the matches with."
+ " If null, then function returns null.") final String newStr) {
if (str == null || regexp == null || newStr == null) {
return null;
}
try {
return str.replaceAll(regexp, newStr);
} catch (PatternSyntaxException e) {
throw new KsqlFunctionException("Invalid regular expression pattern: " + regexp, e);
}
}
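// Hypothetical examples; replacements follow java.util.regex semantics, so
// $1-style backreferences work:
udf.regexpReplace("aabbcc", "b+", "-");                              // "aa-cc"
udf.regexpReplace("2023-01-02", "(\\d+)-(\\d+)-(\\d+)", "$3/$2/$1"); // "02/01/2023"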
|
@Test
public void shouldHandleNull() {
assertThat(udf.regexpReplace(null, "foo", "bar"), isEmptyOrNullString());
assertThat(udf.regexpReplace("foo", null, "bar"), isEmptyOrNullString());
assertThat(udf.regexpReplace("foo", "bar", null), isEmptyOrNullString());
}
|
public String transform() throws ScanException {
StringBuilder stringBuilder = new StringBuilder();
compileNode(node, stringBuilder, new Stack<Node>());
return stringBuilder.toString();
}
|
@Test
public void loneColonShouldReadLikeAnyOtherCharacter() throws ScanException {
String input = "java:comp/env/jdbc/datasource";
Node node = makeNode(input);
NodeToStringTransformer nodeToStringTransformer = new NodeToStringTransformer(node, propertyContainer0);
Assertions.assertEquals(input, nodeToStringTransformer.transform());
}
|
public void removeColdDataFlowCtrGroupConfig(final String addr, final String consumerGroup, final long timeoutMillis)
throws RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException, InterruptedException, MQBrokerException, UnsupportedEncodingException {
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.REMOVE_COLD_DATA_FLOW_CTR_CONFIG, null);
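// An empty consumer group is treated as a no-op; the request is only sent when a group is supplied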
if (consumerGroup != null && consumerGroup.length() > 0) {
request.setBody(consumerGroup.getBytes(MixAll.DEFAULT_CHARSET));
RemotingCommand response = this.remotingClient.invokeSync(
MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr), request, timeoutMillis);
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
return;
}
default:
break;
}
throw new MQBrokerException(response.getCode(), response.getRemark());
}
}
|
@Test
public void testRemoveColdDataFlowCtrGroupConfig() throws RemotingException, InterruptedException, MQBrokerException, UnsupportedEncodingException {
mockInvokeSync();
mqClientAPI.removeColdDataFlowCtrGroupConfig(defaultBrokerAddr, "", defaultTimeout);
}
|
@Override
public final Set<Entry<K, V>> entrySet() {
return delegate.entrySet();
}
|
@Test
public void requireThatSingletonEntrySetIteratorNextThrowsIfInvokedMoreThanOnce() {
LazyMap<String, String> map = newSingletonMap("foo", "bar");
Iterator<Map.Entry<String, String>> it = map.entrySet().iterator();
it.next();
try {
it.next();
fail();
} catch (NoSuchElementException e) {
}
try {
it.next();
fail();
} catch (NoSuchElementException e) {
}
}
|
@Override
@Nullable
public Object convert(String value) {
if (value == null || value.isEmpty()) {
return null;
}
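// Parse the value as a natural-language date expression, evaluated in the converter's time zone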
final Parser parser = new Parser(timeZone.toTimeZone());
final List<DateGroup> r = parser.parse(value);
if (r.isEmpty() || r.get(0).getDates().isEmpty()) {
return null;
}
return new DateTime(r.get(0).getDates().get(0), timeZone);
}
|
@Test
public void testConvert() throws Exception {
Converter c = new FlexibleDateConverter(Collections.<String, Object>emptyMap());
assertNull(c.convert(null));
assertNull(c.convert(""));
assertNull(c.convert("foo"));
// Using startsWith here to avoid time zone problems in tests.
assertTrue(c.convert("2014-3-12").toString().startsWith("2014-03-12T"));
assertTrue(c.convert("2014-3-12 12:27").toString().startsWith("2014-03-12T12:27:00.000"));
assertTrue(c.convert("Mar 12").toString().contains("-03-12T"));
assertTrue(c.convert("Mar 12 2pm").toString().contains("-03-12T14:00:00.000"));
assertTrue(c.convert("Mar 12 14:45:38").toString().contains("-03-12T14:45:38.000"));
assertTrue(c.convert("Mar 2 13:48:18").toString().contains("-03-02T13:48:18.000"));
}
|
public static List<UpdateRequirement> forUpdateTable(
TableMetadata base, List<MetadataUpdate> metadataUpdates) {
Preconditions.checkArgument(null != base, "Invalid table metadata: null");
Preconditions.checkArgument(null != metadataUpdates, "Invalid metadata updates: null");
Builder builder = new Builder(base, false);
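// Every metadata update must be validated against the same table UUID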
builder.require(new UpdateRequirement.AssertTableUUID(base.uuid()));
metadataUpdates.forEach(builder::update);
return builder.build();
}
|
@Test
public void addSchemaFailure() {
when(metadata.lastColumnId()).thenReturn(2);
when(updated.lastColumnId()).thenReturn(3);
List<UpdateRequirement> requirements =
UpdateRequirements.forUpdateTable(
metadata,
ImmutableList.of(
new MetadataUpdate.AddSchema(new Schema(), 1),
new MetadataUpdate.AddSchema(new Schema(), 2),
new MetadataUpdate.AddSchema(new Schema(), 3)));
assertThatThrownBy(() -> requirements.forEach(req -> req.validate(updated)))
.isInstanceOf(CommitFailedException.class)
.hasMessage("Requirement failed: last assigned field id changed: expected id 2 != 3");
}
|
public static String getOperatingSystemCompleteName() {
return OS_COMPLETE_NAME;
}
|
@Test
@EnabledOnOs(OS.MAC)
public void shouldGetCompleteNameOnMac() {
assertThat(SystemInfo.getOperatingSystemCompleteName()).matches("macOS [0-9.]+ \\(.*\\)");
}
|
public static BoolQueryBuilder boolQuery() {
return new BoolQueryBuilder();
}
|
@Test
public void testBoolQuery() throws Exception {
QueryBuilders.QueryBuilder q1 = QueryBuilders.boolQuery()
.must(QueryBuilders.termQuery("k", "aaa"));
assertEquals("{\"bool\":{\"must\":{\"term\":{\"k\":\"aaa\"}}}}",
toJson(q1));
QueryBuilders.QueryBuilder q2 = QueryBuilders.boolQuery()
.must(QueryBuilders.termQuery("k1", "aaa")).must(QueryBuilders.termQuery("k2", "bbb"));
assertEquals("{\"bool\":{\"must\":[{\"term\":{\"k1\":\"aaa\"}},{\"term\":{\"k2\":\"bbb\"}}]}}",
toJson(q2));
QueryBuilders.QueryBuilder q3 = QueryBuilders.boolQuery()
.mustNot(QueryBuilders.termQuery("k", "fff"));
assertEquals("{\"bool\":{\"must_not\":{\"term\":{\"k\":\"fff\"}}}}",
toJson(q3));
QueryBuilders.QueryBuilder q4 = QueryBuilders.rangeQuery("k1").lt(200).gt(-200);
QueryBuilders.QueryBuilder q5 = QueryBuilders.termsQuery("k2", Arrays.asList("aaa", "bbb", "ccc"));
QueryBuilders.QueryBuilder q6 = QueryBuilders.boolQuery().must(q4).should(q5);
assertEquals(
"{\"bool\":{\"must\":{\"range\":{\"k1\":{\"gt\":-200,\"lt\":200}}}," +
"\"should\":{\"terms\":{\"k2\":[\"aaa\",\"bbb\",\"ccc\"]}}}}",
toJson(q6));
assertEquals(
"{\"bool\":{\"filter\":[{\"range\":{\"k1\":{\"gt\":-200,\"lt\":200}}}," +
"{\"terms\":{\"k2\":[\"aaa\",\"bbb\",\"ccc\"]}}]}}",
toJson(QueryBuilders.boolQuery().filter(q4).filter(q5)));
assertEquals(
"{\"bool\":{\"filter\":{\"range\":{\"k1\":{\"gt\":-200,\"lt\":200}}}," +
"\"must_not\":{\"terms\":{\"k2\":[\"aaa\",\"bbb\",\"ccc\"]}}}}",
toJson(QueryBuilders.boolQuery().filter(q4).mustNot(q5)));
}
|
@Deprecated
public static void writeMetadataFile(Configuration configuration, Path outputPath, List<Footer> footers)
throws IOException {
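// Delegates to the JobSummaryLevel-aware overload, writing the full summary metadata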
writeMetadataFile(configuration, outputPath, footers, JobSummaryLevel.ALL);
}
|
@Test
public void testMetaDataFile() throws Exception {
File testDir = temp.newFolder();
Path testDirPath = new Path(testDir.toURI());
Configuration configuration = getTestConfiguration();
final FileSystem fs = testDirPath.getFileSystem(configuration);
enforceEmptyDir(configuration, testDirPath);
MessageType schema = MessageTypeParser.parseMessageType(
"message m { required group a {required binary b;} required group c { required int64 d; }}");
createFile(configuration, new Path(testDirPath, "part0"), schema);
createFile(configuration, new Path(testDirPath, "part1"), schema);
createFile(configuration, new Path(testDirPath, "part2"), schema);
FileStatus outputStatus = fs.getFileStatus(testDirPath);
List<Footer> footers = ParquetFileReader.readFooters(configuration, outputStatus, false);
validateFooters(footers);
ParquetFileWriter.writeMetadataFile(configuration, testDirPath, footers, JobSummaryLevel.ALL);
footers = ParquetFileReader.readFooters(configuration, outputStatus, false);
validateFooters(footers);
footers = ParquetFileReader.readFooters(configuration, fs.getFileStatus(new Path(testDirPath, "part0")), false);
assertEquals(1, footers.size());
final FileStatus metadataFile =
fs.getFileStatus(new Path(testDirPath, ParquetFileWriter.PARQUET_METADATA_FILE));
final FileStatus metadataFileLight =
fs.getFileStatus(new Path(testDirPath, ParquetFileWriter.PARQUET_COMMON_METADATA_FILE));
final List<Footer> metadata = ParquetFileReader.readSummaryFile(configuration, metadataFile);
validateFooters(metadata);
footers = ParquetFileReader.readAllFootersInParallelUsingSummaryFiles(
configuration, Arrays.asList(fs.listStatus(testDirPath, HiddenFileFilter.INSTANCE)), false);
validateFooters(footers);
fs.delete(metadataFile.getPath(), false);
fs.delete(metadataFileLight.getPath(), false);
footers = ParquetFileReader.readAllFootersInParallelUsingSummaryFiles(
configuration, Arrays.asList(fs.listStatus(testDirPath)), false);
validateFooters(footers);
}
|
@Override
public String getContextPath() {
return CONTEXT_PATH;
}
|
@Test
public void testGetContextPath() {
assertEquals( "/kettle/registerPackage", servlet.getContextPath() );
}
|
public static PathMatcherPredicate matches(final List<String> patterns) {
return new PathMatcherPredicate(null, patterns);
}
|
@Test
void shouldMatchAllGivenRecursiveGlobExpressionAndNoBasePath() {
// Given
List<Path> paths = Stream.of("/base/test.txt", "/base/sub/dir/test.txt").map(Path::of).toList();
PathMatcherPredicate predicate = PathMatcherPredicate.matches(List.of("**/*.txt"));
// When
List<Path> filtered = paths.stream().filter(predicate).toList();
// Then
assertEquals(paths, filtered);
}
|
@Override
public CompletableFuture<V> exceptionally(Function<Throwable, ? extends V> fn) {
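// Route recovery through handleAsync so the adapter runs on the default executor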
return future.handleAsync(new ExceptionallyAdapter(fn), defaultExecutor());
}
|
@Test
public void exceptionally() {
CompletableFuture<String> nextStage = delegatingFuture.exceptionally(t -> "value2");
invocationFuture.completeExceptionally(new IllegalStateException());
assertTrueEventually(() -> assertTrue(nextStage.isDone()));
assertEquals("value2", nextStage.join());
}
|
public String getOriSchemaName() {
return oriSchemaName;
}
|
@Test
public void getOriSchemaNameOutputNull() {
// Arrange
final DdlResult objectUnderTest = new DdlResult();
// Act
final String actual = objectUnderTest.getOriSchemaName();
// Assert result
Assert.assertNull(actual);
}
|
public DoubleArrayAsIterable usingExactEquality() {
return new DoubleArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
}
|
@Test
public void usingExactEquality_containsAtLeast_primitiveDoubleArray_inOrder_success() {
assertThat(array(1.1, 2.2, 3.3))
.usingExactEquality()
.containsAtLeast(array(1.1, 2.2))
.inOrder();
}
|
@ProcessElement
public void processElement(OutputReceiver<InitialPipelineState> receiver) throws IOException {
LOG.info(daoFactory.getStreamTableDebugString());
LOG.info(daoFactory.getMetadataTableDebugString());
LOG.info("ChangeStreamName: " + daoFactory.getChangeStreamName());
boolean resume = false;
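// A non-null DetectNewPartitionsState means a previous pipeline already ran with this change stream name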
DetectNewPartitionsState detectNewPartitionsState =
daoFactory.getMetadataTableDao().readDetectNewPartitionsState();
switch (existingPipelineOptions) {
case RESUME_OR_NEW:
// perform resumption.
if (detectNewPartitionsState != null) {
resume = true;
startTime = detectNewPartitionsState.getWatermark();
LOG.info("Resuming from previous pipeline with low watermark of {}", startTime);
} else {
LOG.info(
"Attempted to resume, but previous watermark does not exist, starting at {}",
startTime);
}
break;
case RESUME_OR_FAIL:
// perform resumption.
if (detectNewPartitionsState != null) {
resume = true;
startTime = detectNewPartitionsState.getWatermark();
LOG.info("Resuming from previous pipeline with low watermark of {}", startTime);
} else {
LOG.error("Previous pipeline with the same change stream name doesn't exist, stopping");
return;
}
break;
case FAIL_IF_EXISTS:
if (detectNewPartitionsState != null) {
LOG.error(
"A previous pipeline exists with the same change stream name and existingPipelineOption is set to FAIL_IF_EXISTS.");
return;
}
break;
case SKIP_CLEANUP:
if (detectNewPartitionsState != null) {
LOG.error(
"A previous pipeline exists with the same change stream name and existingPipelineOption is set to SKIP_CLEANUP. This option should only be used in tests.");
return;
}
break;
default:
LOG.error("Unexpected existingPipelineOptions option.");
// terminate pipeline
return;
}
daoFactory.getMetadataTableDao().writeDetectNewPartitionVersion();
receiver.output(new InitialPipelineState(startTime, resume));
}
|
@Test
public void testInitializeSkipCleanupWithoutDNP() throws IOException {
ByteString metadataRowKey =
metadataTableAdminDao
.getChangeStreamNamePrefix()
.concat(ByteString.copyFromUtf8("existing_row"));
dataClient.mutateRow(
RowMutation.create(tableId, metadataRowKey)
.setCell(
MetadataTableAdminDao.CF_WATERMARK, MetadataTableAdminDao.QUALIFIER_DEFAULT, 123));
Instant startTime = Instant.now();
InitializeDoFn initializeDoFn =
new InitializeDoFn(daoFactory, startTime, ExistingPipelineOptions.SKIP_CLEANUP);
initializeDoFn.processElement(outputReceiver);
// Without existing DNP state, SKIP_CLEANUP proceeds as a fresh pipeline from startTime
verify(outputReceiver, times(1)).output(new InitialPipelineState(startTime, false));
// Existing metadata shouldn't be cleaned up
assertNotNull(dataClient.readRow(tableId, metadataRowKey));
}
|
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final SMBSession.DiskShareWrapper share = session.openShare(file);
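// Acquire the share for the duration of the open; it is released in the finally block below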
try {
final File entry = share.get().openFile(new SMBPathContainerService(session).getKey(file),
Collections.singleton(AccessMask.FILE_READ_DATA),
Collections.singleton(FileAttributes.FILE_ATTRIBUTE_NORMAL),
Collections.singleton(SMB2ShareAccess.FILE_SHARE_READ),
SMB2CreateDisposition.FILE_OPEN,
Collections.singleton(SMB2CreateOptions.FILE_NON_DIRECTORY_FILE));
final InputStream stream = entry.getInputStream();
if(status.isAppend()) {
// InputStream#skip may skip fewer bytes than requested; loop until the full offset is consumed
long remaining = status.getOffset();
while(remaining > 0) {
final long skipped = stream.skip(remaining);
if(skipped <= 0) {
break;
}
remaining -= skipped;
}
}
return new SMBInputStream(file, stream, entry);
}
catch(SMBRuntimeException e) {
throw new SMBExceptionMappingService().map("Download {0} failed", e, file);
}
catch(IOException e) {
throw new SMBTransportExceptionMappingService().map("Download {0} failed", e, file);
}
finally {
session.releaseShare(share);
}
}
|
@Test
public void testReadRange() throws Exception {
final TransferStatus status = new TransferStatus();
final int length = 140000;
final byte[] content = RandomUtils.nextBytes(length);
status.setLength(content.length);
final Path home = new DefaultHomeFinderService(session).find();
final Path folder = new SMBDirectoryFeature(session).mkdir(
new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final Path test = new SMBTouchFeature(session).touch(
new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final Write writer = new SMBWriteFeature(session);
status.setChecksum(writer.checksum(test, status).compute(new ByteArrayInputStream(content), status));
final OutputStream out = writer.write(test, status.exists(true), new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
assertTrue(new SMBFindFeature(session).find(test));
assertEquals(content.length, new SMBListService(session).list(test.getParent(), new DisabledListProgressListener()).get(test).attributes().getSize());
{
final ByteArrayOutputStream buffer = new ByteArrayOutputStream(40000);
final TransferStatus read = new TransferStatus();
read.setOffset(23); // offset within chunk
read.setAppend(true);
read.withLength(40000); // ensure to read at least two chunks
final InputStream in = new SMBReadFeature(session).read(test, read, new DisabledConnectionCallback());
new StreamCopier(read, read).withLimit(40000L).transfer(in, buffer);
final byte[] reference = new byte[40000];
System.arraycopy(content, 23, reference, 0, reference.length);
assertArrayEquals(reference, buffer.toByteArray());
}
{
final ByteArrayOutputStream buffer = new ByteArrayOutputStream(40000);
final TransferStatus read = new TransferStatus();
read.setOffset(65536); // offset at the beginning of a new chunk
read.setAppend(true);
read.withLength(40000); // ensure to read at least two chunks
final InputStream in = new SMBReadFeature(session).read(test, read, new DisabledConnectionCallback());
new StreamCopier(read, read).withLimit(40000L).transfer(in, buffer);
final byte[] reference = new byte[40000];
System.arraycopy(content, 65536, reference, 0, reference.length);
assertArrayEquals(reference, buffer.toByteArray());
}
{
final ByteArrayOutputStream buffer = new ByteArrayOutputStream(40000);
final TransferStatus read = new TransferStatus();
read.setOffset(65537); // offset one byte past the start of a new chunk
read.setAppend(true);
read.withLength(40000); // ensure to read at least two chunks
final InputStream in = new SMBReadFeature(session).read(test, read, new DisabledConnectionCallback());
new StreamCopier(read, read).withLimit(40000L).transfer(in, buffer);
final byte[] reference = new byte[40000];
System.arraycopy(content, 65537, reference, 0, reference.length);
assertArrayEquals(reference, buffer.toByteArray());
}
new SMBDeleteFeature(session).delete(Arrays.asList(test, folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
public synchronized Optional<ListenableFuture<V>> schedule(
Checkable<K, V> target, K context) {
if (checksInProgress.containsKey(target)) {
return Optional.empty();
}
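// Throttle: skip the check if the previous result completed within the minimum gap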
final LastCheckResult<V> result = completedChecks.get(target);
if (result != null) {
final long msSinceLastCheck = timer.monotonicNow() - result.completedAt;
if (msSinceLastCheck < minMsBetweenChecks) {
LOG.debug("Skipped checking {}. Time since last check {}ms " +
"is less than the min gap {}ms.",
target, msSinceLastCheck, minMsBetweenChecks);
return Optional.empty();
}
}
LOG.info("Scheduling a check for {}", target);
final ListenableFuture<V> lfWithoutTimeout = executorService.submit(
new Callable<V>() {
@Override
public V call() throws Exception {
return target.check(context);
}
});
final ListenableFuture<V> lf;
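// Optionally bound the check with a timeout so a slow or hung target cannot block the checker forever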
if (diskCheckTimeout > 0) {
lf = TimeoutFuture
.create(lfWithoutTimeout, diskCheckTimeout, TimeUnit.MILLISECONDS,
scheduledExecutorService);
} else {
lf = lfWithoutTimeout;
}
checksInProgress.put(target, lf);
addResultCachingCallback(target, lf);
return Optional.of(lf);
}
|
@Test(timeout=60000)
public void testScheduler() throws Exception {
final NoOpCheckable target1 = new NoOpCheckable();
final NoOpCheckable target2 = new NoOpCheckable();
final FakeTimer timer = new FakeTimer();
ThrottledAsyncChecker<Boolean, Boolean> checker =
new ThrottledAsyncChecker<>(timer, MIN_ERROR_CHECK_GAP, 0,
getExecutorService());
// check target1 and ensure we get back the expected result.
assertTrue(checker.schedule(target1, true).isPresent());
waitTestCheckableCheckCount(target1, 1L);
// Check target1 again without advancing the timer. target1 should not
// be checked again.
assertFalse(checker.schedule(target1, true).isPresent());
waitTestCheckableCheckCount(target1, 1L);
// Schedule target2 without advancing the timer.
// target2 should be checked as it has never been checked before.
assertTrue(checker.schedule(target2, true).isPresent());
waitTestCheckableCheckCount(target2, 1L);
// Advance the timer but just short of the min gap.
// Neither target1 nor target2 should be checked again.
timer.advance(MIN_ERROR_CHECK_GAP - 1);
assertFalse(checker.schedule(target1, true).isPresent());
waitTestCheckableCheckCount(target1, 1L);
assertFalse(checker.schedule(target2, true).isPresent());
waitTestCheckableCheckCount(target2, 1L);
// Advance the timer again.
// Both targets should be checked now.
timer.advance(MIN_ERROR_CHECK_GAP);
assertTrue(checker.schedule(target1, true).isPresent());
waitTestCheckableCheckCount(target1, 2L);
assertTrue(checker.schedule(target2, true).isPresent());
waitTestCheckableCheckCount(target2, 2L);
}
|