focal_method | test_case
---|---|
@Override
public Range<T> rangeContaining(long key, long value) {
BitSet rangeBitSet = rangeBitSetMap.get(key);
if (rangeBitSet != null) {
if (!rangeBitSet.get(getSafeEntry(value))) {
// if position is not part of any range then return null
return null;
}
int lowerValue = rangeBitSet.previousClearBit(getSafeEntry(value)) + 1;
final T lower = consumer.apply(key, lowerValue);
final T upper = consumer.apply(key,
Math.max(rangeBitSet.nextClearBit(getSafeEntry(value)) - 1, lowerValue));
return Range.closed(lower, upper);
}
return null;
}
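A minimal standalone sketch (not part of this dataset entry, JDK only) of how BitSet.previousClearBit and nextClearBit recover the closed range surrounding a set bit, which is the trick rangeContaining relies on:
import java.util.BitSet;

public class BitSetRangeDemo {
    public static void main(String[] args) {
        BitSet bits = new BitSet();
        bits.set(98, 106); // mark positions 98..105 as one contiguous range
        int value = 100;
        int lower = bits.previousClearBit(value) + 1;              // 98
        int upper = Math.max(bits.nextClearBit(value) - 1, lower); // 105
        System.out.println("[" + lower + ", " + upper + "]");      // prints [98, 105]
    }
}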
|
@Test
public void testRangeContaining() {
OpenLongPairRangeSet<LongPair> set = new OpenLongPairRangeSet<>(consumer);
set.add(Range.closed(new LongPair(0, 98), new LongPair(0, 99)));
set.add(Range.closed(new LongPair(0, 100), new LongPair(1, 5)));
com.google.common.collect.RangeSet<LongPair> gSet = TreeRangeSet.create();
gSet.add(Range.closed(new LongPair(0, 98), new LongPair(0, 100)));
gSet.add(Range.closed(new LongPair(0, 101), new LongPair(1, 5)));
set.add(Range.closed(new LongPair(1, 10), new LongPair(1, 15)));
set.add(Range.closed(new LongPair(1, 20), new LongPair(2, 10)));
set.add(Range.closed(new LongPair(2, 25), new LongPair(2, 28)));
set.add(Range.closed(new LongPair(3, 12), new LongPair(3, 20)));
set.add(Range.closed(new LongPair(4, 12), new LongPair(4, 20)));
gSet.add(Range.closed(new LongPair(1, 10), new LongPair(1, 15)));
gSet.add(Range.closed(new LongPair(1, 20), new LongPair(2, 10)));
gSet.add(Range.closed(new LongPair(2, 25), new LongPair(2, 28)));
gSet.add(Range.closed(new LongPair(3, 12), new LongPair(3, 20)));
gSet.add(Range.closed(new LongPair(4, 12), new LongPair(4, 20)));
LongPair position = new LongPair(0, 99);
assertEquals(set.rangeContaining(position.getKey(), position.getValue()),
Range.closed(new LongPair(0, 98), new LongPair(0, 100)));
assertEquals(set.rangeContaining(position.getKey(), position.getValue()), gSet.rangeContaining(position));
position = new LongPair(2, 30);
assertNull(set.rangeContaining(position.getKey(), position.getValue()));
assertEquals(set.rangeContaining(position.getKey(), position.getValue()), gSet.rangeContaining(position));
position = new LongPair(3, 13);
assertEquals(set.rangeContaining(position.getKey(), position.getValue()),
Range.closed(new LongPair(3, 12), new LongPair(3, 20)));
assertEquals(set.rangeContaining(position.getKey(), position.getValue()), gSet.rangeContaining(position));
position = new LongPair(3, 22);
assertNull(set.rangeContaining(position.getKey(), position.getValue()));
assertEquals(set.rangeContaining(position.getKey(), position.getValue()), gSet.rangeContaining(position));
}
|
public static InputConsumableDecider.Factory loadInputConsumableDeciderFactory(
HybridPartitionDataConsumeConstraint hybridPartitionDataConsumeConstraint) {
switch (hybridPartitionDataConsumeConstraint) {
case ALL_PRODUCERS_FINISHED:
return AllFinishedInputConsumableDecider.Factory.INSTANCE;
case ONLY_FINISHED_PRODUCERS:
return PartialFinishedInputConsumableDecider.Factory.INSTANCE;
case UNFINISHED_PRODUCERS:
return DefaultInputConsumableDecider.Factory.INSTANCE;
default:
throw new IllegalStateException(
hybridPartitionDataConsumeConstraint + " is not supported.");
}
}
|
@Test
void testLoadInputConsumableDeciderFactory() {
assertAndLoadInputConsumableDecider(
UNFINISHED_PRODUCERS, DefaultInputConsumableDecider.Factory.INSTANCE);
assertAndLoadInputConsumableDecider(
ONLY_FINISHED_PRODUCERS, PartialFinishedInputConsumableDecider.Factory.INSTANCE);
assertAndLoadInputConsumableDecider(
ALL_PRODUCERS_FINISHED, AllFinishedInputConsumableDecider.Factory.INSTANCE);
}
|
long trySend(long now) {
long pollDelayMs = maxPollTimeoutMs;
// send any requests that can be sent now
for (Node node : unsent.nodes()) {
Iterator<ClientRequest> iterator = unsent.requestIterator(node);
if (iterator.hasNext())
pollDelayMs = Math.min(pollDelayMs, client.pollDelayMs(node, now));
while (iterator.hasNext()) {
ClientRequest request = iterator.next();
if (client.ready(node, now)) {
client.send(request, now);
iterator.remove();
} else {
// try next node when current node is not ready
break;
}
}
}
return pollDelayMs;
}
|
@Test
public void testTrySend() {
final AtomicBoolean isReady = new AtomicBoolean();
final AtomicInteger checkCount = new AtomicInteger();
client = new MockClient(time, metadata) {
@Override
public boolean ready(Node node, long now) {
checkCount.incrementAndGet();
if (isReady.get())
return super.ready(node, now);
else
return false;
}
};
consumerClient = new ConsumerNetworkClient(new LogContext(), client, metadata, time, 100, 10, Integer.MAX_VALUE);
consumerClient.send(node, heartbeat());
consumerClient.send(node, heartbeat());
assertEquals(2, consumerClient.pendingRequestCount(node));
assertEquals(0, client.inFlightRequestCount(node.idString()));
consumerClient.trySend(time.milliseconds());
// readiness is checked only once when the node is not ready
assertEquals(1, checkCount.getAndSet(0));
assertEquals(2, consumerClient.pendingRequestCount(node));
assertEquals(0, client.inFlightRequestCount(node.idString()));
isReady.set(true);
consumerClient.trySend(time.milliseconds());
// once the node is ready, readiness is checked for every request
assertEquals(2, checkCount.getAndSet(0));
assertEquals(2, consumerClient.pendingRequestCount(node));
assertEquals(2, client.inFlightRequestCount(node.idString()));
}
|
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
final DefaultPacketRequest other = (DefaultPacketRequest) obj;
return Objects.equals(this.selector, other.selector)
&& Objects.equals(this.priority, other.priority)
&& Objects.equals(this.appId, other.appId)
&& Objects.equals(this.nodeId, other.nodeId)
&& Objects.equals(this.deviceId, other.deviceId);
}
|
@Test
public void testEquals() {
new EqualsTester()
.addEqualityGroup(packetRequest1, sameAsPacketRequest1)
.addEqualityGroup(packetRequest2)
.addEqualityGroup(packetRequest3)
.addEqualityGroup(packetRequest4)
.testEquals();
}
|
@Override
public void unsubscribeService(Service service, Subscriber subscriber, String clientId) {
throw new UnsupportedOperationException("No persistent subscribers");
}
|
@Test
void testUnsubscribeService() {
assertThrows(UnsupportedOperationException.class, () -> {
persistentClientOperationServiceImpl.unsubscribeService(service, subscriber, clientId);
});
}
|
@Override
public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) {
SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
if (sqlStatement instanceof ShowStatement) {
return Optional.of(new PostgreSQLShowVariableExecutor((ShowStatement) sqlStatement));
}
return Optional.empty();
}
|
@Test
void assertCreateWithDMLStatement() {
DeleteStatementContext sqlStatementContext = new DeleteStatementContext(new PostgreSQLDeleteStatement(), DefaultDatabase.LOGIC_NAME);
assertThat(new PostgreSQLAdminExecutorCreator().create(sqlStatementContext, "delete from t where id = 1", "", Collections.emptyList()), is(Optional.empty()));
}
|
@Override
public WorkerIdentity get() {
// Look at configurations first
if (mConf.isSetByUser(PropertyKey.WORKER_IDENTITY_UUID)) {
String uuidStr = mConf.getString(PropertyKey.WORKER_IDENTITY_UUID);
final WorkerIdentity workerIdentity = WorkerIdentity.ParserV1.INSTANCE.fromUUID(uuidStr);
LOG.debug("Loaded worker identity from configuration: {}", workerIdentity);
return workerIdentity;
}
// Try loading from the identity file
String filePathStr = mConf.getString(PropertyKey.WORKER_IDENTITY_UUID_FILE_PATH);
final Path idFile = Paths.get(filePathStr);
try (BufferedReader reader = Files.newBufferedReader(idFile)) {
List<String> nonCommentLines = reader.lines()
.filter(line -> !line.startsWith("#"))
.filter(line -> !line.trim().isEmpty())
.collect(Collectors.toList());
if (!nonCommentLines.isEmpty()) {
if (nonCommentLines.size() > 1) {
LOG.warn("Multiple worker identities configured in {}, only the first one will be used",
idFile);
}
String uuidStr = nonCommentLines.get(0);
final WorkerIdentity workerIdentity = WorkerIdentity.ParserV1.INSTANCE.fromUUID(uuidStr);
LOG.debug("Loaded worker identity from file {}: {}",
idFile, workerIdentity);
return workerIdentity;
}
} catch (FileNotFoundException | NoSuchFileException ignored) {
// if not existent, proceed to auto generate one
LOG.debug("Worker identity file {} not found", idFile);
} catch (IOException e) {
// in case of other IO error, better stop worker from starting up than use a new identity
throw new RuntimeException(
String.format("Failed to read worker identity from identity file %s", idFile), e);
}
// No identity is supplied by the user
// Assume this is the first time the worker starts up, and generate a new one
LOG.debug("Auto generating new worker identity as no identity is supplied by the user");
UUID generatedId = mUUIDGenerator.get();
WorkerIdentity identity = WorkerIdentity.ParserV1.INSTANCE.fromUUID(generatedId);
LOG.debug("Generated worker identity as {}", identity);
try (BufferedWriter writer = Files.newBufferedWriter(idFile, StandardCharsets.UTF_8,
StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) {
writer.write("# Worker identity automatically generated at ");
writer.write(OffsetDateTime.now().format(DateTimeFormatter.RFC_1123_DATE_TIME));
writer.newLine();
writer.write(generatedId.toString());
writer.newLine();
} catch (Exception e) {
LOG.warn("Failed to persist automatically generated worker identity ({}) to {}, "
+ "this worker will lose its identity after restart", identity, idFile, e);
}
try {
// set the file to be read-only
Set<PosixFilePermission> permSet = Files.getPosixFilePermissions(idFile);
Set<PosixFilePermission> nonWritablePermSet = Sets.filter(permSet,
perm -> perm != PosixFilePermission.OWNER_WRITE
&& perm != PosixFilePermission.GROUP_WRITE
&& perm != PosixFilePermission.OTHERS_WRITE);
Files.setPosixFilePermissions(idFile, nonWritablePermSet);
} catch (Exception e) {
LOG.warn("Failed to set identity file to be read-only", e);
}
return identity;
}
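A minimal sketch (an assumption, POSIX systems only) of the permission filtering at the end of get(): dropping every write bit so the identity file becomes read-only:
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.PosixFilePermission;
import java.util.EnumSet;
import java.util.Set;

public class ReadOnlyDemo {
    public static void main(String[] args) throws Exception {
        Path p = Files.createTempFile("demo", ".txt");
        Set<PosixFilePermission> perms = EnumSet.copyOf(Files.getPosixFilePermissions(p));
        perms.removeAll(EnumSet.of(
                PosixFilePermission.OWNER_WRITE,
                PosixFilePermission.GROUP_WRITE,
                PosixFilePermission.OTHERS_WRITE));
        Files.setPosixFilePermissions(p, perms);
        System.out.println(Files.getPosixFilePermissions(p)); // no *_WRITE entries remain
    }
}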
|
@Test
public void autoGenerateIfIdFilePathNotSetAndFileNotExists() throws Exception {
AlluxioProperties props = new AlluxioProperties();
props.put(PropertyKey.WORKER_IDENTITY_UUID_FILE_PATH, mUuidFilePath.toString(), Source.DEFAULT);
// put the identity in a file in the same directory but with a different name,
// which cannot be detected
Path path = mUuidFilePath.getParent().resolve("not_really_worker_identity");
try (BufferedWriter fout = Files.newBufferedWriter(path, StandardCharsets.UTF_8,
StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) {
fout.write(mReferenceUuid.toString());
fout.newLine();
}
assertFalse(Files.exists(mUuidFilePath));
AlluxioConfiguration conf = new InstancedConfiguration(props);
WorkerIdentityProvider provider = new WorkerIdentityProvider(conf, () -> mDifferentUuid);
WorkerIdentity identity = provider.get();
assertNotEquals(mReferenceUuid, WorkerIdentity.ParserV1.INSTANCE.toUUID(identity));
assertEquals(mDifferentUuid, WorkerIdentity.ParserV1.INSTANCE.toUUID(identity));
}
|
public static Resource getResource(File workingDir, String path) {
if (path.startsWith(Resource.CLASSPATH_COLON)) {
path = removePrefix(path);
File file = classPathToFile(path);
if (file != null) {
return new FileResource(file, true, path);
}
List<Resource> resources = new ArrayList<>();
synchronized (SCAN_RESULT) {
ResourceList rl = SCAN_RESULT.getResourcesWithPath(path);
if (rl == null) {
rl = ResourceList.emptyList();
}
rl.forEachByteArrayIgnoringIOException((res, bytes) -> {
URI uri = res.getURI();
if ("file".equals(uri.getScheme())) {
File found = Paths.get(uri).toFile();
resources.add(new FileResource(found, true, res.getPath()));
} else {
resources.add(new JarResource(bytes, res.getPath(), uri));
}
});
}
if (resources.isEmpty()) {
throw new RuntimeException("not found: " + path);
}
return resources.get(0);
} else {
path = path.replace('\\', '/'); // windows fix
File file = new File(removePrefix(path));
if (!file.exists()) {
throw new RuntimeException("not found: " + path);
}
Path relativePath = workingDir.toPath().relativize(file.getAbsoluteFile().toPath());
return new FileResource(file, false, relativePath.toString());
}
}
|
@Test
void testResolveRelativeClassPathFile() {
Resource temp = ResourceUtils.getResource(new File(""), "classpath:com/intuit/karate/resource/dir1/dir1.log");
Resource resource = temp.resolve("../dir2/dir2.log");
assertTrue(resource.isFile());
assertTrue(resource.isClassPath());
assertEquals("com/intuit/karate/resource/dir1/../dir2/dir2.log", resource.getRelativePath());
assertEquals("classpath:com/intuit/karate/resource/dir1/../dir2/dir2.log", resource.getPrefixedPath());
assertEquals("bar", FileUtils.toString(resource.getStream()));
}
|
public PropertyPanel addProp(String key, String label, String value) {
properties.add(new Prop(key, label, value));
return this;
}
|
@Test
public void intValues() {
basic();
pp.addProp(KEY_A, KEY_A, 200)
.addProp(KEY_B, KEY_B, 2000)
.addProp(KEY_C, KEY_C, 1234567);
validateProp(KEY_A, "200");
validateProp(KEY_B, "2,000");
validateProp(KEY_C, "1,234,567");
}
|
@Override
public KsMaterializedQueryResult<WindowedRow> get(
final GenericKey key,
final int partition,
final Range<Instant> windowStart,
final Range<Instant> windowEnd,
final Optional<Position> position
) {
try {
final ReadOnlySessionStore<GenericKey, GenericRow> store = stateStore
.store(QueryableStoreTypes.sessionStore(), partition);
return KsMaterializedQueryResult.rowIterator(
findSession(store, key, windowStart, windowEnd).iterator());
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
}
|
@Test
public void shouldReturnValueIfSessionEndsAtUpperBoundIfUpperBoundClosed() {
// Given:
final Range<Instant> endBounds = Range.closed(
LOWER_INSTANT,
UPPER_INSTANT
);
final Instant wstart = UPPER_INSTANT.minusMillis(1);
givenSingleSession(wstart, UPPER_INSTANT);
// When:
final Iterator<WindowedRow> rowIterator =
table.get(A_KEY, PARTITION, Range.all(), endBounds).rowIterator;
// Then:
assertThat(rowIterator.next(), is(
WindowedRow.of(
SCHEMA,
sessionKey(wstart, UPPER_INSTANT),
A_VALUE,
UPPER_INSTANT.toEpochMilli()
)
));
}
|
public void createView(View view, boolean replace, boolean ifNotExists) {
if (ifNotExists) {
relationsStorage.putIfAbsent(view.name(), view);
} else if (replace) {
relationsStorage.put(view.name(), view);
} else if (!relationsStorage.putIfAbsent(view.name(), view)) {
throw QueryException.error("Mapping or view already exists: " + view.name());
}
}
|
@Test
public void when_createsDuplicateViewsIfReplaceAndIfNotExists_then_succeeds() {
// given
View view = view();
// when
catalog.createView(view, true, true);
// then
verify(relationsStorage).putIfAbsent(eq(view.name()), isA(View.class));
}
|
@NonNull
public static Permutor<FeedItem> getPermutor(@NonNull SortOrder sortOrder) {
Comparator<FeedItem> comparator = null;
Permutor<FeedItem> permutor = null;
switch (sortOrder) {
case EPISODE_TITLE_A_Z:
comparator = (f1, f2) -> itemTitle(f1).compareTo(itemTitle(f2));
break;
case EPISODE_TITLE_Z_A:
comparator = (f1, f2) -> itemTitle(f2).compareTo(itemTitle(f1));
break;
case DATE_OLD_NEW:
comparator = (f1, f2) -> pubDate(f1).compareTo(pubDate(f2));
break;
case DATE_NEW_OLD:
comparator = (f1, f2) -> pubDate(f2).compareTo(pubDate(f1));
break;
case DURATION_SHORT_LONG:
comparator = (f1, f2) -> Integer.compare(duration(f1), duration(f2));
break;
case DURATION_LONG_SHORT:
comparator = (f1, f2) -> Integer.compare(duration(f2), duration(f1));
break;
case EPISODE_FILENAME_A_Z:
comparator = (f1, f2) -> itemLink(f1).compareTo(itemLink(f2));
break;
case EPISODE_FILENAME_Z_A:
comparator = (f1, f2) -> itemLink(f2).compareTo(itemLink(f1));
break;
case FEED_TITLE_A_Z:
comparator = (f1, f2) -> feedTitle(f1).compareTo(feedTitle(f2));
break;
case FEED_TITLE_Z_A:
comparator = (f1, f2) -> feedTitle(f2).compareTo(feedTitle(f1));
break;
case RANDOM:
permutor = Collections::shuffle;
break;
case SMART_SHUFFLE_OLD_NEW:
permutor = (queue) -> smartShuffle(queue, true);
break;
case SMART_SHUFFLE_NEW_OLD:
permutor = (queue) -> smartShuffle(queue, false);
break;
case SIZE_SMALL_LARGE:
comparator = (f1, f2) -> Long.compare(size(f1), size(f2));
break;
case SIZE_LARGE_SMALL:
comparator = (f1, f2) -> Long.compare(size(f2), size(f1));
break;
case COMPLETION_DATE_NEW_OLD:
comparator = (f1, f2) -> f2.getMedia().getPlaybackCompletionDate()
.compareTo(f1.getMedia().getPlaybackCompletionDate());
break;
default:
throw new IllegalArgumentException("Permutor not implemented");
}
if (comparator != null) {
final Comparator<FeedItem> comparator2 = comparator;
permutor = (queue) -> Collections.sort(queue, comparator2);
}
return permutor;
}
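The final copy into comparator2 exists because lambdas may capture only (effectively) final locals, and comparator is reassigned across the switch branches. A standalone sketch of the same idiom, assuming nothing beyond the JDK:
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.function.Consumer;

public class CaptureDemo {
    public static void main(String[] args) {
        Comparator<String> cmp = Comparator.naturalOrder();
        if (args.length > 0) {
            cmp = cmp.reversed(); // cmp is reassigned, so it is not effectively final
        }
        final Comparator<String> copy = cmp; // final snapshot the lambda may capture
        Consumer<List<String>> permutor = list -> list.sort(copy);
        List<String> items = new ArrayList<>(Arrays.asList("b", "a", "c"));
        permutor.accept(items);
        System.out.println(items); // [a, b, c]
    }
}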
|
@Test
public void testPermutorForRule_DATE_ASC_NullPubDate() {
Permutor<FeedItem> permutor = FeedItemPermutors.getPermutor(SortOrder.DATE_OLD_NEW);
List<FeedItem> itemList = getTestList();
itemList.get(2) // itemId 2
.setPubDate(null);
assertTrue(checkIdOrder(itemList, 1, 3, 2)); // before sorting
permutor.reorder(itemList);
assertTrue(checkIdOrder(itemList, 2, 1, 3)); // after sorting
}
|
@Override
synchronized public boolean cd(String directory) {
if (StrUtil.isBlank(directory)) {
// a blank argument means the current directory; nothing to do
return true;
}
try {
return client.changeWorkingDirectory(directory);
} catch (IOException e) {
throw new IORuntimeException(e);
}
}
|
@Test
@Disabled
public void cdTest() {
final Ftp ftp = new Ftp("looly.centos");
ftp.cd("/file/aaa");
Console.log(ftp.pwd());
IoUtil.close(ftp);
}
|
@Override
protected void write(final MySQLPacketPayload payload) {
payload.writeInt1(STATUS);
payload.writeInt4(statementId);
// TODO Column Definition Block should be added in future when the meta data of the columns is cached.
payload.writeInt2(columnCount);
payload.writeInt2(parameterCount);
payload.writeReserved(1);
payload.writeInt2(warningCount);
}
|
@Test
void assertWrite() {
MySQLComStmtPrepareOKPacket actual = new MySQLComStmtPrepareOKPacket(1, 0, 1, 0);
actual.write(payload);
verify(payload).writeInt1(0x00);
verify(payload, times(2)).writeInt2(0);
verify(payload).writeInt2(1);
verify(payload).writeInt4(1);
verify(payload).writeReserved(1);
}
|
public final void isLessThan(int other) {
asDouble.isLessThan(other);
}
|
@Test
public void isLessThan_int() {
expectFailureWhenTestingThat(2.0f).isLessThan(2);
assertThat(2.0f).isLessThan(3);
assertThat(0x1.0p30f).isLessThan((1 << 30) + 1);
}
|
public abstract boolean compare(A actual, E expected);
|
@Test
public void testTransforming_both_compare_nullTransformedValue() {
assertThat(HYPHENS_MATCH_COLONS.compare("mailing-list", "abcdefg-hij")).isFalse();
assertThat(HYPHENS_MATCH_COLONS.compare("forum", "abcde:fghij")).isFalse();
assertThat(HYPHENS_MATCH_COLONS.compare("forum", "abcde-fghij")).isTrue();
}
|
public static LinkExtractorParser getParser(String parserClassName)
throws LinkExtractorParseException {
// Is there a cached parser?
LinkExtractorParser parser = PARSERS.get(parserClassName);
if (parser != null) {
LOG.debug("Fetched {}", parserClassName);
return parser;
}
try {
Object clazz = Class.forName(parserClassName).getDeclaredConstructor().newInstance();
if (clazz instanceof LinkExtractorParser) {
parser = (LinkExtractorParser) clazz;
} else {
throw new LinkExtractorParseException(new ClassCastException(parserClassName));
}
} catch (IllegalArgumentException | ReflectiveOperationException | SecurityException e) {
throw new LinkExtractorParseException(e);
}
LOG.info("Created {}", parserClassName);
if (parser.isReusable()) {
// cache the parser if not already done by another thread
LinkExtractorParser currentParser = PARSERS.putIfAbsent(parserClassName, parser);
if (currentParser != null) {
return currentParser;
}
}
return parser;
}
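A minimal sketch (JDK only, not part of this entry) of the putIfAbsent idiom above: when two threads race to cache a value, both end up using the instance that won:
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PutIfAbsentDemo {
    private static final ConcurrentMap<String, Object> CACHE = new ConcurrentHashMap<>();

    static Object getOrCreate(String key) {
        Object created = new Object();
        // putIfAbsent returns the previous value if another thread cached one first
        Object winner = CACHE.putIfAbsent(key, created);
        return winner != null ? winner : created;
    }
}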
|
@Test
public void testNotReusableCache() throws Exception {
assertNotSame(BaseParser.getParser(NotReusableParser.class.getCanonicalName()), BaseParser.getParser(NotReusableParser.class.getCanonicalName()));
}
|
public static InputStream limitedInputStream(final InputStream is, final int limit) throws IOException {
return new InputStream() {
private int mPosition = 0;
private int mMark = 0;
private final int mLimit = Math.min(limit, is.available());
@Override
public int read() throws IOException {
if (mPosition < mLimit) {
mPosition++;
return is.read();
}
return -1;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || len > b.length - off) {
throw new IndexOutOfBoundsException();
}
if (mPosition >= mLimit) {
return -1;
}
if (mPosition + len > mLimit) {
len = mLimit - mPosition;
}
if (len <= 0) {
return 0;
}
int bytesRead = is.read(b, off, len);
if (bytesRead > 0) {
mPosition += bytesRead;
}
return bytesRead;
}
@Override
public long skip(long len) throws IOException {
if (mPosition + len > mLimit) {
len = mLimit - mPosition;
}
if (len <= 0) {
return 0;
}
long skipped = is.skip(len);
mPosition += skipped;
return skipped;
}
@Override
public int available() {
return mLimit - mPosition;
}
@Override
public boolean markSupported() {
return is.markSupported();
}
@Override
public synchronized void mark(int readlimit) {
is.mark(readlimit);
mMark = mPosition;
}
@Override
public synchronized void reset() throws IOException {
is.reset();
mPosition = mMark;
}
@Override
public void close() throws IOException {
is.close();
}
};
}
|
@Test
void testLimitedInputStream() throws Exception {
InputStream is = StreamUtilsTest.class.getResourceAsStream("/StreamUtilsTest.txt");
assertThat(is.available(), is(10));
is = StreamUtils.limitedInputStream(is, 2);
assertThat(is.available(), is(2));
assertThat(is.markSupported(), is(true));
is.mark(0);
assertEquals((int) '0', is.read());
assertEquals((int) '1', is.read());
assertEquals(-1, is.read());
is.reset();
is.skip(1);
assertEquals((int) '1', is.read());
is.reset();
is.skip(-1);
assertEquals((int) '0', is.read());
is.reset();
byte[] bytes = new byte[2];
int read = is.read(bytes, 1, 1);
assertThat(read, is(1));
is.reset();
StreamUtils.skipUnusedStream(is);
assertEquals(-1, is.read());
is.close();
}
|
public static InetSocketAddress parseAddress(String address, int defaultPort) {
return parseAddress(address, defaultPort, false);
}
|
@Test
void parseAddressBadValues() {
assertThatExceptionOfType(NullPointerException.class)
.isThrownBy(() -> AddressUtils.parseAddress(null, 0))
.withMessage("address");
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> AddressUtils.parseAddress("address", -1))
.withMessage("port out of range:-1");
}
|
public static void initRequestFromEntity(HttpRequestBase requestBase, Map<String, String> body, String charset)
throws Exception {
if (body == null || body.isEmpty()) {
return;
}
List<NameValuePair> params = new ArrayList<>(body.size());
for (Map.Entry<String, String> entry : body.entrySet()) {
params.add(new BasicNameValuePair(entry.getKey(), entry.getValue()));
}
if (requestBase instanceof HttpEntityEnclosingRequest) {
HttpEntityEnclosingRequest request = (HttpEntityEnclosingRequest) requestBase;
HttpEntity entity = new UrlEncodedFormEntity(params, charset);
request.setEntity(entity);
}
}
|
@Test
void testInitRequestFromEntity5() throws Exception {
HttpDelete httpDelete = new HttpDelete("");
HttpUtils.initRequestFromEntity(httpDelete, Collections.singletonMap("k", "v"), "UTF-8");
// HttpDelete does not enclose an entity, so nothing should change
assertEquals(new HttpDelete("").getMethod(), httpDelete.getMethod());
assertArrayEquals(new HttpDelete("").getAllHeaders(), httpDelete.getAllHeaders());
}
|
@Delete(uri = "/{executionId}")
@ExecuteOn(TaskExecutors.IO)
@Operation(tags = {"Executions"}, summary = "Delete an execution")
@ApiResponse(responseCode = "204", description = "On success")
public HttpResponse<Void> delete(
@Parameter(description = "The execution id") @PathVariable String executionId,
@Parameter(description = "Whether to delete execution logs") @QueryValue(defaultValue = "true") Boolean deleteLogs,
@Parameter(description = "Whether to delete execution metrics") @QueryValue(defaultValue = "true") Boolean deleteMetrics,
@Parameter(description = "Whether to delete execution files in the internal storage") @QueryValue(defaultValue = "true") Boolean deleteStorage
) throws IOException {
Optional<Execution> execution = executionRepository.findById(tenantService.resolveTenant(), executionId);
if (execution.isPresent()) {
executionService.delete(execution.get(), deleteLogs, deleteMetrics, deleteStorage);
return HttpResponse.status(HttpStatus.NO_CONTENT);
} else {
return HttpResponse.status(HttpStatus.NOT_FOUND);
}
}
|
@Test
void delete() {
Execution result = triggerInputsFlowExecution(true);
var response = client.toBlocking().exchange(HttpRequest.DELETE("/api/v1/executions/" + result.getId()));
assertThat(response.getStatus(), is(HttpStatus.NO_CONTENT));
var notFound = assertThrows(HttpClientResponseException.class, () -> client.toBlocking().exchange(HttpRequest.DELETE("/api/v1/executions/notfound")));
assertThat(notFound.getStatus(), is(HttpStatus.NOT_FOUND));
}
|
@Override
public <R> R run(Action<R, C, E> action) throws E, InterruptedException {
return run(action, retryByDefault);
}
|
@Test
public void testNoRetryingNonRetryableException() {
try (MockClientPoolImpl mockClientPool =
new MockClientPoolImpl(2, RetryableException.class, true, 3)) {
assertThatThrownBy(() -> mockClientPool.run(MockClient::failWithNonRetryable, true))
.isInstanceOf(NonRetryableException.class);
assertThat(mockClientPool.reconnectionAttempts()).isEqualTo(0);
}
}
|
public static VersionRange parse(String rangeString) {
validateRangeString(rangeString);
Inclusiveness minVersionInclusiveness =
rangeString.startsWith("[") ? Inclusiveness.INCLUSIVE : Inclusiveness.EXCLUSIVE;
Inclusiveness maxVersionInclusiveness =
rangeString.endsWith("]") ? Inclusiveness.INCLUSIVE : Inclusiveness.EXCLUSIVE;
int commaIndex = rangeString.indexOf(',');
String minVersionString = rangeString.substring(1, commaIndex).trim();
Version minVersion;
if (minVersionString.isEmpty()) {
minVersionInclusiveness = Inclusiveness.EXCLUSIVE;
minVersion = Version.minimum();
} else {
minVersion = Version.fromString(minVersionString);
}
String maxVersionString =
rangeString.substring(commaIndex + 1, rangeString.length() - 1).trim();
Version maxVersion;
if (maxVersionString.isEmpty()) {
maxVersionInclusiveness = Inclusiveness.EXCLUSIVE;
maxVersion = Version.maximum();
} else {
maxVersion = Version.fromString(maxVersionString);
}
if (!minVersion.isLessThan(maxVersion)) {
throw new IllegalArgumentException(
String.format(
"Min version in range must be less than max version in range, got '%s'",
rangeString));
}
return builder()
.setMinVersion(minVersion)
.setMinVersionInclusiveness(minVersionInclusiveness)
.setMaxVersion(maxVersion)
.setMaxVersionInclusiveness(maxVersionInclusiveness)
.build();
}
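A hypothetical standalone sketch of the bracket convention the parser implements ('[' and ']' inclusive, '(' and ')' exclusive, an empty endpoint meaning unbounded), showing the same substring arithmetic:
public class RangeBracketDemo {
    public static void main(String[] args) {
        String range = "[1.0,2.0)";
        boolean minInclusive = range.startsWith("[");                       // true
        boolean maxInclusive = range.endsWith("]");                         // false
        int comma = range.indexOf(',');
        String min = range.substring(1, comma).trim();                      // "1.0"
        String max = range.substring(comma + 1, range.length() - 1).trim(); // "2.0"
        System.out.println(min + " <= v < " + max
                + " (minInclusive=" + minInclusive + ", maxInclusive=" + maxInclusive + ")");
    }
}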
|
@Test
public void parse_withEmptyRangeString_throwsIllegalArgumentException() {
IllegalArgumentException exception =
assertThrows(IllegalArgumentException.class, () -> VersionRange.parse(""));
assertThat(exception).hasMessageThat().isEqualTo("Range string cannot be empty.");
}
|
@Override
public void abortLm(MdId mdName, MaIdShort maName, MepId mepId)
throws CfmConfigException {
throw new UnsupportedOperationException("Not yet implemented");
}
|
@Test
public void testAbortAllLmOnMep() throws CfmConfigException {
//TODO: Implement underlying method
try {
soamManager.abortLm(MDNAME1, MANAME1, MEPID1);
fail("Expecting UnsupportedOperationException");
} catch (UnsupportedOperationException e) {
// expected: the underlying method is not yet implemented
}
}
|
public static File openFile(String path, String fileName) {
return openFile(path, fileName, false);
}
|
@Test
void testOpenFileWithPath() {
File file = DiskUtils.openFile(testFile.getParent(), testFile.getName(), false);
assertNotNull(file);
assertEquals(testFile.getPath(), file.getPath());
assertEquals(testFile.getName(), file.getName());
}
|
static ImmutableList<PushImageStep> makeListForManifestList(
BuildContext buildContext,
ProgressEventDispatcher.Factory progressEventDispatcherFactory,
RegistryClient registryClient,
ManifestTemplate manifestList,
boolean manifestListAlreadyExists)
throws IOException {
Set<String> tags = buildContext.getAllTargetImageTags();
EventHandlers eventHandlers = buildContext.getEventHandlers();
try (TimerEventDispatcher ignored =
new TimerEventDispatcher(eventHandlers, "Preparing manifest list pushers");
ProgressEventDispatcher progressEventDispatcher =
progressEventDispatcherFactory.create("launching manifest list pushers", tags.size())) {
boolean singlePlatform = buildContext.getContainerConfiguration().getPlatforms().size() == 1;
if (singlePlatform) {
return ImmutableList.of(); // single image; no need to push a manifest list
}
if (JibSystemProperties.skipExistingImages() && manifestListAlreadyExists) {
eventHandlers.dispatch(LogEvent.info("Skipping pushing manifest list; already exists."));
return ImmutableList.of();
}
DescriptorDigest manifestListDigest = Digests.computeJsonDigest(manifestList);
return tags.stream()
.map(
tag ->
new PushImageStep(
buildContext,
progressEventDispatcher.newChildProducer(),
registryClient,
manifestList,
tag,
manifestListDigest,
// TODO: a manifest list digest isn't an "image id". Figure out the right
// return value and type.
manifestListDigest))
.collect(ImmutableList.toImmutableList());
}
}
|
@Test
public void testMakeListForManifestList_manifestListAlreadyExists() throws IOException {
System.setProperty(JibSystemProperties.SKIP_EXISTING_IMAGES, "true");
List<PushImageStep> pushImageStepList =
PushImageStep.makeListForManifestList(
buildContext, progressDispatcherFactory, registryClient, manifestList, true);
assertThat(pushImageStepList).isEmpty();
}
|
Map<Path, Set<Integer>> changedLines() {
return tracker.changedLines();
}
|
@Test
public void count_multiple_added_lines() throws IOException {
String example = "Index: sample1\n"
+ "===================================================================\n"
+ "--- a/sample1\n"
+ "+++ b/sample1\n"
+ "@@ -1 +1,3 @@\n"
+ " same line\n"
+ "+added line 1\n"
+ "+added line 2\n";
printDiff(example);
assertThat(underTest.changedLines()).isEqualTo(Collections.singletonMap(rootBaseDir.resolve("sample1"), new HashSet<>(Arrays.asList(2, 3))));
}
|
public static DataSource createDataSource(final File yamlFile) throws SQLException, IOException {
YamlJDBCConfiguration rootConfig = YamlEngine.unmarshal(yamlFile, YamlJDBCConfiguration.class);
return createDataSource(new YamlDataSourceConfigurationSwapper().swapToDataSources(rootConfig.getDataSources()), rootConfig);
}
|
@Test
void assertCreateDataSourceWithBytes() throws SQLException, IOException {
assertDataSource(YamlShardingSphereDataSourceFactory.createDataSource(readFile(getYamlFileUrl()).getBytes()));
}
|
public List<Expr> getPartitionExprs(Map<ColumnId, Column> idToColumn) {
List<Expr> result = new ArrayList<>(partitionExprs.size());
for (ColumnIdExpr columnIdExpr : partitionExprs) {
result.add(columnIdExpr.convertToColumnNameExpr(idToColumn));
}
return result;
}
|
@Test
public void testExpressionRangePartitionInfoSerialized_FunctionExpr() throws Exception {
ConnectContext ctx = starRocksAssert.getCtx();
String createSQL = "CREATE TABLE table_hitcount (\n" +
"databaseName varchar(200) NULL COMMENT \"\",\n" +
"tableName varchar(200) NULL COMMENT \"\",\n" +
"queryTime varchar(50) NULL COMMENT \"\",\n" +
"queryId varchar(50) NULL COMMENT \"\",\n" +
"partitionHitSum int(11) NULL COMMENT \"\",\n" +
"partitionSum int(11) NULL COMMENT \"\",\n" +
"tabletHitNum int(11) NULL COMMENT \"\",\n" +
"tabletSum int(11) NULL COMMENT \"\",\n" +
"startHitPartition varchar(20) NULL COMMENT \"\",\n" +
"dt date NULL COMMENT \"\",\n" +
"clusterAddress varchar(50) NULL COMMENT \"\",\n" +
"costTime int(11) NULL COMMENT \"\",\n" +
"tableQueryCount int(11) NULL COMMENT \"\"\n" +
") ENGINE=OLAP\n" +
"DUPLICATE KEY(databaseName, tableName)\n" +
"COMMENT \"OLAP\"\n" +
"PARTITION BY date_trunc('day', dt)\n" +
"DISTRIBUTED BY HASH(databaseName) BUCKETS 1\n" +
"PROPERTIES (\n" +
"\"replication_num\" = \"1\",\n" +
"\"in_memory\" = \"false\",\n" +
"\"enable_persistent_index\" = \"false\",\n" +
"\"replicated_storage\" = \"true\",\n" +
"\"compression\" = \"LZ4\"\n" +
");";
CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx);
StarRocksAssert.utCreateTableWithRetry(createTableStmt);
Database db = GlobalStateMgr.getCurrentState().getDb("test");
Table table = db.getTable("table_hitcount");
// serialize
String json = GsonUtils.GSON.toJson(table);
// deserialize
OlapTable readTable = GsonUtils.GSON.fromJson(json, OlapTable.class);
ExpressionRangePartitionInfo expressionRangePartitionInfo = (ExpressionRangePartitionInfo) readTable.getPartitionInfo();
List<Expr> readPartitionExprs = expressionRangePartitionInfo.getPartitionExprs(readTable.getIdToColumn());
Function fn = readPartitionExprs.get(0).getFn();
Assert.assertNotNull(fn);
starRocksAssert.dropTable("table_hitcount");
}
|
@Override
public KubevirtNode removeNode(String hostname) {
checkArgument(!Strings.isNullOrEmpty(hostname), ERR_NULL_HOSTNAME);
KubevirtNode node = nodeStore.removeNode(hostname);
log.info(String.format(MSG_NODE, hostname, MSG_REMOVED));
return node;
}
|
@Test(expected = IllegalArgumentException.class)
public void testRemoveNullNode() {
target.removeNode(null);
}
|
public SubsetItem getClientsSubset(String serviceName,
int minClusterSubsetSize,
int partitionId,
Map<URI, Double> possibleUris,
long version,
SimpleLoadBalancerState state)
{
SubsettingStrategy<URI> subsettingStrategy = _subsettingStrategyFactory.get(serviceName, minClusterSubsetSize, partitionId);
if (subsettingStrategy == null)
{
return new SubsetItem(false, false, possibleUris, Collections.emptySet());
}
DeterministicSubsettingMetadata metadata = _subsettingMetadataProvider.getSubsettingMetadata(state);
if (metadata == null)
{
return new SubsetItem(false, false, possibleUris, Collections.emptySet());
}
synchronized (_lockMap.computeIfAbsent(serviceName, name -> new Object()))
{
SubsetCache subsetCache = _subsetCache.get(serviceName);
if (isCacheValid(version, metadata.getPeerClusterVersion(), minClusterSubsetSize, subsetCache))
{
if (subsetCache.getWeightedSubsets().containsKey(partitionId))
{
return new SubsetItem(true, false, subsetCache.getWeightedSubsets().get(partitionId), Collections.emptySet());
}
}
Map<URI, Double> subsetMap = subsettingStrategy.getWeightedSubset(possibleUris, metadata);
if (subsetMap == null)
{
return new SubsetItem(false, false, possibleUris, Collections.emptySet());
}
else
{
LOG.debug("Force updating subset cache for service " + serviceName);
Set<URI> doNotSlowStartUris = new HashSet<>();
if (subsetCache != null)
{
Set<URI> oldPossibleUris = subsetCache.getPossibleUris().getOrDefault(partitionId, Collections.emptySet());
for (URI uri : subsetMap.keySet())
{
if (oldPossibleUris.contains(uri))
{
doNotSlowStartUris.add(uri);
}
}
subsetCache.setVersion(version);
subsetCache.setPeerClusterVersion(metadata.getPeerClusterVersion());
subsetCache.setMinClusterSubsetSize(minClusterSubsetSize);
subsetCache.getPossibleUris().put(partitionId, possibleUris.keySet());
subsetCache.getWeightedSubsets().put(partitionId, subsetMap);
}
else
{
LOG.info("Cluster subsetting enabled for service: " + serviceName);
Map<Integer, Set<URI>> servicePossibleUris = new HashMap<>();
Map<Integer, Map<URI, Double>> serviceWeightedSubset = new HashMap<>();
servicePossibleUris.put(partitionId, possibleUris.keySet());
serviceWeightedSubset.put(partitionId, subsetMap);
subsetCache = new SubsetCache(version, metadata.getPeerClusterVersion(),
minClusterSubsetSize, servicePossibleUris, serviceWeightedSubset);
_subsetCache.put(serviceName, subsetCache);
}
LOG.debug("Subset cache updated for service " + serviceName + ": " + subsetCache);
return new SubsetItem(true, true, subsetMap, doNotSlowStartUris);
}
}
}
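A minimal sketch (JDK only) of the per-service locking idiom used above: computeIfAbsent yields exactly one lock object per key, so threads working on the same service serialize while different services proceed in parallel:
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PerKeyLockDemo {
    private static final ConcurrentMap<String, Object> LOCKS = new ConcurrentHashMap<>();

    static void withLock(String key, Runnable action) {
        // at most one lock object exists per key, shared by all callers of that key
        synchronized (LOCKS.computeIfAbsent(key, k -> new Object())) {
            action.run();
        }
    }
}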
|
@Test
public void testMultiThreadCase() throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(THREAD_NUM * 3);
Mockito.when(_subsettingMetadataProvider.getSubsettingMetadata(_state))
.thenReturn(new DeterministicSubsettingMetadata(0, 5, 0));
for (int i = 0; i < THREAD_NUM; i++)
{
new Thread(() ->
{
SubsettingState.SubsetItem subsetItem = _subsettingState.getClientsSubset("test", 4, PARTITION_ID,
createUris(30), 0, _state);
verifySubset(subsetItem.getWeightedUriSubset().size(), 6);
latch.countDown();
}).start();
}
Thread.sleep(500);
Mockito.when(_subsettingMetadataProvider.getSubsettingMetadata(_state))
.thenReturn(new DeterministicSubsettingMetadata(0, 4, 1));
for (int i = 0; i < THREAD_NUM; i++)
{
new Thread(() ->
{
SubsettingState.SubsetItem subsetItem = _subsettingState.getClientsSubset("test", 4, PARTITION_ID,
createUris(30), 0, _state);
verifySubset(subsetItem.getWeightedUriSubset().size(), 8);
latch.countDown();
}).start();
}
Thread.sleep(500);
for (int i = 0; i < THREAD_NUM; i++)
{
new Thread(() ->
{
SubsettingState.SubsetItem subsetItem = _subsettingState.getClientsSubset("test", 4, PARTITION_ID,
createUris(28), 2, _state);
verifySubset(subsetItem.getWeightedUriSubset().size(), 7);
latch.countDown();
}).start();
}
if (!latch.await(5, TimeUnit.SECONDS))
{
fail("subsetting update failed to finish within 5 seconds");
}
if (_failure.get() != null)
throw _failure.get();
}
|
public static void convertFromLegacyTableConfig(TableConfig tableConfig) {
// It is possible that indexing as well as ingestion configs exist, in which case we always honor ingestion config.
IngestionConfig ingestionConfig = tableConfig.getIngestionConfig();
BatchIngestionConfig batchIngestionConfig =
(ingestionConfig != null) ? ingestionConfig.getBatchIngestionConfig() : null;
SegmentsValidationAndRetentionConfig validationConfig = tableConfig.getValidationConfig();
String segmentPushType = validationConfig.getSegmentPushType();
String segmentPushFrequency = validationConfig.getSegmentPushFrequency();
if (batchIngestionConfig == null) {
// Only create the config if any of the deprecated config is not null.
if (segmentPushType != null || segmentPushFrequency != null) {
batchIngestionConfig = new BatchIngestionConfig(null, segmentPushType, segmentPushFrequency);
}
} else {
// This should not happen typically, but since we are in repair mode, might as well cover this corner case.
if (batchIngestionConfig.getSegmentIngestionType() == null) {
batchIngestionConfig.setSegmentIngestionType(segmentPushType);
}
if (batchIngestionConfig.getSegmentIngestionFrequency() == null) {
batchIngestionConfig.setSegmentIngestionFrequency(segmentPushFrequency);
}
}
StreamIngestionConfig streamIngestionConfig =
(ingestionConfig != null) ? ingestionConfig.getStreamIngestionConfig() : null;
IndexingConfig indexingConfig = tableConfig.getIndexingConfig();
if (streamIngestionConfig == null) {
// Only set the new config if the deprecated one is set.
Map<String, String> streamConfigs = indexingConfig.getStreamConfigs();
if (MapUtils.isNotEmpty(streamConfigs)) {
streamIngestionConfig = new StreamIngestionConfig(Collections.singletonList(streamConfigs));
}
}
if (ingestionConfig == null) {
if (batchIngestionConfig != null || streamIngestionConfig != null) {
ingestionConfig = new IngestionConfig();
ingestionConfig.setBatchIngestionConfig(batchIngestionConfig);
ingestionConfig.setStreamIngestionConfig(streamIngestionConfig);
}
} else {
ingestionConfig.setBatchIngestionConfig(batchIngestionConfig);
ingestionConfig.setStreamIngestionConfig(streamIngestionConfig);
}
// Set the new config fields.
tableConfig.setIngestionConfig(ingestionConfig);
// Clear the deprecated ones.
indexingConfig.setStreamConfigs(null);
validationConfig.setSegmentPushFrequency(null);
validationConfig.setSegmentPushType(null);
}
|
@Test
public void testConvertFromLegacyTableConfig() {
String expectedPushFrequency = "HOURLY";
String expectedPushType = "APPEND";
Map<String, String> expectedStreamConfigsMap = getTestStreamConfigs();
TableConfig tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME)
.setSegmentPushFrequency(expectedPushFrequency).setSegmentPushType(expectedPushType)
.setStreamConfigs(expectedStreamConfigsMap).build();
// Before conversion, the ingestion config should be null.
Assert.assertNull(tableConfig.getIngestionConfig());
// Perform conversion.
TableConfigUtils.convertFromLegacyTableConfig(tableConfig);
// After conversion, assert that the configs are transferred ingestionConfig.
BatchIngestionConfig batchIngestionConfig = tableConfig.getIngestionConfig().getBatchIngestionConfig();
Assert.assertEquals(batchIngestionConfig.getSegmentIngestionFrequency(), expectedPushFrequency);
Assert.assertEquals(batchIngestionConfig.getSegmentIngestionType(), expectedPushType);
Map<String, String> actualStreamConfigsMap =
tableConfig.getIngestionConfig().getStreamIngestionConfig().getStreamConfigMaps().get(0);
Assert.assertEquals(actualStreamConfigsMap, expectedStreamConfigsMap);
// Assert that the deprecated fields are cleared.
Assert.assertNull(tableConfig.getIndexingConfig().getStreamConfigs());
SegmentsValidationAndRetentionConfig validationConfig = tableConfig.getValidationConfig();
Assert.assertNull(validationConfig.getSegmentPushFrequency());
Assert.assertNull(validationConfig.getSegmentPushType());
}
|
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
}
|
@Test
public void testFetchError() {
buildFetcher();
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NOT_LEADER_OR_FOLLOWER, 100L, 0));
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchRecords();
assertFalse(partitionRecords.containsKey(tp0));
}
|
public static void sleep(long timeMs) {
accept(SLEEP, timeMs);
}
|
@Test
public void testSleep() {
final long start = System.currentTimeMillis();
CommonUtils.sleep(SLEEP_TIME);
final long end = System.currentTimeMillis();
// allow a timing error of up to 20 ms in either direction
assertTrue((end - start < SLEEP_TIME + DIFF) && (end - start > SLEEP_TIME - DIFF));
}
|
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
if (!(statement.getStatement() instanceof CreateSource)
&& !(statement.getStatement() instanceof CreateAsSelect)) {
return statement;
}
try {
if (statement.getStatement() instanceof CreateSource) {
final ConfiguredStatement<CreateSource> createStatement =
(ConfiguredStatement<CreateSource>) statement;
return (ConfiguredStatement<T>) forCreateStatement(createStatement).orElse(createStatement);
} else {
final ConfiguredStatement<CreateAsSelect> createStatement =
(ConfiguredStatement<CreateAsSelect>) statement;
return (ConfiguredStatement<T>) forCreateAsStatement(createStatement).orElse(
createStatement);
}
} catch (final KsqlStatementException e) {
throw e;
} catch (final KsqlException e) {
throw new KsqlStatementException(
ErrorMessageUtil.buildErrorMessage(e),
statement.getMaskedStatementText(),
e.getCause());
}
}
|
@Test
public void shouldInjectForCtStatement() {
// Given:
givenKeyAndValueInferenceSupported();
// When:
final ConfiguredStatement<CreateTable> result = injector.inject(ctStatement);
// Then:
assertThat(result.getStatement().getElements(),
is(combineElements(INFERRED_KSQL_KEY_SCHEMA_TABLE, INFERRED_KSQL_VALUE_SCHEMA)));
assertThat(result.getMaskedStatementText(), is(
"CREATE TABLE `ct` ("
+ "`key` STRING PRIMARY KEY, "
+ "`intField` INTEGER, "
+ "`bigIntField` BIGINT, "
+ "`doubleField` DOUBLE, "
+ "`stringField` STRING, "
+ "`booleanField` BOOLEAN, "
+ "`arrayField` ARRAY<INTEGER>, "
+ "`mapField` MAP<STRING, BIGINT>, "
+ "`structField` STRUCT<`s0` BIGINT>, "
+ "`decimalField` DECIMAL(4, 2)) "
+ "WITH (KAFKA_TOPIC='some-topic', KEY_FORMAT='protobuf', VALUE_FORMAT='avro');"
));
}
|
public static Namespace of(String... levels) {
Preconditions.checkArgument(null != levels, "Cannot create Namespace from null array");
if (levels.length == 0) {
return empty();
}
for (String level : levels) {
Preconditions.checkNotNull(level, "Cannot create a namespace with a null level");
Preconditions.checkArgument(
!CONTAINS_NULL_CHARACTER.test(level),
"Cannot create a namespace with the null-byte character");
}
return new Namespace(levels);
}
|
@Test
public void testWithNullInLevel() {
assertThatThrownBy(() -> Namespace.of("a", null, "b"))
.isInstanceOf(NullPointerException.class)
.hasMessage("Cannot create a namespace with a null level");
}
|
public static SchemaAndValue parseString(String value) {
if (value == null) {
return NULL_SCHEMA_AND_VALUE;
}
if (value.isEmpty()) {
return new SchemaAndValue(Schema.STRING_SCHEMA, value);
}
ValueParser parser = new ValueParser(new Parser(value));
return parser.parse(false);
}
|
@Test
public void shouldParseNestedArray() {
SchemaAndValue schemaAndValue = Values.parseString("[[]]");
assertEquals(Type.ARRAY, schemaAndValue.schema().type());
assertEquals(Type.ARRAY, schemaAndValue.schema().valueSchema().type());
}
|
public static List<WeightedHostAddress> prioritize(WeightedHostAddress[] records) {
final List<WeightedHostAddress> result = new LinkedList<>();
// sort by priority (ascending)
SortedMap<Integer, Set<WeightedHostAddress>> byPriority = new TreeMap<>();
for(final WeightedHostAddress record : records) {
if (byPriority.containsKey(record.getPriority())) {
byPriority.get(record.getPriority()).add(record);
} else {
final Set<WeightedHostAddress> set = new HashSet<>();
set.add(record);
byPriority.put(record.getPriority(), set);
}
}
// now, randomize each priority set by weight.
for(Map.Entry<Integer, Set<WeightedHostAddress>> weights : byPriority.entrySet()) {
List<WeightedHostAddress> zeroWeights = new LinkedList<>();
int totalWeight = 0;
final Iterator<WeightedHostAddress> i = weights.getValue().iterator();
while (i.hasNext()) {
final WeightedHostAddress next = i.next();
if (next.weight == 0) {
// set aside, as these should be considered last according to the RFC.
zeroWeights.add(next);
i.remove();
continue;
}
totalWeight += next.getWeight();
}
int iterationWeight = totalWeight;
Iterator<WeightedHostAddress> iter = weights.getValue().iterator();
while (iter.hasNext()) {
int needle = new Random().nextInt(iterationWeight);
while (true) {
final WeightedHostAddress record = iter.next();
needle -= record.getWeight();
if (needle <= 0) {
result.add(record);
iter.remove();
iterationWeight -= record.getWeight();
break;
}
}
iter = weights.getValue().iterator();
}
// finally, append the hosts with zero priority (shuffled)
Collections.shuffle(zeroWeights);
result.addAll(zeroWeights);
}
return result;
}
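A minimal standalone sketch (an assumption, JDK only) of the weighted pick the inner loop performs, in the spirit of RFC 2782: draw a needle in [0, totalWeight) and walk the entries subtracting weights until the needle drops below zero:
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Random;

public class WeightedPickDemo {
    public static void main(String[] args) {
        Map<String, Integer> weights = new LinkedHashMap<>();
        weights.put("a", 5);
        weights.put("b", 1);
        int total = weights.values().stream().mapToInt(Integer::intValue).sum();
        int needle = new Random().nextInt(total); // needle in [0, total)
        for (Map.Entry<String, Integer> e : weights.entrySet()) {
            needle -= e.getValue();
            if (needle < 0) { // "a" wins with probability 5/6, "b" with 1/6
                System.out.println("picked " + e.getKey());
                break;
            }
        }
    }
}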
|
@Test
public void testOneHostZeroWeight() throws Exception {
// setup
final DNSUtil.WeightedHostAddress host = new DNSUtil.WeightedHostAddress("host", 5222, false, 1, 0);
// do magic
final List<DNSUtil.WeightedHostAddress> result = DNSUtil.prioritize(new DNSUtil.WeightedHostAddress[]{host});
// verify
assertEquals(1, result.size());
assertEquals(host, result.get(0));
}
|
static String getPoolNameFromPodName(String clusterName, String podName) {
return podName.substring(clusterName.length() + 1, podName.lastIndexOf("-"));
}
|
@Test
public void testPoolNameFromPodName() {
assertThat(ReconcilerUtils.getPoolNameFromPodName("my-cluster", "my-cluster-brokers-2"), is("brokers"));
assertThat(ReconcilerUtils.getPoolNameFromPodName("my-cluster", "my-cluster-new-brokers-2"), is("new-brokers"));
assertThat(ReconcilerUtils.getPoolNameFromPodName("my-cluster", "my-cluster-brokers2-2"), is("brokers2"));
}
|
@Override
public Connection connect(String url, Properties info) throws SQLException {
// calciteConnection is initialized with an empty Beam schema,
// we need to populate it with pipeline options, load table providers, etc
return JdbcConnection.initialize((CalciteConnection) super.connect(url, info));
}
|
@Test
public void testSelectsFromExistingComplexTable() throws Exception {
TestTableProvider tableProvider = new TestTableProvider();
Connection connection = JdbcDriver.connect(tableProvider, PipelineOptionsFactory.create());
connection
.createStatement()
.executeUpdate(
"CREATE EXTERNAL TABLE person ( \n"
+ "description VARCHAR, \n"
+ "nestedRow ROW< \n"
+ " id BIGINT, \n"
+ " name VARCHAR> \n"
+ ") \n"
+ "TYPE 'test'");
tableProvider.addRows(
"person",
row(COMPLEX_SCHEMA, "description1", row(1L, "aaa")),
row(COMPLEX_SCHEMA, "description2", row(2L, "bbb")));
ResultSet selectResult =
connection
.createStatement()
.executeQuery("SELECT person.nestedRow.id, person.nestedRow.name FROM person");
List<Row> resultRows =
readResultSet(selectResult).stream()
.map(values -> values.stream().collect(toRow(BASIC_SCHEMA)))
.collect(Collectors.toList());
assertThat(resultRows, containsInAnyOrder(row(1L, "aaa"), row(2L, "bbb")));
}
|
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
writeToOutput(out);
}
|
@Test
public void testWrite() throws IOException {
// create a mock for DataOutput that will be used in the write method
// this way we can capture and verify if correct arguments were passed
DataOutput out = mock(DataOutput.class);
// register expected method calls for void functions
// so that we can verify what was called after the method call finishes
doNothing().when(out).writeByte(anyInt());
doNothing().when(out).writeInt(anyInt());
doNothing().when(out).write(any(byte[].class), anyInt(), anyInt());
doNothing().when(out).write(any(byte[].class));
// call the method we want to test with the mocked input
split.write(out);
// verify the method calls on the mocked object in the order of the calls
InOrder inorder = inOrder(out);
inorder.verify(out, times(1)).writeByte(eq(fileSplitName.length()));
inorder.verify(out, times(1)).write(aryEq(Text.encode(fileSplitName).array()), eq(0), eq(fileSplitName.length()));
inorder.verify(out, times(1)).writeInt(eq(basePath.length()));
inorder.verify(out, times(1)).write(aryEq(getUTF8Bytes(basePath)));
inorder.verify(out, times(1)).writeInt(eq(maxCommitTime.length()));
inorder.verify(out, times(1)).write(aryEq(getUTF8Bytes(maxCommitTime)));
inorder.verify(out, times(1)).writeInt(eq(deltaLogPaths.size()));
inorder.verify(out, times(1)).writeInt(eq(deltaLogPaths.get(0).length()));
inorder.verify(out, times(1)).write(aryEq(getUTF8Bytes(deltaLogPaths.get(0))));
inorder.verify(out, times(1)).writeBoolean(false);
// verify there are no more interactions happened on the mocked object
inorder.verifyNoMoreInteractions();
}
|
@Override
public CiConfiguration loadConfiguration() {
String revision = system.envVariable("BUILDKITE_COMMIT");
return new CiConfigurationImpl(revision, getName());
}
|
@Test
public void loadConfiguration() {
setEnvVariable("CI", "true");
setEnvVariable("BUILDKITE", "true");
setEnvVariable("BUILDKITE_COMMIT", "abd12fc");
assertThat(underTest.loadConfiguration().getScmRevision()).hasValue("abd12fc");
}
|
@Override
public String toEncrypted(final Session<?> session, final String directoryId, final String filename, final EnumSet<Path.Type> type) throws BackgroundException {
final String ciphertextName = cryptomator.getFileNameCryptor().encryptFilename(BaseEncoding.base64Url(),
filename, directoryId.getBytes(StandardCharsets.UTF_8)) + EXTENSION_REGULAR;
if(log.isDebugEnabled()) {
log.debug(String.format("Encrypted filename %s to %s", filename, ciphertextName));
}
return cryptomator.getFilenameProvider().deflate(session, ciphertextName);
}
|
@Test(expected = NotfoundException.class)
public void testToEncryptedInvalidArgument() throws Exception {
final Path home = new Path("/vault", EnumSet.of(Path.Type.directory));
final CryptoVault vault = new CryptoVault(home);
final CryptoDirectory provider = new CryptoDirectoryV7Provider(home, vault);
provider.toEncrypted(new NullSession(new Host(new TestProtocol())), null, new Path("/vault/f", EnumSet.of(Path.Type.file)));
}
|
public static void createNewFile(String filePath) throws IOException {
File file = new File(filePath);
if (file.exists()) {
file.delete();
}
if (!file.getParentFile().exists()) {
createParentFile(file);
}
file.createNewFile();
}
|
@Test
public void createNewFile() throws IOException {
// create new file
FileUtils.createNewFile("/tmp/test.txt");
Assertions.assertEquals("", FileUtils.readFileToStr(Paths.get("/tmp/test.txt")));
// delete exist file and create new file
FileUtils.writeStringToFile("/tmp/test2.txt", "test");
Path test2 = Paths.get("/tmp/test2.txt");
Assertions.assertEquals("test", FileUtils.readFileToStr(test2).trim());
FileUtils.createNewFile("/tmp/test2.txt");
Assertions.assertEquals("", FileUtils.readFileToStr(test2));
// create new file with not exist folder
FileUtils.createNewFile("/tmp/newfolder/test.txt");
Assertions.assertEquals("", FileUtils.readFileToStr(Paths.get("/tmp/newfolder/test.txt")));
FileUtils.createNewFile("/tmp/newfolder/newfolder2/newfolde3/test.txt");
Assertions.assertEquals(
"",
FileUtils.readFileToStr(Paths.get("/tmp/newfolder/newfolder2/newfolde3/test.txt")));
}
|
public static String getTaskManagerShellCommand(
org.apache.flink.configuration.Configuration flinkConfig,
ContaineredTaskManagerParameters tmParams,
String configDirectory,
String logDirectory,
boolean hasLogback,
boolean hasLog4j,
boolean hasKrb5,
Class<?> mainClass,
String mainArgs) {
final Map<String, String> startCommandValues = new HashMap<>();
startCommandValues.put("java", "$JAVA_HOME/bin/java");
final TaskExecutorProcessSpec taskExecutorProcessSpec =
tmParams.getTaskExecutorProcessSpec();
startCommandValues.put(
"jvmmem", ProcessMemoryUtils.generateJvmParametersStr(taskExecutorProcessSpec));
List<ConfigOption<String>> jvmOptions =
Arrays.asList(
CoreOptions.FLINK_DEFAULT_JVM_OPTIONS,
CoreOptions.FLINK_JVM_OPTIONS,
CoreOptions.FLINK_DEFAULT_TM_JVM_OPTIONS,
CoreOptions.FLINK_TM_JVM_OPTIONS);
startCommandValues.put("jvmopts", generateJvmOptsString(flinkConfig, jvmOptions, hasKrb5));
String logging = "";
if (hasLogback || hasLog4j) {
logging = "-Dlog.file=" + logDirectory + "/taskmanager.log";
if (hasLogback) {
logging += " -Dlogback.configurationFile=file:" + configDirectory + "/logback.xml";
}
if (hasLog4j) {
logging += " -Dlog4j.configuration=file:" + configDirectory + "/log4j.properties";
logging +=
" -Dlog4j.configurationFile=file:" + configDirectory + "/log4j.properties";
}
}
startCommandValues.put("logging", logging);
startCommandValues.put("class", mainClass.getName());
startCommandValues.put(
"redirects",
"1> "
+ logDirectory
+ "/taskmanager.out "
+ "2> "
+ logDirectory
+ "/taskmanager.err");
String argsStr =
TaskExecutorProcessUtils.generateDynamicConfigsStr(taskExecutorProcessSpec)
+ " --configDir "
+ configDirectory;
if (!mainArgs.isEmpty()) {
argsStr += " " + mainArgs;
}
startCommandValues.put("args", argsStr);
final String commandTemplate = flinkConfig.get(YARN_CONTAINER_START_COMMAND_TEMPLATE);
String startCommand = getStartCommand(commandTemplate, startCommandValues);
LOG.debug("TaskManager start command: " + startCommand);
return startCommand;
}
|
@Test
void testGetTaskManagerShellCommand() {
final Configuration cfg = new Configuration();
final TaskExecutorProcessSpec taskExecutorProcessSpec =
new TaskExecutorProcessSpec(
new CPUResource(1.0),
new MemorySize(0), // frameworkHeapSize
new MemorySize(0), // frameworkOffHeapSize
new MemorySize(111), // taskHeapSize
new MemorySize(0), // taskOffHeapSize
new MemorySize(222), // networkMemSize
new MemorySize(0), // managedMemorySize
new MemorySize(333), // jvmMetaspaceSize
new MemorySize(0), // jvmOverheadSize
Collections.emptyList());
final ContaineredTaskManagerParameters containeredParams =
new ContaineredTaskManagerParameters(taskExecutorProcessSpec, new HashMap<>());
// no logging, with/out krb5
final String java = "$JAVA_HOME/bin/java";
final String jvmmem =
"-Xmx111 -Xms111 -XX:MaxDirectMemorySize=222 -XX:MaxMetaspaceSize=333";
final String defaultJvmOpts = "-DdefaultJvm"; // if set
final String jvmOpts = "-Djvm"; // if set
final String defaultTmJvmOpts = "-DdefaultTmJvm"; // if set
final String tmJvmOpts = "-DtmJvm"; // if set
final String logfile = "-Dlog.file=./logs/taskmanager.log"; // if set
final String logback = "-Dlogback.configurationFile=file:./conf/logback.xml"; // if set
final String log4j =
"-Dlog4j.configuration=file:./conf/log4j.properties"
+ " -Dlog4j.configurationFile=file:./conf/log4j.properties"; // if set
final String mainClass = "org.apache.flink.yarn.UtilsTest";
final String dynamicConfigs =
TaskExecutorProcessUtils.generateDynamicConfigsStr(taskExecutorProcessSpec).trim();
final String basicArgs = "--configDir ./conf";
final String mainArgs = "-Djobmanager.rpc.address=host1 -Dkey.a=v1";
final String args = dynamicConfigs + " " + basicArgs + " " + mainArgs;
final String redirects = "1> ./logs/taskmanager.out 2> ./logs/taskmanager.err";
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
false,
false,
false,
this.getClass(),
""))
.isEqualTo(
String.join(
" ",
java,
jvmmem,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
mainClass,
dynamicConfigs,
basicArgs,
redirects));
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
false,
false,
false,
this.getClass(),
mainArgs))
.isEqualTo(
String.join(
" ",
java,
jvmmem,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
mainClass,
args,
redirects));
final String krb5 = "-Djava.security.krb5.conf=krb5.conf";
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
false,
false,
true,
this.getClass(),
mainArgs))
.isEqualTo(
String.join(
" ",
java,
jvmmem,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
krb5,
mainClass,
args,
redirects));
// logback only, with/out krb5
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
true,
false,
false,
this.getClass(),
mainArgs))
.isEqualTo(
String.join(
" ",
java,
jvmmem,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
logfile,
logback,
mainClass,
args,
redirects));
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
true,
false,
true,
this.getClass(),
mainArgs))
.isEqualTo(
String.join(
" ",
java,
jvmmem,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
krb5,
logfile,
logback,
mainClass,
args,
redirects));
// log4j, with/out krb5
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
false,
true,
false,
this.getClass(),
mainArgs))
.isEqualTo(
String.join(
" ",
java,
jvmmem,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
logfile,
log4j,
mainClass,
args,
redirects));
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
false,
true,
true,
this.getClass(),
mainArgs))
.isEqualTo(
String.join(
" ",
java,
jvmmem,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
krb5,
logfile,
log4j,
mainClass,
args,
redirects));
// logback + log4j, with/out krb5
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
true,
true,
false,
this.getClass(),
mainArgs))
.isEqualTo(
String.join(
" ",
java,
jvmmem,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
logfile,
logback,
log4j,
mainClass,
args,
redirects));
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
true,
true,
true,
this.getClass(),
mainArgs))
.isEqualTo(
String.join(
" ",
java,
jvmmem,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
krb5,
logfile,
logback,
log4j,
mainClass,
args,
redirects));
// logback + log4j, with/out krb5, different JVM opts
cfg.set(CoreOptions.FLINK_DEFAULT_JVM_OPTIONS, defaultJvmOpts);
cfg.set(CoreOptions.FLINK_JVM_OPTIONS, jvmOpts);
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
true,
true,
false,
this.getClass(),
mainArgs))
.isEqualTo(
String.join(
" ",
java,
jvmmem,
defaultJvmOpts,
jvmOpts,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
logfile,
logback,
log4j,
mainClass,
args,
redirects));
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
true,
true,
true,
this.getClass(),
mainArgs))
.isEqualTo(
String.join(
" ",
java,
jvmmem,
defaultJvmOpts,
jvmOpts,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
krb5,
logfile,
logback,
log4j,
mainClass,
args,
redirects));
// logback + log4j, with/out krb5, different TM JVM opts
cfg.set(CoreOptions.FLINK_DEFAULT_TM_JVM_OPTIONS, defaultTmJvmOpts);
cfg.set(CoreOptions.FLINK_TM_JVM_OPTIONS, tmJvmOpts);
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
true,
true,
false,
this.getClass(),
mainArgs))
.isEqualTo(
String.join(
" ",
java,
jvmmem,
defaultJvmOpts,
jvmOpts,
defaultTmJvmOpts,
tmJvmOpts,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
logfile,
logback,
log4j,
mainClass,
args,
redirects));
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
true,
true,
true,
this.getClass(),
mainArgs))
.isEqualTo(
String.join(
" ",
java,
jvmmem,
defaultJvmOpts,
jvmOpts,
defaultTmJvmOpts,
tmJvmOpts,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
krb5,
logfile,
logback,
log4j,
mainClass,
args,
redirects));
// now try some configurations with different yarn.container-start-command-template
cfg.set(
YARN_CONTAINER_START_COMMAND_TEMPLATE,
"%java% 1 %jvmmem% 2 %jvmopts% 3 %logging% 4 %class% 5 %args% 6 %redirects%");
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
true,
true,
true,
this.getClass(),
mainArgs))
.isEqualTo(
String.join(
" ",
java,
"1",
jvmmem,
"2",
defaultJvmOpts,
jvmOpts,
defaultTmJvmOpts,
tmJvmOpts,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
krb5,
"3",
logfile,
logback,
log4j,
"4",
mainClass,
"5",
args,
"6",
redirects));
cfg.set(
YARN_CONTAINER_START_COMMAND_TEMPLATE,
"%java% %logging% %jvmopts% %jvmmem% %class% %args% %redirects%");
assertThat(
Utils.getTaskManagerShellCommand(
cfg,
containeredParams,
"./conf",
"./logs",
true,
true,
true,
this.getClass(),
mainArgs))
.isEqualTo(
String.join(
" ",
java,
logfile,
logback,
log4j,
defaultJvmOpts,
jvmOpts,
defaultTmJvmOpts,
tmJvmOpts,
Utils.IGNORE_UNRECOGNIZED_VM_OPTIONS,
krb5,
jvmmem,
mainClass,
args,
redirects));
}
|
@Override
public CompletableFuture<Collection<TaskManagerLocation>> getPreferredLocations(
final ExecutionVertexID executionVertexId,
final Set<ExecutionVertexID> producersToIgnore) {
checkNotNull(executionVertexId);
checkNotNull(producersToIgnore);
final Collection<TaskManagerLocation> preferredLocationsBasedOnState =
getPreferredLocationsBasedOnState(executionVertexId);
if (!preferredLocationsBasedOnState.isEmpty()) {
return CompletableFuture.completedFuture(preferredLocationsBasedOnState);
}
return getPreferredLocationsBasedOnInputs(executionVertexId, producersToIgnore);
}
|
@Test
void testInputLocationsChoosesInputOfFewerLocations() {
final TestingInputsLocationsRetriever.Builder locationRetrieverBuilder =
new TestingInputsLocationsRetriever.Builder();
final ExecutionVertexID consumerId = new ExecutionVertexID(new JobVertexID(), 0);
int parallelism1 = 3;
final JobVertexID jobVertexId1 = new JobVertexID();
final List<ExecutionVertexID> producers1 = new ArrayList<>(parallelism1);
for (int i = 0; i < parallelism1; i++) {
final ExecutionVertexID producerId = new ExecutionVertexID(jobVertexId1, i);
producers1.add(producerId);
}
locationRetrieverBuilder.connectConsumerToProducers(consumerId, producers1);
final JobVertexID jobVertexId2 = new JobVertexID();
int parallelism2 = 5;
final List<ExecutionVertexID> producers2 = new ArrayList<>(parallelism2);
for (int i = 0; i < parallelism2; i++) {
final ExecutionVertexID producerId = new ExecutionVertexID(jobVertexId2, i);
producers2.add(producerId);
}
locationRetrieverBuilder.connectConsumerToProducers(consumerId, producers2);
final TestingInputsLocationsRetriever inputsLocationsRetriever =
locationRetrieverBuilder.build();
final List<TaskManagerLocation> expectedLocations = new ArrayList<>(parallelism1);
for (int i = 0; i < parallelism1; i++) {
inputsLocationsRetriever.assignTaskManagerLocation(producers1.get(i));
expectedLocations.add(
inputsLocationsRetriever
.getTaskManagerLocation(producers1.get(i))
.get()
.getNow(null));
}
for (int i = 0; i < parallelism2; i++) {
inputsLocationsRetriever.assignTaskManagerLocation(producers2.get(i));
}
final PreferredLocationsRetriever locationsRetriever =
new DefaultPreferredLocationsRetriever(
id -> Optional.empty(), inputsLocationsRetriever);
final CompletableFuture<Collection<TaskManagerLocation>> preferredLocations =
locationsRetriever.getPreferredLocations(consumerId, Collections.emptySet());
assertThat(preferredLocations.getNow(null))
.containsExactlyInAnyOrderElementsOf(expectedLocations);
}
|
public static int checkPositive(int i, String name) {
if (i <= INT_ZERO) {
throw new IllegalArgumentException(name + " : " + i + " (expected: > 0)");
}
return i;
}
|
@Test
public void testCheckPositiveFloatString() {
Exception actualEx = null;
try {
ObjectUtil.checkPositive(POS_ONE_FLOAT, NUM_POS_NAME);
} catch (Exception e) {
actualEx = e;
}
assertNull(actualEx, TEST_RESULT_NULLEX_NOK);
actualEx = null;
try {
ObjectUtil.checkPositive(ZERO_FLOAT, NUM_ZERO_NAME);
} catch (Exception e) {
actualEx = e;
}
assertNotNull(actualEx, TEST_RESULT_NULLEX_OK);
assertTrue(actualEx instanceof IllegalArgumentException, TEST_RESULT_EXTYPE_NOK);
actualEx = null;
try {
ObjectUtil.checkPositive(NEG_ONE_FLOAT, NUM_NEG_NAME);
} catch (Exception e) {
actualEx = e;
}
assertNotNull(actualEx, TEST_RESULT_NULLEX_OK);
assertTrue(actualEx instanceof IllegalArgumentException, TEST_RESULT_EXTYPE_NOK);
}
|
public static Class findClassByName(String className) {
try {
return Class.forName(className);
} catch (Exception e) {
throw new NacosRuntimeException(SERVER_ERROR, "this class name not found");
}
}
|
@Test
void testFindClassByName2() {
assertThrows(NacosRuntimeException.class, () -> {
ClassUtils.findClassByName("not.exist.Class");
});
}
|
@Override
public void putAll(Map<String, ?> vars) {
throw new UnsupportedOperationException();
}
|
@Test
public void testPutAllJMeterVariables() {
assertThrowsUnsupportedOperation(
() -> unmodifiables.putAll(vars));
}
|
public Optional<UpdateCenter> getUpdateCenter() {
return getUpdateCenter(false);
}
|
@Test
public void cache_data() throws Exception {
when(reader.readString(new URI(URL_DEFAULT_VALUE), StandardCharsets.UTF_8)).thenReturn("sonar.versions=2.2,2.3");
underTest.getUpdateCenter();
underTest.getUpdateCenter();
verify(reader, times(1)).readString(new URI(URL_DEFAULT_VALUE), StandardCharsets.UTF_8);
}
|
@Nullable
String updateMessage(final String message)
{
final String[] messageWords = WHITESPACE_REGEXP.split(message);
boolean editedMessage = false;
for (int i = 0; i < messageWords.length; i++)
{
// Remove tags except for <lt> and <gt>
final String trigger = Text.removeFormattingTags(messageWords[i]);
final Emoji emoji = Emoji.getEmoji(trigger);
if (emoji == null)
{
continue;
}
final int emojiId = iconIds[emoji.ordinal()];
messageWords[i] = messageWords[i].replace(trigger, "<img=" + chatIconManager.chatIconIndex(emojiId) + ">");
editedMessage = true;
}
// If we haven't edited the message at all, don't update it.
if (!editedMessage)
{
return null;
}
return String.join(" ", messageWords);
}
|
@Test
public void testEmojiUpdateMessage()
{
String PARTY_POPPER = "<img=" + Emoji.getEmoji("@@@").ordinal() + '>';
String OPEN_MOUTH = "<img=" + Emoji.getEmoji(":O").ordinal() + '>';
assertNull(emojiPlugin.updateMessage("@@@@@"));
assertEquals(PARTY_POPPER, emojiPlugin.updateMessage("@@@"));
assertEquals(PARTY_POPPER + ' ' + PARTY_POPPER, emojiPlugin.updateMessage("@@@ @@@"));
assertEquals(PARTY_POPPER + ' ' + OPEN_MOUTH, emojiPlugin.updateMessage("@@@\u00A0:O"));
assertEquals(PARTY_POPPER + ' ' + OPEN_MOUTH + ' ' + PARTY_POPPER, emojiPlugin.updateMessage("@@@\u00A0:O @@@"));
assertEquals(PARTY_POPPER + " Hello World " + PARTY_POPPER, emojiPlugin.updateMessage("@@@\u00A0Hello World\u00A0@@@"));
}
|
public List<Favorite> search(String userId, String appId, Pageable page) {
boolean isUserIdEmpty = Strings.isNullOrEmpty(userId);
boolean isAppIdEmpty = Strings.isNullOrEmpty(appId);
if (isAppIdEmpty && isUserIdEmpty) {
throw new BadRequestException("user id and app id can't be empty at the same time");
}
if (!isUserIdEmpty) {
UserInfo loginUser = userInfoHolder.getUser();
//user can only search his own favorite app
if (!Objects.equals(loginUser.getUserId(), userId)) {
userId = loginUser.getUserId();
}
}
//search by userId
if (isAppIdEmpty && !isUserIdEmpty) {
return favoriteRepository.findByUserIdOrderByPositionAscDataChangeCreatedTimeAsc(userId, page);
}
//search by appId
if (!isAppIdEmpty && isUserIdEmpty) {
return favoriteRepository.findByAppIdOrderByPositionAscDataChangeCreatedTimeAsc(appId, page);
}
//search by userId and appId
return Collections.singletonList(favoriteRepository.findByUserIdAndAppId(userId, appId));
}
|
@Test(expected = BadRequestException.class)
@Sql(scripts = "/sql/favorites/favorites.sql", executionPhase = Sql.ExecutionPhase.BEFORE_TEST_METHOD)
@Sql(scripts = "/sql/cleanup.sql", executionPhase = Sql.ExecutionPhase.AFTER_TEST_METHOD)
public void testSearchWithErrorParams() {
favoriteService.search(null, null, PageRequest.of(0, 10));
}
|
@Override
public void execute(ComputationStep.Context context) {
new PathAwareCrawler<>(
FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository)
.buildFor(List.of(duplicationFormula)))
.visit(treeRootHolder.getRoot());
}
|
@Test
public void compute_duplicated_lines_counts_lines_from_original_and_InnerDuplicate_of_a_single_line() {
duplicationRepository.addDuplication(FILE_1_REF, new TextBlock(1, 1), new TextBlock(2, 2));
setNewLines(FILE_1);
underTest.execute(new TestComputationStepContext());
assertRawMeasureValue(FILE_1_REF, NEW_DUPLICATED_LINES_KEY, 2);
}
|
public String getMetricsName() {
return metricsName;
}
|
@Test
public void testGetMetricsName() {
assertThat(endpoint.getMetricsName(), is(METRICS_NAME));
}
|
public static DateTime parseRFC2822(CharSequence source) {
if (source == null) {
return null;
}
// issue#I9C2D4
if(StrUtil.contains(source, ',')){
if(StrUtil.contains(source, "星期")){
return parse(source, FastDateFormat.getInstance(DatePattern.HTTP_DATETIME_PATTERN, Locale.CHINA));
}
return parse(source, DatePattern.HTTP_DATETIME_FORMAT_Z);
}
if(StrUtil.contains(source, "星期")){
return parse(source, FastDateFormat.getInstance(DatePattern.JDK_DATETIME_PATTERN, Locale.CHINA));
}
return parse(source, DatePattern.JDK_DATETIME_FORMAT);
}
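// A hedged sketch of the two date shapes this method distinguishes, using plain JDK
// formatters instead of Hutool's DatePattern constants (whose exact patterns are
// assumed from context); the "星期" branches handle the same shapes with Chinese
// weekday names, which is why Locale.CHINA is passed there.
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Locale;

class Rfc2822Sketch {
    public static void main(String[] args) throws ParseException {
        // With a comma: HTTP/RFC 1123 style, e.g. "Wed, 16 Sep 2009 11:26:23 GMT"
        SimpleDateFormat http = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
        System.out.println(http.parse("Wed, 16 Sep 2009 11:26:23 GMT"));
        // Without a comma: java.util.Date#toString style, e.g. "Wed Sep 16 11:26:23 CST 2009"
        SimpleDateFormat jdk = new SimpleDateFormat("EEE MMM dd HH:mm:ss zzz yyyy", Locale.US);
        System.out.println(jdk.parse("Wed Sep 16 11:26:23 CST 2009"));
    }
}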
|
@Test
public void parseRFC2822Test() {
final String dateStr = "Wed Sep 16 11:26:23 CST 2009";
final SimpleDateFormat sdf = new SimpleDateFormat(DatePattern.JDK_DATETIME_PATTERN, Locale.US);
// "Asia/Shanghai" is a region-named standard time whose abbreviation in China is CST;
// parsing CST with "GMT+08:00" instead of "Asia/Shanghai" would yield a result that is
// off by one hour.
sdf.setTimeZone(TimeZone.getTimeZone("Asia/Shanghai"));
final DateTime parse = DateUtil.parse(dateStr, sdf);
DateTime dateTime = DateUtil.parseRFC2822(dateStr);
assertEquals(parse, dateTime);
dateTime = DateUtil.parse(dateStr);
assertEquals(parse, dateTime);
}
|
static Set<String> findVariables(List<Statement> statements, EncodedValueLookup lookup) {
List<List<Statement>> groups = CustomModelParser.splitIntoGroup(statements);
Set<String> variables = new LinkedHashSet<>();
for (List<Statement> group : groups) findVariablesForGroup(variables, group, lookup);
return variables;
}
|
@Test
public void runVariables() {
DecimalEncodedValue prio1 = new DecimalEncodedValueImpl("my_priority", 5, 1, false);
IntEncodedValueImpl prio2 = new IntEncodedValueImpl("my_priority2", 5, -5, false, false);
EncodedValueLookup lookup = new EncodingManager.Builder().add(prio1).add(prio2).build();
assertEquals(Set.of(), findVariables("2", lookup));
assertEquals(Set.of("my_priority"), findVariables("2*my_priority", lookup));
Exception ex = assertThrows(IllegalArgumentException.class, () -> findVariables("-2*my_priority", lookup));
assertTrue(ex.getMessage().contains("illegal expression as it can result in a negative weight"));
}
|
public Fetch<K, V> collectFetch(final FetchBuffer fetchBuffer) {
final Fetch<K, V> fetch = Fetch.empty();
final Queue<CompletedFetch> pausedCompletedFetches = new ArrayDeque<>();
int recordsRemaining = fetchConfig.maxPollRecords;
try {
while (recordsRemaining > 0) {
final CompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();
if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
final CompletedFetch completedFetch = fetchBuffer.peek();
if (completedFetch == null)
break;
if (!completedFetch.isInitialized()) {
try {
fetchBuffer.setNextInLineFetch(initialize(completedFetch));
} catch (Exception e) {
// Remove a completedFetch upon a parse exception if (1) it contains no records, and
// (2) there are no fetched records with actual content preceding this exception.
// The first condition ensures that the queue does not get stuck on the same
// completedFetch in cases such as TopicAuthorizationException, and the second
// condition avoids potential data loss due to an exception in a following record.
if (fetch.isEmpty() && FetchResponse.recordsOrFail(completedFetch.partitionData).sizeInBytes() == 0)
fetchBuffer.poll();
throw e;
}
} else {
fetchBuffer.setNextInLineFetch(completedFetch);
}
fetchBuffer.poll();
} else if (subscriptions.isPaused(nextInLineFetch.partition)) {
// when the partition is paused we add the records back to the completedFetches queue instead of draining
// them so that they can be returned on a subsequent poll if the partition is resumed at that time
log.debug("Skipping fetching records for assigned partition {} because it is paused", nextInLineFetch.partition);
pausedCompletedFetches.add(nextInLineFetch);
fetchBuffer.setNextInLineFetch(null);
} else {
final Fetch<K, V> nextFetch = fetchRecords(nextInLineFetch, recordsRemaining);
recordsRemaining -= nextFetch.numRecords();
fetch.add(nextFetch);
}
}
} catch (KafkaException e) {
if (fetch.isEmpty())
throw e;
} finally {
// add any polled completed fetches for paused partitions back to the completed fetches queue to be
// re-evaluated in the next poll
fetchBuffer.addAll(pausedCompletedFetches);
}
return fetch;
}
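// collectFetch drains buffered completed fetches until fetchConfig.maxPollRecords is
// reached; on the public consumer API that bound comes from max.poll.records. A minimal
// configuration sketch (broker address and group id are placeholder values):
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

class MaxPollRecordsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Upper bound on records returned by a single poll(); the collector stops
        // draining buffered completed fetches once this many records are collected.
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "100");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // consumer.poll(...) now returns at most 100 records per call
        }
    }
}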
|
@Test
public void testFetchWithUnknownServerError() {
buildDependencies();
assignAndSeek(topicAPartition0);
// Try to fetch data and validate that we get an empty Fetch back.
CompletedFetch completedFetch = completedFetchBuilder
.error(Errors.UNKNOWN_SERVER_ERROR)
.build();
fetchBuffer.add(completedFetch);
Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer);
assertTrue(fetch.isEmpty());
}
|
public QueryObjectBundle rewriteQuery(@Language("SQL") String query, QueryConfiguration queryConfiguration, ClusterType clusterType)
{
return rewriteQuery(query, queryConfiguration, clusterType, false);
}
|
@Test
public void testRewriteDate()
{
QueryBundle queryBundle = getQueryRewriter().rewriteQuery("SELECT date '2020-01-01', date(now()) today", CONFIGURATION, CONTROL);
assertCreateTableAs(queryBundle.getQuery(), "SELECT\n" +
" CAST(date '2020-01-01' AS timestamp)\n" +
", CAST(date(now()) AS timestamp) today");
}
|
public String geomap() {
return get(GEOMAP, null);
}
|
@Test
public void setGeomap() {
loadLayout(L1);
assertEquals("map not brighton", UK_BRIGHTON, cfg.geomap());
cfg.geomap(NEW_MAP);
assertEquals("not new map", NEW_MAP, cfg.geomap());
cfg.geomap(null);
assertNull("geomap not cleared", cfg.geomap());
}
|
@Operation(summary = "verifyResourceName", description = "VERIFY_RESOURCE_NAME_NOTES")
@Parameters({
@Parameter(name = "type", description = "RESOURCE_TYPE", required = true, schema = @Schema(implementation = ResourceType.class)),
@Parameter(name = "fullName", description = "RESOURCE_FULL_NAME", required = true, schema = @Schema(implementation = String.class))
})
@GetMapping(value = "/verify-name")
@ResponseStatus(HttpStatus.OK)
@ApiException(VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR)
public Result<Object> verifyResourceName(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "fullName") String fullName,
@RequestParam(value = "type") ResourceType type) {
return resourceService.verifyResourceName(fullName, type, loginUser);
}
|
@Test
public void testVerifyResourceName() throws Exception {
Result mockResult = new Result<>();
mockResult.setCode(Status.TENANT_NOT_EXIST.getCode());
Mockito.when(resourcesService.verifyResourceName(Mockito.anyString(), Mockito.any(), Mockito.any()))
.thenReturn(mockResult);
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("fullName", "list_resources_1.sh");
paramsMap.add("type", "FILE");
MvcResult mvcResult = mockMvc.perform(get("/resources/verify-name")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assertions.assertEquals(Status.TENANT_NOT_EXIST.getCode(), result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
|
@Override
public boolean alterOffsets(Map<String, String> connectorConfig, Map<Map<String, ?>, Map<String, ?>> offsets) {
for (Map.Entry<Map<String, ?>, Map<String, ?>> offsetEntry : offsets.entrySet()) {
Map<String, ?> sourceOffset = offsetEntry.getValue();
if (sourceOffset == null) {
// We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't
// want to prevent users from being able to clean it up using the REST API
continue;
}
Map<String, ?> sourcePartition = offsetEntry.getKey();
if (sourcePartition == null) {
throw new ConnectException("Source partitions may not be null");
}
MirrorUtils.validateSourcePartitionString(sourcePartition, CONSUMER_GROUP_ID_KEY);
MirrorUtils.validateSourcePartitionString(sourcePartition, TOPIC_KEY);
MirrorUtils.validateSourcePartitionPartition(sourcePartition);
MirrorUtils.validateSourceOffset(sourcePartition, sourceOffset, true);
}
// We don't actually use these offsets in the task class, so no additional effort is required beyond just validating
// the format of the user-supplied offsets
return true;
}
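// A hedged sketch of an offsets map that passes the validation above. The key
// spellings "group", "topic", "partition", and "offset" are assumptions inferred
// from the test helpers; the authoritative names live in MirrorUtils/Checkpoint.
import java.util.HashMap;
import java.util.Map;

class CheckpointOffsetsSketch {
    public static void main(String[] args) {
        Map<String, Object> partition = new HashMap<>();
        partition.put("group", "consumer-app-3");
        partition.put("topic", "t1");
        partition.put("partition", 0);
        Map<String, Object> offset = Map.of("offset", 10L);
        Map<Map<String, ?>, Map<String, ?>> offsets = Map.of(partition, offset);
        // A null offset value would also be accepted: tombstones are allowed so
        // garbage entries can be cleaned up through the REST API.
        System.out.println(offsets);
    }
}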
|
@Test
public void testAlterOffsetsMultiplePartitions() {
MirrorCheckpointConnector connector = new MirrorCheckpointConnector();
Map<String, ?> partition1 = sourcePartition("consumer-app-3", "t1", 0);
Map<String, ?> partition2 = sourcePartition("consumer-app-4", "t1", 1);
Map<Map<String, ?>, Map<String, ?>> offsets = new HashMap<>();
offsets.put(partition1, SOURCE_OFFSET);
offsets.put(partition2, SOURCE_OFFSET);
assertTrue(connector.alterOffsets(null, offsets));
}
|
public static InternalRequestSignature fromHeaders(Crypto crypto, byte[] requestBody, HttpHeaders headers) {
if (headers == null) {
return null;
}
String signatureAlgorithm = headers.getHeaderString(SIGNATURE_ALGORITHM_HEADER);
String encodedSignature = headers.getHeaderString(SIGNATURE_HEADER);
if (signatureAlgorithm == null || encodedSignature == null) {
return null;
}
Mac mac;
try {
mac = crypto.mac(signatureAlgorithm);
} catch (NoSuchAlgorithmException e) {
throw new BadRequestException(e.getMessage());
}
byte[] decodedSignature;
try {
decodedSignature = Base64.getDecoder().decode(encodedSignature);
} catch (IllegalArgumentException e) {
throw new BadRequestException(e.getMessage());
}
return new InternalRequestSignature(
requestBody,
mac,
decodedSignature
);
}
|
@Test
public void fromHeadersShouldThrowExceptionOnInvalidBase64Signature() {
assertThrows(BadRequestException.class, () -> InternalRequestSignature.fromHeaders(crypto, REQUEST_BODY,
internalRequestHeaders("not valid base 64", SIGNATURE_ALGORITHM)));
}
|
public void createUsersWithListInput(List<User> body) throws RestClientException {
createUsersWithListInputWithHttpInfo(body);
}
|
@Test
public void createUsersWithListInputTest() {
List<User> body = null;
api.createUsersWithListInput(body);
// TODO: test validations
}
|
@ProcessElement
public void processElement(OutputReceiver<InitialPipelineState> receiver) throws IOException {
LOG.info(daoFactory.getStreamTableDebugString());
LOG.info(daoFactory.getMetadataTableDebugString());
LOG.info("ChangeStreamName: " + daoFactory.getChangeStreamName());
boolean resume = false;
DetectNewPartitionsState detectNewPartitionsState =
daoFactory.getMetadataTableDao().readDetectNewPartitionsState();
switch (existingPipelineOptions) {
case RESUME_OR_NEW:
// perform resumption.
if (detectNewPartitionsState != null) {
resume = true;
startTime = detectNewPartitionsState.getWatermark();
LOG.info("Resuming from previous pipeline with low watermark of {}", startTime);
} else {
LOG.info(
"Attempted to resume, but previous watermark does not exist, starting at {}",
startTime);
}
break;
case RESUME_OR_FAIL:
// perform resumption.
if (detectNewPartitionsState != null) {
resume = true;
startTime = detectNewPartitionsState.getWatermark();
LOG.info("Resuming from previous pipeline with low watermark of {}", startTime);
} else {
LOG.error("Previous pipeline with the same change stream name doesn't exist, stopping");
return;
}
break;
case FAIL_IF_EXISTS:
if (detectNewPartitionsState != null) {
LOG.error(
"A previous pipeline exists with the same change stream name and existingPipelineOption is set to FAIL_IF_EXISTS.");
return;
}
break;
case SKIP_CLEANUP:
if (detectNewPartitionsState != null) {
LOG.error(
"A previous pipeline exists with the same change stream name and existingPipelineOption is set to SKIP_CLEANUP. This option should only be used in tests.");
return;
}
break;
default:
LOG.error("Unexpected existingPipelineOptions option.");
// terminate pipeline
return;
}
daoFactory.getMetadataTableDao().writeDetectNewPartitionVersion();
receiver.output(new InitialPipelineState(startTime, resume));
}
|
@Test
public void testInitializeStopWithoutDNP() throws IOException {
// The DNP row doesn't exist, so we don't need to stop the pipeline. But some random
// data row with the same prefix exists. We want to make sure we clean it up even
// with the "STOP" option.
dataClient.mutateRow(
RowMutation.create(
tableId,
metadataTableAdminDao
.getChangeStreamNamePrefix()
.concat(ByteString.copyFromUtf8("existing_row")))
.setCell(
MetadataTableAdminDao.CF_WATERMARK, MetadataTableAdminDao.QUALIFIER_DEFAULT, 123));
Instant startTime = Instant.now();
InitializeDoFn initializeDoFn =
new InitializeDoFn(
daoFactory, startTime, BigtableIO.ExistingPipelineOptions.FAIL_IF_EXISTS);
initializeDoFn.processElement(outputReceiver);
verify(outputReceiver, times(1)).output(new InitialPipelineState(startTime, false));
assertNull(dataClient.readRow(tableId, metadataTableAdminDao.getChangeStreamNamePrefix()));
}
|
public static String getVersion() {
return VERSION;
}
|
@Test
public void testFromDefaultVersion() {
String version = VersionUtils.getVersion();
assertNotNull(version);
}
|
@Override
public GenericRow apply(final GenericRow left, final GenericRow right) {
final GenericRow row = new GenericRow(leftCount + rightCount + additionalCount);
if (left != null) {
row.appendAll(left.values());
} else {
fillWithNulls(row, leftCount);
}
if (right != null) {
row.appendAll(right.values());
} else {
fillWithNulls(row, rightCount);
}
// Potentially append additional nulls as placeholders for synthetic key columns.
// These columns are not populated, as they are not accessed, but must be present
// for the row to match the row schema.
fillWithNulls(row, additionalCount);
return row;
}
|
@Test
public void shouldJoinValueLeftEmpty() {
final KsqlValueJoiner joiner = new KsqlValueJoiner(leftSchema.value().size(),
rightSchema.value().size(), 0
);
final GenericRow joined = joiner.apply(null, rightRow);
final List<Object> expected = Arrays.asList(null, null, 20L, "baz");
assertEquals(expected, joined.values());
}
|
@Override
public RelDataType deriveAvgAggType(RelDataTypeFactory typeFactory, RelDataType argumentType) {
switch (argumentType.getSqlTypeName()) {
case TINYINT:
case SMALLINT:
case INTEGER:
case BIGINT:
case DECIMAL:
return typeFactory.createTypeWithNullability(
typeFactory.createSqlType(DECIMAL),
argumentType.isNullable()
);
case REAL:
case DOUBLE:
return typeFactory.createTypeWithNullability(
typeFactory.createSqlType(DOUBLE),
argumentType.isNullable()
);
default:
return argumentType;
}
}
|
@Test
public void deriveAvgAggTypeTest() {
assertEquals(type(VARCHAR), HazelcastTypeSystem.INSTANCE.deriveAvgAggType(TYPE_FACTORY, type(VARCHAR)));
assertEquals(type(BOOLEAN), HazelcastTypeSystem.INSTANCE.deriveAvgAggType(TYPE_FACTORY, type(BOOLEAN)));
assertEquals(type(DECIMAL), HazelcastTypeSystem.INSTANCE.deriveAvgAggType(TYPE_FACTORY, type(TINYINT)));
assertEquals(type(DECIMAL), HazelcastTypeSystem.INSTANCE.deriveAvgAggType(TYPE_FACTORY, type(SMALLINT)));
assertEquals(type(DECIMAL), HazelcastTypeSystem.INSTANCE.deriveAvgAggType(TYPE_FACTORY, type(INTEGER)));
assertEquals(type(DECIMAL), HazelcastTypeSystem.INSTANCE.deriveAvgAggType(TYPE_FACTORY, type(BIGINT)));
assertEquals(type(DECIMAL), HazelcastTypeSystem.INSTANCE.deriveAvgAggType(TYPE_FACTORY, type(DECIMAL)));
assertEquals(type(DOUBLE), HazelcastTypeSystem.INSTANCE.deriveAvgAggType(TYPE_FACTORY, type(REAL)));
assertEquals(type(DOUBLE), HazelcastTypeSystem.INSTANCE.deriveAvgAggType(TYPE_FACTORY, type(DOUBLE)));
assertEquals(type(TIME), HazelcastTypeSystem.INSTANCE.deriveAvgAggType(TYPE_FACTORY, type(TIME)));
assertEquals(type(DATE), HazelcastTypeSystem.INSTANCE.deriveAvgAggType(TYPE_FACTORY, type(DATE)));
assertEquals(type(TIMESTAMP), HazelcastTypeSystem.INSTANCE.deriveAvgAggType(TYPE_FACTORY, type(TIMESTAMP)));
assertEquals(
type(TIMESTAMP_WITH_LOCAL_TIME_ZONE),
HazelcastTypeSystem.INSTANCE.deriveAvgAggType(TYPE_FACTORY, type(TIMESTAMP_WITH_LOCAL_TIME_ZONE))
);
assertEquals(type(OTHER), HazelcastTypeSystem.INSTANCE.deriveAvgAggType(TYPE_FACTORY, type(OTHER)));
}
|
@Override
public void start() {
if (!PluginConfigManager.getPluginConfig(DiscoveryPluginConfig.class).isEnableRegistry()) {
return;
}
final LbConfig lbConfig = PluginConfigManager.getPluginConfig(LbConfig.class);
maxSize = lbConfig.getMaxRetryConfigCache();
defaultRetry = Retry.create(DefaultRetryConfig.create());
initRetryPolicy(lbConfig);
}
|
@Test
public void start() {
retryService.start();
final Optional<Object> defaultRetry = ReflectUtils.getFieldValue(retryService, "defaultRetry");
Assert.assertTrue(defaultRetry.isPresent());
}
|
public static Builder withSchema(Schema schema) {
return new Builder(schema);
}
|
@Test
public void testByteBufferEquality() {
byte[] a0 = new byte[] {1, 2, 3, 4};
byte[] b0 = new byte[] {1, 2, 3, 4};
Schema schema = Schema.of(Schema.Field.of("bytes", Schema.FieldType.BYTES));
Row a = Row.withSchema(schema).addValue(ByteBuffer.wrap(a0)).build();
Row b = Row.withSchema(schema).addValue(ByteBuffer.wrap(b0)).build();
assertEquals(a, b);
}
|
public static String clusterPath(String basePath)
{
return String.format("%s/%s", normalizeBasePath(basePath), CLUSTER_PATH);
}
|
@Test (dataProvider = "clusterPaths")
public void testZKFSUtilClusterPath(String basePath, String clusterPath)
{
Assert.assertEquals(ZKFSUtil.clusterPath(basePath), clusterPath);
}
|
@Override
public void run() {
try {
// We kill containers until the kernel reports the OOM situation resolved
// Note: If the kernel has a delay this may kill more than necessary
while (true) {
String status = cgroups.getCGroupParam(
CGroupsHandler.CGroupController.MEMORY,
"",
CGROUP_PARAM_MEMORY_OOM_CONTROL);
if (!status.contains(CGroupsHandler.UNDER_OOM)) {
break;
}
boolean containerKilled = killContainer();
if (!containerKilled) {
// This can happen if SIGKILL did not clean up
// non-PGID containers, containers launched by other users,
// or if a process was put into the root YARN cgroup.
throw new YarnRuntimeException(
"Could not find any containers but CGroups " +
"reserved for containers ran out of memory. " +
"I am giving up");
}
}
} catch (ResourceHandlerException ex) {
LOG.warn("Could not fetch OOM status. " +
"This is expected at shutdown. Exiting.", ex);
}
}
|
@Test
public void testOneGuaranteedContainerOverLimitUponOOM() throws Exception {
ConcurrentHashMap<ContainerId, Container> containers =
new ConcurrentHashMap<>();
Container c1 = createContainer(1, true, 2L, true);
containers.put(c1.getContainerId(), c1);
Container c2 = createContainer(2, true, 1L, true);
containers.put(c2.getContainerId(), c2);
ContainerExecutor ex = createContainerExecutor(containers);
Context context = mock(Context.class);
when(context.getContainers()).thenReturn(containers);
when(context.getContainerExecutor()).thenReturn(ex);
CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class);
when(cGroupsHandler.getCGroupParam(
CGroupsHandler.CGroupController.MEMORY,
"",
CGROUP_PARAM_MEMORY_OOM_CONTROL))
.thenReturn("under_oom 1").thenReturn("under_oom 0");
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c1.getContainerId().toString(), CGROUP_PROCS_FILE))
.thenReturn("1234").thenReturn("");
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
.thenReturn(getMB(9));
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
.thenReturn(getMB(9));
// container c2 is out of its limit
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c2.getContainerId().toString(), CGROUP_PROCS_FILE))
.thenReturn("1235").thenReturn("");
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
.thenReturn(getMB(11));
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
.thenReturn(getMB(11));
DefaultOOMHandler handler =
new DefaultOOMHandler(context, false) {
@Override
protected CGroupsHandler getCGroupsHandler() {
return cGroupsHandler;
}
};
handler.run();
verify(ex, times(1)).signalContainer(
new ContainerSignalContext.Builder()
.setPid("1235")
.setContainer(c2)
.setSignal(ContainerExecutor.Signal.KILL)
.build()
);
verify(ex, times(1)).signalContainer(any());
}
|
@Override
public RemotingCommand processRequest(final ChannelHandlerContext ctx, RemotingCommand request)
throws RemotingCommandException {
final long beginTimeMills = this.brokerController.getMessageStore().now();
request.addExtFieldIfNotExist(BORN_TIME, String.valueOf(System.currentTimeMillis()));
if (Objects.equals(request.getExtFields().get(BORN_TIME), "0")) {
request.addExtField(BORN_TIME, String.valueOf(System.currentTimeMillis()));
}
Channel channel = ctx.channel();
RemotingCommand response = RemotingCommand.createResponseCommand(PopMessageResponseHeader.class);
final PopMessageResponseHeader responseHeader = (PopMessageResponseHeader) response.readCustomHeader();
final PopMessageRequestHeader requestHeader =
(PopMessageRequestHeader) request.decodeCommandCustomHeader(PopMessageRequestHeader.class, true);
StringBuilder startOffsetInfo = new StringBuilder(64);
StringBuilder msgOffsetInfo = new StringBuilder(64);
StringBuilder orderCountInfo = null;
if (requestHeader.isOrder()) {
orderCountInfo = new StringBuilder(64);
}
brokerController.getConsumerManager().compensateBasicConsumerInfo(requestHeader.getConsumerGroup(),
ConsumeType.CONSUME_POP, MessageModel.CLUSTERING);
response.setOpaque(request.getOpaque());
if (brokerController.getBrokerConfig().isEnablePopLog()) {
POP_LOGGER.info("receive PopMessage request command, {}", request);
}
if (requestHeader.isTimeoutTooMuch()) {
response.setCode(ResponseCode.POLLING_TIMEOUT);
response.setRemark(String.format("the broker[%s] pop message is timeout too much",
this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
if (!PermName.isReadable(this.brokerController.getBrokerConfig().getBrokerPermission())) {
response.setCode(ResponseCode.NO_PERMISSION);
response.setRemark(String.format("the broker[%s] pop message is forbidden",
this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
if (requestHeader.getMaxMsgNums() > 32) {
response.setCode(ResponseCode.SYSTEM_ERROR);
response.setRemark(String.format("the broker[%s] pop message's num is greater than 32",
this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
if (!brokerController.getMessageStore().getMessageStoreConfig().isTimerWheelEnable()) {
response.setCode(ResponseCode.SYSTEM_ERROR);
response.setRemark(String.format("the broker[%s] pop message is forbidden because timerWheelEnable is false",
this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
TopicConfig topicConfig =
this.brokerController.getTopicConfigManager().selectTopicConfig(requestHeader.getTopic());
if (null == topicConfig) {
POP_LOGGER.error("The topic {} not exist, consumer: {} ", requestHeader.getTopic(),
RemotingHelper.parseChannelRemoteAddr(channel));
response.setCode(ResponseCode.TOPIC_NOT_EXIST);
response.setRemark(String.format("topic[%s] not exist, apply first please! %s", requestHeader.getTopic(),
FAQUrl.suggestTodo(FAQUrl.APPLY_TOPIC_URL)));
return response;
}
if (!PermName.isReadable(topicConfig.getPerm())) {
response.setCode(ResponseCode.NO_PERMISSION);
response.setRemark("the topic[" + requestHeader.getTopic() + "] peeking message is forbidden");
return response;
}
if (requestHeader.getQueueId() >= topicConfig.getReadQueueNums()) {
String errorInfo = String.format("queueId[%d] is illegal, topic:[%s] topicConfig.readQueueNums:[%d] " +
"consumer:[%s]",
requestHeader.getQueueId(), requestHeader.getTopic(), topicConfig.getReadQueueNums(),
channel.remoteAddress());
POP_LOGGER.warn(errorInfo);
response.setCode(ResponseCode.SYSTEM_ERROR);
response.setRemark(errorInfo);
return response;
}
SubscriptionGroupConfig subscriptionGroupConfig =
this.brokerController.getSubscriptionGroupManager().findSubscriptionGroupConfig(requestHeader.getConsumerGroup());
if (null == subscriptionGroupConfig) {
response.setCode(ResponseCode.SUBSCRIPTION_GROUP_NOT_EXIST);
response.setRemark(String.format("subscription group [%s] does not exist, %s",
requestHeader.getConsumerGroup(), FAQUrl.suggestTodo(FAQUrl.SUBSCRIPTION_GROUP_NOT_EXIST)));
return response;
}
if (!subscriptionGroupConfig.isConsumeEnable()) {
response.setCode(ResponseCode.NO_PERMISSION);
response.setRemark("subscription group no permission, " + requestHeader.getConsumerGroup());
return response;
}
BrokerConfig brokerConfig = brokerController.getBrokerConfig();
SubscriptionData subscriptionData = null;
ExpressionMessageFilter messageFilter = null;
if (requestHeader.getExp() != null && !requestHeader.getExp().isEmpty()) {
try {
subscriptionData = FilterAPI.build(requestHeader.getTopic(), requestHeader.getExp(), requestHeader.getExpType());
brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
requestHeader.getTopic(), subscriptionData);
String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(), requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
SubscriptionData retrySubscriptionData = FilterAPI.build(retryTopic, SubscriptionData.SUB_ALL, requestHeader.getExpType());
brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
retryTopic, retrySubscriptionData);
ConsumerFilterData consumerFilterData = null;
if (!ExpressionType.isTagType(subscriptionData.getExpressionType())) {
consumerFilterData = ConsumerFilterManager.build(
requestHeader.getTopic(), requestHeader.getConsumerGroup(), requestHeader.getExp(),
requestHeader.getExpType(), System.currentTimeMillis()
);
if (consumerFilterData == null) {
POP_LOGGER.warn("Parse the consumer's subscription[{}] failed, group: {}",
requestHeader.getExp(), requestHeader.getConsumerGroup());
response.setCode(ResponseCode.SUBSCRIPTION_PARSE_FAILED);
response.setRemark("parse the consumer's subscription failed");
return response;
}
}
messageFilter = new ExpressionMessageFilter(subscriptionData, consumerFilterData,
brokerController.getConsumerFilterManager());
} catch (Exception e) {
POP_LOGGER.warn("Parse the consumer's subscription[{}] error, group: {}", requestHeader.getExp(),
requestHeader.getConsumerGroup());
response.setCode(ResponseCode.SUBSCRIPTION_PARSE_FAILED);
response.setRemark("parse the consumer's subscription failed");
return response;
}
} else {
try {
subscriptionData = FilterAPI.build(requestHeader.getTopic(), "*", ExpressionType.TAG);
brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
requestHeader.getTopic(), subscriptionData);
String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(), requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
SubscriptionData retrySubscriptionData = FilterAPI.build(retryTopic, "*", ExpressionType.TAG);
brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
retryTopic, retrySubscriptionData);
} catch (Exception e) {
POP_LOGGER.warn("Build default subscription error, group: {}", requestHeader.getConsumerGroup());
}
}
int randomQ = random.nextInt(100);
int reviveQid;
if (requestHeader.isOrder()) {
reviveQid = KeyBuilder.POP_ORDER_REVIVE_QUEUE;
} else {
reviveQid = (int) Math.abs(ckMessageNumber.getAndIncrement() % this.brokerController.getBrokerConfig().getReviveQueueNum());
}
GetMessageResult getMessageResult = new GetMessageResult(requestHeader.getMaxMsgNums());
ExpressionMessageFilter finalMessageFilter = messageFilter;
StringBuilder finalOrderCountInfo = orderCountInfo;
// Due to the design of the fields startOffsetInfo, msgOffsetInfo, and orderCountInfo,
// a single POP request can invoke the popMsgFromQueue method only once
// for either a normal topic or a retry topic's queue. Retry topics v1 and v2 are
// considered the same type because they share the same retry flag in those fields.
// Therefore, needRetryV1 is designed as a subset of needRetry, and within a single
// request, only one type of retry topic is able to call popMsgFromQueue.
boolean needRetry = randomQ % 5 == 0;
boolean needRetryV1 = false;
if (brokerConfig.isEnableRetryTopicV2() && brokerConfig.isRetrieveMessageFromPopRetryTopicV1()) {
needRetryV1 = randomQ % 2 == 0;
}
long popTime = System.currentTimeMillis();
CompletableFuture<Long> getMessageFuture = CompletableFuture.completedFuture(0L);
if (needRetry && !requestHeader.isOrder()) {
if (needRetryV1) {
String retryTopic = KeyBuilder.buildPopRetryTopicV1(requestHeader.getTopic(), requestHeader.getConsumerGroup());
getMessageFuture = popMsgFromTopic(retryTopic, true, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
} else {
String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(), requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
getMessageFuture = popMsgFromTopic(retryTopic, true, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
}
}
if (requestHeader.getQueueId() < 0) {
// read all queues
getMessageFuture = popMsgFromTopic(topicConfig, false, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
} else {
int queueId = requestHeader.getQueueId();
getMessageFuture = getMessageFuture.thenCompose(restNum ->
popMsgFromQueue(topicConfig.getTopicName(), requestHeader.getAttemptId(), false,
getMessageResult, requestHeader, queueId, restNum, reviveQid, channel, popTime, finalMessageFilter,
startOffsetInfo, msgOffsetInfo, finalOrderCountInfo));
}
// if not full, fetch from the retry topic again
if (!needRetry && getMessageResult.getMessageMapedList().size() < requestHeader.getMaxMsgNums() && !requestHeader.isOrder()) {
if (needRetryV1) {
String retryTopicV1 = KeyBuilder.buildPopRetryTopicV1(requestHeader.getTopic(), requestHeader.getConsumerGroup());
getMessageFuture = popMsgFromTopic(retryTopicV1, true, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
} else {
String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(), requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
getMessageFuture = popMsgFromTopic(retryTopic, true, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
}
}
final RemotingCommand finalResponse = response;
SubscriptionData finalSubscriptionData = subscriptionData;
getMessageFuture.thenApply(restNum -> {
if (!getMessageResult.getMessageBufferList().isEmpty()) {
finalResponse.setCode(ResponseCode.SUCCESS);
getMessageResult.setStatus(GetMessageStatus.FOUND);
if (restNum > 0) {
// a pop on all queues cannot notify a pop on a specific queue, and vice versa
popLongPollingService.notifyMessageArriving(
requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getConsumerGroup(),
null, 0L, null, null);
}
} else {
PollingResult pollingResult = popLongPollingService.polling(
ctx, request, new PollingHeader(requestHeader), finalSubscriptionData, finalMessageFilter);
if (PollingResult.POLLING_SUC == pollingResult) {
if (restNum > 0) {
popLongPollingService.notifyMessageArriving(
requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getConsumerGroup(),
null, 0L, null, null);
}
return null;
} else if (PollingResult.POLLING_FULL == pollingResult) {
finalResponse.setCode(ResponseCode.POLLING_FULL);
} else {
finalResponse.setCode(ResponseCode.POLLING_TIMEOUT);
}
getMessageResult.setStatus(GetMessageStatus.NO_MESSAGE_IN_QUEUE);
}
responseHeader.setInvisibleTime(requestHeader.getInvisibleTime());
responseHeader.setPopTime(popTime);
responseHeader.setReviveQid(reviveQid);
responseHeader.setRestNum(restNum);
responseHeader.setStartOffsetInfo(startOffsetInfo.toString());
responseHeader.setMsgOffsetInfo(msgOffsetInfo.toString());
if (requestHeader.isOrder() && finalOrderCountInfo != null) {
responseHeader.setOrderCountInfo(finalOrderCountInfo.toString());
}
finalResponse.setRemark(getMessageResult.getStatus().name());
switch (finalResponse.getCode()) {
case ResponseCode.SUCCESS:
if (this.brokerController.getBrokerConfig().isTransferMsgByHeap()) {
final byte[] r = this.readGetMessageResult(getMessageResult, requestHeader.getConsumerGroup(),
requestHeader.getTopic(), requestHeader.getQueueId());
this.brokerController.getBrokerStatsManager().incGroupGetLatency(requestHeader.getConsumerGroup(),
requestHeader.getTopic(), requestHeader.getQueueId(),
(int) (this.brokerController.getMessageStore().now() - beginTimeMills));
finalResponse.setBody(r);
} else {
final GetMessageResult tmpGetMessageResult = getMessageResult;
try {
FileRegion fileRegion =
new ManyMessageTransfer(finalResponse.encodeHeader(getMessageResult.getBufferTotalSize()),
getMessageResult);
channel.writeAndFlush(fileRegion)
.addListener((ChannelFutureListener) future -> {
tmpGetMessageResult.release();
Attributes attributes = RemotingMetricsManager.newAttributesBuilder()
.put(LABEL_REQUEST_CODE, RemotingHelper.getRequestCodeDesc(request.getCode()))
.put(LABEL_RESPONSE_CODE, RemotingHelper.getResponseCodeDesc(finalResponse.getCode()))
.put(LABEL_RESULT, RemotingMetricsManager.getWriteAndFlushResult(future))
.build();
RemotingMetricsManager.rpcLatency.record(request.getProcessTimer().elapsed(TimeUnit.MILLISECONDS), attributes);
if (!future.isSuccess()) {
POP_LOGGER.error("Fail to transfer messages from page cache to {}",
channel.remoteAddress(), future.cause());
}
});
} catch (Throwable e) {
POP_LOGGER.error("Error occurred when transferring messages from page cache", e);
getMessageResult.release();
}
return null;
}
break;
default:
return finalResponse;
}
return finalResponse;
}).thenAccept(result -> NettyRemotingAbstract.writeResponse(channel, request, result));
return null;
}
|
@Test
public void testProcessRequest_TopicNotExist() throws RemotingCommandException {
when(messageStore.getMessageStoreConfig()).thenReturn(new MessageStoreConfig());
brokerController.getTopicConfigManager().getTopicConfigTable().remove(topic);
final RemotingCommand request = createPopMsgCommand();
RemotingCommand response = popMessageProcessor.processRequest(handlerContext, request);
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.TOPIC_NOT_EXIST);
assertThat(response.getRemark()).contains("topic[" + topic + "] not exist");
}
|
@Override
public void unSubscribe(final AppAuthData appAuthData) {
SignAuthDataCache.getInstance().removeAuthData(appAuthData);
}
|
@Test
void unSubscribe() {
AppAuthData appAuthData = new AppAuthData();
appAuthData.setAppKey("D9FD95F496C9495DB5604222A13C3D08");
appAuthData.setAppSecret("02D25048AA1E466F8920E68B08E668DE");
appAuthData.setEnabled(true);
signAuthDataSubscriber.onSubscribe(appAuthData);
signAuthDataSubscriber.unSubscribe(appAuthData);
assertNull(SignAuthDataCache.getInstance().obtainAuthData("D9FD95F496C9495DB5604222A13C3D08"));
}
|
@Description("Returns a \"simplified\" version of the given geometry")
@ScalarFunction("simplify_geometry")
@SqlType(GEOMETRY_TYPE_NAME)
public static Slice simplifyGeometry(@SqlType(GEOMETRY_TYPE_NAME) Slice input, @SqlType(DOUBLE) double distanceTolerance)
{
if (isNaN(distanceTolerance)) {
throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "distanceTolerance is NaN");
}
if (distanceTolerance < 0) {
throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "distanceTolerance is negative");
}
if (distanceTolerance == 0) {
return input;
}
return serialize(simplify(deserialize(input), distanceTolerance));
}
|
@Test
public void testSimplifyGeometry()
{
// Eliminate unnecessary points on the same line.
assertFunction("ST_AsText(simplify_geometry(ST_GeometryFromText('POLYGON ((1 0, 2 1, 3 1, 3 1, 4 1, 1 0))'), 1.5))", VARCHAR, "POLYGON ((1 0, 2 1, 4 1, 1 0))");
// Use distanceTolerance to control fidelity.
assertFunction("ST_AsText(simplify_geometry(ST_GeometryFromText('POLYGON ((1 0, 1 1, 2 1, 2 3, 3 3, 3 1, 4 1, 4 0, 1 0))'), 1.0))", VARCHAR, "POLYGON ((1 0, 2 3, 3 3, 4 0, 1 0))");
assertFunction("ST_AsText(simplify_geometry(ST_GeometryFromText('POLYGON ((1 0, 1 1, 2 1, 2 3, 3 3, 3 1, 4 1, 4 0, 1 0))'), 0.5))", VARCHAR, "POLYGON ((1 0, 1 1, 2 1, 2 3, 3 3, 3 1, 4 1, 4 0, 1 0))");
// Negative distance tolerance is invalid.
assertInvalidFunction("ST_AsText(simplify_geometry(ST_GeometryFromText('" + "POLYGON ((1 0, 1 1, 2 1, 2 3, 3 3, 3 1, 4 1, 4 0, 1 0))" + "'), -0.5))", "distanceTolerance is negative");
}
|
@Override
public AppResponse process(Flow flow, AppRequest request) {
appSession = new AppSession();
appSession.setState(State.INITIALIZED.name());
appSession.setActivationMethod(ActivationMethod.APP);
appSession.setFlow(ActivateAppWithOtherAppFlow.NAME);
appSession.setAction("activate_with_app");
digidClient.remoteLog("1365", Map.of("", ""));
return new AppSessionResponse(appSession.getId(), Instant.now().getEpochSecond());
}
|
@Test
void processTest(){
AppResponse appResponse = startActivationWithOtherApp.process(mockedFlow, null);
verify(digidClientMock, times(1)).remoteLog("1365", ImmutableMap.of("", ""));
assertTrue(appResponse instanceof AppSessionResponse);
assertEquals(startActivationWithOtherApp.getAppSession().getId(), ((AppSessionResponse)appResponse).getAppSessionId());
assertEquals(ActivationMethod.APP, startActivationWithOtherApp.getAppSession().getActivationMethod());
}
|
public static Map<String, Object> map(String metricName, Metric metric) {
final Map<String, Object> metricMap = Maps.newHashMap();
metricMap.put("full_name", metricName);
metricMap.put("name", metricName.substring(metricName.lastIndexOf(".") + 1));
if (metric instanceof Timer) {
metricMap.put("metric", buildTimerMap((Timer) metric));
metricMap.put("type", "timer");
} else if(metric instanceof Meter) {
metricMap.put("metric", buildMeterMap((Meter) metric));
metricMap.put("type", "meter");
} else if(metric instanceof Histogram) {
metricMap.put("metric", buildHistogramMap((Histogram) metric));
metricMap.put("type", "histogram");
} else if(metric instanceof Counter) {
metricMap.put("metric", metric);
metricMap.put("type", "counter");
} else if(metric instanceof Gauge) {
metricMap.put("metric", metric);
metricMap.put("type", "gauge");
} else {
throw new IllegalArgumentException("Unknown metric type " + metric.getClass());
}
return metricMap;
}
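// A hedged usage sketch of the dispatch above with a Dropwizard Counter; MetricUtils
// is the class containing map(..), and Counter comes from com.codahale.metrics.
import com.codahale.metrics.Counter;
import java.util.Map;

class MetricMapSketch {
    public static void main(String[] args) {
        Counter counter = new Counter();
        counter.inc(3);
        Map<String, Object> map = MetricUtils.map("org.example.requests", counter);
        System.out.println(map.get("name")); // "requests" (substring after the last dot)
        System.out.println(map.get("type")); // "counter"
    }
}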
|
@Test
public void mapSupportsHdrHistogram() {
final HdrHistogram histogram = new HdrHistogram(1000L, 0);
histogram.update(23);
final Map<String, Object> map = MetricUtils.map("metric", histogram);
assertThat(map)
.containsEntry("type", "histogram")
.extracting("metric")
.extracting("count")
.isEqualTo(1L);
}
|
private Watch.Listener watch(final BiConsumer<String, String> updateHandler,
final Consumer<String> deleteHandler) {
return Watch.listener(response -> {
for (WatchEvent event : response.getEvents()) {
String path = event.getKeyValue().getKey().toString(UTF_8);
String value = event.getKeyValue().getValue().toString(UTF_8);
switch (event.getEventType()) {
case PUT:
Optional.ofNullable(updateHandler).ifPresent(handler -> handler.accept(path, value));
continue;
case DELETE:
Optional.ofNullable(deleteHandler).ifPresent(handler -> handler.accept(path));
continue;
default:
}
}
}, throwable -> {
LOG.error("etcd watch error {}", throwable.getMessage(), throwable);
throw new ShenyuException(throwable);
});
}
|
@Test
public void watchTest() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
BiConsumer<String, String> updateHandler = mock(BiConsumer.class);
Consumer<String> deleteHandler = mock(Consumer.class);
final Method watch = EtcdClient.class.getDeclaredMethod("watch", BiConsumer.class, Consumer.class);
watch.setAccessible(true);
final Watch.Listener listener = (Watch.Listener) watch.invoke(etcdClient, updateHandler, deleteHandler);
final WatchResponse watchResponse = mock(WatchResponse.class);
List<WatchEvent> watchEvents = new ArrayList<>(2);
final WatchEvent watchEvent = mock(WatchEvent.class);
watchEvents.add(watchEvent);
when(watchResponse.getEvents()).thenReturn(watchEvents);
final KeyValue keyValue = mock(KeyValue.class);
when(watchEvent.getKeyValue()).thenReturn(keyValue);
when(keyValue.getValue()).thenReturn(ByteSequence.from("value", StandardCharsets.UTF_8));
when(keyValue.getKey()).thenReturn(ByteSequence.from("key", StandardCharsets.UTF_8));
when(watchEvent.getEventType()).thenReturn(WatchEvent.EventType.PUT);
assertDoesNotThrow(() -> listener.onNext(watchResponse));
when(watchEvent.getEventType()).thenReturn(WatchEvent.EventType.DELETE);
assertDoesNotThrow(() -> listener.onNext(watchResponse));
when(watchEvent.getEventType()).thenReturn(WatchEvent.EventType.UNRECOGNIZED);
assertDoesNotThrow(() -> listener.onNext(watchResponse));
}
|
@Override
public Flux<RawMetric> retrieve(KafkaCluster c, Node node) {
log.debug("Retrieving metrics from prometheus exporter: {}:{}", node.host(), c.getMetricsConfig().getPort());
MetricsConfig metricsConfig = c.getMetricsConfig();
var webClient = new WebClientConfigurator()
.configureBufferSize(DataSize.ofMegabytes(20))
.configureBasicAuth(metricsConfig.getUsername(), metricsConfig.getPassword())
.configureSsl(
c.getOriginalProperties().getSsl(),
new ClustersProperties.KeystoreConfig(
metricsConfig.getKeystoreLocation(),
metricsConfig.getKeystorePassword()))
.build();
return retrieve(webClient, node.host(), c.getMetricsConfig());
}
|
@Test
void callsMetricsEndpointAndConvertsResponceToRawMetric() {
var url = mockWebServer.url("/metrics");
mockWebServer.enqueue(prepareResponse());
MetricsConfig metricsConfig = prepareMetricsConfig(url.port(), null, null);
StepVerifier.create(retriever.retrieve(WebClient.create(), url.host(), metricsConfig))
.expectNextSequence(expectedRawMetrics())
// the third metric should not be present, since it has a "NaN" value
.verifyComplete();
}
|
public static HintValueContext extractHint(final String sql) {
if (!containsSQLHint(sql)) {
return new HintValueContext();
}
HintValueContext result = new HintValueContext();
int hintKeyValueBeginIndex = getHintKeyValueBeginIndex(sql);
String hintKeyValueText = sql.substring(hintKeyValueBeginIndex, sql.indexOf(SQL_COMMENT_SUFFIX, hintKeyValueBeginIndex));
Map<String, String> hintKeyValues = getSQLHintKeyValues(hintKeyValueText);
if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.DATASOURCE_NAME_KEY)) {
result.setDataSourceName(getHintValue(hintKeyValues, SQLHintPropertiesKey.DATASOURCE_NAME_KEY));
}
if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.WRITE_ROUTE_ONLY_KEY)) {
result.setWriteRouteOnly(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.WRITE_ROUTE_ONLY_KEY)));
}
if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.SKIP_SQL_REWRITE_KEY)) {
result.setSkipSQLRewrite(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.SKIP_SQL_REWRITE_KEY)));
}
if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.DISABLE_AUDIT_NAMES_KEY)) {
String property = getHintValue(hintKeyValues, SQLHintPropertiesKey.DISABLE_AUDIT_NAMES_KEY);
result.getDisableAuditNames().addAll(getSplitterSQLHintValue(property));
}
if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.SHADOW_KEY)) {
result.setShadow(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.SHADOW_KEY)));
}
for (Entry<String, String> entry : hintKeyValues.entrySet()) {
Object value = convert(entry.getValue());
Comparable<?> comparable = value instanceof Comparable ? (Comparable<?>) value : Objects.toString(value);
if (containsHintKey(Objects.toString(entry.getKey()), SQLHintPropertiesKey.SHARDING_DATABASE_VALUE_KEY)) {
result.getShardingDatabaseValues().put(Objects.toString(entry.getKey()).toUpperCase(), comparable);
}
if (containsHintKey(Objects.toString(entry.getKey()), SQLHintPropertiesKey.SHARDING_TABLE_VALUE_KEY)) {
result.getShardingTableValues().put(Objects.toString(entry.getKey()).toUpperCase(), comparable);
}
}
return result;
}
|
@Test
void assertSQLHintDisableAuditNames() {
HintValueContext actual = SQLHintUtils.extractHint("/* SHARDINGSPHERE_HINT: DISABLE_AUDIT_NAMES=sharding_audit1 sharding_audit2 */");
assertThat(actual.getDisableAuditNames().size(), is(2));
assertTrue(actual.getDisableAuditNames().containsAll(Arrays.asList("sharding_audit1", "sharding_audit2")));
}
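A hedged usage sketch of extractHint with a datasource hint. The exact key spellings (DATA_SOURCE_NAME, WRITE_ROUTE_ONLY) are assumptions based on SQLHintPropertiesKey, and the getters are assumed to mirror the setters invoked in extractHint; adjust to the actual constants if they differ.
HintValueContext context = SQLHintUtils.extractHint(
        "/* SHARDINGSPHERE_HINT: DATA_SOURCE_NAME=ds_0, WRITE_ROUTE_ONLY=true */ SELECT * FROM t_order");
assert "ds_0".equals(context.getDataSourceName());
assert context.isWriteRouteOnly();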
|
@Override
public String toString() {
return "ControllerRegistration(id=" + id +
", incarnationId=" + incarnationId +
", zkMigrationReady=" + zkMigrationReady +
", listeners=[" +
listeners.keySet().stream().sorted().
map(n -> listeners.get(n).toString()).
collect(Collectors.joining(", ")) +
"], supportedFeatures={" +
supportedFeatures.keySet().stream().sorted().
map(k -> k + ": " + supportedFeatures.get(k)).
collect(Collectors.joining(", ")) +
"}" +
")";
}
|
@Test
public void testToString() {
assertEquals("ControllerRegistration(id=1, " +
"incarnationId=ubT_wuD6R3uopZ_lV76dQg, " +
"zkMigrationReady=true, " +
"listeners=[" +
"Endpoint(listenerName='PLAINTEXT', securityProtocol=PLAINTEXT, host='localhost', port=9108), " +
"Endpoint(listenerName='SSL', securityProtocol=SSL, host='localhost', port=9208)]" +
", supportedFeatures={metadata.version: 1-10})",
REGISTRATIONS.get(1).toString());
}
|
protected void initializePipeline() {
// Set up rules for packet-out forwarding. We support only IPv4 routing.
final long cpuPort = capabilities.cpuPort().get();
flowRuleService.applyFlowRules(
ingressVlanRule(cpuPort, false, DEFAULT_VLAN),
fwdClassifierRule(cpuPort, null, Ethernet.TYPE_IPV4, FWD_IPV4_ROUTING,
DEFAULT_FLOW_PRIORITY));
}
|
@Test
public void testInitializePipeline() {
final Capture<FlowRule> capturedCpuIgVlanRule = newCapture(CaptureType.ALL);
final Capture<FlowRule> capturedCpuFwdClsRule = newCapture(CaptureType.ALL);
// ingress_port_vlan table for cpu port
final TrafficSelector cpuIgVlanSelector = DefaultTrafficSelector.builder()
.add(Criteria.matchInPort(PortNumber.portNumber(CPU_PORT)))
.add(PiCriterion.builder()
.matchExact(FabricConstants.HDR_VLAN_IS_VALID, ZERO)
.build())
.build();
final TrafficTreatment cpuIgVlanTreatment = DefaultTrafficTreatment.builder()
.piTableAction(PiAction.builder()
.withId(FabricConstants.FABRIC_INGRESS_FILTERING_PERMIT_WITH_INTERNAL_VLAN)
.withParameter(new PiActionParam(FabricConstants.VLAN_ID, DEFAULT_VLAN))
.withParameter(new PiActionParam(FabricConstants.PORT_TYPE, PORT_TYPE_INTERNAL))
.build())
.build();
final FlowRule expectedCpuIgVlanRule = DefaultFlowRule.builder()
.withSelector(cpuIgVlanSelector)
.withTreatment(cpuIgVlanTreatment)
.forTable(FabricConstants.FABRIC_INGRESS_FILTERING_INGRESS_PORT_VLAN)
.makePermanent()
.withPriority(DEFAULT_FLOW_PRIORITY)
.forDevice(DEVICE_ID)
.fromApp(APP_ID)
.build();
final TrafficSelector cpuFwdClsSelector = DefaultTrafficSelector.builder()
.matchInPort(PortNumber.portNumber(CPU_PORT))
.matchPi(PiCriterion.builder()
.matchExact(FabricConstants.HDR_IP_ETH_TYPE, Ethernet.TYPE_IPV4)
.build())
.build();
final TrafficTreatment cpuFwdClsTreatment = DefaultTrafficTreatment.builder()
.piTableAction(PiAction.builder()
.withId(FabricConstants.FABRIC_INGRESS_FILTERING_SET_FORWARDING_TYPE)
.withParameter(new PiActionParam(FabricConstants.FWD_TYPE, FWD_IPV4_ROUTING))
.build())
.build();
final FlowRule expectedCpuFwdClsRule = DefaultFlowRule.builder()
.withSelector(cpuFwdClsSelector)
.withTreatment(cpuFwdClsTreatment)
.forTable(FabricConstants.FABRIC_INGRESS_FILTERING_FWD_CLASSIFIER)
.makePermanent()
.withPriority(DEFAULT_FLOW_PRIORITY)
.forDevice(DEVICE_ID)
.fromApp(APP_ID)
.build();
flowRuleService.applyFlowRules(
capture(capturedCpuIgVlanRule),
capture(capturedCpuFwdClsRule));
replay(flowRuleService);
pipeliner.initializePipeline();
assertTrue(expectedCpuIgVlanRule.exactMatch(capturedCpuIgVlanRule.getValue()));
assertTrue(expectedCpuFwdClsRule.exactMatch(capturedCpuFwdClsRule.getValue()));
verify(flowRuleService);
reset(flowRuleService);
}
|
public void startAsync() {
try {
udfLoader.load();
ProcessingLogServerUtils.maybeCreateProcessingLogTopic(
serviceContext.getTopicClient(),
processingLogConfig,
ksqlConfig);
if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) {
log.warn("processing log auto-create is enabled, but this is not supported "
+ "for headless mode.");
}
rocksDBConfigSetterHandler.accept(ksqlConfig);
processesQueryFile(readQueriesFile(queriesFile));
showWelcomeMessage();
final Properties properties = new Properties();
ksqlConfig.originals().forEach((key, value) -> {
if (nonNull(value)) {
properties.put(key, value.toString());
}
});
versionChecker.start(KsqlModuleType.SERVER, properties);
} catch (final Exception e) {
log.error("Failed to start KSQL Server with query file: " + queriesFile, e);
throw e;
}
}
|
@Test
public void shouldRunUnSetStatements() {
// Given:
final PreparedStatement<SetProperty> setProp = PreparedStatement.of("SET",
new SetProperty(Optional.empty(), ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"));
final PreparedStatement<UnsetProperty> unsetProp = PreparedStatement.of("UNSET",
new UnsetProperty(Optional.empty(), ConsumerConfig.AUTO_OFFSET_RESET_CONFIG));
final PreparedStatement<CreateStream> cs = PreparedStatement.of("CS",
new CreateStream(SOME_NAME, SOME_ELEMENTS, false, false, JSON_PROPS, false));
final ConfiguredStatement<?> configured = ConfiguredStatement
.of(cs, SessionConfig.of(ksqlConfig, emptyMap()));
givenQueryFileParsesTo(setProp, unsetProp, cs);
// When:
standaloneExecutor.startAsync();
// Then:
verify(ksqlEngine).execute(serviceContext, configured);
}
|
public static <T> Bounded<T> from(BoundedSource<T> source) {
return new Bounded<>(null, source);
}
|
@Test
@Category({
NeedsRunner.class,
UsesUnboundedPCollections.class,
UsesUnboundedSplittableParDo.class
})
public void testUnboundedSdfWrapperCacheStartedReaders() {
long numElements = 1000L;
PCollection<Long> input =
pipeline.apply(Read.from(new ExpectCacheUnboundedSource(numElements)));
PAssert.that(input)
.containsInAnyOrder(
LongStream.rangeClosed(1L, numElements).boxed().collect(Collectors.toList()));
// TODO(https://github.com/apache/beam/issues/20530): Remove additional experiments when SDF
// read is default.
ExperimentalOptions.addExperiment(
pipeline.getOptions().as(ExperimentalOptions.class), "use_sdf_read");
// Force the pipeline to run with one thread to ensure the reader will be reused on one DoFn
// instance.
// We are not able to use DirectOptions because of circular dependency.
pipeline
.runWithAdditionalOptionArgs(ImmutableList.of("--targetParallelism=1"))
.waitUntilFinish();
}
|
@ApiOperation(value = "Get Asset Profile names (getAssetProfileNames)",
notes = "Returns a set of unique asset profile names owned by the tenant."
+ TENANT_OR_CUSTOMER_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('TENANT_ADMIN', 'CUSTOMER_USER')")
@RequestMapping(value = "/assetProfile/names", method = RequestMethod.GET)
@ResponseBody
public List<EntityInfo> getAssetProfileNames(
@Parameter(description = "Flag indicating whether to retrieve exclusively the names of asset profiles that are referenced by tenant's assets.")
@RequestParam(value = "activeOnly", required = false, defaultValue = "false") boolean activeOnly) throws ThingsboardException {
SecurityUser user = getCurrentUser();
TenantId tenantId = user.getTenantId();
return checkNotNull(assetProfileService.findAssetProfileNamesByTenantId(tenantId, activeOnly));
}
|
@Test
public void testGetAssetProfileNames() throws Exception {
var pageLink = new PageLink(Integer.MAX_VALUE);
var assetProfileInfos = doGetTypedWithPageLink("/api/assetProfileInfos?",
new TypeReference<PageData<AssetProfileInfo>>() {
}, pageLink);
Assert.assertNotNull("Asset Profile Infos page data is null!", assetProfileInfos);
Assert.assertEquals("Asset Profile Infos Page data is empty! Expected to have default profile created!", 1, assetProfileInfos.getTotalElements());
List<EntityInfo> expectedAssetProfileNames = assetProfileInfos.getData().stream()
.map(info -> new EntityInfo(info.getId(), info.getName()))
.sorted(Comparator.comparing(EntityInfo::getName))
.collect(Collectors.toList());
var assetProfileNames = doGetTyped("/api/assetProfile/names", new TypeReference<List<EntityInfo>>() {
});
Assert.assertNotNull("Asset Profile Names list is null!", assetProfileNames);
Assert.assertFalse("Asset Profile Names list is empty!", assetProfileNames.isEmpty());
Assert.assertEquals(expectedAssetProfileNames, assetProfileNames);
Assert.assertEquals(1, assetProfileNames.size());
Assert.assertEquals(DEFAULT_DEVICE_TYPE, assetProfileNames.get(0).getName());
int count = 3;
for (int i = 0; i < count; i++) {
Asset asset = new Asset();
asset.setName("AssetName" + i);
asset.setType("AssetProfileName" + i);
Asset savedAsset = doPost("/api/asset", asset, Asset.class);
Assert.assertNotNull(savedAsset);
}
assetProfileInfos = doGetTypedWithPageLink("/api/assetProfileInfos?",
new TypeReference<>() {
}, pageLink);
Assert.assertNotNull("Asset Profile Infos page data is null!", assetProfileInfos);
Assert.assertEquals("Asset Profile Infos Page data is empty! Expected to have default profile created + count value!", 1 + count, assetProfileInfos.getTotalElements());
expectedAssetProfileNames = assetProfileInfos.getData().stream()
.map(info -> new EntityInfo(info.getId(), info.getName()))
.sorted(Comparator.comparing(EntityInfo::getName))
.collect(Collectors.toList());
assetProfileNames = doGetTyped("/api/assetProfile/names", new TypeReference<>() {
});
Assert.assertNotNull("Asset Profile Names list is null!", assetProfileNames);
Assert.assertFalse("Asset Profile Names list is empty!", assetProfileNames.isEmpty());
Assert.assertEquals(expectedAssetProfileNames, assetProfileNames);
Assert.assertEquals(1 + count, assetProfileNames.size());
assetProfileNames = doGetTyped("/api/assetProfile/names?activeOnly=true", new TypeReference<>() {
});
Assert.assertNotNull("Asset Profile Names list is null!", assetProfileNames);
Assert.assertFalse("Asset Profile Names list is empty!", assetProfileNames.isEmpty());
var expectedAssetProfileNamesWithoutDefault = expectedAssetProfileNames.stream()
.filter(entityInfo -> !entityInfo.getName().equals(DEFAULT_DEVICE_TYPE))
.collect(Collectors.toList());
Assert.assertEquals(expectedAssetProfileNamesWithoutDefault, assetProfileNames);
Assert.assertEquals(count, assetProfileNames.size());
}
|
@Override
public void serialize(Asn1OutputStream out, String value) {
out.write(BCD.encode(value));
}
|
@Test
public void shouldSerialize() {
assertArrayEquals(
new byte[] { 0x01, 0x12 },
serialize(new BcdAsStringConverter(), String.class, "0112")
);
}
|
@Around(CLIENT_INTERFACE_REMOVE_CONFIG_RPC)
Object removeConfigAroundRpc(ProceedingJoinPoint pjp, ConfigRemoveRequest request, RequestMeta meta)
throws Throwable {
final ConfigChangePointCutTypes configChangePointCutType = ConfigChangePointCutTypes.REMOVE_BY_RPC;
final List<ConfigChangePluginService> pluginServices = getPluginServices(
configChangePointCutType);
// plugin not enabled or no relevant plugin added
if (pluginServices.isEmpty()) {
return pjp.proceed();
}
ConfigChangeRequest configChangeRequest = new ConfigChangeRequest(configChangePointCutType);
configChangeRequest.setArg("dataId", request.getDataId());
configChangeRequest.setArg("group", request.getGroup());
configChangeRequest.setArg("tenant", request.getTenant());
configChangeRequest.setArg("appName", request.getHeader("appName"));
configChangeRequest.setArg("srcIp", meta.getClientIp());
configChangeRequest.setArg("requestIpApp", request.getHeader("requestIpApp"));
configChangeRequest.setArg("srcUser", request.getHeader("src_user"));
configChangeRequest.setArg("use", request.getHeader("use"));
return configChangeServiceHandle(pjp, pluginServices, configChangeRequest);
}
|
@Test
void testRemoveConfigAroundRpcException() throws Throwable {
Mockito.when(configChangePluginService.executeType()).thenReturn(ConfigChangeExecuteTypes.EXECUTE_BEFORE_TYPE);
ProceedingJoinPoint proceedingJoinPoint = Mockito.mock(ProceedingJoinPoint.class);
ConfigRemoveRequest request = new ConfigRemoveRequest();
RequestMeta requestMeta = new RequestMeta();
Mockito.when(proceedingJoinPoint.proceed(any())).thenThrow(new NacosRuntimeException(503));
//execute
Object o = configChangeAspect.removeConfigAroundRpc(proceedingJoinPoint, request, requestMeta);
//expect
Mockito.verify(configChangePluginService, Mockito.times(1))
.execute(any(ConfigChangeRequest.class), any(ConfigChangeResponse.class));
assertTrue(((ConfigRemoveResponse) o).getMessage().contains("config change join point fail"));
}
|
public static Source flattenBaseSpecs(Source source) {
if (source.getBaseSpecs() == null) {
return source;
}
Map<String, Object> params = new HashMap<>();
for (Map<String, Object> baseSpec : source.getBaseSpecs()) {
params.putAll(baseSpec);
}
params.putAll(source.getSpec());
Source result = source.clone();
result.setSpec(params);
result.setBaseSpecs(null);
return result;
}
|
@Test
public void testFlattenBaseSpecs() throws Exception {
// G = grandparent, P = parent, C = child.
CloudObject grandparent = CloudObject.forClassName("text");
addString(grandparent, "G", "g_g");
addString(grandparent, "GP", "gp_g");
addString(grandparent, "GC", "gc_g");
addString(grandparent, "GPC", "gpc_g");
CloudObject parent = CloudObject.forClassName("text");
addString(parent, "P", "p_p");
addString(parent, "PC", "pc_p");
addString(parent, "GP", "gp_p");
addString(parent, "GPC", "gpc_p");
CloudObject child = CloudObject.forClassName("text");
addString(child, "C", "c_c");
addString(child, "PC", "pc_c");
addString(child, "GC", "gc_c");
addString(child, "GPC", "gpc_c");
Source source = new Source();
source.setBaseSpecs(new ArrayList<Map<String, Object>>());
source.getBaseSpecs().add(grandparent);
source.getBaseSpecs().add(parent);
source.setSpec(child);
source.setCodec(CloudObjects.asCloudObject(StringUtf8Coder.of(), /*sdkComponents=*/ null));
Source flat = CloudSourceUtils.flattenBaseSpecs(source);
assertNull(flat.getBaseSpecs());
assertEquals(
StringUtf8Coder.class.getName(),
getString(flat.getCodec(), PropertyNames.OBJECT_TYPE_NAME));
CloudObject flatSpec = CloudObject.fromSpec(flat.getSpec());
assertEquals("g_g", getString(flatSpec, "G"));
assertEquals("p_p", getString(flatSpec, "P"));
assertEquals("c_c", getString(flatSpec, "C"));
assertEquals("gp_p", getString(flatSpec, "GP"));
assertEquals("gc_c", getString(flatSpec, "GC"));
assertEquals("pc_c", getString(flatSpec, "PC"));
assertEquals("gpc_c", getString(flatSpec, "GPC"));
}
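The merge order matters here: base specs are applied in list order and the source's own spec is applied last, so child values win over parent values, which win over grandparent values. A tiny sketch of that precedence (keys and values below are illustrative, not taken from the test):
Map<String, Object> merged = new HashMap<>();
merged.putAll(Map.of("G", "g_g", "GP", "gp_g")); // grandparent base spec
merged.putAll(Map.of("P", "p_p", "GP", "gp_p")); // parent base spec overrides grandparent
merged.putAll(Map.of("C", "c_c", "P", "p_c"));   // the source's own spec wins last
// merged -> {G=g_g, GP=gp_p, P=p_c, C=c_c}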
|
public static boolean overlapsOrdered(IndexIterationPointer left, IndexIterationPointer right, Comparator comparator) {
assert left.isDescending() == right.isDescending() : "Cannot compare pointers with different directions";
assert left.lastEntryKeyData == null && right.lastEntryKeyData == null : "Can merge only initial pointers";
// fast path for the same instance
if (left == right) {
return true;
}
assert comparator.compare(left.from, right.from) <= 0 : "Pointers must be ordered";
// if one of the ends is +/-inf respectively -> overlap
if (left.to == null || right.from == null) {
return true;
}
// if the boundary values are equal, the ranges overlap (or are at least adjacent)
// provided that at least one of the boundaries is inclusive
boolean eqOverlaps = left.isToInclusive() || right.isFromInclusive();
// Check the non-infinite values; we do not need to check the other direction because
// the pointers are ordered. Thanks to that order we need not examine `right.to`;
// it suffices to check whether `right.from` falls within the `left` pointer's range.
// Inclusiveness must be honored so that we do not merge the ranges < X and > X.
int rfCmpLt = comparator.compare(right.from, left.to);
return eqOverlaps ? rfCmpLt <= 0 : rfCmpLt < 0;
}
|
@Test
void overlapsOrderedSingleton() {
assertTrue(overlapsOrdered(pointer(singleton(5)), pointer(singleton(5)),
OrderedIndexStore.SPECIAL_AWARE_COMPARATOR), "singleton value should overlap with itself");
assertFalse(overlapsOrdered(pointer(singleton(5)), pointer(singleton(6)),
OrderedIndexStore.SPECIAL_AWARE_COMPARATOR), "singleton value should not overlap with different singleton");
assertFalse(overlapsOrdered(pointer(singleton(5), true), pointer(singleton(6), true),
OrderedIndexStore.SPECIAL_AWARE_COMPARATOR), "singleton value should not overlap with different singleton");
}
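A worked example of the inclusivity rule, assuming a plain Integer comparator: with left ending exclusively at 5 and right starting exclusively at 5, the ranges x < 5 and x > 5 share no point and must not merge; making either boundary inclusive turns them into adjacent ranges that do merge.
boolean leftToInclusive = false;
boolean rightFromInclusive = false;
boolean eqOverlaps = leftToInclusive || rightFromInclusive;  // neither end inclusive
int rfCmpLt = Integer.compare(5, 5);                         // right.from vs left.to
boolean overlaps = eqOverlaps ? rfCmpLt <= 0 : rfCmpLt < 0;  // false: < 5 and > 5 stay apart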
|
@PostMapping("/authorize")
@Operation(summary = "Request authorization", description = "For the code (authorization code) flow or the implicit flow; invoked by the submit action on the sso.vue single sign-on page")
@Parameters({
@Parameter(name = "response_type", required = true, description = "response type", example = "code"),
@Parameter(name = "client_id", required = true, description = "client id", example = "tudou"),
@Parameter(name = "scope", description = "authorization scope", example = "userinfo.read"), // sent as a Map<String, Boolean> JSON string; Spring MVC cannot yet bind parameters in that form
@Parameter(name = "redirect_uri", required = true, description = "redirect URI", example = "https://www.iocoder.cn"),
@Parameter(name = "auto_approve", required = true, description = "whether the user approves", example = "true"),
@Parameter(name = "state", example = "1")
})
public CommonResult<String> approveOrDeny(@RequestParam("response_type") String responseType,
@RequestParam("client_id") String clientId,
@RequestParam(value = "scope", required = false) String scope,
@RequestParam("redirect_uri") String redirectUri,
@RequestParam(value = "auto_approve") Boolean autoApprove,
@RequestParam(value = "state", required = false) String state) {
@SuppressWarnings("unchecked")
Map<String, Boolean> scopes = JsonUtils.parseObject(scope, Map.class);
scopes = ObjectUtil.defaultIfNull(scopes, Collections.emptyMap());
// 0. Verify the user is already logged in (enforced via Spring Security)
// 1.1 Validate that responseType is either code or token
OAuth2GrantTypeEnum grantTypeEnum = getGrantTypeEnum(responseType);
// 1.2 Validate that the redirectUri domain is legal and that the scopes are within the client's authorized range
OAuth2ClientDO client = oauth2ClientService.validOAuthClientFromCache(clientId, null,
grantTypeEnum.getGrantType(), scopes.keySet(), redirectUri);
// 2.1 autoApprove is true: scenario one, try automatic approval
if (Boolean.TRUE.equals(autoApprove)) {
// If automatic approval is not possible, return an empty url so the frontend does not redirect
if (!oauth2ApproveService.checkForPreApproval(getLoginUserId(), getUserType(), clientId, scopes.keySet())) {
return success(null);
}
} else { // 2.2 autoApprove is false: scenario two, record the user's explicit decision
// If the computed approval does not pass, redirect to an error link
if (!oauth2ApproveService.updateAfterApproval(getLoginUserId(), getUserType(), clientId, scopes)) {
return success(OAuth2Utils.buildUnsuccessfulRedirect(redirectUri, responseType, state,
"access_denied", "User denied access"));
}
}
// 3.1 Authorization code flow (code): issue the authorization code and redirect
List<String> approveScopes = convertList(scopes.entrySet(), Map.Entry::getKey, Map.Entry::getValue);
if (grantTypeEnum == OAuth2GrantTypeEnum.AUTHORIZATION_CODE) {
return success(getAuthorizationCodeRedirect(getLoginUserId(), client, approveScopes, redirectUri, state));
}
// 3.2 Implicit flow (token): issue the access token and redirect
return success(getImplicitGrantRedirect(getLoginUserId(), client, approveScopes, redirectUri, state));
}
|
@Test // autoApprove = true, but pre-approval does not pass
public void testApproveOrDeny_autoApproveNo() {
// prepare parameters
String responseType = "code";
String clientId = randomString();
String scope = "{\"read\": true, \"write\": false}";
String redirectUri = randomString();
String state = randomString();
// mock methods
OAuth2ClientDO client = randomPojo(OAuth2ClientDO.class);
when(oauth2ClientService.validOAuthClientFromCache(eq(clientId), isNull(), eq("authorization_code"),
eq(asSet("read", "write")), eq(redirectUri))).thenReturn(client);
// invoke
CommonResult<String> result = oauth2OpenController.approveOrDeny(responseType, clientId,
scope, redirectUri, true, state);
// assert
assertEquals(0, result.getCode());
assertNull(result.getData());
}
|
@Override
public double mean() {
return nu2 / (nu2 - 2.0);
}
|
@Test
public void testMean() {
System.out.println("mean");
FDistribution instance = new FDistribution(10, 20);
instance.rand();
assertEquals(10.0/9, instance.mean(), 1E-7);
}
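For nu2 > 2 the F-distribution mean depends only on the denominator degrees of freedom: mean = nu2 / (nu2 - 2). With nu2 = 20 that is 20 / 18 = 10/9 ≈ 1.1111, the value the test asserts. A quick arithmetic check:
double nu2 = 20.0;
double mean = nu2 / (nu2 - 2.0);             // 20 / 18 = 10/9
assert Math.abs(mean - 10.0 / 9.0) < 1e-12;  // matches the test's expected value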
|
public static boolean isServiceRegistered() {
return INSTANCE != null;
}
|
@Test
public void isServiceRegisteredTest() {
assertFalse(OmemoService.isServiceRegistered());
}
|
@Override
public void updateUserPassword(Long id, UserProfileUpdatePasswordReqVO reqVO) {
// Validate the old password
validateOldPassword(id, reqVO.getOldPassword());
// Perform the update
AdminUserDO updateObj = new AdminUserDO().setId(id);
updateObj.setPassword(encodePassword(reqVO.getNewPassword())); // encrypt the password
userMapper.updateById(updateObj);
}
|
@Test
public void testUpdateUserPassword_success() {
// mock data
AdminUserDO dbUser = randomAdminUserDO(o -> o.setPassword("encode:tudou"));
userMapper.insert(dbUser);
// prepare parameters
Long userId = dbUser.getId();
UserProfileUpdatePasswordReqVO reqVO = randomPojo(UserProfileUpdatePasswordReqVO.class, o -> {
o.setOldPassword("tudou");
o.setNewPassword("yuanma");
});
// mock methods
when(passwordEncoder.encode(anyString())).then(
(Answer<String>) invocationOnMock -> "encode:" + invocationOnMock.getArgument(0));
when(passwordEncoder.matches(eq(reqVO.getOldPassword()), eq(dbUser.getPassword()))).thenReturn(true);
// invoke
userService.updateUserPassword(userId, reqVO);
// assert
AdminUserDO user = userMapper.selectById(userId);
assertEquals("encode:yuanma", user.getPassword());
}
|
@Deprecated
@Restricted(DoNotUse.class)
public static String resolve(ConfigurationContext context, String toInterpolate) {
return context.getSecretSourceResolver().resolve(toInterpolate);
}
|
@Test
public void resolve_FileKey() throws Exception {
Path path = getPath("secret.key");
String input = Paths.get("").toUri().relativize(path.toUri()).getPath();
String output = resolve("${readFile:" + input + "}");
assertThat(output, equalTo(FILE.lookup(input)));
assertThat(output, startsWith("-----BEGIN RSA PRIVATE KEY-----"));
}
|
public PluginWrapper(PluginManager parent, File archive, Manifest manifest, URL baseResourceURL,
ClassLoader classLoader, File disableFile,
List<Dependency> dependencies, List<Dependency> optionalDependencies) {
this.parent = parent;
this.manifest = manifest;
this.shortName = Util.intern(computeShortName(manifest, archive.getName()));
this.baseResourceURL = baseResourceURL;
this.classLoader = classLoader;
this.disableFile = disableFile;
this.active = !disableFile.exists();
this.dependencies = dependencies;
this.optionalDependencies = optionalDependencies;
for (Dependency d : optionalDependencies) {
assert d.optional : d + " included among optionalDependencies of " + shortName + " but was not marked optional";
}
this.archive = archive;
}
|
@Test
public void jenkinsCoreTooOld() {
PluginWrapper pw = pluginWrapper("fake").requiredCoreVersion("3.0").buildLoaded();
final IOException ex = assertThrows(IOException.class, pw::resolvePluginDependencies);
assertContains(ex, "Failed to load: Fake (fake 42)", "Jenkins (3.0) or higher required");
}
|
@Override
public List<String> listDbNames() {
return hmsOps.getAllDatabaseNames();
}
|
@Test
public void testListDbNames() {
List<String> databaseNames = hiveMetadata.listDbNames();
Assert.assertEquals(Lists.newArrayList("db1", "db2"), databaseNames);
CachingHiveMetastore queryLevelCache = CachingHiveMetastore.createQueryLevelInstance(cachingHiveMetastore, 100);
Assert.assertEquals(Lists.newArrayList("db1", "db2"), queryLevelCache.getAllDatabaseNames());
}
|
public void setSendFullErrorException(boolean sendFullErrorException) {
this.sendFullErrorException = sendFullErrorException;
}
|
@Test
void handleFlowableTaskAlreadyClaimedExceptionWithoutSendFullErrorException() throws Exception {
testController.exceptionSupplier = () -> new FlowableTaskAlreadyClaimedException("task-2", "tester");
handlerAdvice.setSendFullErrorException(false);
String body = mockMvc.perform(get("/"))
.andExpect(status().isConflict())
.andReturn()
.getResponse()
.getContentAsString();
assertThatJson(body)
.isEqualTo("{"
+ " message: 'Task was already claimed',"
+ " exception: \"Task 'task-2' is already claimed by someone else.\""
+ "}");
}
|
@Override
public void executeUpdate(final UnregisterStorageUnitStatement sqlStatement, final ContextManager contextManager) {
if (!sqlStatement.isIfExists()) {
checkExisted(sqlStatement.getStorageUnitNames());
}
checkInUsed(sqlStatement);
try {
contextManager.getPersistServiceFacade().getMetaDataManagerPersistService().unregisterStorageUnits(database.getName(), sqlStatement.getStorageUnitNames());
} catch (final SQLException | ShardingSphereServerException ex) {
throw new StorageUnitsOperateException("unregister", sqlStatement.getStorageUnitNames(), ex);
}
}
|
@Test
void assertExecuteUpdateWithStorageUnitInUsedWithIfExists() {
ShardingSphereRule rule = mock(ShardingSphereRule.class, RETURNS_DEEP_STUBS);
DataSourceMapperRuleAttribute ruleAttribute = mock(DataSourceMapperRuleAttribute.class);
when(ruleAttribute.getDataSourceMapper()).thenReturn(Collections.singletonMap("", Collections.singleton("foo_ds")));
when(rule.getAttributes()).thenReturn(new RuleAttributes(ruleAttribute));
when(database.getRuleMetaData()).thenReturn(new RuleMetaData(Collections.singleton(rule)));
assertThrows(InUsedStorageUnitException.class, () -> executor.executeUpdate(new UnregisterStorageUnitStatement(true, Collections.singleton("foo_ds"), true, false), contextManager));
}
|
@Override
public ExecuteContext after(ExecuteContext context) {
Object logStartupInfo = context.getMemberFieldValue("logStartupInfo");
if ((logStartupInfo instanceof Boolean) && (Boolean) logStartupInfo && INIT.compareAndSet(false, true)) {
final FlowControlInitServiceImpl service = PluginServiceManager.getPluginService(
FlowControlInitServiceImpl.class);
service.doStart();
}
return context;
}
|
@Test
public void testStart() throws Exception {
final SpringApplicationInterceptor springApplicationInterceptor = new SpringApplicationInterceptor();
springApplicationInterceptor.after(buildContext());
Assert.assertTrue(executed.get());
}
|
public List<TradeHistoryResponse> findTradeHistories(final Long memberId, final boolean isSeller) {
if (isSeller) {
return findHistories(memberId, tradeHistory.sellerId::eq);
}
return findHistories(memberId, tradeHistory.buyerId::eq);
}
|
@Test
void 구매자의_구매_내역을_조회한다() { // retrieves the buyer's purchase history
// when
List<TradeHistoryResponse> result = tradeHistoryQueryRepository.findTradeHistories(buyer.getId(), false);
// then
assertSoftly(softly -> {
softly.assertThat(result).hasSize(1);
softly.assertThat(result.get(0).buyerName()).isEqualTo(buyer.getNickname());
softly.assertThat(result.get(0).sellerName()).isEqualTo(seller.getNickname());
softly.assertThat(result.get(0).productTitle()).isEqualTo(product.getDescription().getTitle());
});
}
|
public String anonymize(final ParseTree tree) {
return build(tree);
}
|
@Test
public void shouldAnonymizeInsertIntoCorrectly() {
final String output = anon.anonymize(
"INSERT INTO my_stream SELECT user_id, browser_cookie, ip_address\n"
+ "FROM another_stream\n"
+ "WHERE user_id = 4214\n"
+ "AND browser_cookie = 'aefde34ec'\n"
+ "AND ip_address = '10.10.0.2';");
Approvals.verify(output);
}
|