focal_method | test_case
---|---|
public static FileRewriteCoordinator get() {
return INSTANCE;
}
|
@TestTemplate
public void testCommitMultipleRewrites() throws NoSuchTableException, IOException {
sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName);
Dataset<Row> df = newDF(1000);
// add first two files
df.coalesce(1).writeTo(tableName).append();
df.coalesce(1).writeTo(tableName).append();
Table table = validationCatalog.loadTable(tableIdent);
String firstFileSetID = UUID.randomUUID().toString();
long firstFileSetSnapshotId = table.currentSnapshot().snapshotId();
ScanTaskSetManager taskSetManager = ScanTaskSetManager.get();
try (CloseableIterable<FileScanTask> tasks = table.newScan().planFiles()) {
// stage first 2 files for compaction
taskSetManager.stageTasks(table, firstFileSetID, Lists.newArrayList(tasks));
}
// add two more files
df.coalesce(1).writeTo(tableName).append();
df.coalesce(1).writeTo(tableName).append();
table.refresh();
String secondFileSetID = UUID.randomUUID().toString();
try (CloseableIterable<FileScanTask> tasks =
table.newScan().appendsAfter(firstFileSetSnapshotId).planFiles()) {
// stage 2 more files for compaction
taskSetManager.stageTasks(table, secondFileSetID, Lists.newArrayList(tasks));
}
ImmutableSet<String> fileSetIDs = ImmutableSet.of(firstFileSetID, secondFileSetID);
for (String fileSetID : fileSetIDs) {
// read and pack 2 files into 1 split
Dataset<Row> scanDF =
spark
.read()
.format("iceberg")
.option(SparkReadOptions.SCAN_TASK_SET_ID, fileSetID)
.option(SparkReadOptions.SPLIT_SIZE, Long.MAX_VALUE)
.load(tableName);
// write the combined data as one file
scanDF
.writeTo(tableName)
.option(SparkWriteOptions.REWRITTEN_FILE_SCAN_TASK_SET_ID, fileSetID)
.append();
}
// commit both rewrites at the same time
FileRewriteCoordinator rewriteCoordinator = FileRewriteCoordinator.get();
Set<DataFile> rewrittenFiles =
fileSetIDs.stream()
.flatMap(fileSetID -> taskSetManager.fetchTasks(table, fileSetID).stream())
.map(t -> t.asFileScanTask().file())
.collect(Collectors.toSet());
Set<DataFile> addedFiles =
fileSetIDs.stream()
.flatMap(fileSetID -> rewriteCoordinator.fetchNewFiles(table, fileSetID).stream())
.collect(Collectors.toSet());
table.newRewrite().rewriteFiles(rewrittenFiles, addedFiles).commit();
table.refresh();
assertThat(table.snapshots()).as("Should produce 5 snapshots").hasSize(5);
Map<String, String> summary = table.currentSnapshot().summary();
assertThat(summary.get("deleted-data-files"))
.as("Deleted files count must match")
.isEqualTo("4");
assertThat(summary.get("added-data-files")).as("Added files count must match").isEqualTo("2");
Object rowCount = scalarSql("SELECT count(*) FROM %s", tableName);
assertThat(rowCount).as("Row count must match").isEqualTo(4000L);
}
|
@Override
public RedisClusterNode clusterGetNodeForSlot(int slot) {
Iterable<RedisClusterNode> res = clusterGetNodes();
for (RedisClusterNode redisClusterNode : res) {
if (redisClusterNode.isMaster() && redisClusterNode.getSlotRange().contains(slot)) {
return redisClusterNode;
}
}
return null;
}
|
@Test
public void testClusterGetNodeForSlot() {
testInCluster(connection -> {
RedisClusterNode node1 = connection.clusterGetNodeForSlot(1);
RedisClusterNode node2 = connection.clusterGetNodeForSlot(16000);
assertThat(node1.getId()).isNotEqualTo(node2.getId());
});
}
|
@InvokeOnHeader(Web3jConstants.ETH_GET_FILTER_LOGS)
void ethGetFilterLogs(Message message) throws IOException {
BigInteger filterId = message.getHeader(Web3jConstants.FILTER_ID, configuration::getFilterId, BigInteger.class);
Request<?, EthLog> request = web3j.ethGetFilterLogs(filterId);
setRequestId(message, request);
EthLog response = request.send();
boolean hasError = checkForError(message, response);
if (!hasError) {
message.setBody(response.getLogs());
}
}
|
@Test
public void ethGetFilterLogsTest() throws Exception {
EthLog response = Mockito.mock(EthLog.class);
Mockito.when(mockWeb3j.ethGetFilterLogs(any())).thenReturn(request);
Mockito.when(request.send()).thenReturn(response);
Mockito.when(response.getLogs()).thenReturn(Collections.emptyList());
Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_GET_FILTER_LOGS);
template.send(exchange);
List body = exchange.getIn().getBody(List.class);
assertTrue(body.isEmpty());
}
|
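// Releases all records acquired by the given member across the topic-partitions cached in its
// share session; partitions missing from the partition cache map to UNKNOWN_TOPIC_OR_PARTITION,
// and the combined per-partition results are returned once every release future completes.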
public CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> releaseAcquiredRecords(
String groupId,
String memberId
) {
log.trace("Release acquired records request for groupId: {}, memberId: {}", groupId, memberId);
List<TopicIdPartition> topicIdPartitions = cachedTopicIdPartitionsInShareSession(
groupId, Uuid.fromString(memberId));
if (topicIdPartitions.isEmpty()) {
return CompletableFuture.completedFuture(Collections.emptyMap());
}
Map<TopicIdPartition, CompletableFuture<Errors>> futuresMap = new HashMap<>();
topicIdPartitions.forEach(topicIdPartition -> {
SharePartition sharePartition = partitionCacheMap.get(sharePartitionKey(groupId, topicIdPartition));
if (sharePartition == null) {
log.error("No share partition found for groupId {} topicPartition {} while releasing acquired topic partitions", groupId, topicIdPartition);
futuresMap.put(topicIdPartition, CompletableFuture.completedFuture(Errors.UNKNOWN_TOPIC_OR_PARTITION));
} else {
CompletableFuture<Errors> future = sharePartition.releaseAcquiredRecords(memberId).thenApply(throwable -> {
if (throwable.isPresent()) {
return Errors.forException(throwable.get());
}
return Errors.NONE;
});
futuresMap.put(topicIdPartition, future);
}
});
CompletableFuture<Void> allFutures = CompletableFuture.allOf(
futuresMap.values().toArray(new CompletableFuture[futuresMap.size()]));
return allFutures.thenApply(v -> {
Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = new HashMap<>();
futuresMap.forEach((topicIdPartition, future) -> result.put(topicIdPartition, new ShareAcknowledgeResponseData.PartitionData()
.setPartitionIndex(topicIdPartition.partition())
.setErrorCode(future.join().code())));
return result;
});
}
|
@Test
public void testReleaseAcquiredRecordsSuccess() {
String groupId = "grp";
Uuid memberId = Uuid.randomUuid();
TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 2));
TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("baz", 4));
SharePartition sp1 = mock(SharePartition.class);
SharePartition sp2 = mock(SharePartition.class);
when(sp1.releaseAcquiredRecords(ArgumentMatchers.eq(memberId.toString()))).thenReturn(CompletableFuture.completedFuture(Optional.empty()));
when(sp2.releaseAcquiredRecords(ArgumentMatchers.eq(memberId.toString()))).thenReturn(CompletableFuture.completedFuture(
Optional.of(new InvalidRecordStateException("Unable to release acquired records for the batch"))
));
ShareSessionCache cache = mock(ShareSessionCache.class);
ShareSession shareSession = mock(ShareSession.class);
when(cache.get(new ShareSessionKey(groupId, memberId))).thenReturn(shareSession);
ImplicitLinkedHashCollection<CachedSharePartition> partitionMap = new ImplicitLinkedHashCollection<>(3);
partitionMap.add(new CachedSharePartition(tp1));
partitionMap.add(new CachedSharePartition(tp2));
partitionMap.add(new CachedSharePartition(tp3));
when(shareSession.partitionMap()).thenReturn(partitionMap);
Map<SharePartitionManager.SharePartitionKey, SharePartition> partitionCacheMap = new HashMap<>();
partitionCacheMap.put(new SharePartitionManager.SharePartitionKey(groupId, tp1), sp1);
partitionCacheMap.put(new SharePartitionManager.SharePartitionKey(groupId, tp2), sp2);
SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder()
.withCache(cache)
.withPartitionCacheMap(partitionCacheMap)
.build();
CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
sharePartitionManager.releaseAcquiredRecords(groupId, memberId.toString());
Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = resultFuture.join();
assertEquals(3, result.size());
assertTrue(result.containsKey(tp1));
assertTrue(result.containsKey(tp2));
assertTrue(result.containsKey(tp3));
assertEquals(0, result.get(tp1).partitionIndex());
assertEquals(Errors.NONE.code(), result.get(tp1).errorCode());
assertEquals(2, result.get(tp2).partitionIndex());
assertEquals(Errors.INVALID_RECORD_STATE.code(), result.get(tp2).errorCode());
// tp3 was not a part of partitionCacheMap.
assertEquals(4, result.get(tp3).partitionIndex());
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), result.get(tp3).errorCode());
}
|
IdBatchAndWaitTime newIdBaseLocal(int batchSize) {
return newIdBaseLocal(Clock.currentTimeMillis(), getNodeId(), batchSize);
}
|
@Test
public void test_timeHighEdge() {
IdBatchAndWaitTime result = gen.newIdBaseLocal(DEFAULT_EPOCH_START + (1L << DEFAULT_BITS_TIMESTAMP) - 1L, 1234, 10);
assertEquals(9223372036850582738L, result.idBatch.base());
}
|
public boolean isDisabled() {
return _disabled;
}
|
@Test
public void withDisabledNull()
throws JsonProcessingException {
String confStr = "{\"disabled\": null}";
IndexConfig config = JsonUtils.stringToObject(confStr, IndexConfig.class);
assertFalse(config.isDisabled(), "Unexpected disabled");
}
|
public static int numberOfTrailingZeros(long hash, int indexBitLength)
{
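// e.g. for hash == 0 and indexBitLength == 6, value == 1L << 58, so the result is capped at Long.SIZE - indexBitLength == 58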
long value = hash | (1L << (Long.SIZE - indexBitLength)); // place a 1 in the final position of the prefix to avoid flowing into prefix when the hash happens to be 0
return Long.numberOfTrailingZeros(value);
}
|
@Test
public void testNumberOfTrailingZeros()
{
for (int indexBitLength : new int[] {6, 12, 18}) {
for (int i = 0; i < Long.SIZE - 1; i++) {
long hash = 1L << i;
assertEquals(SfmSketch.numberOfTrailingZeros(hash, indexBitLength), Math.min(i, Long.SIZE - indexBitLength));
}
}
}
|
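// Loads bundled plugins first, then external plugins (failing fast on duplicate keys or on a
// clash with a bundled plugin), then moves downloaded plugins into the external directory,
// replacing any previously installed version; finally unloads plugins whose requirements are not met.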
public Collection<ServerPluginInfo> loadPlugins() {
Map<String, ServerPluginInfo> bundledPluginsByKey = new LinkedHashMap<>();
for (ServerPluginInfo bundled : getBundledPluginsMetadata()) {
failIfContains(bundledPluginsByKey, bundled,
plugin -> MessageException.of(format("Found two versions of the plugin %s [%s] in the directory %s. Please remove one of %s or %s.",
bundled.getName(), bundled.getKey(), getRelativeDir(fs.getInstalledBundledPluginsDir()), bundled.getNonNullJarFile().getName(), plugin.getNonNullJarFile().getName())));
bundledPluginsByKey.put(bundled.getKey(), bundled);
}
Map<String, ServerPluginInfo> externalPluginsByKey = new LinkedHashMap<>();
for (ServerPluginInfo external : getExternalPluginsMetadata()) {
failIfContains(bundledPluginsByKey, external,
plugin -> MessageException.of(format("Found a plugin '%s' in the directory '%s' with the same key [%s] as a built-in feature '%s'. Please remove '%s'.",
external.getName(), getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getKey(), plugin.getName(),
new File(getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getNonNullJarFile().getName()))));
failIfContains(externalPluginsByKey, external,
plugin -> MessageException.of(format("Found two versions of the plugin '%s' [%s] in the directory '%s'. Please remove %s or %s.", external.getName(), external.getKey(),
getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getNonNullJarFile().getName(), plugin.getNonNullJarFile().getName())));
externalPluginsByKey.put(external.getKey(), external);
}
for (PluginInfo downloaded : getDownloadedPluginsMetadata()) {
failIfContains(bundledPluginsByKey, downloaded,
plugin -> MessageException.of(format("Fail to update plugin: %s. Built-in feature with same key already exists: %s. Move or delete plugin from %s directory",
plugin.getName(), plugin.getKey(), getRelativeDir(fs.getDownloadedPluginsDir()))));
ServerPluginInfo installedPlugin;
if (externalPluginsByKey.containsKey(downloaded.getKey())) {
deleteQuietly(externalPluginsByKey.get(downloaded.getKey()).getNonNullJarFile());
installedPlugin = moveDownloadedPluginToExtensions(downloaded);
LOG.info("Plugin {} [{}] updated to version {}", installedPlugin.getName(), installedPlugin.getKey(), installedPlugin.getVersion());
} else {
installedPlugin = moveDownloadedPluginToExtensions(downloaded);
LOG.info("Plugin {} [{}] installed", installedPlugin.getName(), installedPlugin.getKey());
}
externalPluginsByKey.put(downloaded.getKey(), installedPlugin);
}
Map<String, ServerPluginInfo> plugins = new HashMap<>(externalPluginsByKey.size() + bundledPluginsByKey.size());
plugins.putAll(externalPluginsByKey);
plugins.putAll(bundledPluginsByKey);
PluginRequirementsValidator.unloadIncompatiblePlugins(plugins);
return plugins.values();
}
|
@Test
public void load_installed_bundled_and_external_plugins() throws Exception {
copyTestPluginTo("test-base-plugin", fs.getInstalledExternalPluginsDir());
copyTestPluginTo("test-extend-plugin", fs.getInstalledBundledPluginsDir());
Collection<ServerPluginInfo> loadedPlugins = underTest.loadPlugins();
assertThat(loadedPlugins).extracting(PluginInfo::getKey).containsOnly("testbase", "testextend");
}
|
public static Expression generateFilterExpression(SearchArgument sarg) {
return translate(sarg.getExpression(), sarg.getLeaves());
}
|
@Test
public void testEqualsOperand() {
SearchArgument.Builder builder = SearchArgumentFactory.newBuilder();
SearchArgument arg = builder.startAnd().equals("salary", PredicateLeaf.Type.LONG, 3000L).end().build();
UnboundPredicate expected = Expressions.equal("salary", 3000L);
UnboundPredicate actual = (UnboundPredicate) HiveIcebergFilterFactory.generateFilterExpression(arg);
assertPredicatesMatch(expected, actual);
}
|
@Override
public Serde<GenericRow> create(
final FormatInfo format,
final PersistenceSchema schema,
final KsqlConfig ksqlConfig,
final Supplier<SchemaRegistryClient> srClientFactory,
final String loggerNamePrefix,
final ProcessingLogContext processingLogContext,
final Optional<TrackedCallback> tracker
) {
final Serde<List<?>> formatSerde =
innerFactory.createFormatSerde("Value", format, schema, ksqlConfig, srClientFactory, false);
final Serde<GenericRow> genericRowSerde = toGenericRowSerde(formatSerde, schema);
final Serde<GenericRow> loggingSerde = innerFactory.wrapInLoggingSerde(
genericRowSerde,
loggerNamePrefix,
processingLogContext,
queryId);
final Serde<GenericRow> serde = tracker
.map(callback -> innerFactory.wrapInTrackingSerde(loggingSerde, callback))
.orElse(loggingSerde);
serde.configure(Collections.emptyMap(), false);
return serde;
}
|
@Test
public void shouldNotWrapInTrackingSerdeIfNoCallbackProvided() {
// When:
factory.create(format, schema, config, srClientFactory, LOGGER_PREFIX, processingLogCxt,
Optional.empty());
// Then:
verify(innerFactory, never()).wrapInTrackingSerde(any(), any());
}
|
public Page dropColumn(int channelIndex)
{
if (channelIndex < 0 || channelIndex >= getChannelCount()) {
throw new IndexOutOfBoundsException(format("Invalid channel %d in page with %s channels", channelIndex, getChannelCount()));
}
Block[] result = new Block[getChannelCount() - 1];
System.arraycopy(blocks, 0, result, 0, channelIndex);
System.arraycopy(blocks, channelIndex + 1, result, channelIndex, getChannelCount() - channelIndex - 1);
return wrapBlocksWithoutCopy(positionCount, result);
}
|
@Test
public void testDropColumn()
{
int entries = 10;
BlockBuilder blockBuilder = BIGINT.createBlockBuilder(null, entries);
for (int i = 0; i < entries; i++) {
BIGINT.writeLong(blockBuilder, i);
}
Block block = blockBuilder.build();
Page page = new Page(block, block, block);
assertEquals(page.getChannelCount(), 3);
Page newPage = page.dropColumn(1);
assertEquals(page.getChannelCount(), 3, "Page was modified");
assertEquals(newPage.getChannelCount(), 2);
assertEquals(newPage.getBlock(0).getLong(0), 0);
assertEquals(newPage.getBlock(1).getLong(1), 1);
}
|
public final <KIn, VIn, KOut, VOut> void addProcessor(final String name,
final ProcessorSupplier<KIn, VIn, KOut, VOut> supplier,
final String... predecessorNames) {
Objects.requireNonNull(name, "name must not be null");
Objects.requireNonNull(supplier, "supplier must not be null");
Objects.requireNonNull(predecessorNames, "predecessor names must not be null");
ApiUtils.checkSupplier(supplier);
if (nodeFactories.containsKey(name)) {
throw new TopologyException("Processor " + name + " is already added.");
}
if (predecessorNames.length == 0) {
throw new TopologyException("Processor " + name + " must have at least one parent");
}
for (final String predecessor : predecessorNames) {
Objects.requireNonNull(predecessor, "predecessor name must not be null");
if (predecessor.equals(name)) {
throw new TopologyException("Processor " + name + " cannot be a predecessor of itself.");
}
if (!nodeFactories.containsKey(predecessor)) {
throw new TopologyException("Predecessor processor " + predecessor + " is not added yet for " + name);
}
}
nodeFactories.put(name, new ProcessorNodeFactory<>(name, predecessorNames, supplier));
nodeGrouper.add(name);
nodeGrouper.unite(name, predecessorNames);
nodeGroups = null;
}
|
@Test
public void shouldNotAllowNullProcessorSupplier() {
assertThrows(
NullPointerException.class,
() -> builder.addProcessor(
"name",
(ProcessorSupplier<Object, Object, Object, Object>) null
)
);
}
|
public Capacity getCapacity(String group, String tenant) {
if (tenant != null) {
return getTenantCapacity(tenant);
}
return getGroupCapacity(group);
}
|
@Test
void testGetCapacity() {
GroupCapacity groupCapacity = new GroupCapacity();
groupCapacity.setId(1L);
when(groupCapacityPersistService.getGroupCapacity(eq("testGroup"))).thenReturn(groupCapacity);
TenantCapacity tenantCapacity = new TenantCapacity();
tenantCapacity.setId(2L);
when(tenantCapacityPersistService.getTenantCapacity(eq("testTenant"))).thenReturn(tenantCapacity);
Capacity resCapacity1 = service.getCapacity("testGroup", null);
assertEquals(1L, resCapacity1.getId().longValue());
Capacity resCapacity2 = service.getCapacity(null, "testTenant");
assertEquals(2L, resCapacity2.getId().longValue());
}
|
public static Range<LocalDateTime> localDateTimeRange(String range) {
return ofString(range, parseLocalDateTime().compose(unquote()), LocalDateTime.class);
}
|
@Test
public void localDateTimeTest() {
assertNotNull(Range.localDateTimeRange("[2019-03-27 16:33:10.1,)"));
assertNotNull(Range.localDateTimeRange("[2019-03-27 16:33:10.12,)"));
assertNotNull(Range.localDateTimeRange("[2019-03-27 16:33:10.123,)"));
assertNotNull(Range.localDateTimeRange("[2019-03-27 16:33:10.1234,)"));
assertNotNull(Range.localDateTimeRange("[2019-03-27 16:33:10.12345,)"));
assertNotNull(Range.localDateTimeRange("[2019-03-27 16:33:10.123456,)"));
assertNotNull(Range.localDateTimeRange("[2019-03-27 16:33:10.123456,infinity)"));
}
|
public static void ensureSerializable(Object obj) {
try {
InstantiationUtil.serializeObject(obj);
} catch (Exception e) {
throw new InvalidProgramException("Object " + obj + " is not serializable", e);
}
}
|
@Test
void testNonSerializable() {
assertThatThrownBy(
() -> {
MapCreator creator = new NonSerializableMapCreator();
MapFunction<Integer, Integer> map = creator.getMap();
ClosureCleaner.ensureSerializable(map);
int result = map.map(3);
assertThat(result).isEqualTo(4);
})
.isInstanceOf(InvalidProgramException.class);
}
|
public static ParamType getVarArgsSchemaFromType(final Type type) {
return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
}
|
@Test
public void shouldGetGenericBiFunctionVariadic() throws NoSuchMethodException {
// Given:
final Type genericType = getClass().getMethod("partialGenericBiFunctionType").getGenericReturnType();
// When:
final ParamType returnType = UdfUtil.getVarArgsSchemaFromType(genericType);
// Then:
assertThat(returnType, is(LambdaType.of(ImmutableList.of(GenericType.of("T"), ParamTypes.BOOLEAN), GenericType.of("U"))));
}
|
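// Parses a dynamic voter string of the form "<nodeId>@<host>:<port>:<directoryId>", where an
// IPv6 host may be wrapped in square brackets; illustrative input: "5@broker1:9093:<base64 directory id>".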
public static DynamicVoter parse(String input) {
input = input.trim();
int atIndex = input.indexOf("@");
if (atIndex < 0) {
throw new IllegalArgumentException("No @ found in dynamic voter string.");
}
if (atIndex == 0) {
throw new IllegalArgumentException("Invalid @ at beginning of dynamic voter string.");
}
String idString = input.substring(0, atIndex);
int nodeId;
try {
nodeId = Integer.parseInt(idString);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Failed to parse node id in dynamic voter string.", e);
}
if (nodeId < 0) {
throw new IllegalArgumentException("Invalid negative node id " + nodeId +
" in dynamic voter string.");
}
input = input.substring(atIndex + 1);
if (input.isEmpty()) {
throw new IllegalArgumentException("No hostname found after node id.");
}
String host;
if (input.startsWith("[")) {
int endBracketIndex = input.indexOf("]");
if (endBracketIndex < 0) {
throw new IllegalArgumentException("Hostname began with left bracket, but no right " +
"bracket was found.");
}
host = input.substring(1, endBracketIndex);
input = input.substring(endBracketIndex + 1);
} else {
int endColonIndex = input.indexOf(":");
if (endColonIndex < 0) {
throw new IllegalArgumentException("No colon following hostname could be found.");
}
host = input.substring(0, endColonIndex);
input = input.substring(endColonIndex);
}
if (!input.startsWith(":")) {
throw new IllegalArgumentException("Port section must start with a colon.");
}
input = input.substring(1);
int endColonIndex = input.indexOf(":");
if (endColonIndex < 0) {
throw new IllegalArgumentException("No colon following port could be found.");
}
String portString = input.substring(0, endColonIndex);
int port;
try {
port = Integer.parseInt(portString);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Failed to parse port in dynamic voter string.", e);
}
if (port < 0 || port > 65535) {
throw new IllegalArgumentException("Invalid port " + port + " in dynamic voter string.");
}
String directoryIdString = input.substring(endColonIndex + 1);
Uuid directoryId;
try {
directoryId = Uuid.fromString(directoryIdString);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Failed to parse directory ID in dynamic voter string.", e);
}
return new DynamicVoter(directoryId, nodeId, host, port);
}
|
@Test
public void testParseDynamicVoterWithoutHostname() {
assertEquals("No hostname found after node id.",
assertThrows(IllegalArgumentException.class,
() -> DynamicVoter.parse("2@")).
getMessage());
}
|
public static boolean doubleEquals(double a, double b)
{
// the first check ensures +0 == -0 is true. the second ensures that NaN == NaN is true
// for all other cases a == b and doubleToLongBits(a) == doubleToLongBits(b) will return
// the same result
// doubleToLongBits converts all NaNs to the same representation
return a == b || doubleToLongBits(a) == doubleToLongBits(b);
}
|
@Test
public void testDoubleEquals()
{
assertTrue(doubleEquals(0, Double.parseDouble("-0")));
//0x7ff8123412341234L is a different representation of NaN
assertTrue(doubleEquals(Double.NaN, longBitsToDouble(0x7ff8123412341234L)));
}
|
@ExceptionHandler(UserNotFoundException.class)
protected ResponseEntity<Object> handleUserNotFoundException(final UserNotFoundException ex) {
CustomError customError = CustomError.builder()
.httpStatus(HttpStatus.NOT_FOUND)
.header(CustomError.Header.API_ERROR.getName())
.message(ex.getMessage())
.build();
return new ResponseEntity<>(customError, HttpStatus.NOT_FOUND);
}
|
@Test
void givenUserNotFoundException_whenHandleUserNotFoundException_thenRespondWithNotFound() {
// Given
UserNotFoundException ex = new UserNotFoundException();
CustomError expectedError = CustomError.builder()
.httpStatus(HttpStatus.NOT_FOUND)
.header(CustomError.Header.API_ERROR.getName())
.message("User not found!\n")
.build();
// When
ResponseEntity<Object> responseEntity = globalExceptionHandler.handleUserNotFoundException(ex);
// Then
assertThat(responseEntity.getStatusCode()).isEqualTo(HttpStatus.NOT_FOUND);
CustomError actualError = (CustomError) responseEntity.getBody();
checkCustomError(expectedError, actualError);
}
|
@Override
public BluePipeline get(String name) {
Job job = pipeline.mbp.getItem(name);
if (job == null) {
return null;
}
BlueOrganization organization = OrganizationFactory.getInstance().getContainingOrg(job);
if (organization == null) {
return null;
}
return new BranchImpl(organization, job, getLink());
}
|
@Test
public void testBranchOrdering() throws Exception {
j.jenkins.setSecurityRealm(j.createDummySecurityRealm());
hudson.model.User user = User.get("alice");
user.setFullName("Alice Cooper");
WorkflowMultiBranchProject mp = j.jenkins.createProject(WorkflowMultiBranchProject.class, "p");
mp.getSourcesList().add(new BranchSource(new GitSCMSource(null, sampleRepo.toString(), "", "*", "", false),
new DefaultBranchPropertyStrategy(new BranchProperty[0])));
for (SCMSource source : mp.getSCMSources()) {
assertEquals(mp, source.getOwner());
}
WorkflowJob p = scheduleAndFindBranchProject(mp, "master");
j.waitUntilNoActivity();
String token = getJwtToken(j.jenkins, "alice", "alice");
new RequestBuilder(baseUrl)
.put("/organizations/jenkins/pipelines/p/branches/feature2/favorite")
.jwtToken(token)
.data(MapsHelper.of("favorite", true))
.build(Map.class);
new RequestBuilder(baseUrl)
.put("/organizations/jenkins/pipelines/p/branches/feature4/favorite")
.jwtToken(token)
.data(MapsHelper.of("favorite", true))
.build(Map.class);
List l = request().get("/organizations/jenkins/pipelines/p/branches/")
.jwtToken(token)
.build(List.class);
Assert.assertEquals(4, l.size());
Map o = (Map)l.get(1);
Map o2 = (Map)l.get(0);
WorkflowJob j1 = findBranchProject(mp, (String)o.get("name"));
j.waitForCompletion(j1.getLastBuild());
j.waitForCompletion(j1.scheduleBuild2(0).waitForStart());
WorkflowJob j2 = findBranchProject(mp, (String)o2.get("name"));
j.waitForCompletion(j2.getLastBuild());
List l2 = request().get("/organizations/jenkins/pipelines/p/branches/")
.jwtToken(token)
.build(List.class);
Assert.assertEquals(4, l2.size());
Map o1 = (Map)l2.get(0);
Map o3 = (Map)l2.get(1);
Assert.assertEquals(o2.get("name"), o1.get("name"));
}
|
@Override
public void processDeviceCreatedState(OpenstackNode osNode) {
try {
if (!isOvsdbConnected(osNode, ovsdbPortNum, ovsdbController, deviceService)) {
ovsdbController.connect(osNode.managementIp(), tpPort(ovsdbPortNum));
return;
}
if (osNode.type() == GATEWAY) {
addOrRemoveSystemInterface(osNode, INTEGRATION_BRIDGE,
osNode.uplinkPort(), deviceService, true);
}
if (osNode.dataIp() != null &&
!isIntfEnabled(osNode, VXLAN_TUNNEL)) {
createVxlanTunnelInterface(osNode);
}
if (osNode.dataIp() != null &&
!isIntfEnabled(osNode, GRE_TUNNEL)) {
createGreTunnelInterface(osNode);
}
if (osNode.dataIp() != null &&
!isIntfEnabled(osNode, GENEVE_TUNNEL)) {
createGeneveTunnelInterface(osNode);
}
if (osNode.dpdkConfig() != null && osNode.dpdkConfig().dpdkIntfs() != null) {
osNode.dpdkConfig().dpdkIntfs().stream()
.filter(dpdkintf -> dpdkintf.deviceName().equals(TUNNEL_BRIDGE))
.forEach(dpdkintf -> addOrRemoveDpdkInterface(
osNode, dpdkintf, ovsdbPortNum, ovsdbController, true));
osNode.dpdkConfig().dpdkIntfs().stream()
.filter(dpdkintf -> dpdkintf.deviceName().equals(INTEGRATION_BRIDGE))
.forEach(dpdkintf -> addOrRemoveDpdkInterface(
osNode, dpdkintf, ovsdbPortNum, ovsdbController, true));
}
// provision new physical interfaces on the given node
// this includes creating physical bridge, attaching physical port
// to physical bridge, adding patch ports to both physical bridge and br-int
provisionPhysicalInterfaces(osNode);
if (osNode.vlanIntf() != null &&
!isIntfEnabled(osNode, osNode.vlanIntf())) {
addOrRemoveSystemInterface(osNode, INTEGRATION_BRIDGE,
osNode.vlanIntf(), deviceService, true);
}
} catch (Exception e) {
log.error("Exception occurred because of {}", e);
}
}
|
@Test
public void testComputeNodeProcessDeviceCreatedState() {
testNodeManager.createNode(COMPUTE_2);
TEST_DEVICE_SERVICE.devMap.put(COMPUTE_2_OVSDB_DEVICE.id(), COMPUTE_2_OVSDB_DEVICE);
TEST_DEVICE_SERVICE.devMap.put(COMPUTE_2_INTG_DEVICE.id(), COMPUTE_2_INTG_DEVICE);
assertEquals(ERR_STATE_NOT_MATCH, DEVICE_CREATED,
testNodeManager.node(COMPUTE_2_HOSTNAME).state());
target.processDeviceCreatedState(COMPUTE_2);
assertEquals(ERR_STATE_NOT_MATCH, COMPLETE,
testNodeManager.node(COMPUTE_2_HOSTNAME).state());
}
|
@SuppressWarnings("unchecked")
@Override
public Concat.Output run(RunContext runContext) throws Exception {
File tempFile = runContext.workingDir().createTempFile(extension).toFile();
try (FileOutputStream fileOutputStream = new FileOutputStream(tempFile)) {
List<String> finalFiles;
if (this.files instanceof List) {
finalFiles = (List<String>) this.files;
} else if (this.files instanceof String) {
final TypeReference<List<String>> reference = new TypeReference<>() {};
finalFiles = JacksonMapper.ofJson(false).readValue(
runContext.render((String) this.files),
reference
);
} else {
throw new Exception("Invalid `files` properties with type '" + this.files.getClass() + "'");
}
finalFiles.forEach(throwConsumer(s -> {
URI from = new URI(runContext.render(s));
try (InputStream inputStream = runContext.storage().getFile(from)) {
IOUtils.copyLarge(inputStream, fileOutputStream);
}
if (separator != null) {
IOUtils.copy(new ByteArrayInputStream(this.separator.getBytes()), fileOutputStream);
}
}));
}
return Concat.Output.builder()
.uri(runContext.storage().putFile(tempFile))
.build();
}
|
@Test
void json() throws Exception {
this.run(true);
}
|
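// Serialized layout: [record context][priorValue][oldValue][newValue] followed by endPadding
// unused bytes; each value is prefixed with its length, a null sentinel, or a sentinel marking
// oldValue as a duplicate of priorValue.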
ByteBuffer serialize(final int endPadding) {
final int sizeOfValueLength = Integer.BYTES;
final int sizeOfPriorValue = priorValue == null ? 0 : priorValue.length;
final int sizeOfOldValue = oldValue == null || priorValue == oldValue ? 0 : oldValue.length;
final int sizeOfNewValue = newValue == null ? 0 : newValue.length;
final byte[] serializedContext = recordContext.serialize();
final ByteBuffer buffer = ByteBuffer.allocate(
serializedContext.length
+ sizeOfValueLength + sizeOfPriorValue
+ sizeOfValueLength + sizeOfOldValue
+ sizeOfValueLength + sizeOfNewValue
+ endPadding
);
buffer.put(serializedContext);
addValue(buffer, priorValue);
if (oldValue == null) {
buffer.putInt(NULL_VALUE_SENTINEL);
} else if (Arrays.equals(priorValue, oldValue)) {
buffer.putInt(OLD_PREV_DUPLICATE_VALUE_SENTINEL);
} else {
buffer.putInt(sizeOfOldValue);
buffer.put(oldValue);
}
addValue(buffer, newValue);
return buffer;
}
|
@Test
public void shouldSerializeNulls() {
final ProcessorRecordContext context = new ProcessorRecordContext(0L, 0L, 0, "topic", new RecordHeaders());
final byte[] serializedContext = context.serialize();
final byte[] bytes = new BufferValue(null, null, null, context).serialize(0).array();
final byte[] withoutContext = Arrays.copyOfRange(bytes, serializedContext.length, bytes.length);
assertThat(withoutContext, is(ByteBuffer.allocate(Integer.BYTES * 3).putInt(-1).putInt(-1).putInt(-1).array()));
}
|
public static BufferedImage bufferedImageFromImage(final Image image)
{
if (image instanceof BufferedImage)
{
return (BufferedImage) image;
}
return toARGB(image);
}
|
@Test
public void bufferedImageFromImage()
{
final BufferedImage buffered = new BufferedImage(1, 1, BufferedImage.TYPE_INT_ARGB);
assertEquals(buffered, ImageUtil.bufferedImageFromImage(buffered));
}
|
public String generateToken(String host) throws SpnegoEngineException {
GSSContext gssContext = null;
byte[] token = null; // base64 decoded challenge
Oid negotiationOid;
try {
/*
* Using the SPNEGO OID is the correct method. Kerberos v5 works for IIS but not JBoss.
* Unwrapping the initial token when using SPNEGO OID looks like what is described here...
*
* http://msdn.microsoft.com/en-us/library/ms995330.aspx
*
* Another helpful URL...
*
* http://publib.boulder.ibm.com/infocenter/wasinfo/v7r0/index.jsp?topic=/com.ibm.websphere.express.doc/info/exp/ae/tsec_SPNEGO_token.html
*
* Unfortunately SPNEGO is JRE >=1.6.
*/
// Try SPNEGO by default, fall back to Kerberos later if error
negotiationOid = new Oid(SPNEGO_OID);
String spn = getCompleteServicePrincipalName(host);
try {
GSSManager manager = GSSManager.getInstance();
GSSName serverName = manager.createName(spn, GSSName.NT_HOSTBASED_SERVICE);
GSSCredential myCred = null;
if (username != null || loginContextName != null || customLoginConfig != null && !customLoginConfig.isEmpty()) {
String contextName = loginContextName;
if (contextName == null) {
contextName = "";
}
LoginContext loginContext = new LoginContext(contextName, null, getUsernamePasswordHandler(), getLoginConfiguration());
loginContext.login();
final Oid negotiationOidFinal = negotiationOid;
final PrivilegedExceptionAction<GSSCredential> action = () ->
manager.createCredential(null, GSSCredential.INDEFINITE_LIFETIME, negotiationOidFinal, GSSCredential.INITIATE_AND_ACCEPT);
myCred = Subject.doAs(loginContext.getSubject(), action);
}
gssContext = manager.createContext(useCanonicalHostname ? serverName.canonicalize(negotiationOid) : serverName,
negotiationOid,
myCred,
GSSContext.DEFAULT_LIFETIME);
gssContext.requestMutualAuth(true);
gssContext.requestCredDeleg(true);
} catch (GSSException ex) {
log.error("generateToken", ex);
// BAD MECH means we are likely to be using 1.5, fall back to Kerberos MECH.
// Rethrow any other exception.
if (ex.getMajor() == GSSException.BAD_MECH) {
log.debug("GSSException BAD_MECH, retry with Kerberos MECH");
} else {
throw ex;
}
}
if (gssContext == null) {
/* Kerberos v5 GSS-API mechanism defined in RFC 1964. */
log.debug("Using Kerberos MECH {}", KERBEROS_OID);
negotiationOid = new Oid(KERBEROS_OID);
GSSManager manager = GSSManager.getInstance();
GSSName serverName = manager.createName(spn, GSSName.NT_HOSTBASED_SERVICE);
gssContext = manager.createContext(serverName.canonicalize(negotiationOid), negotiationOid, null,
GSSContext.DEFAULT_LIFETIME);
gssContext.requestMutualAuth(true);
gssContext.requestCredDeleg(true);
}
// TODO suspicious: this will always be null because no value has been assigned before. Assign directly?
if (token == null) {
token = new byte[0];
}
token = gssContext.initSecContext(token, 0, token.length);
if (token == null) {
throw new SpnegoEngineException("GSS security context initialization failed");
}
/*
* IIS accepts Kerberos and SPNEGO tokens. Some other servers Jboss, Glassfish? seem to only accept SPNEGO. Below wraps Kerberos into SPNEGO token.
*/
if (spnegoGenerator != null && negotiationOid.toString().equals(KERBEROS_OID)) {
token = spnegoGenerator.generateSpnegoDERObject(token);
}
gssContext.dispose();
String tokenstr = Base64.getEncoder().encodeToString(token);
log.debug("Sending response '{}' back to the server", tokenstr);
return tokenstr;
} catch (GSSException gsse) {
log.error("generateToken", gsse);
if (gsse.getMajor() == GSSException.DEFECTIVE_CREDENTIAL || gsse.getMajor() == GSSException.CREDENTIALS_EXPIRED) {
throw new SpnegoEngineException(gsse.getMessage(), gsse);
}
if (gsse.getMajor() == GSSException.NO_CRED) {
throw new SpnegoEngineException(gsse.getMessage(), gsse);
}
if (gsse.getMajor() == GSSException.DEFECTIVE_TOKEN || gsse.getMajor() == GSSException.DUPLICATE_TOKEN
|| gsse.getMajor() == GSSException.OLD_TOKEN) {
throw new SpnegoEngineException(gsse.getMessage(), gsse);
}
// other error
throw new SpnegoEngineException(gsse.getMessage());
} catch (IOException | LoginException | PrivilegedActionException ex) {
throw new SpnegoEngineException(ex.getMessage());
}
}
|
@Test
public void testSpnegoGenerateTokenWithNullPasswordFail() {
SpnegoEngine spnegoEngine = new SpnegoEngine("alice",
null,
"bob",
"service.ws.apache.org",
false,
null,
"alice",
null);
assertThrows(SpnegoEngineException.class, () -> spnegoEngine.generateToken("localhost"), "No password provided");
}
|
public ValidationResult isSCMConfigurationValid(String pluginId, final SCMPropertyConfiguration scmConfiguration) {
return pluginRequestHelper.submitRequest(pluginId, REQUEST_VALIDATE_SCM_CONFIGURATION, new DefaultPluginInteractionCallback<>() {
@Override
public String requestBody(String resolvedExtensionVersion) {
return messageHandlerMap.get(resolvedExtensionVersion).requestMessageForIsSCMConfigurationValid(scmConfiguration);
}
@Override
public ValidationResult onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
return messageHandlerMap.get(resolvedExtensionVersion).responseMessageForIsSCMConfigurationValid(responseBody);
}
});
}
|
@Test
public void shouldTalkToPluginToCheckIfSCMConfigurationIsValid() throws Exception {
when(jsonMessageHandler.requestMessageForIsSCMConfigurationValid(scmPropertyConfiguration)).thenReturn(requestBody);
ValidationResult deserializedResponse = new ValidationResult();
when(jsonMessageHandler.responseMessageForIsSCMConfigurationValid(responseBody)).thenReturn(deserializedResponse);
ValidationResult response = scmExtension.isSCMConfigurationValid(PLUGIN_ID, scmPropertyConfiguration);
assertRequest(requestArgumentCaptor.getValue(), SCM_EXTENSION, "1.0", SCMExtension.REQUEST_VALIDATE_SCM_CONFIGURATION, requestBody);
verify(jsonMessageHandler).requestMessageForIsSCMConfigurationValid(scmPropertyConfiguration);
verify(jsonMessageHandler).responseMessageForIsSCMConfigurationValid(responseBody);
assertSame(response, deserializedResponse);
}
|
public ReplicationPolicy replicationPolicy() {
return replicationPolicy;
}
|
@Test
public void testIdentityReplicationTopicSource() {
MirrorClient client = new FakeMirrorClient(
identityReplicationPolicy("primary"), Collections.emptyList());
assertEquals("topic1", client.replicationPolicy()
.formatRemoteTopic("primary", "topic1"));
assertEquals("primary", client.replicationPolicy()
.topicSource("topic1"));
// Heartbeats are handled as a special case
assertEquals("backup.heartbeats", client.replicationPolicy()
.formatRemoteTopic("backup", "heartbeats"));
assertEquals("backup", client.replicationPolicy()
.topicSource("backup.heartbeats"));
}
|
public void replaceCollector(Collector collector) {
final CollectorRegistry newRegistry = newCollectorRegistry();
newRegistry.register(collector);
DefaultExports.register(newRegistry);
registryRef.set(newRegistry);
}
|
@Test
void runWithPopulatedCollector() throws Exception {
final MetricRegistry metricRegistry = new MetricRegistry();
final Counter counter = metricRegistry.counter("counter");
counter.inc();
server.replaceCollector(new DropwizardExports(metricRegistry));
doGET(server, "/metrics", response -> {
assertThat(response.body()).isNotNull();
assertThat(response.body().string()).contains("counter 1.0");
});
doGET(server, "/", response -> {
assertThat(response.body()).isNotNull();
assertThat(response.body().string()).contains("counter 1.0");
});
doGET(server, "/-/healthy", response -> {
assertThat(response.body()).isNotNull();
assertThat(response.body().string()).containsIgnoringCase("exporter is healthy");
});
}
|
public void verifyState(HttpRequest request, @Nullable String csrfState, @Nullable String login) {
if (!shouldRequestBeChecked(request)) {
return;
}
String failureCause = checkCsrf(csrfState, request.getHeader(CSRF_HEADER));
if (failureCause != null) {
throw AuthenticationException.newBuilder()
.setSource(Source.local(Method.JWT))
.setLogin(login)
.setMessage(failureCause)
.build();
}
}
|
@Test
public void verify_DELETE_request() {
mockRequestCsrf("other value");
when(request.getRequestURI()).thenReturn(JAVA_WS_URL);
when(request.getMethod()).thenReturn("DELETE");
assertThatThrownBy(() -> underTest.verifyState(request, CSRF_STATE, LOGIN))
.hasMessage("Wrong CSFR in request")
.isInstanceOf(AuthenticationException.class)
.hasFieldOrPropertyWithValue("login", LOGIN)
.hasFieldOrPropertyWithValue("source", Source.local(Method.JWT));
}
|
public static <T extends TypedSPI> T getService(final Class<T> serviceInterface, final Object type) {
return getService(serviceInterface, type, new Properties());
}
|
@Test
void assertGetServiceWithAlias() {
assertNotNull(TypedSPILoader.getService(TypedSPIFixture.class, "TYPED.ALIAS"));
}
|
public AgentBootstrapperArgs parse(String... args) {
AgentBootstrapperArgs result = new AgentBootstrapperArgs();
try {
new JCommander(result).parse(args);
if (result.help) {
printUsageAndExit(0);
}
return result;
} catch (ParameterException e) {
stderr.println(e.getMessage());
printUsageAndExit(1);
}
return null;
}
|
@Test
public void shouldRaiseExceptionWhenSSLCertificateFileIsNotPresent() {
assertThatCode(() -> agentCLI.parse("-serverUrl", "http://example.com/go", "-sslCertificateFile", UUID.randomUUID().toString()))
.isInstanceOf(ExitException.class)
.satisfies(o -> assertThat(((ExitException) o).getStatus()).isEqualTo(1));
assertThat(errorStream.toString()).contains("-sslCertificateFile must be a file that is readable.");
}
|
@Override
public List<PostDO> getPostList(Collection<Long> ids) {
if (CollUtil.isEmpty(ids)) {
return Collections.emptyList();
}
return postMapper.selectBatchIds(ids);
}
|
@Test
public void testGetPostList() {
// mock data
PostDO postDO01 = randomPojo(PostDO.class);
postMapper.insert(postDO01);
// this record's id will not match
PostDO postDO02 = randomPojo(PostDO.class);
postMapper.insert(postDO02);
// prepare parameters
List<Long> ids = singletonList(postDO01.getId());
// invoke
List<PostDO> list = postService.getPostList(ids);
// assert
assertEquals(1, list.size());
assertPojoEquals(postDO01, list.get(0));
}
|
@Override
public int compareTo(Mod o) {
return this.modId.compareTo(o.modId);
}
|
@Test
public void comparable() {
mod1 = new Mod(AAA);
mod2 = new Mod(BBB);
assertNotEquals("what?", mod1, mod2);
assertTrue(mod1.compareTo(mod2) < 0);
assertTrue(mod2.compareTo(mod1) > 0);
}
|
@Deprecated
public ChannelFuture close() {
return closeOutbound();
}
|
@Test
public void testOutboundClosedAfterChannelInactive() throws Exception {
SslContext context = SslContextBuilder.forClient().build();
SSLEngine engine = context.newEngine(UnpooledByteBufAllocator.DEFAULT);
EmbeddedChannel channel = new EmbeddedChannel();
assertFalse(channel.finish());
channel.pipeline().addLast(new SslHandler(engine));
assertFalse(engine.isOutboundDone());
channel.close().syncUninterruptibly();
assertTrue(engine.isOutboundDone());
}
|
public long getDataLength() {
return getCounterResponseStream() == null ? 0 : getCounterResponseStream().getDataLength();
}
|
@Test
public void testCounterResponseStream() throws IOException {
final CounterResponseStream wrapper = new CounterResponseStream(new HttpResponse());
wrapper.write(1);
wrapper.write(new byte[8]);
wrapper.write(new byte[8], 1, 7);
assertEquals("dataLength", 16, wrapper.getDataLength());
wrapper.isReady();
wrapper.setWriteListener(null);
wrapper.flush();
wrapper.close();
}
|
@VisibleForTesting
static StreamExecutionEnvironment createStreamExecutionEnvironment(FlinkPipelineOptions options) {
return createStreamExecutionEnvironment(
options,
MoreObjects.firstNonNull(options.getFilesToStage(), Collections.emptyList()),
options.getFlinkConfDir());
}
|
@Test
public void shouldRemoveHttpProtocolFromHostStreaming() {
FlinkPipelineOptions options = getDefaultPipelineOptions();
options.setRunner(FlinkRunner.class);
for (String flinkMaster :
new String[] {
"http://host:1234", " http://host:1234", "https://host:1234", " https://host:1234"
}) {
options.setFlinkMaster(flinkMaster);
StreamExecutionEnvironment sev =
FlinkExecutionEnvironments.createStreamExecutionEnvironment(options);
checkHostAndPort(sev, "host", 1234);
}
}
|
public static CreateSourceProperties from(final Map<String, Literal> literals) {
try {
return new CreateSourceProperties(literals, DurationParser::parse, false);
} catch (final ConfigException e) {
final String message = e.getMessage().replace(
"configuration",
"property"
);
throw new KsqlException(message, e);
}
}
|
@Test
public void shouldThrowOnTumblingWindowWithOutSize() {
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> CreateSourceProperties.from(
ImmutableMap.<String, Literal>builder()
.putAll(MINIMUM_VALID_PROPS)
.put(WINDOW_TYPE_PROPERTY, new StringLiteral("tumbling"))
.build())
);
// Then:
assertThat(e.getMessage(), containsString("TUMBLING windows require 'WINDOW_SIZE' to be provided in the WITH clause. "
+ "For example: 'WINDOW_SIZE'='10 SECONDS'"));
}
|
@Nonnull @Override
public ProgressState call() {
progTracker.reset();
stateMachineStep();
return progTracker.toProgressState();
}
|
@Test
public void when_barrier_then_snapshotDone() {
// When
init(singletonList(new SnapshotBarrier(2, false)));
ssContext.startNewSnapshotPhase1(2, "map", 0);
assertEquals(MADE_PROGRESS, sst.call());
assertEquals(MADE_PROGRESS, sst.call());
// Then
assertEquals(3, sst.pendingSnapshotId);
}
|
@Override
public void putTaskConfigs(final String connName, final List<Map<String, String>> configs, final Callback<Void> callback, InternalRequestSignature requestSignature) {
log.trace("Submitting put task configuration request {}", connName);
if (requestNotSignedProperly(requestSignature, callback)) {
return;
}
addRequest(
() -> {
if (!isLeader())
callback.onCompletion(new NotLeaderException("Only the leader may write task configurations.", leaderUrl()), null);
else if (!configState.contains(connName))
callback.onCompletion(new NotFoundException("Connector " + connName + " not found"), null);
else {
writeTaskConfigs(connName, configs);
callback.onCompletion(null, null);
}
return null;
},
forwardErrorAndTickThreadStages(callback)
);
}
|
@Test
public void testPutTaskConfigsDisallowedSignatureAlgorithm() {
when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2);
InternalRequestSignature signature = mock(InternalRequestSignature.class);
when(signature.keyAlgorithm()).thenReturn("HmacSHA489");
Callback<Void> taskConfigCb = mock(Callback.class);
herder.putTaskConfigs(CONN1, TASK_CONFIGS, taskConfigCb, signature);
ArgumentCaptor<Throwable> errorCapture = ArgumentCaptor.forClass(Throwable.class);
verify(taskConfigCb).onCompletion(errorCapture.capture(), isNull());
assertInstanceOf(BadRequestException.class, errorCapture.getValue());
verifyNoMoreInteractions(member, taskConfigCb);
}
|
@ScalarFunction(nullableParameters = true)
public static byte[] toCpcSketch(@Nullable Object input) {
return toCpcSketch(input, CommonConstants.Helix.DEFAULT_CPC_SKETCH_LGK);
}
|
@Test
public void testCpcCreation() {
for (Object i : _inputs) {
Assert.assertEquals(cpcEstimate(SketchFunctions.toCpcSketch(i)), 1.0);
Assert.assertEquals(cpcEstimate(SketchFunctions.toCpcSketch(i, 11)), 1.0);
}
Assert.assertEquals(cpcEstimate(SketchFunctions.toCpcSketch(null)), 0.0);
Assert.assertEquals(cpcEstimate(SketchFunctions.toCpcSketch(null, 11)), 0.0);
Assert.assertThrows(IllegalArgumentException.class, () -> SketchFunctions.toCpcSketch(new Object()));
Assert.assertThrows(IllegalArgumentException.class, () -> SketchFunctions.toCpcSketch(new Object(), 11));
}
|
public void compileToDestination(File src, File dst) throws IOException {
for (Schema schema : queue) {
OutputFile o = compile(schema);
o.writeToDestination(src, dst);
}
if (protocol != null) {
compileInterface(protocol).writeToDestination(src, dst);
}
}
|
@Test
void canReadTemplateFilesOnTheFilesystem() throws IOException {
SpecificCompiler compiler = createCompiler();
compiler.compileToDestination(this.src, OUTPUT_DIR);
assertTrue(new File(OUTPUT_DIR, "SimpleRecord.java").exists());
}
|
public static NamenodeRole convert(NamenodeRoleProto role) {
switch (role) {
case NAMENODE:
return NamenodeRole.NAMENODE;
case BACKUP:
return NamenodeRole.BACKUP;
case CHECKPOINT:
return NamenodeRole.CHECKPOINT;
}
return null;
}
|
@Test
public void testBlockECRecoveryCommand() {
DatanodeInfo[] dnInfos0 = new DatanodeInfo[] {
DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
DatanodeStorageInfo targetDnInfos_0 = BlockManagerTestUtil
.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(),
new DatanodeStorage("s00"));
DatanodeStorageInfo targetDnInfos_1 = BlockManagerTestUtil
.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(),
new DatanodeStorage("s01"));
DatanodeStorageInfo[] targetDnInfos0 = new DatanodeStorageInfo[] {
targetDnInfos_0, targetDnInfos_1 };
byte[] liveBlkIndices0 = new byte[2];
byte[] excludeReconstructedIndices0 = new byte[2];
BlockECReconstructionInfo blkECRecoveryInfo0 = new BlockECReconstructionInfo(
new ExtendedBlock("bp1", 1234), dnInfos0, targetDnInfos0,
liveBlkIndices0, excludeReconstructedIndices0, StripedFileTestUtil.getDefaultECPolicy());
DatanodeInfo[] dnInfos1 = new DatanodeInfo[] {
DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
DatanodeStorageInfo targetDnInfos_2 = BlockManagerTestUtil
.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(),
new DatanodeStorage("s02"));
DatanodeStorageInfo targetDnInfos_3 = BlockManagerTestUtil
.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(),
new DatanodeStorage("s03"));
DatanodeStorageInfo[] targetDnInfos1 = new DatanodeStorageInfo[] {
targetDnInfos_2, targetDnInfos_3 };
byte[] liveBlkIndices1 = new byte[2];
byte[] excludeReconstructedIndices = new byte[2];
BlockECReconstructionInfo blkECRecoveryInfo1 = new BlockECReconstructionInfo(
new ExtendedBlock("bp2", 3256), dnInfos1, targetDnInfos1,
liveBlkIndices1, excludeReconstructedIndices, StripedFileTestUtil.getDefaultECPolicy());
List<BlockECReconstructionInfo> blkRecoveryInfosList = new ArrayList<BlockECReconstructionInfo>();
blkRecoveryInfosList.add(blkECRecoveryInfo0);
blkRecoveryInfosList.add(blkECRecoveryInfo1);
BlockECReconstructionCommand blkECReconstructionCmd = new BlockECReconstructionCommand(
DatanodeProtocol.DNA_ERASURE_CODING_RECONSTRUCTION, blkRecoveryInfosList);
BlockECReconstructionCommandProto blkECRecoveryCmdProto = PBHelper
.convert(blkECReconstructionCmd);
blkECReconstructionCmd = PBHelper.convert(blkECRecoveryCmdProto);
Iterator<BlockECReconstructionInfo> iterator = blkECReconstructionCmd.getECTasks()
.iterator();
assertBlockECRecoveryInfoEquals(blkECRecoveryInfo0, iterator.next());
assertBlockECRecoveryInfoEquals(blkECRecoveryInfo1, iterator.next());
}
|
public Condition getCondition() {
return condition;
}
|
@Test
public void getCondition_returns_object_passed_in_constructor() {
assertThat(new EvaluatedCondition(SOME_CONDITION, SOME_LEVEL, SOME_VALUE).getCondition()).isSameAs(SOME_CONDITION);
}
|
@VisibleForTesting
public void validateDictDataExists(Long id) {
if (id == null) {
return;
}
DictDataDO dictData = dictDataMapper.selectById(id);
if (dictData == null) {
throw exception(DICT_DATA_NOT_EXISTS);
}
}
|
@Test
public void testValidateDictDataExists_success() {
// mock data
DictDataDO dbDictData = randomDictDataDO();
dictDataMapper.insert(dbDictData); // @Sql: insert an existing record first
// the call should succeed
dictDataService.validateDictDataExists(dbDictData.getId());
}
|
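// Returns the index-th (1-based) uncommitted offset after the committed offset; throws
// NoSuchElementException when fewer than index offsets have been emitted.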
public long getNthUncommittedOffsetAfterCommittedOffset(int index) {
Iterator<Long> offsetIter = emittedOffsets.iterator();
for (int i = 0; i < index - 1; i++) {
offsetIter.next();
}
return offsetIter.next();
}
|
@Test
public void testGetNthUncommittedOffsetAfterCommittedOffset() {
manager.addToEmitMsgs(initialFetchOffset + 1);
manager.addToEmitMsgs(initialFetchOffset + 2);
manager.addToEmitMsgs(initialFetchOffset + 5);
manager.addToEmitMsgs(initialFetchOffset + 30);
assertThat("The third uncommitted offset should be 5", manager.getNthUncommittedOffsetAfterCommittedOffset(3), is(initialFetchOffset + 5L));
assertThat("The fourth uncommitted offset should be 30", manager.getNthUncommittedOffsetAfterCommittedOffset(4), is(initialFetchOffset + 30L));
Assertions.assertThrows(NoSuchElementException.class, () -> manager.getNthUncommittedOffsetAfterCommittedOffset(5));
}
|
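// Scans the pattern one character at a time, dispatching on the scanner state; at end of
// input any pending literal is flushed, re-appending a trailing ':' (LOGBACK-1140) or
// '$' (LOGBACK-1149) that was consumed while looking ahead.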
List<Token> tokenize() throws ScanException {
List<Token> tokenList = new ArrayList<Token>();
StringBuilder buf = new StringBuilder();
while (pointer < patternLength) {
char c = pattern.charAt(pointer);
pointer++;
switch (state) {
case LITERAL_STATE:
handleLiteralState(c, tokenList, buf);
break;
case START_STATE:
handleStartState(c, tokenList, buf);
break;
case DEFAULT_VAL_STATE:
handleDefaultValueState(c, tokenList, buf);
break;
default:
}
}
// EOS
switch (state) {
case LITERAL_STATE:
addLiteralToken(tokenList, buf);
break;
case DEFAULT_VAL_STATE:
// trailing colon. see also LOGBACK-1140
buf.append(CoreConstants.COLON_CHAR);
addLiteralToken(tokenList, buf);
break;
case START_STATE:
// trailing $. see also LOGBACK-1149
buf.append(CoreConstants.DOLLAR);
addLiteralToken(tokenList, buf);
break;
}
return tokenList;
}
|
@Test
public void LOGBACK_1101() throws ScanException {
String input = "a:{y}";
Tokenizer tokenizer = new Tokenizer(input);
List<Token> tokenList = tokenizer.tokenize();
witnessList.add(new Token(Token.Type.LITERAL, "a"));
witnessList.add(new Token(Token.Type.LITERAL, ":"));
witnessList.add(Token.CURLY_LEFT_TOKEN);
witnessList.add(new Token(Token.Type.LITERAL, "y"));
witnessList.add(Token.CURLY_RIGHT_TOKEN);
assertEquals(witnessList, tokenList);
}
|
@Override
public SmsTemplateRespDTO getSmsTemplate(String apiTemplateId) throws Throwable {
// 1. Build the request
// Reference: https://cloud.tencent.com/document/product/382/52067
TreeMap<String, Object> body = new TreeMap<>();
body.put("International", INTERNATIONAL_CHINA);
body.put("TemplateIdSet", new Integer[]{Integer.valueOf(apiTemplateId)});
JSONObject response = request("DescribeSmsTemplateList", body);
// TODO @scholar: could this request fail? Similar to the send path (I have added that logic there)
JSONObject templateStatusSet = response.getJSONObject("Response").getJSONArray("DescribeTemplateStatusSet").getJSONObject(0);
String content = templateStatusSet.get("TemplateContent").toString();
int templateStatus = Integer.parseInt(templateStatusSet.get("StatusCode").toString());
String auditReason = templateStatusSet.get("ReviewReply").toString();
return new SmsTemplateRespDTO().setId(apiTemplateId).setContent(content)
.setAuditStatus(convertSmsTemplateAuditStatus(templateStatus)).setAuditReason(auditReason);
}
|
@Test
public void testGetSmsTemplate() throws Throwable {
try (MockedStatic<HttpUtils> httpUtilsMockedStatic = mockStatic(HttpUtils.class)) {
// prepare parameters
String apiTemplateId = "1122";
// mock the method
httpUtilsMockedStatic.when(() -> HttpUtils.post(anyString(), anyMap(), anyString()))
.thenReturn("{ \"Response\": {\n" +
" \"DescribeTemplateStatusSet\": [\n" +
" {\n" +
" \"TemplateName\": \"验证码\",\n" +
" \"TemplateId\": 1122,\n" +
" \"International\": 0,\n" +
" \"ReviewReply\": \"审批备注\",\n" +
" \"CreateTime\": 1617379200,\n" +
" \"TemplateContent\": \"您的验证码是{1}\",\n" +
" \"StatusCode\": 0\n" +
" },\n" +
" \n" +
" ],\n" +
" \"RequestId\": \"f36e4f00-605e-49b1-ad0d-bfaba81c7325\"\n" +
" }}");
// invoke
SmsTemplateRespDTO result = smsClient.getSmsTemplate(apiTemplateId);
// assert
assertEquals("1122", result.getId());
assertEquals("您的验证码是{1}", result.getContent());
assertEquals(SmsTemplateAuditStatusEnum.SUCCESS.getStatus(), result.getAuditStatus());
assertEquals("审批备注", result.getAuditReason());
}
}
|
@Override
public Neighbor<double[], E> nearest(double[] q) {
if (model == null) return super.nearest(q);
return nearest(q, 0.95, 100);
}
|
@Test
public void testNearest() {
System.out.println("nearest");
int recall = 0;
double error = 0.0;
int hit = 0;
for (double[] xi : testx) {
Neighbor neighbor = lsh.nearest(xi);
if (neighbor != null) {
hit++;
Neighbor truth = naive.nearest(xi);
if (neighbor.index == truth.index) {
recall++;
} else {
error += Math.abs(neighbor.distance - truth.distance) / truth.distance;
}
}
}
error /= (hit - recall);
assertEquals(1722, recall);
assertEquals(2007, hit);
assertEquals(0.0687, error, 1E-4);
System.out.format("recall is %.2f%%%n", 100.0 * recall / testx.length);
System.out.format("error when miss is %.2f%%%n", 100.0 * error);
System.out.format("null rate is %.2f%%%n", 100.0 - 100.0 * hit / testx.length);
}
|
public static JsonAsserter with(String json) {
return new JsonAsserterImpl(JsonPath.parse(json).json());
}
|
@Test
public void a_document_can_be_expected_not_to_contain_a_path() throws Exception {
with(JSON).assertNotDefined("$.store.bicycle.cool");
}
|
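// If the path points at a zip file, extracts it into a sibling directory named after the file
// (re-extracting only when the zip is newer than the directory) and returns that directory;
// otherwise returns the path unchanged.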
public static Path expandIfZip(Path filePath) throws IOException {
if (!isZipFile(filePath)) {
return filePath;
}
FileTime pluginZipDate = Files.getLastModifiedTime(filePath);
String fileName = filePath.getFileName().toString();
String directoryName = fileName.substring(0, fileName.lastIndexOf("."));
Path pluginDirectory = filePath.resolveSibling(directoryName);
if (!Files.exists(pluginDirectory) || pluginZipDate.compareTo(Files.getLastModifiedTime(pluginDirectory)) > 0) {
// expand '.zip' file
Unzip unzip = new Unzip();
unzip.setSource(filePath.toFile());
unzip.setDestination(pluginDirectory.toFile());
unzip.extract();
log.info("Expanded plugin zip '{}' in '{}'", filePath.getFileName(), pluginDirectory.getFileName());
}
return pluginDirectory;
}
|
@Test
public void expandIfZipNonZipFiles() throws Exception {
// File without .suffix
Path extra = pluginsPath.resolve("extra");
assertEquals(extra, FileUtils.expandIfZip(extra));
// Folder
Path folder = pluginsPath.resolve("folder");
assertEquals(folder, FileUtils.expandIfZip(folder));
}
|
@Override
public Iterator<V> distributedIterator(final String pattern) {
String iteratorName = "__redisson_set_cursor_{" + getRawName() + "}";
return distributedIterator(iteratorName, pattern, 10);
}
|
@Test
public void testDistributedIterator() {
RSet<String> set = redisson.getSet("set", StringCodec.INSTANCE);
// populate set with elements
List<String> stringsOne = IntStream.range(0, 128).mapToObj(i -> "one-" + i).collect(Collectors.toList());
List<String> stringsTwo = IntStream.range(0, 128).mapToObj(i -> "two-" + i).collect(Collectors.toList());
set.addAll(stringsOne);
set.addAll(stringsTwo);
Iterator<String> stringIterator = set.distributedIterator("iterator_{set}", "one*", 10);
// read some elements using iterator
List<String> strings = new ArrayList<>();
for (int i = 0; i < 64; i++) {
if (stringIterator.hasNext()) {
strings.add(stringIterator.next());
}
}
// create another iterator instance using the same name
RSet<String> set2 = redisson.getSet("set", StringCodec.INSTANCE);
Iterator<String> stringIterator2 = set2.distributedIterator("iterator_{set}", "one*", 10);
Assertions.assertTrue(stringIterator2.hasNext());
// read all remaining elements
stringIterator2.forEachRemaining(strings::add);
stringIterator.forEachRemaining(strings::add);
assertThat(strings).containsAll(stringsOne);
assertThat(strings).hasSize(stringsOne.size());
}
|
public static double[][] impute(double[][] data) {
int d = data[0].length;
int[] count = new int[d];
for (int i = 0; i < data.length; i++) {
int missing = 0;
for (int j = 0; j < d; j++) {
if (Double.isNaN(data[i][j])) {
missing++;
count[j]++;
}
}
if (missing == d) {
throw new IllegalArgumentException("The whole row " + i + " is missing");
}
}
for (int i = 0; i < d; i++) {
if (count[i] == data.length) {
throw new IllegalArgumentException("The whole column " + i + " is missing");
}
}
double[] mean = new double[d];
int[] n = new int[d];
for (double[] x : data) {
for (int j = 0; j < d; j++) {
if (!Double.isNaN(x[j])) {
n[j]++;
mean[j] += x[j];
}
}
}
for (int j = 0; j < d; j++) {
if (n[j] != 0) {
mean[j] /= n[j];
}
}
double[][] full = MathEx.clone(data);
for (double[] x : full) {
for (int j = 0; j < d; j++) {
if (Double.isNaN(x[j])) {
x[j] = mean[j];
}
}
}
return full;
}
|
@Test
public void testAverage() throws Exception {
System.out.println("Column Average Imputation");
double[][] data = SyntheticControl.x;
impute(SimpleImputer::impute, data, 0.01, 39.11);
impute(SimpleImputer::impute, data, 0.05, 48.86);
impute(SimpleImputer::impute, data, 0.10, 45.24);
impute(SimpleImputer::impute, data, 0.15, 44.59);
impute(SimpleImputer::impute, data, 0.20, 41.93);
impute(SimpleImputer::impute, data, 0.25, 44.77);
}
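// A hedged, hand-checkable companion example for SimpleImputer.impute above; the 3x2
// matrix and the expected column means are illustrative, and the qualified call avoids
// the impute(...) helper used by the surrounding test.
@Test
public void testImputeToyExample() {
    double[][] data = {
            {1.0, Double.NaN},
            {3.0, 4.0},
            {Double.NaN, 8.0}
    };
    double[][] full = SimpleImputer.impute(data);
    // Column 0 observes {1.0, 3.0} -> mean 2.0; column 1 observes {4.0, 8.0} -> mean 6.0.
    assertEquals(2.0, full[2][0], 1E-10);
    assertEquals(6.0, full[0][1], 1E-10);
    // MathEx.clone makes a deep copy, so the input matrix keeps its missing values.
    assertTrue(Double.isNaN(data[2][0]));
}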
|
public long getUsedMemory() {
long used = 0;
for (long[] block : blocks) {
used += (block != null ? block.length : 0);
}
return used;
}
|
@Test
public void when_allocateFree_then_noLeak() {
final long addr1 = allocate();
final long addr2 = allocate();
free(addr1);
free(addr2);
assertEquals(0, memMgr.getUsedMemory());
}
|
@Override
public void run() {
// top-level command, do nothing
}
|
@Test
public void test_listJobs() {
// Given
Job job = newJob();
// When
run("list-jobs");
// Then
String actual = captureOut();
assertContains(actual, job.getName());
assertContains(actual, job.getIdString());
assertContains(actual, job.getStatus().toString());
}
|
static HoodieSyncTool instantiateMetaSyncTool(String syncToolClassName,
TypedProperties props,
Configuration hadoopConfig,
FileSystem fs,
String targetBasePath,
String baseFileFormat) {
TypedProperties properties = new TypedProperties();
properties.putAll(props);
properties.put(META_SYNC_BASE_PATH.key(), targetBasePath);
properties.put(META_SYNC_BASE_FILE_FORMAT.key(), baseFileFormat);
if (properties.containsKey(META_SYNC_TABLE_NAME.key())) {
String tableName = properties.getString(META_SYNC_TABLE_NAME.key());
if (!tableName.equals(tableName.toLowerCase())) {
LOG.warn(
"Table name \"" + tableName + "\" contains capital letters. Your metastore may automatically convert this to lower case and can cause table not found errors during subsequent syncs.");
}
}
if (ReflectionUtils.hasConstructor(syncToolClassName,
new Class<?>[] {Properties.class, Configuration.class})) {
return ((HoodieSyncTool) ReflectionUtils.loadClass(syncToolClassName,
new Class<?>[] {Properties.class, Configuration.class},
properties, hadoopConfig));
} else if (ReflectionUtils.hasConstructor(syncToolClassName,
new Class<?>[] {Properties.class})) {
return ((HoodieSyncTool) ReflectionUtils.loadClass(syncToolClassName,
new Class<?>[] {Properties.class},
properties));
} else if (ReflectionUtils.hasConstructor(syncToolClassName,
new Class<?>[] {TypedProperties.class, Configuration.class, FileSystem.class})) {
return ((HoodieSyncTool) ReflectionUtils.loadClass(syncToolClassName,
new Class<?>[] {TypedProperties.class, Configuration.class, FileSystem.class},
properties, hadoopConfig, fs));
} else if (ReflectionUtils.hasConstructor(syncToolClassName,
new Class<?>[] {Properties.class, FileSystem.class})) {
return ((HoodieSyncTool) ReflectionUtils.loadClass(syncToolClassName,
new Class<?>[] {Properties.class, FileSystem.class},
properties, fs));
} else {
throw new HoodieException("Could not load meta sync class " + syncToolClassName
+ ": no valid constructor found.");
}
}
|
@Test
public void testCreateInvalidSyncClass() {
Throwable t = assertThrows(HoodieException.class, () -> {
SyncUtilHelpers.instantiateMetaSyncTool(
InvalidSyncTool.class.getName(),
new TypedProperties(),
hadoopConf,
fileSystem,
BASE_PATH,
BASE_FORMAT
);
});
String expectedMessage = "Could not load meta sync class " + InvalidSyncTool.class.getName()
+ ": no valid constructor found.";
assertEquals(expectedMessage, t.getMessage());
}
|
public static void main(String[] args) {
if (args.length < 1 || args[0].equals("-h") || args[0].equals("--help")) {
System.out.println(usage);
return;
}
// Copy args, because CommandFormat mutates the list.
List<String> argsList = new ArrayList<String>(Arrays.asList(args));
CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "-glob", "-jar");
try {
cf.parse(argsList);
} catch (UnknownOptionException e) {
terminate(1, "unrecognized option");
return;
}
String classPath = System.getProperty("java.class.path");
if (cf.getOpt("-glob")) {
// The classpath returned from the property has been globbed already.
System.out.println(classPath);
} else if (cf.getOpt("-jar")) {
if (argsList.isEmpty() || argsList.get(0) == null ||
argsList.get(0).isEmpty()) {
terminate(1, "-jar option requires path of jar file to write");
return;
}
// Write the classpath into the manifest of a temporary jar file.
Path workingDir = new Path(System.getProperty("user.dir"));
final String tmpJarPath;
try {
tmpJarPath = FileUtil.createJarWithClassPath(classPath, workingDir,
System.getenv())[0];
} catch (IOException e) {
terminate(1, "I/O error creating jar: " + e.getMessage());
return;
}
// Rename the temporary file to its final location.
String jarPath = argsList.get(0);
try {
FileUtil.replaceFile(new File(tmpJarPath), new File(jarPath));
} catch (IOException e) {
terminate(1, "I/O error renaming jar temporary file to path: " +
e.getMessage());
return;
}
}
}
|
@Test
public void testHelp() {
Classpath.main(new String[] { "--help" });
String strOut = new String(stdout.toByteArray(), UTF8);
assertTrue(strOut.contains("Prints the classpath"));
assertTrue(stderr.toByteArray().length == 0);
}
|
public Mono<Integer> getInvocationCount() {
return Mono.just(this.count.get());
}
|
@Test
public void givenSleepBy100ms_whenGetInvocationCount_thenIsGreaterThanZero()
throws InterruptedException {
Thread.sleep(100L);
counter.getInvocationCount()
.as(StepVerifier::create)
.consumeNextWith(count -> assertThat(count).isGreaterThan(0))
.thenCancel()
.verify();
}
|
@Override
public List<IndexSegment> prune(List<IndexSegment> segments, QueryContext query) {
if (segments.isEmpty()) {
return segments;
}
// For LIMIT 0 case, keep one segment to create the schema
int limit = query.getLimit();
if (limit == 0) {
return Collections.singletonList(segments.get(0));
}
// Skip pruning segments for upsert table because valid doc index is equivalent to a filter
if (segments.get(0).getValidDocIds() != null) {
return segments;
}
if (query.getOrderByExpressions() == null) {
return pruneSelectionOnly(segments, query);
} else {
return pruneSelectionOrderBy(segments, query);
}
}
|
@Test
public void testLimit0() {
List<IndexSegment> indexSegments =
Arrays.asList(getIndexSegment(null, null, 10), getIndexSegment(0L, 10L, 10), getIndexSegment(-5L, 5L, 15));
// Should keep only the first segment
QueryContext queryContext = QueryContextConverterUtils.getQueryContext("SELECT * FROM testTable LIMIT 0");
List<IndexSegment> result = _segmentPruner.prune(indexSegments, queryContext);
assertEquals(result.size(), 1);
assertSame(result.get(0), indexSegments.get(0));
queryContext =
QueryContextConverterUtils.getQueryContext("SELECT * FROM testTable ORDER BY testColumn LIMIT 0");
result = _segmentPruner.prune(indexSegments, queryContext);
assertEquals(result.size(), 1);
assertSame(result.get(0), indexSegments.get(0));
}
|
@Override
public KsMaterializedQueryResult<Row> get(
final GenericKey key,
final int partition,
final Optional<Position> position
) {
try {
final KeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = KeyQuery.withKey(key);
StateQueryRequest<ValueAndTimestamp<GenericRow>>
request = inStore(stateStore.getStateStoreName())
.withQuery(query)
.withPartitions(ImmutableSet.of(partition));
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final StateQueryResult<ValueAndTimestamp<GenericRow>>
result = stateStore.getKafkaStreams().query(request);
final QueryResult<ValueAndTimestamp<GenericRow>> queryResult =
result.getPartitionResults().get(partition);
// Some of these failures are retriable, and in the future, we may want to retry
// locally before throwing.
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
} else if (queryResult.getResult() == null) {
return KsMaterializedQueryResult.rowIteratorWithPosition(
Collections.emptyIterator(), queryResult.getPosition());
} else {
final ValueAndTimestamp<GenericRow> row = queryResult.getResult();
return KsMaterializedQueryResult.rowIteratorWithPosition(
ImmutableList.of(Row.of(stateStore.schema(), key, row.value(), row.timestamp()))
.iterator(),
queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
}
|
@Test
public void shouldKeyQueryWithCorrectParams() {
// Given:
when(kafkaStreams.query(any())).thenReturn(getRowResult(ROW1));
// When:
table.get(A_KEY, PARTITION);
// Then:
verify(kafkaStreams).query(queryTypeCaptor.capture());
StateQueryRequest request = queryTypeCaptor.getValue();
assertThat(request.getQuery(), instanceOf(KeyQuery.class));
KeyQuery keyQuery = (KeyQuery)request.getQuery();
assertThat(keyQuery.getKey(), is(A_KEY));
}
|
@SuppressWarnings("unused") // Required for automatic type inference
public static <K> Builder0<K> forClass(final Class<K> type) {
return new Builder0<>();
}
|
@Test(expected = IllegalArgumentException.class)
public void shouldThrowIfHandlerSupplierReturnsNullHandler1() {
HandlerMaps.forClass(BaseType.class).withArgType(String.class)
.put(LeafTypeA.class, () -> null)
.build();
}
|
public static ExecutionStats fromJson(JsonNode json) {
return new ExecutionStats(json);
}
|
@Test
public void testFromJson() {
// Run the test
final ExecutionStats result = ExecutionStats.fromJson(_mockBrokerResponse);
// Verify the results
assertNotNull(result);
assertEquals(10, result.getNumServersQueried());
}
|
@PatchMapping("/api/v1/meetings/{uuid}/unlock")
public void unlock(@PathVariable String uuid, @AuthAttendee long id) {
meetingService.unlock(uuid, id);
}
|
@DisplayName("약속을 잠금을 해제하면 200 OK를 반환한다.")
@Test
void unlock() {
Meeting meeting = meetingRepository.save(MeetingFixture.DINNER.create());
Attendee attendee = attendeeRepository.save(AttendeeFixture.HOST_JAZZ.create(meeting));
String token = getToken(attendee, meeting);
RestAssured.given().log().all()
.cookie("ACCESS_TOKEN", token)
.contentType(ContentType.JSON)
.pathParam("uuid", meeting.getUuid())
.when().patch("/api/v1/meetings/{uuid}/unlock")
.then().log().all()
.statusCode(HttpStatus.OK.value());
}
|
protected void setAlarmConditionMetadata(AlarmRuleState ruleState, TbMsgMetaData metaData) {
if (ruleState.getSpec().getType() == AlarmConditionSpecType.REPEATING) {
metaData.putValue(DataConstants.ALARM_CONDITION_REPEATS, String.valueOf(ruleState.getState().getEventCount()));
}
if (ruleState.getSpec().getType() == AlarmConditionSpecType.DURATION) {
metaData.putValue(DataConstants.ALARM_CONDITION_DURATION, String.valueOf(ruleState.getState().getDuration()));
}
}
|
@Test
public void testSetAlarmConditionMetadata_durationCondition() {
DurationAlarmConditionSpec spec = new DurationAlarmConditionSpec();
spec.setUnit(TimeUnit.SECONDS);
AlarmRuleState ruleState = createMockAlarmRuleState(spec);
int duration = 12;
ruleState.getState().setDuration(duration);
AlarmState alarmState = createMockAlarmState();
TbMsgMetaData metaData = new TbMsgMetaData();
alarmState.setAlarmConditionMetadata(ruleState, metaData);
assertEquals(AlarmConditionSpecType.DURATION, ruleState.getSpec().getType());
assertNotNull(metaData.getValue(DataConstants.ALARM_CONDITION_DURATION));
assertNull(metaData.getValue(DataConstants.ALARM_CONDITION_REPEATS));
assertEquals(String.valueOf(duration), metaData.getValue(DataConstants.ALARM_CONDITION_DURATION));
}
|
public boolean isNamespaceReferencedWithHotRestart(@Nonnull String namespace) {
return nodeEngine.getConfig()
.getCacheConfigs()
.values()
.stream()
.filter(cacheConfig -> cacheConfig.getDataPersistenceConfig().isEnabled())
.map(CacheSimpleConfig::getUserCodeNamespace)
.anyMatch(namespace::equals)
|| getCacheConfigs()
.stream()
.filter(cacheConfig -> cacheConfig.getHotRestartConfig().isEnabled())
.map(CacheConfig::getUserCodeNamespace)
.anyMatch(namespace::equals);
}
|
@Test
public void testIsNamespaceReferencedWithHotRestart_withNoCacheConfigs_false() {
CacheService cacheService = new TestCacheService(mockNodeEngine, true);
when(mockConfig.getCacheConfigs()).thenReturn(Map.of());
assertFalse(cacheService.isNamespaceReferencedWithHotRestart("ns1"));
}
|
public void commitSegmentFile(String realtimeTableName, CommittingSegmentDescriptor committingSegmentDescriptor)
throws Exception {
Preconditions.checkState(!_isStopping, "Segment manager is stopping");
String rawTableName = TableNameBuilder.extractRawTableName(realtimeTableName);
String segmentName = committingSegmentDescriptor.getSegmentName();
LOGGER.info("Committing segment file for segment: {}", segmentName);
// Copy the segment file to the controller
String segmentLocation = committingSegmentDescriptor.getSegmentLocation();
Preconditions.checkArgument(segmentLocation != null, "Segment location must be provided");
if (segmentLocation.regionMatches(true, 0, CommonConstants.Segment.PEER_SEGMENT_DOWNLOAD_SCHEME, 0,
CommonConstants.Segment.PEER_SEGMENT_DOWNLOAD_SCHEME.length())) {
LOGGER.info("No moving needed for segment on peer servers: {}", segmentLocation);
return;
}
URI tableDirURI = URIUtils.getUri(_controllerConf.getDataDir(), rawTableName);
PinotFS pinotFS = PinotFSFactory.create(tableDirURI.getScheme());
String uriToMoveTo = moveSegmentFile(rawTableName, segmentName, segmentLocation, pinotFS);
if (!isTmpSegmentAsyncDeletionEnabled()) {
try {
for (String uri : pinotFS.listFiles(tableDirURI, false)) {
if (uri.contains(SegmentCompletionUtils.getTmpSegmentNamePrefix(segmentName))) {
LOGGER.warn("Deleting temporary segment file: {}", uri);
Preconditions.checkState(pinotFS.delete(new URI(uri), true), "Failed to delete file: %s", uri);
}
}
} catch (Exception e) {
LOGGER.warn("Caught exception while deleting temporary segment files for segment: {}", segmentName, e);
}
}
committingSegmentDescriptor.setSegmentLocation(uriToMoveTo);
}
|
@Test
public void testSegmentAlreadyThereAndExtraneousFilesDeleted()
throws Exception {
PinotFSFactory.init(new PinotConfiguration());
File tableDir = new File(TEMP_DIR, RAW_TABLE_NAME);
String segmentName = new LLCSegmentName(RAW_TABLE_NAME, 0, 0, CURRENT_TIME_MS).getSegmentName();
String otherSegmentName = new LLCSegmentName(RAW_TABLE_NAME, 1, 0, CURRENT_TIME_MS).getSegmentName();
String segmentFileName = SegmentCompletionUtils.generateTmpSegmentFileName(segmentName);
String extraSegmentFileName = SegmentCompletionUtils.generateTmpSegmentFileName(segmentName);
String otherSegmentFileName = SegmentCompletionUtils.generateTmpSegmentFileName(otherSegmentName);
File segmentFile = new File(tableDir, segmentFileName);
File extraSegmentFile = new File(tableDir, extraSegmentFileName);
File otherSegmentFile = new File(tableDir, otherSegmentFileName);
FileUtils.write(segmentFile, "temporary file contents");
FileUtils.write(extraSegmentFile, "temporary file contents");
FileUtils.write(otherSegmentFile, "temporary file contents");
FakePinotLLCRealtimeSegmentManager segmentManager = new FakePinotLLCRealtimeSegmentManager();
String segmentLocation = SCHEME + tableDir + "/" + segmentFileName;
CommittingSegmentDescriptor committingSegmentDescriptor =
new CommittingSegmentDescriptor(segmentName, PARTITION_OFFSET.toString(), 0, segmentLocation);
segmentManager.commitSegmentFile(REALTIME_TABLE_NAME, committingSegmentDescriptor);
Assert.assertEquals(committingSegmentDescriptor.getSegmentLocation(),
URIUtils.getUri(tableDir.toString(), URIUtils.encode(segmentName)).toString());
assertFalse(segmentFile.exists());
assertFalse(extraSegmentFile.exists());
assertTrue(otherSegmentFile.exists());
}
|
List<Token> tokenize() throws ScanException {
List<Token> tokenList = new ArrayList<Token>();
StringBuffer buf = new StringBuffer();
while (pointer < patternLength) {
char c = pattern.charAt(pointer);
pointer++;
switch (state) {
case LITERAL_STATE:
handleLiteralState(c, tokenList, buf);
break;
case FORMAT_MODIFIER_STATE:
handleFormatModifierState(c, tokenList, buf);
break;
case OPTION_STATE:
processOption(c, tokenList, buf);
break;
case KEYWORD_STATE:
handleKeywordState(c, tokenList, buf);
break;
case RIGHT_PARENTHESIS_STATE:
handleRightParenthesisState(c, tokenList, buf);
break;
default:
}
}
// EOS
switch (state) {
case LITERAL_STATE:
addValuedToken(Token.LITERAL, buf, tokenList);
break;
case KEYWORD_STATE:
tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString()));
break;
case RIGHT_PARENTHESIS_STATE:
tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN);
break;
case FORMAT_MODIFIER_STATE:
case OPTION_STATE:
throw new ScanException("Unexpected end of pattern string");
}
return tokenList;
}
|
@Test
public void testEmpty() throws ScanException {
try {
new TokenStream("").tokenize();
fail("empty string not allowed");
} catch (IllegalArgumentException e) {
    // expected
}
}
|
@Override
public SmsSendRespDTO sendSms(Long sendLogId, String mobile, String apiTemplateId,
List<KeyValue<String, Object>> templateParams) throws Throwable {
// 构建请求
SendSmsRequest request = new SendSmsRequest();
request.setPhoneNumbers(mobile);
request.setSignName(properties.getSignature());
request.setTemplateCode(apiTemplateId);
request.setTemplateParam(JsonUtils.toJsonString(MapUtils.convertMap(templateParams)));
request.setOutId(String.valueOf(sendLogId));
// 执行请求
SendSmsResponse response = client.getAcsResponse(request);
return new SmsSendRespDTO().setSuccess(Objects.equals(response.getCode(), API_CODE_SUCCESS)).setSerialNo(response.getBizId())
.setApiRequestId(response.getRequestId()).setApiCode(response.getCode()).setApiMsg(response.getMessage());
}
|
@Test
public void testSendSms_fail() throws Throwable {
// prepare parameters
Long sendLogId = randomLongId();
String mobile = randomString();
String apiTemplateId = randomString();
List<KeyValue<String, Object>> templateParams = Lists.newArrayList(
new KeyValue<>("code", 1234), new KeyValue<>("op", "login"));
// mock the method
SendSmsResponse response = randomPojo(SendSmsResponse.class, o -> o.setCode("ERROR"));
when(client.getAcsResponse(argThat((ArgumentMatcher<SendSmsRequest>) acsRequest -> {
assertEquals(mobile, acsRequest.getPhoneNumbers());
assertEquals(properties.getSignature(), acsRequest.getSignName());
assertEquals(apiTemplateId, acsRequest.getTemplateCode());
assertEquals(toJsonString(MapUtils.convertMap(templateParams)), acsRequest.getTemplateParam());
assertEquals(sendLogId.toString(), acsRequest.getOutId());
return true;
}))).thenReturn(response);
// invoke
SmsSendRespDTO result = smsClient.sendSms(sendLogId, mobile, apiTemplateId, templateParams);
// assert
assertFalse(result.getSuccess());
assertEquals(response.getRequestId(), result.getApiRequestId());
assertEquals(response.getCode(), result.getApiCode());
assertEquals(response.getMessage(), result.getApiMsg());
assertEquals(response.getBizId(), result.getSerialNo());
}
|
public FEELFnResult<BigDecimal> invoke(@ParameterName("from") String from, @ParameterName("grouping separator") String group, @ParameterName("decimal separator") String decimal) {
if ( from == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null"));
}
if ( group != null && !group.equals( " " ) && !group.equals( "." ) && !group.equals( "," ) ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "group", "not a valid one, can only be one of: dot ('.'), comma (','), space (' ') "));
}
if ( decimal != null ) {
if (!decimal.equals( "." ) && !decimal.equals( "," )) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "decimal", "not a valid one, can only be one of: dot ('.'), comma (',') "));
} else if (group != null && decimal.equals( group )) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "decimal", "cannot be the same as parameter 'group' "));
}
}
if ( group != null ) {
from = from.replaceAll( "\\" + group, "" );
}
if ( decimal != null ) {
from = from.replaceAll( "\\" + decimal, "." );
}
BigDecimal result = NumberEvalHelper.getBigDecimalOrNull(from );
if( from != null && result == null ) {
// conversion failed
return FEELFnResult.ofError( new InvalidParametersEvent(Severity.ERROR, "unable to calculate final number result" ) );
} else {
return FEELFnResult.ofResult( result );
}
}
|
@Test
void invokeNumberWithDecimalCharDot() {
FunctionTestUtil.assertResult(numberFunction.invoke("9.876", null, "."), BigDecimal.valueOf(9.876));
}
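// A hedged companion case for the separator handling above, assuming NumberEvalHelper
// parses the normalized literal "1000000.01" to the same BigDecimal as BigDecimal.valueOf.
@Test
void invokeNumberWithGroupSpaceAndDecimalComma() {
    // Space as grouping separator and comma as decimal separator:
    // "1 000 000,01" is normalized to "1000000.01" before conversion.
    FunctionTestUtil.assertResult(numberFunction.invoke("1 000 000,01", " ", ","),
            BigDecimal.valueOf(1000000.01));
}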
|
@Udf(description = "Converts a TIMESTAMP value into the"
+ " string representation of the timestamp in the given format. Single quotes in the"
+ " timestamp format can be escaped with '', for example: 'yyyy-MM-dd''T''HH:mm:ssX'"
+ " The system default time zone is used when no time zone is explicitly provided."
+ " The format pattern should be in the format expected"
+ " by java.time.format.DateTimeFormatter")
public String formatTimestamp(
@UdfParameter(
description = "TIMESTAMP value.") final Timestamp timestamp,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
return formatTimestamp(timestamp, formatPattern, ZoneId.of("GMT").getId());
}
|
@Test
public void testUTCTimeZone() {
// When:
final String result = udf.formatTimestamp(new Timestamp(1534353043000L),
"yyyy-MM-dd HH:mm:ss", "UTC");
// Then:
assertThat(result, is("2018-08-15 17:10:43"));
}
|
@Override
public boolean isTopicExists(final String topic) {
LOG.trace("Checking for existence of topic '{}'", topic);
try {
ExecutorUtil.executeWithRetries(
() -> adminClient.get().describeTopics(
ImmutableList.of(topic),
new DescribeTopicsOptions().includeAuthorizedOperations(true)
).topicNameValues().get(topic).get(),
RetryBehaviour.ON_RETRYABLE.and(e -> !(e instanceof UnknownTopicOrPartitionException))
);
return true;
} catch (final TopicAuthorizationException e) {
throw new KsqlTopicAuthorizationException(
AclOperation.DESCRIBE, Collections.singleton(topic));
} catch (final Exception e) {
if (Throwables.getRootCause(e) instanceof UnknownTopicOrPartitionException) {
return false;
}
throw new KafkaResponseGetFailedException("Failed to check if exists for topic: " + topic, e);
}
}
|
@Test
public void shouldNotListAllTopicsWhenCallingIsTopicExists() {
// Given
givenTopicExists("foobar", 1, 1);
// When
kafkaTopicClient.isTopicExists("foobar");
// Then
verify(adminClient, never()).listTopics();
}
|
public final boolean isEmpty() {
return (this.firstNode == null);
}
|
@Test
public void testIsEmpty() {
assertThat(this.list.isEmpty()).as("Empty list should return true on isEmpty()").isTrue();
this.list.add( this.node1 );
assertThat(this.list.isEmpty()).as("Not empty list should return false on isEmpty()").isFalse();
}
|
public static boolean isValidIpv6Address(String ip) {
return IPV6_ADDRESS.matcher(ip).matches()
// Includes additional segment count check for the compressed address
|| (IPV6_COMPRESSED_ADDRESS.matcher(ip).matches() && ip.split(":").length > 0 && ip.split(":").length <= 7);
}
|
@Test
public void testIPv6() {
assertThat(IpAndDnsValidation.isValidIpv6Address("::1"), is(true));
assertThat(IpAndDnsValidation.isValidIpv6Address("fc01::"), is(true));
assertThat(IpAndDnsValidation.isValidIpv6Address("fc01::8d1c"), is(true));
assertThat(IpAndDnsValidation.isValidIpv6Address("::fc01:8d1c"), is(true));
assertThat(IpAndDnsValidation.isValidIpv6Address("fc01:8d1c::"), is(true));
assertThat(IpAndDnsValidation.isValidIpv6Address("1762:0:0:0:0:B03:1:AF18"), is(true));
assertThat(IpAndDnsValidation.isValidIpv6Address("::"), is(false));
assertThat(IpAndDnsValidation.isValidIpv6Address("fc01::8j1c"), is(false)); // j is not allowed character
assertThat(IpAndDnsValidation.isValidIpv6Address("fc01::176d::8d1c"), is(false)); // Too many ::
assertThat(IpAndDnsValidation.isValidIpv6Address("1762:0:0:0:0:B03::0:1:AF18"), is(false)); // Too many segements
assertThat(IpAndDnsValidation.isValidIpv6Address("fc01c::8d1c"), is(false)); // Segment with too many characters
assertThat(IpAndDnsValidation.isValidIpv6Address("fc01:::8d1c"), is(false)); // Triple :
assertThat(IpAndDnsValidation.isValidIpv6Address("176J:0:0:0:0:B03:1:AF18"), is(false));
assertThat(IpAndDnsValidation.isValidIpv6Address("1762:0:0:0:0:1:B03:1:AF18"), is(false)); // Too many segments
assertThat(IpAndDnsValidation.isValidIpv6Address("1762:0:0:0:0:B03:1:2AF18"), is(false)); // Segment with too many characters
assertThat(IpAndDnsValidation.isValidIpv6Address("1762:0:0:0:0:53B03:1:AF18"), is(false)); // Segment with too many characters
}
|
public Summary summarize() {
// ignore unused timers
List<Summary> summaries = Arrays.stream(timerTable)
.map(Timer::summarize)
.filter(s -> s.getTaskCount() > 0)
.collect(Collectors.toList());
// summarize data
long totalTaskTimeMillis = 0L;
long maxTaskTimeMillis = DEFAULT_TIME_MILLS;
long minTaskTimeMillis = DEFAULT_TIME_MILLS;
long taskCount = 0L;
for (Summary summary : summaries) {
if (taskCount > 0) {
maxTaskTimeMillis = Math.max(maxTaskTimeMillis, summary.getMaxTaskTimeMillis());
minTaskTimeMillis = Math.min(minTaskTimeMillis, summary.getMinTaskTimeMillis());
} else {
maxTaskTimeMillis = summary.getMaxTaskTimeMillis();
minTaskTimeMillis = summary.getMinTaskTimeMillis();
}
totalTaskTimeMillis += summary.getTotalTaskTimeMillis();
taskCount += summary.getTaskCount();
}
return new Summary(totalTaskTimeMillis, maxTaskTimeMillis, minTaskTimeMillis, taskCount);
}
|
@Test
public void testSummarize() {
ExtensibleThreadPoolExecutor executor = new ExtensibleThreadPoolExecutor(
"test", new DefaultThreadPoolPluginManager(),
3, 3, 1000L, TimeUnit.MILLISECONDS,
new ArrayBlockingQueue<>(1), Thread::new, new ThreadPoolExecutor.DiscardPolicy());
TaskTimeRecordPlugin plugin = new TaskTimeRecordPlugin(3);
executor.register(plugin);
executor.submit(() -> ThreadUtil.sleep(1000L));
executor.submit(() -> ThreadUtil.sleep(3000L));
executor.submit(() -> ThreadUtil.sleep(2000L));
executor.submit(() -> ThreadUtil.sleep(2000L));
// waiting for shutdown
executor.shutdown();
while (!executor.isTerminated()) {
    // busy-wait until all submitted tasks have finished
}
TaskTimeRecordPlugin.Summary summary = plugin.summarize();
Assert.assertTrue(summary.getMinTaskTimeMillis() > 0L);
Assert.assertTrue(summary.getMaxTaskTimeMillis() > 0L);
Assert.assertTrue(summary.getAvgTaskTimeMillis() > 0L);
Assert.assertTrue(summary.getTotalTaskTimeMillis() > 0L);
// Assert.assertTrue(testInDeviation(summary.getMinTaskTimeMillis(), 1000L, 300L));
// Assert.assertTrue(testInDeviation(summary.getMaxTaskTimeMillis(), 3000L, 300L));
// Assert.assertTrue(testInDeviation(summary.getAvgTaskTimeMillis(), 2000L, 300L));
// Assert.assertTrue(testInDeviation(summary.getTotalTaskTimeMillis(), 8000L, 300L));
}
|
@Override
public int indexOf(int fromIndex, int toIndex, byte value) {
if (fromIndex <= toIndex) {
return ByteBufUtil.firstIndexOf(this, fromIndex, toIndex, value);
}
return ByteBufUtil.lastIndexOf(this, fromIndex, toIndex, value);
}
|
@Test
public void testIndexOfReleaseBuffer() {
ByteBuf buffer = releasedBuffer();
if (buffer.capacity() != 0) {
try {
buffer.indexOf(0, 1, (byte) 1);
fail();
} catch (IllegalReferenceCountException expected) {
// expected
}
} else {
assertEquals(-1, buffer.indexOf(0, 1, (byte) 1));
}
}
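// A hedged sketch of the two search directions dispatched above: per Netty's contract,
// fromIndex <= toIndex scans forward, while fromIndex > toIndex scans backward starting
// just before fromIndex. The buffer contents and the use of Unpooled are illustrative.
@Test
public void testIndexOfSearchDirections() {
    ByteBuf buf = Unpooled.wrappedBuffer(new byte[] {1, 2, 1, 2});
    // Forward scan over [0, 4) finds the first 1 at index 0.
    assertEquals(0, buf.indexOf(0, 4, (byte) 1));
    // Backward scan from index 4 down to 0 finds the last 1 at index 2.
    assertEquals(2, buf.indexOf(4, 0, (byte) 1));
    buf.release();
}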
|
public static ShardingRouteEngine newInstance(final ShardingRule shardingRule, final ShardingSphereDatabase database, final QueryContext queryContext,
final ShardingConditions shardingConditions, final ConfigurationProperties props, final ConnectionContext connectionContext) {
SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext();
SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
if (sqlStatement instanceof TCLStatement) {
return new ShardingDatabaseBroadcastRoutingEngine();
}
if (sqlStatement instanceof DDLStatement) {
if (sqlStatementContext instanceof CursorAvailable) {
return getCursorRouteEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props);
}
return getDDLRoutingEngine(shardingRule, database, sqlStatementContext);
}
if (sqlStatement instanceof DALStatement) {
return getDALRoutingEngine(shardingRule, database, sqlStatementContext, connectionContext);
}
if (sqlStatement instanceof DCLStatement) {
return getDCLRoutingEngine(shardingRule, database, sqlStatementContext);
}
return getDQLRoutingEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props, connectionContext);
}
|
@Test
void assertNewInstanceForShowCreateTableWithTableRule() {
DALStatement dalStatement = mock(MySQLShowCreateTableStatement.class);
when(sqlStatementContext.getSqlStatement()).thenReturn(dalStatement);
tableNames.add("table_1");
when(shardingRule.getShardingRuleTableNames(tableNames)).thenReturn(tableNames);
QueryContext queryContext = new QueryContext(sqlStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class));
ShardingRouteEngine actual =
ShardingRouteEngineFactory.newInstance(shardingRule, database, queryContext, shardingConditions, props, new ConnectionContext(Collections::emptySet));
assertThat(actual, instanceOf(ShardingUnicastRoutingEngine.class));
}
|
public Map<SoftwareQuality, Severity> getDefaultImpacts() {
return defaultImpacts;
}
|
@Test
public void constructor_whenAdhocRuleHasNoProvidedImpact_shouldMapDefaultImpactAccordingly() {
NewAdHocRule adHocRule = new NewAdHocRule(ScannerReport.AdHocRule.newBuilder()
.setEngineId("eslint").setRuleId("no-cond-assign").setName("name")
.setType(ScannerReport.IssueType.CODE_SMELL)
.setSeverity(Constants.Severity.MINOR)
.build());
assertThat(adHocRule.getDefaultImpacts())
.containsExactlyEntriesOf(Map.of(SoftwareQuality.MAINTAINABILITY, Severity.LOW));
}
|
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
Configuration conf = FlinkOptions.fromMap(context.getCatalogTable().getOptions());
StoragePath path = new StoragePath(conf.getOptional(FlinkOptions.PATH).orElseThrow(() ->
new ValidationException("Option [path] should not be empty.")));
setupTableOptions(conf.getString(FlinkOptions.PATH), conf);
ResolvedSchema schema = context.getCatalogTable().getResolvedSchema();
setupConfOptions(conf, context.getObjectIdentifier(), context.getCatalogTable(), schema);
return new HoodieTableSource(
SerializableSchema.create(schema),
path,
context.getCatalogTable().getPartitionKeys(),
conf.getString(FlinkOptions.PARTITION_DEFAULT_NAME),
conf);
}
|
@Test
void testSetupReadOptionsForSource() {
// definition with simple primary key and partition path
ResolvedSchema schema1 = SchemaBuilder.instance()
.field("f0", DataTypes.INT().notNull())
.field("f1", DataTypes.VARCHAR(20))
.field("f2", DataTypes.TIMESTAMP(3))
.field("ts", DataTypes.TIMESTAMP(3))
.primaryKey("f0")
.build();
// setting a read end commit should switch the query type to incremental
this.conf.setString(FlinkOptions.READ_END_COMMIT, "123");
final MockContext sourceContext1 = MockContext.getInstance(this.conf, schema1, "f2");
final HoodieTableSource tableSource1 = (HoodieTableSource) new HoodieTableFactory().createDynamicTableSource(sourceContext1);
final Configuration conf1 = tableSource1.getConf();
assertThat(conf1.getString(FlinkOptions.QUERY_TYPE), is(FlinkOptions.QUERY_TYPE_INCREMENTAL));
this.conf.removeConfig(FlinkOptions.READ_END_COMMIT);
this.conf.setString(FlinkOptions.READ_START_COMMIT, "123");
final MockContext sourceContext2 = MockContext.getInstance(this.conf, schema1, "f2");
final HoodieTableSource tableSource2 = (HoodieTableSource) new HoodieTableFactory().createDynamicTableSource(sourceContext2);
final Configuration conf2 = tableSource2.getConf();
assertThat(conf2.getString(FlinkOptions.QUERY_TYPE), is(FlinkOptions.QUERY_TYPE_INCREMENTAL));
}
|
@Override
public Object initialize(Object obj) {
Object object = obj;
if (object instanceof HazelcastInstanceAware aware) {
aware.setHazelcastInstance(instance);
}
if (object instanceof SerializationServiceAware aware) {
aware.setSerializationService(instance.getSerializationService());
}
if (hasExternalContext) {
object = externalContext.initialize(object);
}
return object;
}
|
@Test
public void testInitialize() {
DependencyInjectionUserClass initializedUserClass = (DependencyInjectionUserClass) serializationService.getManagedContext().initialize(userClass);
assertEquals(client, initializedUserClass.hazelcastInstance);
assertNull("The client doesn't inject the Node", initializedUserClass.node);
assertEquals(serializationService, initializedUserClass.serializationService);
assertTrue(userContext.wasCalled);
}
|
public void runExtractor(Message msg) {
try(final Timer.Context ignored = completeTimer.time()) {
final String field;
try (final Timer.Context ignored2 = conditionTimer.time()) {
// We can only work on Strings.
if (!(msg.getField(sourceField) instanceof String)) {
conditionMissesCounter.inc();
return;
}
field = (String) msg.getField(sourceField);
// Decide whether to extract at all.
if (conditionType.equals(ConditionType.STRING)) {
if (field.contains(conditionValue)) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
} else if (conditionType.equals(ConditionType.REGEX)) {
if (regexConditionPattern.matcher(field).find()) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
}
}
try (final Timer.Context ignored2 = executionTimer.time()) {
Result[] results;
try {
results = run(field);
} catch (ExtractorException e) {
final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>";
msg.addProcessingError(new Message.ProcessingError(
ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e)));
return;
}
if (results == null || results.length == 0 || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) {
return;
} else if (results.length == 1 && results[0].target == null) {
// results[0].target is null if this extractor cannot produce multiple fields; use targetField in that case
msg.addField(targetField, results[0].getValue());
} else {
for (final Result result : results) {
msg.addField(result.getTarget(), result.getValue());
}
}
// Remove original from message?
if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField) && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) {
final StringBuilder sb = new StringBuilder(field);
final List<Result> reverseList = Arrays.stream(results)
.sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed())
.collect(Collectors.toList());
// remove all from reverse so that the indices still match
for (final Result result : reverseList) {
sb.delete(result.getBeginIndex(), result.getEndIndex());
}
final String builtString = sb.toString();
final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString;
msg.removeField(sourceField);
// TODO don't add an empty field back, or rather don't add fullyCutByExtractor
msg.addField(sourceField, finalResult);
}
runConverters(msg);
}
}
}
|
@Test
public void testWithOneValueOnlyResult() throws Exception {
final TestExtractor extractor = new TestExtractor.Builder()
.callback(new Callable<Result[]>() {
@Override
public Result[] call() throws Exception {
return new Result[]{
new Result("1", -1, -1)
};
}
})
.build();
final Message msg = createMessage("the hello");
extractor.runExtractor(msg);
assertThat(msg.getField("target")).isEqualTo("1");
}
|
public Claims getPayload(final String jwt) {
return Jwts.parser()
.verifyWith(tokenConfigurationParameter.getPublicKey())
.build()
.parseSignedClaims(jwt)
.getPayload();
}
|
@Test
void givenJwt_whenGetPayload_thenReturnPayload() {
// Given
String token = Jwts.builder()
.claim("user_id", "12345")
.issuedAt(new Date())
.expiration(new Date(System.currentTimeMillis() + 86400000L)) // 1 day expiration
.signWith(keyPair.getPrivate())
.compact();
// When
Claims payload = tokenService.getPayload(token);
// Then
assertThat(payload.get("user_id")).isEqualTo("12345");
}
|
public boolean waitingOnUnreleasedPartition(ConsumerGroupMember member) {
if (member.state() == MemberState.UNRELEASED_PARTITIONS) {
for (Map.Entry<Uuid, Set<Integer>> entry : targetAssignment().get(member.memberId()).partitions().entrySet()) {
Uuid topicId = entry.getKey();
Set<Integer> assignedPartitions = member.assignedPartitions().getOrDefault(topicId, Collections.emptySet());
for (int partition : entry.getValue()) {
if (!assignedPartitions.contains(partition) && currentPartitionEpoch(topicId, partition) != -1) {
return true;
}
}
}
}
return false;
}
|
@Test
public void testWaitingOnUnreleasedPartition() {
Uuid fooTopicId = Uuid.randomUuid();
Uuid barTopicId = Uuid.randomUuid();
Uuid zarTopicId = Uuid.randomUuid();
String memberId1 = Uuid.randomUuid().toString();
String memberId2 = Uuid.randomUuid().toString();
ConsumerGroup consumerGroup = createConsumerGroup("foo");
consumerGroup.updateTargetAssignment(memberId1, new Assignment(mkAssignment(
mkTopicAssignment(fooTopicId, 1, 2, 3),
mkTopicAssignment(zarTopicId, 7, 8, 9)
)));
ConsumerGroupMember member1 = new ConsumerGroupMember.Builder(memberId1)
.setMemberEpoch(10)
.setState(MemberState.UNRELEASED_PARTITIONS)
.setAssignedPartitions(mkAssignment(
mkTopicAssignment(fooTopicId, 1, 2, 3)))
.setPartitionsPendingRevocation(mkAssignment(
mkTopicAssignment(barTopicId, 4, 5, 6)))
.build();
consumerGroup.updateMember(member1);
assertFalse(consumerGroup.waitingOnUnreleasedPartition(member1));
ConsumerGroupMember member2 = new ConsumerGroupMember.Builder(memberId2)
.setMemberEpoch(10)
.setPartitionsPendingRevocation(mkAssignment(
mkTopicAssignment(zarTopicId, 7)))
.build();
consumerGroup.updateMember(member2);
assertTrue(consumerGroup.waitingOnUnreleasedPartition(member1));
}
|
@Override
public MaterializedWindowedTable windowed() {
return new KsqlMaterializedWindowedTable(inner.windowed());
}
|
@Test
public void shouldReturnEmptyIfInnerWindowedReturnsEmpty() {
// Given:
final MaterializedWindowedTable table = materialization.windowed();
when(innerWindowed.get(any(), anyInt(), any(), any(), any())).thenReturn(
KsMaterializedQueryResult.rowIteratorWithPosition(Collections.emptyIterator(), position));
givenNoopTransforms();
// When:
final Iterator<WindowedRow> result =
table.get(aKey, partition, windowStartBounds, windowEndBounds);
// Then:
assertThat(result.hasNext(), is(false));
}
|
@Override
@Deprecated
public ByteString getTag(String tag) {
ByteString b = maybeGetTag(tag);
if (b == null)
throw new IllegalArgumentException("Unknown tag " + tag);
return b;
}
|
@Test(expected = IllegalArgumentException.class)
public void exception() {
obj.getTag("non existent");
}
|
@Override
public <V> RBucket<V> getBucket(String name) {
return new RedissonBucket<V>(commandExecutor, name);
}
|
@Test
public void testDecoderError() {
redisson.getBucket("testbucket", new StringCodec()).set("{INVALID JSON!}");
for (int i = 0; i < 256; i++) {
try {
redisson.getBucket("testbucket", new JsonJacksonCodec()).get();
Assertions.fail();
} catch (Exception e) {
// skip
}
}
redisson.getBucket("testbucket2").set("should work");
}
|
@SuppressWarnings("unchecked")
public static <T> TypeInformation<T> convert(String jsonSchema) {
Preconditions.checkNotNull(jsonSchema, "JSON schema");
final ObjectMapper mapper = JacksonMapperFactory.createObjectMapper();
mapper.getFactory()
.enable(JsonParser.Feature.ALLOW_COMMENTS)
.enable(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES)
.enable(JsonParser.Feature.ALLOW_SINGLE_QUOTES);
final JsonNode node;
try {
node = mapper.readTree(jsonSchema);
} catch (IOException e) {
throw new IllegalArgumentException("Invalid JSON schema.", e);
}
return (TypeInformation<T>) convertType("<root>", node, node);
}
|
@Test
void testNullUnionTypes() {
final TypeInformation<?> result =
JsonRowSchemaConverter.convert("{ type: ['string', 'null'] }");
assertThat(result).isEqualTo(Types.STRING);
}
|
void importSingleVideo(FacebookClient client, VideoModel video) {
ArrayList<Parameter> params = new ArrayList<>();
params.add(Parameter.with("file_url", video.getContentUrl().toString()));
if (video.getDescription() != null)
params.add(Parameter.with("description", video.getDescription()));
String endpoint = "me/videos";
client.publish(endpoint, GraphResponse.class, params.toArray(new Parameter[0]));
}
|
@Test
public void testImportSingleVideo() {
importer.importSingleVideo(
client,
new VideoModel(
"title", VIDEO_URL, VIDEO_DESCRIPTION, "video/mp4", "videoId", null, false, null));
Parameter[] params = {
Parameter.with("file_url", VIDEO_URL), Parameter.with("description", VIDEO_DESCRIPTION)
};
verify(client).publish("me/videos", GraphResponse.class, params);
}
|
@Udf(description = "Converts a string representation of a date in the given format"
+ " into the TIMESTAMP value."
+ " Single quotes in the timestamp format can be escaped with '',"
+ " for example: 'yyyy-MM-dd''T''HH:mm:ssX'.")
public Timestamp parseTimestamp(
@UdfParameter(
description = "The string representation of a date.") final String formattedTimestamp,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
return parseTimestamp(formattedTimestamp, formatPattern, ZoneId.of("GMT").getId());
}
|
@Test
public void shouldHandleNullFormat() {
// When:
final Object result = udf.parseTimestamp("2018-08-15 17:10:43",
null, "UTC");
// Then:
assertThat(result, is(nullValue()));
}
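// A hedged positive-path example for the GMT default above; the epoch value is taken from
// the formatTimestamp test earlier in this document, so the two UDFs round-trip.
@Test
public void shouldParseTimestampUsingGmtDefault() {
    // When:
    final Timestamp result = udf.parseTimestamp("2018-08-15 17:10:43", "yyyy-MM-dd HH:mm:ss");
    // Then: 2018-08-15 17:10:43 GMT corresponds to epoch millis 1534353043000.
    assertThat(result, is(new Timestamp(1534353043000L)));
}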
|
public PolarisConfigEndpoint(PolarisConfigProperties polarisConfigProperties) {
this.polarisConfigProperties = polarisConfigProperties;
}
|
@Test
public void testPolarisConfigEndpoint() {
Map<String, Object> content = new HashMap<>();
content.put("k1", "v1");
content.put("k2", "v2");
content.put("k3", "v3");
MockedConfigKVFile file = new MockedConfigKVFile(content);
PolarisPropertySource polarisPropertySource = new PolarisPropertySource(testNamespace, testServiceName, testFileName,
file, content);
PolarisPropertySourceManager.addPropertySource(polarisPropertySource);
PolarisConfigEndpoint endpoint = new PolarisConfigEndpoint(polarisConfigProperties);
Map<String, Object> info = endpoint.polarisConfig();
assertThat(info.get("PolarisConfigProperties")).isEqualTo(polarisConfigProperties);
assertThat(info.get("PolarisPropertySource")).isEqualTo(Lists.newArrayList(polarisPropertySource));
}
|
public String orderClause(AmountRequest amountRequest) {
return orderClause(amountRequest, ORDER_TERM_TO_SQL_STRING);
}
|
@Test
void mapUpdatedAt() {
final AmountRequest pageRequest = ascOnUpdatedAt(2);
assertThat(amountMapper.orderClause(pageRequest)).isEqualTo(" ORDER BY updatedAt ASC");
}
|
@Nullable
@VisibleForTesting
InetAddress getIpFromFieldValue(String fieldValue) {
try {
return InetAddresses.forString(fieldValue.trim());
} catch (IllegalArgumentException e) {
// Do nothing, field is not an IP
}
return null;
}
|
@Test
public void getIpFromFieldValue() {
when(geoIpVendorResolverService.createCityResolver(any(GeoIpResolverConfig.class), any(Timer.class)))
.thenReturn(maxMindCityResolver);
when(geoIpVendorResolverService.createAsnResolver(any(GeoIpResolverConfig.class), any(Timer.class)))
.thenReturn(maxMindAsnResolver);
final GeoIpResolverEngine engine = new GeoIpResolverEngine(geoIpVendorResolverService, config, s3GeoIpFileService, metricRegistry);
final String ip = "127.0.0.1";
assertEquals(InetAddresses.forString(ip), engine.getIpFromFieldValue(ip));
assertNull(engine.getIpFromFieldValue("Message from \"127.0.0.1\""));
assertNull(engine.getIpFromFieldValue("Test message with no IP"));
}
|
@SuppressWarnings({"SimplifyBooleanReturn"})
public static Map<String, ParamDefinition> cleanupParams(Map<String, ParamDefinition> params) {
if (params == null || params.isEmpty()) {
return params;
}
Map<String, ParamDefinition> mapped =
params.entrySet().stream()
.collect(
MapHelper.toListMap(
Map.Entry::getKey,
p -> {
ParamDefinition param = p.getValue();
if (param.getType() == ParamType.MAP) {
MapParamDefinition mapParamDef = param.asMapParamDef();
if (mapParamDef.getValue() == null
&& (mapParamDef.getInternalMode() == InternalParamMode.OPTIONAL)) {
return mapParamDef;
}
return MapParamDefinition.builder()
.name(mapParamDef.getName())
.value(cleanupParams(mapParamDef.getValue()))
.expression(mapParamDef.getExpression())
.validator(mapParamDef.getValidator())
.tags(mapParamDef.getTags())
.mode(mapParamDef.getMode())
.meta(mapParamDef.getMeta())
.build();
} else {
return param;
}
}));
Map<String, ParamDefinition> filtered =
mapped.entrySet().stream()
.filter(
p -> {
ParamDefinition param = p.getValue();
if (param.getInternalMode() == InternalParamMode.OPTIONAL) {
if (param.getValue() == null && param.getExpression() == null) {
return false;
} else if (param.getType() == ParamType.MAP
&& param.asMapParamDef().getValue() != null
&& param.asMapParamDef().getValue().isEmpty()) {
return false;
} else {
return true;
}
} else {
Checks.checkTrue(
param.getValue() != null || param.getExpression() != null,
String.format(
"[%s] is a required parameter (type=[%s])",
p.getKey(), param.getType()));
return true;
}
})
.collect(MapHelper.toListMap(Map.Entry::getKey, Map.Entry::getValue));
return cleanIntermediateMetadata(filtered);
}
|
@Test
public void testCleanupOptionalEmptyNestedMapEmptyElement() throws JsonProcessingException {
Map<String, ParamDefinition> allParams =
parseParamDefMap(
"{'map': {'type': 'MAP','value': {'nested': {'type': 'MAP','value': {'str': {'type': 'STRING', 'internal_mode': 'OPTIONAL'}}, 'internal_mode': 'OPTIONAL'}}, 'internal_mode': 'OPTIONAL'}}");
Map<String, ParamDefinition> cleanedParams = ParamsMergeHelper.cleanupParams(allParams);
assertEquals(0, cleanedParams.size());
}
|
public static Read read() {
return Read.create();
}
|
@Test
public void testReadWithRuntimeParametersValidationFailed() {
ReadOptions options = PipelineOptionsFactory.fromArgs().withValidation().as(ReadOptions.class);
BigtableIO.Read read =
BigtableIO.read()
.withProjectId(options.getBigtableProject())
.withInstanceId(options.getBigtableInstanceId())
.withTableId(options.getBigtableTableId());
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("tableId was not supplied");
p.apply(read);
}
|
@VisibleForTesting
static String formatTimestamp(Long timestampMicro) {
// timestampMicro is in "microseconds since epoch" format,
// e.g., 1452062291123456L means "2016-01-06 06:38:11.123456 UTC".
// Separate into seconds and microseconds.
long timestampSec = timestampMicro / 1_000_000;
long micros = timestampMicro % 1_000_000;
if (micros < 0) {
micros += 1_000_000;
timestampSec -= 1;
}
String dayAndTime = DATE_AND_SECONDS_FORMATTER.print(timestampSec * 1000);
if (micros == 0) {
return String.format("%s UTC", dayAndTime);
}
return String.format("%s.%06d UTC", dayAndTime, micros);
}
|
@Test
public void testFormatTimestampTrailingZeroesOnMicros() {
assertThat(
BigQueryAvroUtils.formatTimestamp(1452062291123000L),
equalTo("2016-01-06 06:38:11.123000 UTC"));
}
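// The micros-normalization branch above matters for pre-epoch values; a hedged example in
// the style of the surrounding test, assuming DATE_AND_SECONDS_FORMATTER prints in UTC.
@Test
public void testFormatTimestampNegativeMicros() {
    // -1 us: micros normalizes from -1 to 999999 and the seconds roll back by one.
    assertThat(
        BigQueryAvroUtils.formatTimestamp(-1L),
        equalTo("1969-12-31 23:59:59.999999 UTC"));
}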
|
@Nullable
@Override
public JobGraph recoverJobGraph(JobID jobId) throws Exception {
checkNotNull(jobId, "Job ID");
LOG.debug("Recovering job graph {} from {}.", jobId, jobGraphStateHandleStore);
final String name = jobGraphStoreUtil.jobIDToName(jobId);
synchronized (lock) {
verifyIsRunning();
boolean success = false;
RetrievableStateHandle<JobGraph> jobGraphRetrievableStateHandle;
try {
try {
jobGraphRetrievableStateHandle = jobGraphStateHandleStore.getAndLock(name);
} catch (StateHandleStore.NotExistException ignored) {
success = true;
return null;
} catch (Exception e) {
throw new FlinkException(
"Could not retrieve the submitted job graph state handle "
+ "for "
+ name
+ " from the submitted job graph store.",
e);
}
JobGraph jobGraph;
try {
jobGraph = jobGraphRetrievableStateHandle.retrieveState();
} catch (ClassNotFoundException cnfe) {
throw new FlinkException(
"Could not retrieve submitted JobGraph from state handle under "
+ name
+ ". This indicates that you are trying to recover from state written by an "
+ "older Flink version which is not compatible. Try cleaning the state handle store.",
cnfe);
} catch (IOException ioe) {
throw new FlinkException(
"Could not retrieve submitted JobGraph from state handle under "
+ name
+ ". This indicates that the retrieved state handle is broken. Try cleaning the state handle "
+ "store.",
ioe);
}
addedJobGraphs.add(jobGraph.getJobID());
LOG.info("Recovered {}.", jobGraph);
success = true;
return jobGraph;
} finally {
if (!success) {
jobGraphStateHandleStore.release(name);
}
}
}
}
|
@Test
public void testOnAddedJobGraphShouldOnlyProcessUnknownJobGraphs() throws Exception {
final RetrievableStateHandle<JobGraph> stateHandle =
jobGraphStorageHelper.store(testingJobGraph);
final TestingStateHandleStore<JobGraph> stateHandleStore =
builder.setGetFunction(ignore -> stateHandle)
.setAddFunction((ignore, state) -> jobGraphStorageHelper.store(state))
.build();
final JobGraphStore jobGraphStore = createAndStartJobGraphStore(stateHandleStore);
jobGraphStore.recoverJobGraph(testingJobGraph.getJobID());
// Known recovered job
testingJobGraphStoreWatcher.addJobGraph(testingJobGraph.getJobID());
// Unknown job
final JobID unknownJobId = JobID.generate();
testingJobGraphStoreWatcher.addJobGraph(unknownJobId);
assertThat(testingJobGraphListener.getAddedJobGraphs().size(), is(1));
assertThat(testingJobGraphListener.getAddedJobGraphs(), contains(unknownJobId));
}
|
public GenericRecord serialize(Object o, Schema schema) {
StructObjectInspector soi = (StructObjectInspector) objectInspector;
GenericData.Record record = new GenericData.Record(schema);
List<? extends StructField> outputFieldRefs = soi.getAllStructFieldRefs();
if (outputFieldRefs.size() != columnNames.size()) {
throw new HoodieException("Number of input columns was different than output columns (in = " + columnNames.size() + " vs out = " + outputFieldRefs.size());
}
int size = schema.getFields().size();
List<? extends StructField> allStructFieldRefs = soi.getAllStructFieldRefs();
List<Object> structFieldsDataAsList = soi.getStructFieldsDataAsList(o);
for (int i = 0; i < size; i++) {
Schema.Field field = schema.getFields().get(i);
if (i >= columnTypes.size()) {
break;
}
try {
setUpRecordFieldFromWritable(columnTypes.get(i), structFieldsDataAsList.get(i),
allStructFieldRefs.get(i).getFieldObjectInspector(), record, field);
} catch (Exception e) {
LOG.error(String.format("current columnNames: %s", columnNames.stream().collect(Collectors.joining(","))));
LOG.error(String.format("current type: %s", columnTypes.stream().map(f -> f.getTypeName()).collect(Collectors.joining(","))));
LOG.error(String.format("current value: %s", HoodieRealtimeRecordReaderUtils.arrayWritableToString((ArrayWritable) o)));
throw e;
}
}
return record;
}
|
@Test
public void testNestedValueSerialize() {
Schema nestedSchema = new Schema.Parser().parse(NESTED_SCHEMA);
GenericRecord avroRecord = new GenericData.Record(nestedSchema);
avroRecord.put("firstname", "person1");
avroRecord.put("lastname", "person2");
GenericArray scores = new GenericData.Array<>(avroRecord.getSchema().getField("scores").schema(), Arrays.asList(1,2));
avroRecord.put("scores", scores);
GenericRecord studentRecord = new GenericData.Record(avroRecord.getSchema().getField("student").schema());
studentRecord.put("firstname", "person1");
studentRecord.put("lastname", "person2");
avroRecord.put("student", studentRecord);
GenericArray teachers = new GenericData.Array<>(avroRecord.getSchema().getField("teachers").schema(), Arrays.asList(studentRecord));
avroRecord.put("teachers", teachers);
assertTrue(GenericData.get().validate(nestedSchema, avroRecord));
ArrayWritable writable = (ArrayWritable) HoodieRealtimeRecordReaderUtils.avroToArrayWritable(avroRecord, nestedSchema, true);
List<TypeInfo> columnTypeList = createHiveTypeInfoFrom("string,string,array<int>,struct<firstname:string,lastname:string>,array<struct<firstname:string,lastname:string>>");
List<String> columnNameList = createHiveColumnsFrom("firstname,lastname,arrayRecord,student,teachers");
StructTypeInfo rowTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(columnNameList, columnTypeList);
GenericRecord testRecord = new HiveAvroSerializer(new ArrayWritableObjectInspector(rowTypeInfo), columnNameList, columnTypeList).serialize(writable, nestedSchema);
assertTrue(GenericData.get().validate(nestedSchema, testRecord));
}
|
public static <K> KStreamHolder<K> build(
final KStreamHolder<K> stream,
final StreamFilter<K> step,
final RuntimeBuildContext buildContext) {
return build(stream, step, buildContext, SqlPredicate::new);
}
|
@Test
public void shouldBuildSqlPredicateCorrectly() {
// When:
step.build(planBuilder, planInfo);
// Then:
verify(predicateFactory).create(
filterExpression,
schema,
ksqlConfig,
functionRegistry
);
}
|
public static FromEndOfWindow pastEndOfWindow() {
return new FromEndOfWindow();
}
|
@Test
public void testToStringExcludesNeverTrigger() {
Trigger trigger =
AfterWatermark.pastEndOfWindow()
.withEarlyFirings(Never.ever())
.withLateFirings(Never.ever());
assertEquals("AfterWatermark.pastEndOfWindow()", trigger.toString());
}
|
@Override
public Result invoke(Invocation invocation) throws RpcException {
Result result;
String value = getUrl().getMethodParameter(
RpcUtils.getMethodName(invocation), MOCK_KEY, Boolean.FALSE.toString())
.trim();
if (ConfigUtils.isEmpty(value)) {
// no mock
result = this.invoker.invoke(invocation);
} else if (value.startsWith(FORCE_KEY)) {
if (logger.isWarnEnabled()) {
logger.warn(
CLUSTER_FAILED_MOCK_REQUEST,
"force mock",
"",
"force-mock: " + RpcUtils.getMethodName(invocation) + " force-mock enabled , url : "
+ getUrl());
}
// force:direct mock
result = doMockInvoke(invocation, null);
} else {
// fail-mock
try {
result = this.invoker.invoke(invocation);
// fix:#4585
if (result.getException() != null && result.getException() instanceof RpcException) {
RpcException rpcException = (RpcException) result.getException();
if (rpcException.isBiz()) {
throw rpcException;
} else {
result = doMockInvoke(invocation, rpcException);
}
}
} catch (RpcException e) {
if (e.isBiz()) {
throw e;
}
if (logger.isWarnEnabled()) {
logger.warn(
CLUSTER_FAILED_MOCK_REQUEST,
"failed to mock invoke",
"",
"fail-mock: " + RpcUtils.getMethodName(invocation) + " fail-mock enabled , url : "
+ getUrl(),
e);
}
result = doMockInvoke(invocation, e);
}
}
return result;
}
|
@Test
void testMockInvokerFromOverride_Invoke_Fock_someMethods() {
URL url = URL.valueOf("remote://1.2.3.4/" + IHelloService.class.getName())
.addParameter(
REFER_KEY,
URL.encode(PATH_KEY + "=" + IHelloService.class.getName()
+ "&" + "getSomething.mock=fail:return x"
+ "&" + "getSomething2.mock=force:return y"));
Invoker<IHelloService> cluster = getClusterInvoker(url);
// Configured with mock
RpcInvocation invocation = new RpcInvocation();
invocation.setMethodName("getSomething");
Result ret = cluster.invoke(invocation);
Assertions.assertEquals("something", ret.getValue());
// Force mock is configured for getSomething2, so the mock value is returned without calling the provider
invocation = new RpcInvocation();
invocation.setMethodName("getSomething2");
ret = cluster.invoke(invocation);
Assertions.assertEquals("y", ret.getValue());
// No mock is configured for getSomething3, so the real invoker result is returned
invocation = new RpcInvocation();
invocation.setMethodName("getSomething3");
ret = cluster.invoke(invocation);
Assertions.assertEquals("something3", ret.getValue());
// No mock is configured for sayHello; the underlying service returns null
invocation = new RpcInvocation();
invocation.setMethodName("sayHello");
ret = cluster.invoke(invocation);
Assertions.assertNull(ret.getValue());
}
|
public FEELFnResult<String> invoke(@ParameterName("string") String string, @ParameterName("match") String match) {
if ( string == null ) {
return FEELFnResult.ofError( new InvalidParametersEvent( Severity.ERROR, "string", "cannot be null" ) );
}
if ( match == null ) {
return FEELFnResult.ofError( new InvalidParametersEvent( Severity.ERROR, "match", "cannot be null" ) );
}
int index = string.indexOf( match );
if ( index > 0 ) {
return FEELFnResult.ofResult( string.substring( 0, index ) );
} else {
return FEELFnResult.ofResult( "" );
}
}
|
@Test
void invokeMatchNotExists() {
FunctionTestUtil.assertResult(substringBeforeFunction.invoke("foobar", "oook"), "");
}
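// Two hedged companion cases for the index handling above: a later match returns the
// prefix, and a match at index 0 falls into the else branch and returns the empty string.
@Test
void invokeMatchExists() {
    FunctionTestUtil.assertResult(substringBeforeFunction.invoke("foobar", "bar"), "foo");
}

@Test
void invokeMatchAtStart() {
    FunctionTestUtil.assertResult(substringBeforeFunction.invoke("foobar", "foo"), "");
}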
|
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) {
return decoder.decodeFunctionResult(rawInput, outputParameters);
}
|
@Test
public void testEmptyResultFunctionDecode() {
Function function =
new Function(
"test",
Collections.emptyList(),
Collections.singletonList(new TypeReference<Uint>() {}));
assertEquals(
        Collections.emptyList(),
        FunctionReturnDecoder.decode("0x", function.getOutputParameters()));
}
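// A hedged non-empty counterpart to the test above; the 32-byte word encodes the unsigned
// integer 1 per the ABI, and Uint/BigInteger refer to web3j's org.web3j.abi.datatypes.Uint
// and java.math.BigInteger.
@Test
public void testSingleUintResultDecode() {
    Function function =
            new Function(
                    "test",
                    Collections.emptyList(),
                    Collections.singletonList(new TypeReference<Uint>() {}));
    assertEquals(
            Collections.singletonList(new Uint(BigInteger.ONE)),
            FunctionReturnDecoder.decode(
                    "0x0000000000000000000000000000000000000000000000000000000000000001",
                    function.getOutputParameters()));
}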
|