focal_method | test_case |
---|---|
public SSLContext createContext(ContextAware context) throws NoSuchProviderException,
NoSuchAlgorithmException, KeyManagementException,
UnrecoverableKeyException, KeyStoreException, CertificateException {
SSLContext sslContext = getProvider() != null ?
SSLContext.getInstance(getProtocol(), getProvider())
: SSLContext.getInstance(getProtocol());
context.addInfo("SSL protocol '" + sslContext.getProtocol()
+ "' provider '" + sslContext.getProvider() + "'");
KeyManager[] keyManagers = createKeyManagers(context);
TrustManager[] trustManagers = createTrustManagers(context);
SecureRandom secureRandom = createSecureRandom(context);
sslContext.init(keyManagers, trustManagers, secureRandom);
return sslContext;
}
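The helpers createKeyManagers, createTrustManagers, and createSecureRandom are not shown. As a minimal sketch of the pattern, assuming a SecureRandomFactoryBean-style accessor named getSecureRandom() (an assumption for illustration, not necessarily logback's actual code):
// Hypothetical sketch: fall back to the JVM default when no generator is configured.
private SecureRandom createSecureRandom(ContextAware context)
throws NoSuchAlgorithmException, NoSuchProviderException {
if (getSecureRandom() == null) {
return new SecureRandom(); // platform default
}
return getSecureRandom().createSecureRandom();
}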
|
@Test
public void testCreateDefaultContext() throws Exception {
// should be able to create a context with no configuration at all
assertNotNull(factoryBean.createContext(context));
assertTrue(context.hasInfoMatching(SSL_CONFIGURATION_MESSAGE_PATTERN));
}
|
@Override
public Iterable<Product> findAllProducts(String filter) {
if (filter != null && !filter.isBlank()) {
return this.productRepository.findAllByTitleLikeIgnoreCase("%" + filter + "%");
} else {
return this.productRepository.findAll();
}
}
|
@Test
void findAllProducts_FilterIsSet_ReturnsFilteredProductsList() {
// given
var products = IntStream.range(1, 4)
.mapToObj(i -> new Product(i, "Товар №%d".formatted(i), "Описание товара №%d".formatted(i)))
.toList();
doReturn(products).when(this.productRepository).findAllByTitleLikeIgnoreCase("%товар%");
// when
var result = this.service.findAllProducts("товар");
// then
assertEquals(products, result);
verify(this.productRepository).findAllByTitleLikeIgnoreCase("%товар%");
verifyNoMoreInteractions(this.productRepository);
}
|
public static String subPath(String rootDir, File file) {
try {
return subPath(rootDir, file.getCanonicalPath());
} catch (IOException e) {
throw new IORuntimeException(e);
}
}
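The test below exercises a different overload, FileUtil.subPath(Path, int, int). A minimal sketch of the index handling its assertions imply (negative indices count from the end, swapped bounds are exchanged) — an illustration consistent with the test, not necessarily the library's actual implementation:
// Hypothetical sketch: normalize indices the way the assertions below require.
public static Path subPath(Path path, int fromIndex, int toIndex) {
int len = path.getNameCount();
// Negative indices count from the end of the path
if (fromIndex < 0) {
fromIndex = Math.max(len + fromIndex, 0);
}
if (toIndex < 0) {
toIndex = Math.max(len + toIndex, 0);
}
toIndex = Math.min(toIndex, len);
// Swapped bounds are exchanged rather than rejected
if (toIndex < fromIndex) {
int tmp = fromIndex;
fromIndex = toIndex;
toIndex = tmp;
}
return path.subpath(fromIndex, toIndex);
}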
|
@Test
public void subPathTest() {
final Path path = Paths.get("/aaa/bbb/ccc/ddd/eee/fff");
Path subPath = FileUtil.subPath(path, 5, 4);
assertEquals("eee", subPath.toString());
subPath = FileUtil.subPath(path, 0, 1);
assertEquals("aaa", subPath.toString());
subPath = FileUtil.subPath(path, 1, 0);
assertEquals("aaa", subPath.toString());
// Negative indices count from the end of the path
subPath = FileUtil.subPath(path, -1, 0);
assertEquals("aaa/bbb/ccc/ddd/eee", subPath.toString().replace('\\', '/'));
subPath = FileUtil.subPath(path, -1, Integer.MAX_VALUE);
assertEquals("fff", subPath.toString());
subPath = FileUtil.subPath(path, -1, path.getNameCount());
assertEquals("fff", subPath.toString());
subPath = FileUtil.subPath(path, -2, -3);
assertEquals("ddd", subPath.toString());
}
|
public static String getTypeName(final int type) {
switch (type) {
case START_EVENT_V3:
return "Start_v3";
case STOP_EVENT:
return "Stop";
case QUERY_EVENT:
return "Query";
case ROTATE_EVENT:
return "Rotate";
case INTVAR_EVENT:
return "Intvar";
case LOAD_EVENT:
return "Load";
case NEW_LOAD_EVENT:
return "New_load";
case SLAVE_EVENT:
return "Slave";
case CREATE_FILE_EVENT:
return "Create_file";
case APPEND_BLOCK_EVENT:
return "Append_block";
case DELETE_FILE_EVENT:
return "Delete_file";
case EXEC_LOAD_EVENT:
return "Exec_load";
case RAND_EVENT:
return "RAND";
case XID_EVENT:
return "Xid";
case USER_VAR_EVENT:
return "User var";
case FORMAT_DESCRIPTION_EVENT:
return "Format_desc";
case TABLE_MAP_EVENT:
return "Table_map";
case PRE_GA_WRITE_ROWS_EVENT:
return "Write_rows_event_old";
case PRE_GA_UPDATE_ROWS_EVENT:
return "Update_rows_event_old";
case PRE_GA_DELETE_ROWS_EVENT:
return "Delete_rows_event_old";
case WRITE_ROWS_EVENT_V1:
return "Write_rows_v1";
case UPDATE_ROWS_EVENT_V1:
return "Update_rows_v1";
case DELETE_ROWS_EVENT_V1:
return "Delete_rows_v1";
case BEGIN_LOAD_QUERY_EVENT:
return "Begin_load_query";
case EXECUTE_LOAD_QUERY_EVENT:
return "Execute_load_query";
case INCIDENT_EVENT:
return "Incident";
case HEARTBEAT_LOG_EVENT:
case HEARTBEAT_LOG_EVENT_V2:
return "Heartbeat";
case IGNORABLE_LOG_EVENT:
return "Ignorable";
case ROWS_QUERY_LOG_EVENT:
return "Rows_query";
case WRITE_ROWS_EVENT:
return "Write_rows";
case UPDATE_ROWS_EVENT:
return "Update_rows";
case DELETE_ROWS_EVENT:
return "Delete_rows";
case GTID_LOG_EVENT:
return "Gtid";
case ANONYMOUS_GTID_LOG_EVENT:
return "Anonymous_Gtid";
case PREVIOUS_GTIDS_LOG_EVENT:
return "Previous_gtids";
case PARTIAL_UPDATE_ROWS_EVENT:
return "Update_rows_partial";
case TRANSACTION_CONTEXT_EVENT :
return "Transaction_context";
case VIEW_CHANGE_EVENT :
return "view_change";
case XA_PREPARE_LOG_EVENT :
return "Xa_prepare";
case TRANSACTION_PAYLOAD_EVENT :
return "transaction_payload";
default:
return "Unknown type:" + type;
}
}
|
@Test
public void getTypeNameInputPositiveOutputNotNull7() {
// Arrange
final int type = 10;
// Act
final String actual = LogEvent.getTypeName(type);
// Assert result
Assert.assertEquals("Exec_load", actual);
}
|
@Override
protected Release findLatestActiveRelease(String appId, String clusterName, String namespaceName,
ApolloNotificationMessages clientMessages) {
String messageKey = ReleaseMessageKeyGenerator.generate(appId, clusterName, namespaceName);
String cacheKey = messageKey;
if (bizConfig.isConfigServiceCacheKeyIgnoreCase()) {
cacheKey = cacheKey.toLowerCase();
}
Tracer.logEvent(TRACER_EVENT_CACHE_GET, cacheKey);
ConfigCacheEntry cacheEntry = configCache.getUnchecked(cacheKey);
// cache is outdated
if (clientMessages != null && clientMessages.has(messageKey) &&
clientMessages.get(messageKey) > cacheEntry.getNotificationId()) {
// invalidate the cache and try to load from the db again
invalidate(cacheKey);
cacheEntry = configCache.getUnchecked(cacheKey);
}
return cacheEntry.getRelease();
}
|
@Test
public void testFindLatestActiveReleaseWithIrrelevantMessages() throws Exception {
long someNewNotificationId = someNotificationId + 1;
String someIrrelevantKey = "someIrrelevantKey";
when(releaseMessageService.findLatestReleaseMessageForMessages(Lists.newArrayList(someKey)))
.thenReturn(someReleaseMessage);
when(releaseService.findLatestActiveRelease(someAppId, someClusterName, someNamespaceName))
.thenReturn(someRelease);
when(someReleaseMessage.getId()).thenReturn(someNotificationId);
Release release = configServiceWithCache.findLatestActiveRelease(someAppId, someClusterName, someNamespaceName,
someNotificationMessages);
Release stillOldRelease = configServiceWithCache.findLatestActiveRelease(someAppId, someClusterName,
someNamespaceName, someNotificationMessages);
someNotificationMessages.put(someIrrelevantKey, someNewNotificationId);
Release shouldStillBeOldRelease = configServiceWithCache.findLatestActiveRelease(someAppId, someClusterName,
someNamespaceName, someNotificationMessages);
assertEquals(someRelease, release);
assertEquals(someRelease, stillOldRelease);
assertEquals(someRelease, shouldStillBeOldRelease);
verify(releaseMessageService, times(1)).findLatestReleaseMessageForMessages(Lists.newArrayList(someKey));
verify(releaseService, times(1)).findLatestActiveRelease(someAppId, someClusterName, someNamespaceName);
}
|
public static Method getApplyMethod(ScalarFn scalarFn) {
Class<? extends ScalarFn> clazz = scalarFn.getClass();
Collection<Method> matches =
ReflectHelpers.declaredMethodsWithAnnotation(
ScalarFn.ApplyMethod.class, clazz, ScalarFn.class);
if (matches.isEmpty()) {
throw new IllegalArgumentException(
String.format(
"No method annotated with @%s found in class %s.",
ScalarFn.ApplyMethod.class.getSimpleName(), clazz.getName()));
}
// If we have at least one match, then either it should be the only match
// or it should be an extension of the other matches (which came from parent
// classes).
Method first = matches.iterator().next();
for (Method other : matches) {
if (!first.getName().equals(other.getName())
|| !Arrays.equals(first.getParameterTypes(), other.getParameterTypes())) {
throw new IllegalArgumentException(
String.format(
"Found multiple methods annotated with @%s. [%s] and [%s]",
ScalarFn.ApplyMethod.class.getSimpleName(),
ReflectHelpers.formatMethod(first),
ReflectHelpers.formatMethod(other)));
}
}
// Method must be public.
if ((first.getModifiers() & Modifier.PUBLIC) == 0) {
throw new IllegalArgumentException(
String.format("Method %s is not public.", ReflectHelpers.formatMethod(first)));
}
return first;
}
|
@Test
public void testDifferentMethodSignatureThrowsIllegalArgumentException() {
thrown.expect(instanceOf(IllegalArgumentException.class));
thrown.expectMessage("Found multiple methods annotated with @ApplyMethod.");
ScalarFnReflector.getApplyMethod(new IncrementFnDifferentSignature());
}
|
public static PersistenceSchema from(
final List<? extends SimpleColumn> columns,
final SerdeFeatures features
) {
return new PersistenceSchema(columns, features);
}
|
@Test(expected = IllegalArgumentException.class)
public void shouldThrowOnUnwrapIfMultipleFields() {
PersistenceSchema
.from(MULTI_COLUMN, SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES));
}
|
public String getRestfulArtifactUrl(JobIdentifier jobIdentifier, String filePath) {
return format("/files/%s", jobIdentifier.artifactLocator(filePath));
}
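The test below expects "file" and "/file" to produce the same URL, which implies JobIdentifier.artifactLocator strips a leading slash. A minimal sketch of that behavior, with field names such as pipelineName assumed for illustration (GoCD's actual implementation may differ):
// Hypothetical sketch: build the locator and drop any leading slash from filePath.
public String artifactLocator(String filePath) {
String normalized = filePath.startsWith("/") ? filePath.substring(1) : filePath;
return String.format("%s/%s/%s/%s/%s/%s",
pipelineName, pipelineLabel, stageName, stageCounter, buildName, normalized);
}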
|
@Test
public void shouldReturnProperDownloadUrl() {
String downloadUrl1 = urlService.getRestfulArtifactUrl(jobIdentifier, "file");
String downloadUrl2 = urlService.getRestfulArtifactUrl(jobIdentifier, "/file");
assertThat(downloadUrl1, is("/files/pipelineName/LATEST/stageName/LATEST/buildName/file"));
assertThat(downloadUrl1, is(downloadUrl2));
}
|
@Override
public int compareTo(SemanticVersion o) {
int cmp;
cmp = compareIntegers(major, o.major);
if (cmp != 0) {
return cmp;
}
cmp = compareIntegers(minor, o.minor);
if (cmp != 0) {
return cmp;
}
cmp = compareIntegers(patch, o.patch);
if (cmp != 0) {
return cmp;
}
cmp = compareBooleans(o.prerelease, prerelease);
if (cmp != 0) {
return cmp;
}
if (pre != null) {
if (o.pre != null) {
return pre.compareTo(o.pre);
} else {
return -1;
}
} else if (o.pre != null) {
return 1;
}
return 0;
}
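The helpers compareIntegers and compareBooleans are not shown; minimal sketches consistent with the ordering the test below asserts (a release sorts after a prerelease), assuming plain JDK delegation:
// Hypothetical sketch of the integer helper, assuming JDK delegation.
private static int compareIntegers(int a, int b) {
return Integer.compare(a, b);
}
// Called as compareBooleans(o.prerelease, prerelease): when the other version is a
// prerelease (true) and this one is not (false), the result is positive, so the
// release compares greater than the prerelease, matching the test.
private static int compareBooleans(boolean a, boolean b) {
return Boolean.compare(a, b);
}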
|
@Test
public void testCompare() {
assertTrue(new SemanticVersion(1, 8, 1).compareTo(new SemanticVersion(1, 8, 1)) == 0);
assertTrue(new SemanticVersion(1, 8, 0).compareTo(new SemanticVersion(1, 8, 1)) < 0);
assertTrue(new SemanticVersion(1, 8, 2).compareTo(new SemanticVersion(1, 8, 1)) > 0);
assertTrue(new SemanticVersion(1, 7, 0).compareTo(new SemanticVersion(1, 8, 0)) < 0);
assertTrue(new SemanticVersion(1, 9, 0).compareTo(new SemanticVersion(1, 8, 0)) > 0);
assertTrue(new SemanticVersion(0, 0, 0).compareTo(new SemanticVersion(1, 0, 0)) < 0);
assertTrue(new SemanticVersion(2, 0, 0).compareTo(new SemanticVersion(1, 0, 0)) > 0);
assertTrue(new SemanticVersion(1, 8, 100).compareTo(new SemanticVersion(1, 9, 0)) < 0);
assertTrue(new SemanticVersion(1, 8, 0).compareTo(new SemanticVersion(1, 8, 0, true)) > 0);
assertTrue(new SemanticVersion(1, 8, 0, true).compareTo(new SemanticVersion(1, 8, 0, true)) == 0);
assertTrue(new SemanticVersion(1, 8, 0, true).compareTo(new SemanticVersion(1, 8, 0)) < 0);
}
|
@Override
public KStream<K, V> repartition() {
return doRepartition(Repartitioned.as(null));
}
|
@Test
public void shouldNotAllowNullRepartitionedOnRepartition() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.repartition(null));
assertThat(exception.getMessage(), equalTo("repartitioned can't be null"));
}
|
public String getParameter(String key) {
return urlParam.getParameter(key);
}
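Note the test exercises a typed overload, getParameter(String, Class), rather than the raw String accessor above. A minimal sketch of how such an overload might convert the raw value (an illustration, not Dubbo's actual code):
// Hypothetical sketch: fetch the raw value and convert it to the requested type.
@SuppressWarnings("unchecked")
public <T> T getParameter(String key, Class<T> type) {
String value = getParameter(key);
if (value == null) {
return null;
}
if (type == Integer.class) {
return (T) Integer.valueOf(value);
}
if (type == Boolean.class) {
return (T) Boolean.valueOf(value);
}
return (T) value;
}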
|
@Test
void testGetParameter() {
URL url = URL.valueOf("http://127.0.0.1:8080/path?i=1&b=false");
assertEquals(Integer.valueOf(1), url.getParameter("i", Integer.class));
assertEquals(Boolean.FALSE, url.getParameter("b", Boolean.class));
}
|
@Override
public String getAuthenticationMethodName() {
return PostgreSQLAuthenticationMethod.MD5.getMethodName();
}
|
@Test
void assertAuthenticationMethodName() {
assertThat(new PostgreSQLMD5PasswordAuthenticator().getAuthenticationMethodName(), is("md5"));
}
|
public ProtocolBuilder payload(Integer payload) {
this.payload = payload;
return getThis();
}
|
@Test
void payload() {
ProtocolBuilder builder = new ProtocolBuilder();
builder.payload(40);
Assertions.assertEquals(40, builder.build().getPayload());
}
|
@Override
public SqlRequest refactor(QueryParamEntity entity, Object... args) {
if (injector == null) {
initInjector();
}
return injector.refactor(entity, args);
}
|
@Test
void testTableFunctionJoin() {
QueryAnalyzerImpl analyzer = new QueryAnalyzerImpl(
database,
"select t1.*,t2.key from s_test t1 left join json_each_text('{\"name\":\"test\"}') t2 on t2.key='test' and t2.value='test1'");
SqlRequest request = analyzer
.refactor(QueryParamEntity.of().and("t2.key", "like", "test%"), 1);
System.out.println(request);
}
|
public void notifyKvStateRegistered(
JobVertexID jobVertexId,
KeyGroupRange keyGroupRange,
String registrationName,
KvStateID kvStateId,
InetSocketAddress kvStateServerAddress) {
KvStateLocation location = lookupTable.get(registrationName);
if (location == null) {
// First registration for this operator, create the location info
ExecutionJobVertex vertex = jobVertices.get(jobVertexId);
if (vertex != null) {
int parallelism = vertex.getMaxParallelism();
location = new KvStateLocation(jobId, jobVertexId, parallelism, registrationName);
lookupTable.put(registrationName, location);
} else {
throw new IllegalArgumentException("Unknown JobVertexID " + jobVertexId);
}
}
// Duplicated name if vertex IDs don't match
if (!location.getJobVertexId().equals(jobVertexId)) {
IllegalStateException duplicate =
new IllegalStateException(
"Registration name clash. KvState with name '"
+ registrationName
+ "' has already been registered by another operator ("
+ location.getJobVertexId()
+ ").");
ExecutionJobVertex vertex = jobVertices.get(jobVertexId);
if (vertex != null) {
vertex.fail(new SuppressRestartsException(duplicate));
}
throw duplicate;
}
location.registerKvState(keyGroupRange, kvStateId, kvStateServerAddress);
}
|
@Test
void testRegisterDuplicateName() throws Exception {
ExecutionJobVertex[] vertices =
new ExecutionJobVertex[] {createJobVertex(32), createJobVertex(13)};
Map<JobVertexID, ExecutionJobVertex> vertexMap = createVertexMap(vertices);
String registrationName = "duplicated-name";
KvStateLocationRegistry registry = new KvStateLocationRegistry(new JobID(), vertexMap);
// First operator registers
registry.notifyKvStateRegistered(
vertices[0].getJobVertexId(),
new KeyGroupRange(0, 0),
registrationName,
new KvStateID(),
new InetSocketAddress(InetAddress.getLocalHost(), 12328));
assertThatThrownBy(
() ->
// Second operator registers same name
registry.notifyKvStateRegistered(
vertices[1].getJobVertexId(),
new KeyGroupRange(0, 0),
registrationName,
new KvStateID(),
new InetSocketAddress(InetAddress.getLocalHost(), 12032)))
.withFailMessage("Did not throw expected Exception after duplicated name")
.isInstanceOf(IllegalStateException.class);
}
|
@Operation(summary = "verifyTenantCode", description = "VERIFY_TENANT_CODE_NOTES")
@Parameters({
@Parameter(name = "tenantCode", description = "TENANT_CODE", required = true, schema = @Schema(implementation = String.class))
})
@GetMapping(value = "/verify-code")
@ResponseStatus(HttpStatus.OK)
@ApiException(VERIFY_OS_TENANT_CODE_ERROR)
public Result<Boolean> verifyTenantCode(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "tenantCode") String tenantCode) {
tenantService.verifyTenantCode(tenantCode);
return Result.success(true);
}
|
@Test
public void testVerifyTenantCode() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("tenantCode", "cxc_test");
MvcResult mvcResult = mockMvc.perform(get("/tenants/verify-code")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assertions.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
|
private Mono<ServerResponse> fetchThemeConfig(ServerRequest request) {
return themeNameInPathVariableOrActivated(request)
.flatMap(themeName -> client.fetch(Theme.class, themeName))
.mapNotNull(theme -> theme.getSpec().getConfigMapName())
.flatMap(configMapName -> client.fetch(ConfigMap.class, configMapName))
.flatMap(configMap -> ServerResponse.ok().bodyValue(configMap));
}
|
@Test
void fetchThemeConfig() {
Theme theme = new Theme();
theme.setMetadata(new Metadata());
theme.getMetadata().setName("fake");
theme.setSpec(new Theme.ThemeSpec());
theme.getSpec().setConfigMapName("fake-config");
when(client.fetch(eq(ConfigMap.class), eq("fake-config")))
.thenReturn(Mono.just(new ConfigMap()));
when(client.fetch(eq(Theme.class), eq("fake"))).thenReturn(Mono.just(theme));
webTestClient.get()
.uri("/themes/fake/config")
.exchange()
.expectStatus().isOk();
verify(client).fetch(eq(ConfigMap.class), eq("fake-config"));
verify(client).fetch(eq(Theme.class), eq("fake"));
}
|
public void reset() {
cb.clear();
}
|
@Test
public void reset() {
cyclicBufferAppender.append("foobar");
assertEquals(1, cyclicBufferAppender.getLength());
cyclicBufferAppender.reset();
assertEquals(0, cyclicBufferAppender.getLength());
}
|
@SuppressWarnings("dereference.of.nullable")
public static PTransform<PCollection<Failure>, PDone> getDlqTransform(String fullConfig) {
List<String> strings = Splitter.on(":").limit(2).splitToList(fullConfig);
checkArgument(
strings.size() == 2, "Invalid config, must start with `identifier:`. %s", fullConfig);
String key = strings.get(0);
String config = strings.get(1).trim();
GenericDlqProvider provider = PROVIDERS.get(key);
checkArgument(
provider != null, "Invalid config, no DLQ provider exists with identifier `%s`.", key);
return provider.newDlqTransform(config);
}
|
@Test
@Category(NeedsRunner.class)
public void testDlq() {
StoringDlqProvider.reset();
Failure failure1 = Failure.newBuilder().setError("a").setPayload("b".getBytes(UTF_8)).build();
Failure failure2 = Failure.newBuilder().setError("c").setPayload("d".getBytes(UTF_8)).build();
p.apply(Create.of(failure1, failure2))
.apply(
GenericDlq.getDlqTransform(
StoringDlqProvider.ID + ": " + StoringDlqProvider.CONFIG + " "));
p.run().waitUntilFinish();
assertThat(StoringDlqProvider.getFailures(), CoreMatchers.hasItems(failure1, failure2));
}
|
@Override
public String getProviderName() {
return findManagedInstanceService()
.map(ManagedInstanceService::getProviderName)
.orElseThrow(() -> NOT_MANAGED_INSTANCE_EXCEPTION);
}
|
@Test
public void getProviderName_whenManaged_shouldReturnName() {
DelegatingManagedServices managedInstanceService = new DelegatingManagedServices(Set.of(new AlwaysManagedInstanceService()));
assertThat(managedInstanceService.getProviderName()).isEqualTo("Always");
}
|
void printJobQueueInfo(JobQueueInfo jobQueueInfo, Writer writer)
throws IOException {
printJobQueueInfo(jobQueueInfo, writer, "");
}
|
@Test
@SuppressWarnings("deprecation")
public void testPrintJobQueueInfo() throws IOException {
JobQueueClient queueClient = new JobQueueClient();
JobQueueInfo parent = new JobQueueInfo();
JobQueueInfo child = new JobQueueInfo();
JobQueueInfo grandChild = new JobQueueInfo();
child.addChild(grandChild);
parent.addChild(child);
grandChild.setQueueName("GrandChildQueue");
ByteArrayOutputStream bbos = new ByteArrayOutputStream();
PrintWriter writer = new PrintWriter(bbos);
queueClient.printJobQueueInfo(parent, writer);
Assert.assertTrue("printJobQueueInfo did not print grandchild's name",
bbos.toString().contains("GrandChildQueue"));
}
|
@Override
public void demote(NodeId instance, DeviceId deviceId) {
checkNotNull(instance, NODE_ID_NULL);
checkNotNull(deviceId, DEVICE_ID_NULL);
checkPermission(CLUSTER_WRITE);
store.demote(instance, deviceId);
}
|
@Test
public void demote() {
mgr.setRole(NID1, DID1, MASTER);
mgr.setRole(NID2, DID1, STANDBY);
mgr.setRole(NID3, DID1, STANDBY);
List<NodeId> stdbys = Lists.newArrayList(NID2, NID3);
MastershipInfo mastershipInfo = mgr.getMastershipFor(DID1);
assertTrue(mastershipInfo.master().isPresent());
assertEquals("wrong role", NID1, mastershipInfo.master().get());
assertEquals("wrong backups", stdbys, mastershipInfo.backups());
// No effect, it is the master
mgr.demote(NID1, DID1);
assertEquals("wrong role", NID1, mastershipInfo.master().get());
assertEquals("wrong backups", stdbys, mastershipInfo.backups());
// No effect, it is not part of the mastership
mgr.demote(NID4, DID1);
assertEquals("wrong role", NID1, mastershipInfo.master().get());
assertEquals("wrong backups", stdbys, mastershipInfo.backups());
// Demote N2
mgr.demote(NID2, DID1);
stdbys = Lists.newArrayList(NID3, NID2);
mastershipInfo = mgr.getMastershipFor(DID1);
assertEquals("wrong role", NID1, mastershipInfo.master().get());
assertEquals("wrong backups", stdbys, mastershipInfo.backups());
}
|
@Override
public void upgrade() {
if (hasBeenRunSuccessfully()) {
LOG.debug("Migration already completed.");
return;
}
final Map<String, String> savedSearchToViewsMap = new HashMap<>();
final Map<View, Search> newViews = this.savedSearchService.streamAll()
.map(savedSearch -> {
final Map.Entry<View, Search> newView = migrateSavedSearch(savedSearch);
savedSearchToViewsMap.put(savedSearch.id(), newView.getKey().id());
return newView;
})
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
newViews.forEach((view, search) -> {
viewService.save(view);
searchService.save(search);
});
final MigrationCompleted migrationCompleted = MigrationCompleted.create(savedSearchToViewsMap);
writeMigrationCompleted(migrationCompleted);
}
|
@Test
@MongoDBFixtures("sample_saved_search_absolute_with_interval_field.json")
public void migrateSavedSearchAbsoluteWithIntervalField() throws Exception {
this.migration.upgrade();
final MigrationCompleted migrationCompleted = captureMigrationCompleted();
assertThat(migrationCompleted.savedSearchIds())
.containsExactly(new AbstractMap.SimpleEntry<>("5de660b7b2d44b5813c1d7f6", "000000020000000000000000"));
assertViewServiceCreatedViews(1, resourceFile("sample_saved_search_absolute_with_interval_field-expected_views.json"));
assertSearchServiceCreated(1, resourceFile("sample_saved_search_absolute_with_interval_field-expected_searches.json"));
}
|
public boolean isDefault() {
return (state & MASK_DEFAULT) != 0;
}
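MASK_DEFAULT is not shown; given that the test constructs LacpState with 0x40 and expects isDefault() to return true, the constant is presumably the 0x40 bit (an assumption based on the test; 0x40 is the "defaulted" bit of the LACP state octet):
// Hypothetical: the DEFAULTED bit of the LACP actor/partner state octet.
private static final byte MASK_DEFAULT = 0x40;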
|
@Test
public void isDefault() {
LacpState state = new LacpState((byte) 0x40);
assertTrue(state.isDefault());
}
|
@Override
public Set<ReservationAllocation> getReservationsAtTime(long tick) {
return getReservations(null, new ReservationInterval(tick, tick), "");
}
|
@Test
public void testGetReservationsAtTime() {
Plan plan = new InMemoryPlan(queueMetrics, policy, agent, totalCapacity, 1L,
resCalc, minAlloc, maxAlloc, planName, replanner, true, context);
ReservationId reservationID =
ReservationSystemTestUtil.getNewReservationId();
int[] alloc = { 10, 10, 10, 10, 10, 10 };
int start = 100;
ReservationAllocation rAllocation =
createReservationAllocation(reservationID, start, alloc);
Assert.assertNull(plan.getReservationById(reservationID));
try {
plan.addReservation(rAllocation, false);
} catch (PlanningException e) {
Assert.fail(e.getMessage());
}
Set<ReservationAllocation> rAllocations =
plan.getReservationsAtTime(rAllocation.getStartTime());
Assert.assertTrue(rAllocations.size() == 1);
Assert.assertTrue(rAllocation
.compareTo((ReservationAllocation) rAllocations.toArray()[0]) == 0);
}
|
@Override
public Type classify(final Throwable e) {
Type type = Type.UNKNOWN;
if (e instanceof KsqlSerializationException
|| (e instanceof StreamsException
&& (ExceptionUtils.indexOfThrowable(e, KsqlSerializationException.class) != -1))) {
if (!hasInternalTopicPrefix(e)) {
type = Type.USER;
LOG.info(
"Classified error as USER error based on schema mismatch. Query ID: {} Exception: {}",
queryId,
e);
}
}
return type;
}
|
@Test
public void shouldClassifyWrappedKsqlSerializationExceptionWithUserTopicAsUserError() {
// Given:
final String topic = "foo.bar";
final Exception e = new StreamsException(
new KsqlSerializationException(
topic,
"Error serializing message to topic: " + topic,
new DataException("Struct schemas do not match.")));
// When:
final Type type = new KsqlSerializationClassifier("").classify(e);
// Then:
assertThat(type, is(Type.USER));
}
|
@Override
public void preflight(Path file) throws BackgroundException {
assumeRole(file, WRITEPERMISSION);
}
|
@Test
public void testPreflightFileAccessGrantedCustomProps() throws Exception {
final Path file = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
file.setAttributes(file.attributes().withAcl(new Acl(new Acl.CanonicalUser(), WRITEPERMISSION)));
new CteraWriteFeature(session).preflight(file);
// assert no fail
}
|
public static RemotingServer bind(String url, ChannelHandler... handler) throws RemotingException {
return bind(URL.valueOf(url), handler);
}
|
@Test
void testBind() throws RemotingException {
Assertions.assertThrows(RuntimeException.class, () -> Transporters.bind((String) null));
Assertions.assertThrows(RuntimeException.class, () -> Transporters.bind((URL) null));
Assertions.assertThrows(RuntimeException.class, () -> Transporters.bind(url));
Assertions.assertNotNull(Transporters.bind(url, channel));
Assertions.assertNotNull(Transporters.bind(url, channel, channel));
}
|
public void putKVConfigValue(final String namespace, final String key, final String value, final long timeoutMillis)
throws RemotingException, MQClientException, InterruptedException {
PutKVConfigRequestHeader requestHeader = new PutKVConfigRequestHeader();
requestHeader.setNamespace(namespace);
requestHeader.setKey(key);
requestHeader.setValue(value);
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.PUT_KV_CONFIG, requestHeader);
List<String> nameServerAddressList = this.remotingClient.getNameServerAddressList();
if (nameServerAddressList != null) {
RemotingCommand errResponse = null;
for (String namesrvAddr : nameServerAddressList) {
RemotingCommand response = this.remotingClient.invokeSync(namesrvAddr, request, timeoutMillis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
break;
}
default:
errResponse = response;
}
}
if (errResponse != null) {
throw new MQClientException(errResponse.getCode(), errResponse.getRemark());
}
}
}
|
@Test
public void testPutKVConfigValue() throws RemotingException, InterruptedException, MQClientException {
mockInvokeSync();
mqClientAPI.putKVConfigValue("", "", "", defaultTimeout);
}
|
public static void tryCloseConnections(HazelcastInstance hazelcastInstance) {
if (hazelcastInstance == null) {
return;
}
HazelcastInstanceImpl factory = (HazelcastInstanceImpl) hazelcastInstance;
closeSockets(factory);
}
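The test passes an instance whose internals throw, yet expects no exception to escape, which implies closeSockets swallows errors. A minimal sketch of that contract (the exact internal shutdown call is an assumption and varies by Hazelcast version):
// Hypothetical sketch: best-effort teardown that never propagates errors.
private static void closeSockets(HazelcastInstanceImpl factory) {
try {
factory.shutdown(); // stand-in for the version-specific socket shutdown call
} catch (Throwable ignored) {
// teardown is best effort: any Throwable is deliberately swallowed
}
}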
|
@Test
public void testTryCloseConnections_shouldDoNothingWhenThrowableIsThrown() {
tryCloseConnections(hazelcastInstanceThrowsException);
}
|
public static Collection<SubquerySegment> getSubquerySegments(final SelectStatement selectStatement) {
List<SubquerySegment> result = new LinkedList<>();
extractSubquerySegments(result, selectStatement);
return result;
}
|
@Test
void assertGetSubquerySegmentsInWhere() {
SelectStatement subquerySelectStatement = mock(SelectStatement.class);
when(subquerySelectStatement.getFrom()).thenReturn(Optional.of(new SimpleTableSegment(new TableNameSegment(73, 99, new IdentifierValue("t_order")))));
ProjectionsSegment subqueryProjections = new ProjectionsSegment(59, 66);
when(subquerySelectStatement.getProjections()).thenReturn(subqueryProjections);
subqueryProjections.getProjections().add(new ColumnProjectionSegment(new ColumnSegment(59, 66, new IdentifierValue("order_id"))));
ColumnSegment subqueryWhereLeft = new ColumnSegment(87, 92, new IdentifierValue("status"));
LiteralExpressionSegment subqueryWhereRight = new LiteralExpressionSegment(96, 99, "OK");
WhereSegment subqueryWhereSegment = new WhereSegment(81, 99, new BinaryOperationExpression(87, 99, subqueryWhereLeft, subqueryWhereRight, "=", "status = 'OK'"));
when(subquerySelectStatement.getWhere()).thenReturn(Optional.of(subqueryWhereSegment));
SelectStatement selectStatement = mock(SelectStatement.class);
when(selectStatement.getFrom()).thenReturn(Optional.of(new SimpleTableSegment(new TableNameSegment(21, 32, new IdentifierValue("t_order_item")))));
when(selectStatement.getProjections()).thenReturn(new ProjectionsSegment(7, 14));
selectStatement.getProjections().getProjections().add(new ColumnProjectionSegment(new ColumnSegment(7, 14, new IdentifierValue("order_id"))));
ColumnSegment left = new ColumnSegment(40, 47, new IdentifierValue("order_id"));
SubqueryExpressionSegment right = new SubqueryExpressionSegment(new SubquerySegment(51, 100, subquerySelectStatement, ""));
WhereSegment whereSegment = new WhereSegment(34, 100, new BinaryOperationExpression(40, 100, left, right, "=", "order_id = (SELECT order_id FROM t_order WHERE status = 'OK')"));
when(selectStatement.getWhere()).thenReturn(Optional.of(whereSegment));
Collection<SubquerySegment> actual = SubqueryExtractUtils.getSubquerySegments(selectStatement);
assertThat(actual.size(), is(1));
assertThat(actual.iterator().next(), is(right.getSubquery()));
}
|
@Override
public T add(K name, V value) {
validateName(nameValidator, true, name);
validateValue(valueValidator, name, value);
checkNotNull(value, "value");
int h = hashingStrategy.hashCode(name);
int i = index(h);
add0(h, i, name, value);
return thisT();
}
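The test exercises the add(Headers) overload rather than the add(name, value) method shown above; the expected IllegalArgumentException implies a self-add guard in that overload. A minimal sketch (an assumption, not Netty's verbatim code):
// Hypothetical sketch of the self-add guard in the add(Headers) overload.
public T add(Headers<? extends K, ? extends V, ?> headers) {
if (headers == this) {
throw new IllegalArgumentException("can't add to itself.");
}
for (Map.Entry<? extends K, ? extends V> entry : headers) {
add(entry.getKey(), entry.getValue());
}
return thisT();
}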
|
@Test
public void testAddSelf() {
final TestDefaultHeaders headers = newInstance();
assertThrows(IllegalArgumentException.class, new Executable() {
@Override
public void execute() {
headers.add(headers);
}
});
}
|
public void validate(ProjectReactor reactor) {
List<String> validationMessages = new ArrayList<>();
for (ProjectDefinition moduleDef : reactor.getProjects()) {
validateModule(moduleDef, validationMessages);
}
if (isBranchFeatureAvailable()) {
branchParamsValidator.validate(validationMessages);
} else {
validateBranchParamsWhenPluginAbsent(validationMessages);
validatePullRequestParamsWhenPluginAbsent(validationMessages);
}
if (!validationMessages.isEmpty()) {
throw MessageException.of("Validation of project failed:\n o " +
String.join("\n o ", validationMessages));
}
}
|
@Test
void fail_when_pull_request_branch_is_specified_but_branch_plugin_not_present() {
ProjectDefinition def = ProjectDefinition.create().setProperty(CoreProperties.PROJECT_KEY_PROPERTY, "foo");
ProjectReactor reactor = new ProjectReactor(def);
when(settings.get(ScannerProperties.PULL_REQUEST_BRANCH)).thenReturn(Optional.of("feature1"));
assertThatThrownBy(() -> underTest.validate(reactor))
.isInstanceOf(MessageException.class)
.hasMessageContaining(format("To use the property \"sonar.pullrequest.branch\" and analyze pull requests, Developer Edition or above is required. See %s for more information.",
LINK_TO_DOC));
}
|
@Override
public V fetch(final K key, final long time) {
Objects.requireNonNull(key, "key can't be null");
final List<ReadOnlyWindowStore<K, V>> stores = provider.stores(storeName, windowStoreType);
for (final ReadOnlyWindowStore<K, V> windowStore : stores) {
try {
final V result = windowStore.fetch(key, time);
if (result != null) {
return result;
}
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException(
"State store is not available anymore and may have been migrated to another instance; " +
"please re-discover its location from the state metadata.");
}
}
return null;
}
|
@Test
public void shouldThrowNPEIfKeyIsNull() {
assertThrows(NullPointerException.class, () -> windowStore.fetch(null, ofEpochMilli(0), ofEpochMilli(0)));
}
|
@Override
public SpringCache getCache(final String name) {
return springCaches.computeIfAbsent(name, n -> {
final Cache<Object, Object> nativeCache = this.nativeCacheManager.getCache(n);
return new SpringCache(nativeCache, reactive);
});
}
|
@Test
public final void getCacheShouldReturnDifferentInstancesForDifferentNames() {
withCacheManager(new CacheManagerCallable(TestCacheManagerFactory.createCacheManager()) {
@Override
public void call() {
// Given
cm.defineConfiguration("thisCache", new ConfigurationBuilder().build());
cm.defineConfiguration("thatCache", new ConfigurationBuilder().build());
final SpringEmbeddedCacheManager objectUnderTest = new SpringEmbeddedCacheManager(cm);
final String firstCacheName = "thisCache";
final String secondCacheName = "thatCache";
// When
final SpringCache firstObtainedSpringCache = objectUnderTest.getCache(firstCacheName);
final SpringCache secondObtainedSpringCache = objectUnderTest.getCache(secondCacheName);
// Then
assertNotSame(
"getCache() should have returned different SpringCache instances for different names",
firstObtainedSpringCache, secondObtainedSpringCache);
}
});
}
|
public boolean isRegisterEnabled() {
return registerEnabled;
}
|
@Test
public void testIsRegisterEnabled() {
assertThat(polarisRegistration1.isRegisterEnabled()).isTrue();
}
|
@UdafFactory(description = "Compute average of column with type Integer.",
aggregateSchema = "STRUCT<SUM integer, COUNT bigint>")
public static TableUdaf<Integer, Struct, Double> averageInt() {
return getAverageImplementation(
0,
STRUCT_INT,
(sum, newValue) -> sum.getInt32(SUM) + newValue,
(sum, count) -> sum.getInt32(SUM) / count,
(sum1, sum2) -> sum1.getInt32(SUM) + sum2.getInt32(SUM),
(sum, valueToUndo) -> sum.getInt32(SUM) - valueToUndo);
}
|
@Test
public void shouldIgnoreNull() {
final TableUdaf<Integer, Struct, Double> udaf = AverageUdaf.averageInt();
Struct agg = udaf.initialize();
final Integer[] values = new Integer[] {1, 1, 1};
for (final int thisValue : values) {
agg = udaf.aggregate(thisValue, agg);
}
agg = udaf.aggregate(null, agg);
final double avg = udaf.map(agg);
assertThat(1.0, equalTo(avg));
}
|
public boolean isValid(String value) {
if (value == null) {
return false;
}
URI uri; // ensure value is a valid URI
try {
uri = new URI(value);
} catch (URISyntaxException e) {
return false;
}
// OK, perform additional validation
String scheme = uri.getScheme();
if (!isValidScheme(scheme)) {
return false;
}
String authority = uri.getRawAuthority();
if ("file".equals(scheme) && (authority == null || "".equals(authority))) { // Special case - file: allows an empty authority
return true; // this is a local file - nothing more to do here
} else if ("file".equals(scheme) && authority != null && authority.contains(":")) {
return false;
} else {
// Validate the authority
if (!isValidAuthority(authority)) {
return false;
}
}
if (!isValidPath(uri.getRawPath())) {
return false;
}
if (!isValidQuery(uri.getRawQuery())) {
return false;
}
if (!isValidFragment(uri.getRawFragment())) {
return false;
}
return true;
}
|
@Test
public void testValidator452() {
UrlValidator urlValidator = new UrlValidator();
assertTrue(urlValidator.isValid("http://[::FFFF:129.144.52.38]:80/index.html"));
}
|
@Override
public Serde<GenericKey> create(
final FormatInfo format,
final PersistenceSchema schema,
final KsqlConfig ksqlConfig,
final Supplier<SchemaRegistryClient> schemaRegistryClientFactory,
final String loggerNamePrefix,
final ProcessingLogContext processingLogContext,
final Optional<TrackedCallback> tracker
) {
return createInner(
format,
schema,
ksqlConfig,
schemaRegistryClientFactory,
loggerNamePrefix,
processingLogContext,
tracker
);
}
|
@Test
public void shouldNotWrapInTrackingSerdeIfNoCallbackProvided() {
// When:
factory.create(format, schema, config, srClientFactory, LOGGER_PREFIX, processingLogCxt,
Optional.empty());
// Then:
verify(innerFactory, never()).wrapInTrackingSerde(any(), any());
}
|
private byte[] readToken(HttpURLConnection conn)
throws IOException, AuthenticationException {
int status = conn.getResponseCode();
if (status == HttpURLConnection.HTTP_OK || status == HttpURLConnection.HTTP_UNAUTHORIZED) {
String authHeader = conn.getHeaderField(WWW_AUTHENTICATE);
if (authHeader == null) {
authHeader = conn.getHeaderField(WWW_AUTHENTICATE.toLowerCase());
}
if (authHeader == null || !authHeader.trim().startsWith(NEGOTIATE)) {
throw new AuthenticationException("Invalid SPNEGO sequence, '" + WWW_AUTHENTICATE +
"' header incorrect: " + authHeader);
}
String negotiation = authHeader.trim().substring((NEGOTIATE + " ").length()).trim();
return base64.decode(negotiation);
}
throw new AuthenticationException("Invalid SPNEGO sequence, status code: " + status);
}
|
@Test(timeout = 60000)
public void testReadToken() throws NoSuchMethodException, IOException, IllegalAccessException,
InvocationTargetException {
KerberosAuthenticator kerberosAuthenticator = new KerberosAuthenticator();
FieldUtils.writeField(kerberosAuthenticator, "base64", new Base64(), true);
Base64 base64 = new Base64();
HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
Mockito.when(conn.getResponseCode()).thenReturn(HttpURLConnection.HTTP_UNAUTHORIZED);
Mockito.when(conn.getHeaderField(KerberosAuthenticator.WWW_AUTHENTICATE))
.thenReturn(KerberosAuthenticator.NEGOTIATE + " " +
Arrays.toString(base64.encode("foobar".getBytes())));
Method method = KerberosAuthenticator.class.getDeclaredMethod("readToken",
HttpURLConnection.class);
method.setAccessible(true);
method.invoke(kerberosAuthenticator, conn); // expecting this not to throw an exception
}
|
@Override
protected void init(@Nonnull Context context) {
topics = new ArrayList<>(topicsConfig.getTopicNames());
for (String topic : topics) {
offsets.put(topic, new long[0]);
}
processorIndex = context.globalProcessorIndex();
totalParallelism = context.totalParallelism();
processingGuarantee = context.processingGuarantee();
consumer = kafkaConsumerFn.apply(context);
if (processingGuarantee == NONE) {
warnWhenInitialOffsetsProvided();
}
}
|
@Test
public void when_customProjection_then_used() throws Exception {
// When
var processor = createProcessor(properties(), 2, r -> r.key() + "=" + r.value(), 10_000);
TestOutbox outbox = new TestOutbox(new int[]{10}, 10);
processor.init(outbox, new TestProcessorContext());
kafkaTestSupport.produceSync(topic1Name, 0, "0");
// Then
assertEquals("0=0", consumeEventually(processor, outbox));
}
|
@Override
public String getActionDescription() {
return "retrieve authentication method for " + registryEndpointRequestProperties.getServerUrl();
}
|
@Test
public void testGetActionDescription() {
Assert.assertEquals(
"retrieve authentication method for someServerUrl",
testAuthenticationMethodRetriever.getActionDescription());
}
|
public static Snowflake getSnowflake(long workerId, long datacenterId) {
return Singleton.get(Snowflake.class, workerId, datacenterId);
}
|
@Test
public void getSnowflakeTest() {
Snowflake snowflake = IdUtil.getSnowflake(1, 1);
long id = snowflake.nextId();
assertTrue(id > 0);
}
|
@Override
public CompletableFuture<TopicStatsTable> getTopicStatsInfo(String address,
GetTopicStatsInfoRequestHeader requestHeader, long timeoutMillis) {
CompletableFuture<TopicStatsTable> future = new CompletableFuture<>();
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_TOPIC_STATS_INFO, requestHeader);
remotingClient.invoke(address, request, timeoutMillis).thenAccept(response -> {
if (response.getCode() == ResponseCode.SUCCESS) {
TopicStatsTable topicStatsTable = TopicStatsTable.decode(response.getBody(), TopicStatsTable.class);
future.complete(topicStatsTable);
} else {
log.warn("getTopicStatsInfo getResponseCommand failed, {} {}, header={}", response.getCode(), response.getRemark(), requestHeader);
future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark()));
}
});
return future;
}
|
@Test
public void assertGetTopicStatsInfoWithSuccess() throws Exception {
TopicStatsTable responseBody = new TopicStatsTable();
setResponseSuccess(RemotingSerializable.encode(responseBody));
GetTopicStatsInfoRequestHeader requestHeader = mock(GetTopicStatsInfoRequestHeader.class);
CompletableFuture<TopicStatsTable> actual = mqClientAdminImpl.getTopicStatsInfo(defaultBrokerAddr, requestHeader, defaultTimeout);
TopicStatsTable topicStatsTable = actual.get();
assertNotNull(topicStatsTable);
assertEquals(0, topicStatsTable.getOffsetTable().size());
}
|
@Override
public RuleNodePath getRuleNodePath() {
return INSTANCE;
}
|
@Test
void assertNew() {
RuleNodePathProvider ruleNodePathProvider = new ReadwriteSplittingRuleNodePathProvider();
RuleNodePath actualRuleNodePath = ruleNodePathProvider.getRuleNodePath();
assertThat(actualRuleNodePath.getNamedItems().size(), is(2));
assertTrue(actualRuleNodePath.getNamedItems().containsKey(ReadwriteSplittingRuleNodePathProvider.DATA_SOURCE_GROUPS));
assertTrue(actualRuleNodePath.getNamedItems().containsKey(ReadwriteSplittingRuleNodePathProvider.LOAD_BALANCERS));
assertTrue(actualRuleNodePath.getUniqueItems().isEmpty());
assertThat(actualRuleNodePath.getRoot().getRuleType(), is(ReadwriteSplittingRuleNodePathProvider.RULE_TYPE));
}
|
public Set<MessageOutput> getOutputsForMessage(final Message msg) {
final Set<MessageOutput> result = getStreamOutputsForMessage(msg);
result.add(defaultMessageOutput);
return result;
}
|
@Test
public void testAlwaysIncludeDefaultOutput() throws Exception {
final Message message = mock(Message.class);
final OutputRouter outputRouter = new OutputRouter(defaultMessageOutput, outputRegistry);
final Collection<MessageOutput> messageOutputs = outputRouter.getOutputsForMessage(message);
assertEquals(1, messageOutputs.size());
assertTrue(messageOutputs.contains(defaultMessageOutput));
}
|
public void start(long period, TimeUnit unit) {
start(period, period, unit);
}
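The delegate three-argument start is not shown; the test below expects a second call to fail with IllegalArgumentException, so presumably it guards a started flag. A minimal sketch of that guard (field and message names are assumptions):
// Hypothetical sketch of the guard in the three-argument overload.
public synchronized void start(long initialDelay, long period, TimeUnit unit) {
if (started) {
throw new IllegalArgumentException("Reporter already started");
}
started = true;
executor.scheduleWithFixedDelay(this::report, initialDelay, period, unit);
}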
|
@Test(expected = IllegalArgumentException.class)
public void shouldDisallowToStartReportingMultipleTimesOnCustomExecutor() throws Exception {
reporterWithCustomExecutor.start(200, TimeUnit.MILLISECONDS);
reporterWithCustomExecutor.start(200, TimeUnit.MILLISECONDS);
}
|
public Expression rewrite(final Expression expression) {
return new ExpressionTreeRewriter<>(new OperatorPlugin()::process)
.rewrite(expression, null);
}
|
@Test
public void shouldNotReplaceBetweenExpressionOnNonString() {
// Given:
final Expression predicate = getPredicate(
"SELECT * FROM orders where ROWTIME BETWEEN 123456 AND 147258;");
// When:
final Expression rewritten = rewriter.rewrite(predicate);
// Then:
assertThat(rewritten, is(predicate));
}
|
@SuppressWarnings("unchecked")
public <IN, OUT> AvroDatumConverter<IN, OUT> create(Class<IN> inputClass) {
boolean isMapOnly = ((JobConf) getConf()).getNumReduceTasks() == 0;
if (AvroKey.class.isAssignableFrom(inputClass)) {
Schema schema;
if (isMapOnly) {
schema = AvroJob.getMapOutputKeySchema(getConf());
if (null == schema) {
schema = AvroJob.getOutputKeySchema(getConf());
}
} else {
schema = AvroJob.getOutputKeySchema(getConf());
}
if (null == schema) {
throw new IllegalStateException("Writer schema for output key was not set. Use AvroJob.setOutputKeySchema().");
}
return (AvroDatumConverter<IN, OUT>) new AvroWrapperConverter(schema);
}
if (AvroValue.class.isAssignableFrom(inputClass)) {
Schema schema;
if (isMapOnly) {
schema = AvroJob.getMapOutputValueSchema(getConf());
if (null == schema) {
schema = AvroJob.getOutputValueSchema(getConf());
}
} else {
schema = AvroJob.getOutputValueSchema(getConf());
}
if (null == schema) {
throw new IllegalStateException(
"Writer schema for output value was not set. Use AvroJob.setOutputValueSchema().");
}
return (AvroDatumConverter<IN, OUT>) new AvroWrapperConverter(schema);
}
if (BooleanWritable.class.isAssignableFrom(inputClass)) {
return (AvroDatumConverter<IN, OUT>) new BooleanWritableConverter();
}
if (BytesWritable.class.isAssignableFrom(inputClass)) {
return (AvroDatumConverter<IN, OUT>) new BytesWritableConverter();
}
if (ByteWritable.class.isAssignableFrom(inputClass)) {
return (AvroDatumConverter<IN, OUT>) new ByteWritableConverter();
}
if (DoubleWritable.class.isAssignableFrom(inputClass)) {
return (AvroDatumConverter<IN, OUT>) new DoubleWritableConverter();
}
if (FloatWritable.class.isAssignableFrom(inputClass)) {
return (AvroDatumConverter<IN, OUT>) new FloatWritableConverter();
}
if (IntWritable.class.isAssignableFrom(inputClass)) {
return (AvroDatumConverter<IN, OUT>) new IntWritableConverter();
}
if (LongWritable.class.isAssignableFrom(inputClass)) {
return (AvroDatumConverter<IN, OUT>) new LongWritableConverter();
}
if (NullWritable.class.isAssignableFrom(inputClass)) {
return (AvroDatumConverter<IN, OUT>) new NullWritableConverter();
}
if (Text.class.isAssignableFrom(inputClass)) {
return (AvroDatumConverter<IN, OUT>) new TextConverter();
}
throw new UnsupportedOperationException("Unsupported input type: " + inputClass.getName());
}
|
@Test
void convertFloatWritable() {
AvroDatumConverter<FloatWritable, Float> converter = mFactory.create(FloatWritable.class);
assertEquals(2.2f, converter.convert(new FloatWritable(2.2f)), 0.00001);
}
|
public static boolean notMarkedWithNoAutoStart(Object o) {
if (o == null) {
return false;
}
Class<?> clazz = o.getClass();
NoAutoStart a = findAnnotation(clazz, NoAutoStart.class);
return a == null;
}
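The test expects the annotation to be detected when it is declared on an implemented interface, so findAnnotation must traverse the type hierarchy. A minimal sketch of such a lookup (logback's actual helper may differ):
// Hypothetical sketch: search the class itself, its interfaces, then superclasses.
private static <A extends Annotation> A findAnnotation(Class<?> clazz, Class<A> annotationType) {
if (clazz == null) {
return null;
}
A direct = clazz.getAnnotation(annotationType);
if (direct != null) {
return direct;
}
for (Class<?> iface : clazz.getInterfaces()) {
A onInterface = findAnnotation(iface, annotationType);
if (onInterface != null) {
return onInterface;
}
}
return findAnnotation(clazz.getSuperclass(), annotationType);
}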
|
@Test
public void noAutoStartOnInterface() {
ComponentWithNoAutoStartOnInterface o = new ComponentWithNoAutoStartOnInterface();
assertFalse(NoAutoStartUtil.notMarkedWithNoAutoStart(o));
}
|
public static Schema assignIncreasingFreshIds(Schema schema) {
AtomicInteger lastColumnId = new AtomicInteger(0);
return TypeUtil.assignFreshIds(schema, lastColumnId::incrementAndGet);
}
|
@Test
public void testAssignIncreasingFreshIdWithIdentifier() {
Schema schema =
new Schema(
Lists.newArrayList(
required(10, "a", Types.IntegerType.get()),
required(11, "A", Types.IntegerType.get())),
Sets.newHashSet(10));
Schema expectedSchema =
new Schema(
Lists.newArrayList(
required(1, "a", Types.IntegerType.get()),
required(2, "A", Types.IntegerType.get())),
Sets.newHashSet(1));
final Schema actualSchema = TypeUtil.assignIncreasingFreshIds(schema);
assertThat(actualSchema.asStruct()).isEqualTo(expectedSchema.asStruct());
assertThat(actualSchema.identifierFieldIds())
.as("identifier field ID should change based on source schema")
.isEqualTo(expectedSchema.identifierFieldIds());
}
|
@Override
protected void analyzeDependency(final Dependency dependency, final Engine engine) throws AnalysisException {
// batch request component-reports for all dependencies
synchronized (FETCH_MUTIX) {
if (reports == null) {
try {
requestDelay();
reports = requestReports(engine.getDependencies());
} catch (TransportException ex) {
final String message = ex.getMessage();
final boolean warnOnly = getSettings().getBoolean(Settings.KEYS.ANALYZER_OSSINDEX_WARN_ONLY_ON_REMOTE_ERRORS, false);
this.setEnabled(false);
if (StringUtils.endsWith(message, "401")) {
LOG.error("Invalid credentials for the OSS Index, disabling the analyzer");
throw new AnalysisException("Invalid credentials provided for OSS Index", ex);
} else if (StringUtils.endsWith(message, "403")) {
LOG.error("OSS Index access forbidden, disabling the analyzer");
throw new AnalysisException("OSS Index access forbidden", ex);
} else if (StringUtils.endsWith(message, "429")) {
if (warnOnly) {
LOG.warn("OSS Index rate limit exceeded, disabling the analyzer", ex);
} else {
throw new AnalysisException("OSS Index rate limit exceeded, disabling the analyzer", ex);
}
} else if (warnOnly) {
LOG.warn("Error requesting component reports, disabling the analyzer", ex);
} else {
LOG.debug("Error requesting component reports, disabling the analyzer", ex);
throw new AnalysisException("Failed to request component-reports", ex);
}
} catch (SocketTimeoutException e) {
final boolean warnOnly = getSettings().getBoolean(Settings.KEYS.ANALYZER_OSSINDEX_WARN_ONLY_ON_REMOTE_ERRORS, false);
this.setEnabled(false);
if (warnOnly) {
LOG.warn("OSS Index socket timeout, disabling the analyzer", e);
} else {
LOG.debug("OSS Index socket timeout", e);
throw new AnalysisException("Failed to establish socket to OSS Index", e);
}
} catch (Exception e) {
LOG.debug("Error requesting component reports", e);
throw new AnalysisException("Failed to request component-reports", e);
}
}
// skip enrichment if we failed to fetch reports
if (reports != null) {
enrich(dependency);
}
}
}
|
@Test
public void should_analyzeDependency_only_warn_when_socket_error_from_sonatype() throws Exception {
// Given
OssIndexAnalyzer analyzer = new OssIndexAnalyzerThrowingSocketTimeout();
getSettings().setBoolean(Settings.KEYS.ANALYZER_OSSINDEX_WARN_ONLY_ON_REMOTE_ERRORS, true);
analyzer.initialize(getSettings());
Identifier identifier = new PurlIdentifier("maven", "test", "test", "1.0",
Confidence.HIGHEST);
Dependency dependency = new Dependency();
dependency.addSoftwareIdentifier(identifier);
Settings settings = getSettings();
Engine engine = new Engine(settings);
engine.setDependencies(Collections.singletonList(dependency));
// When
try {
analyzer.analyzeDependency(dependency, engine);
} catch (AnalysisException e) {
Assert.fail("Analysis exception thrown upon remote error although only a warning should have been logged");
} finally {
analyzer.close();
engine.close();
}
}
|
public void addAll(PartitionIdSet other) {
bitSet.or(other.bitSet);
resetSize();
}
|
@Test
public void test_addAll() {
partitionIdSet.addAll(listOf(0, 1, 2, 3, 4));
assertContents(partitionIdSet);
}
|
@SuppressWarnings("unchecked")
public static <W extends BoundedWindow> StateContext<W> nullContext() {
return (StateContext<W>) NULL_CONTEXT;
}
|
@Test
public void nullContextThrowsOnSideInput() {
StateContext<BoundedWindow> context = StateContexts.nullContext();
thrown.expect(IllegalArgumentException.class);
context.sideInput(view);
}
|
public void perform(String url, FetchHandler handler) throws InterruptedException {
int retryCount = 0;
while (true) {
retryCount++;
String message = "";
try {
int rc = download(httpService, url, handler);
if (handler.handleResult(rc, goPublisher)) {
return;
}
message = String.format("Unsuccessful response '%s' from the server", rc);
} catch (Exception e) {
message = String.format("Caught an exception '%s'", e.getMessage());
}
if (retryCount > 3) {
message = String.format("Giving up fetching resource '%s'. Tried 4 times and failed.", url);
LOG.error(message);
throw new RuntimeException(message);
}
long backout = Math.round(backout(retryCount));
publishDownloadError(url, message, backout);
clock.sleepForSeconds(backout);
}
}
|
@Test
public void shouldRetryWhenCreatingFolderZipCache() throws Exception {
when(fetchHandler.handleResult(200, publisher)).thenReturn(true);
MockCachingFetchZipHttpService httpService = new MockCachingFetchZipHttpService(3);
DownloadAction downloadAction = new DownloadAction(httpService, publisher, clock);
downloadAction.perform("foo", fetchHandler);
assertThat(httpService.timesCalled, is(3));
assertThat(clock.getSleeps(), hasItems(5000L));
}
|
@VisibleForTesting
void sinkTo(
DataStream<Event> input,
Sink<Event> sink,
String sinkName,
OperatorID schemaOperatorID) {
DataStream<Event> stream = input;
// Pre-write topology
if (sink instanceof WithPreWriteTopology) {
stream = ((WithPreWriteTopology<Event>) sink).addPreWriteTopology(stream);
}
if (sink instanceof TwoPhaseCommittingSink) {
addCommittingTopology(sink, stream, sinkName, schemaOperatorID);
} else {
stream.transform(
SINK_WRITER_PREFIX + sinkName,
CommittableMessageTypeInfo.noOutput(),
new DataSinkWriterOperatorFactory<>(sink, schemaOperatorID));
}
}
|
@Test
void testPreWriteWithoutCommitSink() {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
ArrayList<Event> mockEvents = Lists.newArrayList(new EmptyEvent(), new EmptyEvent());
DataStreamSource<Event> inputStream = env.fromCollection(mockEvents);
DataSinkTranslator translator = new DataSinkTranslator();
// Node hash must be a 32 character String that describes a hex code
String uid = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
MockPreWriteWithoutCommitSink mockPreWriteWithoutCommitSink =
new MockPreWriteWithoutCommitSink(uid);
translator.sinkTo(
inputStream,
mockPreWriteWithoutCommitSink,
"testPreWriteWithoutCommitSink",
new OperatorID());
// Check that `addPreWriteTopology` is called and that the uid is set when the
// transformation is added
OneInputTransformation<Event, Event> oneInputTransformation =
(OneInputTransformation) env.getTransformations().get(0);
Transformation<?> rebalanceTransformation = oneInputTransformation.getInputs().get(0);
Assertions.assertEquals(uid, rebalanceTransformation.getUserProvidedNodeHash());
}
|
@Override
public void doSendMail(MailSendMessage message) {
// 1. Create the sending account
MailAccountDO account = validateMailAccount(message.getAccountId());
MailAccount mailAccount = MailAccountConvert.INSTANCE.convert(account, message.getNickname());
// 2. Send the mail
try {
String messageId = MailUtil.send(mailAccount, message.getMail(),
message.getTitle(), message.getContent(), true);
// 3. Update the result (success)
mailLogService.updateMailSendResult(message.getLogId(), messageId, null);
} catch (Exception e) {
// 3. Update the result (failure)
mailLogService.updateMailSendResult(message.getLogId(), null, e);
}
}
|
@Test
public void testDoSendMail_exception() {
try (MockedStatic<MailUtil> mailUtilMock = mockStatic(MailUtil.class)) {
// Prepare parameters
MailSendMessage message = randomPojo(MailSendMessage.class, o -> o.setNickname("芋艿"));
// Mock method (get the mail account)
MailAccountDO account = randomPojo(MailAccountDO.class, o -> o.setMail("[email protected]"));
when(mailAccountService.getMailAccountFromCache(eq(message.getAccountId())))
.thenReturn(account);
// Mock method (send the mail)
Exception e = new NullPointerException("啦啦啦");
mailUtilMock.when(() -> MailUtil.send(argThat(mailAccount -> {
assertEquals("芋艿 <[email protected]>", mailAccount.getFrom());
assertTrue(mailAccount.isAuth());
assertEquals(account.getUsername(), mailAccount.getUser());
assertEquals(account.getPassword(), mailAccount.getPass());
assertEquals(account.getHost(), mailAccount.getHost());
assertEquals(account.getPort(), mailAccount.getPort());
assertEquals(account.getSslEnable(), mailAccount.isSslEnable());
return true;
}), eq(message.getMail()), eq(message.getTitle()), eq(message.getContent()), eq(true)))
.thenThrow(e);
// Invoke
mailSendService.doSendMail(message);
// Assert
verify(mailLogService).updateMailSendResult(eq(message.getLogId()), isNull(), same(e));
}
}
|
public PackageRepository find(final String repoId) {
return stream().filter(repository -> repository.getId().equals(repoId)).findFirst().orElse(null);
}
|
@Test
void shouldReturnNullIfNoMatchingRepoFound() throws Exception {
PackageRepositories packageRepositories = new PackageRepositories();
assertThat(packageRepositories.find("not-found")).isNull();
}
|
public static void validate(WindowConfig windowConfig) {
if (windowConfig.getWindowLengthDurationMs() == null && windowConfig.getWindowLengthCount() == null) {
throw new IllegalArgumentException("Window length is not specified");
}
if (windowConfig.getWindowLengthDurationMs() != null && windowConfig.getWindowLengthCount() != null) {
throw new IllegalArgumentException(
"Window length for time and count are set! Please set one or the other.");
}
if (windowConfig.getWindowLengthCount() != null) {
if (windowConfig.getWindowLengthCount() <= 0) {
throw new IllegalArgumentException(
"Window length must be positive [" + windowConfig.getWindowLengthCount() + "]");
}
}
if (windowConfig.getWindowLengthDurationMs() != null) {
if (windowConfig.getWindowLengthDurationMs() <= 0) {
throw new IllegalArgumentException(
"Window length must be positive [" + windowConfig.getWindowLengthDurationMs() + "]");
}
}
if (windowConfig.getSlidingIntervalCount() != null) {
if (windowConfig.getSlidingIntervalCount() <= 0) {
throw new IllegalArgumentException(
"Sliding interval must be positive [" + windowConfig.getSlidingIntervalCount() + "]");
}
}
if (windowConfig.getSlidingIntervalDurationMs() != null) {
if (windowConfig.getSlidingIntervalDurationMs() <= 0) {
throw new IllegalArgumentException(
"Sliding interval must be positive [" + windowConfig.getSlidingIntervalDurationMs() + "]");
}
}
if (windowConfig.getTimestampExtractorClassName() != null) {
if (windowConfig.getMaxLagMs() != null) {
if (windowConfig.getMaxLagMs() < 0) {
throw new IllegalArgumentException(
"Lag duration must be positive [" + windowConfig.getMaxLagMs() + "]");
}
}
if (windowConfig.getWatermarkEmitIntervalMs() != null) {
if (windowConfig.getWatermarkEmitIntervalMs() <= 0) {
throw new IllegalArgumentException(
"Watermark interval must be positive [" + windowConfig.getWatermarkEmitIntervalMs() + "]");
}
}
}
}
|
@Test
public void testSettingSlidingTimeWindow() throws Exception {
final Object[][] args = new Object[][]{
{-1L, 10L},
{10L, -1L},
{0L, 10L},
{10L, 0L},
{0L, 0L},
{-1L, -1L},
{5L, 10L},
{1L, 1L},
{10L, 5L},
{100L, 10L},
{100L, 100L},
{200L, 100L},
{500L, 100L},
{null, null},
{null, 1L},
{1L, null},
{null, -1L},
{-1L, null}
};
for (Object[] arg : args) {
Object arg0 = arg[0];
Object arg1 = arg[1];
try {
Long windowLengthDuration = null;
if (arg0 != null) {
windowLengthDuration = (Long) arg0;
}
Long slidingIntervalDuration = null;
if (arg1 != null) {
slidingIntervalDuration = (Long) arg1;
}
WindowConfig windowConfig = new WindowConfig();
windowConfig.setWindowLengthDurationMs(windowLengthDuration);
windowConfig.setSlidingIntervalDurationMs(slidingIntervalDuration);
WindowConfigUtils.validate(windowConfig);
if (arg0 == null) {
fail(String.format("Window length cannot be null -- "
+ "windowLengthCount: %s slidingIntervalCount: %s", arg0, arg1));
}
if ((Long) arg0 <= 0) {
fail(String.format("Window length cannot be zero or less -- "
+ "windowLengthCount: %s slidingIntervalCount: %s", arg0, arg1));
}
if (arg1 != null && (Long) arg1 <= 0) {
fail(String.format("Sliding interval length cannot be zero or less -- "
+ "windowLengthCount: %s slidingIntervalCount: %s", arg0, arg1));
}
} catch (IllegalArgumentException e) {
if (arg0 != null && arg1 != null && (Long) arg0 > 0 && (Long) arg1 > 0) {
fail(String.format("Exception: %s thrown on valid input -- windowLengthDuration: %s "
+ "slidingIntervalDuration: %s", e.getMessage(), arg0, arg1));
}
}
}
}
|
@Override
public <KR, VR> KStream<KR, VR> map(final KeyValueMapper<? super K, ? super V, ? extends KeyValue<? extends KR, ? extends VR>> mapper) {
return map(mapper, NamedInternal.empty());
}
|
@Test
public void shouldNotAllowNullNamedOnMap() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.map(KeyValue::pair, null));
assertThat(exception.getMessage(), equalTo("named can't be null"));
}
|
public <V> boolean contains(IndexDefinition<T, V> indexDefinition, V value) {
FieldIndex<T, V> index = (FieldIndex<T, V>) mIndices.get(indexDefinition);
if (index == null) {
throw new IllegalStateException("the given index isn't defined for this IndexedSet");
}
return index.containsField(value);
}
|
@Test
    public void nonUniqueContains() {
for (long l = 0; l < 9; l++) {
assertTrue(mSet.contains(mUniqueLongIndex, l));
}
assertFalse(mSet.contains(mUniqueLongIndex, 9L));
}
|
Set<SourceName> analyzeExpression(
final Expression expression,
final String clauseType
) {
final Validator extractor = new Validator(clauseType);
extractor.process(expression, null);
return extractor.referencedSources;
}
|
@Test
public void shouldGetSourceForQualifiedColumnRef() {
// Given:
final QualifiedColumnReferenceExp expression = new QualifiedColumnReferenceExp(
SourceName.of("something"),
ColumnName.of("else")
);
when(sourceSchemas.sourcesWithField(any(), any()))
.thenReturn(ImmutableSet.of(SourceName.of("something")));
// When:
final Set<SourceName> columnRefs = analyzer.analyzeExpression(expression, CLAUSE_TYPE);
// Then:
verify(sourceSchemas).sourcesWithField(
Optional.of(expression.getQualifier()),
expression.getColumnName()
);
assertThat(
Iterables.getOnlyElement(columnRefs),
is(SourceName.of("something")));
}
|
@Override
public void execute(ComputationStep.Context context) {
executeForBranch(treeRootHolder.getRoot());
}
|
@Test
public void givenRulesWhereAddedModifiedOrRemoved_whenEventStep_thenQPChangeEventIsAddedWithDetails() {
QualityProfile qp1 = qp(QP_NAME_1, LANGUAGE_KEY_1, BEFORE_DATE);
QualityProfile qp2 = qp(QP_NAME_1, LANGUAGE_KEY_1, AFTER_DATE);
// mock updated profile
qProfileStatusRepository.register(qp2.getQpKey(), UPDATED);
mockQualityProfileMeasures(treeRootHolder.getRoot(), arrayOf(qp1), arrayOf(qp2));
Language language = mockLanguageInRepository(LANGUAGE_KEY_1);
// mock rule changes
when(qualityProfileRuleChangeTextResolver.mapChangeToNumberOfRules(qp2, treeRootHolder.getRoot().getUuid())).thenReturn(CHANGE_TO_NUMBER_OF_RULES_MAP);
underTest.execute(new TestComputationStepContext());
verify(eventRepository).add(eventArgumentCaptor.capture());
verifyNoMoreInteractions(eventRepository);
verifyEvent(eventArgumentCaptor.getValue(),
"\"" + qp2.getQpName() + "\" (" + language.getName() + ") updated with " + RULE_CHANGE_TEXT,
"from=" + UtcDateUtils.formatDateTime(BEFORE_DATE_PLUS_1_SEC) +
";key=" + qp1.getQpKey() +
";languageKey=" + qp2.getLanguageKey()+
";name=" + qp2.getQpName() +
";to=" + UtcDateUtils.formatDateTime(AFTER_DATE_PLUS_1_SEC),
RULE_CHANGE_TEXT);
}
|
public static TransactionManager get() {
if (SingletonHolder.INSTANCE == null) {
throw new ShouldNeverHappenException("TransactionManager is NOT ready!");
}
return SingletonHolder.INSTANCE;
}
|
@Test
void getTest() {
        Assertions.assertThrows(ShouldNeverHappenException.class, () -> {
            TransactionManagerHolder.set(null);
            TransactionManagerHolder.get();
        });
}
|
public static void assertSoftly(Consumer<SoftAssertions> softly) {
SoftAssertionsProvider.assertSoftly(SoftAssertions.class, softly);
}
|
@Test
void should_assert_using_assertSoftly() {
assertThatThrownBy(() -> assertSoftly(assertions -> {
assertions.assertThat(true).isFalse();
assertions.assertThat(42).isEqualTo("meaning of life");
assertions.assertThat("red").isEqualTo("blue");
})).as("it should call assertAll() and fail with multiple validation errors")
.hasMessageContaining("meaning of life")
.hasMessageContaining("blue");
}
|
@Override
public void addKey(DeviceKey deviceKey) {
checkNotNull(deviceKey, "Device key cannot be null");
store.createOrUpdateDeviceKey(deviceKey);
}
|
@Test(expected = NullPointerException.class)
public void testAddNullKey() {
manager.addKey(null);
}
|
public static Bech32Data decode(final String str) throws AddressFormatException {
boolean lower = false, upper = false;
if (str.length() < 8)
throw new AddressFormatException.InvalidDataLength("Input too short: " + str.length());
if (str.length() > 90)
throw new AddressFormatException.InvalidDataLength("Input too long: " + str.length());
for (int i = 0; i < str.length(); ++i) {
char c = str.charAt(i);
if (c < 33 || c > 126) throw new AddressFormatException.InvalidCharacter(c, i);
if (c >= 'a' && c <= 'z') {
if (upper)
throw new AddressFormatException.InvalidCharacter(c, i);
lower = true;
}
if (c >= 'A' && c <= 'Z') {
if (lower)
throw new AddressFormatException.InvalidCharacter(c, i);
upper = true;
}
}
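        // Bech32 strings are single-case; the last '1' separates the human-readable part (HRP) from the data part.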
final int pos = str.lastIndexOf('1');
if (pos < 1) throw new AddressFormatException.InvalidPrefix("Missing human-readable part");
final int dataPartLength = str.length() - 1 - pos;
if (dataPartLength < 6) throw new AddressFormatException.InvalidDataLength("Data part too short: " + dataPartLength);
byte[] values = new byte[dataPartLength];
for (int i = 0; i < dataPartLength; ++i) {
char c = str.charAt(i + pos + 1);
if (CHARSET_REV[c] == -1) throw new AddressFormatException.InvalidCharacter(c, i + pos + 1);
values[i] = CHARSET_REV[c];
}
String hrp = str.substring(0, pos).toLowerCase(Locale.ROOT);
Encoding encoding = verifyChecksum(hrp, values);
if (encoding == null) throw new AddressFormatException.InvalidChecksum();
return new Bech32Data(encoding, hrp, Arrays.copyOfRange(values, 0, values.length - 6));
}
|
@Test(expected = AddressFormatException.InvalidPrefix.class)
public void decode_invalidHrp() {
Bech32.decode("1pzry9x0s0muk");
}
|
public static MongoDatabase createDatabaseProxy(final MongoDatabase database) {
if (DISABLED) {
return database;
}
SERVICES_COUNTER.setDisplayed(!COUNTER_HIDDEN);
SERVICES_COUNTER.setUsed(true);
return JdbcWrapper.createProxy(database, new MongoDatabaseHandler(database));
}
|
@Test
public void testCreateDatabaseProxy() {
try {
Class.forName("com.mongodb.ReadPreference");
} catch (final ClassNotFoundException e) {
LogManager.getRootLogger().info(e.toString());
            // if mongodb-driver-core is not available on the classpath (test run from Ant),
            // this test cannot be executed
return;
}
final MongoDatabase database = createNiceMock(MongoDatabase.class);
final MongoCollection<Document> collection = createNiceMock(MongoCollection.class);
final CodecRegistry codecRegistry = createNiceMock(CodecRegistry.class);
expect(database.withCodecRegistry(codecRegistry)).andReturn(database).anyTimes();
expect(database.getCollection("collection")).andReturn(collection).anyTimes();
expect(database.getName()).andReturn("db").anyTimes();
expect(collection.withCodecRegistry(codecRegistry)).andReturn(collection).anyTimes();
replay(database);
replay(collection);
replay(codecRegistry);
final MongoDatabase databaseProxy = MongoWrapper.createDatabaseProxy(database);
assertNotNull("createDatabaseProxy", databaseProxy);
assertNotNull("databaseProxy", databaseProxy.withCodecRegistry(codecRegistry));
assertNotNull("getCollection", databaseProxy.getCollection("collection"));
assertEquals("getName", "db", databaseProxy.getName());
assertNotNull("collectionProxy",
databaseProxy.getCollection("collection").withCodecRegistry(codecRegistry));
databaseProxy.getCollection("collection").getCodecRegistry();
databaseProxy.getCollection("collection").find();
verify(database);
verify(collection);
verify(codecRegistry);
}
|
@Override
public boolean next() throws SQLException {
if (skipAll) {
return false;
}
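        // Without an actual row-count bound, pagination is delegated entirely to the underlying merged result.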
if (!paginationContext.getActualRowCount().isPresent()) {
return getMergedResult().next();
}
return rowNumber++ < paginationContext.getActualRowCount().get() && getMergedResult().next();
}
|
@Test
void assertNextForRowCountBoundOpenedTrue() throws SQLException {
OracleSelectStatement selectStatement = new OracleSelectStatement();
selectStatement.setProjections(new ProjectionsSegment(0, 0));
WhereSegment whereSegment = mock(WhereSegment.class);
BinaryOperationExpression binaryOperationExpression = mock(BinaryOperationExpression.class);
when(binaryOperationExpression.getLeft()).thenReturn(new ColumnSegment(0, 0, new IdentifierValue("row_id")));
when(binaryOperationExpression.getRight()).thenReturn(new LiteralExpressionSegment(0, 0, 2));
when(binaryOperationExpression.getOperator()).thenReturn(">=");
when(whereSegment.getExpr()).thenReturn(binaryOperationExpression);
SubqueryTableSegment subqueryTableSegment = mock(SubqueryTableSegment.class);
SubquerySegment subquerySegment = mock(SubquerySegment.class);
SelectStatement subSelectStatement = mock(MySQLSelectStatement.class);
ProjectionsSegment subProjectionsSegment = mock(ProjectionsSegment.class);
TopProjectionSegment topProjectionSegment = mock(TopProjectionSegment.class);
when(topProjectionSegment.getAlias()).thenReturn("row_id");
when(topProjectionSegment.getTop()).thenReturn(new NumberLiteralRowNumberValueSegment(0, 0, 4L, true));
when(subProjectionsSegment.getProjections()).thenReturn(Collections.singletonList(topProjectionSegment));
when(subSelectStatement.getProjections()).thenReturn(subProjectionsSegment);
when(subquerySegment.getSelect()).thenReturn(subSelectStatement);
when(subqueryTableSegment.getSubquery()).thenReturn(subquerySegment);
selectStatement.setFrom(subqueryTableSegment);
selectStatement.setWhere(whereSegment);
ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "Oracle"));
ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database), null, selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList());
when(database.getName()).thenReturn(DefaultDatabase.LOGIC_NAME);
MergedResult actual = resultMerger.merge(Arrays.asList(mockQueryResult(), mockQueryResult(), mockQueryResult(), mockQueryResult()), selectStatementContext, database,
mock(ConnectionContext.class));
assertTrue(actual.next());
assertTrue(actual.next());
assertTrue(actual.next());
assertFalse(actual.next());
}
|
@Override
public String getName() {
return ANALYZER_NAME;
}
|
@Test
public void testGetName() {
HintAnalyzer instance = new HintAnalyzer();
String expResult = "Hint Analyzer";
String result = instance.getName();
assertEquals(expResult, result);
}
|
@Override
public <T> void register(Class<T> remoteInterface, T object) {
register(remoteInterface, object, 1);
}
|
@Test
public void testRx() {
RedissonRxClient r1 = createInstance().rxJava();
r1.getRemoteService().register(RemoteInterface.class, new RemoteImpl());
RedissonRxClient r2 = createInstance().rxJava();
RemoteInterfaceRx ri = r2.getRemoteService().get(RemoteInterfaceRx.class);
Completable f = ri.voidMethod("someName", 100L);
f.blockingAwait();
Single<Long> resFuture = ri.resultMethod(100L);
assertThat(resFuture.blockingGet()).isEqualTo(200);
r1.shutdown();
r2.shutdown();
}
|
@Override
public String toString() {
if (null != table && null != column) {
return String.format("database.table.column: '%s'.'%s'.'%s'", database, table, column);
}
if (null != table) {
return String.format("database.table: '%s'.'%s'", database, table);
}
return String.format("database: '%s'", database);
}
|
@Test
void assertToStringForColumnIdentifier() {
assertThat(new SQLExceptionIdentifier("foo_db", "foo_tbl", "foo_col").toString(), is("database.table.column: 'foo_db'.'foo_tbl'.'foo_col'"));
}
|
String getWarFilePath() {
TaskProvider<Task> bootWarTask = TaskCommon.getBootWarTaskProvider(project);
if (bootWarTask != null && bootWarTask.get().getEnabled()) {
return bootWarTask.get().getOutputs().getFiles().getAsPath();
}
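    // Fall back to the plain 'war' task when bootWar is absent or disabled.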
TaskProvider<Task> warTask = TaskCommon.getWarTaskProvider(project);
return Verify.verifyNotNull(warTask).get().getOutputs().getFiles().getAsPath();
}
|
@Test
public void testGetWarFilePath_bootWarDisabled() throws IOException {
Path outputDir = temporaryFolder.newFolder("output").toPath();
project.getPlugins().apply("war");
War war = project.getTasks().withType(War.class).getByName("war");
war.getDestinationDirectory().set(outputDir.toFile());
project.getPlugins().apply("org.springframework.boot");
project.getTasks().getByName("bootWar").setEnabled(false);
assertThat(gradleProjectProperties.getWarFilePath())
.isEqualTo(outputDir.resolve("my-app.war").toString());
}
|
public static BtcFormat getSymbolInstance() { return getSymbolInstance(defaultLocale()); }
|
@Ignore("non-determinism between OpenJDK versions")
@Test
public void suffixTest() {
BtcFormat deFormat = BtcFormat.getSymbolInstance(Locale.GERMANY);
// int
assertEquals("1,00 ฿", deFormat.format(100000000));
assertEquals("1,01 ฿", deFormat.format(101000000));
assertEquals("1.011,00 ₥฿", deFormat.format(101100000));
assertEquals("1.000,01 ₥฿", deFormat.format(100001000));
assertEquals("1.000.001,00 µ฿", deFormat.format(100000100));
assertEquals("1.000.000,10 µ฿", deFormat.format(100000010));
assertEquals("1.000.000,01 µ฿", deFormat.format(100000001));
}
|
@NonNull
@Override
public EncodeStrategy getEncodeStrategy(@NonNull Options options) {
Boolean encodeTransformation = options.get(ENCODE_TRANSFORMATION);
return encodeTransformation != null && encodeTransformation
? EncodeStrategy.TRANSFORMED
: EncodeStrategy.SOURCE;
}
|
@Test
public void testEncodeStrategy_withEncodeTransformationUnSet_returnsSource() {
options.set(ReEncodingGifResourceEncoder.ENCODE_TRANSFORMATION, null);
assertThat(encoder.getEncodeStrategy(options)).isEqualTo(EncodeStrategy.SOURCE);
}
|
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint, Bulkhead bulkhead,
String methodName) throws Throwable {
BulkheadOperator<?> bulkheadOperator = BulkheadOperator.of(bulkhead);
Object returnValue = proceedingJoinPoint.proceed();
return executeRxJava3Aspect(bulkheadOperator, returnValue);
}
|
@Test
public void testRxTypes() throws Throwable {
Bulkhead bulkhead = Bulkhead.ofDefaults("test");
when(proceedingJoinPoint.proceed()).thenReturn(Single.just("Test"));
assertThat(rxJava3BulkheadAspectExt.handle(proceedingJoinPoint, bulkhead, "testMethod"))
.isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Flowable.just("Test"));
assertThat(rxJava3BulkheadAspectExt.handle(proceedingJoinPoint, bulkhead, "testMethod"))
.isNotNull();
}
|
@Override
public void refreshTable(String srDbName, Table table, List<String> partitionNames, boolean onlyCachedPartitions) {
if (isResourceMappingCatalog(catalogName)) {
refreshTableWithResource(table);
} else {
IcebergTable icebergTable = (IcebergTable) table;
String dbName = icebergTable.getRemoteDbName();
String tableName = icebergTable.getRemoteTableName();
tables.remove(TableIdentifier.of(dbName, tableName));
try {
icebergCatalog.refreshTable(dbName, tableName, jobPlanningExecutor);
} catch (Exception e) {
LOG.error("Failed to refresh table {}.{}.{}. invalidate cache", catalogName, dbName, tableName, e);
icebergCatalog.invalidateCache(new CachingIcebergCatalog.IcebergTableName(dbName, tableName));
}
}
}
|
@Test
public void testRefreshTableException(@Mocked CachingIcebergCatalog icebergCatalog) {
new Expectations() {
{
icebergCatalog.refreshTable(anyString, anyString, null);
result = new StarRocksConnectorException("refresh failed");
}
};
IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, icebergCatalog,
Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), null);
IcebergTable icebergTable = new IcebergTable(1, "srTableName", CATALOG_NAME, "resource_name", "db_name",
"table_name", "", new ArrayList<>(), mockedNativeTableD, Maps.newHashMap());
metadata.refreshTable("db", icebergTable, null, true);
}
|
@Override
public boolean isSplittable() {
return false;
}
|
@Test
void testSplittable() {
assertThat(AvroParquetReaders.forGenericRecord(schema).isSplittable()).isFalse();
}
|
public int length() {
split();
return splitted.size() - 1;
}
|
@Test
public void testLengthCPs() {
final UnicodeHelper lh = new UnicodeHelper("a", Method.CODEPOINTS);
assertEquals(1, lh.length());
final UnicodeHelper lh2 = new UnicodeHelper(new String(Character.toChars(0x1f600)), Method.CODEPOINTS);
assertEquals(1, lh2.length());
final UnicodeHelper lh3 = new UnicodeHelper(UCSTR, Method.CODEPOINTS);
assertEquals(5, lh3.length());
final UnicodeHelper lh4 = new UnicodeHelper("a" + UCSTR + "A", Method.CODEPOINTS);
assertEquals(7, lh4.length());
final UnicodeHelper lh5 = new UnicodeHelper("k\u035fh", Method.CODEPOINTS);
assertEquals(3, lh5.length());
}
|
public static boolean contains(File file, String pattern) throws IOException {
try (Scanner fileScanner = new Scanner(file, UTF_8.name())) {
final Pattern regex = Pattern.compile(pattern);
if (fileScanner.findWithinHorizon(regex, 0) != null) {
return true;
}
}
return false;
}
|
@Test
public void testContains_File_String() throws Exception {
File file = BaseTest.getResourceAsFile(this, "SearchTest.txt");
String pattern = "blue";
boolean expResult = false;
boolean result = FileContentSearch.contains(file, pattern);
assertEquals(expResult, result);
pattern = "test";
expResult = false;
result = FileContentSearch.contains(file, pattern);
assertEquals(expResult, result);
pattern = "(?i)test";
expResult = true;
result = FileContentSearch.contains(file, pattern);
assertEquals(expResult, result);
}
|
public static BsonTimestamp decodeTimestamp(BsonDocument resumeToken) {
BsonValue bsonValue =
Objects.requireNonNull(resumeToken, "Missing ResumeToken.").get(DATA_FIELD);
final byte[] keyStringBytes;
// Resume Tokens format: https://www.mongodb.com/docs/manual/changeStreams/#resume-tokens
if (bsonValue.isBinary()) { // BinData
keyStringBytes = bsonValue.asBinary().getData();
} else if (bsonValue.isString()) { // Hex-encoded string (v0 or v1)
keyStringBytes = hexToUint8Array(bsonValue.asString().getValue());
} else {
throw new IllegalArgumentException(
"Unknown resume token format: " + resumeToken.toJson());
}
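        // The decoded KeyString starts with a one-byte type tag, followed by the timestamp's
        // seconds and increment as big-endian 32-bit integers.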
ByteBuffer buffer = ByteBuffer.wrap(keyStringBytes).order(ByteOrder.BIG_ENDIAN);
int kType = buffer.get() & 0xff;
if (kType != K_TIMESTAMP) {
throw new IllegalArgumentException("Unknown keyType of timestamp: " + kType);
}
int t = buffer.getInt();
int i = buffer.getInt();
return new BsonTimestamp(t, i);
}
|
@Test
public void testDecodeBinDataFormat() {
BsonDocument resumeToken =
BsonDocument.parse(
"{\"_data\": {\"$binary\": {\"base64\": \"gmNXqzwAAAABRmRfaWQAZGNXqj41xq4H4ebHNwBaEATmzwG2DzpOl4tpOyYEG9zABA==\", \"subType\": \"00\"}}}");
BsonTimestamp expected = new BsonTimestamp(1666689852, 1);
BsonTimestamp actual = ResumeTokenUtils.decodeTimestamp(resumeToken);
assertEquals(expected, actual);
}
|
public void updateAll() throws InterruptedException {
LOGGER.debug("DAILY UPDATE ALL");
var extensions = repositories.findAllPublicIds();
var extensionPublicIdsMap = extensions.stream()
.filter(e -> StringUtils.isNotEmpty(e.getPublicId()))
.collect(Collectors.toMap(e -> e.getId(), e -> e.getPublicId()));
var namespacePublicIdsMap = extensions.stream()
.map(e -> e.getNamespace())
.filter(n -> StringUtils.isNotEmpty(n.getPublicId()))
.collect(Collectors.toMap(n -> n.getId(), n -> n.getPublicId(), (id1, id2) -> id1));
var upstreamExtensionPublicIds = new HashMap<Long, String>();
var upstreamNamespacePublicIds = new HashMap<Long, String>();
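        // Fetch the upstream public IDs once per extension and namespace, skipping built-in extensions.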
for(var extension : extensions) {
if(BuiltInExtensionUtil.isBuiltIn(extension)) {
LOGGER.trace("SKIP BUILT-IN EXTENSION {}", NamingUtil.toExtensionId(extension));
continue;
}
LOGGER.trace("GET UPSTREAM PUBLIC ID: {} | {}", extension.getId(), NamingUtil.toExtensionId(extension));
var publicIds = service.getUpstreamPublicIds(extension);
if(upstreamExtensionPublicIds.get(extension.getId()) == null) {
LOGGER.trace("ADD EXTENSION PUBLIC ID: {} - {}", extension.getId(), publicIds.extension());
upstreamExtensionPublicIds.put(extension.getId(), publicIds.extension());
}
var namespace = extension.getNamespace();
if(upstreamNamespacePublicIds.get(namespace.getId()) == null) {
LOGGER.trace("ADD NAMESPACE PUBLIC ID: {} - {}", namespace.getId(), publicIds.namespace());
upstreamNamespacePublicIds.put(namespace.getId(), publicIds.namespace());
}
}
var changedExtensionPublicIds = getChangedPublicIds(upstreamExtensionPublicIds, extensionPublicIdsMap);
LOGGER.debug("UPSTREAM EXTENSIONS: {}", upstreamExtensionPublicIds.size());
LOGGER.debug("CHANGED EXTENSIONS: {}", changedExtensionPublicIds.size());
if(!changedExtensionPublicIds.isEmpty()) {
LOGGER.debug("CHANGED EXTENSION PUBLIC IDS");
for(var entry : changedExtensionPublicIds.entrySet()) {
LOGGER.debug("{}: {}", entry.getKey(), entry.getValue());
}
repositories.updateExtensionPublicIds(changedExtensionPublicIds);
}
var changedNamespacePublicIds = getChangedPublicIds(upstreamNamespacePublicIds, namespacePublicIdsMap);
LOGGER.debug("UPSTREAM NAMESPACES: {}", upstreamNamespacePublicIds.size());
LOGGER.debug("CHANGED NAMESPACES: {}", changedNamespacePublicIds.size());
if(!changedNamespacePublicIds.isEmpty()) {
LOGGER.debug("CHANGED NAMESPACE PUBLIC IDS");
for(var entry : changedNamespacePublicIds.entrySet()) {
LOGGER.debug("{}: {}", entry.getKey(), entry.getValue());
}
repositories.updateNamespacePublicIds(changedNamespacePublicIds);
}
}
|
@Test
public void testUpdateAllNoChanges() throws InterruptedException {
var namespaceName1 = "foo";
var namespacePublicId1 = UUID.randomUUID().toString();
var extensionName1 = "bar";
var extensionPublicId1 = UUID.randomUUID().toString();
var namespace1 = new Namespace();
namespace1.setId(1L);
namespace1.setName(namespaceName1);
namespace1.setPublicId(namespacePublicId1);
var extension1 = new Extension();
extension1.setId(2L);
extension1.setName(extensionName1);
extension1.setNamespace(namespace1);
extension1.setPublicId(extensionPublicId1);
var namespaceName2 = "baz";
var namespacePublicId2 = UUID.randomUUID().toString();
var extensionName2 = "foobar";
var extensionPublicId2 = UUID.randomUUID().toString();
var namespace2 = new Namespace();
namespace2.setId(3L);
namespace2.setName(namespaceName2);
namespace2.setPublicId(namespacePublicId2);
var extension2 = new Extension();
extension2.setId(4L);
extension2.setName(extensionName2);
extension2.setPublicId(extensionPublicId2);
extension2.setNamespace(namespace2);
var namespaceName3 = "baz2";
var namespacePublicId3 = UUID.randomUUID().toString();
var extensionName3 = "foobar2";
var extensionPublicId3 = UUID.randomUUID().toString();
var namespace3 = new Namespace();
namespace3.setId(5L);
namespace3.setName(namespaceName3);
namespace3.setPublicId(namespacePublicId3);
var extension3 = new Extension();
extension3.setId(6L);
extension3.setName(extensionName3);
extension3.setPublicId(extensionPublicId3);
extension3.setNamespace(namespace3);
Mockito.when(repositories.findAllPublicIds()).thenReturn(List.of(extension1, extension2, extension3));
Mockito.doAnswer(invocation -> {
var extension = invocation.getArgument(0, Extension.class);
return new PublicIds(extension.getNamespace().getPublicId(), extension.getPublicId());
}).when(idService).getUpstreamPublicIds(Mockito.any(Extension.class));
updateService.updateAll();
Mockito.verify(repositories, Mockito.never()).updateExtensionPublicIds(Mockito.anyMap());
Mockito.verify(repositories, Mockito.never()).updateNamespacePublicIds(Mockito.anyMap());
}
|
public URL getInterNodeListener(
final Function<URL, Integer> portResolver
) {
return getInterNodeListener(portResolver, LOGGER);
}
|
@Test
public void shouldResolveInterNodeListenerToFirstListenerSetToIpv4Loopback() {
// Given:
final URL expected = url("https://127.0.0.2:12345");
final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
.put(LISTENERS_CONFIG, expected.toString() + ",http://localhost:2589")
.build()
);
// When:
final URL actual = config.getInterNodeListener(portResolver, logger);
// Then:
assertThat(actual, is(expected));
verifyLogsInterNodeListener(expected, QUOTED_FIRST_LISTENER_CONFIG);
verifyLogsLoopBackWarning(expected, QUOTED_FIRST_LISTENER_CONFIG);
verifyNoMoreInteractions(logger);
}
|
public Map<String, String> getPdiParameters() {
return pdiParameters;
}
|
@Test
public void getPdiParameters() {
JobScheduleRequest jobScheduleRequest = mock( JobScheduleRequest.class );
when( jobScheduleRequest.getPdiParameters() ).thenCallRealMethod();
Map<String, String> pdiParameters = new HashMap<>();
pdiParameters.put( "hitachi", "vantara" );
ReflectionTestUtils.setField( jobScheduleRequest, "pdiParameters", pdiParameters );
Assert.assertEquals( pdiParameters, jobScheduleRequest.getPdiParameters() );
}
|
@SuppressWarnings("deprecation")
public static <K> KStreamHolder<K> build(
final KStreamHolder<K> left,
final KStreamHolder<K> right,
final StreamStreamJoin<K> join,
final RuntimeBuildContext buildContext,
final StreamJoinedFactory streamJoinedFactory) {
final QueryContext queryContext = join.getProperties().getQueryContext();
final QueryContext.Stacker stacker = QueryContext.Stacker.of(queryContext);
final LogicalSchema leftSchema;
final LogicalSchema rightSchema;
final Formats rightFormats;
final Formats leftFormats;
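    // A RIGHT join is executed below as a LEFT join with the inputs swapped,
    // so swap the schemas and serde formats here to match.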
if (join.getJoinType().equals(RIGHT)) {
leftFormats = join.getRightInternalFormats();
rightFormats = join.getLeftInternalFormats();
leftSchema = right.getSchema();
rightSchema = left.getSchema();
} else {
leftFormats = join.getLeftInternalFormats();
rightFormats = join.getRightInternalFormats();
leftSchema = left.getSchema();
rightSchema = right.getSchema();
}
final PhysicalSchema leftPhysicalSchema = PhysicalSchema.from(
leftSchema,
leftFormats.getKeyFeatures(),
leftFormats.getValueFeatures()
);
final Serde<GenericRow> leftSerde = buildContext.buildValueSerde(
leftFormats.getValueFormat(),
leftPhysicalSchema,
stacker.push(LEFT_SERDE_CTX).getQueryContext()
);
final PhysicalSchema rightPhysicalSchema = PhysicalSchema.from(
rightSchema,
rightFormats.getKeyFeatures(),
rightFormats.getValueFeatures()
);
final Serde<GenericRow> rightSerde = buildContext.buildValueSerde(
rightFormats.getValueFormat(),
rightPhysicalSchema,
stacker.push(RIGHT_SERDE_CTX).getQueryContext()
);
final Serde<K> keySerde = left.getExecutionKeyFactory().buildKeySerde(
leftFormats.getKeyFormat(),
leftPhysicalSchema,
queryContext
);
final StreamJoined<K, GenericRow, GenericRow> joined = streamJoinedFactory.create(
keySerde,
leftSerde,
rightSerde,
StreamsUtil.buildOpName(queryContext),
StreamsUtil.buildOpName(queryContext)
);
final JoinParams joinParams = JoinParamsFactory
.create(join.getKeyColName(), leftSchema, rightSchema);
JoinWindows joinWindows;
// Grace, as optional, helps to identify if a user specified the GRACE PERIOD syntax in the
// join window. If specified, then we'll call the new KStreams API ofTimeDifferenceAndGrace()
// which enables the "spurious" results bugfix with left/outer joins (see KAFKA-10847).
if (join.getGraceMillis().isPresent()) {
joinWindows = JoinWindows.ofTimeDifferenceAndGrace(
join.getBeforeMillis(),
join.getGraceMillis().get());
} else {
joinWindows = JoinWindows.of(join.getBeforeMillis());
}
joinWindows = joinWindows.after(join.getAfterMillis());
final KStream<K, GenericRow> result;
switch (join.getJoinType()) {
case LEFT:
result = left.getStream().leftJoin(
right.getStream(), joinParams.getJoiner(), joinWindows, joined);
break;
case RIGHT:
result = right.getStream().leftJoin(
left.getStream(), joinParams.getJoiner(), joinWindows, joined);
break;
case OUTER:
result = left.getStream().outerJoin(
right.getStream(), joinParams.getJoiner(), joinWindows, joined);
break;
case INNER:
result = left.getStream().join(
right.getStream(), joinParams.getJoiner(), joinWindows, joined);
break;
default:
throw new IllegalStateException("invalid join type");
}
return left.withStream(result, joinParams.getSchema());
}
|
@Test
public void shouldBuildRightSerdeCorrectly() {
// Given:
givenInnerJoin(L_KEY);
// When:
join.build(planBuilder, planInfo);
// Then:
        final QueryContext rightCtx = QueryContext.Stacker.of(CTX).push("Right").getQueryContext();
        verify(buildContext).buildValueSerde(FormatInfo.of(FormatFactory.AVRO.name()), RIGHT_PHYSICAL, rightCtx);
}
|
public static <K, V> Read<K, V> read() {
return new AutoValue_KafkaIO_Read.Builder<K, V>()
.setTopics(new ArrayList<>())
.setTopicPartitions(new ArrayList<>())
.setConsumerFactoryFn(KafkaIOUtils.KAFKA_CONSUMER_FACTORY_FN)
.setConsumerConfig(KafkaIOUtils.DEFAULT_CONSUMER_PROPERTIES)
.setMaxNumRecords(Long.MAX_VALUE)
.setCommitOffsetsInFinalizeEnabled(false)
.setDynamicRead(false)
.setTimestampPolicyFactory(TimestampPolicyFactory.withProcessingTime())
.setConsumerPollingTimeout(2L)
.setRedistributed(false)
.setAllowDuplicates(false)
.setRedistributeNumKeys(0)
.build();
}
|
@Test
public void testUnboundedSourceWithExceptionInKafkaFetch() {
        // Similar to testUnboundedSource, but with an injected exception inside the Kafka consumer poll.
// The reader should throw an IOException:
thrown.expectCause(isA(IOException.class));
thrown.expectCause(hasMessage(containsString("Exception while reading from Kafka")));
// The original exception is from MockConsumer.poll():
thrown.expectCause(hasCause(isA(KafkaException.class)));
thrown.expectCause(hasCause(hasMessage(containsString("Injected error in consumer.poll()"))));
int numElements = 1000;
String topic = "my_topic";
KafkaIO.Read<Integer, Long> reader =
KafkaIO.<Integer, Long>read()
.withBootstrapServers("none")
.withTopic("my_topic")
.withConsumerFactoryFn(
new ConsumerFactoryFn(
ImmutableList.of(topic), 10, numElements, OffsetResetStrategy.EARLIEST))
.withMaxNumRecords(2 * numElements) // Try to read more messages than available.
.withConsumerConfigUpdates(ImmutableMap.of("inject.error.at.eof", true))
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class);
PCollection<Long> input = p.apply(reader.withoutMetadata()).apply(Values.create());
addCountingAsserts(input, numElements);
p.run();
}
|
@Override
public long size() {
return colIndex[n];
}
|
@Test
public void testSize() {
System.out.println("size");
assertEquals(7, sparse.size());
}
|
public ValueMetaInterface getValueMetaFor( int resultType, String name ) throws KettlePluginException {
// don't need any synchronization as data instance belongs only to one step instance
ValueMetaInterface meta = resultMetaMapping.get( resultType );
if ( meta == null ) {
meta = ValueMetaFactory.createValueMeta( name, resultType );
resultMetaMapping.put( resultType, meta );
}
return meta;
}
|
@Test
public void dataReturnsCachedValues() throws Exception {
KettleEnvironment.init( false );
CalculatorData data = new CalculatorData();
ValueMetaInterface valueMeta = data.getValueMetaFor( ValueMetaInterface.TYPE_INTEGER, null );
ValueMetaInterface shouldBeTheSame = data.getValueMetaFor( ValueMetaInterface.TYPE_INTEGER, null );
assertTrue( "CalculatorData should cache loaded value meta instances", valueMeta == shouldBeTheSame );
}
|
static int evaluateLevenshteinDistanceBestHits(LevenshteinDistance levenshteinDistance, List<String> terms,
List<String> texts) {
logger.debug("evaluateLevenshteinDistanceBestHits {} {}", terms, texts);
int batchSize = terms.size();
int limit = texts.size() - batchSize + 1;
String toSearch = String.join(" ", terms);
SortedMap<Integer, AtomicInteger> distancesMap = new TreeMap<>();
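        // Slide a window of batchSize words over the text, tallying how often each Levenshtein
        // distance occurs; the hit count for the smallest observed distance is returned.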
for (int i = 0; i < limit; i++) {
String subText = String.join(" ", texts.subList(i, i + batchSize));
int distance = evaluateLevenshteinDistance(levenshteinDistance, toSearch, subText);
if (distance > -1) {
if (distancesMap.containsKey(distance)) {
distancesMap.get(distance).addAndGet(1);
} else {
distancesMap.put(distance, new AtomicInteger(1));
}
}
}
return distancesMap.get(distancesMap.firstKey()).get();
}
|
@Test
void evaluateLevenshteinDistanceBestHits() {
String wordSeparatorCharacterRE = "\\s+"; // brown-foxy does not match
Pattern pattern = Pattern.compile(wordSeparatorCharacterRE);
List<String> terms = KiePMMLTextIndex.splitText("The", pattern);
List<String> texts = KiePMMLTextIndex.splitText(TEXT_0, pattern);
LevenshteinDistance levenshteinDistance = new LevenshteinDistance(0);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistanceBestHits(levenshteinDistance, terms, texts)).isEqualTo(2);
levenshteinDistance = new LevenshteinDistance(1);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistanceBestHits(levenshteinDistance, terms, texts)).isEqualTo(2);
levenshteinDistance = new LevenshteinDistance(2);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistanceBestHits(levenshteinDistance, terms, texts)).isEqualTo(2);
//---
wordSeparatorCharacterRE = "[\\s\\-]"; // brown-foxy match
pattern = Pattern.compile(wordSeparatorCharacterRE);
terms = KiePMMLTextIndex.splitText("The", pattern);
texts = KiePMMLTextIndex.splitText(TEXT_0, pattern);
levenshteinDistance = new LevenshteinDistance(0);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistanceBestHits(levenshteinDistance, terms, texts)).isEqualTo(2);
levenshteinDistance = new LevenshteinDistance(1);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistanceBestHits(levenshteinDistance, terms, texts)).isEqualTo(2);
levenshteinDistance = new LevenshteinDistance(2);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistanceBestHits(levenshteinDistance, terms, texts)).isEqualTo(2);
}
|
@Override
public Collection<RedisServer> masters() {
List<Map<String, String>> masters = connection.sync(StringCodec.INSTANCE, RedisCommands.SENTINEL_MASTERS);
return toRedisServersList(masters);
}
|
@Test
public void testMasters() {
Collection<RedisServer> masters = connection.masters();
assertThat(masters).hasSize(1);
}
|
public void encryptColumns(
String inputFile, String outputFile, List<String> paths, FileEncryptionProperties fileEncryptionProperties)
throws IOException {
Path inPath = new Path(inputFile);
Path outPath = new Path(outputFile);
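        // ParquetRewriter copies the file block by block, encrypting only the requested column paths.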
RewriteOptions options = new RewriteOptions.Builder(conf, inPath, outPath)
.encrypt(paths)
.encryptionProperties(fileEncryptionProperties)
.build();
ParquetRewriter rewriter = new ParquetRewriter(options);
rewriter.processBlocks();
rewriter.close();
}
|
@Test
public void testColumnIndex() throws IOException {
String[] encryptColumns = {"Name"};
testSetup("GZIP");
columnEncryptor.encryptColumns(
inputFile.getFileName(),
outputFile,
Arrays.asList(encryptColumns),
EncDecProperties.getFileEncryptionProperties(encryptColumns, ParquetCipher.AES_GCM_V1, false));
verifyResultDecryptionWithValidKey();
verifyOffsetIndexes();
}
|
@SuppressWarnings("unchecked")
@Override
public synchronized ProxyInfo<T> getProxy() {
if (currentUsedHandler != null) {
return currentUsedHandler;
}
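    // Collect every configured target once and wrap them behind a single invocation handler
    // that hedges each request across all of them.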
Map<String, ProxyInfo<T>> targetProxyInfos = new HashMap<>();
StringBuilder combinedInfo = new StringBuilder("[");
for (int i = 0; i < proxies.size(); i++) {
ProxyInfo<T> pInfo = super.getProxy();
incrementProxyIndex();
targetProxyInfos.put(pInfo.proxyInfo, pInfo);
combinedInfo.append(pInfo.proxyInfo).append(',');
}
combinedInfo.append(']');
T wrappedProxy = (T) Proxy.newProxyInstance(
RequestHedgingInvocationHandler.class.getClassLoader(),
new Class<?>[]{xface},
new RequestHedgingInvocationHandler(targetProxyInfos));
currentUsedHandler =
new ProxyInfo<T>(wrappedProxy, combinedInfo.toString());
return currentUsedHandler;
}
|
@Test
public void testExceptionInfo() throws Exception {
final ClientProtocol goodMock = mock(ClientProtocol.class);
when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
private boolean first = true;
@Override
public long[] answer(InvocationOnMock invocation)
throws Throwable {
if (first) {
Thread.sleep(1000);
first = false;
return new long[] {1};
} else {
throw new IOException("Expected Exception Info");
}
}
});
final ClientProtocol badMock = mock(ClientProtocol.class);
when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
@Override
public long[] answer(InvocationOnMock invocation)
throws Throwable {
throw new IOException("Bad Mock! This is Standby!");
}
});
RequestHedgingProxyProvider<ClientProtocol> provider =
new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
createFactory(badMock, goodMock));
ClientProtocol proxy = provider.getProxy().proxy;
proxy.getStats();
// Test getting the exception when the successful proxy encounters one.
try {
proxy.getStats();
} catch (IOException e) {
assertExceptionContains("Expected Exception Info", e);
}
}
|
public void increaseUsage(long value) {
if (value == 0) {
return;
}
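        // Update usage under the write lock, then propagate the change up the parent chain outside the lock.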
usageLock.writeLock().lock();
try {
usage += value;
setPercentUsage(caclPercentUsage());
} finally {
usageLock.writeLock().unlock();
}
if (parent != null) {
parent.increaseUsage(value);
}
}
|
@Test
public final void testPercentUsageNeedsNoThread() {
int activeThreadCount = Thread.activeCount();
underTest.setLimit(10);
underTest.start();
underTest.increaseUsage(1);
assertEquals("usage is correct", 10, underTest.getPercentUsage());
assertEquals("no new thread created without listener or callback",activeThreadCount, Thread.activeCount());
}
|
protected static boolean isValidQueueName(String queueName) {
if (queueName != null) {
if (queueName.equals(FairSchedulerUtilities.trimQueueName(queueName)) &&
!queueName.startsWith(DOT) &&
!queueName.endsWith(DOT)) {
return true;
}
}
return false;
}
|
@Test
public void testIsValidQueueName() {
// permutations of valid/invalid names
final String valid = "valid";
final String validRooted = "root.valid";
final String rootOnly = "root";
final String startDot = ".invalid";
final String endDot = "invalid.";
final String startSpace = " invalid";
final String endSpace = "invalid ";
final String unicodeSpace = "\u00A0invalid";
assertFalse("'null' queue was not marked as invalid",
isValidQueueName(null));
assertTrue("empty queue was not tagged valid", isValidQueueName(""));
assertTrue("Simple queue name was not tagged valid (valid)",
isValidQueueName(valid));
assertTrue("Root only queue was not tagged valid (root)",
isValidQueueName(rootOnly));
assertTrue("Root prefixed queue was not tagged valid (root.valid)",
isValidQueueName(validRooted));
assertFalse("Queue starting with dot was not tagged invalid (.invalid)",
isValidQueueName(startDot));
assertFalse("Queue ending with dot was not tagged invalid (invalid.)",
isValidQueueName(endDot));
assertFalse("Queue starting with space was not tagged invalid ( invalid)",
isValidQueueName(startSpace));
assertFalse("Queue ending with space was not tagged invalid (invalid )",
isValidQueueName(endSpace));
// just one for sanity check extensive tests are in the scheduler utils
assertFalse("Queue with unicode space was not tagged as invalid (unicode)",
isValidQueueName(unicodeSpace));
}
|
public static String diffProvenance(ModelProvenance originalProvenance, ModelProvenance newProvenance) throws JsonProcessingException {
Iterator<Pair<String, Provenance>> originalIter = originalProvenance.iterator();
Iterator<Pair<String, Provenance>> newIter = newProvenance.iterator();
String report = mapper.writeValueAsString(diffProvenanceIterators(originalIter, newIter));
return report;
}
|
@Test
public void testProvDiffWithTransformTrainer() throws IOException, URISyntaxException {
//TODO: Expand this to actually assert something
CSVDataSource<Label> csvSource = getCSVDataSource();
MutableDataset<Label> datasetFromCSV = new MutableDataset<>(csvSource);
LogisticRegressionTrainer trainer = new LogisticRegressionTrainer();
LinearSGDModel model = (LinearSGDModel) trainer.train(datasetFromCSV);
TransformationMap transformations = new TransformationMap(List.of(new LinearScalingTransformation(0,1)));
TransformTrainer<Label> transformed = new TransformTrainer<>(trainer, transformations);
Model<Label> transformedModel = transformed.train(datasetFromCSV);
String report = ReproUtil.diffProvenance(model.getProvenance(), transformedModel.getProvenance());
        // TODO: Evaluate report value; this requires addressing the fact that timestamps will change,
        // so we can't just encode the expected report as a String
//System.out.println(report);
}
|
static void setTableInputInformation(
TableInput.Builder tableInputBuilder, TableMetadata metadata) {
setTableInputInformation(tableInputBuilder, metadata, null);
}
|
@Test
public void testSetTableInputInformationWithRemovedColumns() {
// Actual TableInput
TableInput.Builder actualTableInputBuilder = TableInput.builder();
Schema schema =
new Schema(
Types.NestedField.required(1, "x", Types.StringType.get(), "comment1"),
Types.NestedField.required(
2,
"y",
Types.StructType.of(Types.NestedField.required(3, "z", Types.IntegerType.get())),
"comment2"));
PartitionSpec partitionSpec =
PartitionSpec.builderFor(schema).identity("x").withSpecId(1000).build();
TableMetadata tableMetadata =
TableMetadata.newTableMetadata(schema, partitionSpec, "s3://test", tableLocationProperties);
Schema newSchema =
new Schema(Types.NestedField.required(1, "x", Types.StringType.get(), "comment1"));
tableMetadata = tableMetadata.updateSchema(newSchema, 3);
IcebergToGlueConverter.setTableInputInformation(actualTableInputBuilder, tableMetadata);
TableInput actualTableInput = actualTableInputBuilder.build();
// Expected TableInput
TableInput expectedTableInput =
TableInput.builder()
.storageDescriptor(
StorageDescriptor.builder()
.additionalLocations(Sets.newHashSet(tableLocationProperties.values()))
.location("s3://test")
.columns(
ImmutableList.of(
Column.builder()
.name("x")
.type("string")
.comment("comment1")
.parameters(
ImmutableMap.of(
IcebergToGlueConverter.ICEBERG_FIELD_ID, "1",
IcebergToGlueConverter.ICEBERG_FIELD_OPTIONAL, "false",
IcebergToGlueConverter.ICEBERG_FIELD_CURRENT, "true"))
.build(),
Column.builder()
.name("y")
.type("struct<z:int>")
.comment("comment2")
.parameters(
ImmutableMap.of(
IcebergToGlueConverter.ICEBERG_FIELD_ID, "2",
IcebergToGlueConverter.ICEBERG_FIELD_OPTIONAL, "false",
IcebergToGlueConverter.ICEBERG_FIELD_CURRENT, "false"))
.build()))
.build())
.build();
assertThat(actualTableInput.storageDescriptor().additionalLocations())
.as("additionalLocations should match")
.isEqualTo(expectedTableInput.storageDescriptor().additionalLocations());
assertThat(actualTableInput.storageDescriptor().location())
.as("Location should match")
.isEqualTo(expectedTableInput.storageDescriptor().location());
assertThat(actualTableInput.storageDescriptor().columns())
.as("Columns should match")
.isEqualTo(expectedTableInput.storageDescriptor().columns());
}
|
public Printed<K, V> withKeyValueMapper(final KeyValueMapper<? super K, ? super V, String> mapper) {
Objects.requireNonNull(mapper, "mapper can't be null");
this.mapper = mapper;
return this;
}
|
@Test
public void shouldPrintWithKeyValueMapper() throws UnsupportedEncodingException {
final Processor<String, Integer, Void, Void> processor = new PrintedInternal<>(
sysOutPrinter.withKeyValueMapper((key, value) -> String.format("%s -> %d", key, value))
).build("processor").get();
processor.process(new Record<>("hello", 1, 0L));
processor.close();
assertThat(sysOut.toString(StandardCharsets.UTF_8.name()), equalTo("[processor]: hello -> 1\n"));
}
|
@Override
public void finishSink(String dbName, String tableName, List<TSinkCommitInfo> commitInfos, String branch) {
boolean isOverwrite = false;
if (!commitInfos.isEmpty()) {
TSinkCommitInfo sinkCommitInfo = commitInfos.get(0);
if (sinkCommitInfo.isSetIs_overwrite()) {
isOverwrite = sinkCommitInfo.is_overwrite;
}
}
List<TIcebergDataFile> dataFiles = commitInfos.stream()
.map(TSinkCommitInfo::getIceberg_data_file).collect(Collectors.toList());
IcebergTable table = (IcebergTable) getTable(dbName, tableName);
org.apache.iceberg.Table nativeTbl = table.getNativeTable();
Transaction transaction = nativeTbl.newTransaction();
BatchWrite batchWrite = getBatchWrite(transaction, isOverwrite);
if (branch != null) {
batchWrite.toBranch(branch);
}
PartitionSpec partitionSpec = nativeTbl.spec();
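        // Convert each Thrift data file into an Iceberg DataFile, attaching partition data
        // when the table is partitioned.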
for (TIcebergDataFile dataFile : dataFiles) {
Metrics metrics = IcebergApiConverter.buildDataFileMetrics(dataFile);
DataFiles.Builder builder =
DataFiles.builder(partitionSpec)
.withMetrics(metrics)
.withPath(dataFile.path)
.withFormat(dataFile.format)
.withRecordCount(dataFile.record_count)
.withFileSizeInBytes(dataFile.file_size_in_bytes)
.withSplitOffsets(dataFile.split_offsets);
if (partitionSpec.isPartitioned()) {
String relativePartitionLocation = getIcebergRelativePartitionPath(
nativeTbl.location(), dataFile.partition_path);
PartitionData partitionData = partitionDataFromPath(
relativePartitionLocation, partitionSpec);
builder.withPartition(partitionData);
}
batchWrite.addFile(builder.build());
}
try {
batchWrite.commit();
transaction.commitTransaction();
asyncRefreshOthersFeMetadataCache(dbName, tableName);
} catch (Exception e) {
List<String> toDeleteFiles = dataFiles.stream()
.map(TIcebergDataFile::getPath)
.collect(Collectors.toList());
icebergCatalog.deleteUncommittedDataFiles(toDeleteFiles);
LOG.error("Failed to commit iceberg transaction on {}.{}", dbName, tableName, e);
throw new StarRocksConnectorException(e.getMessage());
} finally {
icebergCatalog.invalidateCacheWithoutTable(new CachingIcebergCatalog.IcebergTableName(dbName, tableName));
}
}
|
@Test
public void testFinishSink() {
IcebergHiveCatalog icebergHiveCatalog = new IcebergHiveCatalog(CATALOG_NAME, new Configuration(), DEFAULT_CONFIG);
IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, icebergHiveCatalog,
Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), null);
IcebergTable icebergTable = new IcebergTable(1, "srTableName", CATALOG_NAME, "resource_name", "iceberg_db",
"iceberg_table", "", Lists.newArrayList(), mockedNativeTableA, Maps.newHashMap());
new Expectations(metadata) {
{
metadata.getTable(anyString, anyString);
result = icebergTable;
minTimes = 0;
}
};
TSinkCommitInfo tSinkCommitInfo = new TSinkCommitInfo();
TIcebergDataFile tIcebergDataFile = new TIcebergDataFile();
String path = mockedNativeTableA.location() + "/data/data_bucket=0/c.parquet";
String format = "parquet";
long recordCount = 10;
long fileSize = 2000;
String partitionPath = mockedNativeTableA.location() + "/data/data_bucket=0/";
List<Long> splitOffsets = Lists.newArrayList(4L);
tIcebergDataFile.setPath(path);
tIcebergDataFile.setFormat(format);
tIcebergDataFile.setRecord_count(recordCount);
tIcebergDataFile.setSplit_offsets(splitOffsets);
tIcebergDataFile.setPartition_path(partitionPath);
tIcebergDataFile.setFile_size_in_bytes(fileSize);
tSinkCommitInfo.setIs_overwrite(false);
tSinkCommitInfo.setIceberg_data_file(tIcebergDataFile);
metadata.finishSink("iceberg_db", "iceberg_table", Lists.newArrayList(tSinkCommitInfo), null);
List<FileScanTask> fileScanTasks = Lists.newArrayList(mockedNativeTableA.newScan().planFiles());
Assert.assertEquals(1, fileScanTasks.size());
FileScanTask task = fileScanTasks.get(0);
Assert.assertEquals(0, task.deletes().size());
DataFile dataFile = task.file();
Assert.assertEquals(path, dataFile.path());
Assert.assertEquals(format, dataFile.format().name().toLowerCase(Locale.ROOT));
Assert.assertEquals(1, dataFile.partition().size());
Assert.assertEquals(recordCount, dataFile.recordCount());
Assert.assertEquals(fileSize, dataFile.fileSizeInBytes());
Assert.assertEquals(4, dataFile.splitOffsets().get(0).longValue());
tSinkCommitInfo.setIs_overwrite(true);
recordCount = 22;
fileSize = 3333;
tIcebergDataFile.setRecord_count(recordCount);
tIcebergDataFile.setFile_size_in_bytes(fileSize);
Map<Integer, Long> valueCounts = new HashMap<>();
valueCounts.put(1, 111L);
TIcebergColumnStats columnStats = new TIcebergColumnStats();
columnStats.setColumn_sizes(new HashMap<>());
columnStats.setValue_counts(valueCounts);
columnStats.setNull_value_counts(new HashMap<>());
columnStats.setLower_bounds(new HashMap<>());
columnStats.setUpper_bounds(new HashMap<>());
tIcebergDataFile.setColumn_stats(columnStats);
tSinkCommitInfo.setIceberg_data_file(tIcebergDataFile);
metadata.finishSink("iceberg_db", "iceberg_table", Lists.newArrayList(tSinkCommitInfo), null);
mockedNativeTableA.refresh();
TableScan scan = mockedNativeTableA.newScan().includeColumnStats();
fileScanTasks = Lists.newArrayList(scan.planFiles());
Assert.assertEquals(1, fileScanTasks.size());
task = fileScanTasks.get(0);
Assert.assertEquals(0, task.deletes().size());
dataFile = task.file();
Assert.assertEquals(path, dataFile.path());
Assert.assertEquals(format, dataFile.format().name().toLowerCase(Locale.ROOT));
Assert.assertEquals(1, dataFile.partition().size());
Assert.assertEquals(recordCount, dataFile.recordCount());
Assert.assertEquals(fileSize, dataFile.fileSizeInBytes());
Assert.assertEquals(4, dataFile.splitOffsets().get(0).longValue());
Assert.assertEquals(111L, dataFile.valueCounts().get(1).longValue());
}
|
public static ByteBuf copyInt(int value) {
ByteBuf buf = buffer(4);
buf.writeInt(value);
return buf;
}
|
@Test
public void testWrapInt() {
ByteBuf buffer = copyInt(1, 4);
assertEquals(8, buffer.capacity());
assertEquals(1, buffer.readInt());
assertEquals(4, buffer.readInt());
assertFalse(buffer.isReadable());
buffer.release();
buffer = copyInt(null);
assertEquals(0, buffer.capacity());
buffer.release();
buffer = copyInt(new int[] {});
assertEquals(0, buffer.capacity());
buffer.release();
}
|
@Override
public void copyTo(byte[] dest, int destPos) {
if (totalSize() > 0) {
System.arraycopy(payload, 0, dest, destPos, payload.length);
}
}
|
@Test
public void copyTo() {
byte[] inputBytes = "12345678890".getBytes();
HeapData heap = new HeapData(inputBytes);
byte[] bytes = new byte[inputBytes.length];
heap.copyTo(bytes, 0);
assertEquals(new String(inputBytes), new String(bytes));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.