focal_method | test_case
---|---
public static <T extends Collection<E>, E> T filter(T collection, final Filter<E> filter) {
return IterUtil.filter(collection, filter);
}
|
@Test
public void filterTest() {
final ArrayList<String> list = CollUtil.newArrayList("a", "b", "c");
final Collection<String> filtered = CollUtil.edit(list, t -> t + 1);
assertEquals(CollUtil.newArrayList("a1", "b1", "c1"), filtered);
}
|
public Collection<PluginUpdateAggregate> aggregate(@Nullable Collection<PluginUpdate> pluginUpdates) {
if (pluginUpdates == null || pluginUpdates.isEmpty()) {
return Collections.emptyList();
}
Map<Plugin, PluginUpdateAggregateBuilder> builders = new HashMap<>();
for (PluginUpdate pluginUpdate : pluginUpdates) {
Plugin plugin = pluginUpdate.getPlugin();
PluginUpdateAggregateBuilder builder = builders.get(plugin);
if (builder == null) {
builder = PluginUpdateAggregateBuilder.builderFor(plugin);
builders.put(plugin, builder);
}
builder.add(pluginUpdate);
}
return Lists.newArrayList(transform(builders.values(), PluginUpdateAggregateBuilder::build));
}
|
@Test
public void aggregate_put_pluginUpdates_with_same_plugin_in_the_same_PluginUpdateAggregate() {
PluginUpdate pluginUpdate1 = createPluginUpdate("key1");
PluginUpdate pluginUpdate2 = createPluginUpdate("key1");
PluginUpdate pluginUpdate3 = createPluginUpdate("key1");
Collection<PluginUpdateAggregator.PluginUpdateAggregate> aggregates = underTest.aggregate(ImmutableList.of(
pluginUpdate1,
pluginUpdate2,
pluginUpdate3));
assertThat(aggregates).hasSize(1);
Collection<PluginUpdate> releases = aggregates.iterator().next().getUpdates();
assertThat(releases)
.hasSize(3)
.contains(pluginUpdate1)
.contains(pluginUpdate2)
.contains(pluginUpdate3);
}
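As a side note on the grouping pattern in aggregate(): the explicit get / null-check / put sequence can also be written with Map.computeIfAbsent. A minimal generic sketch follows; String keys and List buckets stand in for Plugin and PluginUpdateAggregateBuilder, which are types assumed from the snippet above, and the class name is illustrative only.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
// Illustrative only: the same group-by-key pattern as aggregate(), using computeIfAbsent.
public class GroupByKeySketch {
    public static void main(String[] args) {
        List<String> updates = List.of("key1:a", "key1:b", "key2:c");
        Map<String, List<String>> byKey = new HashMap<>();
        for (String update : updates) {
            String key = update.substring(0, update.indexOf(':'));
            // creates the bucket the first time a key is seen, replacing the explicit null check
            byKey.computeIfAbsent(key, k -> new ArrayList<>()).add(update);
        }
        System.out.println(byKey); // e.g. {key1=[key1:a, key1:b], key2=[key2:c]} (HashMap order may vary)
    }
}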
|
@Override
public String getColumnLabel(final int column) {
Preconditions.checkArgument(1 == column);
return generatedKeyColumn;
}
|
@Test
void assertGetColumnLabel() throws SQLException {
assertThat(actualMetaData.getColumnLabel(1), is("order_id"));
}
|
@Override
@CheckForNull
public String message(Locale locale, String key, @Nullable String defaultValue, Object... parameters) {
String bundleKey = propertyToBundles.get(key);
String value = null;
if (bundleKey != null) {
try {
ResourceBundle resourceBundle = ResourceBundle.getBundle(bundleKey, locale, classloader, control);
value = resourceBundle.getString(key);
} catch (MissingResourceException e1) {
// ignore
}
}
if (value == null) {
value = defaultValue;
}
return formatMessage(value, parameters);
}
|
@Test
public void load_core_bundle() {
assertThat(underTest.message(Locale.ENGLISH, "any", null)).isEqualTo("Any");
}
|
List<Condition> run(boolean useKRaft) {
List<Condition> warnings = new ArrayList<>();
checkKafkaReplicationConfig(warnings);
checkKafkaBrokersStorage(warnings);
if (useKRaft) {
// Additional checks done for KRaft clusters
checkKRaftControllerStorage(warnings);
checkKRaftControllerCount(warnings);
checkKafkaMetadataVersion(warnings);
checkInterBrokerProtocolVersionInKRaft(warnings);
checkLogMessageFormatVersionInKRaft(warnings);
} else {
// Additional checks done for ZooKeeper-based clusters
checkKafkaLogMessageFormatVersion(warnings);
checkKafkaInterBrokerProtocolVersion(warnings);
checkKRaftMetadataStorageConfiguredForZooBasedCLuster(warnings);
}
return warnings;
}
|
@Test
public void testKRaftWithEvenNumberOfControllers() {
KafkaNodePool controllers = new KafkaNodePoolBuilder(CONTROLLERS)
.editSpec()
.withReplicas(4)
.endSpec()
.build();
KafkaSpecChecker checker = generateChecker(KAFKA, List.of(controllers, POOL_A), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE);
List<Condition> warnings = checker.run(true);
assertThat(warnings, hasSize(1));
assertThat(warnings.get(0).getReason(), is("KafkaKRaftControllerNodeCount"));
assertThat(warnings.get(0).getMessage(), is("Running KRaft controller quorum with an odd number of nodes is recommended."));
}
|
public ExecutorServiceBuilder executorService(String nameFormat) {
return new ExecutorServiceBuilder(this, nameFormat);
}
|
@Test
void executorServiceThreadFactory() {
final String expectedName = "DropWizard ThreadFactory Test";
final String expectedNamePattern = expectedName + "-%d";
final ThreadFactory tfactory = buildThreadFactory(expectedNamePattern);
final ExecutorService executorService = environment.executorService("Dropwizard Service", tfactory).build();
assertThat(executorService.submit(() -> Thread.currentThread().getName()))
.succeedsWithin(1, TimeUnit.SECONDS, as(STRING))
.startsWith(expectedName);
}
|
@Override
public void updateUserPassword(Long id, UserProfileUpdatePasswordReqVO reqVO) {
// Validate the old password
validateOldPassword(id, reqVO.getOldPassword());
// Perform the update
AdminUserDO updateObj = new AdminUserDO().setId(id);
updateObj.setPassword(encodePassword(reqVO.getNewPassword())); // encrypt the new password
userMapper.updateById(updateObj);
}
|
@Test
public void testUpdateUserPassword02_success() {
// mock data
AdminUserDO dbUser = randomAdminUserDO();
userMapper.insert(dbUser);
// prepare parameters
Long userId = dbUser.getId();
String password = "yudao";
// mock methods
when(passwordEncoder.encode(anyString())).then(
(Answer<String>) invocationOnMock -> "encode:" + invocationOnMock.getArgument(0));
// invoke
userService.updateUserPassword(userId, password);
// assert
AdminUserDO user = userMapper.selectById(userId);
assertEquals("encode:" + password, user.getPassword());
}
|
public static ClusterOperatorConfig buildFromMap(Map<String, String> map) {
warningsForRemovedEndVars(map);
KafkaVersion.Lookup lookup = parseKafkaVersions(map.get(STRIMZI_KAFKA_IMAGES), map.get(STRIMZI_KAFKA_CONNECT_IMAGES), map.get(STRIMZI_KAFKA_MIRROR_MAKER_IMAGES), map.get(STRIMZI_KAFKA_MIRROR_MAKER_2_IMAGES));
return buildFromMap(map, lookup);
}
|
@Test
public void testConfigParsingWithMissingEnvVar() {
Map<String, String> envVars = new HashMap<>(5);
envVars.put(ClusterOperatorConfig.STRIMZI_KAFKA_IMAGES, KafkaVersionTestUtils.getKafkaImagesEnvVarString());
envVars.put(ClusterOperatorConfig.STRIMZI_KAFKA_CONNECT_IMAGES, KafkaVersionTestUtils.getKafkaConnectImagesEnvVarString());
envVars.put(ClusterOperatorConfig.STRIMZI_KAFKA_MIRROR_MAKER_IMAGES, KafkaVersionTestUtils.getKafkaMirrorMakerImagesEnvVarString());
envVars.put(ClusterOperatorConfig.STRIMZI_KAFKA_MIRROR_MAKER_2_IMAGES, KafkaVersionTestUtils.getKafkaMirrorMaker2ImagesEnvVarString());
for (Map.Entry<String, String> envVar : envVars.entrySet()) {
Map<String, String> editedEnvVars = new HashMap<>(envVars);
editedEnvVars.remove(envVar.getKey());
InvalidConfigurationException e = assertThrows(InvalidConfigurationException.class, () -> ClusterOperatorConfig.buildFromMap(editedEnvVars));
assertThat(e.getMessage(), containsString(envVar.getKey()));
}
}
|
static void invalidateAllSessions() {
invalidateAllSessionsExceptCurrentSession(null);
}
|
@Test
public void testInvalidateAllSessions() {
final SessionTestImpl session = new SessionTestImpl(true);
sessionListener.sessionCreated(new HttpSessionEvent(session));
SessionListener.invalidateAllSessions();
if (!session.isInvalidated()) {
fail("invalidateAllSessions");
}
}
|
@Override
public void onEvent(Event event) {
if (event instanceof MetadataEvent.InstanceMetadataEvent) {
handleInstanceMetadataEvent((MetadataEvent.InstanceMetadataEvent) event);
} else if (event instanceof MetadataEvent.ServiceMetadataEvent) {
handleServiceMetadataEvent((MetadataEvent.ServiceMetadataEvent) event);
} else {
handleClientDisconnectEvent((ClientEvent.ClientDisconnectEvent) event);
}
}
|
@Test
void testOnEvent() {
Mockito.when(instanceMetadataEvent.getService()).thenReturn(service);
Mockito.when(instanceMetadataEvent.getMetadataId()).thenReturn(METADATA_ID);
Mockito.when(serviceMetadataEvent.getService()).thenReturn(service);
Mockito.when(clientDisconnectEvent.getClient()).thenReturn(client);
namingMetadataManager.onEvent(instanceMetadataEvent);
Mockito.verify(instanceMetadataEvent, Mockito.times(2)).getMetadataId();
Mockito.verify(instanceMetadataEvent, Mockito.times(2)).getService();
namingMetadataManager.onEvent(serviceMetadataEvent);
Mockito.verify(serviceMetadataEvent).getService();
namingMetadataManager.onEvent(clientDisconnectEvent);
Mockito.verify(clientDisconnectEvent).getClient();
}
|
public static @Nullable CastRule<?, ?> resolve(LogicalType inputType, LogicalType targetType) {
return INSTANCE.internalResolve(inputType, targetType);
}
|
@Test
void testResolveIntToBigIntWithDistinct() {
assertThat(CastRuleProvider.resolve(INT, DISTINCT_BIG_INT))
.isSameAs(NumericPrimitiveCastRule.INSTANCE);
}
|
public static NullNode getInstance() {
return instance;
}
|
@Test
void testGetInstance() {
final var instance = NullNode.getInstance();
assertNotNull(instance);
assertSame(instance, NullNode.getInstance());
}
|
public static Optional<String> urlEncode(String raw) {
try {
return Optional.of(URLEncoder.encode(raw, UTF_8.toString()));
} catch (UnsupportedEncodingException e) {
return Optional.empty();
}
}
|
@Test
public void urlEncode_whenAlreadyEncoded_encodesAgain() {
assertThat(urlEncode("%2F")).hasValue("%252F");
assertThat(urlEncode("%252F")).hasValue("%25252F");
}
|
@Override
public void post(SpanAdapter span, Exchange exchange, Endpoint endpoint) {
if (exchange.isFailed()) {
span.setError(true);
if (exchange.getException() != null) {
Map<String, String> logEvent = new HashMap<>();
logEvent.put("event", "error");
logEvent.put("error.kind", "Exception");
logEvent.put("message", exchange.getException().getMessage());
span.log(logEvent);
}
}
}
|
@Test
public void testPostExchangeFailed() {
Exchange exchange = Mockito.mock(Exchange.class);
Mockito.when(exchange.isFailed()).thenReturn(true);
Exception e = new Exception("Test Message");
Mockito.when(exchange.getException()).thenReturn(e);
SpanDecorator decorator = new AbstractSpanDecorator() {
@Override
public String getComponent() {
return null;
}
@Override
public String getComponentClassName() {
return null;
}
};
MockSpanAdapter span = new MockSpanAdapter();
decorator.post(span, exchange, null);
assertEquals(true, span.tags().get(TagConstants.ERROR));
assertEquals(1, span.logEntries().size());
assertEquals("error", span.logEntries().get(0).fields().get("event"));
assertEquals("Exception", span.logEntries().get(0).fields().get("error.kind"));
assertEquals(e.getMessage(), span.logEntries().get(0).fields().get("message"));
}
|
public void encode(DataSchema schema) throws IOException
{
encode(schema, true);
}
|
@Test
public void testEncodeWithPreserve() throws IOException {
SchemaParser parser = new SchemaParser();
String commonSchemaJson =
"{ \"type\": \"record\", \"name\": \"ReferencedFieldType\", \"namespace\": \"com.linkedin.common\", \"fields\" : []}";
parser.parse(commonSchemaJson);
String originalSchemaJsonOne = "{ " + " \"type\": \"record\"," + " \"name\": \"OriginalOne\","
+ " \"namespace\": \"com.linkedin.test.data\","
+ " \"include\": [ \"com.linkedin.common.ReferencedFieldType\" ]," + " \"fields\" : ["
+ " {\"name\": \"inlineFieldType\", \"type\": { \"type\": \"record\", \"name\": \"InlineOne\", \"fields\": [] }},"
+ " {\"name\": \"referencedFieldType\", \"type\": \"com.linkedin.common.ReferencedFieldType\" },"
+ " {\"name\": \"referencedTyperefType\", \"type\": { \"type\": \"typeref\", \"name\": \"ReferencedTyperef\", \"ref\": \"com.linkedin.common.ReferencedFieldType\" }}"
+ " ]" + "}";
parser.parse(originalSchemaJsonOne);
String originalSchemaJsonTwo = "{ " + " \"type\": \"record\"," + " \"name\": \"OriginalTwo\","
+ " \"namespace\": \"com.linkedin.test.data\","
+ " \"include\": [ \"com.linkedin.common.ReferencedFieldType\" ]," + " \"fields\" : ["
+ " {\"name\": \"inlineFieldType\", \"type\": { \"type\": \"record\", \"name\": \"InlineTwo\", \"fields\": [] }},"
+ " {\"name\": \"referencedFieldType\", \"type\": \"com.linkedin.common.ReferencedFieldType\" },"
+ " {\"name\": \"referencedTyperefType\", \"type\": { \"type\": \"typeref\", \"name\": \"ReferencedTyperef\", \"ref\": \"com.linkedin.common.ReferencedFieldType\" }}"
+ " ]" + "}";
parser.parse(originalSchemaJsonTwo);
JsonBuilder originalBuilder = new JsonBuilder(JsonBuilder.Pretty.INDENTED);
SchemaToJsonEncoder originalEncoder = new SchemaToJsonEncoder(originalBuilder);
originalEncoder.setTypeReferenceFormat(SchemaToJsonEncoder.TypeReferenceFormat.PRESERVE);
for (DataSchema schema : parser.topLevelDataSchemas()) {
originalEncoder.encode(schema);
}
String expected = String.join("\n", commonSchemaJson, originalSchemaJsonOne, originalSchemaJsonTwo);
assertEqualsIgnoringSpacing(originalBuilder.result(), expected);
}
|
public Class<T> getType() {
return type;
}
|
@Test
public void testGetType() {
assertEquals(ClusterState.class, clusterStateChange.getType());
assertEquals(ClusterState.class, clusterStateChangeSameAttributes.getType());
assertEquals(Version.class, clusterStateChangeOtherType.getType());
assertEquals(ClusterState.class, clusterStateChangeOtherNewState.getType());
}
|
public AuthenticationResponse authenticateUser(String pluginId, final String username, final String password, List<SecurityAuthConfig> authConfigs, List<PluginRoleConfig> pluginRoleConfigs) {
errorOutIfEmpty(authConfigs, pluginId);
return pluginRequestHelper.submitRequest(pluginId, REQUEST_AUTHENTICATE_USER, new DefaultPluginInteractionCallback<>() {
@Override
public String requestBody(String resolvedExtensionVersion) {
return getMessageConverter(resolvedExtensionVersion).authenticateUserRequestBody(username, password, authConfigs, pluginRoleConfigs);
}
@Override
public AuthenticationResponse onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
return getMessageConverter(resolvedExtensionVersion).getAuthenticatedUserFromResponseBody(responseBody);
}
});
}
|
@Test
void authenticateUser_shouldErrorOutInAbsenceOfSecurityAuthConfigs() {
Executable codeThatShouldThrowError = () -> authorizationExtension.authenticateUser(PLUGIN_ID, "bob", "secret", null, null);
MissingAuthConfigsException exception = assertThrows(MissingAuthConfigsException.class, codeThatShouldThrowError);
assertThat(exception.getMessage()).isEqualTo("No AuthConfigs configured for plugin: plugin-id, Plugin would need at-least one auth_config to authenticate user.");
verifyNoMoreInteractions(pluginManager);
}
|
public FEELFnResult<BigDecimal> invoke(@ParameterName("string") String string) {
if ( string == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "string", "cannot be null"));
} else {
return FEELFnResult.ofResult(NumberEvalHelper.getBigDecimalOrNull(string.codePointCount(0, string.length())));
}
}
|
@Test
void invokeEmptyString() {
FunctionTestUtil.assertResult(stringLengthFunction.invoke(""), BigDecimal.ZERO);
}
|
@Override
public boolean isHidden() throws FileSystemException {
return resolvedFileObject.isHidden();
}
|
@Test
public void testDelegatesIsHidden() throws FileSystemException {
when( resolvedFileObject.isHidden() ).thenReturn( true );
assertTrue( fileObject.isHidden() );
when( resolvedFileObject.isHidden() ).thenReturn( false );
assertFalse( fileObject.isHidden() );
verify( resolvedFileObject, times( 2 ) ).isHidden();
}
|
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public HistoryInfo get() {
return getHistoryInfo();
}
|
@Test
public void testInfo() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("info").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
response.getType().toString());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyHSInfo(json.getJSONObject("historyInfo"), appContext);
}
|
public ProjectCleaner purge(DbSession session, String rootUuid, String projectUuid, Configuration projectConfig, Set<String> disabledComponentUuids) {
long start = System.currentTimeMillis();
profiler.reset();
periodCleaner.clean(session, rootUuid, projectConfig);
PurgeConfiguration configuration = newDefaultPurgeConfiguration(projectConfig, rootUuid, projectUuid, disabledComponentUuids);
purgeDao.purge(session, configuration, purgeListener, profiler);
session.commit();
logProfiling(start, projectConfig);
return this;
}
|
@Test
public void profiling_when_property_is_true() {
settings.setProperty(CoreProperties.PROFILING_LOG_PROPERTY, true);
when(profiler.getProfilingResult(anyLong())).thenReturn(List.of(DUMMY_PROFILE_CONTENT));
underTest.purge(mock(DbSession.class), "root", "project", settings.asConfig(), emptySet());
verify(profiler).getProfilingResult(anyLong());
assertThat(logTester.getLogs(Level.INFO).stream()
.map(LogAndArguments::getFormattedMsg)
.collect(Collectors.joining()))
.contains("Profiling for purge")
.contains(DUMMY_PROFILE_CONTENT);
}
|
@Override
public void pre(SpanAdapter span, Exchange exchange, Endpoint endpoint) {
super.pre(span, exchange, endpoint);
String httpUrl = getHttpURL(exchange, endpoint);
if (httpUrl != null) {
span.setTag(TagConstants.HTTP_URL, httpUrl);
}
span.setTag(TagConstants.HTTP_METHOD, getHttpMethod(exchange, endpoint));
}
|
@Test
public void testPreUri() {
Endpoint endpoint = Mockito.mock(Endpoint.class);
Exchange exchange = Mockito.mock(Exchange.class);
Message message = Mockito.mock(Message.class);
Mockito.when(endpoint.getEndpointUri()).thenReturn(TEST_URI);
Mockito.when(exchange.getIn()).thenReturn(message);
Mockito.when(message.getHeader(Exchange.HTTP_URI, String.class)).thenReturn(TEST_URI);
SpanDecorator decorator = new AbstractHttpSpanDecorator() {
@Override
public String getComponent() {
return null;
}
@Override
public String getComponentClassName() {
return null;
}
};
MockSpanAdapter span = new MockSpanAdapter();
decorator.pre(span, exchange, endpoint);
assertEquals(TEST_URI, span.tags().get(TagConstants.HTTP_URL));
assertTrue(span.tags().containsKey(TagConstants.HTTP_METHOD));
}
|
public boolean execute(final File clusterDir)
{
if (!clusterDir.exists() || !clusterDir.isDirectory())
{
throw new IllegalArgumentException("invalid cluster directory: " + clusterDir.getAbsolutePath());
}
final RecordingLog.Entry entry = ClusterTool.findLatestValidSnapshot(clusterDir);
if (null == entry)
{
throw new ClusterException("no valid snapshot found");
}
final long recordingId = entry.recordingId;
final ClusterNodeControlProperties properties = ClusterTool.loadControlProperties(clusterDir);
final RecordingSignalCapture recordingSignalCapture = new RecordingSignalCapture();
try (Aeron aeron = Aeron.connect(new Aeron.Context().aeronDirectoryName(properties.aeronDirectoryName));
AeronArchive archive = AeronArchive.connect(new AeronArchive.Context()
.controlRequestChannel(archiveLocalRequestChannel)
.controlRequestStreamId(archiveLocalRequestStreamId)
.controlResponseChannel(IPC_CHANNEL)
.recordingSignalConsumer(recordingSignalCapture)
.aeron(aeron)))
{
final SnapshotReader snapshotReader = new SnapshotReader();
replayLocalSnapshotRecording(aeron, archive, recordingId, snapshotReader);
final long targetNextServiceSessionId = max(
max(snapshotReader.nextServiceSessionId, snapshotReader.maxClusterSessionId + 1),
snapshotReader.logServiceSessionId + 1 + snapshotReader.pendingServiceMessageCount);
final long targetLogServiceSessionId =
targetNextServiceSessionId - 1 - snapshotReader.pendingServiceMessageCount;
if (targetNextServiceSessionId != snapshotReader.nextServiceSessionId ||
targetLogServiceSessionId != snapshotReader.logServiceSessionId ||
0 != snapshotReader.pendingServiceMessageCount &&
(targetLogServiceSessionId + 1 != snapshotReader.minClusterSessionId ||
targetNextServiceSessionId - 1 != snapshotReader.maxClusterSessionId))
{
final long tempRecordingId = createNewSnapshotRecording(
aeron, archive, recordingId, targetLogServiceSessionId, targetNextServiceSessionId);
final long stopPosition = awaitRecordingStopPosition(archive, recordingId);
final long newStopPosition = awaitRecordingStopPosition(archive, tempRecordingId);
if (stopPosition != newStopPosition)
{
throw new ClusterException("new snapshot recording incomplete: expectedStopPosition=" +
stopPosition + ", actualStopPosition=" + newStopPosition);
}
recordingSignalCapture.reset();
archive.truncateRecording(recordingId, 0);
recordingSignalCapture.awaitSignalForRecordingId(archive, recordingId, RecordingSignal.DELETE);
final long replicationId = archive.replicate(
tempRecordingId, recordingId, archive.context().controlRequestStreamId(), IPC_CHANNEL, null);
recordingSignalCapture.reset();
recordingSignalCapture.awaitSignalForCorrelationId(archive, replicationId, RecordingSignal.SYNC);
final long replicatedStopPosition = recordingSignalCapture.position();
if (stopPosition != replicatedStopPosition)
{
throw new ClusterException("incomplete replication of the new recording: expectedStopPosition=" +
stopPosition + ", replicatedStopPosition=" + replicatedStopPosition);
}
recordingSignalCapture.reset();
archive.purgeRecording(tempRecordingId);
recordingSignalCapture.awaitSignalForRecordingId(archive, tempRecordingId, RecordingSignal.DELETE);
return true;
}
}
return false;
}
|
@Test
void executeThrowsClusterExceptionIfClusterDirDoesNotContainARecordingLog(final @TempDir File tempDir)
{
final File clusterDir = new File(tempDir, "cluster-dir");
assertTrue(clusterDir.mkdir());
final ClusterException exception = assertThrowsExactly(
ClusterException.class,
() -> new ConsensusModuleSnapshotPendingServiceMessagesPatch().execute(clusterDir));
assertInstanceOf(IOException.class, exception.getCause());
}
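A brief worked example of the session-id arithmetic in execute(), using hypothetical snapshot values: with pendingServiceMessageCount = 3, logServiceSessionId = 10, nextServiceSessionId = 12 and cluster session ids spanning 11..13, targetNextServiceSessionId = max(max(12, 13 + 1), 10 + 1 + 3) = 14 and targetLogServiceSessionId = 14 - 1 - 3 = 10. Because 14 differs from the recorded nextServiceSessionId of 12, the patch creates a corrected snapshot recording, verifies its stop position, and replicates it over the original recording.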
|
@Override
public void blame(BlameInput input, BlameOutput output) {
File basedir = input.fileSystem().baseDir();
try (Repository repo = JGitUtils.buildRepository(basedir.toPath())) {
File gitBaseDir = repo.getWorkTree();
if (cloneIsInvalid(gitBaseDir)) {
return;
}
Profiler profiler = Profiler.create(LOG);
profiler.startDebug("Collecting committed files");
Map<String, InputFile> inputFileByGitRelativePath = getCommittedFilesToBlame(repo, gitBaseDir, input);
profiler.stopDebug();
BlameAlgorithmEnum blameAlgorithmEnum = this.blameStrategy.getBlameAlgorithm(Runtime.getRuntime().availableProcessors(), inputFileByGitRelativePath.size());
LOG.debug("Using {} strategy to blame files", blameAlgorithmEnum);
if (blameAlgorithmEnum == GIT_FILES_BLAME) {
blameWithFilesGitCommand(output, repo, inputFileByGitRelativePath);
} else {
blameWithNativeGitCommand(output, repo, inputFileByGitRelativePath, gitBaseDir);
}
}
}
|
@Test
@UseDataProvider("blameAlgorithms")
public void return_early_when_shallow_clone_detected(BlameAlgorithmEnum strategy) throws IOException {
CompositeBlameCommand blameCommand = new CompositeBlameCommand(analysisWarnings, pathResolver, jGitBlameCommand, nativeGitBlameCommand, (p, f) -> strategy);
File projectDir = createNewTempFolder();
javaUnzip("shallow-git.zip", projectDir);
File baseDir = new File(projectDir, "shallow-git");
setUpBlameInputWithFile(baseDir.toPath());
// register warning with default wrapper
BlameCommand.BlameOutput output = mock(BlameCommand.BlameOutput.class);
blameCommand.blame(input, output);
verify(analysisWarnings).addUnique(startsWith("Shallow clone detected"));
}
|
@Udf(description = "Converts a number of days since epoch to a DATE value.")
public Date fromDays(final int days) {
return new Date(TimeUnit.DAYS.toMillis(days));
}
|
@Test
public void shouldConvertToTimestamp() {
assertThat(udf.fromDays(50), is(new Date(4320000000L)));
assertThat(udf.fromDays(-50), is(new Date(-4320000000L)));
}
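Both expected values follow from 50 days × 86,400,000 ms per day = 4,320,000,000 ms, applied on either side of the Unix epoch.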
|
public static void info(Logger logger, String format, Object... arguments) {
if (!isEligible(format))
return;
logger.info(format, arguments);
}
|
@Test
public void testLogger1() throws Exception {
OneTimeLogger.info(log, "Format: {}; Pew: {};", 1, 2);
}
|
@Override
public Path find() throws BackgroundException {
return this.find(Context.files);
}
|
@Test
public void testFindWithDefaultPath() throws Exception {
final Host bookmark = new Host(new NextcloudProtocol(), new Credentials("u"));
final NextcloudHomeFeature feature = new NextcloudHomeFeature(bookmark);
for(String s : variants("remote.php/webdav")) {
bookmark.setDefaultPath(s);
assertEquals(new Path("/ocs/v1.php", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.ocs));
assertEquals(new Path("/remote.php/dav/files/u", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.files));
}
for(String s : variants("remote.php/webdav/d")) {
bookmark.setDefaultPath(s);
assertEquals(new Path("/ocs/v1.php", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.ocs));
assertEquals(new Path("/remote.php/dav/files/u/d", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.files));
assertEquals(new Path("/remote.php/dav/meta", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.meta));
assertEquals(new Path("/remote.php/dav/versions/u", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.versions));
}
for(String s : variants("remote.php/dav/files/u")) {
bookmark.setDefaultPath(s);
assertEquals(new Path("/ocs/v1.php", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.ocs));
assertEquals(new Path("/remote.php/dav/files/u/", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.files));
assertEquals(new Path("/remote.php/dav/meta", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.meta));
assertEquals(new Path("/remote.php/dav/versions/u", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.versions));
}
for(String s : variants("remote.php/dav/files/u/d")) {
bookmark.setDefaultPath(s);
assertEquals(new Path("/ocs/v1.php", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.ocs));
assertEquals(new Path("/remote.php/dav/files/u/d", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.files));
assertEquals(new Path("/remote.php/dav/meta", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.meta));
assertEquals(new Path("/remote.php/dav/versions/u", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.versions));
}
for(String s : variants("d")) {
bookmark.setDefaultPath(s);
assertEquals(new Path("/ocs/v1.php", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.ocs));
assertEquals(new Path("/remote.php/dav/files/u/d", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.files));
}
for(String s : variants("w/remote.php/webdav")) {
bookmark.setDefaultPath(s);
assertEquals(new Path("/w/ocs/v1.php", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.ocs));
assertEquals(new Path("/w/remote.php/dav/files/u", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.files));
}
for(String s : variants("w/remote.php/webdav/d")) {
bookmark.setDefaultPath(s);
assertEquals(new Path("/w/ocs/v1.php", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.ocs));
assertEquals(new Path("/w/remote.php/dav/files/u/d", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.files));
}
for(String s : variants("w/remote.php/dav/files/u/d")) {
bookmark.setDefaultPath(s);
assertEquals(new Path("/w/ocs/v1.php", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.ocs));
assertEquals(new Path("/w/remote.php/dav/files/u/d", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.files));
}
}
|
@Override
public long getSplitBacklogBytes() {
return backlogReader.computeMessageStats(fetchOffset).getMessageBytes();
}
|
@Test
public void getSplitBacklogBytes() throws Exception {
startSubscriber();
advancePastMessage(2);
doReturn(ComputeMessageStatsResponse.newBuilder().setMessageBytes(42).build())
.when(backlogReader)
.computeMessageStats(Offset.of(3));
assertEquals(42, reader.getSplitBacklogBytes());
}
|
@Override
public OID getPrivacyProtocol() {
return this.privacyProtocol;
}
|
@Test
public void testGetPrivacyProtocol() {
assertEquals(PrivAES128.ID, v3SnmpConfiguration.getPrivacyProtocol());
}
|
public static Builder custom() {
return new Builder();
}
|
@Test(expected = IllegalArgumentException.class)
public void slowCallRateThresholdAboveHundredShouldFail() {
custom().slowCallRateThreshold(101).build();
}
|
public void addValue(long value)
{
sum.addAndGet(value);
count.incrementAndGet();
max.accumulateAndGet(value, Math::max);
min.accumulateAndGet(value, Math::min);
}
|
@Test
public void testJsonWhenUnitIsUnavailable()
{
RuntimeMetric metric1 = new RuntimeMetric(TEST_METRIC_NAME, NONE);
metric1.addValue(101);
metric1.addValue(202);
RuntimeMetric metric2 = new RuntimeMetric(TEST_METRIC_NAME, null);
metric2.addValue(202);
metric2.addValue(101);
String json = "{\"name\" : \"test_metric\", \"sum\" : 303, \"count\" : 2, \"max\" : 202, \"min\" : 101}";
RuntimeMetric actual = JsonCodec.jsonCodec(RuntimeMetric.class).fromJson(json);
assertRuntimeMetricEquals(actual, metric1);
assertRuntimeMetricEquals(actual, metric2);
}
|
protected String[] getTwoPhaseArgs(Method method, Class<?>[] argsClasses) {
Annotation[][] parameterAnnotations = method.getParameterAnnotations();
String[] keys = new String[parameterAnnotations.length];
/*
* get each parameter's key
* if the method's parameter list looks like
* (BusinessActionContext, @BusinessActionContextParameter("a") A a, @BusinessActionContextParameter("b") B b)
* the keys will be [null, a, b]
*/
for (int i = 0; i < parameterAnnotations.length; i++) {
for (int j = 0; j < parameterAnnotations[i].length; j++) {
if (parameterAnnotations[i][j] instanceof BusinessActionContextParameter) {
BusinessActionContextParameter param = (BusinessActionContextParameter) parameterAnnotations[i][j];
String key = ActionContextUtil.getParamNameFromAnnotation(param);
keys[i] = key;
break;
}
}
if (keys[i] == null && !(argsClasses[i].equals(BusinessActionContext.class))) {
throw new IllegalArgumentException("non-BusinessActionContext parameter should use annotation " +
"BusinessActionContextParameter");
}
}
return keys;
}
|
@Test
public void testGetTwoPhaseArgs() throws Exception {
Class<?>[] argsCommitClasses = new Class[]{BusinessActionContext.class, TccParam.class, Integer.class};
Method commitMethod = TccAction.class.getMethod("commitWithArg", argsCommitClasses);
Assertions.assertThrows(IllegalArgumentException.class, () -> {
tccRegisterResourceParser.getTwoPhaseArgs(commitMethod, argsCommitClasses);
});
Class<?>[] argsRollbackClasses = new Class[]{BusinessActionContext.class, TccParam.class};
Method rollbackMethod = TccAction.class.getMethod("rollbackWithArg", argsRollbackClasses);
String[] keys = tccRegisterResourceParser.getTwoPhaseArgs(rollbackMethod, argsRollbackClasses);
Assertions.assertNull(keys[0]);
Assertions.assertEquals("tccParam", keys[1]);
}
|
@Override
public Map<String, Double> getValue() {
if (!lifetimeBaseline.isSet()) { return Map.of(); }
if (!retentionWindows.isSet()) { return Map.of(); }
final Optional<FlowCapture> possibleCapture = doCapture();
if (possibleCapture.isEmpty()) { return Map.of(); }
final FlowCapture currentCapture = possibleCapture.get();
final Map<String, Double> rates = new LinkedHashMap<>();
this.retentionWindows.get()
.forEach(window -> window.baseline(currentCapture.nanoTime())
.or(() -> windowDefaultBaseline(window))
.map((baseline) -> calculateRate(currentCapture, baseline))
.orElseGet(OptionalDouble::empty)
.ifPresent((rate) -> rates.put(window.policy.policyName(), rate)));
injectLifetime(currentCapture, rates);
return Collections.unmodifiableMap(rates);
}
|
@Test
public void testFunctionalityWithinSecondsOfInitialization() {
final ManualAdvanceClock clock = new ManualAdvanceClock(Instant.now());
final LongCounter numeratorMetric = new LongCounter(MetricKeys.EVENTS_KEY.asJavaString());
final Metric<Number> denominatorMetric = new UptimeMetric("uptime", clock::nanoTime).withUnitsPrecise(UptimeMetric.ScaleUnits.SECONDS);
final ExtendedFlowMetric flowMetric = new ExtendedFlowMetric(clock::nanoTime, "flow", numeratorMetric, denominatorMetric);
assertThat(flowMetric.getValue(), is(anEmptyMap()));
clock.advance(Duration.ofSeconds(1));
numeratorMetric.increment(17);
// clock has advanced 1s, but we have performed no explicit captures.
final Map<String, Double> flowMetricValue = flowMetric.getValue();
assertThat(flowMetricValue, is(not(anEmptyMap())));
assertThat(flowMetricValue, hasEntry("current", 17.0));
assertThat(flowMetricValue, hasEntry("lifetime", 17.0));
}
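For reference, the numbers in this test line up as 17 counted events over one second of advanced clock time, so both the "current" window rate and the injected "lifetime" rate evaluate to 17 / 1 s = 17.0.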
|
public static long convertBytesToLong(byte[] bytes) {
byte[] paddedBytes = paddingTo8Byte(bytes);
long temp = 0L;
for (int i = 7; i >= 0; i--) {
temp = temp | (((long) paddedBytes[i] & 0xff) << (7 - i) * 8);
}
return temp;
}
|
@Test
public void testConvertBytesToLongWithPadding() {
byte[] bytes = new byte[2];
bytes[0] = 2;
bytes[1] = 127;
assertEquals(BinaryUtil.convertBytesToLong(bytes), 2 * 256 + 127);
}
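A minimal, self-contained sketch of the big-endian packing performed by convertBytesToLong, assuming paddingTo8Byte left-pads the input with zero bytes (which the expected value 2 * 256 + 127 in the test implies); the class and the helper name pack are illustrative only.
// Illustrative sketch of the packing loop in convertBytesToLong.
public class BytesToLongSketch {
    static long pack(byte[] padded) { // padded must have length 8
        long result = 0L;
        for (int i = 7; i >= 0; i--) {
            // byte at index i fills bits (7 - i) * 8 .. (7 - i) * 8 + 7, so index 0 is most significant
            result |= ((long) padded[i] & 0xff) << ((7 - i) * 8);
        }
        return result;
    }
    public static void main(String[] args) {
        byte[] padded = {0, 0, 0, 0, 0, 0, 2, 127}; // {2, 127} left-padded to 8 bytes
        System.out.println(pack(padded)); // prints 639, i.e. 2 * 256 + 127
    }
}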
|
@Override
public double variance() {
return r * (1 - p) / (p * p);
}
|
@Test
public void testVariance() {
System.out.println("variance");
NegativeBinomialDistribution instance = new NegativeBinomialDistribution(3, 0.3);
instance.rand();
assertEquals(7/0.3, instance.variance(), 1E-7);
}
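For r = 3 and p = 0.3, r(1 − p)/p² = 3 × 0.7 / 0.09 ≈ 23.33, which equals the 7 / 0.3 expected by the assertion; the preceding rand() call only exercises sampling and does not affect the closed-form variance.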
|
public String buildSql(List<HiveColumnHandle> columns, TupleDomain<HiveColumnHandle> tupleDomain)
{
// SELECT clause
StringBuilder sql = new StringBuilder("SELECT ");
if (columns.isEmpty()) {
sql.append("' '");
}
else {
String columnNames = columns.stream()
.map(this::getFullyQualifiedColumnName)
.collect(joining(", "));
sql.append(columnNames);
}
// FROM clause
sql.append(" FROM ");
sql.append(DATA_SOURCE);
// WHERE clause
List<String> clauses = toConjuncts(columns, tupleDomain);
if (!clauses.isEmpty()) {
sql.append(" WHERE ")
.append(Joiner.on(" AND ").join(clauses));
}
return sql.toString();
}
|
@Test
public void testEmptyColumns()
{
// CSV
IonSqlQueryBuilder queryBuilder = new IonSqlQueryBuilder(createTestFunctionAndTypeManager(), CSV);
assertEquals("SELECT ' ' FROM S3Object s", queryBuilder.buildSql(ImmutableList.of(), TupleDomain.all()));
// JSON
queryBuilder = new IonSqlQueryBuilder(createTestFunctionAndTypeManager(), JSON);
assertEquals(queryBuilder.buildSql(ImmutableList.of(), TupleDomain.all()), "SELECT ' ' FROM S3Object s");
}
|
@Override
public String toString() {
return "SerializerAdapter{serializer=" + serializer + '}';
}
|
@Test
public void testString() {
assertNotNull(adapter.toString());
}
|
public static UArrayAccess create(UExpression arrayExpr, UExpression indexExpr) {
return new AutoValue_UArrayAccess(arrayExpr, indexExpr);
}
|
@Test
public void unify() {
UExpression arrayIdent = mock(UExpression.class);
when(arrayIdent.unify(ident("array"), isA(Unifier.class))).thenReturn(Choice.of(unifier));
assertUnifies("array[5]", UArrayAccess.create(arrayIdent, ULiteral.intLit(5)));
}
|
public Optional<Integer> getPort() {
if (0 == args.length) {
return Optional.empty();
}
try {
int port = Integer.parseInt(args[0]);
if (port < 0) {
return Optional.empty();
}
return Optional.of(port);
} catch (final NumberFormatException ignored) {
throw new IllegalArgumentException(String.format("Invalid port `%s`.", args[0]));
}
}
|
@Test
void assertGetPortWithTwoArgument() {
Optional<Integer> actual = new BootstrapArguments(new String[]{"3306", "/test_conf/"}).getPort();
assertTrue(actual.isPresent());
assertThat(actual.get(), is(3306));
}
|
public long getAndIncrement() {
return getAndAddVal(1L);
}
|
@Test
public void testGetAndIncrement() {
PaddedAtomicLong counter = new PaddedAtomicLong();
long value = counter.getAndIncrement();
assertEquals(0L, value);
assertEquals(1, counter.get());
}
|
@Override
public Graph<EntityDescriptor> resolveNativeEntity(EntityDescriptor entityDescriptor) {
final MutableGraph<EntityDescriptor> mutableGraph = GraphBuilder.directed().build();
mutableGraph.addNode(entityDescriptor);
final ModelId modelId = entityDescriptor.id();
try {
final Input input = inputService.find(modelId.toString());
final InputWithExtractors inputWithExtractors = InputWithExtractors.create(input, inputService.getExtractors(input));
resolveNativeEntityLookupTable(entityDescriptor, inputWithExtractors, mutableGraph);
resolveNativeEntityGrokPattern(entityDescriptor, inputWithExtractors, mutableGraph);
return ImmutableGraph.copyOf(mutableGraph);
} catch (NotFoundException e) {
LOG.debug("Couldn't find input {}", entityDescriptor, e);
}
return ImmutableGraph.copyOf(mutableGraph);
}
|
@Test
@MongoDBFixtures("InputFacadeTest.json")
public void resolveEntityDescriptor() {
final EntityDescriptor descriptor = EntityDescriptor.create("5acc84f84b900a4ff290d9a7", ModelTypes.INPUT_V1);
final Graph<EntityDescriptor> graph = facade.resolveNativeEntity(descriptor);
assertThat(graph.nodes()).containsOnly(descriptor);
}
|
@Override
public DataflowPipelineJob run(Pipeline pipeline) {
// Multi-language pipelines and pipelines that include upgrades should automatically be upgraded
// to Runner v2.
if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_runner_v2")) {
LOG.info(
"Automatically enabling Dataflow Runner v2 since the pipeline used cross-language"
+ " transforms or pipeline needed a transform upgrade.");
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build());
}
}
if (useUnifiedWorker(options)) {
if (hasExperiment(options, "disable_runner_v2")
|| hasExperiment(options, "disable_runner_v2_until_2023")
|| hasExperiment(options, "disable_prime_runner_v2")) {
throw new IllegalArgumentException(
"Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set.");
}
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("use_runner_v2")) {
experiments.add("use_runner_v2");
}
if (!experiments.contains("use_unified_worker")) {
experiments.add("use_unified_worker");
}
if (!experiments.contains("beam_fn_api")) {
experiments.add("beam_fn_api");
}
if (!experiments.contains("use_portable_job_submission")) {
experiments.add("use_portable_job_submission");
}
options.setExperiments(ImmutableList.copyOf(experiments));
}
logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);
logWarningIfBigqueryDLQUnused(pipeline);
if (shouldActAsStreaming(pipeline)) {
options.setStreaming(true);
if (useUnifiedWorker(options)) {
options.setEnableStreamingEngine(true);
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("enable_streaming_engine")) {
experiments.add("enable_streaming_engine");
}
if (!experiments.contains("enable_windmill_service")) {
experiments.add("enable_windmill_service");
}
}
}
if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) {
ProjectionPushdownOptimizer.optimize(pipeline);
}
LOG.info(
"Executing pipeline on the Dataflow Service, which will have billing implications "
+ "related to Google Compute Engine usage and other Google Cloud Services.");
DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions);
// This incorrectly puns the worker harness container image (which implements v1beta3 API)
// with the SDK harness image (which implements Fn API).
//
// The same Environment is used in different and contradictory ways, depending on whether
// it is a v1 or v2 job submission.
RunnerApi.Environment defaultEnvironmentForDataflow =
Environments.createDockerEnvironment(workerHarnessContainerImageURL);
// The SdkComponents for portable and non-portable job submission must be kept distinct. Both
// need the default environment.
SdkComponents portableComponents = SdkComponents.create();
portableComponents.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
RunnerApi.Pipeline portablePipelineProto =
PipelineTranslation.toProto(pipeline, portableComponents, false);
// Note that `stageArtifacts` has to be called before `resolveArtifact` because
// `resolveArtifact` updates local paths to staged paths in pipeline proto.
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto);
List<DataflowPackage> packages = stageArtifacts(portablePipelineProto);
portablePipelineProto = resolveArtifacts(portablePipelineProto);
portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Portable pipeline proto:\n{}",
TextFormat.printer().printToString(portablePipelineProto));
}
// Stage the portable pipeline proto, retrieving the staged pipeline path, then update
// the options on the new job
// TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options
LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation());
byte[] serializedProtoPipeline = portablePipelineProto.toByteArray();
DataflowPackage stagedPipeline =
options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME);
dataflowOptions.setPipelineUrl(stagedPipeline.getLocation());
if (useUnifiedWorker(options)) {
LOG.info("Skipping v1 transform replacements since job will run on v2.");
} else {
// Now rewrite things to be as needed for v1 (mutates the pipeline)
// This way the job submitted is valid for v1 and v2, simultaneously
replaceV1Transforms(pipeline);
}
// Capture the SdkComponents for look up during step translations
SdkComponents dataflowV1Components = SdkComponents.create();
dataflowV1Components.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
// No need to perform transform upgrading for the Runner v1 proto.
RunnerApi.Pipeline dataflowV1PipelineProto =
PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Dataflow v1 pipeline proto:\n{}",
TextFormat.printer().printToString(dataflowV1PipelineProto));
}
// Set a unique client_request_id in the CreateJob request.
// This is used to ensure idempotence of job creation across retried
// attempts to create a job. Specifically, if the service returns a job with
// a different client_request_id, it means the returned one is a different
// job previously created with the same job name, and that the job creation
// has been effectively rejected. The SDK should return
// Error::Already_Exists to user in that case.
int randomNum = new Random().nextInt(9000) + 1000;
String requestId =
DateTimeFormat.forPattern("YYYYMMddHHmmssmmm")
.withZone(DateTimeZone.UTC)
.print(DateTimeUtils.currentTimeMillis())
+ "_"
+ randomNum;
JobSpecification jobSpecification =
translator.translate(
pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages);
if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_staged_dataflow_worker_jar")) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("use_staged_dataflow_worker_jar")
.build());
}
}
Job newJob = jobSpecification.getJob();
try {
newJob
.getEnvironment()
.setSdkPipelineOptions(
MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class));
} catch (IOException e) {
throw new IllegalArgumentException(
"PipelineOptions specified failed to serialize to JSON.", e);
}
newJob.setClientRequestId(requestId);
DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
String version = dataflowRunnerInfo.getVersion();
checkState(
!"${pom.version}".equals(version),
"Unable to submit a job to the Dataflow service with unset version ${pom.version}");
LOG.info("Dataflow SDK version: {}", version);
newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties());
// The Dataflow Service may write to the temporary directory directly, so
// it must be verified.
if (!isNullOrEmpty(options.getGcpTempLocation())) {
newJob
.getEnvironment()
.setTempStoragePrefix(
dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation()));
}
newJob.getEnvironment().setDataset(options.getTempDatasetId());
if (options.getWorkerRegion() != null) {
newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion());
}
if (options.getWorkerZone() != null) {
newJob.getEnvironment().setWorkerZone(options.getWorkerZone());
}
if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED");
} else if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED");
}
// Represent the minCpuPlatform pipeline option as an experiment, if not already present.
if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
List<String> minCpuFlags =
experiments.stream()
.filter(p -> p.startsWith("min_cpu_platform"))
.collect(Collectors.toList());
if (minCpuFlags.isEmpty()) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform())
.build());
} else {
LOG.warn(
"Flag min_cpu_platform is defined in both top level PipelineOption, "
+ "as well as under experiments. Proceed using {}.",
minCpuFlags.get(0));
}
}
newJob
.getEnvironment()
.setExperiments(
ImmutableList.copyOf(
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList())));
// Set the Docker container image that executes Dataflow worker harness, residing in Google
// Container Registry. Translator is guaranteed to create a worker pool prior to this point.
// For runner_v1, only worker_harness_container is set.
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same
// value.
String containerImage = getContainerImageForJob(options);
for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) {
workerPool.setWorkerHarnessContainerImage(containerImage);
}
configureSdkHarnessContainerImages(options, portablePipelineProto, newJob);
newJob.getEnvironment().setVersion(getEnvironmentVersion(options));
if (hooks != null) {
hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment());
}
// enable upload_graph when the graph is too large
byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8);
int jobGraphByteSize = jobGraphBytes.length;
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES
&& !hasExperiment(options, "upload_graph")
&& !useUnifiedWorker(options)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build());
LOG.info(
"The job graph size ({} in bytes) is larger than {}. Automatically add "
+ "the upload_graph option to experiments.",
jobGraphByteSize,
CREATE_JOB_REQUEST_LIMIT_BYTES);
}
if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) {
ArrayList<String> experiments = new ArrayList<>(options.getExperiments());
while (experiments.remove("upload_graph")) {}
options.setExperiments(experiments);
LOG.warn(
"The upload_graph experiment was specified, but it does not apply "
+ "to runner v2 jobs. Option has been automatically removed.");
}
// Upload the job to GCS and remove the graph object from the API call. The graph
// will be downloaded from GCS by the service.
if (hasExperiment(options, "upload_graph")) {
DataflowPackage stagedGraph =
options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME);
newJob.getSteps().clear();
newJob.setStepsLocation(stagedGraph.getLocation());
}
if (!isNullOrEmpty(options.getDataflowJobFile())
|| !isNullOrEmpty(options.getTemplateLocation())) {
boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation());
if (isTemplate) {
checkArgument(
isNullOrEmpty(options.getDataflowJobFile()),
"--dataflowJobFile and --templateLocation are mutually exclusive.");
}
String fileLocation =
firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile());
checkArgument(
fileLocation.startsWith("/") || fileLocation.startsWith("gs://"),
"Location must be local or on Cloud Storage, got %s.",
fileLocation);
ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */);
String workSpecJson = DataflowPipelineTranslator.jobToString(newJob);
try (PrintWriter printWriter =
new PrintWriter(
new BufferedWriter(
new OutputStreamWriter(
Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)),
UTF_8)))) {
printWriter.print(workSpecJson);
LOG.info("Printed job specification to {}", fileLocation);
} catch (IOException ex) {
String error = String.format("Cannot create output file at %s", fileLocation);
if (isTemplate) {
throw new RuntimeException(error, ex);
} else {
LOG.warn(error, ex);
}
}
if (isTemplate) {
LOG.info("Template successfully created.");
return new DataflowTemplateJob();
}
}
String jobIdToUpdate = null;
if (options.isUpdate()) {
jobIdToUpdate = getJobIdFromName(options.getJobName());
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setReplaceJobId(jobIdToUpdate);
}
if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) {
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot());
}
Job jobResult;
try {
jobResult = dataflowClient.createJob(newJob);
} catch (GoogleJsonResponseException e) {
String errorMessages = "Unexpected errors";
if (e.getDetails() != null) {
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) {
errorMessages =
"The size of the serialized JSON representation of the pipeline "
+ "exceeds the allowable limit. "
+ "For more information, please see the documentation on job submission:\n"
+ "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs";
} else {
errorMessages = e.getDetails().getMessage();
}
}
throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e);
} catch (IOException e) {
throw new RuntimeException("Failed to create a workflow job", e);
}
// Use a raw client for post-launch monitoring, as status calls may fail
// regularly and need not be retried automatically.
DataflowPipelineJob dataflowPipelineJob =
new DataflowPipelineJob(
DataflowClient.create(options),
jobResult.getId(),
options,
jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(),
portablePipelineProto);
// If the service returned client request id, the SDK needs to compare it
// with the original id generated in the request, if they are not the same
// (i.e., the returned job is not created by this request), throw
// DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException
// depending on whether this is a reload or not.
if (jobResult.getClientRequestId() != null
&& !jobResult.getClientRequestId().isEmpty()
&& !jobResult.getClientRequestId().equals(requestId)) {
// If updating a job.
if (options.isUpdate()) {
throw new DataflowJobAlreadyUpdatedException(
dataflowPipelineJob,
String.format(
"The job named %s with id: %s has already been updated into job id: %s "
+ "and cannot be updated again.",
newJob.getName(), jobIdToUpdate, jobResult.getId()));
} else {
throw new DataflowJobAlreadyExistsException(
dataflowPipelineJob,
String.format(
"There is already an active job named %s with id: %s. If you want to submit a"
+ " second job, try again by setting a different name using --jobName.",
newJob.getName(), jobResult.getId()));
}
}
LOG.info(
"To access the Dataflow monitoring console, please navigate to {}",
MonitoringUtil.getJobMonitoringPageURL(
options.getProject(), options.getRegion(), jobResult.getId()));
LOG.info("Submitted job: {}", jobResult.getId());
LOG.info(
"To cancel the job using the 'gcloud' tool, run:\n> {}",
MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId()));
return dataflowPipelineJob;
}
|
@Test
public void testPubsubSinkDynamicOverride() throws IOException {
PipelineOptions options = buildPipelineOptions();
DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
dataflowOptions.setStreaming(true);
Pipeline p = Pipeline.create(options);
List<PubsubMessage> testValues =
Arrays.asList(
new PubsubMessage("foo".getBytes(StandardCharsets.UTF_8), Collections.emptyMap())
.withTopic(""));
PCollection<PubsubMessage> input =
p.apply("CreateValuesBytes", Create.of(testValues))
.setIsBoundedInternal(PCollection.IsBounded.UNBOUNDED);
input.apply(PubsubIO.writeMessagesDynamic());
p.run();
AtomicBoolean sawPubsubOverride = new AtomicBoolean(false);
p.traverseTopologically(
new PipelineVisitor.Defaults() {
@Override
public void visitPrimitiveTransform(@UnknownKeyFor @NonNull @Initialized Node node) {
if (node.getTransform() instanceof DataflowRunner.StreamingPubsubIOWrite) {
sawPubsubOverride.set(true);
}
}
});
assertTrue(sawPubsubOverride.get());
}
|
@SuppressWarnings("OptionalGetWithoutIsPresent")
public StatementExecutorResponse execute(
final ConfiguredStatement<DescribeConnector> configuredStatement,
final SessionProperties sessionProperties,
final KsqlExecutionContext ksqlExecutionContext,
final ServiceContext serviceContext
) {
final String connectorName = configuredStatement
.getStatement()
.getConnectorName();
final ConnectResponse<ConnectorStateInfo> statusResponse = serviceContext
.getConnectClient()
.status(connectorName);
if (statusResponse.error().isPresent()) {
final String errorMsg = "Failed to query connector status: " + statusResponse.error().get();
throw new KsqlRestException(EndpointResponse.create()
.status(statusResponse.httpCode())
.entity(new KsqlErrorMessage(Errors.toErrorCode(statusResponse.httpCode()), errorMsg))
.build()
);
}
final ConnectResponse<ConnectorInfo> infoResponse = serviceContext
.getConnectClient()
.describe(connectorName);
if (infoResponse.error().isPresent()) {
final String errorMsg = "Failed to describe connector: " + infoResponse.error().get();
throw new KsqlRestException(EndpointResponse.create()
.status(infoResponse.httpCode())
.entity(new KsqlErrorMessage(Errors.toErrorCode(infoResponse.httpCode()), errorMsg))
.build()
);
}
final ConnectorStateInfo status = statusResponse.datum().get();
final ConnectorInfo info = infoResponse.datum().get();
final Optional<Connector> connector = connectorFactory.apply(info);
final List<KsqlWarning> warnings;
final List<String> topics;
if (connector.isPresent()) {
// Small optimization. If a connector's info is not found in the response, don't query for
// active topics with the given connectorName
final ConnectResponse<Map<String, Map<String, List<String>>>> topicsResponse = serviceContext
.getConnectClient()
.topics(connectorName);
// topics endpoint is relatively new (KAFKA-9422), so 404 here is expected behavior for older
// Connect versions. Rather than showing a scary warning to the user, we just log it to the
// server logs.
if (topicsResponse.error().isPresent()
&& topicsResponse.httpCode() == HttpStatus.SC_NOT_FOUND) {
topics = ImmutableList.of();
warnings = ImmutableList.of();
LOG.warn("Could not list related topics due to error: " + topicsResponse.error().get());
} else if (topicsResponse.error().isPresent()) {
topics = ImmutableList.of();
warnings = ImmutableList.of(
new KsqlWarning("Could not list related topics due to error: "
+ topicsResponse.error().get()));
} else {
topics = topicsResponse.datum()
.get()
.get(connectorName)
.getOrDefault(TOPICS_KEY, ImmutableList.of());
warnings = ImmutableList.of();
}
} else {
topics = ImmutableList.of();
warnings = ImmutableList.of();
}
final List<SourceDescription> sources;
if (connector.isPresent()) {
sources = ksqlExecutionContext
.getMetaStore()
.getAllDataSources()
.values()
.stream()
.filter(source -> topics.contains(source.getKafkaTopicName()))
.map(source -> SourceDescriptionFactory.create(
source,
false,
ImmutableList.of(),
ImmutableList.of(),
Optional.empty(),
ImmutableList.of(),
ImmutableList.of(),
ksqlExecutionContext.metricCollectors()
)
)
.collect(Collectors.toList());
} else {
sources = ImmutableList.of();
}
final ConnectorDescription description = new ConnectorDescription(
configuredStatement.getMaskedStatementText(),
info.config().get(CONNECTOR_CLASS_CONFIG),
status,
sources,
topics,
warnings
);
return StatementExecutorResponse.handled(Optional.of(description));
}
|
@Test
public void shouldThrowIfConnectClientFailsDescribe() {
// Given:
when(connectClient.describe(any())).thenReturn(ConnectResponse.failure("error", HttpStatus.SC_INTERNAL_SERVER_ERROR));
// When:
final KsqlRestException e = assertThrows(
KsqlRestException.class,
() -> executor.execute(describeStatement, mock(SessionProperties.class), engine, serviceContext));
// Then:
verify(connectClient).status("connector");
verify(connectClient).describe("connector");
assertThat(e.getResponse().getStatus(), is(HttpStatus.SC_INTERNAL_SERVER_ERROR));
final KsqlErrorMessage err = (KsqlErrorMessage) e.getResponse().getEntity();
assertThat(err.getErrorCode(), is(Errors.toErrorCode(HttpStatus.SC_INTERNAL_SERVER_ERROR)));
assertThat(err.getMessage(), containsString("Failed to describe connector: error"));
}
|
public Stream<CsvRow> stream() {
return StreamSupport.stream(spliterator(), false)
.onClose(() -> {
try {
close();
} catch (final IOException e) {
throw new IORuntimeException(e);
}
});
}
|
@Test
@Disabled
public void streamTest() {
final CsvReader reader = CsvUtil.getReader(ResourceUtil.getUtf8Reader("test_bean.csv"));
reader.stream().limit(2).forEach(Console::log);
}
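// A hedged usage sketch (reuses the same reader setup as the test above, not original code):
// the onClose hook registered in stream() closes the underlying reader, so wrapping the
// stream in try-with-resources releases the file handle automatically.
try (Stream<CsvRow> rows = CsvUtil.getReader(ResourceUtil.getUtf8Reader("test_bean.csv")).stream()) {
    rows.limit(10).forEach(Console::log);
}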
|
public static Builder forCurrentMagic(ProduceRequestData data) {
return forMagic(RecordBatch.CURRENT_MAGIC_VALUE, data);
}
|
@Test
public void testV3AndAboveShouldContainOnlyOneRecordBatch() {
ByteBuffer buffer = ByteBuffer.allocate(256);
MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE, TimestampType.CREATE_TIME, 0L);
builder.append(10L, null, "a".getBytes());
builder.close();
builder = MemoryRecords.builder(buffer, Compression.NONE, TimestampType.CREATE_TIME, 1L);
builder.append(11L, "1".getBytes(), "b".getBytes());
builder.append(12L, null, "c".getBytes());
builder.close();
buffer.flip();
ProduceRequest.Builder requestBuilder = ProduceRequest.forCurrentMagic(new ProduceRequestData()
.setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList(
new ProduceRequestData.TopicProduceData()
.setName("test")
.setPartitionData(Collections.singletonList(
new ProduceRequestData.PartitionProduceData()
.setIndex(0)
.setRecords(MemoryRecords.readableRecords(buffer))))).iterator()))
.setAcks((short) 1)
.setTimeoutMs(5000));
assertThrowsForAllVersions(requestBuilder, InvalidRecordException.class);
}
|
@Override
public Lock readLock() {
return readLock;
}
|
@Test(timeout=10000)
public void testReadLock() throws Exception {
String testname = name.getMethodName();
InstrumentedReadWriteLock readWriteLock = new InstrumentedReadWriteLock(
true, testname, LOG, 2000, 300);
final AutoCloseableLock readLock = new AutoCloseableLock(
readWriteLock.readLock());
final AutoCloseableLock writeLock = new AutoCloseableLock(
readWriteLock.writeLock());
try (AutoCloseableLock lock = readLock.acquire()) {
Thread competingReadThread = new Thread() {
@Override
public void run() {
assertTrue(readLock.tryLock());
readLock.release();
}
};
competingReadThread.start();
competingReadThread.join();
Thread competingWriteThread = new Thread() {
@Override
public void run() {
assertFalse(writeLock.tryLock());
}
};
competingWriteThread.start();
competingWriteThread.join();
}
}
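// A hedged sketch of the intended pattern (types reused from the test above): acquire()
// returns the same AutoCloseableLock, so a critical section can be guarded with
// try-with-resources and the lock is released automatically on exit.
AutoCloseableLock guard = new AutoCloseableLock(
    new InstrumentedReadWriteLock(true, "sketch", LOG, 2000, 300).readLock());
try (AutoCloseableLock ignored = guard.acquire()) {
    // read-only work; concurrent readers may still acquire, writers are blocked
}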
|
@Override
public boolean matchesJdbcUrl(String jdbcConnectionURL) {
return StringUtils.startsWithIgnoreCase(jdbcConnectionURL, "jdbc:h2:");
}
|
@Test
public void matchesJdbcURL() {
assertThat(underTest.matchesJdbcUrl("jdbc:h2:foo")).isTrue();
assertThat(underTest.matchesJdbcUrl("jdbc:hsql:foo")).isFalse();
}
|
@Override
public void pushMsgToRuleEngine(TopicPartitionInfo tpi, UUID msgId, ToRuleEngineMsg msg, TbQueueCallback callback) {
log.trace("PUSHING msg: {} to:{}", msg, tpi);
producerProvider.getRuleEngineMsgProducer().send(tpi, new TbProtoQueueMsg<>(msgId, msg), callback);
toRuleEngineMsgs.incrementAndGet();
}
|
@Test
public void testPushMsgToRuleEngineWithTenantIdIsNullUuidAndEntityIsTenantUseQueueFromMsgIsTrue() {
TbQueueProducer<TbProtoQueueMsg<TransportProtos.ToRuleEngineMsg>> tbREQueueProducer = mock(TbQueueProducer.class);
TbQueueCallback callback = mock(TbQueueCallback.class);
TenantId tenantId = TenantId.fromUUID(UUID.fromString("3c8bd350-1239-4a3b-b9c3-4dd76f8e20f1"));
TbMsg requestMsg = TbMsg.newMsg(DataConstants.HP_QUEUE_NAME, TbMsgType.REST_API_REQUEST, tenantId, TbMsgMetaData.EMPTY, TbMsg.EMPTY_JSON_OBJECT);
when(producerProvider.getRuleEngineMsgProducer()).thenReturn(tbREQueueProducer);
clusterService.pushMsgToRuleEngine(TenantId.SYS_TENANT_ID, tenantId, requestMsg, true, callback);
verify(producerProvider).getRuleEngineMsgProducer();
verify(ruleEngineProducerService).sendToRuleEngine(tbREQueueProducer, tenantId, requestMsg, callback);
}
|
@ScalarFunction
@Description("estimated cardinality of an SfmSketch object")
@SqlType(StandardTypes.BIGINT)
public static long cardinality(@SqlType(SfmSketchType.NAME) Slice serializedSketch)
{
return SfmSketch.deserialize(serializedSketch).cardinality();
}
|
@Test
public void testCardinality()
{
SfmSketch sketch = createSketch(1, 10_000, 4);
assertEquals(SfmSketchFunctions.cardinality(sketch.serialize()), sketch.cardinality());
}
|
static <K, V> CacheConfig<K, V> getCacheConfig(HazelcastClientInstanceImpl client,
String cacheName, String simpleCacheName) {
ClientMessage request = CacheGetConfigCodec.encodeRequest(cacheName, simpleCacheName);
try {
int partitionId = client.getClientPartitionService().getPartitionId(cacheName);
ClientInvocation clientInvocation = new ClientInvocation(client, request, cacheName, partitionId);
Future<ClientMessage> future = clientInvocation.invoke();
ClientMessage responseMessage = future.get();
SerializationService serializationService = client.getSerializationService();
CacheConfigHolder cacheConfigHolder = CacheGetConfigCodec.decodeResponse(responseMessage);
if (cacheConfigHolder == null) {
return null;
}
return cacheConfigHolder.asCacheConfig(serializationService);
} catch (Exception e) {
throw rethrow(e);
}
}
|
@Test
public void testGetCacheConfig_withSimpleCacheName() {
CacheConfig<String, String> cacheConfig = getCacheConfig(client, SIMPLE_CACHE_NAME, SIMPLE_CACHE_NAME);
assertNotNull(cacheConfig);
assertEquals(SIMPLE_CACHE_NAME, cacheConfig.getName());
}
|
public static <K, E> Collector<E, ImmutableSetMultimap.Builder<K, E>, ImmutableSetMultimap<K, E>> unorderedIndex(Function<? super E, K> keyFunction) {
return unorderedIndex(keyFunction, Function.identity());
}
|
@Test
public void unorderedIndex_supports_duplicate_keys() {
SetMultimap<Integer, MyObj> multimap = LIST_WITH_DUPLICATE_ID.stream().collect(unorderedIndex(MyObj::getId));
assertThat(multimap.keySet()).containsOnly(1, 2);
assertThat(multimap.get(1)).containsOnly(MY_OBJ_1_A, MY_OBJ_1_C);
assertThat(multimap.get(2)).containsOnly(MY_OBJ_2_B);
}
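// A hedged, self-contained sketch of the collector with a plain key function
// (the strings and key function here are illustrative, not from the original test fixture).
ImmutableSetMultimap<Integer, String> byLength = Stream.of("a", "bb", "cc", "d")
    .collect(unorderedIndex(String::length));
// byLength.get(1) contains "a" and "d"; byLength.get(2) contains "bb" and "cc"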
|
@SuppressWarnings("unchecked")
@Override
public <K, V> void forward(final Record<K, V> record) {
final ProcessorNode<?, ?, ?, ?> previousNode = currentNode();
try {
for (final ProcessorNode<?, ?, ?, ?> child : currentNode().children()) {
setCurrentNode(child);
((ProcessorNode<K, V, ?, ?>) child).process(record);
}
} finally {
setCurrentNode(previousNode);
}
}
|
@Test
public void shouldForwardToSingleChild() {
doNothing().when(child).process(any());
when(recordContext.timestamp()).thenReturn(0L);
when(recordContext.headers()).thenReturn(new RecordHeaders());
globalContext.forward((Object /*forcing a call to the K/V forward*/) null, null);
}
|
void handleStatement(final QueuedCommand queuedCommand) {
throwIfNotConfigured();
handleStatementWithTerminatedQueries(
queuedCommand.getAndDeserializeCommand(commandDeserializer),
queuedCommand.getAndDeserializeCommandId(),
queuedCommand.getStatus(),
Mode.EXECUTE,
queuedCommand.getOffset(),
false
);
}
|
@Test
public void shouldThrowOnDropSourceWithoutPlan() {
// Given:
when(mockParser.parseSingleStatement("DROP STREAM"))
.thenReturn(PreparedStatement.of("DROP STREAM", mock(DropStream.class)));
final Command command = new Command(
"DROP STREAM",
emptyMap(),
emptyMap(),
Optional.empty());
// When
final Exception e = assertThrows(
KsqlException.class,
() -> handleStatement(statementExecutorWithMocks, command, COMMAND_ID, Optional.empty(), 0L)
);
// Then
assertThat(e.getMessage(), containsString(
"This version of ksqlDB does not support executing statements submitted prior to ksqlDB "
+ "0.8.0 or Confluent Platform ksqlDB 5.5. Please see the upgrading guide to upgrade."));
}
|
public static boolean canDrop(FilterPredicate pred, List<ColumnChunkMetaData> columns) {
Objects.requireNonNull(pred, "pred cannot be null");
Objects.requireNonNull(columns, "columns cannot be null");
return pred.accept(new StatisticsFilter(columns));
}
|
@Test
public void testContainsAnd() {
Operators.Contains<Integer> yes = contains(eq(intColumn, 9));
Operators.Contains<Double> no = contains(eq(doubleColumn, 50D));
assertTrue(canDrop(and(yes, yes), columnMetas));
assertTrue(canDrop(and(yes, no), columnMetas));
assertTrue(canDrop(and(no, yes), columnMetas));
assertFalse(canDrop(and(no, no), columnMetas));
}
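// Hedged sketch (intColumn, doubleColumn and columnMetas are assumed from the surrounding
// test fixture): canDrop answers whether the row group's column statistics prove that no
// record can match, so the whole row group may be skipped without reading it.
FilterPredicate pred = and(eq(intColumn, 9), gt(doubleColumn, 100D));
if (canDrop(pred, columnMetas)) {
    // statistics rule out any match: skip this row group
}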
|
public static InfluxDBSinkConfig load(String yamlFile) throws IOException {
ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
return mapper.readValue(new File(yamlFile), InfluxDBSinkConfig.class);
}
|
@Test
public final void loadFromMapTest() throws IOException {
Map<String, Object> map = new HashMap<>();
map.put("influxdbUrl", "http://localhost:8086");
map.put("database", "test_db");
map.put("consistencyLevel", "ONE");
map.put("logLevel", "NONE");
map.put("retentionPolicy", "autogen");
map.put("gzipEnable", "false");
map.put("batchTimeMs", "1000");
map.put("batchSize", "100");
map.put("username", "admin");
map.put("password", "admin");
SinkContext sinkContext = Mockito.mock(SinkContext.class);
InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext);
assertNotNull(config);
assertEquals("http://localhost:8086", config.getInfluxdbUrl());
assertEquals("test_db", config.getDatabase());
assertEquals("ONE", config.getConsistencyLevel());
assertEquals("NONE", config.getLogLevel());
assertEquals("autogen", config.getRetentionPolicy());
assertEquals(Boolean.parseBoolean("false"), config.isGzipEnable());
assertEquals(Long.parseLong("1000"), config.getBatchTimeMs());
assertEquals(Integer.parseInt("100"), config.getBatchSize());
assertEquals("admin", config.getUsername());
assertEquals("admin", config.getPassword());
}
|
public static Method getMethod(Class<?> clazz, String methodName, Class<?>... paramTypes) throws SecurityException {
return getMethod(clazz, false, methodName, paramTypes);
}
|
@Test
public void getMethodTest() {
Method method = ReflectUtil.getMethod(ExamInfoDict.class, "getId");
assertEquals("getId", method.getName());
assertEquals(0, method.getParameterTypes().length);
method = ReflectUtil.getMethod(ExamInfoDict.class, "getId", Integer.class);
assertEquals("getId", method.getName());
assertEquals(1, method.getParameterTypes().length);
}
|
@Override public SlotAssignmentResult ensure(long key1, long key2) {
assert key1 != unassignedSentinel : "ensure() called with key1 == nullKey1 (" + unassignedSentinel + ')';
return super.ensure0(key1, key2);
}
|
@Test
public void testCursor_key1() {
final long key1 = randomKey();
final long key2 = randomKey();
hsa.ensure(key1, key2);
HashSlotCursor16byteKey cursor = hsa.cursor();
cursor.advance();
assertEquals(key1, cursor.key1());
}
|
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
ReflectionUtils.doWithMethods(bean.getClass(), recurringJobFinderMethodCallback);
return bean;
}
|
@Test
void beansWithMethodsAnnotatedWithRecurringAnnotationCronDisabled() {
// GIVEN
final RecurringJobPostProcessor recurringJobPostProcessor = getRecurringJobPostProcessor();
// WHEN
recurringJobPostProcessor.postProcessAfterInitialization(new MyServiceWithRecurringCronJobDisabled(), "not important");
// THEN
verify(jobScheduler).deleteRecurringJob("my-recurring-job");
}
|
@Override
public int totalSize() {
return payload != null ? payload.length : 0;
}
|
@Test
public void totalSize_whenNullByteArray() {
HeapData heapData = new HeapData(null);
assertEquals(0, heapData.totalSize());
}
|
static void setDefaultEnsemblePlacementPolicy(
ClientConfiguration bkConf,
ServiceConfiguration conf,
MetadataStore store
) {
bkConf.setProperty(BookieRackAffinityMapping.METADATA_STORE_INSTANCE, store);
if (conf.isBookkeeperClientRackawarePolicyEnabled() || conf.isBookkeeperClientRegionawarePolicyEnabled()) {
if (conf.isBookkeeperClientRegionawarePolicyEnabled()) {
bkConf.setEnsemblePlacementPolicy(RegionAwareEnsemblePlacementPolicy.class);
bkConf.setProperty(
REPP_ENABLE_VALIDATION,
conf.getProperties().getProperty(REPP_ENABLE_VALIDATION, "true")
);
bkConf.setProperty(
REPP_REGIONS_TO_WRITE,
conf.getProperties().getProperty(REPP_REGIONS_TO_WRITE, null)
);
bkConf.setProperty(
REPP_MINIMUM_REGIONS_FOR_DURABILITY,
conf.getProperties().getProperty(REPP_MINIMUM_REGIONS_FOR_DURABILITY, "2")
);
bkConf.setProperty(
REPP_ENABLE_DURABILITY_ENFORCEMENT_IN_REPLACE,
conf.getProperties().getProperty(REPP_ENABLE_DURABILITY_ENFORCEMENT_IN_REPLACE, "true")
);
} else {
bkConf.setEnsemblePlacementPolicy(RackawareEnsemblePlacementPolicy.class);
}
bkConf.setMinNumRacksPerWriteQuorum(conf.getBookkeeperClientMinNumRacksPerWriteQuorum());
bkConf.setEnforceMinNumRacksPerWriteQuorum(conf.isBookkeeperClientEnforceMinNumRacksPerWriteQuorum());
bkConf.setProperty(REPP_DNS_RESOLVER_CLASS,
conf.getProperties().getProperty(
REPP_DNS_RESOLVER_CLASS,
BookieRackAffinityMapping.class.getName()));
bkConf.setProperty(NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
conf.getProperties().getProperty(
NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
""));
}
if (conf.getBookkeeperClientIsolationGroups() != null && !conf.getBookkeeperClientIsolationGroups().isEmpty()) {
bkConf.setEnsemblePlacementPolicy(IsolatedBookieEnsemblePlacementPolicy.class);
bkConf.setProperty(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS,
conf.getBookkeeperClientIsolationGroups());
bkConf.setProperty(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS,
conf.getBookkeeperClientSecondaryIsolationGroups());
}
}
|
@Test
public void testSetDefaultEnsemblePlacementPolicyRackAwareDisabled() {
ClientConfiguration bkConf = new ClientConfiguration();
ServiceConfiguration conf = new ServiceConfiguration();
assertNull(bkConf.getProperty(REPP_ENABLE_VALIDATION));
assertNull(bkConf.getProperty(REPP_REGIONS_TO_WRITE));
assertNull(bkConf.getProperty(REPP_MINIMUM_REGIONS_FOR_DURABILITY));
assertNull(bkConf.getProperty(REPP_ENABLE_DURABILITY_ENFORCEMENT_IN_REPLACE));
assertNull(bkConf.getProperty(REPP_DNS_RESOLVER_CLASS));
assertNull(bkConf.getProperty(MIN_NUM_RACKS_PER_WRITE_QUORUM));
assertNull(bkConf.getProperty(ENFORCE_MIN_NUM_RACKS_PER_WRITE_QUORUM));
BookKeeperClientFactoryImpl.setDefaultEnsemblePlacementPolicy(
bkConf,
conf,
null
);
assertNull(bkConf.getProperty(REPP_ENABLE_VALIDATION));
assertNull(bkConf.getProperty(REPP_REGIONS_TO_WRITE));
assertNull(bkConf.getProperty(REPP_MINIMUM_REGIONS_FOR_DURABILITY));
assertNull(bkConf.getProperty(REPP_ENABLE_DURABILITY_ENFORCEMENT_IN_REPLACE));
assertEquals(
bkConf.getProperty(REPP_DNS_RESOLVER_CLASS),
BookieRackAffinityMapping.class.getName());
assertFalse(bkConf.getEnforceMinNumRacksPerWriteQuorum());
assertEquals(2, bkConf.getMinNumRacksPerWriteQuorum());
}
|
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = trees.length;
double[][] prediction = new double[ntrees][n];
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = b;
for (int i = 0; i < ntrees; i++) {
base += shrinkage * trees[i].predict(xj);
prediction[i][j] = base;
}
}
return prediction;
}
|
@Test
public void testAileronsLS() {
test(Loss.ls(), "ailerons", Ailerons.formula, Ailerons.data, 0.0002);
}
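// Hedged sketch (model, validData and truth are assumed names, not fixtures from the test
// above): test() returns one row of predictions per boosting stage, so the per-stage RMSE
// on held-out data can be traced to choose a tree count.
double[][] staged = model.test(validData);          // shape: [ntrees][n]
for (int i = 0; i < staged.length; i++) {
    double sse = 0.0;
    for (int j = 0; j < truth.length; j++) {
        double err = staged[i][j] - truth[j];
        sse += err * err;
    }
    System.out.printf("trees=%d rmse=%.4f%n", i + 1, Math.sqrt(sse / truth.length));
}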
|
public PrepareResult prepare(HostValidator hostValidator, DeployLogger logger, PrepareParams params,
Optional<ApplicationVersions> activeApplicationVersions, Instant now, File serverDbSessionDir,
ApplicationPackage applicationPackage, SessionZooKeeperClient sessionZooKeeperClient) {
ApplicationId applicationId = params.getApplicationId();
Preparation preparation = new Preparation(hostValidator, logger, params, activeApplicationVersions,
TenantRepository.getTenantPath(applicationId.tenant()),
serverDbSessionDir, applicationPackage, sessionZooKeeperClient,
onnxModelCost, endpointCertificateSecretStores);
preparation.preprocess();
try {
AllocatedHosts allocatedHosts = preparation.buildModels(now);
preparation.makeResult(allocatedHosts);
if ( ! params.isDryRun()) {
FileReference fileReference = preparation.triggerDistributionOfApplicationPackage();
preparation.writeStateZK(fileReference);
preparation.writeEndpointCertificateMetadataZK();
preparation.writeContainerEndpointsZK();
}
log.log(Level.FINE, () -> "time used " + params.getTimeoutBudget().timesUsed() + " : " + applicationId);
return preparation.result();
}
catch (IllegalArgumentException e) {
if (e instanceof InvalidApplicationException)
throw e;
throw new InvalidApplicationException("Invalid application package", e);
}
}
|
@Test(expected = CertificateNotReadyException.class)
public void endpoint_certificate_is_missing_when_certificate_not_in_secretstore() throws IOException {
var tlskey = "vespa.tlskeys.tenant1--app1";
var applicationId = applicationId("test");
var params = new PrepareParams.Builder().applicationId(applicationId).endpointCertificateMetadata("{\"keyName\": \"vespa.tlskeys.tenant1--app1-key\", \"certName\":\"vespa.tlskeys.tenant1--app1-cert\", \"version\": 7}").build();
secretStore.put(tlskey+"-key", 7, "KEY");
prepare(new File("src/test/resources/deploy/hosted-app"), params);
}
|
@Override
public void disableAutoTrack(List<AutoTrackEventType> eventTypeList) {
}
|
@Test
public void disableAutoTrack() {
mSensorsAPI.disableAutoTrack(SensorsDataAPI.AutoTrackEventType.APP_START);
Assert.assertFalse(mSensorsAPI.isAutoTrackEnabled());
}
|
@Override
public int computeSourceParallelismUpperBound(JobVertexID jobVertexId, int maxParallelism) {
if (globalDefaultSourceParallelism > maxParallelism) {
LOG.info(
"The global default source parallelism {} is larger than the maximum parallelism {}. "
+ "Use {} as the upper bound parallelism of source job vertex {}.",
globalDefaultSourceParallelism,
maxParallelism,
maxParallelism,
jobVertexId);
return maxParallelism;
} else {
return globalDefaultSourceParallelism;
}
}
|
@Test
void testComputeSourceParallelismUpperBound() {
Configuration configuration = new Configuration();
configuration.setInteger(
BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_DEFAULT_SOURCE_PARALLELISM,
DEFAULT_SOURCE_PARALLELISM);
VertexParallelismAndInputInfosDecider vertexParallelismAndInputInfosDecider =
DefaultVertexParallelismAndInputInfosDecider.from(MAX_PARALLELISM, configuration);
assertThat(
vertexParallelismAndInputInfosDecider.computeSourceParallelismUpperBound(
new JobVertexID(), VERTEX_MAX_PARALLELISM))
.isEqualTo(DEFAULT_SOURCE_PARALLELISM);
}
|
public boolean executeDynamicPartitionForTable(Long dbId, Long tableId) {
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
LOG.warn("Automatically removes the schedule because database does not exist, dbId: {}", dbId);
return true;
}
ArrayList<AddPartitionClause> addPartitionClauses = new ArrayList<>();
ArrayList<DropPartitionClause> dropPartitionClauses;
String tableName;
boolean skipAddPartition = false;
OlapTable olapTable;
olapTable = (OlapTable) db.getTable(tableId);
if (olapTable == null) {
LOG.warn("Automatically removes the schedule because table does not exist, " +
"tableId: {}", tableId);
return true;
}
// Only OlapTable has DynamicPartitionProperty
try (AutoCloseableLock ignore =
new AutoCloseableLock(new Locker(), db, Lists.newArrayList(olapTable.getId()), LockType.READ)) {
if (!olapTable.dynamicPartitionExists()) {
LOG.warn("Automatically removes the schedule because " +
"table[{}] does not have dynamic partition", olapTable.getName());
return true;
}
if (!olapTable.getTableProperty().getDynamicPartitionProperty().isEnabled()) {
LOG.warn("Automatically removes the schedule because table[{}] " +
"does not enable dynamic partition", olapTable.getName());
return true;
}
if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) {
String errorMsg = "Table[" + olapTable.getName() + "]'s state is not NORMAL. " +
"Do not allow doing dynamic add partition. table state=" + olapTable.getState();
recordCreatePartitionFailedMsg(db.getOriginName(), olapTable.getName(), errorMsg);
skipAddPartition = true;
}
// Determine the partition column type
// if column type is Date, format partition name as yyyyMMdd
// if column type is DateTime, format partition name as yyyyMMddHHmmss
// scheduler time should be record even no partition added
createOrUpdateRuntimeInfo(olapTable.getName(), LAST_SCHEDULER_TIME, TimeUtils.getCurrentFormatTime());
RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) olapTable.getPartitionInfo();
if (rangePartitionInfo.getPartitionColumnsSize() != 1) {
// currently only support partition with single column.
LOG.warn("Automatically removes the schedule because " +
"table[{}] has more than one partition column", olapTable.getName());
return true;
}
try {
Column partitionColumn = rangePartitionInfo.getPartitionColumns(olapTable.getIdToColumn()).get(0);
String partitionFormat = DynamicPartitionUtil.getPartitionFormat(partitionColumn);
if (!skipAddPartition) {
addPartitionClauses = getAddPartitionClause(db, olapTable, partitionColumn, partitionFormat);
}
dropPartitionClauses = getDropPartitionClause(db, olapTable, partitionColumn, partitionFormat);
tableName = olapTable.getName();
} catch (Exception e) {
LOG.warn("create or drop partition failed", e);
recordCreatePartitionFailedMsg(db.getOriginName(), olapTable.getName(), e.getMessage());
return false;
}
}
WarehouseManager warehouseManager = GlobalStateMgr.getCurrentState().getWarehouseMgr();
ConnectContext ctx = Util.getOrCreateConnectContext();
ctx.setCurrentWarehouse(warehouseManager.getBackgroundWarehouse().getName());
Locker locker = new Locker();
for (DropPartitionClause dropPartitionClause : dropPartitionClauses) {
if (!locker.lockDatabaseAndCheckExist(db, LockType.WRITE)) {
LOG.warn("db: {}({}) has been dropped, skip", db.getFullName(), db.getId());
return false;
}
try {
AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(olapTable);
analyzer.analyze(ctx, dropPartitionClause);
GlobalStateMgr.getCurrentState().getLocalMetastore().dropPartition(db, olapTable, dropPartitionClause);
clearDropPartitionFailedMsg(tableName);
} catch (DdlException e) {
recordDropPartitionFailedMsg(db.getOriginName(), tableName, e.getMessage());
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
}
if (!skipAddPartition) {
for (AddPartitionClause addPartitionClause : addPartitionClauses) {
try {
AlterTableClauseAnalyzer alterTableClauseVisitor = new AlterTableClauseAnalyzer(olapTable);
alterTableClauseVisitor.analyze(ctx, addPartitionClause);
GlobalStateMgr.getCurrentState().getLocalMetastore().addPartitions(ctx,
db, tableName, addPartitionClause);
clearCreatePartitionFailedMsg(tableName);
} catch (DdlException e) {
recordCreatePartitionFailedMsg(db.getOriginName(), tableName, e.getMessage());
}
}
}
return false;
}
|
@Test
public void testPartitionColumnDateUseDynamicHour() throws Exception {
new MockUp<LocalDateTime>() {
@Mock
public LocalDateTime now() {
return LocalDateTime.of(2023, 3, 30, 1, 1, 1);
}
};
starRocksAssert.withDatabase("test").useDatabase("test")
.withTable("CREATE TABLE `test_hour_partition2` (\n" +
" `event_day` date NULL COMMENT \"\",\n" +
" `site_id` int(11) NULL DEFAULT \"10\" COMMENT \"\",\n" +
" `city_code` varchar(100) NULL COMMENT \"\",\n" +
" `user_name` varchar(32) NULL DEFAULT \"\" COMMENT \"\",\n" +
" `pv` bigint(20) NULL DEFAULT \"0\" COMMENT \"\"\n" +
") ENGINE=OLAP \n" +
"DUPLICATE KEY(`event_day`, `site_id`, `city_code`, `user_name`)\n" +
"PARTITION BY RANGE(`event_day`)\n" +
"()\n" +
"DISTRIBUTED BY HASH(`event_day`, `site_id`) BUCKETS 32 \n" +
"PROPERTIES (\n" +
"\"replication_num\" = \"1\",\n" +
"\"dynamic_partition.enable\" = \"true\",\n" +
"\"dynamic_partition.time_unit\" = \"DAY\",\n" +
"\"dynamic_partition.time_zone\" = \"Asia/Shanghai\",\n" +
"\"dynamic_partition.start\" = \"-1\",\n" +
"\"dynamic_partition.end\" = \"10\",\n" +
"\"dynamic_partition.prefix\" = \"p\",\n" +
"\"dynamic_partition.buckets\" = \"3\",\n" +
"\"dynamic_partition.history_partition_num\" = \"0\",\n" +
"\"in_memory\" = \"false\",\n" +
"\"storage_format\" = \"DEFAULT\",\n" +
"\"enable_persistent_index\" = \"false\",\n" +
"\"compression\" = \"LZ4\"\n" +
");");
DynamicPartitionScheduler dynamicPartitionScheduler = GlobalStateMgr.getCurrentState()
.getDynamicPartitionScheduler();
Database db = GlobalStateMgr.getCurrentState().getDb("test");
OlapTable tbl = (OlapTable) db.getTable("test_hour_partition2");
DynamicPartitionProperty dynamicPartitionProperty = tbl.getTableProperty().getDynamicPartitionProperty();
dynamicPartitionProperty.setTimeUnit("HOUR");
boolean result = dynamicPartitionScheduler.executeDynamicPartitionForTable(db.getId(), tbl.getId());
Assert.assertFalse(result);
}
|
public static Bson idsIn(Collection<ObjectId> ids) {
return Filters.in("_id", ids);
}
|
@Test
void testIdsIn() {
final String missingId1 = "6627add0ee216425dd6df36a";
final String missingId2 = "6627add0ee216425dd6df36b";
final String idA = "6627add0ee216425dd6df37a";
final String idB = "6627add0ee216425dd6df37b";
final String idC = "6627add0ee216425dd6df37c";
final String idD = "6627add0ee216425dd6df37d";
final String idE = "6627add0ee216425dd6df37e";
final String idF = "6627add0ee216425dd6df37f";
final var a = new DTO(idA, "a");
final var b = new DTO(idB, "b");
final var c = new DTO(idC, "c");
final var d = new DTO(idD, "d");
final var e = new DTO(idE, "e");
final var f = new DTO(idF, "f");
collection.insertMany(List.of(a, b, c, d, e, f));
assertThat(collection.find(stringIdsIn(Set.of(idA, idF)))).contains(a, f);
assertThat(collection.find(stringIdsIn(Set.of(idA, idF)))).hasSize(2);
assertThat(collection.find(stringIdsIn(Set.of(missingId1, missingId2)))).hasSize(0);
assertThat(collection.find(stringIdsIn(Set.of(idA, idB, missingId1, missingId2)))).contains(a, b);
assertThat(collection.find(stringIdsIn(Set.of(idA, idB, missingId1, missingId2)))).hasSize(2);
assertThat(collection.find(stringIdsIn(Set.of(idA, idB, idC, idD, idE, idF)))).hasSize(6);
assertThat(collection.find(stringIdsIn(Set.of(idA, idB, idC, idD, idE, idF)))).contains(a, b, c, d, e, f);
assertThat(collection.find(stringIdsIn(Set.of(idA, idB, idC, idD, idE, idF, missingId1, missingId2)))).hasSize(6);
assertThat(collection.find(stringIdsIn(Set.of(idA, idB, idC, idD, idE, idF, missingId1, missingId2)))).contains(a, b, c, d, e, f);
}
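// Hedged sketch of the helper itself (the test above exercises a string-based wrapper):
// it simply builds an "_id in (...)" filter from ObjectIds, usable with any collection.
Bson byIds = idsIn(List.of(
    new ObjectId("6627add0ee216425dd6df37a"),
    new ObjectId("6627add0ee216425dd6df37b")));
collection.find(byIds);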
|
int calculatePartBufferSize(HazelcastProperties hazelcastProperties, long jarSize) {
int partBufferSize = hazelcastProperties.getInteger(JOB_UPLOAD_PART_SIZE);
// If jar size is smaller, then use it
if (jarSize < partBufferSize) {
partBufferSize = (int) jarSize;
}
return partBufferSize;
}
|
@Test
public void calculatePartBufferSize_when_invalidProperty() {
SubmitJobPartCalculator submitJobPartCalculator = new SubmitJobPartCalculator();
Properties properties = new Properties();
properties.setProperty(ClientProperty.JOB_UPLOAD_PART_SIZE.getName(), "E");
ClientConfig clientConfig = new ClientConfig();
clientConfig.setProperty(ClientProperty.JOB_UPLOAD_PART_SIZE.getName(), "E");
HazelcastProperties hazelcastProperties = new HazelcastProperties(properties);
long jarSize = 2_000;
assertThrows(NumberFormatException.class,
() -> submitJobPartCalculator.calculatePartBufferSize(hazelcastProperties, jarSize));
}
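// Hedged happy-path sketch alongside the failure case above: a jar smaller than the
// configured part size is uploaded in a single part of exactly its own length.
Properties okProps = new Properties();
okProps.setProperty(ClientProperty.JOB_UPLOAD_PART_SIZE.getName(), "10000000");
int part = new SubmitJobPartCalculator()
    .calculatePartBufferSize(new HazelcastProperties(okProps), 2_000L);
// part == 2_000 because the jar size wins over the 10 MB configured part size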
|
@Override
public int getPort(final String alias) {
if(StringUtils.isBlank(alias)) {
return -1;
}
return configuration.lookup(alias).getPort();
}
|
@Test
public void testPort() {
OpenSSHHostnameConfigurator c = new OpenSSHHostnameConfigurator(
new OpenSshConfig(
new Local("src/test/resources", "openssh/config")));
assertEquals(555, c.getPort("portalias"));
assertEquals(-1, c.getPort(null));
}
|
@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException {
DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
try {
// namespace
String namespace = commandLine.getOptionValue('s').trim();
// key name
String key = commandLine.getOptionValue('k').trim();
// key value
String value = commandLine.getOptionValue('v').trim();
if (commandLine.hasOption('n')) {
defaultMQAdminExt.setNamesrvAddr(commandLine.getOptionValue('n').trim());
}
defaultMQAdminExt.start();
defaultMQAdminExt.createAndUpdateKvConfig(namespace, key, value);
System.out.printf("create or update kv config to namespace success.%n");
} catch (Exception e) {
throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
} finally {
defaultMQAdminExt.shutdown();
}
}
|
@Test
public void testExecute() throws SubCommandException {
UpdateKvConfigCommand cmd = new UpdateKvConfigCommand();
Options options = ServerUtil.buildCommandlineOptions(new Options());
String[] subargs = new String[] {
"-s namespace", "-k topicname", "-v unit_test",
String.format("-n localhost:%d", nameServerMocker.listenPort())};
final CommandLine commandLine =
ServerUtil.parseCmdLine("mqadmin " + cmd.commandName() + cmd.commandDesc(), subargs,
cmd.buildCommandlineOptions(options), new DefaultParser());
cmd.execute(commandLine, options, null);
}
|
@Override
public List<DatabaseTableRespVO> getDatabaseTableList(Long dataSourceConfigId, String name, String comment) {
List<TableInfo> tables = databaseTableService.getTableList(dataSourceConfigId, name, comment);
// Remove tables that already exist in Codegen
Set<String> existsTables = convertSet(
codegenTableMapper.selectListByDataSourceConfigId(dataSourceConfigId), CodegenTableDO::getTableName);
tables.removeIf(table -> existsTables.contains(table.getName()));
return BeanUtils.toBean(tables, DatabaseTableRespVO.class);
}
|
@Test
public void testGetDatabaseTableList() {
// prepare parameters
Long dataSourceConfigId = randomLongId();
String name = randomString();
String comment = randomString();
// mock methods
TableInfo tableInfo01 = mock(TableInfo.class);
when(tableInfo01.getName()).thenReturn("t_yunai");
when(tableInfo01.getComment()).thenReturn("芋艿");
TableInfo tableInfo02 = mock(TableInfo.class);
when(tableInfo02.getName()).thenReturn("t_yunai_02");
when(tableInfo02.getComment()).thenReturn("芋艿_02");
when(databaseTableService.getTableList(eq(dataSourceConfigId), eq(name), eq(comment)))
.thenReturn(ListUtil.toList(tableInfo01, tableInfo02));
// mock data
CodegenTableDO tableDO = randomPojo(CodegenTableDO.class,
o -> o.setScene(CodegenSceneEnum.ADMIN.getScene())
.setTableName("t_yunai_02")
.setDataSourceConfigId(dataSourceConfigId));
codegenTableMapper.insert(tableDO);
// invoke
List<DatabaseTableRespVO> result = codegenService.getDatabaseTableList(dataSourceConfigId, name, comment);
// assert
assertEquals(1, result.size());
assertEquals("t_yunai", result.get(0).getName());
assertEquals("芋艿", result.get(0).getComment());
}
|
public void createView(View view, boolean replace, boolean ifNotExists) {
if (ifNotExists) {
relationsStorage.putIfAbsent(view.name(), view);
} else if (replace) {
relationsStorage.put(view.name(), view);
} else if (!relationsStorage.putIfAbsent(view.name(), view)) {
throw QueryException.error("Mapping or view already exists: " + view.name());
}
}
|
@Test
public void when_createsDuplicateViews_then_throws() {
// given
View view = view();
given(relationsStorage.putIfAbsent(eq(view.name()), isA(View.class))).willReturn(false);
// when
// then
assertThatThrownBy(() -> catalog.createView(view, false, false))
.isInstanceOf(QueryException.class)
.hasMessageContaining("Mapping or view already exists: name");
verifyNoInteractions(listener);
}
|
public ClusterStatsResponse clusterStats() {
return execute(() -> {
Request request = new Request("GET", "/_cluster/stats");
Response response = restHighLevelClient.getLowLevelClient().performRequest(request);
return ClusterStatsResponse.toClusterStatsResponse(gson.fromJson(EntityUtils.toString(response.getEntity()), JsonObject.class));
});
}
|
@Test
public void should_add_authentication_header() throws InterruptedException {
mockWebServer.enqueue(new MockResponse()
.setResponseCode(200)
.setBody(EXAMPLE_CLUSTER_STATS_JSON)
.setHeader("Content-Type", "application/json"));
String password = "test-password";
EsClient underTest = new EsClient(password, null, null, new HttpHost(mockWebServer.getHostName(), mockWebServer.getPort()));
assertThat(underTest.clusterStats()).isNotNull();
assertThat(mockWebServer.takeRequest().getHeader("Authorization")).isEqualTo("Basic ZWxhc3RpYzp0ZXN0LXBhc3N3b3Jk");
}
|
public static boolean isEditionBundled(Plugin plugin) {
return SONARSOURCE_ORGANIZATION.equalsIgnoreCase(plugin.getOrganization())
&& Arrays.stream(SONARSOURCE_COMMERCIAL_LICENSES).anyMatch(s -> s.equalsIgnoreCase(plugin.getLicense()));
}
|
@Test
public void isEditionBundled_on_PluginInfo_returns_false_for_SonarSource_and_non_commercial_license() {
PluginInfo pluginInfo = newPluginInfo(randomizeCase("SonarSource"), randomAlphanumeric(3));
assertThat(EditionBundledPlugins.isEditionBundled(pluginInfo)).isFalse();
}
|
List<GcpAddress> getAddresses() {
try {
return RetryUtils.retry(this::fetchGcpAddresses, RETRIES, NON_RETRYABLE_KEYWORDS);
} catch (RestClientException e) {
handleKnownException(e);
return emptyList();
}
}
|
@Test
public void getAddressesWithPrivateKeyPath() {
// given
given(gcpMetadataApi.accessToken()).willReturn(null);
given(gcpAuthenticator.refreshAccessToken(PRIVATE_KEY_PATH)).willReturn(ACCESS_TOKEN);
given(gcpComputeApi.instances(CURRENT_PROJECT, CURRENT_ZONE, null, ACCESS_TOKEN)).willReturn(ADDRESSES);
GcpConfig gcpConfig = GcpConfig.builder().setPrivateKeyPath(PRIVATE_KEY_PATH).build();
GcpClient gcpClient = new GcpClient(gcpMetadataApi, gcpComputeApi, gcpAuthenticator, gcpConfig);
// when
List<GcpAddress> result = gcpClient.getAddresses();
// then
assertEquals(ADDRESSES, result);
}
|
String getAgentStatusReportRequestBody(JobIdentifier identifier, String elasticAgentId, Map<String, String> clusterProfile) {
JsonObject jsonObject = new JsonObject();
if (identifier != null) {
jsonObject.add("job_identifier", jobIdentifierJson(identifier));
}
jsonObject.add("cluster_profile_properties", mapToJsonObject(clusterProfile));
jsonObject.addProperty("elastic_agent_id", elasticAgentId);
return FORCED_EXPOSE_GSON.toJson(jsonObject);
}
|
@Test
public void shouldJSONizeElasticAgentStatusReportRequestBodyWhenElasticAgentIdIsProvided() throws Exception {
String elasticAgentId = "my-fancy-elastic-agent-id";
String actual = new ElasticAgentExtensionConverterV5().getAgentStatusReportRequestBody(null, elasticAgentId, clusterProfile);
String expected = format("{" +
"\"cluster_profile_properties\":{" +
" \"key\":\"value\"" +
" }," +
" \"elastic_agent_id\": \"%s\"" +
"}", elasticAgentId);
assertThatJson(expected).isEqualTo(actual);
}
|
public Exception getException() {
if (exception != null) return exception;
try {
final Class<? extends Exception> exceptionClass = ReflectionUtils.toClass(getExceptionType());
if (getExceptionCauseType() != null) {
final Class<? extends Exception> exceptionCauseClass = ReflectionUtils.toClass(getExceptionCauseType());
final Exception exceptionCause = getExceptionCauseMessage() != null ? ReflectionUtils.newInstanceCE(exceptionCauseClass, getExceptionCauseMessage()) : ReflectionUtils.newInstanceCE(exceptionCauseClass);
exceptionCause.setStackTrace(new StackTraceElement[]{});
return getExceptionMessage() != null ? ReflectionUtils.newInstanceCE(exceptionClass, getExceptionMessage(), exceptionCause) : ReflectionUtils.newInstanceCE(exceptionClass, exceptionCause);
} else {
return getExceptionMessage() != null ? ReflectionUtils.newInstanceCE(exceptionClass, getExceptionMessage()) : ReflectionUtils.newInstanceCE(exceptionClass);
}
} catch (ReflectiveOperationException e) {
throw new IllegalStateException("Could not reconstruct exception for class " + getExceptionType() + " and message " + getExceptionMessage(), e);
}
}
|
@Test
void getExceptionWithMessage() {
final FailedState failedState = new FailedState("JobRunr message", new CustomException("custom exception message"));
assertThat(failedState.getException())
.isInstanceOf(CustomException.class)
.hasMessage("custom exception message");
}
|
@Override
public ConfigOperateResult insertOrUpdateTagCas(final ConfigInfo configInfo, final String tag, final String srcIp,
final String srcUser) {
if (findConfigInfo4TagState(configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant(), tag)
== null) {
return addConfigInfo4Tag(configInfo, tag, srcIp, srcUser);
} else {
return updateConfigInfo4TagCas(configInfo, tag, srcIp, srcUser);
}
}
|
@Test
void testInsertOrUpdateTagCasOfAdd() {
String dataId = "dataId111222";
String group = "group";
String tenant = "tenant";
String appName = "appname1234";
String content = "c12345";
ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
configInfo.setEncryptedDataKey("key23456");
configInfo.setMd5("casMd5");
//mock query config state empty and return obj after insert
ConfigInfoStateWrapper configInfoStateWrapper = new ConfigInfoStateWrapper();
configInfoStateWrapper.setLastModified(System.currentTimeMillis());
configInfoStateWrapper.setId(234567890L);
String tag = "tag123";
Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant, tag}),
eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenThrow(new EmptyResultDataAccessException(1))
.thenReturn(configInfoStateWrapper);
String srcIp = "ip345678";
String srcUser = "user1234567";
ConfigOperateResult configOperateResult = externalConfigInfoTagPersistService.insertOrUpdateTagCas(configInfo, tag, srcIp, srcUser);
//verify insert to be invoked
Mockito.verify(jdbcTemplate, times(1))
.update(anyString(), eq(dataId), eq(group), eq(tenant), eq(tag), eq(appName), eq(configInfo.getContent()),
eq(MD5Utils.md5Hex(configInfo.getContent(), Constants.PERSIST_ENCODE)), eq(srcIp), eq(srcUser),
any(Timestamp.class), any(Timestamp.class));
assertEquals(configInfoStateWrapper.getId(), configOperateResult.getId());
assertEquals(configInfoStateWrapper.getLastModified(), configOperateResult.getLastModified());
}
|
@Override
public void close() {
if (CONTAINER_DATASOURCE_NAMES.contains(dataSource.getClass().getSimpleName())) {
close(dataSource);
} else {
xaTransactionManagerProvider.removeRecoveryResource(resourceName, xaDataSource);
}
enlistedTransactions.remove();
}
|
@Test
void assertCloseAtomikosDataSourceBean() {
DataSource dataSource = DataSourceUtils.build(AtomikosDataSourceBean.class, TypedSPILoader.getService(DatabaseType.class, "H2"), "ds11");
XATransactionDataSource transactionDataSource = new XATransactionDataSource(TypedSPILoader.getService(DatabaseType.class, "H2"), "ds11", dataSource, xaTransactionManagerProvider);
transactionDataSource.close();
verify(xaTransactionManagerProvider, times(0)).removeRecoveryResource(anyString(), any(XADataSource.class));
}
|
public static NetworkEndpoint forIpAndPort(String ipAddress, int port) {
checkArgument(InetAddresses.isInetAddress(ipAddress), "'%s' is not an IP address.", ipAddress);
checkArgument(
0 <= port && port <= MAX_PORT_NUMBER,
"Port out of range. Expected [0, %s], actual %s.",
MAX_PORT_NUMBER,
port);
return forIp(ipAddress).toBuilder()
.setType(NetworkEndpoint.Type.IP_PORT)
.setPort(Port.newBuilder().setPortNumber(port))
.build();
}
|
@Test
public void forIpAndPort_withInvalidPort_throwsIllegalArgumentException() {
assertThrows(
IllegalArgumentException.class, () -> NetworkEndpointUtils.forIpAndPort("abc", -1));
assertThrows(
IllegalArgumentException.class, () -> NetworkEndpointUtils.forIpAndPort("abc", 65536));
}
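// Hedged happy-path sketch (values are arbitrary): a valid IP and port yield an IP_PORT
// endpoint carrying both pieces of information.
NetworkEndpoint endpoint = NetworkEndpointUtils.forIpAndPort("127.0.0.1", 8080);
// endpoint.getType() == NetworkEndpoint.Type.IP_PORT
// endpoint.getPort().getPortNumber() == 8080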
|
@Override
public Map<String, Metric> getMetrics() {
return metricRegistry.getMetrics();
}
|
@Test
public void shouldReturnTotalNumberOfRequestsAs3ForFailAsync() {
AsyncHelloWorldService helloWorldService = mock(AsyncHelloWorldService.class);
ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
CompletableFuture<String> failedFuture = new CompletableFuture<>();
failedFuture.completeExceptionally(new HelloWorldException());
given(helloWorldService.returnHelloWorld())
.willReturn(failedFuture)
.willReturn(completedFuture("Success"));
Retry retry = Retry.of("metrics", RetryConfig.<String>custom()
.retryExceptions(Exception.class)
.maxAttempts(5)
.failAfterMaxAttempts(true)
.build());
Supplier<CompletionStage<String>> supplier = Retry.decorateCompletionStage(retry, scheduler, helloWorldService::returnHelloWorld);
String result = awaitResult(supplier.get(), 5);
assertThat(retry.getMetrics().getNumberOfTotalCalls()).isEqualTo(2);
assertThat(result).isEqualTo("Success");
}
|
@Override
public void checkSubjectAccess(
final KsqlSecurityContext securityContext,
final String subjectName,
final AclOperation operation
) {
checkAccess(new CacheKey(securityContext,
AuthObjectType.SUBJECT,
subjectName,
operation));
}
|
@Test
public void shouldThrowExceptionWhenBackendSubjectValidatorThrowsAnException() {
// Given
doThrow(RuntimeException.class).when(backendValidator)
.checkSubjectAccess(securityContext, SUBJECT_1, AclOperation.READ);
// When:
assertThrows(
RuntimeException.class,
() -> cache.checkSubjectAccess(securityContext, SUBJECT_1, AclOperation.READ)
);
}
|
public static void mergeParams(
Map<String, ParamDefinition> params,
Map<String, ParamDefinition> paramsToMerge,
MergeContext context) {
if (paramsToMerge == null) {
return;
}
Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
.forEach(
name -> {
ParamDefinition paramToMerge = paramsToMerge.get(name);
if (paramToMerge == null) {
return;
}
if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
mergeParams(
baseMap,
toMergeMap,
MergeContext.copyWithParentMode(
context, params.getOrDefault(name, paramToMerge).getMode()));
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else if (paramToMerge.getType() == ParamType.STRING_MAP
&& paramToMerge.isLiteral()) {
Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
baseMap.putAll(toMergeMap);
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else {
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
}
});
}
|
@Test
public void testMergePartialTags() throws JsonProcessingException {
Map<String, ParamDefinition> allParams =
parseParamDefMap(
"{'basetag': {'type': 'STRING','tags': ['tag1'], 'value': 'hello'}, 'mergetag': {'type': 'STRING', 'value': 'hello'}}");
Map<String, ParamDefinition> paramsToMerge =
parseParamDefMap(
"{'basetag': {'type': 'STRING', 'value': 'goodbye'}, 'mergetag': {'type': 'STRING','value': 'goodbye', 'tags': ['tag1']} }");
ParamsMergeHelper.mergeParams(allParams, paramsToMerge, definitionContext);
assertEquals(2, allParams.size());
assertEquals(1, allParams.get("basetag").asStringParamDef().getTags().getTags().size());
assertEquals(1, allParams.get("mergetag").asStringParamDef().getTags().getTags().size());
}
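// Hedged sketch reusing the test helpers above (parseParamDefMap, definitionContext); the
// STRING_MAP literal shape is an assumption. Literal map params are merged entry-by-entry
// rather than replaced wholesale.
Map<String, ParamDefinition> base = parseParamDefMap(
    "{'cfg': {'type': 'STRING_MAP', 'value': {'a': '1'}}}");
Map<String, ParamDefinition> overlay = parseParamDefMap(
    "{'cfg': {'type': 'STRING_MAP', 'value': {'b': '2'}}}");
ParamsMergeHelper.mergeParams(base, overlay, definitionContext);
// base.get("cfg") now carries both entries: a=1 and b=2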
|
@Override
public Iterator<IndexKeyEntries> getSqlRecordIteratorBatch(@Nonnull Comparable value, boolean descending) {
return getSqlRecordIteratorBatch(value, descending, null);
}
|
@Test
public void getRecordsWithCursorUsingExactValueInequalityAscending() {
var expectedOrder = List.of(1, 4, 7, 2, 5, 8);
performCursorTest(3, expectedOrder, cursor -> store.getSqlRecordIteratorBatch(Comparison.GREATER_OR_EQUAL, 1, false, cursor));
}
|
@Override
public boolean imbalanceDetected(LoadImbalance imbalance) {
long min = imbalance.minimumLoad;
long max = imbalance.maximumLoad;
if (min == Long.MIN_VALUE || max == Long.MAX_VALUE) {
return false;
}
long lowerBound = (long) (MIN_MAX_RATIO_MIGRATION_THRESHOLD * max);
return min < lowerBound;
}
|
@Test
public void testImbalanceDetected_shouldReturnFalseWhenNoKnownMinimum() {
imbalance.minimumLoad = Long.MIN_VALUE;
boolean imbalanceDetected = strategy.imbalanceDetected(imbalance);
assertFalse(imbalanceDetected);
}
|
public static <T> void validateRegistrationIds(final Account account, final Collection<T> messages,
Function<T, Byte> getDeviceId, Function<T, Integer> getRegistrationId, boolean usePhoneNumberIdentity)
throws StaleDevicesException {
validateRegistrationIds(account,
messages.stream().map(m -> new Pair<>(getDeviceId.apply(m), getRegistrationId.apply(m))),
usePhoneNumberIdentity);
}
|
@Test
void testDuplicateDeviceIds() {
final Account account = mockAccountWithDeviceAndRegId(Map.of(Device.PRIMARY_ID, 17));
try {
DestinationDeviceValidator.validateRegistrationIds(account,
Stream.of(new Pair<>(Device.PRIMARY_ID, 16), new Pair<>(Device.PRIMARY_ID, 17)), false);
Assertions.fail("duplicate devices should throw StaleDevicesException");
} catch (StaleDevicesException e) {
Assertions.assertThat(e.getStaleDevices()).hasSameElementsAs(Collections.singletonList(Device.PRIMARY_ID));
}
}
|
static SearchProtocol.SearchRequest convertFromQuery(Query query, int hits, String serverId, double requestTimeout) {
var builder = SearchProtocol.SearchRequest.newBuilder().setHits(hits).setOffset(query.getOffset())
.setTimeout((int) (requestTimeout * 1000));
var documentDb = query.getModel().getDocumentDb();
if (documentDb != null) {
builder.setDocumentType(documentDb);
}
GrowableByteBuffer scratchPad = threadLocalBuffer.get();
builder.setQueryTreeBlob(serializeQueryTree(query.getModel().getQueryTree(), scratchPad));
if (query.getGroupingSessionCache() || query.getRanking().getQueryCache()) {
// TODO verify that the session key is included whenever rank properties would have been
builder.setSessionKey(query.getSessionId(serverId).toString());
}
if (query.properties().getBoolean(Model.ESTIMATE)) {
builder.setHits(0);
}
if (GroupingExecutor.hasGroupingList(query)) {
List<Grouping> groupingList = GroupingExecutor.getGroupingList(query);
scratchPad.clear();
BufferSerializer gbuf = new BufferSerializer(scratchPad);
gbuf.putInt(null, groupingList.size());
for (Grouping g : groupingList) {
g.serialize(gbuf);
}
gbuf.getBuf().flip();
builder.setGroupingBlob(ByteString.copyFrom(gbuf.getBuf().getByteBuffer()));
}
if (query.getGroupingSessionCache()) {
builder.setCacheGrouping(true);
}
int traceLevel = getTraceLevelForBackend(query);
builder.setTraceLevel(traceLevel);
builder.setProfileDepth(query.getTrace().getProfileDepth());
if (traceLevel > 0) {
mergeToSearchRequestFromProfiling(query.getTrace().getProfiling(), builder);
}
mergeToSearchRequestFromRanking(query.getRanking(), scratchPad, builder);
return builder.build();
}
|
@Test
void profiling_parameters_are_serialized_in_search_request() {
var q = new Query("?query=test&trace.level=1&" +
"trace.profiling.matching.depth=3&" +
"trace.profiling.firstPhaseRanking.depth=5&" +
"trace.profiling.secondPhaseRanking.depth=-7");
var req = ProtobufSerialization.convertFromQuery(q, 1, "serverId", 0.5);
assertEquals(3, req.getProfiling().getMatch().getDepth());
assertEquals(5, req.getProfiling().getFirstPhase().getDepth());
assertEquals(-7, req.getProfiling().getSecondPhase().getDepth());
}
|
@Override
public CheckResult runCheck() {
String filter = buildQueryFilter(stream.getId(), query);
String query = field + ":\"" + value + "\"";
Integer backlogSize = getBacklog();
boolean backlogEnabled = false;
int searchLimit = 1;
if(backlogSize != null && backlogSize > 0) {
backlogEnabled = true;
searchLimit = backlogSize;
}
try {
SearchResult result = searches.search(
query,
filter,
RelativeRange.create(configuration.getAlertCheckInterval()),
searchLimit,
0,
new Sorting(Message.FIELD_TIMESTAMP, Sorting.Direction.DESC)
);
final List<MessageSummary> summaries;
if (backlogEnabled) {
summaries = Lists.newArrayListWithCapacity(result.getResults().size());
for (ResultMessage resultMessage : result.getResults()) {
final Message msg = resultMessage.getMessage();
summaries.add(new MessageSummary(resultMessage.getIndex(), msg));
}
} else {
summaries = Collections.emptyList();
}
final long count = result.getTotalResults();
final String resultDescription = "Stream received messages matching <" + query + "> "
+ "(Current grace time: " + grace + " minutes)";
if (count > 0) {
LOG.debug("Alert check <{}> found [{}] messages.", id, count);
return new CheckResult(true, this, resultDescription, Tools.nowUTC(), summaries);
} else {
LOG.debug("Alert check <{}> returned no results.", id);
return new NegativeCheckResult();
}
} catch (InvalidRangeParametersException e) {
// cannot happen: the relative range built above is always valid
LOG.error("Invalid timerange.", e);
return null;
}
}
|
@Test
public void testRunMatchingMessagesInStream() throws Exception {
final ResultMessage searchHit = resultMessageFactory.parseFromSource("some_id", "graylog_test",
Collections.singletonMap("message", "something is in here"));
final DateTime now = DateTime.now(DateTimeZone.UTC);
final IndexRange indexRange = MongoIndexRange.create("graylog_test", now.minusDays(1), now, now, 0);
final Set<IndexRange> indexRanges = Sets.newHashSet(indexRange);
final SearchResult searchResult = spy(new SearchResult(Collections.singletonList(searchHit),
1L,
indexRanges,
"message:something",
null,
100L));
when(searchResult.getTotalResults()).thenReturn(1L);
when(searches.search(
anyString(),
anyString(),
any(RelativeRange.class),
anyInt(),
anyInt(),
any(Sorting.class)))
.thenReturn(searchResult);
final FieldContentValueAlertCondition condition = getCondition(getParametersMap(0, "message", "something"), "Alert Condition for testing");
final AlertCondition.CheckResult result = condition.runCheck();
assertTriggered(condition, result);
}
|
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
final P4ActionParamModel other = (P4ActionParamModel) obj;
return Objects.equals(this.id, other.id)
&& Objects.equals(this.bitWidth, other.bitWidth);
}
|
@Test
public void testEquals() {
new EqualsTester()
.addEqualityGroup(actionParamModel, sameAsActionParamModel)
.addEqualityGroup(actionParamModel4, sameAsActionParamModel4)
.addEqualityGroup(actionParamModel2)
.addEqualityGroup(actionParamModel3)
.addEqualityGroup(actionParamModel5)
.testEquals();
}
|
public String transform() throws ScanException {
StringBuilder stringBuilder = new StringBuilder();
compileNode(node, stringBuilder, new Stack<Node>());
return stringBuilder.toString();
}
|
@Test
public void LOGBACK_1101() throws ScanException {
String input = "a: {y}";
Node node = makeNode(input);
NodeToStringTransformer nodeToStringTransformer = new NodeToStringTransformer(node, propertyContainer0);
assertEquals("a: {y}", nodeToStringTransformer.transform());
}
|
@Override
public String format(final Schema schema) {
final String converted = SchemaWalker.visit(schema, new Converter()) + typePostFix(schema);
return options.contains(Option.AS_COLUMN_LIST)
? stripTopLevelStruct(converted)
: converted;
}
|
@Test
public void shouldFormatDouble() {
assertThat(DEFAULT.format(Schema.FLOAT64_SCHEMA), is("DOUBLE"));
assertThat(STRICT.format(Schema.FLOAT64_SCHEMA), is("DOUBLE NOT NULL"));
}
|
public static String getCertFingerPrint(Certificate cert) {
byte [] digest = null;
try {
byte[] encCertInfo = cert.getEncoded();
MessageDigest md = MessageDigest.getInstance("SHA-1");
digest = md.digest(encCertInfo);
} catch (Exception e) {
logger.error("Exception:", e);
}
if (digest != null) {
return bytesToHex(digest).toLowerCase();
}
return null;
}
|
@Test
public void testGetCertFingerPrintCa() throws Exception {
X509Certificate cert = null;
try (InputStream is = Config.getInstance().getInputStreamFromFile("ca.crt")){
CertificateFactory cf = CertificateFactory.getInstance("X.509");
cert = (X509Certificate) cf.generateCertificate(is);
} catch (Exception e) {
e.printStackTrace();
}
String fp = FingerPrintUtil.getCertFingerPrint(cert);
Assert.assertEquals("da2794f442f08a73ac9eef7f9378dd7a5bbcf8c6", fp);
}
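// Hedged sketch (keystore path, password and alias are placeholders, not values from the
// original test resources): fingerprinting a certificate pulled out of a keystore entry.
KeyStore ks = KeyStore.getInstance("PKCS12");
try (InputStream in = Files.newInputStream(Paths.get("client.p12"))) {
    ks.load(in, "changeit".toCharArray());
}
String fingerprint = FingerPrintUtil.getCertFingerPrint(ks.getCertificate("client"));
// fingerprint is the lower-case hex SHA-1 of the DER-encoded certificate, or null on failure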
|
@Override
public List<GrokPattern> saveAll(Collection<GrokPattern> patterns,
ImportStrategy importStrategy) throws ValidationException {
if (importStrategy == ABORT_ON_CONFLICT) {
for (GrokPattern pattern : loadAll()) {
final boolean patternExists = patterns.stream().anyMatch(p -> p.name().equals(pattern.name()));
if (patternExists) {
throw new ValidationException("Grok pattern " + pattern.name() + " already exists");
}
}
}
validateAllOrThrow(patterns, importStrategy);
final List<GrokPattern> grokPatterns = patterns.stream()
.map(this::uncheckedSave)
.collect(Collectors.toList());
final Set<String> patternNames = grokPatterns.stream()
.map(GrokPattern::name)
.collect(Collectors.toSet());
if (!patternNames.isEmpty()) {
clusterBus.post(GrokPatternsUpdatedEvent.create(patternNames));
}
return grokPatterns;
}
|
@Test
public void saveAll() throws Exception {
Collection<GrokPattern> patterns = ImmutableList.of(GrokPattern.create("1", ".*"),
GrokPattern.create("2", ".+"));
final List<GrokPattern> saved = service.saveAll(patterns, ABORT_ON_CONFLICT);
assertThat(saved).hasSize(2);
// should fail because already exists
assertThatThrownBy(() -> service.saveAll(patterns, ABORT_ON_CONFLICT))
.isInstanceOf(ValidationException.class);
// should add the patterns again
service.saveAll(patterns, REPLACE_ON_CONFLICT);
assertThat(service.loadAll()).hasSize(4);
// replaced all patterns
service.saveAll(patterns, DROP_ALL_EXISTING);
assertThat(service.loadAll()).hasSize(2);
}
|
public Plan validateReservationSubmissionRequest(
ReservationSystem reservationSystem, ReservationSubmissionRequest request,
ReservationId reservationId) throws YarnException {
String message;
if (reservationId == null) {
message = "Reservation id cannot be null. Please try again specifying "
+ " a valid reservation id by creating a new reservation id.";
throw RPCUtil.getRemoteException(message);
}
// Check if it is a managed queue
String queue = request.getQueue();
Plan plan = getPlanFromQueue(reservationSystem, queue,
AuditConstants.SUBMIT_RESERVATION_REQUEST);
validateReservationDefinition(reservationId,
request.getReservationDefinition(), plan,
AuditConstants.SUBMIT_RESERVATION_REQUEST);
return plan;
}
|
@Test
public void testSubmitReservationMaxPeriodIndivisibleByRecurrenceExp() {
long indivisibleRecurrence =
YarnConfiguration.DEFAULT_RM_RESERVATION_SYSTEM_MAX_PERIODICITY / 2 + 1;
String recurrenceExp = Long.toString(indivisibleRecurrence);
ReservationSubmissionRequest request =
createSimpleReservationSubmissionRequest(1, 1, 1, 5, 3, recurrenceExp);
plan = null;
try {
plan = rrValidator.validateReservationSubmissionRequest(rSystem, request,
ReservationSystemTestUtil.getNewReservationId());
Assert.fail();
} catch (YarnException e) {
Assert.assertNull(plan);
String message = e.getMessage();
Assert.assertTrue(message.startsWith("The maximum periodicity:"));
LOG.info(message);
}
}
|
public boolean shouldRecord() {
return this.recordingLevel.shouldRecord(config.recordLevel().id);
}
|
@Test
public void testRecordLevelEnum() {
Sensor.RecordingLevel configLevel = Sensor.RecordingLevel.INFO;
assertTrue(Sensor.RecordingLevel.INFO.shouldRecord(configLevel.id));
assertFalse(Sensor.RecordingLevel.DEBUG.shouldRecord(configLevel.id));
assertFalse(Sensor.RecordingLevel.TRACE.shouldRecord(configLevel.id));
configLevel = Sensor.RecordingLevel.DEBUG;
assertTrue(Sensor.RecordingLevel.INFO.shouldRecord(configLevel.id));
assertTrue(Sensor.RecordingLevel.DEBUG.shouldRecord(configLevel.id));
assertFalse(Sensor.RecordingLevel.TRACE.shouldRecord(configLevel.id));
configLevel = Sensor.RecordingLevel.TRACE;
assertTrue(Sensor.RecordingLevel.INFO.shouldRecord(configLevel.id));
assertTrue(Sensor.RecordingLevel.DEBUG.shouldRecord(configLevel.id));
assertTrue(Sensor.RecordingLevel.TRACE.shouldRecord(configLevel.id));
assertEquals(Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.DEBUG.toString()),
Sensor.RecordingLevel.DEBUG);
assertEquals(Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.INFO.toString()),
Sensor.RecordingLevel.INFO);
assertEquals(Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.TRACE.toString()),
Sensor.RecordingLevel.TRACE);
}
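The assertions above follow from the ordering of the three levels: a sensor records only when the configured level is at least as verbose as its own. A minimal, hypothetical sketch of that check, assuming the ids are assigned in increasing verbosity (INFO=0, DEBUG=1, TRACE=2); this is an illustration, not Kafka's actual Sensor.RecordingLevel source:

// Hypothetical illustration of the level check exercised by testRecordLevelEnum.
// Assumption: ids increase with verbosity (INFO=0, DEBUG=1, TRACE=2).
enum Level {
    INFO(0), DEBUG(1), TRACE(2);

    final short id;

    Level(int id) {
        this.id = (short) id;
    }

    // Record only if the configured level is at least as verbose as this sensor's level.
    boolean shouldRecord(int configId) {
        return this.id <= configId;
    }
}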
|
boolean shouldReplicateTopic(String topic) {
return (topicFilter.shouldReplicateTopic(topic) || replicationPolicy.isHeartbeatsTopic(topic))
&& !replicationPolicy.isInternalTopic(topic) && !isCycle(topic);
}
|
@Test
public void testIdentityReplication() {
MirrorSourceConnector connector = new MirrorSourceConnector(new SourceAndTarget("source", "target"),
new IdentityReplicationPolicy(), x -> true, getConfigPropertyFilter());
assertTrue(connector.shouldReplicateTopic("target.topic1"), "should allow cycles");
assertTrue(connector.shouldReplicateTopic("target.source.topic1"), "should allow cycles");
assertTrue(connector.shouldReplicateTopic("source.target.topic1"), "should allow cycles");
assertTrue(connector.shouldReplicateTopic("target.source.target.topic1"), "should allow cycles");
assertTrue(connector.shouldReplicateTopic("source.target.source.topic1"), "should allow cycles");
assertTrue(connector.shouldReplicateTopic("topic1"), "should allow normal topics");
assertTrue(connector.shouldReplicateTopic("othersource.topic1"), "should allow normal topics");
assertFalse(connector.shouldReplicateTopic("target.heartbeats"), "should not allow heartbeat cycles");
assertFalse(connector.shouldReplicateTopic("target.source.heartbeats"), "should not allow heartbeat cycles");
assertFalse(connector.shouldReplicateTopic("source.target.heartbeats"), "should not allow heartbeat cycles");
assertFalse(connector.shouldReplicateTopic("target.source.target.heartbeats"), "should not allow heartbeat cycles");
assertFalse(connector.shouldReplicateTopic("source.target.source.heartbeats"), "should not allow heartbeat cycles");
assertTrue(connector.shouldReplicateTopic("heartbeats"), "should allow heartbeat topics");
assertTrue(connector.shouldReplicateTopic("othersource.heartbeats"), "should allow heartbeat topics");
}
|
public String getMasterAddr() {
return masterAddr;
}
|
@Test
public void testGetMasterAddr() {
Assert.assertEquals(BROKER_ADDR, slaveSynchronize.getMasterAddr());
}
|
public static String getAliasByCode(byte code) {
return TYPE_CODE_MAP.getKey(code);
}
|
@Test
public void getAliasByCode() {
Assert.assertEquals("test", SerializerFactory.getAliasByCode((byte) 117));
}
|
public double add(int i, int j, double b) {
return A[index(i, j)] += b;
}
|
@Test
public void testAdd() {
System.out.println("add");
double[][] A = {
{ 0.7220180, 0.07121225, 0.6881997f},
{-0.2648886, -0.89044952, 0.3700456f},
{-0.6391588, 0.44947578, 0.6240573f}
};
double[][] B = {
{0.6881997, -0.07121225, 0.7220180f},
{0.3700456, 0.89044952, -0.2648886f},
{0.6240573, -0.44947578, -0.6391588f}
};
double[][] C = {
{ 1.4102177, 0, 1.4102177f},
{ 0.1051570, 0, 0.1051570f},
{-0.0151015, 0, -0.0151015f}
};
Matrix a = Matrix.of(A);
Matrix b = Matrix.of(B);
a.add(1.0, b);
assertTrue(MathEx.equals(C, a.toArray(), 1E-7));
}
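As a sanity check on the expected values, each entry of C is simply the element-wise sum, assuming a.add(1.0, b) behaves like A += 1.0 * B (an assumption about the Matrix API based on this test, not a statement of its implementation). A tiny standalone arithmetic check for the first entry, with the hypothetical class name AddCheck:

// Quick arithmetic check for the expected matrix C used in testAdd.
public class AddCheck {
    public static void main(String[] args) {
        double a00 = 0.7220180;
        double b00 = 0.6881997;
        System.out.println(a00 + b00);                                 // 1.4102177
        System.out.println(Math.abs((a00 + b00) - 1.4102177) < 1E-7);  // true
    }
}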
|
@Override
public GroupAssignment assign(
GroupSpec groupSpec,
SubscribedTopicDescriber subscribedTopicDescriber
) throws PartitionAssignorException {
if (groupSpec.memberIds().isEmpty())
return new GroupAssignment(Collections.emptyMap());
if (groupSpec.subscriptionType().equals(HOMOGENEOUS)) {
return assignHomogenous(groupSpec, subscribedTopicDescriber);
} else {
return assignHeterogeneous(groupSpec, subscribedTopicDescriber);
}
}
|
@Test
public void testAssignWithThreeMembersThreeTopicsHeterogeneous() {
Map<Uuid, TopicMetadata> topicMetadata = new HashMap<>();
topicMetadata.put(TOPIC_1_UUID, new TopicMetadata(
TOPIC_1_UUID,
TOPIC_1_NAME,
3,
Collections.emptyMap()
));
topicMetadata.put(TOPIC_2_UUID, new TopicMetadata(
TOPIC_2_UUID,
"topic2",
3,
Collections.emptyMap()
));
topicMetadata.put(TOPIC_3_UUID, new TopicMetadata(
TOPIC_3_UUID,
TOPIC_3_NAME,
2,
Collections.emptyMap()
));
Map<String, MemberSubscriptionAndAssignmentImpl> members = new TreeMap<>();
members.put(MEMBER_A, new MemberSubscriptionAndAssignmentImpl(
Optional.empty(),
Optional.empty(),
mkSet(TOPIC_1_UUID, TOPIC_2_UUID),
Assignment.EMPTY
));
members.put(MEMBER_B, new MemberSubscriptionAndAssignmentImpl(
Optional.empty(),
Optional.empty(),
mkSet(TOPIC_3_UUID),
Assignment.EMPTY
));
String memberC = "C";
members.put(memberC, new MemberSubscriptionAndAssignmentImpl(
Optional.empty(),
Optional.empty(),
mkSet(TOPIC_2_UUID, TOPIC_3_UUID),
Assignment.EMPTY
));
GroupSpec groupSpec = new GroupSpecImpl(
members,
HETEROGENEOUS,
Collections.emptyMap()
);
SubscribedTopicDescriberImpl subscribedTopicMetadata = new SubscribedTopicDescriberImpl(topicMetadata);
GroupAssignment computedAssignment = assignor.assign(
groupSpec,
subscribedTopicMetadata
);
Map<String, Map<Uuid, Set<Integer>>> expectedAssignment = new HashMap<>();
expectedAssignment.put(MEMBER_A, mkAssignment(
mkTopicAssignment(TOPIC_1_UUID, 0, 1, 2),
mkTopicAssignment(TOPIC_2_UUID, 0, 1, 2)
));
expectedAssignment.put(MEMBER_B, mkAssignment(
mkTopicAssignment(TOPIC_3_UUID, 0, 1)
));
expectedAssignment.put(memberC, mkAssignment(
mkTopicAssignment(TOPIC_2_UUID, 0, 1, 2),
mkTopicAssignment(TOPIC_3_UUID, 0, 1)
));
assertAssignment(expectedAssignment, computedAssignment);
}
|
public Result runExtractor(String value) {
final Matcher matcher = pattern.matcher(value);
final boolean found = matcher.find();
if (!found) {
return null;
}
final int start = matcher.groupCount() > 0 ? matcher.start(1) : -1;
final int end = matcher.groupCount() > 0 ? matcher.end(1) : -1;
final String s;
try {
s = replaceAll ? matcher.replaceAll(replacement) : matcher.replaceFirst(replacement);
} catch (Exception e) {
throw new RuntimeException("Error while trying to replace string", e);
}
return new Result(s, start, end);
}
|
@Test
public void testReplacementWithoutReplaceAll() throws Exception {
final Message message = messageFactory.createMessage("Foobar 123 Foobaz 456", "source", Tools.nowUTC());
final RegexReplaceExtractor extractor = new RegexReplaceExtractor(
metricRegistry,
"id",
"title",
0L,
Extractor.CursorStrategy.COPY,
"message",
"message",
ImmutableMap.<String, Object>of("regex", "(\\w+) (\\d+)", "replacement", "$2/$1", "replace_all", false),
"user",
Collections.<Converter>emptyList(),
Extractor.ConditionType.NONE,
null);
extractor.runExtractor(message);
assertThat(message.getMessage()).isEqualTo("123/Foobar Foobaz 456");
}
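The expected output follows directly from java.util.regex semantics: with replace_all set to false only the first match of (\w+) (\d+) is rewritten, and $2/$1 swaps the two captured groups. A small standalone check using only the JDK (the class name RegexReplaceCheck is hypothetical, no extractor involved):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RegexReplaceCheck {
    public static void main(String[] args) {
        Pattern pattern = Pattern.compile("(\\w+) (\\d+)");
        Matcher matcher = pattern.matcher("Foobar 123 Foobaz 456");
        // replaceFirst rewrites only the first match, mirroring replace_all=false.
        System.out.println(matcher.replaceFirst("$2/$1")); // 123/Foobar Foobaz 456
    }
}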
|