focal_method | test_case
---|---
protected GelfMessage toGELFMessage(final Message message) {
final DateTime timestamp;
final Object fieldTimeStamp = message.getField(Message.FIELD_TIMESTAMP);
if (fieldTimeStamp instanceof DateTime) {
timestamp = (DateTime) fieldTimeStamp;
} else {
timestamp = Tools.nowUTC();
}
final GelfMessageLevel messageLevel = extractLevel(message.getField(Message.FIELD_LEVEL));
final String fullMessage = (String) message.getField(Message.FIELD_FULL_MESSAGE);
final String forwarder = GelfOutput.class.getCanonicalName();
final GelfMessageBuilder builder = new GelfMessageBuilder(message.getMessage(), message.getSource())
.timestamp(timestamp.getMillis() / 1000.0d)
.additionalField("_forwarder", forwarder)
.additionalFields(message.getFields());
if (messageLevel != null) {
builder.level(messageLevel);
}
if (fullMessage != null) {
builder.fullMessage(fullMessage);
}
return builder.build();
}
|
@Test
public void testToGELFMessageWithInvalidTypeLevel() throws Exception {
final GelfTransport transport = mock(GelfTransport.class);
final GelfOutput gelfOutput = new GelfOutput(transport);
final DateTime now = DateTime.now(DateTimeZone.UTC);
final Message message = messageFactory.createMessage("Test", "Source", now);
message.addField("level", new Object());
final GelfMessage gelfMessage = gelfOutput.toGELFMessage(message);
assertEquals(GelfMessageLevel.ALERT, gelfMessage.getLevel());
}
|
public static Set<Result> anaylze(String log) {
Set<Result> results = new HashSet<>();
for (Rule rule : Rule.values()) {
Matcher matcher = rule.pattern.matcher(log);
if (matcher.find()) {
results.add(new Result(rule, log, matcher));
}
}
return results;
}
|
@Test
public void bootstrapFailed() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/logs/bootstrap.txt")),
CrashReportAnalyzer.Rule.BOOTSTRAP_FAILED);
assertEquals("prefab", result.getMatcher().group("id"));
}
|
@Override
public Object[] toArray() {
return rawList.toArray();
}
|
@Test
public void toArrayTest() {
String jsonStr = FileUtil.readString("exam_test.json", CharsetUtil.CHARSET_UTF_8);
JSONArray array = JSONUtil.parseArray(jsonStr);
//noinspection SuspiciousToArrayCall
Exam[] list = array.toArray(new Exam[0]);
assertNotEquals(0, list.length);
assertSame(Exam.class, list[0].getClass());
}
|
@Override
public Result apply(PathData item, int depth) throws IOException {
String name = getPath(item).getName();
if (!caseSensitive) {
name = StringUtils.toLowerCase(name);
}
if (globPattern.matches(name)) {
return Result.PASS;
} else {
return Result.FAIL;
}
}
|
@Test
public void applyGlobNotMatch() throws IOException {
setup("n*e");
PathData item = new PathData("/directory/path/notmatch", mockFs.getConf());
assertEquals(Result.FAIL, name.apply(item, -1));
}
|
@Override
public FilterRegistration.Dynamic addFilter(String name, String filterClass) {
try {
Class<?> newFilterClass = getClassLoader().loadClass(filterClass);
if (!Filter.class.isAssignableFrom(newFilterClass)) {
throw new IllegalArgumentException(filterClass + " does not implement Filter");
}
@SuppressWarnings("unchecked")
Class<? extends Filter> filterCastClass = (Class<? extends Filter>)newFilterClass;
return addFilter(name, filterCastClass);
} catch (ClassNotFoundException e) {
log.error("Could not find filter class", e);
throw new IllegalStateException("Filter class " + filterClass + " not found");
}
}
|
@Test
void addFilter_doesNotImplementFilter_expectException() {
AwsServletContext ctx = new AwsServletContext(null);
try {
ctx.addFilter("filter", this.getClass().getName());
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().startsWith(this.getClass().getName() + " does not implement Filter"));
return;
}
fail("Expected IllegalArgumentException");
}
|
static String convertEnvVars(String input){
// check for any non-alphanumeric chars and convert to underscore
// convert to upper case
if (input == null) {
return null;
}
return input.replaceAll("[^A-Za-z0-9]", "_").toUpperCase();
}
|
@Test
public void testConvertEnvVarsUsingDotInValueWithCamelCasing() {
String testInput = ConfigInjection.convertEnvVars("server.ENVIRONMENT");
Assert.assertEquals("SERVER_ENVIRONMENT", testInput);
}
|
public static Coder<SdkHttpMetadata> sdkHttpMetadataWithoutHeaders() {
return new SdkHttpMetadataCoder(false);
}
|
@Test
public void testSdkHttpMetadataWithoutHeadersDecodeEncodeEquals() throws Exception {
SdkHttpMetadata value = buildSdkHttpMetadata();
SdkHttpMetadata clone = CoderUtils.clone(AwsCoders.sdkHttpMetadataWithoutHeaders(), value);
assertThat(clone.getHttpStatusCode(), equalTo(value.getHttpStatusCode()));
assertThat(clone.getHttpHeaders().isEmpty(), equalTo(true));
}
|
@Udf(description = "Converts the number of days since 1970-01-01 00:00:00 UTC/GMT to a date "
+ "string using the given format pattern. The format pattern should be in the format"
+ " expected by java.time.format.DateTimeFormatter")
public String dateToString(
@UdfParameter(
description = "The Epoch Day to convert,"
+ " based on the epoch 1970-01-01") final int epochDays,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
if (formatPattern == null) {
return null;
}
try {
final DateTimeFormatter formatter = formatters.get(formatPattern);
return LocalDate.ofEpochDay(epochDays).format(formatter);
} catch (final ExecutionException | RuntimeException e) {
throw new KsqlFunctionException("Failed to format date " + epochDays
+ " with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
}
|
@Test
public void shouldSupportEmbeddedChars() {
// When:
final Object result = udf.dateToString(12345, "yyyy-dd-MM'Fred'");
// Then:
assertThat(result, is("2003-20-10Fred"));
}
|
public static long fingerprint64(Schema schema) {
long fingerPrint = fingerprint64(INIT, schema.getTypeName());
fingerPrint = fingerprint64(fingerPrint, schema.getFieldCount());
for (FieldDescriptor descriptor : schema.getFields()) {
fingerPrint = fingerprint64(fingerPrint, descriptor.getFieldName());
fingerPrint = fingerprint64(fingerPrint, descriptor.getKind().getId());
}
return fingerPrint;
}
|
@Test
public void testRabinFingerprintIsConsistentWithWrittenData() throws IOException {
SchemaWriter writer = new SchemaWriter("typeName");
writer.addField(new FieldDescriptor("a", FieldKind.BOOLEAN));
writer.addField(new FieldDescriptor("b", FieldKind.ARRAY_OF_BOOLEAN));
writer.addField(new FieldDescriptor("c", FieldKind.TIMESTAMP_WITH_TIMEZONE));
Schema schema = writer.build();
InternalSerializationService internalSerializationService = new DefaultSerializationServiceBuilder()
.setByteOrder(ByteOrder.LITTLE_ENDIAN).build();
BufferObjectDataOutput output = internalSerializationService.createObjectDataOutput();
schema.writeData(output);
long fingerprint64 = RabinFingerprint.fingerprint64(output.toByteArray());
assertEquals(fingerprint64, schema.getSchemaId());
}
|
@Override
public ChannelFuture writePing(ChannelHandlerContext ctx, boolean ack, long data, ChannelPromise promise) {
return frameWriter.writePing(ctx, ack, data, promise);
}
|
@Test
public void pingWriteAfterGoAwayShouldSucceed() throws Exception {
ChannelPromise promise = newPromise();
goAwayReceived(0);
encoder.writePing(ctx, false, 0L, promise);
verify(writer).writePing(eq(ctx), eq(false), eq(0L), eq(promise));
}
|
public static boolean isUnanimousCandidate(
final ClusterMember[] clusterMembers, final ClusterMember candidate, final int gracefulClosedLeaderId)
{
int possibleVotes = 0;
for (final ClusterMember member : clusterMembers)
{
if (member.id == gracefulClosedLeaderId)
{
continue;
}
if (NULL_POSITION == member.logPosition || compareLog(candidate, member) < 0)
{
return false;
}
possibleVotes++;
}
return possibleVotes >= ClusterMember.quorumThreshold(clusterMembers.length);
}
|
@Test
void isUnanimousCandidateReturnTrueIfTheCandidateHasTheMostUpToDateLog()
{
final int gracefulClosedLeaderId = Aeron.NULL_VALUE;
final ClusterMember candidate = newMember(2, 10, 800);
final ClusterMember[] members = new ClusterMember[]
{
newMember(10, 2, 100),
newMember(20, 8, 6),
newMember(30, 10, 800)
};
assertTrue(isUnanimousCandidate(members, candidate, gracefulClosedLeaderId));
}
|
@Override
@Transactional(rollbackFor = Exception.class)
@CacheEvict(value = RedisKeyConstants.PERMISSION_MENU_ID_LIST,
allEntries = true) // allEntries clears all cache entries, because at this point we don't know which permission the id maps to. Clearing everything is simple and effective
public void deleteMenu(Long id) {
// Check whether there are still child menus
if (menuMapper.selectCountByParentId(id) > 0) {
throw exception(MENU_EXISTS_CHILDREN);
}
// Check whether the menu to delete exists
if (menuMapper.selectById(id) == null) {
throw exception(MENU_NOT_EXISTS);
}
// Mark as deleted
menuMapper.deleteById(id);
// Remove the permissions granted to roles
permissionService.processMenuDeleted(id);
}
|
@Test
public void testDeleteMenu_existChildren() {
// Mock data (build a parent menu with a child menu)
MenuDO sonMenu = createParentAndSonMenu();
// Prepare the parameter
Long parentId = sonMenu.getParentId();
// Invoke and assert the exception
assertServiceException(() -> menuService.deleteMenu(parentId), MENU_EXISTS_CHILDREN);
}
|
@Override
public void addDestinationInfo(ConnectionContext context, DestinationInfo info) throws Exception {
DestinationAction action = new DestinationAction(context, info.getDestination(), "create");
assertAuthorized(action);
super.addDestinationInfo(context, info);
}
|
@Test(expected=UnauthorizedException.class)
public void testAddDestinationInfoNotAuthorized() throws Exception {
String name = "myTopic";
ActiveMQDestination dest = new ActiveMQTopic(name);
DestinationInfo info = new DestinationInfo(null, DestinationInfo.ADD_OPERATION_TYPE, dest);
Subject subject = new PermsSubject();
ConnectionContext context = createContext(subject);
filter.addDestinationInfo(context, info);
}
|
public <T> SideInput<T> fetchSideInput(
PCollectionView<T> view,
BoundedWindow sideWindow,
String stateFamily,
SideInputState state,
Supplier<Closeable> scopedReadStateSupplier) {
Callable<SideInput<T>> loadSideInputFromWindmill =
() -> loadSideInputFromWindmill(view, sideWindow, stateFamily, scopedReadStateSupplier);
SideInputCache.Key<T> sideInputCacheKey =
SideInputCache.Key.create(
getInternalTag(view), sideWindow, getViewFn(view).getTypeDescriptor());
try {
if (state == SideInputState.KNOWN_READY) {
Optional<SideInput<T>> existingCacheEntry = sideInputCache.get(sideInputCacheKey);
if (!existingCacheEntry.isPresent()) {
return sideInputCache.getOrLoad(sideInputCacheKey, loadSideInputFromWindmill);
}
if (!existingCacheEntry.get().isReady()) {
return sideInputCache.invalidateThenLoadNewEntry(
sideInputCacheKey, loadSideInputFromWindmill);
}
return existingCacheEntry.get();
}
return sideInputCache.getOrLoad(sideInputCacheKey, loadSideInputFromWindmill);
} catch (Exception e) {
LOG.error("Fetch failed: ", e);
throw new RuntimeException("Exception while fetching side input: ", e);
}
}
|
@Test
public void testEmptyFetchGlobalData() {
SideInputStateFetcherFactory factory =
SideInputStateFetcherFactory.fromOptions(
PipelineOptionsFactory.as(DataflowStreamingPipelineOptions.class));
SideInputStateFetcher fetcher = factory.createSideInputStateFetcher(server::getSideInputData);
ByteString encodedIterable = ByteString.EMPTY;
PCollectionView<Long> view =
TestPipeline.create()
.apply(Create.empty(VarLongCoder.of()))
.apply(Sum.longsGlobally().asSingletonView());
String tag = view.getTagInternal().getId();
// Test three calls in a row. First, data is not ready, then data is ready,
// then the data is already cached.
when(server.getSideInputData(any(Windmill.GlobalDataRequest.class)))
.thenReturn(buildGlobalDataResponse(tag, true, encodedIterable));
assertEquals(
0L,
(long)
fetcher
.fetchSideInput(
view,
GlobalWindow.INSTANCE,
STATE_FAMILY,
SideInputState.UNKNOWN,
readStateSupplier)
.value()
.orElse(null));
verify(server).getSideInputData(buildGlobalDataRequest(tag));
verifyNoMoreInteractions(server);
}
|
@Udf(description = "Converts a TIME value into the"
+ " string representation of the time in the given format."
+ " The format pattern should be in the format expected"
+ " by java.time.format.DateTimeFormatter")
public String formatTime(
@UdfParameter(
description = "TIME value.") final Time time,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
if (time == null || formatPattern == null) {
return null;
}
try {
final DateTimeFormatter formatter = formatters.get(formatPattern);
return LocalTime.ofNanoOfDay(TimeUnit.MILLISECONDS.toNanos(time.getTime())).format(formatter);
} catch (ExecutionException | RuntimeException e) {
throw new KsqlFunctionException("Failed to format time "
+ LocalTime.ofNanoOfDay(time.getTime() * 1000000)
+ " with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
}
|
@Test
public void shouldReturnNullOnNullTime() {
// When:
final String result = udf.formatTime(null, "HHmmss");
// Then:
assertThat(result, is(nullValue()));
}
|
@Override
public GetAllResourceProfilesResponse getResourceProfiles(
GetAllResourceProfilesRequest request) throws YarnException, IOException {
if (request == null) {
routerMetrics.incrGetResourceProfilesFailedRetrieved();
String msg = "Missing getResourceProfiles request.";
RouterAuditLogger.logFailure(user.getShortUserName(), GET_RESOURCEPROFILES, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg);
RouterServerUtil.logAndThrowException(msg, null);
}
long startTime = clock.getTime();
ClientMethod remoteMethod = new ClientMethod("getResourceProfiles",
new Class[] {GetAllResourceProfilesRequest.class}, new Object[] {request});
Collection<GetAllResourceProfilesResponse> resourceProfiles = null;
try {
resourceProfiles = invokeConcurrent(remoteMethod, GetAllResourceProfilesResponse.class);
} catch (Exception ex) {
routerMetrics.incrGetResourceProfilesFailedRetrieved();
String msg = "Unable to get resource profiles due to exception.";
RouterAuditLogger.logFailure(user.getShortUserName(), GET_RESOURCEPROFILES, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg);
RouterServerUtil.logAndThrowException("Unable to get resource profiles due to exception.",
ex);
}
long stopTime = clock.getTime();
routerMetrics.succeededGetResourceProfilesRetrieved(stopTime - startTime);
RouterAuditLogger.logSuccess(user.getShortUserName(), GET_RESOURCEPROFILES,
TARGET_CLIENT_RM_SERVICE);
return RouterYarnClientUtils.mergeClusterResourceProfilesResponse(resourceProfiles);
}
|
@Test
public void testGetResourceProfiles() throws Exception {
LOG.info("Test FederationClientInterceptor : Get Resource Profiles request.");
// null request
LambdaTestUtils.intercept(YarnException.class, "Missing getResourceProfiles request.",
() -> interceptor.getResourceProfiles(null));
// normal request
GetAllResourceProfilesRequest request = GetAllResourceProfilesRequest.newInstance();
GetAllResourceProfilesResponse response = interceptor.getResourceProfiles(request);
Assert.assertNotNull(response);
Map<String, Resource> resProfiles = response.getResourceProfiles();
Resource maxResProfiles = resProfiles.get("maximum");
Assert.assertEquals(32768, maxResProfiles.getMemorySize());
Assert.assertEquals(16, maxResProfiles.getVirtualCores());
Resource defaultResProfiles = resProfiles.get("default");
Assert.assertEquals(8192, defaultResProfiles.getMemorySize());
Assert.assertEquals(8, defaultResProfiles.getVirtualCores());
Resource minimumResProfiles = resProfiles.get("minimum");
Assert.assertEquals(4096, minimumResProfiles.getMemorySize());
Assert.assertEquals(4, minimumResProfiles.getVirtualCores());
}
|
public static String encodeBase64Zipped( byte[] src ) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream( 1024 );
try ( Base64OutputStream base64OutputStream = new Base64OutputStream( baos );
GZIPOutputStream gzos = new GZIPOutputStream( base64OutputStream ) ) {
gzos.write( src );
}
return baos.toString();
}
|
@Test
public void testEncodeBase64Zipped() throws Exception {
TestClass testClass = new TestClass();
testClass.setTestProp1( "testPropValue1" );
testClass.setTestProp2( "testPropValue2" );
String base64ZippedString = this.encode( testClass );
Assert.assertNotNull( base64ZippedString );
Assert.assertTrue( !base64ZippedString.trim().isEmpty() );
}
|
public void subscribeNewReplyReasonForComment(Comment comment) {
subscribeReply(identityFrom(comment.getSpec().getOwner()));
}
|
@Test
void subscribeNewReplyReasonForCommentTest() {
var comment = createComment();
var spyNotificationSubscriptionHelper = spy(notificationSubscriptionHelper);
doNothing().when(spyNotificationSubscriptionHelper).subscribeReply(any(UserIdentity.class));
spyNotificationSubscriptionHelper.subscribeNewReplyReasonForComment(comment);
verify(spyNotificationSubscriptionHelper).subscribeReply(
eq(ReplyNotificationSubscriptionHelper.identityFrom(
comment.getSpec().getOwner()))
);
}
|
@Override
public <VR> KStream<K, VR> mapValues(final ValueMapper<? super V, ? extends VR> valueMapper) {
return mapValues(withKey(valueMapper));
}
|
@Test
public void shouldNotAllowNullMapperOnMapValuesWithKeyWithNamed() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.mapValues(
(ValueMapperWithKey<Object, Object, Object>) null,
Named.as("valueMapperWithKey")));
assertThat(exception.getMessage(), equalTo("valueMapperWithKey can't be null"));
}
|
public static Optional<VariableDeclarator> getVariableDeclarator(final MethodDeclaration methodDeclaration,
final String variableName) {
final BlockStmt body = methodDeclaration.getBody()
.orElseThrow(() -> new KiePMMLException(String.format(MISSING_BODY_TEMPLATE, methodDeclaration)));
return getVariableDeclarator(body, variableName);
}
|
@Test
void getVariableDeclarator() {
final String variableName = "variableName";
final BlockStmt body = new BlockStmt();
assertThat(CommonCodegenUtils.getVariableDeclarator(body, variableName)).isNotPresent();
final VariableDeclarationExpr variableDeclarationExpr = new VariableDeclarationExpr(parseClassOrInterfaceType("String"), variableName);
body.addStatement(variableDeclarationExpr);
Optional<VariableDeclarator> retrieved = CommonCodegenUtils.getVariableDeclarator(body, variableName);
assertThat(retrieved).isPresent();
VariableDeclarator variableDeclarator = retrieved.get();
assertThat(variableDeclarator.getName().asString()).isEqualTo(variableName);
}
|
public synchronized int requestUpdate(final boolean resetEquivalentResponseBackoff) {
this.needFullUpdate = true;
if (resetEquivalentResponseBackoff) {
this.equivalentResponseCount = 0;
}
return this.updateVersion;
}
|
@Test
public void testRequestUpdate() {
assertFalse(metadata.updateRequested());
int[] epochs = {42, 42, 41, 41, 42, 43, 43, 42, 41, 44};
boolean[] updateResult = {true, false, false, false, false, true, false, false, false, true};
TopicPartition tp = new TopicPartition("topic", 0);
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1,
Collections.emptyMap(), Collections.singletonMap("topic", 1), _tp -> 0);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L);
for (int i = 0; i < epochs.length; i++) {
metadata.updateLastSeenEpochIfNewer(tp, epochs[i]);
if (updateResult[i]) {
assertTrue(metadata.updateRequested(), "Expected metadata update to be requested [" + i + "]");
} else {
assertFalse(metadata.updateRequested(), "Did not expect metadata update to be requested [" + i + "]");
}
metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, 0L);
assertFalse(metadata.updateRequested());
}
}
|
public static <T> NavigableSet<Point<T>> fastKNearestPoints(SortedSet<Point<T>> points, Instant time, int k) {
checkNotNull(points, "The input SortedSet of Points cannot be null");
checkNotNull(time, "The input time cannot be null");
checkArgument(k >= 0, "k (" + k + ") must be non-negative");
if (k >= points.size()) {
return newTreeSet(points);
}
Point<T> stub = points.first();
Point<T> searchPoint = Point.builder(stub).time(time).latLong(0.0, 0.0).build();
//create two iterators, one goes up from the searchPoint, one goes down from the searchPoint
NavigableSet<Point<T>> headSet = ((NavigableSet<Point<T>>) points).headSet(searchPoint, true);
NavigableSet<Point<T>> tailSet = ((NavigableSet<Point<T>>) points).tailSet(searchPoint, false);
Iterator<Point<T>> headIter = headSet.descendingIterator();
Iterator<Point<T>> tailIter = tailSet.iterator();
TreeSet<Point<T>> results = newTreeSet();
Point<T> up = (headIter.hasNext()) ? headIter.next() : null;
Point<T> down = (tailIter.hasNext()) ? tailIter.next() : null;
while (results.size() < k) {
//add an element from the "down set" when we are out of elements in the "up set"
if (up == null) {
results.add(down);
down = tailIter.next();
continue;
}
//add an element from the "up set" when we are out of elements in the "down set"
if (down == null) {
results.add(up);
up = headIter.next();
continue;
}
//add the nearest point when we can choose between the "up set" and the "down set"
Duration upDistance = Duration.between(up.time(), time);
Duration downDistance = Duration.between(time, down.time());
if (theDuration(upDistance).isLessThanOrEqualTo(downDistance)) {
results.add(up);
up = (headIter.hasNext()) ? headIter.next() : null;
} else {
results.add(down);
down = (tailIter.hasNext()) ? tailIter.next() : null;
}
}
return results;
}
|
@Test
public void testFastKNearestPoints_6() {
NavigableSet<Point<String>> knn = fastKNearestPoints(points, EPOCH.plusSeconds(5), 100);
assertEquals(points.size(), knn.size());
assertFalse(knn == points, "The datasets are different");
knn.pollFirst();
assertEquals(points.size(), knn.size() + 1, "We removed an item from the knn");
}
|
public void onProcessingTime(long timestamp) throws Exception {
for (Bucket<IN, BucketID> bucket : activeBuckets.values()) {
bucket.onProcessingTime(timestamp);
}
}
|
@Test
void testOnProcessingTime() throws Exception {
final File outDir = TempDirUtils.newFolder(tempFolder);
final Path path = new Path(outDir.toURI());
final OnProcessingTimePolicy<String, String> rollOnProcessingTimeCountingPolicy =
new OnProcessingTimePolicy<>(2L);
final Buckets<String, String> buckets =
createBuckets(path, rollOnProcessingTimeCountingPolicy, 0);
// it takes the current processing time of the context for the creation time,
// and for the last modification time.
buckets.onElement("test", new TestUtils.MockSinkContext(1L, 2L, 3L));
// now it should roll
buckets.onProcessingTime(7L);
assertThat(rollOnProcessingTimeCountingPolicy.getOnProcessingTimeRollCounter()).isOne();
final Map<String, Bucket<String, String>> activeBuckets = buckets.getActiveBuckets();
assertThat(activeBuckets).hasSize(1).containsKey("test");
final Bucket<String, String> bucket = activeBuckets.get("test");
assertThat(bucket.getBucketId()).isEqualTo("test");
assertThat(bucket.getBucketPath()).isEqualTo(new Path(path, "test"));
assertThat(bucket.getBucketId()).isEqualTo("test");
assertThat(bucket.getInProgressPart()).isNull();
assertThat(bucket.getPendingFileRecoverablesForCurrentCheckpoint()).hasSize(1);
assertThat(bucket.getPendingFileRecoverablesPerCheckpoint()).isEmpty();
}
|
public COM verifyCom(byte[] data, Class<? extends COM> type) throws RdaException {
final COM com = read(data, type);
if (!com.getDataGroups().containsAll(com.getRdaDataGroups())) {
throw new RdaException(
RdaError.COM, String.format("Not all data groups are available: %s", com.getDataGroups())
);
}
return com;
}
|
@Test
public void shouldThrowErrorIfBasicDataGroupsAreMissingFromTravelDocument() throws Exception {
final CardVerifier verifier = verifier(null, null);
final byte[] com = readFixture("nik2014/efCom");
com[20] += 2;
Exception exception = assertThrows(RdaException.class, () -> {
verifier.verifyCom(com, TravelDocumentCOM.class);
});
assertEquals(RdaError.COM, ((RdaException) exception).error);
assertEquals("Not all data groups are available: [3, 2, 15, 14]", exception.getMessage());
}
|
public static FusedPipeline fuse(Pipeline p) {
return new GreedyPipelineFuser(p).fusedPipeline;
}
|
@Test
public void statefulParDoRootsStage() {
// (impulse.out) -> parDo -> (parDo.out)
// (parDo.out) -> stateful -> stateful.out
// stateful has a state spec which prevents it from fusing with an upstream ParDo
PTransform parDoTransform =
PTransform.newBuilder()
.setUniqueName("ParDo")
.putInputs("input", "impulse.out")
.putOutputs("output", "parDo.out")
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.setPayload(
ParDoPayload.newBuilder()
.setDoFn(FunctionSpec.newBuilder())
.build()
.toByteString()))
.setEnvironmentId("common")
.build();
PTransform statefulTransform =
PTransform.newBuilder()
.setUniqueName("StatefulParDo")
.putInputs("input", "parDo.out")
.putOutputs("output", "stateful.out")
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.setPayload(
ParDoPayload.newBuilder()
.setDoFn(FunctionSpec.newBuilder())
.putStateSpecs("state", StateSpec.getDefaultInstance())
.build()
.toByteString()))
.setEnvironmentId("common")
.build();
Components components =
partialComponents
.toBuilder()
.putTransforms("parDo", parDoTransform)
.putPcollections("parDo.out", pc("parDo.out"))
.putTransforms("stateful", statefulTransform)
.putPcollections("stateful.out", pc("stateful.out"))
.putEnvironments("common", Environments.createDockerEnvironment("common"))
.build();
FusedPipeline fused =
GreedyPipelineFuser.fuse(
Pipeline.newBuilder()
.setComponents(components)
.addRequirements(ParDoTranslation.REQUIRES_STATEFUL_PROCESSING_URN)
.build());
assertThat(
fused.getRunnerExecutedTransforms(),
containsInAnyOrder(
PipelineNode.pTransform("impulse", components.getTransformsOrThrow("impulse"))));
assertThat(
fused.getFusedStages(),
containsInAnyOrder(
ExecutableStageMatcher.withInput("impulse.out")
.withOutputs("parDo.out")
.withTransforms("parDo"),
ExecutableStageMatcher.withInput("parDo.out")
.withNoOutputs()
.withTransforms("stateful")));
}
|
@Override
public boolean canReadView(ViewLike view) {
final String viewId = view.id();
// If a resolved view id is provided, delegate the permissions check to the resolver.
final ViewResolverDecoder decoder = new ViewResolverDecoder(viewId);
if (decoder.isResolverViewId()) {
final ViewResolver viewResolver = viewResolvers.get(decoder.getResolverName());
if (viewResolver != null) {
return viewResolver.canReadView(viewId, isPermitted, isPermittedEntity);
} else {
// Resolved view could not be found, so permissions cannot be checked.
LOG.error("View resolver [{}] could not be found.", decoder.getResolverName());
return false;
}
}
// Proceed to standard views permission check.
return isPermitted(ViewsRestPermissions.VIEW_READ, viewId)
|| (view.type().equals(ViewDTO.Type.DASHBOARD) && isPermitted(RestPermissions.DASHBOARDS_READ, viewId));
}
|
@Test
void testViewReadAccess() {
// Verify that all combinations of permission and view ids test successfully.
assertThat(searchUserRequiringPermission("missing-permission", "bad-id")
.canReadView(new TestView("do-not-match-id"))).isFalse();
assertThat(searchUserRequiringPermission(ViewsRestPermissions.VIEW_READ, "bad-id")
.canReadView(new TestView("do-not-match-id"))).isFalse();
assertThat(searchUserRequiringPermission("missing-permission", "good-id")
.canReadView(new TestView("good-id"))).isFalse();
assertThat(searchUserRequiringPermission(ViewsRestPermissions.VIEW_READ, "good-id")
.canReadView(new TestView("good-id"))).isTrue();
}
|
@Override
public ManifestIdentifier identify(Config config) {
Path manifestFile = getFileFromProperty("android_merged_manifest");
Path resourcesDir = getFileFromProperty("android_merged_resources");
Path assetsDir = getFileFromProperty("android_merged_assets");
Path apkFile = getFileFromProperty("android_resource_apk");
String packageName = properties.getProperty("android_custom_package");
String manifestConfig = config.manifest();
if (Config.NONE.equals(manifestConfig)) {
Logger.info(
"@Config(manifest = Config.NONE) specified while using Build System API, ignoring");
} else if (!Config.DEFAULT_MANIFEST_NAME.equals(manifestConfig)) {
manifestFile = getResource(manifestConfig);
}
if (!Config.DEFAULT_RES_FOLDER.equals(config.resourceDir())) {
resourcesDir = getResource(config.resourceDir());
}
if (!Config.DEFAULT_ASSET_FOLDER.equals(config.assetDir())) {
assetsDir = getResource(config.assetDir());
}
if (!Config.DEFAULT_PACKAGE_NAME.equals(config.packageName())) {
packageName = config.packageName();
}
List<ManifestIdentifier> libraryDirs = emptyList();
if (config.libraries().length > 0) {
Logger.info("@Config(libraries) specified while using Build System API, ignoring");
}
return new ManifestIdentifier(
packageName, manifestFile, resourcesDir, assetsDir, libraryDirs, apkFile);
}
|
@Test
public void identify() {
Properties properties = new Properties();
properties.put("android_merged_manifest", "gradle/AndroidManifest.xml");
properties.put("android_merged_resources", "gradle/res");
properties.put("android_merged_assets", "gradle/assets");
DefaultManifestFactory factory = new DefaultManifestFactory(properties);
ManifestIdentifier identifier = factory.identify(Config.Builder.defaults().build());
AndroidManifest manifest = RobolectricTestRunner.createAndroidManifest(identifier);
assertThat(manifest.getAndroidManifestFile())
.isEqualTo(Paths.get("gradle/AndroidManifest.xml"));
assertThat(manifest.getResDirectory()).isEqualTo(Paths.get("gradle/res"));
assertThat(manifest.getAssetsDirectory()).isEqualTo(Paths.get("gradle/assets"));
assertThat(manifest.getApkFile()).isNull();
}
|
@ScalarOperator(INDETERMINATE)
@SqlType(StandardTypes.BOOLEAN)
public static boolean indeterminate(@SqlType(StandardTypes.DOUBLE) double value, @IsNull boolean isNull)
{
return isNull;
}
|
@Test
public void testIndeterminate()
{
assertOperator(INDETERMINATE, "cast(null as double)", BOOLEAN, true);
assertOperator(INDETERMINATE, "1.2", BOOLEAN, false);
assertOperator(INDETERMINATE, "cast(1.2 as double)", BOOLEAN, false);
assertOperator(INDETERMINATE, "cast(1 as double)", BOOLEAN, false);
}
|
public static ParamType getVarArgsSchemaFromType(final Type type) {
return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
}
|
@Test
public void shouldGetIntegerSchemaForIntPrimitiveClassVariadic() {
assertThat(
UdfUtil.getVarArgsSchemaFromType(int.class),
equalTo(ParamTypes.INTEGER)
);
}
|
@VisibleForTesting
protected void copyResourcesFromJar(JarFile inputJar) throws IOException {
Enumeration<JarEntry> inputJarEntries = inputJar.entries();
// The zip spec allows multiple files with the same name; the Java zip libraries do not.
// Keep track of the files we've already written to filter out duplicates.
// Also, ignore the old manifest; we want to write our own.
Set<String> previousEntryNames = new HashSet<>(ImmutableList.of(JarFile.MANIFEST_NAME));
while (inputJarEntries.hasMoreElements()) {
JarEntry inputJarEntry = inputJarEntries.nextElement();
InputStream inputStream = inputJar.getInputStream(inputJarEntry);
String entryName = inputJarEntry.getName();
if (previousEntryNames.contains(entryName)) {
LOG.debug("Skipping duplicated file {}", entryName);
} else {
JarEntry outputJarEntry = new JarEntry(inputJarEntry);
outputStream.putNextEntry(outputJarEntry);
LOG.trace("Copying jar entry {}", inputJarEntry);
IOUtils.copy(inputStream, outputStream);
previousEntryNames.add(entryName);
}
}
}
|
@Test
public void testCopyResourcesFromJar_ignoresDuplicates() throws IOException {
List<JarEntry> duplicateEntries = ImmutableList.of(new JarEntry("foo"), new JarEntry("foo"));
when(inputJar.entries()).thenReturn(Collections.enumeration(duplicateEntries));
jarCreator.copyResourcesFromJar(inputJar);
verify(outputStream, times(1)).putNextEntry(any());
}
|
@Override
public int hashCode() {
return Objects.hash(memberId, groupInstanceId, clientId, host, assignment, targetAssignment);
}
|
@Test
public void testEqualsWithoutGroupInstanceId() {
MemberDescription dynamicMemberDescription = new MemberDescription(MEMBER_ID,
CLIENT_ID,
HOST,
ASSIGNMENT);
MemberDescription identityDescription = new MemberDescription(MEMBER_ID,
CLIENT_ID,
HOST,
ASSIGNMENT);
assertNotEquals(STATIC_MEMBER_DESCRIPTION, dynamicMemberDescription);
assertNotEquals(STATIC_MEMBER_DESCRIPTION.hashCode(), dynamicMemberDescription.hashCode());
// Check self equality.
assertEquals(dynamicMemberDescription, dynamicMemberDescription);
assertEquals(dynamicMemberDescription, identityDescription);
assertEquals(dynamicMemberDescription.hashCode(), identityDescription.hashCode());
}
|
void generateAndEnrichToken(Consumer consumer, ConsumerToken consumerToken) {
Preconditions.checkArgument(consumer != null);
if (consumerToken.getDataChangeCreatedTime() == null) {
consumerToken.setDataChangeCreatedTime(new Date());
}
consumerToken.setToken(generateToken(consumer.getAppId(), consumerToken
.getDataChangeCreatedTime(), portalConfig.consumerTokenSalt()));
}
|
@Test
public void testGenerateAndEnrichConsumerTokenWithConsumerNotFound() throws Exception {
long someConsumerIdNotExist = 1;
ConsumerToken consumerToken = new ConsumerToken();
consumerToken.setConsumerId(someConsumerIdNotExist);
assertThrows(IllegalArgumentException.class,
() -> consumerService.generateAndEnrichToken(null, consumerToken)
);
}
|
@Override
public boolean supportsGetGeneratedKeys() {
return false;
}
|
@Test
void assertSupportsGetGeneratedKeys() {
assertFalse(metaData.supportsGetGeneratedKeys());
}
|
@Override
public UnboundFunction loadFunction(Identifier ident) throws NoSuchFunctionException {
try {
return super.loadFunction(ident);
} catch (NoSuchFunctionException e) {
return getSessionCatalog().loadFunction(ident);
}
}
|
@Test
public void testLoadFunction() {
String functionClass = "org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper";
// load permanent UDF in Hive via FunctionCatalog
spark.sql(String.format("CREATE FUNCTION perm_upper AS '%s'", functionClass));
Assert.assertEquals("Load permanent UDF in Hive", "XYZ", scalarSql("SELECT perm_upper('xyz')"));
// load temporary UDF in Hive via FunctionCatalog
spark.sql(String.format("CREATE TEMPORARY FUNCTION temp_upper AS '%s'", functionClass));
Assert.assertEquals("Load temporary UDF in Hive", "XYZ", scalarSql("SELECT temp_upper('xyz')"));
// TODO: fix loading Iceberg built-in functions in SessionCatalog
}
|
public abstract boolean isTopicAllowed(String topic, boolean excludeInternalTopics);
|
@Test
public void testIncludeLists() {
IncludeList topicFilter1 = new TopicFilter.IncludeList("yes1,yes2");
assertTrue(topicFilter1.isTopicAllowed("yes2", true));
assertTrue(topicFilter1.isTopicAllowed("yes2", false));
assertFalse(topicFilter1.isTopicAllowed("no1", true));
assertFalse(topicFilter1.isTopicAllowed("no1", false));
IncludeList topicFilter2 = new IncludeList(".+");
assertTrue(topicFilter2.isTopicAllowed("alltopics", true));
assertFalse(topicFilter2.isTopicAllowed(Topic.GROUP_METADATA_TOPIC_NAME, true));
assertTrue(topicFilter2.isTopicAllowed(Topic.GROUP_METADATA_TOPIC_NAME, false));
assertFalse(topicFilter2.isTopicAllowed(Topic.TRANSACTION_STATE_TOPIC_NAME, true));
assertTrue(topicFilter2.isTopicAllowed(Topic.TRANSACTION_STATE_TOPIC_NAME, false));
IncludeList topicFilter3 = new IncludeList("included-topic.+");
assertTrue(topicFilter3.isTopicAllowed("included-topic1", true));
assertFalse(topicFilter3.isTopicAllowed("no1", true));
IncludeList topicFilter4 = new IncludeList("test-(?!bad\\b)[\\w]+");
assertTrue(topicFilter4.isTopicAllowed("test-good", true));
assertFalse(topicFilter4.isTopicAllowed("test-bad", true));
}
|
@Override
public Enumeration<URL> getResources(String name) throws IOException {
List<URL> resources = new ArrayList<>();
ClassLoadingStrategy loadingStrategy = getClassLoadingStrategy(name);
log.trace("Received request to load resources '{}'", name);
for (ClassLoadingStrategy.Source classLoadingSource : loadingStrategy.getSources()) {
switch (classLoadingSource) {
case APPLICATION:
if (getParent() != null) {
resources.addAll(Collections.list(getParent().getResources(name)));
}
break;
case PLUGIN:
resources.addAll(Collections.list(findResources(name)));
break;
case DEPENDENCIES:
resources.addAll(findResourcesFromDependencies(name));
break;
}
}
return Collections.enumeration(resources);
}
|
@Test
void parentFirstGetResourcesExistsInBothParentAndPlugin() throws URISyntaxException, IOException {
Enumeration<URL> resources = parentFirstPluginClassLoader.getResources("META-INF/file-in-both-parent-and-plugin");
assertNumberOfResourcesAndFirstLineOfFirstElement(2, "parent", resources);
}
|
@Override
public void evictAll() {
map.evictAll();
}
|
@Test
public void testEvictAll() {
mapWithLoader.put(23, "value-23");
mapWithLoader.put(42, "value-42");
mapWithLoader.put(65, "value-65");
adapterWithLoader.evictAll();
assertEquals(0, mapWithLoader.size());
assertFalse(mapWithLoader.containsKey(23));
assertFalse(mapWithLoader.containsKey(42));
assertFalse(mapWithLoader.containsKey(65));
}
|
public static boolean test(byte[] bloomBytes, byte[]... topics) {
Bloom bloom = new Bloom(bloomBytes);
if (topics == null) {
throw new IllegalArgumentException("topics can not be null");
}
for (byte[] topic : topics) {
if (!bloom.test(topic)) {
return false;
}
}
return true;
}
|
@Test
public void testStaticMethodTestWhenOneTopicIsNotInBloom() {
boolean result =
Bloom.test(
ethereumSampleLogsBloom,
ethereumSampleLogs.get(0),
ethereumSampleLogs.get(100),
"0xff");
assertFalse(result, "expected to return false (but false-positive is possible)");
}
|
@GetMapping
public RestResult<List<Namespace>> getNamespaces() {
return RestResultUtils.success(namespaceOperationService.getNamespaceList());
}
|
@Test
void testGetNamespaces() throws Exception {
Namespace namespace = new Namespace("", "public");
when(namespaceOperationService.getNamespaceList()).thenReturn(Collections.singletonList(namespace));
RestResult<List<Namespace>> actual = namespaceController.getNamespaces();
assertTrue(actual.ok());
assertEquals(200, actual.getCode());
assertEquals(namespace, actual.getData().get(0));
}
|
@Override
@SuppressWarnings("unchecked")
public void onApplicationEvent(@NotNull final DataChangedEvent event) {
for (DataChangedListener listener : listeners) {
if ((!(listener instanceof AbstractDataChangedListener))
&& clusterProperties.isEnabled()
&& Objects.nonNull(shenyuClusterSelectMasterService)
&& !shenyuClusterSelectMasterService.isMaster()) {
LOG.info("received DataChangedEvent, not master, pass");
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("received DataChangedEvent, dispatching, event:{}", JsonUtils.toJson(event));
}
switch (event.getGroupKey()) {
case APP_AUTH:
listener.onAppAuthChanged((List<AppAuthData>) event.getSource(), event.getEventType());
break;
case PLUGIN:
listener.onPluginChanged((List<PluginData>) event.getSource(), event.getEventType());
break;
case RULE:
listener.onRuleChanged((List<RuleData>) event.getSource(), event.getEventType());
break;
case SELECTOR:
listener.onSelectorChanged((List<SelectorData>) event.getSource(), event.getEventType());
break;
case META_DATA:
listener.onMetaDataChanged((List<MetaData>) event.getSource(), event.getEventType());
break;
case PROXY_SELECTOR:
listener.onProxySelectorChanged((List<ProxySelectorData>) event.getSource(), event.getEventType());
break;
case DISCOVER_UPSTREAM:
listener.onDiscoveryUpstreamChanged((List<DiscoverySyncData>) event.getSource(), event.getEventType());
applicationContext.getBean(LoadServiceDocEntry.class).loadDocOnUpstreamChanged((List<DiscoverySyncData>) event.getSource(), event.getEventType());
break;
default:
throw new IllegalStateException("Unexpected value: " + event.getGroupKey());
}
}
}
|
@Test
public void onApplicationEventWithPluginConfigGroupTest() {
when(clusterProperties.isEnabled()).thenReturn(true);
when(shenyuClusterSelectMasterService.isMaster()).thenReturn(true);
ConfigGroupEnum configGroupEnum = ConfigGroupEnum.PLUGIN;
DataChangedEvent dataChangedEvent = new DataChangedEvent(configGroupEnum, null, new ArrayList<>());
dataChangedEventDispatcher.onApplicationEvent(dataChangedEvent);
verify(httpLongPollingDataChangedListener, times(1)).onPluginChanged(anyList(), any());
verify(nacosDataChangedListener, times(1)).onPluginChanged(anyList(), any());
verify(websocketDataChangedListener, times(1)).onPluginChanged(anyList(), any());
verify(zookeeperDataChangedListener, times(1)).onPluginChanged(anyList(), any());
}
|
@Override
protected TableRecords getUndoRows() {
return sqlUndoLog.getBeforeImage();
}
|
@Test
public void getUndoRows() {
OracleUndoDeleteExecutor executor = upperCase();
Assertions.assertEquals(executor.getUndoRows(), executor.getSqlUndoLog().getBeforeImage());
}
|
public KsqlGenericRecord build(
final List<ColumnName> columnNames,
final List<Expression> expressions,
final LogicalSchema schema,
final DataSourceType dataSourceType
) {
final List<ColumnName> columns = columnNames.isEmpty()
? implicitColumns(schema)
: columnNames;
if (columns.size() != expressions.size()) {
throw new KsqlException(
"Expected a value for each column."
+ " Expected Columns: " + columnNames
+ ". Got " + expressions);
}
final LogicalSchema schemaWithPseudoColumns = withPseudoColumns(schema);
for (ColumnName col : columns) {
if (!schemaWithPseudoColumns.findColumn(col).isPresent()) {
throw new KsqlException("Column name " + col + " does not exist.");
}
if (SystemColumns.isDisallowedForInsertValues(col)) {
throw new KsqlException("Inserting into column " + col + " is not allowed.");
}
}
final Map<ColumnName, Object> values = resolveValues(
columns,
expressions,
schemaWithPseudoColumns,
functionRegistry,
config
);
if (dataSourceType == DataSourceType.KTABLE) {
final String noValue = schemaWithPseudoColumns.key().stream()
.map(Column::name)
.filter(colName -> !values.containsKey(colName))
.map(ColumnName::text)
.collect(Collectors.joining(", "));
if (!noValue.isEmpty()) {
throw new KsqlException("Value for primary key column(s) "
+ noValue + " is required for tables");
}
}
final long ts = (long) values.getOrDefault(SystemColumns.ROWTIME_NAME, clock.getAsLong());
final GenericKey key = buildKey(schema, values);
final GenericRow value = buildValue(schema, values);
return KsqlGenericRecord.of(key, value, ts);
}
|
@Test
public void shouldThrowOnUnknownColumn() {
// Given:
final LogicalSchema schema = LogicalSchema.builder()
.keyColumn(KEY, SqlTypes.STRING)
.valueColumn(COL0, SqlTypes.STRING)
.build();
final List<ColumnName> names = ImmutableList.of(KEY, COL1);
final Expression exp = new StringLiteral("a");
// When:
final KsqlException e = assertThrows(KsqlException.class, () -> recordFactory.build(
names, ImmutableList.of(exp, exp), schema, DataSourceType.KSTREAM
));
// Then:
assertThat(e.getMessage(), containsString("does not exist"));
}
|
public static double validateLongitude(double longitude) {
if (Double.isNaN(longitude) || longitude < LONGITUDE_MIN || longitude > LONGITUDE_MAX) {
throw new IllegalArgumentException("invalid longitude: " + longitude);
}
return longitude;
}
|
@Test
public void validateLongitudeTest() {
LatLongUtils.validateLongitude(LatLongUtils.LONGITUDE_MAX);
LatLongUtils.validateLongitude(LatLongUtils.LONGITUDE_MIN);
verifyInvalidLongitude(Double.NaN);
verifyInvalidLongitude(Math.nextAfter(LatLongUtils.LONGITUDE_MAX, Double.POSITIVE_INFINITY));
verifyInvalidLongitude(Math.nextAfter(LatLongUtils.LONGITUDE_MIN, Double.NEGATIVE_INFINITY));
}
|
@Udf
public <T> Boolean contains(
@UdfParameter final String jsonArray,
@UdfParameter final T val
) {
try (JsonParser parser = PARSER_FACTORY.createParser(jsonArray)) {
if (parser.nextToken() != START_ARRAY) {
return false;
}
while (parser.nextToken() != null) {
final JsonToken token = parser.currentToken();
if (token == null) {
return val == null;
} else if (token == END_ARRAY) {
return false;
}
parser.skipChildren();
if (TOKEN_COMPAT.getOrDefault(token, foo -> false).test(val)) {
if (token == VALUE_NULL
|| (val != null && Objects.equals(parser.readValueAs(val.getClass()), val))) {
return true;
}
}
}
return false;
} catch (final IOException e) {
return false;
}
}
|
@Test
public void shouldHandleNullsInJsonArray() {
assertEquals(false, jsonUdf.contains("[false, false, true, false]", null));
}
|
@Override
public boolean wasNull() throws SQLException {
return queryResult.wasNull();
}
|
@Test
void assertWasNull() throws SQLException {
assertFalse(createMergedEncryptColumnsMergedResult(queryResult, mock(EncryptRule.class)).wasNull());
}
|
public FileInputStream openInputStream(File file) {
try {
return openInputStreamOrThrowIOE(file);
} catch (IOException e) {
throw new IllegalStateException("Can not open file " + file, e);
}
}
|
@Test
public void openInputStream_throws_ISE_if_file_does_not_exist() throws Exception {
final File file = temp.newFile();
assertThat(file.delete()).isTrue();
assertThatThrownBy(() -> underTest.openInputStream(file))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Can not open file " + file)
.hasRootCauseMessage("File " + file + " does not exist");
}
|
@Nullable
@Override
public Message decode(@Nonnull final RawMessage rawMessage) {
final GELFMessage gelfMessage = new GELFMessage(rawMessage.getPayload(), rawMessage.getRemoteAddress());
final String json = gelfMessage.getJSON(decompressSizeLimit, charset);
final JsonNode node;
try {
node = objectMapper.readTree(json);
if (node == null) {
throw new IOException("null result");
}
} catch (final Exception e) {
log.error("Could not parse JSON, first 400 characters: " +
StringUtils.abbreviate(json, 403), e);
throw new IllegalStateException("JSON is null/could not be parsed (invalid JSON)", e);
}
try {
validateGELFMessage(node, rawMessage.getId(), rawMessage.getRemoteAddress());
} catch (IllegalArgumentException e) {
log.trace("Invalid GELF message <{}>", node);
throw e;
}
// Timestamp.
final double messageTimestamp = timestampValue(node);
final DateTime timestamp;
if (messageTimestamp <= 0) {
timestamp = rawMessage.getTimestamp();
} else {
// we treat this as a unix timestamp
timestamp = Tools.dateTimeFromDouble(messageTimestamp);
}
final Message message = messageFactory.createMessage(
stringValue(node, "short_message"),
stringValue(node, "host"),
timestamp
);
message.addField(Message.FIELD_FULL_MESSAGE, stringValue(node, "full_message"));
final String file = stringValue(node, "file");
if (file != null && !file.isEmpty()) {
message.addField("file", file);
}
final long line = longValue(node, "line");
if (line > -1) {
message.addField("line", line);
}
// Level is set by server if not specified by client.
final int level = intValue(node, "level");
if (level > -1) {
message.addField("level", level);
}
// Facility is set by server if not specified by client.
final String facility = stringValue(node, "facility");
if (facility != null && !facility.isEmpty()) {
message.addField("facility", facility);
}
// Add additional data if there is some.
final Iterator<Map.Entry<String, JsonNode>> fields = node.fields();
while (fields.hasNext()) {
final Map.Entry<String, JsonNode> entry = fields.next();
String key = entry.getKey();
// Do not index useless GELF "version" field.
if ("version".equals(key)) {
continue;
}
// Don't include GELF syntax underscore in message field key.
if (key.startsWith("_") && key.length() > 1) {
key = key.substring(1);
}
// We already set short_message and host as message and source. Do not add as fields again.
if ("short_message".equals(key) || "host".equals(key)) {
continue;
}
// Skip standard or already set fields.
if (message.getField(key) != null || Message.RESERVED_FIELDS.contains(key) && !Message.RESERVED_SETTABLE_FIELDS.contains(key)) {
continue;
}
// Convert JSON containers to Strings, and pick a suitable number representation.
final JsonNode value = entry.getValue();
final Object fieldValue;
if (value.isContainerNode()) {
fieldValue = value.toString();
} else if (value.isFloatingPointNumber()) {
fieldValue = value.asDouble();
} else if (value.isIntegralNumber()) {
fieldValue = value.asLong();
} else if (value.isNull()) {
log.debug("Field [{}] is NULL. Skipping.", key);
continue;
} else if (value.isTextual()) {
fieldValue = value.asText();
} else {
log.debug("Field [{}] has unknown value type. Skipping.", key);
continue;
}
message.addField(key, fieldValue);
}
return message;
}
|
@Test
public void decodeFailsWithWrongTypeForMessage() throws Exception {
final String json = "{"
+ "\"version\": \"1.1\","
+ "\"host\": \"example.org\","
+ "\"message\": 42"
+ "}";
final RawMessage rawMessage = new RawMessage(json.getBytes(StandardCharsets.UTF_8));
assertThatIllegalArgumentException().isThrownBy(() -> codec.decode(rawMessage))
.withNoCause()
.withMessageMatching("GELF message <[0-9a-f-]+> has invalid \"message\": 42");
}
|
public T divide(BigDecimal by) {
return create(value.divide(by, MAX_VALUE_SCALE, RoundingMode.DOWN));
}
|
@Test
void testDivideNegative() {
final Resource resource = new TestResource(1.2);
final BigDecimal by = BigDecimal.valueOf(-0.5);
assertThatThrownBy(() -> resource.divide(by)).isInstanceOf(IllegalArgumentException.class);
}
|
public static Deserializer<RouterAdvertisement> deserializer() {
return (data, offset, length) -> {
checkInput(data, offset, length, HEADER_LENGTH);
RouterAdvertisement routerAdvertisement = new RouterAdvertisement();
ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
int bscratch;
routerAdvertisement.currentHopLimit = bb.get();
bscratch = bb.get();
routerAdvertisement.mFlag = (byte) ((bscratch >> 7) & 0x1);
routerAdvertisement.oFlag = (byte) ((bscratch >> 6) & 0x1);
routerAdvertisement.routerLifetime = bb.getShort();
routerAdvertisement.reachableTime = bb.getInt();
routerAdvertisement.retransmitTimer = bb.getInt();
if (bb.limit() - bb.position() > 0) {
NeighborDiscoveryOptions options = NeighborDiscoveryOptions.deserializer()
.deserialize(data, bb.position(), bb.limit() - bb.position());
for (NeighborDiscoveryOptions.Option option : options.options()) {
routerAdvertisement.addOption(option.type(), option.data());
}
}
return routerAdvertisement;
};
}
|
@Test
public void testDeserializeBadInput() throws Exception {
PacketTestUtils.testDeserializeBadInput(RouterAdvertisement.deserializer());
}
|
@VisibleForTesting
/**
* This initializes the caches in SharedCache by getting the objects from Metastore DB via
* ObjectStore and populating the respective caches
*/
static void prewarm(RawStore rawStore) {
if (isCachePrewarmed.get()) {
return;
}
long startTime = System.nanoTime();
LOG.info("Prewarming CachedStore");
long sleepTime = 100;
while (!isCachePrewarmed.get()) {
// Prevents throwing exceptions in our raw store calls since we're not using RawStoreProxy
Deadline.registerIfNot(1000000);
Collection<String> catalogsToCache;
try {
catalogsToCache = catalogsToCache(rawStore);
LOG.info("Going to cache catalogs: " + org.apache.commons.lang3.StringUtils.join(catalogsToCache, ", "));
List<Catalog> catalogs = new ArrayList<>(catalogsToCache.size());
for (String catName : catalogsToCache) {
catalogs.add(rawStore.getCatalog(catName));
}
sharedCache.populateCatalogsInCache(catalogs);
} catch (MetaException | NoSuchObjectException e) {
LOG.warn("Failed to populate catalogs in cache, going to try again", e);
try {
Thread.sleep(sleepTime);
sleepTime = sleepTime * 2;
} catch (InterruptedException timerEx) {
LOG.info("sleep interrupted", timerEx.getMessage());
}
// try again
continue;
}
LOG.info("Finished prewarming catalogs, starting on databases");
List<Database> databases = new ArrayList<>();
for (String catName : catalogsToCache) {
try {
List<String> dbNames = rawStore.getAllDatabases(catName);
LOG.info("Number of databases to prewarm in catalog {}: {}", catName, dbNames.size());
for (String dbName : dbNames) {
try {
databases.add(rawStore.getDatabase(catName, dbName));
} catch (NoSuchObjectException e) {
// Continue with next database
LOG.warn("Failed to cache database " + DatabaseName.getQualified(catName, dbName) + ", moving on", e);
}
}
} catch (MetaException e) {
LOG.warn("Failed to cache databases in catalog " + catName + ", moving on", e);
}
}
sharedCache.populateDatabasesInCache(databases);
LOG.info("Databases cache is now prewarmed. Now adding tables, partitions and statistics to the cache");
int numberOfDatabasesCachedSoFar = 0;
for (Database db : databases) {
String catName = StringUtils.normalizeIdentifier(db.getCatalogName());
String dbName = StringUtils.normalizeIdentifier(db.getName());
List<String> tblNames;
try {
tblNames = rawStore.getAllTables(catName, dbName);
} catch (MetaException e) {
LOG.warn("Failed to cache tables for database " + DatabaseName.getQualified(catName, dbName) + ", moving on");
// Continue with next database
continue;
}
tblsPendingPrewarm.addTableNamesForPrewarming(tblNames);
int totalTablesToCache = tblNames.size();
int numberOfTablesCachedSoFar = 0;
while (tblsPendingPrewarm.hasMoreTablesToPrewarm()) {
try {
String tblName = StringUtils.normalizeIdentifier(tblsPendingPrewarm.getNextTableNameToPrewarm());
if (!shouldCacheTable(catName, dbName, tblName)) {
continue;
}
Table table;
try {
table = rawStore.getTable(catName, dbName, tblName);
} catch (MetaException e) {
LOG.debug(ExceptionUtils.getStackTrace(e));
// It is possible the table was deleted while fetching the tables of the database;
// in that case, continue with the next table
continue;
}
List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
try {
ColumnStatistics tableColStats = null;
List<Partition> partitions = null;
List<ColumnStatistics> partitionColStats = null;
AggrStats aggrStatsAllPartitions = null;
AggrStats aggrStatsAllButDefaultPartition = null;
TableCacheObjects cacheObjects = new TableCacheObjects();
if (!table.getPartitionKeys().isEmpty()) {
Deadline.startTimer("getPartitions");
partitions = rawStore.getPartitions(catName, dbName, tblName, -1);
Deadline.stopTimer();
cacheObjects.setPartitions(partitions);
List<String> partNames = new ArrayList<>(partitions.size());
for (Partition p : partitions) {
partNames.add(Warehouse.makePartName(table.getPartitionKeys(), p.getValues()));
}
if (!partNames.isEmpty()) {
// Get partition column stats for this table
Deadline.startTimer("getPartitionColumnStatistics");
partitionColStats =
rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, CacheUtils.HIVE_ENGINE);
Deadline.stopTimer();
cacheObjects.setPartitionColStats(partitionColStats);
// Get aggregate stats for all partitions of a table and for all but default
// partition
Deadline.startTimer("getAggrPartitionColumnStatistics");
aggrStatsAllPartitions = rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, CacheUtils.HIVE_ENGINE);
Deadline.stopTimer();
cacheObjects.setAggrStatsAllPartitions(aggrStatsAllPartitions);
// Remove default partition from partition names and get aggregate
// stats again
List<FieldSchema> partKeys = table.getPartitionKeys();
String defaultPartitionValue =
MetastoreConf.getVar(rawStore.getConf(), ConfVars.DEFAULTPARTITIONNAME);
List<String> partCols = new ArrayList<>();
List<String> partVals = new ArrayList<>();
for (FieldSchema fs : partKeys) {
partCols.add(fs.getName());
partVals.add(defaultPartitionValue);
}
String defaultPartitionName = FileUtils.makePartName(partCols, partVals);
partNames.remove(defaultPartitionName);
Deadline.startTimer("getAggrPartitionColumnStatistics");
aggrStatsAllButDefaultPartition =
rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, CacheUtils.HIVE_ENGINE);
Deadline.stopTimer();
cacheObjects.setAggrStatsAllButDefaultPartition(aggrStatsAllButDefaultPartition);
}
} else {
Deadline.startTimer("getTableColumnStatistics");
tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, CacheUtils.HIVE_ENGINE);
Deadline.stopTimer();
cacheObjects.setTableColStats(tableColStats);
}
Deadline.startTimer("getAllTableConstraints");
SQLAllTableConstraints tableConstraints = rawStore.getAllTableConstraints(new AllTableConstraintsRequest(catName, dbName, tblName));
Deadline.stopTimer();
cacheObjects.setTableConstraints(tableConstraints);
// If the table could not be cached due to the memory limit, stop prewarm
boolean isSuccess = sharedCache
.populateTableInCache(table, cacheObjects);
if (isSuccess) {
LOG.trace("Cached Database: {}'s Table: {}.", dbName, tblName);
} else {
LOG.info("Unable to cache Database: {}'s Table: {}, since the cache memory is full. "
+ "Will stop attempting to cache any more tables.", dbName, tblName);
completePrewarm(startTime, false);
return;
}
} catch (MetaException | NoSuchObjectException e) {
LOG.debug(ExceptionUtils.getStackTrace(e));
// Continue with next table
continue;
}
LOG.debug("Processed database: {}'s table: {}. Cached {} / {} tables so far.", dbName, tblName,
++numberOfTablesCachedSoFar, totalTablesToCache);
} catch (EmptyStackException e) {
// We've prewarmed this database, continue with the next one
continue;
}
}
LOG.debug("Processed database: {}. Cached {} / {} databases so far.", dbName, ++numberOfDatabasesCachedSoFar,
databases.size());
}
sharedCache.clearDirtyFlags();
completePrewarm(startTime, true);
}
}
|
@Test public void testPrewarm() throws Exception {
Configuration conf = MetastoreConf.newMetastoreConf();
MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb");
MetaStoreTestUtils.setConfForStandloneMode(conf);
CachedStore cachedStore = new CachedStore();
CachedStore.clearSharedCache();
cachedStore.setConfForTest(conf);
ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore();
// Prewarm CachedStore
CachedStore.setCachePrewarmedState(false);
CachedStore.prewarm(objectStore);
List<String> allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME);
Assert.assertEquals(2, allDatabases.size());
Assert.assertTrue(allDatabases.contains(db1.getName()));
Assert.assertTrue(allDatabases.contains(db2.getName()));
List<String> db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName());
Assert.assertEquals(2, db1Tables.size());
Assert.assertTrue(db1Tables.contains(db1Utbl1.getTableName()));
Assert.assertTrue(db1Tables.contains(db1Ptbl1.getTableName()));
List<String> db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName());
Assert.assertEquals(2, db2Tables.size());
Assert.assertTrue(db2Tables.contains(db2Utbl1.getTableName()));
Assert.assertTrue(db2Tables.contains(db2Ptbl1.getTableName()));
// cs_db1_ptntbl1
List<Partition> db1Ptbl1Partitions =
cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), db1Ptbl1.getTableName(), -1);
Assert.assertEquals(25, db1Ptbl1Partitions.size());
Deadline.startTimer("");
List<Partition> db1Ptbl1PartitionsOS =
        objectStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), db1Ptbl1.getTableName(), -1);
Assert.assertTrue(db1Ptbl1Partitions.containsAll(db1Ptbl1PartitionsOS));
// cs_db2_ptntbl1
List<Partition> db2Ptbl1Partitions =
cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1);
Assert.assertEquals(25, db2Ptbl1Partitions.size());
Deadline.startTimer("");
List<Partition> db2Ptbl1PartitionsOS =
objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1);
Assert.assertTrue(db2Ptbl1Partitions.containsAll(db2Ptbl1PartitionsOS));
cachedStore.shutdown();
}
|
static int[] shiftLeftMultiPrecision(int[] number, int length, int shifts)
{
if (shifts == 0) {
return number;
}
// wordShifts = shifts / 32
int wordShifts = shifts >>> 5;
// we don't want to lose any leading bits
for (int i = 0; i < wordShifts; i++) {
checkState(number[length - i - 1] == 0);
}
if (wordShifts > 0) {
arraycopy(number, 0, number, wordShifts, length - wordShifts);
fill(number, 0, wordShifts, 0);
}
// bitShifts = shifts % 32
int bitShifts = shifts & 0b11111;
if (bitShifts > 0) {
// we don't want to lose any leading bits
checkState(number[length - 1] >>> (Integer.SIZE - bitShifts) == 0);
for (int position = length - 1; position > 0; position--) {
number[position] = (number[position] << bitShifts) | (number[position - 1] >>> (Integer.SIZE - bitShifts));
}
number[0] = number[0] << bitShifts;
}
return number;
}
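// Illustrative note (not part of the original method): a shift of 37 bits decomposes into
// wordShifts = 37 >>> 5 = 1 whole 32-bit word and bitShifts = 37 & 0b11111 = 5 remaining bits,
// so a little-endian value {0x1, 0x0} of length 2 first becomes {0x0, 0x1} and then {0x0, 0x20}.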
|
@Test
public void testShiftLeftMultiPrecision()
{
assertEquals(shiftLeftMultiPrecision(
new int[] {0b10100001010001011010000101000101, 0b01010110100101101011010101010101, 0b01010010111110001111100010101010,
0b11111111000000011010101010101011, 0b00000000000000000000000000000000}, 4, 0),
new int[] {0b10100001010001011010000101000101, 0b01010110100101101011010101010101, 0b01010010111110001111100010101010,
0b11111111000000011010101010101011, 0b00000000000000000000000000000000});
assertEquals(shiftLeftMultiPrecision(
new int[] {0b10100001010001011010000101000101, 0b01010110100101101011010101010101, 0b01010010111110001111100010101010,
0b11111111000000011010101010101011, 0b00000000000000000000000000000000}, 5, 1),
new int[] {0b01000010100010110100001010001010, 0b10101101001011010110101010101011, 0b10100101111100011111000101010100,
0b11111110000000110101010101010110, 0b00000000000000000000000000000001});
assertEquals(shiftLeftMultiPrecision(
new int[] {0b10100001010001011010000101000101, 0b01010110100101101011010101010101, 0b01010010111110001111100010101010,
0b11111111000000011010101010101011, 0b00000000000000000000000000000000}, 5, 31),
new int[] {0b10000000000000000000000000000000, 0b11010000101000101101000010100010, 0b00101011010010110101101010101010,
                        0b10101001011111000111110001010101, 0b01111111100000001101010101010101});
assertEquals(shiftLeftMultiPrecision(
new int[] {0b10100001010001011010000101000101, 0b01010110100101101011010101010101, 0b01010010111110001111100010101010,
0b11111111000000011010101010101011, 0b00000000000000000000000000000000}, 5, 32),
new int[] {0b00000000000000000000000000000000, 0b10100001010001011010000101000101, 0b01010110100101101011010101010101,
0b01010010111110001111100010101010, 0b11111111000000011010101010101011});
assertEquals(shiftLeftMultiPrecision(
new int[] {0b10100001010001011010000101000101, 0b01010110100101101011010101010101, 0b01010010111110001111100010101010,
0b11111111000000011010101010101011, 0b00000000000000000000000000000000, 0b00000000000000000000000000000000}, 6, 33),
new int[] {0b00000000000000000000000000000000, 0b01000010100010110100001010001010, 0b10101101001011010110101010101011,
0b10100101111100011111000101010100, 0b11111110000000110101010101010110, 0b00000000000000000000000000000001});
assertEquals(shiftLeftMultiPrecision(
new int[] {0b10100001010001011010000101000101, 0b01010110100101101011010101010101, 0b01010010111110001111100010101010,
0b11111111000000011010101010101011, 0b00000000000000000000000000000000, 0b00000000000000000000000000000000}, 6, 37),
new int[] {0b00000000000000000000000000000000, 0b00101000101101000010100010100000, 0b11010010110101101010101010110100,
0b01011111000111110001010101001010, 0b11100000001101010101010101101010, 0b00000000000000000000000000011111});
assertEquals(shiftLeftMultiPrecision(
new int[] {0b10100001010001011010000101000101, 0b01010110100101101011010101010101, 0b01010010111110001111100010101010,
0b11111111000000011010101010101011, 0b00000000000000000000000000000000, 0b00000000000000000000000000000000}, 6, 64),
new int[] {0b00000000000000000000000000000000, 0b00000000000000000000000000000000, 0b10100001010001011010000101000101,
0b01010110100101101011010101010101, 0b01010010111110001111100010101010, 0b11111111000000011010101010101011});
}
|
public ManagedProcess launch(AbstractCommand command) {
EsInstallation esInstallation = command.getEsInstallation();
if (esInstallation != null) {
cleanupOutdatedEsData(esInstallation);
writeConfFiles(esInstallation);
}
Process process;
if (command instanceof JavaCommand<?> javaCommand) {
process = launchJava(javaCommand);
} else {
throw new IllegalStateException("Unexpected type of command: " + command.getClass());
}
ProcessId processId = command.getProcessId();
try {
if (processId == ProcessId.ELASTICSEARCH) {
checkArgument(esInstallation != null, "Incorrect configuration EsInstallation is null");
EsConnectorImpl esConnector = new EsConnectorImpl(singleton(HostAndPort.fromParts(esInstallation.getHost(),
esInstallation.getHttpPort())), esInstallation.getBootstrapPassword(), esInstallation.getHttpKeyStoreLocation(),
esInstallation.getHttpKeyStorePassword().orElse(null));
return new EsManagedProcess(process, processId, esConnector);
} else {
ProcessCommands commands = allProcessesCommands.createAfterClean(processId.getIpcIndex());
return new ProcessCommandsManagedProcess(process, processId, commands);
}
} catch (Exception e) {
// just in case
if (process != null) {
process.destroyForcibly();
}
throw new IllegalStateException(format("Fail to launch monitor of process [%s]", processId.getHumanReadableName()), e);
}
}
|
@Test
public void clean_up_old_es_data() throws Exception {
File tempDir = temp.newFolder();
File homeDir = temp.newFolder();
File dataDir = temp.newFolder();
File logDir = temp.newFolder();
ProcessLauncher underTest = new ProcessLauncherImpl(tempDir, commands, TestProcessBuilder::new);
JavaCommand command = createEsCommand(tempDir, homeDir, dataDir, logDir);
File outdatedEsDir = new File(dataDir, "es");
assertThat(outdatedEsDir.mkdir()).isTrue();
assertThat(outdatedEsDir).exists();
underTest.launch(command);
assertThat(outdatedEsDir).doesNotExist();
}
|
public static byte[] toArray(ByteBuffer buffer) {
return toArray(buffer, 0, buffer.remaining());
}
|
@Test
public void toArray() {
byte[] input = {0, 1, 2, 3, 4};
ByteBuffer buffer = ByteBuffer.wrap(input);
assertArrayEquals(input, Utils.toArray(buffer));
assertEquals(0, buffer.position());
assertArrayEquals(new byte[] {1, 2}, Utils.toArray(buffer, 1, 2));
assertEquals(0, buffer.position());
buffer.position(2);
assertArrayEquals(new byte[] {2, 3, 4}, Utils.toArray(buffer));
assertEquals(2, buffer.position());
}
|
public static String getLatestCheckJobIdPath(final String jobId) {
return String.join("/", getJobRootPath(jobId), "check", "latest_job_id");
}
|
@Test
void assertGetLatestCheckJobIdPath() {
assertThat(PipelineMetaDataNode.getLatestCheckJobIdPath(jobId), is(jobCheckRootPath + "/latest_job_id"));
}
|
public static ParamType getVarArgsSchemaFromType(final Type type) {
return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
}
|
@Test
public void shouldGetPartialGenericBiFunctionVariadic() throws NoSuchMethodException {
// Given:
final Type genericType = getClass().getMethod("partialGenericBiFunctionType").getGenericReturnType();
// When:
final ParamType returnType = UdfUtil.getVarArgsSchemaFromType(genericType);
// Then:
assertThat(returnType, is(LambdaType.of(ImmutableList.of(GenericType.of("T"), ParamTypes.BOOLEAN), GenericType.of("U"))));
}
|
@Override
public List<Node> sniff(List<Node> nodes) {
if (attribute == null || value == null) {
return nodes;
}
return nodes.stream()
.filter(node -> nodeMatchesFilter(node, attribute, value))
.collect(Collectors.toList());
}
|
@Test
void returnsNoNodesIfFilterDoesNotMatch() throws Exception {
final List<Node> nodes = mockNodes();
final NodesSniffer nodesSniffer = new FilteredOpenSearchNodesSniffer("location", "alaska");
assertThat(nodesSniffer.sniff(nodes)).isEmpty();
}
|
@Override
public void fromPB(EncryptionKeyPB pb, KeyMgr mgr) {
super.fromPB(pb, mgr);
if (pb.algorithm == null) {
throw new IllegalArgumentException("no algorithm in EncryptionKeyPB for NormalKey id:" + id);
}
algorithm = pb.algorithm;
if (pb.plainKey != null) {
plainKey = pb.plainKey;
} else if (pb.encryptedKey != null) {
encryptedKey = pb.encryptedKey;
} else {
throw new IllegalArgumentException("no encryptedKey in EncryptionKeyPB for NormalKey id:" + id);
}
}
|
@Test
public void testFromPB_NoAlgorithm() {
NormalKey key = new NormalKey();
EncryptionKeyPB pb = new EncryptionKeyPB();
pb.encryptedKey = new byte[16];
KeyMgr mgr = new KeyMgr();
assertThrows(IllegalArgumentException.class, () -> {
key.fromPB(pb, mgr);
});
}
|
static Map<String, String> fromSystemProperties()
{
final HashMap<String, String> result = new HashMap<>();
final Properties properties = System.getProperties();
for (final Map.Entry<Object, Object> entry : properties.entrySet())
{
result.put((String)entry.getKey(), (String)entry.getValue());
}
return result;
}
|
@Test
void shouldReadSystemProperties()
{
final Map<String, String> expectedValues = new HashMap<>();
try
{
expectedValues.put(DISABLED_ARCHIVE_EVENT_CODES, "abc");
expectedValues.put(LOG_FILENAME, "");
expectedValues.put(ENABLED_CLUSTER_EVENT_CODES, "1,2,3");
for (final Map.Entry<String, String> entry : expectedValues.entrySet())
{
System.setProperty(entry.getKey(), entry.getValue());
}
System.setProperty("ignore me", "1000");
final Map<String, String> values = fromSystemProperties();
assertEquals("abc", values.get(DISABLED_ARCHIVE_EVENT_CODES));
assertEquals("", values.get(LOG_FILENAME));
assertEquals("1,2,3", values.get(ENABLED_CLUSTER_EVENT_CODES));
}
finally
{
System.clearProperty(DISABLED_ARCHIVE_EVENT_CODES);
System.clearProperty(LOG_FILENAME);
System.clearProperty(ENABLED_CLUSTER_EVENT_CODES);
System.clearProperty("ignore me");
}
}
|
public static String byteCountToDisplaySize(long size) {
if (size < 1024L) {
      return String.valueOf(size) + (size == 1 ? " byte" : " bytes");
}
long exp = (long) (Math.log(size) / Math.log((long) 1024));
double value = size / Math.pow((long) 1024, exp);
char unit = "KMGTPEZY".charAt((int) exp - 1);
return String.format("%.1f %s%s", value, unit, "B");
}
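// Worked example (a sketch, not part of the original method): for size = 2_814_749_767_106_560
// bytes (2.5 * 1024^5), exp = (long) (log(size) / log(1024)) = 5, value = size / 1024^5 = 2.5 and
// the unit character is "KMGTPEZY".charAt(4) = 'P', so the method returns "2.5 PB".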
|
@Test
public void shouldConvertBytesToPB() {
    long twoAndAHalfPeta = 2L * 1024 * 1024 * 1024 * 1024 * 1024 + 512L * 1024 * 1024 * 1024 * 1024;
    assertThat(FileSizeUtils.byteCountToDisplaySize(twoAndAHalfPeta), is("2.5 PB"));
}
|
static int getEncryptedPacketLength(ByteBuf buffer, int offset) {
int packetLength = 0;
// SSLv3 or TLS - Check ContentType
boolean tls;
switch (buffer.getUnsignedByte(offset)) {
case SSL_CONTENT_TYPE_CHANGE_CIPHER_SPEC:
case SSL_CONTENT_TYPE_ALERT:
case SSL_CONTENT_TYPE_HANDSHAKE:
case SSL_CONTENT_TYPE_APPLICATION_DATA:
case SSL_CONTENT_TYPE_EXTENSION_HEARTBEAT:
tls = true;
break;
default:
// SSLv2 or bad data
tls = false;
}
if (tls) {
// SSLv3 or TLS or GMSSLv1.0 or GMSSLv1.1 - Check ProtocolVersion
int majorVersion = buffer.getUnsignedByte(offset + 1);
int version = buffer.getShort(offset + 1);
if (majorVersion == 3 || version == GMSSL_PROTOCOL_VERSION) {
// SSLv3 or TLS or GMSSLv1.0 or GMSSLv1.1
packetLength = unsignedShortBE(buffer, offset + 3) + SSL_RECORD_HEADER_LENGTH;
if (packetLength <= SSL_RECORD_HEADER_LENGTH) {
                // Neither SSLv3 nor TLSv1 (i.e. SSLv2 or bad data)
tls = false;
}
} else if (version == DTLS_1_0 || version == DTLS_1_2 || version == DTLS_1_3) {
if (buffer.readableBytes() < offset + DTLS_RECORD_HEADER_LENGTH) {
return NOT_ENOUGH_DATA;
}
// length is the last 2 bytes in the 13 byte header.
packetLength = unsignedShortBE(buffer, offset + DTLS_RECORD_HEADER_LENGTH - 2) +
DTLS_RECORD_HEADER_LENGTH;
} else {
            // Neither SSLv3 nor TLSv1 (i.e. SSLv2 or bad data)
tls = false;
}
}
if (!tls) {
// SSLv2 or bad data - Check the version
int headerLength = (buffer.getUnsignedByte(offset) & 0x80) != 0 ? 2 : 3;
int majorVersion = buffer.getUnsignedByte(offset + headerLength + 1);
if (majorVersion == 2 || majorVersion == 3) {
// SSLv2
packetLength = headerLength == 2 ?
(shortBE(buffer, offset) & 0x7FFF) + 2 : (shortBE(buffer, offset) & 0x3FFF) + 3;
if (packetLength <= headerLength) {
return NOT_ENOUGH_DATA;
}
} else {
return NOT_ENCRYPTED;
}
}
return packetLength;
}
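// Worked example (illustrative only): a TLS 1.2 handshake record starts with the bytes
// 0x16 0x03 0x03 followed by a two-byte big-endian length; with a payload length of 0x00F4 the
// method returns 0x00F4 + SSL_RECORD_HEADER_LENGTH (5) = 249.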
|
@SuppressWarnings("deprecation")
@Test
public void testPacketLength() throws SSLException, NoSuchAlgorithmException {
SSLEngine engineLE = newEngine();
SSLEngine engineBE = newEngine();
ByteBuffer empty = ByteBuffer.allocate(0);
ByteBuffer cTOsLE = ByteBuffer.allocate(17 * 1024).order(ByteOrder.LITTLE_ENDIAN);
ByteBuffer cTOsBE = ByteBuffer.allocate(17 * 1024);
assertTrue(engineLE.wrap(empty, cTOsLE).bytesProduced() > 0);
cTOsLE.flip();
assertTrue(engineBE.wrap(empty, cTOsBE).bytesProduced() > 0);
cTOsBE.flip();
ByteBuf bufferLE = Unpooled.buffer().order(ByteOrder.LITTLE_ENDIAN).writeBytes(cTOsLE);
ByteBuf bufferBE = Unpooled.buffer().writeBytes(cTOsBE);
// Test that the packet-length for BE and LE is the same
assertEquals(getEncryptedPacketLength(bufferBE, 0), getEncryptedPacketLength(bufferLE, 0));
assertEquals(getEncryptedPacketLength(new ByteBuffer[] { bufferBE.nioBuffer() }, 0),
getEncryptedPacketLength(new ByteBuffer[] { bufferLE.nioBuffer().order(ByteOrder.LITTLE_ENDIAN) }, 0));
}
|
int parseAndConvert(String[] args) throws Exception {
Options opts = createOptions();
int retVal = 0;
try {
if (args.length == 0) {
LOG.info("Missing command line arguments");
printHelp(opts);
return 0;
}
CommandLine cliParser = new GnuParser().parse(opts, args);
if (cliParser.hasOption(CliOption.HELP.shortSwitch)) {
printHelp(opts);
return 0;
}
FSConfigToCSConfigConverter converter =
prepareAndGetConverter(cliParser);
converter.convert(converterParams);
String outputDir = converterParams.getOutputDirectory();
boolean skipVerification =
cliParser.hasOption(CliOption.SKIP_VERIFICATION.shortSwitch);
if (outputDir != null && !skipVerification) {
validator.validateConvertedConfig(
converterParams.getOutputDirectory());
}
} catch (ParseException e) {
String msg = "Options parsing failed: " + e.getMessage();
logAndStdErr(e, msg);
printHelp(opts);
retVal = -1;
} catch (PreconditionException e) {
String msg = "Cannot start FS config conversion due to the following"
+ " precondition error: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (UnsupportedPropertyException e) {
String msg = "Unsupported property/setting encountered during FS config "
+ "conversion: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (ConversionException | IllegalArgumentException e) {
String msg = "Fatal error during FS config conversion: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (VerificationException e) {
Throwable cause = e.getCause();
String msg = "Verification failed: " + e.getCause().getMessage();
conversionOptions.handleVerificationFailure(cause, msg);
retVal = -1;
}
conversionOptions.handleParsingFinished();
return retVal;
}
|
@Test
public void testFairSchedulerXmlIsNotDefinedIfItsDefinedInYarnSiteXml()
throws Exception {
setupFSConfigConversionFiles(true);
FSConfigToCSConfigArgumentHandler argumentHandler =
createArgumentHandler();
argumentHandler.parseAndConvert(getDefaultArgumentsAsArray());
}
|
@Override
public Object read(final PostgreSQLPacketPayload payload, final int parameterValueLength) {
byte[] result = new byte[parameterValueLength];
payload.getByteBuf().readBytes(result);
return new UUID(ByteConverter.int8(result, 0), ByteConverter.int8(result, 8));
}
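// Illustrative note (an assumption about the ByteConverter semantics): the 16-byte parameter value
// is read as two big-endian longs, the first 8 bytes becoming the UUID's most significant bits and
// the last 8 bytes its least significant bits, mirroring the ByteBuffer.putLong order used in tests.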
|
@Test
void assertRead() {
        UUID uuid = UUID.fromString("00000000-0000-0000-0000-000000000001");
byte[] expected = new byte[16];
ByteBuffer buffer = ByteBuffer.wrap(expected);
buffer.putLong(uuid.getMostSignificantBits());
buffer.putLong(uuid.getLeastSignificantBits());
ByteBuf byteBuf = Unpooled.wrappedBuffer(expected);
PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(byteBuf, StandardCharsets.UTF_8);
assertThat(new PostgreSQLUUIDBinaryProtocolValue().read(payload, 16), is(uuid));
}
|
public void isNull() {
standardIsEqualTo(null);
}
|
@Test
public void isNullWhenSubjectForbidsIsEqualTo() {
assertAbout(objectsForbiddingEqualityCheck()).that(null).isNull();
}
|
public synchronized TopologyDescription describe() {
return internalTopologyBuilder.describe();
}
|
@Test
public void kTableAnonymousMaterializedMapValuesShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
final KTable<Object, Object> table = builder.table("input-topic");
table.mapValues(
(readOnlyKey, value) -> null,
Materialized.<Object, Object, KeyValueStore<Bytes, byte[]>>with(null, null)
.withStoreType(Materialized.StoreType.IN_MEMORY));
final TopologyDescription describe = builder.build().describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" +
" --> KTABLE-SOURCE-0000000002\n" +
" Processor: KTABLE-SOURCE-0000000002 (stores: [])\n" +
" --> KTABLE-MAPVALUES-0000000004\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
// previously, this was
// Processor: KTABLE-MAPVALUES-0000000004 (stores: [KTABLE-MAPVALUES-STATE-STORE-0000000003]
// but we added a change not to materialize non-queryable stores. This change shouldn't break compatibility.
" Processor: KTABLE-MAPVALUES-0000000004 (stores: [])\n" +
" --> none\n" +
" <-- KTABLE-SOURCE-0000000002\n" +
"\n",
describe.toString());
}
|
public static ParsedCommand parse(
// CHECKSTYLE_RULES.ON: CyclomaticComplexity
final String sql, final Map<String, String> variables) {
validateSupportedStatementType(sql);
final String substituted;
try {
substituted = VariableSubstitutor.substitute(KSQL_PARSER.parse(sql).get(0), variables);
} catch (ParseFailedException e) {
throw new MigrationException(String.format(
"Failed to parse the statement. Statement: %s. Reason: %s",
sql, e.getMessage()));
}
final SqlBaseParser.SingleStatementContext statementContext = KSQL_PARSER.parse(substituted)
.get(0).getStatement();
final boolean isStatement = StatementType.get(statementContext.statement().getClass())
== StatementType.STATEMENT;
return new ParsedCommand(substituted,
isStatement ? Optional.empty() : Optional.of(new AstBuilder(TypeRegistry.EMPTY)
.buildStatement(statementContext)));
}
|
@Test
public void shouldParseAssertTopic() {
// Given:
final String assertTopics = "assert topic abc; assert not exists topic 'abcd' with (foo=2, bar=3) timeout 4 minutes;"
+ "assert topic ${topic} with (replicas=${replicas}, partitions=${partitions}) timeout 10 seconds;";
// When:
List<CommandParser.ParsedCommand> commands = parse(assertTopics, ImmutableMap.of("replicas", "3", "partitions", "5", "topic", "name"));
// Then:
assertThat(commands.size(), is(3));
assertThat(commands.get(0).getCommand(), is("assert topic abc;"));
assertThat(commands.get(0).getStatement().isPresent(), is (true));
assertThat(commands.get(0).getStatement().get(), instanceOf(AssertTopic.class));
assertThat(((AssertTopic) commands.get(0).getStatement().get()).getTopic(), is("abc"));
assertThat(((AssertTopic) commands.get(0).getStatement().get()).getConfig().size(), is(0));
assertThat(((AssertTopic) commands.get(0).getStatement().get()).checkExists(), is(true));
assertThat(((AssertTopic) commands.get(0).getStatement().get()).getTimeout(), is(Optional.empty()));
assertThat(commands.get(1).getCommand(), is( "assert not exists topic 'abcd' with (foo=2, bar=3) timeout 4 minutes;"));
assertThat(commands.get(1).getStatement().isPresent(), is (true));
assertThat(commands.get(1).getStatement().get(), instanceOf(AssertTopic.class));
assertThat(((AssertTopic) commands.get(1).getStatement().get()).getTopic(), is("abcd"));
assertThat(((AssertTopic) commands.get(1).getStatement().get()).getConfig().size(), is(2));
assertThat(((AssertTopic) commands.get(1).getStatement().get()).getConfig().get("FOO").getValue(), is(2));
assertThat(((AssertTopic) commands.get(1).getStatement().get()).getConfig().get("BAR").getValue(), is(3));
assertThat(((AssertTopic) commands.get(1).getStatement().get()).checkExists(), is(false));
assertThat(((AssertTopic) commands.get(1).getStatement().get()).getTimeout().get(), is(WindowTimeClause.of(4, TimeUnit.MINUTES.name())));
assertThat(commands.get(2).getCommand(), is( "assert topic name with (replicas=3, partitions=5) timeout 10 seconds;"));
assertThat(commands.get(2).getStatement().isPresent(), is (true));
assertThat(commands.get(2).getStatement().get(), instanceOf(AssertTopic.class));
assertThat(((AssertTopic) commands.get(2).getStatement().get()).getTopic(), is("name"));
assertThat(((AssertTopic) commands.get(2).getStatement().get()).getConfig().size(), is(2));
assertThat(((AssertTopic) commands.get(2).getStatement().get()).getConfig().get("REPLICAS").getValue(), is(3));
assertThat(((AssertTopic) commands.get(2).getStatement().get()).getConfig().get("PARTITIONS").getValue(), is(5));
assertThat(((AssertTopic) commands.get(2).getStatement().get()).checkExists(), is(true));
assertThat(((AssertTopic) commands.get(2).getStatement().get()).getTimeout().get(), is(WindowTimeClause.of(10, TimeUnit.SECONDS.name())));
}
|
public static KeyFormat sanitizeKeyFormat(
final KeyFormat keyFormat,
final List<SqlType> newKeyColumnSqlTypes,
final boolean allowKeyFormatChangeToSupportNewKeySchema
) {
return sanitizeKeyFormatWrapping(
!allowKeyFormatChangeToSupportNewKeySchema ? keyFormat :
sanitizeKeyFormatForTypeCompatibility(
sanitizeKeyFormatForMultipleColumns(
keyFormat,
newKeyColumnSqlTypes.size()),
newKeyColumnSqlTypes
),
newKeyColumnSqlTypes.size() == 1
);
}
|
@Test
public void shouldNotConvertDelimitedFormatForMulticolKeysWithPrimitiveTypes() {
// Given:
final KeyFormat format = KeyFormat.nonWindowed(
FormatInfo.of(DelimitedFormat.NAME),
SerdeFeatures.of());
// When:
final KeyFormat sanitized = SerdeFeaturesFactory.sanitizeKeyFormat(format, MULTI_SQL_TYPES, true);
// Then:
assertThat(sanitized.getFormatInfo(), equalTo(FormatInfo.of(DelimitedFormat.NAME)));
assertThat(sanitized.getFeatures(), equalTo(SerdeFeatures.of()));
}
|
@Override
public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException
{
int totalBytesRead = 0;
while (totalBytesRead < length) {
int bytesRead = read(
position + totalBytesRead,
buffer,
offset + totalBytesRead,
length - totalBytesRead);
if (bytesRead == -1) {
throw new EOFException();
}
totalBytesRead += bytesRead;
}
}
|
@Test
public void testValidateDataEnabledWithDataMatched()
throws IOException
{
byte[] inputData = new byte[] {1, 2, 3};
FSDataInputStream dataTierInputStream = new TestFSDataInputStream(inputData);
FSDataInputStream fileInStream = new TestFSDataInputStream(inputData);
CacheValidatingInputStream fileInputStream = new CacheValidatingInputStream(fileInStream, dataTierInputStream);
byte[] buffer = new byte[3];
fileInputStream.readFully(0, buffer, 0, buffer.length);
validateBuffer(inputData, 0, buffer, 0, inputData.length);
}
|
@Override
public String getDriverVersion() {
return null;
}
|
@Test
void assertGetDriverVersion() {
assertNull(metaData.getDriverVersion());
}
|
@Override
public DataflowPipelineJob run(Pipeline pipeline) {
// Multi-language pipelines and pipelines that include upgrades should automatically be upgraded
// to Runner v2.
if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_runner_v2")) {
LOG.info(
"Automatically enabling Dataflow Runner v2 since the pipeline used cross-language"
+ " transforms or pipeline needed a transform upgrade.");
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build());
}
}
if (useUnifiedWorker(options)) {
if (hasExperiment(options, "disable_runner_v2")
|| hasExperiment(options, "disable_runner_v2_until_2023")
|| hasExperiment(options, "disable_prime_runner_v2")) {
throw new IllegalArgumentException(
"Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set.");
}
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("use_runner_v2")) {
experiments.add("use_runner_v2");
}
if (!experiments.contains("use_unified_worker")) {
experiments.add("use_unified_worker");
}
if (!experiments.contains("beam_fn_api")) {
experiments.add("beam_fn_api");
}
if (!experiments.contains("use_portable_job_submission")) {
experiments.add("use_portable_job_submission");
}
options.setExperiments(ImmutableList.copyOf(experiments));
}
logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);
logWarningIfBigqueryDLQUnused(pipeline);
if (shouldActAsStreaming(pipeline)) {
options.setStreaming(true);
if (useUnifiedWorker(options)) {
options.setEnableStreamingEngine(true);
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("enable_streaming_engine")) {
experiments.add("enable_streaming_engine");
}
if (!experiments.contains("enable_windmill_service")) {
experiments.add("enable_windmill_service");
}
}
}
if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) {
ProjectionPushdownOptimizer.optimize(pipeline);
}
LOG.info(
"Executing pipeline on the Dataflow Service, which will have billing implications "
+ "related to Google Compute Engine usage and other Google Cloud Services.");
DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions);
// This incorrectly puns the worker harness container image (which implements v1beta3 API)
// with the SDK harness image (which implements Fn API).
//
// The same Environment is used in different and contradictory ways, depending on whether
// it is a v1 or v2 job submission.
RunnerApi.Environment defaultEnvironmentForDataflow =
Environments.createDockerEnvironment(workerHarnessContainerImageURL);
    // The SdkComponents for portable and non-portable job submission must be kept distinct. Both
// need the default environment.
SdkComponents portableComponents = SdkComponents.create();
portableComponents.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
RunnerApi.Pipeline portablePipelineProto =
PipelineTranslation.toProto(pipeline, portableComponents, false);
// Note that `stageArtifacts` has to be called before `resolveArtifact` because
// `resolveArtifact` updates local paths to staged paths in pipeline proto.
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto);
List<DataflowPackage> packages = stageArtifacts(portablePipelineProto);
portablePipelineProto = resolveArtifacts(portablePipelineProto);
portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Portable pipeline proto:\n{}",
TextFormat.printer().printToString(portablePipelineProto));
}
// Stage the portable pipeline proto, retrieving the staged pipeline path, then update
// the options on the new job
// TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options
LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation());
byte[] serializedProtoPipeline = portablePipelineProto.toByteArray();
DataflowPackage stagedPipeline =
options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME);
dataflowOptions.setPipelineUrl(stagedPipeline.getLocation());
if (useUnifiedWorker(options)) {
LOG.info("Skipping v1 transform replacements since job will run on v2.");
} else {
// Now rewrite things to be as needed for v1 (mutates the pipeline)
// This way the job submitted is valid for v1 and v2, simultaneously
replaceV1Transforms(pipeline);
}
// Capture the SdkComponents for look up during step translations
SdkComponents dataflowV1Components = SdkComponents.create();
dataflowV1Components.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
// No need to perform transform upgrading for the Runner v1 proto.
RunnerApi.Pipeline dataflowV1PipelineProto =
PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Dataflow v1 pipeline proto:\n{}",
TextFormat.printer().printToString(dataflowV1PipelineProto));
}
// Set a unique client_request_id in the CreateJob request.
// This is used to ensure idempotence of job creation across retried
// attempts to create a job. Specifically, if the service returns a job with
// a different client_request_id, it means the returned one is a different
// job previously created with the same job name, and that the job creation
// has been effectively rejected. The SDK should return
// Error::Already_Exists to user in that case.
int randomNum = new Random().nextInt(9000) + 1000;
String requestId =
DateTimeFormat.forPattern("YYYYMMddHHmmssmmm")
.withZone(DateTimeZone.UTC)
.print(DateTimeUtils.currentTimeMillis())
+ "_"
+ randomNum;
JobSpecification jobSpecification =
translator.translate(
pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages);
if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_staged_dataflow_worker_jar")) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("use_staged_dataflow_worker_jar")
.build());
}
}
Job newJob = jobSpecification.getJob();
try {
newJob
.getEnvironment()
.setSdkPipelineOptions(
MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class));
} catch (IOException e) {
throw new IllegalArgumentException(
"PipelineOptions specified failed to serialize to JSON.", e);
}
newJob.setClientRequestId(requestId);
DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
String version = dataflowRunnerInfo.getVersion();
checkState(
!"${pom.version}".equals(version),
"Unable to submit a job to the Dataflow service with unset version ${pom.version}");
LOG.info("Dataflow SDK version: {}", version);
newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties());
// The Dataflow Service may write to the temporary directory directly, so
    // it must be verified.
if (!isNullOrEmpty(options.getGcpTempLocation())) {
newJob
.getEnvironment()
.setTempStoragePrefix(
dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation()));
}
newJob.getEnvironment().setDataset(options.getTempDatasetId());
if (options.getWorkerRegion() != null) {
newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion());
}
if (options.getWorkerZone() != null) {
newJob.getEnvironment().setWorkerZone(options.getWorkerZone());
}
if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED");
} else if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED");
}
// Represent the minCpuPlatform pipeline option as an experiment, if not already present.
if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
List<String> minCpuFlags =
experiments.stream()
.filter(p -> p.startsWith("min_cpu_platform"))
.collect(Collectors.toList());
if (minCpuFlags.isEmpty()) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform())
.build());
} else {
LOG.warn(
"Flag min_cpu_platform is defined in both top level PipelineOption, "
+ "as well as under experiments. Proceed using {}.",
minCpuFlags.get(0));
}
}
newJob
.getEnvironment()
.setExperiments(
ImmutableList.copyOf(
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList())));
// Set the Docker container image that executes Dataflow worker harness, residing in Google
// Container Registry. Translator is guaranteed to create a worker pool prior to this point.
// For runner_v1, only worker_harness_container is set.
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same
// value.
String containerImage = getContainerImageForJob(options);
for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) {
workerPool.setWorkerHarnessContainerImage(containerImage);
}
configureSdkHarnessContainerImages(options, portablePipelineProto, newJob);
newJob.getEnvironment().setVersion(getEnvironmentVersion(options));
if (hooks != null) {
hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment());
}
// enable upload_graph when the graph is too large
byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8);
int jobGraphByteSize = jobGraphBytes.length;
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES
&& !hasExperiment(options, "upload_graph")
&& !useUnifiedWorker(options)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build());
LOG.info(
"The job graph size ({} in bytes) is larger than {}. Automatically add "
+ "the upload_graph option to experiments.",
jobGraphByteSize,
CREATE_JOB_REQUEST_LIMIT_BYTES);
}
if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) {
ArrayList<String> experiments = new ArrayList<>(options.getExperiments());
while (experiments.remove("upload_graph")) {}
options.setExperiments(experiments);
LOG.warn(
"The upload_graph experiment was specified, but it does not apply "
+ "to runner v2 jobs. Option has been automatically removed.");
}
// Upload the job to GCS and remove the graph object from the API call. The graph
// will be downloaded from GCS by the service.
if (hasExperiment(options, "upload_graph")) {
DataflowPackage stagedGraph =
options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME);
newJob.getSteps().clear();
newJob.setStepsLocation(stagedGraph.getLocation());
}
if (!isNullOrEmpty(options.getDataflowJobFile())
|| !isNullOrEmpty(options.getTemplateLocation())) {
boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation());
if (isTemplate) {
checkArgument(
isNullOrEmpty(options.getDataflowJobFile()),
"--dataflowJobFile and --templateLocation are mutually exclusive.");
}
String fileLocation =
firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile());
checkArgument(
fileLocation.startsWith("/") || fileLocation.startsWith("gs://"),
"Location must be local or on Cloud Storage, got %s.",
fileLocation);
ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */);
String workSpecJson = DataflowPipelineTranslator.jobToString(newJob);
try (PrintWriter printWriter =
new PrintWriter(
new BufferedWriter(
new OutputStreamWriter(
Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)),
UTF_8)))) {
printWriter.print(workSpecJson);
LOG.info("Printed job specification to {}", fileLocation);
} catch (IOException ex) {
String error = String.format("Cannot create output file at %s", fileLocation);
if (isTemplate) {
throw new RuntimeException(error, ex);
} else {
LOG.warn(error, ex);
}
}
if (isTemplate) {
LOG.info("Template successfully created.");
return new DataflowTemplateJob();
}
}
String jobIdToUpdate = null;
if (options.isUpdate()) {
jobIdToUpdate = getJobIdFromName(options.getJobName());
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setReplaceJobId(jobIdToUpdate);
}
if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) {
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot());
}
Job jobResult;
try {
jobResult = dataflowClient.createJob(newJob);
} catch (GoogleJsonResponseException e) {
String errorMessages = "Unexpected errors";
if (e.getDetails() != null) {
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) {
errorMessages =
"The size of the serialized JSON representation of the pipeline "
+ "exceeds the allowable limit. "
+ "For more information, please see the documentation on job submission:\n"
+ "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs";
} else {
errorMessages = e.getDetails().getMessage();
}
}
throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e);
} catch (IOException e) {
throw new RuntimeException("Failed to create a workflow job", e);
}
// Use a raw client for post-launch monitoring, as status calls may fail
// regularly and need not be retried automatically.
DataflowPipelineJob dataflowPipelineJob =
new DataflowPipelineJob(
DataflowClient.create(options),
jobResult.getId(),
options,
jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(),
portablePipelineProto);
// If the service returned client request id, the SDK needs to compare it
// with the original id generated in the request, if they are not the same
// (i.e., the returned job is not created by this request), throw
// DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException
// depending on whether this is a reload or not.
if (jobResult.getClientRequestId() != null
&& !jobResult.getClientRequestId().isEmpty()
&& !jobResult.getClientRequestId().equals(requestId)) {
// If updating a job.
if (options.isUpdate()) {
throw new DataflowJobAlreadyUpdatedException(
dataflowPipelineJob,
String.format(
"The job named %s with id: %s has already been updated into job id: %s "
+ "and cannot be updated again.",
newJob.getName(), jobIdToUpdate, jobResult.getId()));
} else {
throw new DataflowJobAlreadyExistsException(
dataflowPipelineJob,
String.format(
"There is already an active job named %s with id: %s. If you want to submit a"
+ " second job, try again by setting a different name using --jobName.",
newJob.getName(), jobResult.getId()));
}
}
LOG.info(
"To access the Dataflow monitoring console, please navigate to {}",
MonitoringUtil.getJobMonitoringPageURL(
options.getProject(), options.getRegion(), jobResult.getId()));
LOG.info("Submitted job: {}", jobResult.getId());
LOG.info(
"To cancel the job using the 'gcloud' tool, run:\n> {}",
MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId()));
return dataflowPipelineJob;
}
|
@Test
public void testStreamingOnCreateMatcher() throws Exception {
options.setStreaming(true);
Pipeline p = TestPipeline.create(options);
PCollection<Integer> pc = p.apply(Create.of(1, 2, 3));
PAssert.that(pc).containsInAnyOrder(1, 2, 3);
final DataflowPipelineJob mockJob = Mockito.mock(DataflowPipelineJob.class);
when(mockJob.getState()).thenReturn(State.DONE);
when(mockJob.getProjectId()).thenReturn("test-project");
when(mockJob.getJobId()).thenReturn("test-job");
DataflowRunner mockRunner = Mockito.mock(DataflowRunner.class);
when(mockRunner.run(any(Pipeline.class))).thenReturn(mockJob);
TestDataflowRunner runner = TestDataflowRunner.fromOptionsAndClient(options, mockClient);
options.as(TestPipelineOptions.class).setOnCreateMatcher(new TestSuccessMatcher(mockJob, 0));
when(mockJob.waitUntilFinish(any(Duration.class), any(JobMessagesHandler.class)))
.thenReturn(State.DONE);
when(mockClient.getJobMetrics(anyString()))
.thenReturn(generateMockMetricResponse(true /* success */, true /* tentative */));
runner.run(p, mockRunner);
}
|
@Udf
public String extractQuery(
@UdfParameter(description = "a valid URL to extract a query from") final String input) {
return UrlParser.extract(input, URI::getQuery);
}
|
@Test
public void shouldThrowExceptionForMalformedURL() {
// When:
final KsqlException e = assertThrows(
KsqlException.class,
() -> extractUdf.extractQuery("http://257.1/bogus/[url")
);
// Then:
assertThat(e.getMessage(), containsString("URL input has invalid syntax: http://257.1/bogus/[url"));
}
|
public ReliableTopicConfig setTopicOverloadPolicy(TopicOverloadPolicy topicOverloadPolicy) {
this.topicOverloadPolicy = checkNotNull(topicOverloadPolicy, "topicOverloadPolicy can't be null");
return this;
}
|
@Test(expected = NullPointerException.class)
public void setTopicOverloadPolicy_whenNull() {
ReliableTopicConfig config = new ReliableTopicConfig("foo");
config.setTopicOverloadPolicy(null);
}
|
@Override
public Path copy(final Path source, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
try {
// Copies file
// If segmented file, copies manifest (creating a link between new object and original segments)
// Use with caution.
session.getClient().copyObject(regionService.lookup(source),
containerService.getContainer(source).getName(), containerService.getKey(source),
containerService.getContainer(target).getName(), containerService.getKey(target));
listener.sent(status.getLength());
// Copy original file attributes
return target.withAttributes(source.attributes());
}
catch(GenericException e) {
throw new SwiftExceptionMappingService().map("Cannot copy {0}", e, source);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Cannot copy {0}", e, source);
}
}
|
@Test
public void testCopyToExistingFile() throws Exception {
final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume));
container.attributes().setRegion("IAD");
final Path folder = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
new SwiftDirectoryFeature(session).mkdir(folder, new TransferStatus());
final Path test = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new SwiftTouchFeature(session, new SwiftRegionService(session)).touch(test, new TransferStatus());
final Path copy = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new SwiftTouchFeature(session, new SwiftRegionService(session)).touch(copy, new TransferStatus());
new SwiftDefaultCopyFeature(session).copy(test, copy, new TransferStatus().exists(true), new DisabledConnectionCallback(), new DisabledStreamListener());
final Find find = new DefaultFindFeature(session);
assertTrue(find.find(test));
assertTrue(find.find(copy));
new SwiftDeleteFeature(session).delete(Arrays.asList(test, copy), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public static String sanitizeStreamName(String streamName) {
Matcher problemCharacterMatcher = Pattern.compile("(?![A-Za-z_\\-\\.]).").matcher(streamName);
if (streamName.length() > 0 && Character.isLetter(streamName.charAt(0))) {
return problemCharacterMatcher.replaceAll("_");
} else {
return "_s" + problemCharacterMatcher.replaceAll("_");
}
}
|
@Test
public void testSanitizeStreamName() {
// replaces the expected characters with underscores (everything except A-Z, a-z, dot, dash, and underscore)
assertEquals("my-stream_with.all_characterClasses____",
UIHelpers.sanitizeStreamName("my-stream:with.all_characterClasses1/\\2"));
// has the expected effect when streamName begins with a non-alpha character
assertEquals("_s_foo", UIHelpers.sanitizeStreamName("3foo"));
// handles empty string, though that's not an expected stream name
assertEquals("_s", UIHelpers.sanitizeStreamName(""));
}
|
@Override
public String getName() {
return this.name;
}
|
@Test
public void shouldReturnTheCorrectName() {
assertThat(circuitBreaker.getName()).isEqualTo("testName");
}
|
public static IcebergTableName from(String name)
{
Matcher match = TABLE_PATTERN.matcher(name);
if (!match.matches()) {
throw new PrestoException(NOT_SUPPORTED, "Invalid Iceberg table name: " + name);
}
String table = match.group("table");
String typeString = match.group("type");
String version1 = match.group("ver1");
String version2 = match.group("ver2");
IcebergTableType type = DATA;
if (typeString != null) {
try {
type = IcebergTableType.valueOf(typeString.toUpperCase(ROOT));
}
catch (IllegalArgumentException e) {
throw new PrestoException(NOT_SUPPORTED, format("Invalid Iceberg table name (unknown type '%s'): %s", typeString, name));
}
}
if (!type.isPublic()) {
throw new PrestoException(NOT_SUPPORTED, format("Internal Iceberg table name (type '%s'): %s", typeString, name));
}
Optional<Long> version = Optional.empty();
Optional<Long> changelogEndVersion = Optional.empty();
if (type == DATA || type == PARTITIONS || type == MANIFESTS || type == FILES) {
if (version1 != null && version2 != null) {
throw new PrestoException(NOT_SUPPORTED, "Invalid Iceberg table name (cannot specify two @ versions): " + name);
}
if (version1 != null) {
version = Optional.of(parseLong(version1));
}
else if (version2 != null) {
version = Optional.of(parseLong(version2));
}
}
else if (type == CHANGELOG) {
version = Optional.ofNullable(version1).map(Long::parseLong);
changelogEndVersion = Optional.ofNullable(version2).map(Long::parseLong);
}
else if (version1 != null || version2 != null) {
throw new PrestoException(NOT_SUPPORTED, format("Invalid Iceberg table name (cannot use @ version with table type '%s'): %s", type, name));
}
return new IcebergTableName(table, type, version, changelogEndVersion);
}
|
@Test
public void testFrom()
{
assertFrom("abc", "abc", IcebergTableType.DATA);
assertFrom("abc@123", "abc", IcebergTableType.DATA, Optional.of(123L));
assertFrom("abc$data", "abc", IcebergTableType.DATA);
assertFrom("xyz@456", "xyz", IcebergTableType.DATA, Optional.of(456L));
assertFrom("xyz$data@456", "xyz", IcebergTableType.DATA, Optional.of(456L));
assertFrom("abc$partitions@456", "abc", IcebergTableType.PARTITIONS, Optional.of(456L));
assertFrom("abc$manifests@456", "abc", IcebergTableType.MANIFESTS, Optional.of(456L));
assertFrom("abc$manifests@456", "abc", IcebergTableType.MANIFESTS, Optional.of(456L));
assertFrom("abc$history", "abc", IcebergTableType.HISTORY);
assertFrom("abc$snapshots", "abc", IcebergTableType.SNAPSHOTS);
assertFrom("abc$changelog", "abc", IcebergTableType.CHANGELOG);
assertFrom("abc@123$changelog", "abc", IcebergTableType.CHANGELOG, Optional.of(123L));
assertFrom("abc$changelog@123", "abc", IcebergTableType.CHANGELOG, Optional.empty(), Optional.of(123L));
assertFrom("abc@123$changelog@124", "abc", IcebergTableType.CHANGELOG, Optional.of(123L), Optional.of(124L));
assertInvalid("abc@xyz", "Invalid Iceberg table name: abc@xyz");
assertInvalid("abc$what", "Invalid Iceberg table name (unknown type 'what'): abc$what");
assertInvalid("abc@123$data@456", "Invalid Iceberg table name (cannot specify two @ versions): abc@123$data@456");
assertInvalid("abc@123$snapshots", "Invalid Iceberg table name (cannot use @ version with table type 'SNAPSHOTS'): abc@123$snapshots");
assertInvalid("abc$snapshots@456", "Invalid Iceberg table name (cannot use @ version with table type 'SNAPSHOTS'): abc$snapshots@456");
}
|
public StrBuilder del(int start, int end) throws StringIndexOutOfBoundsException {
if (start < 0) {
start = 0;
}
if (end >= this.position) {
        // end is at or beyond the bounds, which is equivalent to deleting the trailing part
this.position = start;
return this;
} else if (end < 0) {
        // when start and end are both 0, this means deleting everything
end = 0;
}
int len = end - start;
    // removing a middle section: copy the trailing part back to the start of the deleted range
if (len > 0) {
System.arraycopy(value, start + len, value, start, this.position - end);
this.position -= len;
} else if (len < 0) {
throw new StringIndexOutOfBoundsException("Start is greater than End.");
}
return this;
}
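// Worked example (not from the original source): for a builder holding "ABCDEFG" (position = 7),
// del(2, 4) copies "EFG" from index 4 down to index 2 and shrinks position to 5, leaving "ABEFG";
// del(0, length()) takes the end >= position branch and simply resets position to 0.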
|
@Test
public void delTest() {
    // test deleting the entire contents
StrBuilder strBuilder = new StrBuilder("ABCDEFG");
int length = strBuilder.length();
StrBuilder builder = strBuilder.del(0, length);
assertEquals("", builder.toString());
}
|
@Override
public boolean eval(Object arg) {
QueryableEntry entry = (QueryableEntry) arg;
Data keyData = entry.getKeyData();
return (key == null || key.equals(keyData)) && predicate.apply((Map.Entry) arg);
}
|
@Test
public void testEval_givenFilterDoesNotContainKey_whenPredicateIsNotMatching_thenReturnFalse() {
//given
Predicate<Object, Object> predicate = Predicates.alwaysFalse();
QueryEventFilter filter = new QueryEventFilter(null, predicate, true);
//when
Data key2 = serializationService.toData("key");
QueryableEntry entry = mockEntryWithKeyData(key2);
//then
boolean result = filter.eval(entry);
assertFalse(result);
}
|
@Override
public void updateHealthStatusForPersistentInstance(String namespace, String fullServiceName, String clusterName,
String ip, int port, boolean healthy) throws NacosException {
String groupName = NamingUtils.getGroupName(fullServiceName);
String serviceName = NamingUtils.getServiceName(fullServiceName);
Service service = Service.newService(namespace, groupName, serviceName);
Optional<ServiceMetadata> serviceMetadata = metadataManager.getServiceMetadata(service);
if (!serviceMetadata.isPresent() || !serviceMetadata.get().getClusters().containsKey(clusterName)) {
throwHealthCheckerException(fullServiceName, clusterName);
}
ClusterMetadata clusterMetadata = serviceMetadata.get().getClusters().get(clusterName);
if (!HealthCheckType.NONE.name().equals(clusterMetadata.getHealthyCheckType())) {
throwHealthCheckerException(fullServiceName, clusterName);
}
String clientId = IpPortBasedClient.getClientId(ip + InternetAddressUtil.IP_PORT_SPLITER + port, false);
Client client = clientManager.getClient(clientId);
if (null == client) {
return;
}
InstancePublishInfo oldInstance = client.getInstancePublishInfo(service);
if (null == oldInstance) {
return;
}
Instance newInstance = InstanceUtil.parseToApiInstance(service, oldInstance);
newInstance.setHealthy(healthy);
clientOperationService.registerInstance(service, newInstance, clientId);
}
|
@Test
void testUpdateHealthStatusForPersistentInstance() {
try {
ServiceMetadata metadata = new ServiceMetadata();
Map<String, ClusterMetadata> clusterMap = new HashMap<>(2);
ClusterMetadata cluster = Mockito.mock(ClusterMetadata.class);
clusterMap.put("C", cluster);
metadata.setClusters(clusterMap);
Instance instance = new Instance();
instance.setIp("1.1.1.1");
instance.setPort(8080);
Mockito.when(cluster.getHealthyCheckType()).thenReturn(HealthCheckType.NONE.name());
Mockito.when(metadataManager.getServiceMetadata(Mockito.any())).thenReturn(Optional.of(metadata));
ConnectionBasedClient client = Mockito.mock(ConnectionBasedClient.class);
Mockito.when(clientManager.getClient(Mockito.anyString())).thenReturn(client);
InstancePublishInfo instancePublishInfo = new InstancePublishInfo();
instancePublishInfo.setExtendDatum(new HashMap<>(2));
Mockito.when(client.getInstancePublishInfo(Mockito.any())).thenReturn(instancePublishInfo);
healthOperatorV2.updateHealthStatusForPersistentInstance("A", "B", "C", "1.1.1.1", 8080, true);
} catch (NacosException e) {
e.printStackTrace();
fail(e.getMessage());
}
}
|
@Override
public void processElement(WindowedValue<InputT> compressedElem) {
if (observesWindow) {
for (WindowedValue<InputT> elem : compressedElem.explodeWindows()) {
invokeProcessElement(elem);
}
} else {
invokeProcessElement(compressedElem);
}
}
|
@Test
public void testTimerSet() {
WindowFn<?, ?> windowFn = new GlobalWindows();
DoFnWithTimers<GlobalWindow> fn = new DoFnWithTimers(windowFn.windowCoder());
DoFnRunner<String, String> runner =
new SimpleDoFnRunner<>(
null,
fn,
NullSideInputReader.empty(),
null,
null,
Collections.emptyList(),
mockStepContext,
null,
Collections.emptyMap(),
WindowingStrategy.of(new GlobalWindows()),
DoFnSchemaInformation.create(),
Collections.emptyMap());
    // Setting the timer needs the current time, as the timer is set relative to it
Instant currentTime = new Instant(42);
when(mockTimerInternals.currentInputWatermarkTime()).thenReturn(currentTime);
runner.processElement(WindowedValue.valueInGlobalWindow("anyValue"));
verify(mockTimerInternals)
.setTimer(
StateNamespaces.window(new GlobalWindows().windowCoder(), GlobalWindow.INSTANCE),
TimerDeclaration.PREFIX + DoFnWithTimers.TIMER_ID,
"",
currentTime.plus(DoFnWithTimers.TIMER_OFFSET),
currentTime.plus(DoFnWithTimers.TIMER_OFFSET),
TimeDomain.EVENT_TIME);
}
|
public boolean quoteReservedWords() {
return databaseInterface.quoteReservedWords();
}
|
@Test
public void testQuoteReservedWords() {
DatabaseMeta databaseMeta = mock( DatabaseMeta.class );
doCallRealMethod().when( databaseMeta ).quoteReservedWords( any( RowMetaInterface.class ) );
doCallRealMethod().when( databaseMeta ).quoteField( anyString() );
doCallRealMethod().when( databaseMeta ).setDatabaseInterface( any( DatabaseInterface.class ) );
doReturn( "\"" ).when( databaseMeta ).getStartQuote();
doReturn( "\"" ).when( databaseMeta ).getEndQuote();
final DatabaseInterface databaseInterface = mock( DatabaseInterface.class );
doReturn( true ).when( databaseInterface ).isQuoteAllFields();
databaseMeta.setDatabaseInterface( databaseInterface );
final RowMeta fields = new RowMeta();
for ( int i = 0; i < 10; i++ ) {
final ValueMetaInterface valueMeta = new ValueMetaNone( "test_" + i );
fields.addValueMeta( valueMeta );
}
for ( int i = 0; i < 10; i++ ) {
databaseMeta.quoteReservedWords( fields );
}
for ( int i = 0; i < 10; i++ ) {
databaseMeta.quoteReservedWords( fields );
final String name = fields.getValueMeta( i ).getName();
// check valueMeta index in list
assertTrue( name.contains( "test_" + i ) );
// check valueMeta is found by quoted name
assertNotNull( fields.searchValueMeta( name ) );
}
}
|
public static AwsCredentialsProvider create(boolean isCloud,
@Nullable String stsRegion,
@Nullable String accessKey,
@Nullable String secretKey,
@Nullable String assumeRoleArn) {
AwsCredentialsProvider awsCredentials = isCloud ? getCloudAwsCredentialsProvider(accessKey, secretKey) :
getAwsCredentialsProvider(accessKey, secretKey);
// Apply the Assume Role ARN Authorization if specified. All AWSCredentialsProviders support this.
if (!isNullOrEmpty(assumeRoleArn) && !isNullOrEmpty(stsRegion)) {
LOG.debug("Creating cross account assume role credentials");
return buildStsCredentialsProvider(awsCredentials, stsRegion, assumeRoleArn, accessKey);
}
return awsCredentials;
}
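// Usage note (a sketch of the observable behaviour, not an addition to the API): the STS
// assume-role wrapper is only applied when both assumeRoleArn and stsRegion are non-empty;
// e.g. create(false, null, null, null, "arn:aws:iam::123456789012:role/example") (hypothetical ARN)
// still returns the plain provider because stsRegion is null.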
|
@Test
public void testAutomaticAuth() {
assertThat(AWSAuthFactory.create(false, null, null, null, null))
.isExactlyInstanceOf(DefaultCredentialsProvider.class);
}
|
@Override
public SubClusterId getHomeSubcluster(
ApplicationSubmissionContext appSubmissionContext,
List<SubClusterId> blackListSubClusters) throws YarnException {
// null checks and default-queue behavior
validate(appSubmissionContext);
List<ResourceRequest> rrList =
appSubmissionContext.getAMContainerResourceRequests();
// Fast path for FailForward to WeightedRandomRouterPolicy
if (rrList == null || rrList.isEmpty() || (rrList.size() == 1
&& ResourceRequest.isAnyLocation(rrList.get(0).getResourceName()))) {
return super.getHomeSubcluster(appSubmissionContext, blackListSubClusters);
}
if (rrList.size() != 3) {
throw new FederationPolicyException(
"Invalid number of resource requests: " + rrList.size());
}
Map<SubClusterId, SubClusterInfo> activeSubClusters = getActiveSubclusters();
Set<SubClusterId> validSubClusters = activeSubClusters.keySet();
FederationPolicyUtils.validateSubClusterAvailability(activeSubClusters.keySet(),
blackListSubClusters);
if (blackListSubClusters != null) {
      // Remove the blacklisted SubClusters from the active SubClusters fetched from the StateStore
validSubClusters.removeAll(blackListSubClusters);
}
try {
// With three requests, this has been processed by the
// ResourceRequestInterceptorREST, and should have
// node, rack, and any
SubClusterId targetId = null;
ResourceRequest nodeRequest = null;
ResourceRequest rackRequest = null;
ResourceRequest anyRequest = null;
for (ResourceRequest rr : rrList) {
// Handle "node" requests
try {
targetId = resolver.getSubClusterForNode(rr.getResourceName());
nodeRequest = rr;
} catch (YarnException e) {
LOG.error("Cannot resolve node : {}.", e.getMessage());
}
// Handle "rack" requests
try {
resolver.getSubClustersForRack(rr.getResourceName());
rackRequest = rr;
} catch (YarnException e) {
LOG.error("Cannot resolve rack : {}.", e.getMessage());
}
// Handle "ANY" requests
if (ResourceRequest.isAnyLocation(rr.getResourceName())) {
anyRequest = rr;
continue;
}
}
if (nodeRequest == null) {
throw new YarnException("Missing node request.");
}
if (rackRequest == null) {
throw new YarnException("Missing rack request.");
}
if (anyRequest == null) {
throw new YarnException("Missing any request.");
}
LOG.info("Node request: {} , Rack request: {} , Any request: {}.",
nodeRequest.getResourceName(), rackRequest.getResourceName(),
anyRequest.getResourceName());
// Handle "node" requests
if (validSubClusters.contains(targetId) && enabledSCs
.contains(targetId)) {
LOG.info("Node {} is in SubCluster: {}.", nodeRequest.getResourceName(), targetId);
return targetId;
} else {
throw new YarnException("The node " + nodeRequest.getResourceName()
+ " is in a blacklist SubCluster or not active. ");
}
} catch (YarnException e) {
LOG.error("Validating resource requests failed, " +
"Falling back to WeightedRandomRouterPolicy placement : {}.", e.getMessage());
// FailForward to WeightedRandomRouterPolicy
// Overwrite request to use a default ANY
ResourceRequest amReq = Records.newRecord(ResourceRequest.class);
amReq.setPriority(appSubmissionContext.getPriority());
amReq.setResourceName(ResourceRequest.ANY);
amReq.setCapability(appSubmissionContext.getResource());
amReq.setNumContainers(1);
amReq.setRelaxLocality(true);
amReq.setNodeLabelExpression(appSubmissionContext.getNodeLabelExpression());
amReq.setExecutionTypeRequest(ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED));
appSubmissionContext.setAMContainerResourceRequests(Collections.singletonList(amReq));
return super.getHomeSubcluster(appSubmissionContext, blackListSubClusters);
}
}
|
@Test
public void testNodeNotInPolicy() throws YarnException {
// Blacklist SubCluster3
String subClusterToBlacklist = "subcluster3";
// Remember the current value of subcluster3
Float value =
getPolicyInfo().getRouterPolicyWeights().get(subClusterToBlacklist);
getPolicyInfo().getRouterPolicyWeights().remove(subClusterToBlacklist);
initializePolicy(new YarnConfiguration());
FederationPoliciesTestUtil
.initializePolicyContext(getFederationPolicyContext(), getPolicy(),
getPolicyInfo(), getActiveSubclusters(), new Configuration());
List<ResourceRequest> requests = new ArrayList<ResourceRequest>();
boolean relaxLocality = true;
requests.add(ResourceRequest
.newInstance(Priority.UNDEFINED, "node4", Resource.newInstance(10, 1),
1, relaxLocality));
requests.add(ResourceRequest
.newInstance(Priority.UNDEFINED, "rack1", Resource.newInstance(10, 1),
1));
requests.add(ResourceRequest
.newInstance(Priority.UNDEFINED, ResourceRequest.ANY,
Resource.newInstance(10, 1), 1));
ApplicationSubmissionContext asc = ApplicationSubmissionContext
.newInstance(null, null, null, null, null, false, false, 0,
Resources.none(), null, false, null, null);
asc.setAMContainerResourceRequests(requests);
try {
SubClusterId targetId =
((FederationRouterPolicy) getPolicy()).getHomeSubcluster(asc, null);
      // The selected subcluster must not be the same as the blacklisted one.
Assert.assertNotEquals(targetId.getId(), subClusterToBlacklist);
} catch (FederationPolicyException e) {
Assert.fail();
}
    // Restore the previous value for the other tests
getPolicyInfo().getRouterPolicyWeights()
.put(new SubClusterIdInfo(subClusterToBlacklist), value);
}
|
public String abbreviate(String fqClassName) {
StringBuilder buf = new StringBuilder(targetLength);
if (fqClassName == null) {
throw new IllegalArgumentException("Class name may not be null");
}
int inLen = fqClassName.length();
if (inLen < targetLength) {
return fqClassName;
}
int[] dotIndexesArray = new int[ClassicConstants.MAX_DOTS];
// a.b.c contains 2 dots but 2+1 parts.
// see also http://jira.qos.ch/browse/LBCLASSIC-110
int[] lengthArray = new int[ClassicConstants.MAX_DOTS + 1];
int dotCount = computeDotIndexes(fqClassName, dotIndexesArray);
// System.out.println();
// System.out.println("Dot count for [" + className + "] is " + dotCount);
        // if there are no dots then abbreviation is not possible
if (dotCount == 0) {
return fqClassName;
}
// printArray("dotArray: ", dotArray);
computeLengthArray(fqClassName, dotIndexesArray, lengthArray, dotCount);
// printArray("lengthArray: ", lengthArray);
for (int i = 0; i <= dotCount; i++) {
if (i == 0) {
buf.append(fqClassName.substring(0, lengthArray[i] - 1));
} else {
buf.append(fqClassName.substring(dotIndexesArray[i - 1],
dotIndexesArray[i - 1] + lengthArray[i]));
}
// System.out.println("i=" + i + ", buf=" + buf);
}
return buf.toString();
}
|
@Test
public void testOneDot() {
{
TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(1);
String name = "hello.world";
assertEquals("h.world", abbreviator.abbreviate(name));
}
{
TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(1);
String name = "h.world";
assertEquals("h.world", abbreviator.abbreviate(name));
}
{
TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(1);
String name = ".world";
assertEquals(".world", abbreviator.abbreviate(name));
}
}
|
@Override
public boolean contains(Object o) {
if (!(o instanceof Integer))
return false;
int i = (int) o;
return contains(i);
}
|
@Test
public void contains() throws Exception {
RangeSet rs = new RangeSet(4);
assertFalse(rs.contains(5));
assertTrue(rs.contains(1));
}
|
@Override
public int readUnsignedByte()
throws EOFException {
if (availableLong() < 1) {
throw new EOFException();
}
return _dataBuffer.getByte(_currentOffset++) & 0xFF;
}
|
@Test
void testReadUnsignedByte()
throws EOFException {
int read = _dataBufferPinotInputStream.readUnsignedByte();
assertEquals(read, _byteBuffer.get(0) & 0xFF);
assertEquals(_dataBufferPinotInputStream.getCurrentOffset(), 1);
}
|
public static <K, V> Read<K, V> read() {
return new AutoValue_KafkaIO_Read.Builder<K, V>()
.setTopics(new ArrayList<>())
.setTopicPartitions(new ArrayList<>())
.setConsumerFactoryFn(KafkaIOUtils.KAFKA_CONSUMER_FACTORY_FN)
.setConsumerConfig(KafkaIOUtils.DEFAULT_CONSUMER_PROPERTIES)
.setMaxNumRecords(Long.MAX_VALUE)
.setCommitOffsetsInFinalizeEnabled(false)
.setDynamicRead(false)
.setTimestampPolicyFactory(TimestampPolicyFactory.withProcessingTime())
.setConsumerPollingTimeout(2L)
.setRedistributed(false)
.setAllowDuplicates(false)
.setRedistributeNumKeys(0)
.build();
}
|
@Test
public void testWithValidConsumerPollingTimeout() {
KafkaIO.Read<Integer, Long> reader =
KafkaIO.<Integer, Long>read().withConsumerPollingTimeout(15L);
assertEquals(15, reader.getConsumerPollingTimeout());
}
|
public static PDImageXObject createFromImage(PDDocument document, BufferedImage image)
throws IOException
{
return createFromImage(document, image, 0.75f);
}
|
@Test
void testCreateFromImage256() throws IOException
{
PDDocument document = new PDDocument();
BufferedImage image = ImageIO.read(JPEGFactoryTest.class.getResourceAsStream("jpeg256.jpg"));
assertEquals(1, image.getColorModel().getNumComponents());
PDImageXObject ximage = JPEGFactory.createFromImage(document, image);
validate(ximage, 8, 344, 287, "jpg", PDDeviceGray.INSTANCE.getName());
doWritePDF(document, ximage, TESTRESULTSDIR, "jpeg256.pdf");
}
|
public static SSLHandlerFactory createRestClientSSLEngineFactory(final Configuration config)
throws Exception {
ClientAuth clientAuth =
SecurityOptions.isRestSSLAuthenticationEnabled(config)
? ClientAuth.REQUIRE
: ClientAuth.NONE;
SslContext sslContext = createRestNettySSLContext(config, true, clientAuth);
if (sslContext == null) {
throw new IllegalConfigurationException("SSL is not enabled for REST endpoints.");
}
return new SSLHandlerFactory(sslContext, -1, -1);
}
|
@Test
void testRESTClientSSLMissingPassword() {
Configuration config = new Configuration();
config.set(SecurityOptions.SSL_REST_ENABLED, true);
config.set(SecurityOptions.SSL_REST_TRUSTSTORE, TRUST_STORE_PATH);
assertThatThrownBy(() -> SSLUtils.createRestClientSSLEngineFactory(config))
.isInstanceOf(IllegalConfigurationException.class);
}
|
public void importCounters(String[] counterNames, String[] counterKinds, long[] counterDeltas) {
final int length = counterNames.length;
if (counterKinds.length != length || counterDeltas.length != length) {
throw new AssertionError("array lengths do not match");
}
for (int i = 0; i < length; ++i) {
final CounterName name = CounterName.named(counterPrefix + counterNames[i]);
final String kind = counterKinds[i];
final long delta = counterDeltas[i];
switch (kind) {
case "sum":
counterFactory.longSum(name).addValue(delta);
break;
case "max":
counterFactory.longMax(name).addValue(delta);
break;
case "min":
counterFactory.longMin(name).addValue(delta);
break;
default:
throw new IllegalArgumentException("unsupported counter kind: " + kind);
}
}
}
|
@Test
public void testSingleCounter() throws Exception {
String[] names = {"sum_counter"};
String[] kinds = {"sum"};
long[] deltas = {122};
counters.importCounters(names, kinds, deltas);
counterSet.extractUpdates(false, mockUpdateExtractor);
verify(mockUpdateExtractor)
.longSum(named("stageName-systemName-dataset-sum_counter"), false, 122L);
verifyNoMoreInteractions(mockUpdateExtractor);
}
|
public StepExpression createExpression(StepDefinition stepDefinition) {
List<ParameterInfo> parameterInfos = stepDefinition.parameterInfos();
if (parameterInfos.isEmpty()) {
return createExpression(
stepDefinition.getPattern(),
stepDefinitionDoesNotTakeAnyParameter(stepDefinition),
false);
}
ParameterInfo parameterInfo = parameterInfos.get(parameterInfos.size() - 1);
return createExpression(
stepDefinition.getPattern(),
parameterInfo.getTypeResolver()::resolve,
parameterInfo.isTransposed());
}
|
@Test
void docstring_expression_transform_doc_string_to_string() {
String docString = "A rather long and boring string of documentation";
StepDefinition stepDefinition = new StubStepDefinition("Given some stuff:", String.class);
StepExpression expression = stepExpressionFactory.createExpression(stepDefinition);
List<Argument> match = expression.match("Given some stuff:", docString, null);
assertThat(match.get(0).getValue(), is(equalTo(docString)));
}
|
@CanIgnoreReturnValue
public final Ordered containsExactlyElementsIn(@Nullable Iterable<?> expected) {
return containsExactlyElementsIn(expected, false);
}
|
@Test
@SuppressWarnings("ContainsExactlyNone")
public void iterableContainsExactlyElementsInWithEmptyExpected() {
expectFailureWhenTestingThat(asList("foo")).containsExactlyElementsIn(ImmutableList.of());
assertFailureKeys("expected to be empty", "but was");
}
|
protected AbstractContainerCollector(NodeEngine nodeEngine) {
this.operationExecutor = ((OperationServiceImpl) nodeEngine.getOperationService()).getOperationExecutor();
this.partitionService = nodeEngine.getPartitionService();
this.mergePolicyProvider = nodeEngine.getSplitBrainMergePolicyProvider();
}
|
@Test
public void testAbstractContainerCollector() {
TestContainerCollector collector = new TestContainerCollector(nodeEngine, true, true);
assertEqualsStringFormat("Expected the to have %d containers, but found %d", 1, collector.containers.size());
collector.run();
assertEqualsStringFormat("Expected %d merging values, but found %d", 1L, collector.getMergingValueCount());
assertEquals("Expected the collected containers to be removed from the container map", 0, collector.containers.size());
}
|
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new);
String tableNameSuffix = String.valueOf(doSharding(parseDate(shardingValue.getValue())));
return ShardingAutoTableAlgorithmUtils.findMatchedTargetName(availableTargetNames, tableNameSuffix, shardingValue.getDataNodeInfo()).orElse(null);
}
|
@Test
void assertRangeDoShardingWithoutLowerBound() {
List<String> availableTargetNames = Arrays.asList("t_order_0", "t_order_1", "t_order_2", "t_order_3");
Collection<String> actual = shardingAlgorithm.doSharding(availableTargetNames,
new RangeShardingValue<>("t_order", "create_time", DATA_NODE_INFO, Range.lessThan("2020-01-01 00:00:11")));
assertThat(actual.size(), is(4));
assertTrue(actual.contains("t_order_0"));
assertTrue(actual.contains("t_order_1"));
assertTrue(actual.contains("t_order_2"));
assertTrue(actual.contains("t_order_3"));
}
|
public FileMetadata fileMetadata() throws IOException {
if (knownFileMetadata == null) {
int footerSize = footerSize();
byte[] footer = readInput(fileSize - footerSize, footerSize);
checkMagic(footer, PuffinFormat.FOOTER_START_MAGIC_OFFSET);
int footerStructOffset = footerSize - PuffinFormat.FOOTER_STRUCT_LENGTH;
checkMagic(footer, footerStructOffset + PuffinFormat.FOOTER_STRUCT_MAGIC_OFFSET);
PuffinCompressionCodec footerCompression = PuffinCompressionCodec.NONE;
for (Flag flag : decodeFlags(footer, footerStructOffset)) {
switch (flag) {
case FOOTER_PAYLOAD_COMPRESSED:
footerCompression = PuffinFormat.FOOTER_COMPRESSION_CODEC;
break;
default:
throw new IllegalStateException("Unsupported flag: " + flag);
}
}
int footerPayloadSize =
PuffinFormat.readIntegerLittleEndian(
footer, footerStructOffset + PuffinFormat.FOOTER_STRUCT_PAYLOAD_SIZE_OFFSET);
Preconditions.checkState(
footerSize
== PuffinFormat.FOOTER_START_MAGIC_LENGTH
+ footerPayloadSize
+ PuffinFormat.FOOTER_STRUCT_LENGTH,
"Unexpected footer payload size value %s for footer size %s",
footerPayloadSize,
footerSize);
ByteBuffer footerPayload = ByteBuffer.wrap(footer, 4, footerPayloadSize);
ByteBuffer footerJson = PuffinFormat.decompress(footerCompression, footerPayload);
this.knownFileMetadata = parseFileMetadata(footerJson);
}
return knownFileMetadata;
}
|
@Test
public void testValidateFooterSizeValue() throws Exception {
// Ensure the definition of SAMPLE_METRIC_DATA_COMPRESSED_ZSTD_FOOTER_SIZE remains accurate
InMemoryInputFile inputFile =
new InMemoryInputFile(readTestResource("v1/sample-metric-data-compressed-zstd.bin"));
try (PuffinReader reader =
Puffin.read(inputFile)
.withFooterSize(SAMPLE_METRIC_DATA_COMPRESSED_ZSTD_FOOTER_SIZE)
.build()) {
assertThat(reader.fileMetadata().properties())
.isEqualTo(ImmutableMap.of("created-by", "Test 1234"));
}
}
|
@JsonProperty(FIELD_SCOPE)
public abstract String scope();
|
@Test
void testExplicitScope() {
final TestScopedEntity scopedEntity = TestScopedEntity.builder().title(TITLE).scope(ARBITRARY_SCOPE).build();
assertEquals(ARBITRARY_SCOPE, scopedEntity.scope());
assertEquals(TITLE, scopedEntity.title());
}
|
@ScalarFunction
@SqlType(ColorType.NAME)
public static long rgb(@SqlType(StandardTypes.BIGINT) long red, @SqlType(StandardTypes.BIGINT) long green, @SqlType(StandardTypes.BIGINT) long blue)
{
checkCondition(red >= 0 && red <= 255, INVALID_FUNCTION_ARGUMENT, "red must be between 0 and 255");
checkCondition(green >= 0 && green <= 255, INVALID_FUNCTION_ARGUMENT, "green must be between 0 and 255");
checkCondition(blue >= 0 && blue <= 255, INVALID_FUNCTION_ARGUMENT, "blue must be between 0 and 255");
return (red << 16) | (green << 8) | blue;
}
|
@Test
public void testToRgb()
{
assertEquals(rgb(0xFF, 0, 0), 0xFF_00_00);
assertEquals(rgb(0, 0xFF, 0), 0x00_FF_00);
assertEquals(rgb(0, 0, 0xFF), 0x00_00_FF);
}
|
public ProjectStatusResponse.ProjectStatus format() {
if (!optionalMeasureData.isPresent()) {
return newResponseWithoutQualityGateDetails();
}
JsonObject json = JsonParser.parseString(optionalMeasureData.get()).getAsJsonObject();
ProjectStatusResponse.Status qualityGateStatus = measureLevelToQualityGateStatus(json.get("level").getAsString());
projectStatusBuilder.setStatus(qualityGateStatus);
projectStatusBuilder.setCaycStatus(caycStatus.toString());
formatIgnoredConditions(json);
formatConditions(json.getAsJsonArray("conditions"));
formatPeriods();
return projectStatusBuilder.build();
}
|
@Test
public void fail_when_measure_level_is_unknown() {
String measureData = "{\n" +
" \"level\": \"UNKNOWN\",\n" +
" \"conditions\": [\n" +
" {\n" +
" \"metric\": \"new_coverage\",\n" +
" \"op\": \"LT\",\n" +
" \"period\": 1,\n" +
" \"warning\": \"80\",\n" +
" \"error\": \"85\",\n" +
" \"actual\": \"82.2985024398452\",\n" +
" \"level\": \"ERROR\"\n" +
" }\n" +
" ]\n" +
"}";
underTest = newQualityGateDetailsFormatter(measureData, new SnapshotDto());
assertThatThrownBy(() -> underTest.format())
.isInstanceOf(IllegalStateException.class)
.hasMessageContaining("Unknown quality gate status 'UNKNOWN'");
}
|
@VisibleForTesting
void validateMenu(Long parentId, String name, Long id) {
MenuDO menu = menuMapper.selectByParentIdAndName(parentId, name);
if (menu == null) {
return;
}
        // If id is null, there is no need to check whether the existing menu has the same id
if (id == null) {
throw exception(MENU_NAME_DUPLICATE);
}
if (!menu.getId().equals(id)) {
throw exception(MENU_NAME_DUPLICATE);
}
}
|
@Test
public void testValidateMenu_success() {
        // mock parent and child menus
MenuDO sonMenu = createParentAndSonMenu();
        // prepare parameters
Long parentId = sonMenu.getParentId();
Long otherSonMenuId = randomLongId();
String otherSonMenuName = randomString();
        // invoke; no assertion needed
menuService.validateMenu(parentId, otherSonMenuName, otherSonMenuId);
}
|
@Override
public SelType call(String methodName, SelType[] args) {
if (args.length == 1) {
if ("withZone".equals(methodName)) {
return new SelJodaDateTimeFormatter(
val.withZone(((SelJodaDateTimeZone) args[0]).getInternalVal()));
} else if ("parseDateTime".equals(methodName)) {
switch (args[0].type()) {
case STRING:
case LONG:
return SelJodaDateTime.of(val.parseDateTime(args[0].toString()));
}
} else if ("parseMillis".equals(methodName)) {
return SelLong.of(val.parseMillis(((SelString) args[0]).getInternalVal()));
} else if ("forPattern".equals(methodName)) {
return new SelJodaDateTimeFormatter(
DateTimeFormat.forPattern(((SelString) args[0]).getInternalVal()));
} else if ("print".equals(methodName)) {
switch (args[0].type()) {
case LONG:
return SelString.of(val.print(((SelLong) args[0]).longVal()));
case DATETIME:
return SelString.of(val.print(((SelJodaDateTime) args[0]).getInternalVal()));
}
}
}
throw new UnsupportedOperationException(
type()
+ " DO NOT support calling method: "
+ methodName
+ " with args: "
+ Arrays.toString(args));
}
|
@Test(expected = UnsupportedOperationException.class)
public void testInvalidCallMethod() {
one.call("invalid", new SelType[] {});
}
|
public CreateTableBuilder addPkColumn(ColumnDef columnDef, ColumnFlag... flags) {
pkColumnDefs.add(requireNonNull(columnDef, "column def can't be null"));
addFlags(columnDef, flags);
return this;
}
|
@Test
public void addPkColumn_throws_NPE_if_ColumnDef_is_null() {
assertThatThrownBy(() -> underTest.addPkColumn(null))
.isInstanceOf(NullPointerException.class)
.hasMessageContaining("column def can't be null");
}
|