focal_method | test_case
---|---
public FEELFnResult<Boolean> invoke(@ParameterName( "range" ) Range range, @ParameterName( "point" ) Comparable point) {
if ( point == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null"));
}
if ( range == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null"));
}
try {
boolean result = ( range.getLowBoundary() == Range.RangeBoundary.CLOSED && point.compareTo( range.getLowEndPoint() ) == 0 );
return FEELFnResult.ofResult( result );
} catch( Exception e ) {
// points are not comparable
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range"));
}
}
|
@Test
void invokeParamsCantBeCompared() {
FunctionTestUtil.assertResultError( startedByFunction.invoke(
new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
new RangeImpl( Range.RangeBoundary.CLOSED, 1, 2, Range.RangeBoundary.CLOSED ) ), InvalidParametersEvent.class );
}
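A happy-path counterpart is sketched below; it reuses the RangeImpl constructor from the test above and assumes a FunctionTestUtil.assertResult helper analogous to the assertResultError already used there.
@Test
void invokePointEqualsClosedLowEndpoint() {
    // "a" equals the closed low endpoint of ["a".."f"], so the result should be true.
    FunctionTestUtil.assertResult(startedByFunction.invoke(
            new RangeImpl(Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED), "a"), Boolean.TRUE);
    // "b" is inside the range but not equal to the low endpoint, so the result should be false.
    FunctionTestUtil.assertResult(startedByFunction.invoke(
            new RangeImpl(Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED), "b"), Boolean.FALSE);
}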
|
protected static List<UnidirectionalEvent> parseUnidirectionalEventTemplateOutput(String content) throws Exception {
List<UnidirectionalEvent> results = new ArrayList<>();
JsonNode root = YAML_MAPPER.readTree(sanitizeYamlContent(content));
if (root.getNodeType() == JsonNodeType.ARRAY) {
Iterator<JsonNode> examples = root.elements();
while (examples.hasNext()) {
JsonNode example = examples.next();
// Deal with parsing message.
JsonNode message = example.path("message");
EventMessage event = new EventMessage();
JsonNode headersNode = message.path(HEADERS_NODE);
event.setHeaders(buildHeaders(headersNode));
event.setMediaType("application/json");
event.setContent(getMessageContent("application/json", message.path("payload")));
results.add(new UnidirectionalEvent(event));
}
}
return results;
}
|
@Test
void testParseEventMessageOutputYaml() {
String aiResponse = """
- example: 1
message:
headers:
my-app-header: 42
payload:
id: "12345"
sendAt: "2022-01-01T10:00:00Z"
fullName: "John Doe"
email: "[email protected]"
age: 25
- example: 2
message:
headers:
my-app-header: 75
payload:
id: "98765"
sendAt: "2022-01-02T14:30:00Z"
fullName: "Jane Smith"
email: "[email protected]"
age: 28
""";
Service service = new Service();
service.setType(ServiceType.EVENT);
Operation operation = new Operation();
operation.setName("SUBSCRIBE user/signedup");
List<UnidirectionalEvent> results = null;
try {
results = AICopilotHelper.parseUnidirectionalEventTemplateOutput(aiResponse);
} catch (Exception e) {
fail("Exception should not be thrown here");
}
assertNotNull(results);
assertEquals(2, results.size());
// Check that message 1 has been correctly parsed.
EventMessage event1 = results.get(0).getEventMessage();
assertEquals(1, event1.getHeaders().size());
assertNotNull(event1.getContent());
}
|
@Override
public void destroy() {
throw new UnsupportedOperationException();
}
|
@Test(expected = UnsupportedOperationException.class)
public void testDestroy() {
context.destroy();
}
|
public WithJsonPath(JsonPath jsonPath, Matcher<T> resultMatcher) {
this.jsonPath = jsonPath;
this.resultMatcher = resultMatcher;
}
|
@Test
public void shouldMatchExistingStringJsonPath() {
assertThat(BOOKS_JSON, withJsonPath("$.expensive"));
assertThat(BOOKS_JSON, withJsonPath("$.store.bicycle"));
assertThat(BOOKS_JSON, withJsonPath("$.store.book[2].title"));
assertThat(BOOKS_JSON, withJsonPath("$.store.book[*].author"));
}
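A negative counterpart could look like the sketch below; it assumes Hamcrest's not() is statically imported alongside the matchers already in scope.
@Test
public void shouldNotMatchMissingJsonPath() {
    // A definite path that resolves to nothing should make the matcher fail.
    assertThat(BOOKS_JSON, not(withJsonPath("$.not-there")));
    assertThat(BOOKS_JSON, not(withJsonPath("$.store.book[42].title")));
}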
|
public static Map<String, Object> toMap(final Object object) {
try {
String json = MAPPER.writeValueAsString(object);
final MapType mapType = MAPPER.getTypeFactory().constructMapType(LinkedHashMap.class, String.class, Object.class);
return MAPPER.readValue(json, mapType);
} catch (IOException e) {
LOG.warn("write to map error: " + object, e);
return new LinkedHashMap<>();
}
}
|
@Test
public void testToMap() {
TestObject testObject = JsonUtils.jsonToObject(EXPECTED_JSON, TestObject.class);
Map<String, Object> testObjectMap = JsonUtils.toMap(testObject);
assertNotNull(testObjectMap);
assertEquals(testObjectMap.get("name"), "test object");
}
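A self-contained sketch of the same serialize-then-read-back technique used by toMap above; the Person class, class name, and local ObjectMapper are illustrative only.
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.MapType;
import java.util.LinkedHashMap;
import java.util.Map;

public class ToMapSketch {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    public static class Person {
        public String name = "test object";
        public int age = 42;
    }

    public static void main(String[] args) throws Exception {
        // Serialize the object to JSON, then read it back as an ordered Map,
        // mirroring the toMap method shown above.
        String json = MAPPER.writeValueAsString(new Person());
        MapType mapType = MAPPER.getTypeFactory()
                .constructMapType(LinkedHashMap.class, String.class, Object.class);
        Map<String, Object> map = MAPPER.readValue(json, mapType);
        System.out.println(map); // {name=test object, age=42}
    }
}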
|
public void assignStates() {
checkStateMappingCompleteness(allowNonRestoredState, operatorStates, tasks);
Map<OperatorID, OperatorState> localOperators = new HashMap<>(operatorStates);
// find the states of all operators belonging to this task and compute additional
// information in first pass
for (ExecutionJobVertex executionJobVertex : tasks) {
List<OperatorIDPair> operatorIDPairs = executionJobVertex.getOperatorIDs();
Map<OperatorID, OperatorState> operatorStates =
CollectionUtil.newHashMapWithExpectedSize(operatorIDPairs.size());
for (OperatorIDPair operatorIDPair : operatorIDPairs) {
OperatorID operatorID =
operatorIDPair
.getUserDefinedOperatorID()
.filter(localOperators::containsKey)
.orElse(operatorIDPair.getGeneratedOperatorID());
OperatorState operatorState = localOperators.remove(operatorID);
if (operatorState == null) {
operatorState =
new OperatorState(
operatorID,
executionJobVertex.getParallelism(),
executionJobVertex.getMaxParallelism());
}
operatorStates.put(operatorIDPair.getGeneratedOperatorID(), operatorState);
}
final TaskStateAssignment stateAssignment =
new TaskStateAssignment(
executionJobVertex,
operatorStates,
consumerAssignment,
vertexAssignments);
vertexAssignments.put(executionJobVertex, stateAssignment);
for (final IntermediateResult producedDataSet : executionJobVertex.getInputs()) {
consumerAssignment.put(producedDataSet.getId(), stateAssignment);
}
}
// repartition state
for (TaskStateAssignment stateAssignment : vertexAssignments.values()) {
if (stateAssignment.hasNonFinishedState
// FLINK-31963: We need to run repartitioning for stateless operators that have
// upstream output or downstream input states.
|| stateAssignment.hasUpstreamOutputStates()
|| stateAssignment.hasDownstreamInputStates()) {
assignAttemptState(stateAssignment);
}
}
// actually assign the state
for (TaskStateAssignment stateAssignment : vertexAssignments.values()) {
// If the upstream has output states or the downstream has input states, even an empty task
// state should be assigned to the current task, in order to notify the task that old
// states will be sent to it and will likely need to be filtered.
if (stateAssignment.hasNonFinishedState
|| stateAssignment.isFullyFinished
|| stateAssignment.hasUpstreamOutputStates()
|| stateAssignment.hasDownstreamInputStates()) {
assignTaskStateToExecutionJobVertices(stateAssignment);
}
}
}
|
@Test
void testChannelStateAssignmentUpscaling() throws JobException, JobExecutionException {
List<OperatorID> operatorIds = buildOperatorIds(2);
Map<OperatorID, OperatorState> states = buildOperatorStates(operatorIds, 2);
Map<OperatorID, ExecutionJobVertex> vertices =
buildVertices(operatorIds, 3, RANGE, ROUND_ROBIN);
new StateAssignmentOperation(0, new HashSet<>(vertices.values()), states, false)
.assignStates();
for (OperatorID operatorId : operatorIds) {
// input is range partitioned, so there is an overlap
assertState(
vertices, operatorId, states, 0, OperatorSubtaskState::getInputChannelState, 0);
assertState(
vertices,
operatorId,
states,
1,
OperatorSubtaskState::getInputChannelState,
0,
1);
assertState(
vertices, operatorId, states, 2, OperatorSubtaskState::getInputChannelState, 1);
// output is round robin redistributed
assertState(
vertices,
operatorId,
states,
0,
OperatorSubtaskState::getResultSubpartitionState,
0);
assertState(
vertices,
operatorId,
states,
1,
OperatorSubtaskState::getResultSubpartitionState,
1);
assertState(
vertices,
operatorId,
states,
2,
OperatorSubtaskState::getResultSubpartitionState);
}
assertThat(
getAssignedState(vertices.get(operatorIds.get(0)), operatorIds.get(0), 0)
.getOutputRescalingDescriptor())
.isEqualTo(
rescalingDescriptor(to(0), array(mappings(to(0), to(0, 1), to(1))), set()));
assertThat(
getAssignedState(vertices.get(operatorIds.get(0)), operatorIds.get(0), 1)
.getOutputRescalingDescriptor())
.isEqualTo(
rescalingDescriptor(to(1), array(mappings(to(0), to(0, 1), to(1))), set()));
// unmapped subtask index, so nothing to do
assertThat(
getAssignedState(vertices.get(operatorIds.get(0)), operatorIds.get(0), 2)
.getOutputRescalingDescriptor())
.isEqualTo(InflightDataRescalingDescriptor.NO_RESCALE);
assertThat(
getAssignedState(vertices.get(operatorIds.get(1)), operatorIds.get(1), 0)
.getInputRescalingDescriptor())
.isEqualTo(
rescalingDescriptor(to(0), array(mappings(to(0), to(1), to())), set(0, 1)));
assertThat(
getAssignedState(vertices.get(operatorIds.get(1)), operatorIds.get(1), 1)
.getInputRescalingDescriptor())
.isEqualTo(
rescalingDescriptor(
to(0, 1), array(mappings(to(0), to(1), to())), set(0, 1)));
assertThat(
getAssignedState(vertices.get(operatorIds.get(1)), operatorIds.get(1), 2)
.getInputRescalingDescriptor())
.isEqualTo(
rescalingDescriptor(to(1), array(mappings(to(0), to(1), to())), set(0, 1)));
}
|
public static String createToken(Map<String, Object> payload, byte[] key) {
return createToken(null, payload, key);
}
|
@Test
public void createTest(){
byte[] key = "1234".getBytes();
Map<String, Object> map = new HashMap<String, Object>() {
private static final long serialVersionUID = 1L;
{
put("uid", Integer.parseInt("123"));
put("expire_time", System.currentTimeMillis() + 1000 * 60 * 60 * 24 * 15);
}
};
JWTUtil.createToken(map, key);
}
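A round-trip sketch, under the assumptions that this is Hutool's JWTUtil (whose verify(String, byte[]) checks a token against the key) and that JUnit 4's Assert is in scope:
@Test
public void createAndVerifyTest() {
    byte[] key = "1234".getBytes();
    Map<String, Object> map = new HashMap<>();
    map.put("uid", 123);
    // createToken delegates to createToken(null, payload, key) as shown above;
    // verify(...) is assumed to come from the same utility class.
    String token = JWTUtil.createToken(map, key);
    Assert.assertTrue(JWTUtil.verify(token, key));
}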
|
static URL[] saveFilesLocally(String driverJars) {
List<String> listOfJarPaths = Splitter.on(',').trimResults().splitToList(driverJars);
final String destRoot = Files.createTempDir().getAbsolutePath();
List<URL> driverJarUrls = new ArrayList<>();
listOfJarPaths.stream()
.forEach(
jarPath -> {
try {
ResourceId sourceResourceId = FileSystems.matchNewResource(jarPath, false);
@SuppressWarnings("nullness")
File destFile = Paths.get(destRoot, sourceResourceId.getFilename()).toFile();
ResourceId destResourceId =
FileSystems.matchNewResource(destFile.getAbsolutePath(), false);
copy(sourceResourceId, destResourceId);
LOG.info("Localized jar: " + sourceResourceId + " to: " + destResourceId);
driverJarUrls.add(destFile.toURI().toURL());
} catch (IOException e) {
LOG.warn("Unable to copy " + jarPath, e);
}
});
return driverJarUrls.stream().toArray(URL[]::new);
}
|
@Test
public void testSavesFilesAsExpected() throws IOException {
File tempFile1 = temporaryFolder.newFile();
File tempFile2 = temporaryFolder.newFile();
String expectedContent1 = "hello world";
String expectedContent2 = "hello world 2";
Files.write(tempFile1.toPath(), expectedContent1.getBytes(StandardCharsets.UTF_8));
Files.write(tempFile2.toPath(), expectedContent2.getBytes(StandardCharsets.UTF_8));
URL[] urls =
JdbcUtil.saveFilesLocally(tempFile1.getAbsolutePath() + "," + tempFile2.getAbsolutePath());
assertEquals(2, urls.length);
assertEquals(
expectedContent1,
new String(Files.readAllBytes(Paths.get(urls[0].getFile())), StandardCharsets.UTF_8));
assertEquals(
expectedContent2,
new String(Files.readAllBytes(Paths.get(urls[1].getFile())), StandardCharsets.UTF_8));
}
|
@Override
public void accept(final MeterEntity entity, final BucketedValues value) {
if (dataset.size() > 0) {
if (!value.isCompatible(dataset)) {
throw new IllegalArgumentException(
"Incompatible BucketedValues [" + value + "] for current HistogramFunction[" + dataset + "]");
}
}
this.entityId = entity.id();
final long[] values = value.getValues();
for (int i = 0; i < values.length; i++) {
final long bucket = value.getBuckets()[i];
String bucketName = bucket == Long.MIN_VALUE ? Bucket.INFINITE_NEGATIVE : String.valueOf(bucket);
final long bucketValue = values[i];
dataset.valueAccumulation(bucketName, bucketValue);
}
}
|
@Test
public void testFunction() {
HistogramFunctionInst inst = new HistogramFunctionInst();
inst.accept(
MeterEntity.newService("service-test", Layer.GENERAL),
new BucketedValues(
BUCKETS, new long[] {
0,
4,
10,
10
})
);
inst.accept(
MeterEntity.newService("service-test", Layer.GENERAL),
new BucketedValues(
BUCKETS, new long[] {
1,
2,
3,
4
})
);
final int[] results = inst.getDataset().sortedValues(new HeatMap.KeyComparator(true)).stream()
.flatMapToInt(l -> IntStream.of(l.intValue()))
.toArray();
Assertions.assertArrayEquals(new int[] {
1,
6,
13,
14
}, results);
}
|
@VisibleForTesting
List<String> getFuseInfo() {
return mFuseInfo;
}
|
@Test
public void localKernelDataCacheDisabled() {
Assume.assumeTrue(Configuration.getInt(PropertyKey.FUSE_JNIFUSE_LIBFUSE_VERSION) == 2);
try (FuseUpdateChecker checker = getUpdateCheckerWithMountOptions("direct_io")) {
Assert.assertFalse(containsTargetInfo(checker.getFuseInfo(),
FuseUpdateChecker.LOCAL_KERNEL_DATA_CACHE));
}
}
|
public static boolean unblock(
final UnsafeBuffer[] termBuffers,
final UnsafeBuffer logMetaDataBuffer,
final long blockedPosition,
final int termLength)
{
final int positionBitsToShift = LogBufferDescriptor.positionBitsToShift(termLength);
final int blockedTermCount = (int)(blockedPosition >> positionBitsToShift);
final int blockedOffset = (int)blockedPosition & (termLength - 1);
final int activeTermCount = activeTermCount(logMetaDataBuffer);
if (activeTermCount == (blockedTermCount - 1) && blockedOffset == 0)
{
final int currentTermId = termId(rawTailVolatile(logMetaDataBuffer, indexByTermCount(activeTermCount)));
rotateLog(logMetaDataBuffer, activeTermCount, currentTermId);
return true;
}
final int blockedIndex = indexByTermCount(blockedTermCount);
final long rawTail = rawTailVolatile(logMetaDataBuffer, blockedIndex);
final int termId = termId(rawTail);
final int tailOffset = termOffset(rawTail, termLength);
final UnsafeBuffer termBuffer = termBuffers[blockedIndex];
switch (TermUnblocker.unblock(logMetaDataBuffer, termBuffer, blockedOffset, tailOffset, termId))
{
case NO_ACTION:
break;
case UNBLOCKED_TO_END:
rotateLog(logMetaDataBuffer, blockedTermCount, termId);
return true;
case UNBLOCKED:
return true;
}
return false;
}
|
@Test
void shouldNotUnblockWhenPositionHasCompleteMessage()
{
final int blockedOffset = HEADER_LENGTH * 4;
final long blockedPosition = computePosition(TERM_ID_1, blockedOffset, positionBitsToShift, TERM_ID_1);
final int activeIndex = indexByPosition(blockedPosition, positionBitsToShift);
when(termBuffers[activeIndex].getIntVolatile(blockedOffset)).thenReturn(HEADER_LENGTH);
assertFalse(LogBufferUnblocker.unblock(termBuffers, logMetaDataBuffer, blockedPosition, TERM_LENGTH));
final long rawTail = rawTailVolatile(logMetaDataBuffer);
assertEquals(blockedPosition, computePosition(termId(rawTail), blockedOffset, positionBitsToShift, TERM_ID_1));
}
|
public static BinaryTag readTag(final @NotNull ByteBuf buf) {
final byte id = buf.readByte();
if (id >= BINARY_TAG_TYPES.length) {
throw new DecoderException("Invalid binary tag id: " + id);
}
final BinaryTagType<? extends BinaryTag> type = BINARY_TAG_TYPES[id];
try {
return type.read(new ByteBufInputStream(buf));
} catch (final IOException e) {
throw new DecoderException(e);
}
}
|
@Test
void testReadInvalidTagId() {
this.buf.writeByte(BinaryTagTypes.LONG_ARRAY.id() + 1);
assertThrows(DecoderException.class, () -> BufUtil.readTag(this.buf));
}
|
@Override
public String buildContext() {
final String selector = ((Collection<?>) getSource())
.stream()
.map(s -> ((SelectorDO) s).getName())
.collect(Collectors.joining(","));
return String.format("the selector[%s] is %s", selector, StringUtils.lowerCase(getType().getType().toString()));
}
|
@Test
void buildContext() {
String expectMsg = String.format("the selector[%s] is %s", selectorDO.getName(), StringUtils.lowerCase(batchSelectorDeletedEvent.getType().getType().toString()));
String actualMsg = batchSelectorDeletedEvent.buildContext();
assertEquals(expectMsg, actualMsg);
}
|
public void processPriorCommands(final PersistentQueryCleanupImpl queryCleanup) {
try {
final List<QueuedCommand> restoreCommands = commandStore.getRestoreCommands();
final List<QueuedCommand> compatibleCommands = checkForIncompatibleCommands(restoreCommands);
LOG.info("Restoring previous state from {} commands.", compatibleCommands.size());
final Optional<QueuedCommand> terminateCmd =
findTerminateCommand(compatibleCommands, commandDeserializer);
if (terminateCmd.isPresent()) {
LOG.info("Cluster previously terminated: terminating.");
terminateCluster(terminateCmd.get().getAndDeserializeCommand(commandDeserializer));
return;
}
final List<QueuedCommand> compacted = compactor.apply(compatibleCommands);
compacted.forEach(
command -> {
currentCommandRef.set(new Pair<>(command, clock.instant()));
RetryUtil.retryWithBackoff(
maxRetries,
STATEMENT_RETRY_MS,
MAX_STATEMENT_RETRY_MS,
() -> statementExecutor.handleRestore(command),
WakeupException.class
);
currentCommandRef.set(null);
}
);
final List<PersistentQueryMetadata> queries = statementExecutor
.getKsqlEngine()
.getPersistentQueries();
if (commandStore.corruptionDetected()) {
LOG.info("Corruption detected, queries will not be started.");
queries.forEach(QueryMetadata::setCorruptionQueryError);
} else {
LOG.info("Restarting {} queries.", queries.size());
queries.forEach(PersistentQueryMetadata::start);
queryCleanup.cleanupLeakedQueries(queries);
//We only want to clean up if the queries are read properly
//We do not want to clean up potentially important stuff
//when the cluster is in a bad state
}
LOG.info("Restore complete");
} catch (final Exception e) {
LOG.error("Error during restore", e);
throw e;
}
}
|
@Test
public void shouldNotCleanUpInDegradedMode() {
// Given:
when(commandStore.corruptionDetected()).thenReturn(true);
givenQueuedCommands(queuedCommand1, queuedCommand2, queuedCommand3);
when(ksqlEngine.getPersistentQueries()).thenReturn(ImmutableList.of(queryMetadata1, queryMetadata2, queryMetadata3));
// When:
commandRunner.processPriorCommands(persistentQueryCleanupImpl);
// Then:
final InOrder inOrder = inOrder(statementExecutor);
inOrder.verify(statementExecutor).handleRestore(eq(queuedCommand1));
inOrder.verify(statementExecutor).handleRestore(eq(queuedCommand2));
inOrder.verify(statementExecutor).handleRestore(eq(queuedCommand3));
verify(persistentQueryCleanupImpl, never()).cleanupLeakedQueries(any());
}
|
public List<AclInfo> listAcl(String addr, String subjectFilter, String resourceFilter, long millis) throws RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException, InterruptedException, MQBrokerException {
ListAclsRequestHeader requestHeader = new ListAclsRequestHeader(subjectFilter, resourceFilter);
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.AUTH_LIST_ACL, requestHeader);
RemotingCommand response = this.remotingClient.invokeSync(addr, request, millis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
return RemotingSerializable.decodeList(response.getBody(), AclInfo.class);
}
default:
break;
}
throw new MQBrokerException(response.getCode(), response.getRemark());
}
|
@Test
public void assertListAcl() throws RemotingException, InterruptedException, MQBrokerException {
mockInvokeSync();
setResponseBody(Collections.singletonList(createAclInfo()));
List<AclInfo> actual = mqClientAPI.listAcl(defaultBrokerAddr, "", "", defaultTimeout);
assertNotNull(actual);
assertEquals("subject", actual.get(0).getSubject());
assertEquals(1, actual.get(0).getPolicies().size());
}
|
@Override
protected Byte parseSerializeType(String serialization) {
Byte serializeType;
if (SERIALIZE_HESSIAN.equals(serialization)
|| SERIALIZE_HESSIAN2.equals(serialization)) {
serializeType = RemotingConstants.SERIALIZE_CODE_HESSIAN;
} else if (SERIALIZE_PROTOBUF.equals(serialization)) {
serializeType = RemotingConstants.SERIALIZE_CODE_PROTOBUF;
} else if (SERIALIZE_JAVA.equals(serialization)) {
serializeType = RemotingConstants.SERIALIZE_CODE_JAVA;
} else {
serializeType = super.parseSerializeType(serialization);
}
return serializeType;
}
|
@Test(expected = SofaRpcRuntimeException.class)
public void testParseSerializeTypeException() {
ConsumerConfig consumerConfig = new ConsumerConfig().setProtocol("bolt");
ConsumerBootstrap bootstrap = Bootstraps.from(consumerConfig);
BoltClientProxyInvoker invoker = new BoltClientProxyInvoker(bootstrap);
invoker.parseSerializeType("unknown");
}
|
public CompletableFuture<Optional<Account>> getByPhoneNumberIdentifierAsync(final UUID pni) {
return checkRedisThenAccountsAsync(
getByNumberTimer,
() -> redisGetBySecondaryKeyAsync(getAccountMapKey(pni.toString()), redisPniGetTimer),
() -> accounts.getByPhoneNumberIdentifierAsync(pni)
);
}
|
@Test
void testGetAccountByPniBrokenCacheAsync() {
UUID uuid = UUID.randomUUID();
UUID pni = UUID.randomUUID();
Account account = AccountsHelper.generateTestAccount("+14152222222", uuid, pni, new ArrayList<>(), new byte[UnidentifiedAccessUtil.UNIDENTIFIED_ACCESS_KEY_LENGTH]);
when(asyncCommands.get(eq("AccountMap::" + pni)))
.thenReturn(MockRedisFuture.failedFuture(new RedisException("OH NO")));
when(asyncCommands.setex(any(), anyLong(), any())).thenReturn(MockRedisFuture.completedFuture("OK"));
when(accounts.getByPhoneNumberIdentifierAsync(pni))
.thenReturn(CompletableFuture.completedFuture(Optional.of(account)));
Optional<Account> retrieved = accountsManager.getByPhoneNumberIdentifierAsync(pni).join();
assertTrue(retrieved.isPresent());
assertSame(retrieved.get(), account);
verify(asyncCommands).get(eq("AccountMap::" + pni));
verify(asyncCommands).setex(eq("AccountMap::" + pni), anyLong(), eq(uuid.toString()));
verify(asyncCommands).setex(eq("Account3::" + uuid), anyLong(), anyString());
verifyNoMoreInteractions(asyncCommands);
verify(accounts).getByPhoneNumberIdentifierAsync(pni);
verifyNoMoreInteractions(accounts);
}
|
@Override
public int getTotalNumberOfRecords(Configuration conf) throws HiveJdbcDatabaseAccessException {
Connection conn = null;
PreparedStatement ps = null;
ResultSet rs = null;
try {
initializeDatabaseConnection(conf);
String tableName = getQualifiedTableName(conf);
// Always use JDBC_QUERY if available both for correctness and performance. JDBC_QUERY can be set by the user
// or the CBO including pushdown optimizations. SELECT all query should be used only when JDBC_QUERY is null.
String sql = firstNonNull(conf.get(Constants.JDBC_QUERY), selectAllFromTable(tableName));
String countQuery = "SELECT COUNT(*) FROM (" + sql + ") tmptable";
LOGGER.info("Query to execute is [{}]", countQuery);
conn = dbcpDataSource.getConnection();
ps = conn.prepareStatement(countQuery);
rs = ps.executeQuery();
if (rs.next()) {
return rs.getInt(1);
}
else {
LOGGER.warn("The count query {} did not return any results.", countQuery);
throw new HiveJdbcDatabaseAccessException("Count query did not return any results.");
}
}
catch (HiveJdbcDatabaseAccessException he) {
throw he;
}
catch (Exception e) {
LOGGER.error("Caught exception while trying to get the number of records: " + e.getMessage(), e);
throw new HiveJdbcDatabaseAccessException(e);
}
finally {
cleanupResources(conn, ps, rs);
}
}
|
@Test(expected = HiveJdbcDatabaseAccessException.class)
public void testGetTotalNumberOfRecords_invalidQuery() throws HiveJdbcDatabaseAccessException {
Configuration conf = buildConfiguration();
conf.set(JdbcStorageConfig.QUERY.getPropertyName(), "select * from strategyx where strategy_id = '5'");
DatabaseAccessor accessor = DatabaseAccessorFactory.getAccessor(conf);
@SuppressWarnings("unused")
int numRecords = accessor.getTotalNumberOfRecords(conf);
}
|
@Override
public String getDescription() {
return "Webhooks";
}
|
@Test
public void has_description() {
assertThat(underTest.getDescription()).isNotEmpty();
}
|
public void shutdown() {
// Now that segments can't report metrics, destroy the metrics for this table
_scheduledExecutor.shutdown(); // The scheduled executor is installed in the constructor, so it must always be cancelled
if (!_isServerReadyToServeQueries.get()) {
// Do not update the tracker state during server startup period
return;
}
// Remove partitions so their related metrics get uninstalled.
for (Integer partitionId : _ingestionInfoMap.keySet()) {
removePartitionId(partitionId);
}
}
|
@Test
public void testShutdown() {
final long maxTestDelay = 100;
IngestionDelayTracker ingestionDelayTracker = createTracker();
// Use fixed clock so samples don't age
Instant now = Instant.now();
ZoneId zoneId = ZoneId.systemDefault();
Clock clock = Clock.fixed(now, zoneId);
ingestionDelayTracker.setClock(clock);
// Test Shutdown with partitions active
for (int partitionId = 0; partitionId <= maxTestDelay; partitionId++) {
String segmentName = new LLCSegmentName(RAW_TABLE_NAME, partitionId, 0, 123).getSegmentName();
long ingestionTimeMs = clock.millis() - partitionId;
ingestionDelayTracker.updateIngestionMetrics(segmentName, partitionId, ingestionTimeMs, ingestionTimeMs, null,
null);
Assert.assertEquals(ingestionDelayTracker.getPartitionIngestionDelayMs(partitionId), partitionId);
Assert.assertEquals(ingestionDelayTracker.getPartitionEndToEndIngestionDelayMs(partitionId), partitionId);
}
ingestionDelayTracker.shutdown();
// Test shutdown with no partitions
ingestionDelayTracker = createTracker();
ingestionDelayTracker.shutdown();
}
|
@Override
public int getOrder() {
return PluginEnum.GLOBAL.getCode();
}
|
@Test
public void testGetOrder() {
assertEquals(-1, globalPlugin.getOrder());
}
|
public static <T> Either<String, T> resolveImportDMN(Import importElement, Collection<T> dmns, Function<T, QName> idExtractor) {
final String importerDMNNamespace = ((Definitions) importElement.getParent()).getNamespace();
final String importerDMNName = ((Definitions) importElement.getParent()).getName();
final String importNamespace = importElement.getNamespace();
final String importName = importElement.getName();
final String importLocationURI = importElement.getLocationURI(); // This is optional
final String importModelName = importElement.getAdditionalAttributes().get(TImport.MODELNAME_QNAME);
LOGGER.debug("Resolving an Import in DMN Model with name={} and namespace={}. " +
"Importing a DMN model with namespace={} name={} locationURI={}, modelName={}",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
List<T> matchingDMNList = dmns.stream()
.filter(m -> idExtractor.apply(m).getNamespaceURI().equals(importNamespace))
.toList();
if (matchingDMNList.size() == 1) {
T located = matchingDMNList.get(0);
// Check whether the DMN model located in the namespace corresponds to the import's `drools:modelName`.
if (importModelName == null || idExtractor.apply(located).getLocalPart().equals(importModelName)) {
LOGGER.debug("DMN Model with name={} and namespace={} successfully imported a DMN " +
"with namespace={} name={} locationURI={}, modelName={}",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
return Either.ofRight(located);
} else {
LOGGER.error("DMN Model with name={} and namespace={} can't import a DMN with namespace={}, name={}, modelName={}, " +
"located within namespace only {} but does not match for the actual modelName",
importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, idExtractor.apply(located));
return Either.ofLeft(String.format(
"DMN Model with name=%s and namespace=%s can't import a DMN with namespace=%s, name=%s, modelName=%s, " +
"located within namespace only %s but does not match for the actual modelName",
importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, idExtractor.apply(located)));
}
} else {
List<T> usingNSandName = matchingDMNList.stream()
.filter(dmn -> idExtractor.apply(dmn).getLocalPart().equals(importModelName))
.toList();
if (usingNSandName.size() == 1) {
LOGGER.debug("DMN Model with name={} and namespace={} successfully imported a DMN " +
"with namespace={} name={} locationURI={}, modelName={}",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
return Either.ofRight(usingNSandName.get(0));
} else if (usingNSandName.isEmpty()) {
LOGGER.error("DMN Model with name={} and namespace={} failed to import a DMN with namespace={} name={} locationURI={}, modelName={}.",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
return Either.ofLeft(String.format(
"DMN Model with name=%s and namespace=%s failed to import a DMN with namespace=%s name=%s locationURI=%s, modelName=%s. ",
importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName));
} else {
LOGGER.error("DMN Model with name={} and namespace={} detected a collision ({} elements) trying to import a DMN with namespace={} name={} locationURI={}, modelName={}",
importerDMNName, importerDMNNamespace, usingNSandName.size(), importNamespace, importName, importLocationURI, importModelName);
return Either.ofLeft(String.format(
"DMN Model with name=%s and namespace=%s detected a collision trying to import a DMN with %s namespace, " +
"%s name and modelName %s. There are %s DMN files with the same namespace in your project. " +
"Please change the DMN namespaces and make them unique to fix this issue.",
importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, usingNSandName.size()));
}
}
}
|
@Test
void locateInNSunexistent() {
final Import i = makeImport("nsA", null, "boh");
final List<QName> available = Arrays.asList(new QName("nsA", "m1"),
new QName("nsA", "m2"),
new QName("nsB", "m3"));
final Either<String, QName> result = ImportDMNResolverUtil.resolveImportDMN(i, available, Function.identity());
assertThat(result.isLeft()).isTrue();
}
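The test above covers the path where the modelName does not match anything in the namespace; a sketch of the collision path, assuming makeImport's third argument is the modelName, could be:
@Test
void locateInNSCollision() {
    // Two candidate models share both the namespace and the modelName,
    // so resolution should report a collision (Left).
    final Import i = makeImport("nsA", null, "m1");
    final List<QName> available = Arrays.asList(new QName("nsA", "m1"),
                                                new QName("nsA", "m1"));
    final Either<String, QName> result = ImportDMNResolverUtil.resolveImportDMN(i, available, Function.identity());
    assertThat(result.isLeft()).isTrue();
}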
|
@Override
public WhitelistedSite saveNew(WhitelistedSite whitelistedSite) {
if (whitelistedSite.getId() != null) {
throw new IllegalArgumentException("A new whitelisted site cannot be created with an id value already set: " + whitelistedSite.getId());
}
return repository.save(whitelistedSite);
}
|
@Test
public void saveNew_success() {
WhitelistedSite site = Mockito.mock(WhitelistedSite.class);
Mockito.when(site.getId()).thenReturn(null);
service.saveNew(site);
Mockito.verify(repository).save(site);
}
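A companion sketch for the rejection path, assuming the entity id is a Long as implied by the null check in saveNew:
@Test(expected = IllegalArgumentException.class)
public void saveNew_failsWhenIdAlreadySet() {
    WhitelistedSite site = Mockito.mock(WhitelistedSite.class);
    // An entity that already carries an id must be rejected before reaching the repository.
    Mockito.when(site.getId()).thenReturn(12345L);
    service.saveNew(site);
}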
|
@Override
public void build(DefaultGoPublisher publisher, EnvironmentVariableContext environmentVariableContext, TaskExtension taskExtension, ArtifactExtension artifactExtension, PluginRequestProcessorRegistry pluginRequestProcessorRegistry, Charset consoleLogCharset) {
downloadMetadataFile(publisher);
try {
pluginRequestProcessorRegistry.registerProcessorFor(CONSOLE_LOG.requestName(), ArtifactRequestProcessor.forFetchArtifact(publisher, environmentVariableContext));
final String message = format("[%s] Fetching pluggable artifact using plugin `%s`.", GoConstants.PRODUCT_NAME, artifactStore.getPluginId());
LOGGER.info(message);
publisher.taggedConsumeLine(TaggedStreamConsumer.OUT, message);
List<FetchArtifactEnvironmentVariable> newEnvironmentVariables = artifactExtension.fetchArtifact(
artifactStore.getPluginId(), artifactStore, configuration, getMetadataFromFile(artifactId), agentWorkingDirectory());
updateEnvironmentVariableContextWith(publisher, environmentVariableContext, newEnvironmentVariables);
} catch (Exception e) {
publisher.taggedConsumeLine(TaggedStreamConsumer.ERR, e.getMessage());
LOGGER.error(e.getMessage(), e);
throw new RuntimeException(e);
} finally {
pluginRequestProcessorRegistry.removeProcessorFor(CONSOLE_LOG.requestName());
}
}
|
@Test
public void shouldUpdateEnvironmentVariableContextAfterFetchingArtifact() {
final FetchPluggableArtifactBuilder builder = new FetchPluggableArtifactBuilder(new RunIfConfigs(), new NullBuilder(), "", jobIdentifier, artifactStore, fetchPluggableArtifactTask.getConfiguration(), fetchPluggableArtifactTask.getArtifactId(), sourceOnServer, metadataDest.toFile(), checksumFileHandler);
EnvironmentVariableContext environmentVariableContext = new EnvironmentVariableContext();
environmentVariableContext.setProperty("VAR1", "old-value1", false);
environmentVariableContext.setProperty("VAR2", "old-value2", true);
environmentVariableContext.setProperty("VAR3", "old-value3", true);
environmentVariableContext.setProperty("VAR4", "old-value4", true);
when(artifactExtension.fetchArtifact(eq(PLUGIN_ID), eq(artifactStore), any(), anyMap(), eq(metadataDest.getParent().toString())))
.thenReturn(List.of(
new FetchArtifactEnvironmentVariable("VAR1", "value1-is-now-secure", true),
new FetchArtifactEnvironmentVariable("VAR2", "value2-is-now-insecure", false),
new FetchArtifactEnvironmentVariable("VAR3", "value3-but-secure-is-unchanged", true),
new FetchArtifactEnvironmentVariable("VAR5", "new-value5-insecure", false),
new FetchArtifactEnvironmentVariable("VAR6", "new-value6-secure", true)
));
builder.build(publisher, environmentVariableContext, null, artifactExtension, registry, UTF_8);
Map<String, String> newVariablesAfterFetchArtifact = environmentVariableContext.getProperties();
assertThat(newVariablesAfterFetchArtifact.size(), is(6));
assertVariable(environmentVariableContext, "VAR1", "value1-is-now-secure", true);
assertVariable(environmentVariableContext, "VAR2", "value2-is-now-insecure", false);
assertVariable(environmentVariableContext, "VAR3", "value3-but-secure-is-unchanged", true);
assertVariable(environmentVariableContext, "VAR4", "old-value4", true);
assertVariable(environmentVariableContext, "VAR5", "new-value5-insecure", false);
assertVariable(environmentVariableContext, "VAR6", "new-value6-secure", true);
ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
verify(publisher, atLeastOnce()).taggedConsumeLine(eq(OUT), captor.capture());
assertThat(captor.getAllValues(), hasItems(
"WARNING: Replacing environment variable: VAR1 = ******** (previously: old-value1)",
"WARNING: Replacing environment variable: VAR2 = value2-is-now-insecure (previously: ********)",
"WARNING: Replacing environment variable: VAR3 = ******** (previously: ********)",
" NOTE: Setting new environment variable: VAR5 = new-value5-insecure",
" NOTE: Setting new environment variable: VAR6 = ********"));
String consoleOutput = String.join(" -- ", captor.getAllValues());
assertThat(consoleOutput, not(containsString("value1-is-now-secure")));
assertThat(consoleOutput, not(containsString("value3-but-secure-is-unchanged")));
assertThat(consoleOutput, not(containsString("new-value6-secure")));
}
|
public static Map<String, ShardingSphereSchema> build(final String databaseName, final DatabaseType databaseType, final ConfigurationProperties props) {
SystemDatabase systemDatabase = new SystemDatabase(databaseType);
Map<String, ShardingSphereSchema> result = new LinkedHashMap<>(systemDatabase.getSystemSchemas().size(), 1F);
boolean isSystemSchemaMetaDataEnabled = isSystemSchemaMetaDataEnabled(props.getProps());
YamlTableSwapper swapper = new YamlTableSwapper();
for (String each : getSystemSchemas(databaseName, databaseType, systemDatabase)) {
result.put(each.toLowerCase(), createSchema(each, SystemSchemaManager.getAllInputStreams(databaseType.getType(), each), swapper, isSystemSchemaMetaDataEnabled));
}
return result;
}
|
@Test
void assertBuildForMySQL() {
DatabaseType databaseType = TypedSPILoader.getService(DatabaseType.class, "MySQL");
ConfigurationProperties configProps = new ConfigurationProperties(new Properties());
Map<String, ShardingSphereSchema> actualInformationSchema = SystemSchemaBuilder.build("information_schema", databaseType, configProps);
assertThat(actualInformationSchema.size(), is(1));
assertTrue(actualInformationSchema.containsKey("information_schema"));
assertThat(actualInformationSchema.get("information_schema").getTables().size(), is(95));
Map<String, ShardingSphereSchema> actualMySQLSchema = SystemSchemaBuilder.build("mysql", databaseType, configProps);
assertThat(actualMySQLSchema.size(), is(1));
assertTrue(actualMySQLSchema.containsKey("mysql"));
assertThat(actualMySQLSchema.get("mysql").getTables().size(), is(40));
Map<String, ShardingSphereSchema> actualPerformanceSchema = SystemSchemaBuilder.build("performance_schema", databaseType, configProps);
assertThat(actualPerformanceSchema.size(), is(1));
assertTrue(actualPerformanceSchema.containsKey("performance_schema"));
assertThat(actualPerformanceSchema.get("performance_schema").getTables().size(), is(114));
Map<String, ShardingSphereSchema> actualSysSchema = SystemSchemaBuilder.build("sys", databaseType, configProps);
assertThat(actualSysSchema.size(), is(1));
assertTrue(actualSysSchema.containsKey("sys"));
assertThat(actualSysSchema.get("sys").getTables().size(), is(53));
}
|
public void onFragment(final DirectBuffer buffer, final int offset, final int length, final Header header)
{
final byte flags = header.flags();
if ((flags & UNFRAGMENTED) == UNFRAGMENTED)
{
delegate.onFragment(buffer, offset, length, header);
}
else
{
handleFragment(buffer, offset, length, header, flags);
}
}
|
@Test
void shouldDoNothingIfEndArrivesWithoutBegin()
{
when(header.flags()).thenReturn(FrameDescriptor.END_FRAG_FLAG);
final UnsafeBuffer srcBuffer = new UnsafeBuffer(new byte[1024]);
final int offset = 0;
final int length = srcBuffer.capacity() / 2;
assembler.onFragment(srcBuffer, offset, length, header);
verify(delegateFragmentHandler, never()).onFragment(any(), anyInt(), anyInt(), any());
}
|
@Override
public DataflowPipelineJob run(Pipeline pipeline) {
// Multi-language pipelines and pipelines that include upgrades should automatically be upgraded
// to Runner v2.
if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_runner_v2")) {
LOG.info(
"Automatically enabling Dataflow Runner v2 since the pipeline used cross-language"
+ " transforms or pipeline needed a transform upgrade.");
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build());
}
}
if (useUnifiedWorker(options)) {
if (hasExperiment(options, "disable_runner_v2")
|| hasExperiment(options, "disable_runner_v2_until_2023")
|| hasExperiment(options, "disable_prime_runner_v2")) {
throw new IllegalArgumentException(
"Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set.");
}
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("use_runner_v2")) {
experiments.add("use_runner_v2");
}
if (!experiments.contains("use_unified_worker")) {
experiments.add("use_unified_worker");
}
if (!experiments.contains("beam_fn_api")) {
experiments.add("beam_fn_api");
}
if (!experiments.contains("use_portable_job_submission")) {
experiments.add("use_portable_job_submission");
}
options.setExperiments(ImmutableList.copyOf(experiments));
}
logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);
logWarningIfBigqueryDLQUnused(pipeline);
if (shouldActAsStreaming(pipeline)) {
options.setStreaming(true);
if (useUnifiedWorker(options)) {
options.setEnableStreamingEngine(true);
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("enable_streaming_engine")) {
experiments.add("enable_streaming_engine");
}
if (!experiments.contains("enable_windmill_service")) {
experiments.add("enable_windmill_service");
}
}
}
if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) {
ProjectionPushdownOptimizer.optimize(pipeline);
}
LOG.info(
"Executing pipeline on the Dataflow Service, which will have billing implications "
+ "related to Google Compute Engine usage and other Google Cloud Services.");
DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions);
// This incorrectly puns the worker harness container image (which implements v1beta3 API)
// with the SDK harness image (which implements Fn API).
//
// The same Environment is used in different and contradictory ways, depending on whether
// it is a v1 or v2 job submission.
RunnerApi.Environment defaultEnvironmentForDataflow =
Environments.createDockerEnvironment(workerHarnessContainerImageURL);
// The SdkComponents for portable and non-portable job submission must be kept distinct. Both
// need the default environment.
SdkComponents portableComponents = SdkComponents.create();
portableComponents.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
RunnerApi.Pipeline portablePipelineProto =
PipelineTranslation.toProto(pipeline, portableComponents, false);
// Note that `stageArtifacts` has to be called before `resolveArtifact` because
// `resolveArtifact` updates local paths to staged paths in pipeline proto.
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto);
List<DataflowPackage> packages = stageArtifacts(portablePipelineProto);
portablePipelineProto = resolveArtifacts(portablePipelineProto);
portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Portable pipeline proto:\n{}",
TextFormat.printer().printToString(portablePipelineProto));
}
// Stage the portable pipeline proto, retrieving the staged pipeline path, then update
// the options on the new job
// TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options
LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation());
byte[] serializedProtoPipeline = portablePipelineProto.toByteArray();
DataflowPackage stagedPipeline =
options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME);
dataflowOptions.setPipelineUrl(stagedPipeline.getLocation());
if (useUnifiedWorker(options)) {
LOG.info("Skipping v1 transform replacements since job will run on v2.");
} else {
// Now rewrite things to be as needed for v1 (mutates the pipeline)
// This way the job submitted is valid for v1 and v2, simultaneously
replaceV1Transforms(pipeline);
}
// Capture the SdkComponents for look up during step translations
SdkComponents dataflowV1Components = SdkComponents.create();
dataflowV1Components.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
// No need to perform transform upgrading for the Runner v1 proto.
RunnerApi.Pipeline dataflowV1PipelineProto =
PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Dataflow v1 pipeline proto:\n{}",
TextFormat.printer().printToString(dataflowV1PipelineProto));
}
// Set a unique client_request_id in the CreateJob request.
// This is used to ensure idempotence of job creation across retried
// attempts to create a job. Specifically, if the service returns a job with
// a different client_request_id, it means the returned one is a different
// job previously created with the same job name, and that the job creation
// has been effectively rejected. The SDK should return
// Error::Already_Exists to user in that case.
int randomNum = new Random().nextInt(9000) + 1000;
String requestId =
DateTimeFormat.forPattern("YYYYMMddHHmmssmmm")
.withZone(DateTimeZone.UTC)
.print(DateTimeUtils.currentTimeMillis())
+ "_"
+ randomNum;
JobSpecification jobSpecification =
translator.translate(
pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages);
if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_staged_dataflow_worker_jar")) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("use_staged_dataflow_worker_jar")
.build());
}
}
Job newJob = jobSpecification.getJob();
try {
newJob
.getEnvironment()
.setSdkPipelineOptions(
MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class));
} catch (IOException e) {
throw new IllegalArgumentException(
"PipelineOptions specified failed to serialize to JSON.", e);
}
newJob.setClientRequestId(requestId);
DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
String version = dataflowRunnerInfo.getVersion();
checkState(
!"${pom.version}".equals(version),
"Unable to submit a job to the Dataflow service with unset version ${pom.version}");
LOG.info("Dataflow SDK version: {}", version);
newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties());
// The Dataflow Service may write to the temporary directory directly, so
// it must be verified.
if (!isNullOrEmpty(options.getGcpTempLocation())) {
newJob
.getEnvironment()
.setTempStoragePrefix(
dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation()));
}
newJob.getEnvironment().setDataset(options.getTempDatasetId());
if (options.getWorkerRegion() != null) {
newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion());
}
if (options.getWorkerZone() != null) {
newJob.getEnvironment().setWorkerZone(options.getWorkerZone());
}
if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED");
} else if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED");
}
// Represent the minCpuPlatform pipeline option as an experiment, if not already present.
if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
List<String> minCpuFlags =
experiments.stream()
.filter(p -> p.startsWith("min_cpu_platform"))
.collect(Collectors.toList());
if (minCpuFlags.isEmpty()) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform())
.build());
} else {
LOG.warn(
"Flag min_cpu_platform is defined in both top level PipelineOption, "
+ "as well as under experiments. Proceed using {}.",
minCpuFlags.get(0));
}
}
newJob
.getEnvironment()
.setExperiments(
ImmutableList.copyOf(
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList())));
// Set the Docker container image that executes Dataflow worker harness, residing in Google
// Container Registry. Translator is guaranteed to create a worker pool prior to this point.
// For runner_v1, only worker_harness_container is set.
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same
// value.
String containerImage = getContainerImageForJob(options);
for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) {
workerPool.setWorkerHarnessContainerImage(containerImage);
}
configureSdkHarnessContainerImages(options, portablePipelineProto, newJob);
newJob.getEnvironment().setVersion(getEnvironmentVersion(options));
if (hooks != null) {
hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment());
}
// enable upload_graph when the graph is too large
byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8);
int jobGraphByteSize = jobGraphBytes.length;
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES
&& !hasExperiment(options, "upload_graph")
&& !useUnifiedWorker(options)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build());
LOG.info(
"The job graph size ({} in bytes) is larger than {}. Automatically add "
+ "the upload_graph option to experiments.",
jobGraphByteSize,
CREATE_JOB_REQUEST_LIMIT_BYTES);
}
if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) {
ArrayList<String> experiments = new ArrayList<>(options.getExperiments());
while (experiments.remove("upload_graph")) {}
options.setExperiments(experiments);
LOG.warn(
"The upload_graph experiment was specified, but it does not apply "
+ "to runner v2 jobs. Option has been automatically removed.");
}
// Upload the job to GCS and remove the graph object from the API call. The graph
// will be downloaded from GCS by the service.
if (hasExperiment(options, "upload_graph")) {
DataflowPackage stagedGraph =
options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME);
newJob.getSteps().clear();
newJob.setStepsLocation(stagedGraph.getLocation());
}
if (!isNullOrEmpty(options.getDataflowJobFile())
|| !isNullOrEmpty(options.getTemplateLocation())) {
boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation());
if (isTemplate) {
checkArgument(
isNullOrEmpty(options.getDataflowJobFile()),
"--dataflowJobFile and --templateLocation are mutually exclusive.");
}
String fileLocation =
firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile());
checkArgument(
fileLocation.startsWith("/") || fileLocation.startsWith("gs://"),
"Location must be local or on Cloud Storage, got %s.",
fileLocation);
ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */);
String workSpecJson = DataflowPipelineTranslator.jobToString(newJob);
try (PrintWriter printWriter =
new PrintWriter(
new BufferedWriter(
new OutputStreamWriter(
Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)),
UTF_8)))) {
printWriter.print(workSpecJson);
LOG.info("Printed job specification to {}", fileLocation);
} catch (IOException ex) {
String error = String.format("Cannot create output file at %s", fileLocation);
if (isTemplate) {
throw new RuntimeException(error, ex);
} else {
LOG.warn(error, ex);
}
}
if (isTemplate) {
LOG.info("Template successfully created.");
return new DataflowTemplateJob();
}
}
String jobIdToUpdate = null;
if (options.isUpdate()) {
jobIdToUpdate = getJobIdFromName(options.getJobName());
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setReplaceJobId(jobIdToUpdate);
}
if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) {
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot());
}
Job jobResult;
try {
jobResult = dataflowClient.createJob(newJob);
} catch (GoogleJsonResponseException e) {
String errorMessages = "Unexpected errors";
if (e.getDetails() != null) {
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) {
errorMessages =
"The size of the serialized JSON representation of the pipeline "
+ "exceeds the allowable limit. "
+ "For more information, please see the documentation on job submission:\n"
+ "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs";
} else {
errorMessages = e.getDetails().getMessage();
}
}
throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e);
} catch (IOException e) {
throw new RuntimeException("Failed to create a workflow job", e);
}
// Use a raw client for post-launch monitoring, as status calls may fail
// regularly and need not be retried automatically.
DataflowPipelineJob dataflowPipelineJob =
new DataflowPipelineJob(
DataflowClient.create(options),
jobResult.getId(),
options,
jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(),
portablePipelineProto);
// If the service returned client request id, the SDK needs to compare it
// with the original id generated in the request, if they are not the same
// (i.e., the returned job is not created by this request), throw
// DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException
// depending on whether this is a reload or not.
if (jobResult.getClientRequestId() != null
&& !jobResult.getClientRequestId().isEmpty()
&& !jobResult.getClientRequestId().equals(requestId)) {
// If updating a job.
if (options.isUpdate()) {
throw new DataflowJobAlreadyUpdatedException(
dataflowPipelineJob,
String.format(
"The job named %s with id: %s has already been updated into job id: %s "
+ "and cannot be updated again.",
newJob.getName(), jobIdToUpdate, jobResult.getId()));
} else {
throw new DataflowJobAlreadyExistsException(
dataflowPipelineJob,
String.format(
"There is already an active job named %s with id: %s. If you want to submit a"
+ " second job, try again by setting a different name using --jobName.",
newJob.getName(), jobResult.getId()));
}
}
LOG.info(
"To access the Dataflow monitoring console, please navigate to {}",
MonitoringUtil.getJobMonitoringPageURL(
options.getProject(), options.getRegion(), jobResult.getId()));
LOG.info("Submitted job: {}", jobResult.getId());
LOG.info(
"To cancel the job using the 'gcloud' tool, run:\n> {}",
MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId()));
return dataflowPipelineJob;
}
|
@Test
public void testSettingFlexRS() throws IOException {
DataflowPipelineOptions options = buildPipelineOptions();
options.setFlexRSGoal(DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED);
Pipeline p = Pipeline.create(options);
p.run();
ArgumentCaptor<Job> jobCaptor = ArgumentCaptor.forClass(Job.class);
Mockito.verify(mockJobs).create(eq(PROJECT_ID), eq(REGION_ID), jobCaptor.capture());
assertEquals(
"FLEXRS_COST_OPTIMIZED",
jobCaptor.getValue().getEnvironment().getFlexResourceSchedulingGoal());
}
|
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
return this.resourcesByName.equals(((ResourceVector) o).resourcesByName);
}
|
@Test
public void testEquals() {
ResourceVector resourceVector = ResourceVector.of(13);
ResourceVector resourceVectorOther = ResourceVector.of(14);
Resource resource = Resource.newInstance(13, 13);
Assert.assertNotEquals(null, resourceVector);
Assert.assertNotEquals(resourceVectorOther, resourceVector);
Assert.assertNotEquals(resource, resourceVector);
ResourceVector resourceVectorOne = ResourceVector.of(1);
resourceVectorOther.decrement(resourceVectorOne);
Assert.assertEquals(resourceVectorOther, resourceVector);
}
|
public void addSpeaker(BgpSpeakerConfig speaker) {
// Create the new speaker node and set the parameters
ObjectNode speakerNode = JsonNodeFactory.instance.objectNode();
speakerNode.put(NAME, speaker.name().get());
speakerNode.put(VLAN, speaker.vlan().toString());
speakerNode.put(CONNECT_POINT, speaker.connectPoint().elementId().toString()
+ "/" + speaker.connectPoint().port().toString());
ArrayNode peersNode = speakerNode.putArray(PEERS);
for (IpAddress peerAddress: speaker.peers()) {
peersNode.add(peerAddress.toString());
}
// Add the new BGP speaker to the existing node array
ArrayNode speakersArray = bgpSpeakers().isEmpty() ?
initBgpSpeakersConfiguration() : (ArrayNode) object.get(SPEAKERS);
speakersArray.add(speakerNode);
}
|
@Test
public void testAddSpeaker() throws Exception {
int initialSize = bgpConfig.bgpSpeakers().size();
BgpConfig.BgpSpeakerConfig newSpeaker = createNewSpeaker();
bgpConfig.addSpeaker(newSpeaker);
assertEquals(initialSize + 1, bgpConfig.bgpSpeakers().size());
speakers.add(newSpeaker);
assertEquals(speakers, bgpConfig.bgpSpeakers());
}
|
@Nullable public String getValue(@Nullable TraceContext context) {
if (context == null) return null;
return this.context.getValue(this, context);
}
|
@Test void getValue_extracted_doesntExist() {
assertThat(AMZN_TRACE_ID.getValue(requestIdExtraction))
.isNull();
assertThat(AMZN_TRACE_ID.getValue(emptyExtraction))
.isNull();
assertThat(AMZN_TRACE_ID.getValue(TraceContextOrSamplingFlags.EMPTY))
.isNull();
}
|
@Udf
public boolean check(@UdfParameter(description = "The input JSON string") final String input) {
if (input == null) {
return false;
}
try {
return !UdfJsonMapper.parseJson(input).isMissingNode();
} catch (KsqlFunctionException e) {
return false;
}
}
|
@Test
public void shouldInterpretNullString() {
assertTrue(udf.check("null"));
}
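Adjacent cases implied by the focal method, sketched under the assumption that UdfJsonMapper.parseJson throws KsqlFunctionException on malformed input:
@Test
public void shouldRejectNullReferenceAndMalformedJson() {
    assertFalse(udf.check(null));          // a null reference is rejected up front
    assertFalse(udf.check("{"));           // malformed JSON is caught and reported as false
    assertTrue(udf.check("{\"a\": 1}"));   // well-formed JSON parses to a non-missing node
}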
|
@Udf(description = "Splits a string into an array of substrings based on a delimiter.")
public List<String> split(
@UdfParameter(
description = "The string to be split. If NULL, then function returns NULL.")
final String string,
@UdfParameter(
description = "The delimiter to split a string by. If NULL, then function returns NULL.")
final String delimiter) {
if (string == null || delimiter == null) {
return null;
}
// Java's split() accepts regular expressions as the delimiter, but this UDF's split()
// is meant to accept only literal strings. It therefore uses Guava's Splitter, which does not
// interpret regex patterns, to avoid confusing users who split by regex
// special characters such as '.' and '|'.
try {
// Guava Splitter does not accept empty delimiters. Use the Java split() method instead.
if (delimiter.isEmpty()) {
return Arrays.asList(EMPTY_DELIMITER.split(string));
} else {
return Splitter.on(delimiter).splitToList(string);
}
} catch (final Exception e) {
throw new KsqlFunctionException(
String.format("Invalid delimiter '%s' in the split() function.", delimiter), e);
}
}
|
@Test
public void shouldSplitAndAddEmptySpacesIfDelimiterBytesIsFoundAtTheBeginningOrEnd() {
final ByteBuffer aBytes = ByteBuffer.wrap(new byte[]{'A'});
final ByteBuffer bBytes = ByteBuffer.wrap(new byte[]{'B'});
final ByteBuffer dollarBytes = ByteBuffer.wrap(new byte[]{'$'});
assertThat(
splitUdf.split(
ByteBuffer.wrap(new byte[]{'$','A'}),
dollarBytes),
contains(EMPTY_BYTES, aBytes));
assertThat(
splitUdf.split(
ByteBuffer.wrap(new byte[]{'$','A','$','B'}),
dollarBytes),
contains(EMPTY_BYTES, aBytes, bBytes));
assertThat(
splitUdf.split(
ByteBuffer.wrap(new byte[]{'A','$'}),
dollarBytes),
contains(aBytes, EMPTY_BYTES));
assertThat(
splitUdf.split(
ByteBuffer.wrap(new byte[]{'A','$','B','$'}),
dollarBytes),
contains(aBytes, bBytes, EMPTY_BYTES));
assertThat(
splitUdf.split(
ByteBuffer.wrap(new byte[]{'$','A','$','B','$'}),
dollarBytes),
contains(EMPTY_BYTES, aBytes, bBytes, EMPTY_BYTES));
}
|
public void insertBulk(final List<MemberCoupon> memberCoupons) {
String sql = "INSERT IGNORE INTO member_coupon (member_id, coupon_id, created_at, updated_at)" +
" VALUES (:memberId, :couponId, :createdAt, :updatedAt)";
namedParameterJdbcTemplate.batchUpdate(sql, chargeStationSqlParameterSource(memberCoupons));
}
|
@Test
void 멤버_쿠폰을_벌크_저장한다() {
// given
MemberCoupon memberCoupon = 멤버_쿠폰_생성();
// when & then
Assertions.assertDoesNotThrow(() -> memberCouponJdbcRepository.insertBulk(List.of(memberCoupon, memberCoupon, memberCoupon)));
}
|
@SuppressWarnings("checkstyle:npathcomplexity")
public void verify() {
List<String> enabledDiscoveries = new ArrayList<>();
int countEnabled = 0;
if (getTcpIpConfig().isEnabled()) {
countEnabled++;
enabledDiscoveries.add("TCP/IP");
}
if (getMulticastConfig().isEnabled()) {
countEnabled++;
enabledDiscoveries.add("Multicast");
}
if (getAwsConfig().isEnabled()) {
countEnabled++;
enabledDiscoveries.add("AWS discovery");
}
if (getGcpConfig().isEnabled()) {
countEnabled++;
enabledDiscoveries.add("GCP discovery");
}
if (getAzureConfig().isEnabled()) {
countEnabled++;
enabledDiscoveries.add("Azure discovery");
}
if (getKubernetesConfig().isEnabled()) {
countEnabled++;
enabledDiscoveries.add("Kubernetes discovery");
}
if (getEurekaConfig().isEnabled()) {
countEnabled++;
enabledDiscoveries.add("Eureka discovery");
}
Collection<DiscoveryStrategyConfig> discoveryStrategyConfigs = discoveryConfig.getDiscoveryStrategyConfigs();
if (!discoveryStrategyConfigs.isEmpty()) {
countEnabled++;
enabledDiscoveries.add("Discovery SPI");
}
if (countEnabled > 1) {
throw new InvalidConfigurationException("Only one discovery method can be enabled at a time. "
+ "Keep only one of the following method enabled by removing the others from the configuration, "
+ "or setting enabled to 'false': " + String.join(", ", enabledDiscoveries));
}
}
|
@Test
public void testEqualsAndHashCode() {
assumeDifferentHashCodes();
EqualsVerifier.forClass(JoinConfig.class)
.usingGetClass()
.suppress(Warning.NONFINAL_FIELDS)
.verify();
}
|
@Override
public List<Intent> compile(PointToPointIntent intent, List<Intent> installable) {
log.trace("compiling {} {}", intent, installable);
ConnectPoint ingressPoint = intent.filteredIngressPoint().connectPoint();
ConnectPoint egressPoint = intent.filteredEgressPoint().connectPoint();
//TODO: handle protected path case with suggested path!!
//Idea: use suggested path as primary and another path from path service as protection
if (intent.suggestedPath() != null && intent.suggestedPath().size() > 0) {
Path path = new DefaultPath(PID, intent.suggestedPath(), new ScalarWeight(1));
//Check intent constraints against suggested path and suggested path availability
if (checkPath(path, intent.constraints()) && pathAvailable(intent)) {
allocateIntentBandwidth(intent, path);
return asList(createLinkCollectionIntent(ImmutableSet.copyOf(intent.suggestedPath()),
DEFAULT_COST, intent));
}
}
if (ingressPoint.deviceId().equals(egressPoint.deviceId())) {
return createZeroHopLinkCollectionIntent(intent);
}
// proceed with no protected paths
if (!ProtectionConstraint.requireProtectedPath(intent)) {
return createUnprotectedLinkCollectionIntent(intent);
}
try {
// attempt to compute and implement backup path
return createProtectedIntent(ingressPoint, egressPoint, intent, installable);
} catch (PathNotFoundException e) {
log.warn("Could not find disjoint Path for {}", intent);
// no disjoint path extant -- maximum one path exists between devices
return createSinglePathIntent(ingressPoint, egressPoint, intent, installable);
}
}
|
@Test
public void testKeyRGBandwidthConstrainedIntentAllocation() {
final double bpsTotal = 1000.0;
String[] hops = {S1, S2, S3};
final ResourceService resourceService =
MockResourceService.makeCustomBandwidthResourceService(bpsTotal);
final List<Constraint> constraints =
Collections.singletonList(new BandwidthConstraint(Bandwidth.bps(BPS_TO_RESERVE)));
final PointToPointIntent intent = makeIntent(new ConnectPoint(DID_1, PORT_1),
new ConnectPoint(DID_3, PORT_2),
constraints);
PointToPointIntentCompiler compiler = makeCompiler(hops, resourceService);
compiler.compile(intent, null);
Key intentKey = intent.key();
ResourceGroup resourceGroup = ResourceGroup.of(100);
final PointToPointIntent newIntent = makeIntent(intentKey,
new ConnectPoint(DID_1, PORT_1),
new ConnectPoint(DID_3, PORT_2),
constraints,
resourceGroup);
compiler.compile(newIntent, null);
ResourceAllocation rAOne = new ResourceAllocation(RESOURCE_SW1_P1, resourceGroup);
ResourceAllocation rATwo = new ResourceAllocation(RESOURCE_SW1_P2, resourceGroup);
ResourceAllocation rAThree = new ResourceAllocation(RESOURCE_SW2_P1, resourceGroup);
ResourceAllocation rAFour = new ResourceAllocation(RESOURCE_SW2_P2, resourceGroup);
ResourceAllocation rAFive = new ResourceAllocation(RESOURCE_SW3_P1, resourceGroup);
ResourceAllocation rASix = new ResourceAllocation(RESOURCE_SW3_P2, resourceGroup);
Set<ResourceAllocation> expectedresourceAllocations =
ImmutableSet.of(rAOne, rATwo, rAThree, rAFour, rAFive, rASix);
Set<ResourceAllocation> resourceAllocations =
ImmutableSet.copyOf(resourceService.getResourceAllocations(resourceGroup));
assertThat(resourceAllocations, hasSize(6));
assertEquals(expectedresourceAllocations, resourceAllocations);
}
|
@Override
public String toString(final RouteUnit routeUnit) {
Map<String, String> logicAndActualTables = getLogicAndActualTables(routeUnit);
StringBuilder result = new StringBuilder();
int index = 0;
for (Projection each : projections) {
if (index > 0) {
result.append(COLUMN_NAME_SPLITTER);
}
result.append(getColumnExpression(each, logicAndActualTables));
index++;
}
return result.toString();
}
|
@Test
void assertToStringWithSubqueryProjection() {
Collection<Projection> projections = Arrays.asList(new ColumnProjection(new IdentifierValue("temp", QuoteCharacter.BACK_QUOTE),
new IdentifierValue("id", QuoteCharacter.BACK_QUOTE), new IdentifierValue("id", QuoteCharacter.BACK_QUOTE), mock(DatabaseType.class)),
new SubqueryProjection(new SubqueryProjectionSegment(null, "(SELECT name FROM t_order)"), new ColumnProjection(null, "name", null, mock(DatabaseType.class)),
new IdentifierValue("name"), mock(DatabaseType.class)));
assertThat(new SubstitutableColumnNameToken(0, 1, projections, TypedSPILoader.getService(DatabaseType.class, "MySQL")).toString(mock(RouteUnit.class)), is("`temp`.`id` AS `id`, `name`"));
}
|
public static Schema project(Schema schema, Set<Integer> fieldIds) {
Preconditions.checkNotNull(schema, "Schema cannot be null");
Types.StructType result = project(schema.asStruct(), fieldIds);
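// Return the original schema unchanged when the projection keeps every field.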
if (schema.asStruct().equals(result)) {
return schema;
} else if (result != null) {
if (schema.getAliases() != null) {
return new Schema(result.fields(), schema.getAliases());
} else {
return new Schema(result.fields());
}
}
return new Schema(Collections.emptyList(), schema.getAliases());
}
|
@Test
public void testProjectNaturallyEmpty() {
Schema schema =
new Schema(
Lists.newArrayList(
required(
12,
"someStruct",
Types.StructType.of(
required(
15,
"anotherStruct",
Types.StructType.of(required(20, "empty", Types.StructType.of())))))));
Schema expectedDepthOne =
new Schema(Lists.newArrayList(required(12, "someStruct", Types.StructType.of())));
Schema actualDepthOne = TypeUtil.project(schema, Sets.newHashSet(12));
assertThat(actualDepthOne.asStruct()).isEqualTo(expectedDepthOne.asStruct());
Schema expectedDepthTwo =
new Schema(
Lists.newArrayList(
required(
12,
"someStruct",
Types.StructType.of(required(15, "anotherStruct", Types.StructType.of())))));
Schema actualDepthTwo = TypeUtil.project(schema, Sets.newHashSet(12, 15));
assertThat(actualDepthTwo.asStruct()).isEqualTo(expectedDepthTwo.asStruct());
Schema expectedDepthThree =
new Schema(
Lists.newArrayList(
required(
12,
"someStruct",
Types.StructType.of(
required(
15,
"anotherStruct",
Types.StructType.of(required(20, "empty", Types.StructType.of())))))));
Schema actualDepthThree = TypeUtil.project(schema, Sets.newHashSet(12, 15, 20));
Schema actualDepthThreeChildren = TypeUtil.project(schema, Sets.newHashSet(20));
assertThat(actualDepthThree.asStruct()).isEqualTo(expectedDepthThree.asStruct());
assertThat(actualDepthThreeChildren.asStruct()).isEqualTo(expectedDepthThree.asStruct());
}
|
@Override
public String toString() {
if (command != null) {
return "SmppMessage: " + command;
} else {
return "SmppMessage: " + getBody();
}
}
|
@Test
public void toStringShouldReturnTheShortMessageIfTheCommandIsNotNull() {
DeliverSm command = new DeliverSm();
command.setShortMessage("Hello SMPP world!".getBytes());
message = new SmppMessage(camelContext, command, new SmppConfiguration());
assertEquals("SmppMessage: PDUHeader(0, 00000000, 00000000, 0)", message.toString());
}
|
public static int realCompare(float a, float b)
{
// these three ifs can only be true if neither value is NaN
if (a < b) {
return -1;
}
if (a > b) {
return 1;
}
// this check ensures realCompare(+0, -0) will return 0
// if we just did floatToIntBits comparison, then they
// would not compare as equal
if (a == b) {
return 0;
}
// this ensures that realCompare(NaN, NaN) will return 0
// floatToIntBits converts all NaNs to the same representation
int aBits = floatToIntBits(a);
int bBits = floatToIntBits(b);
return Integer.compare(aBits, bBits);
}
|
@Test
public void testRealCompare()
{
assertEquals(realCompare(0, Float.parseFloat("-0")), 0);
assertEquals(realCompare(Float.NaN, Float.NaN), 0);
// 0x7fc01234 is a different representation of NaN
assertEquals(realCompare(Float.NaN, intBitsToFloat(0x7fc01234)), 0);
}
|
public T setEnableSource(boolean enableSource) {
attributes.put("_source", ImmutableSortedMap.of(ENABLED, enableSource));
return castThis();
}
|
@Test
@UseDataProvider("indexWithAndWithoutRelations")
public void index_with_source(Index index) {
NewIndex newIndex = new SimplestNewIndex(IndexType.main(index, "foo"), defaultSettingsConfiguration);
newIndex.setEnableSource(true);
assertThat(newIndex).isNotNull();
assertThat(getAttributeAsMap(newIndex, "_source")).containsExactly(entry("enabled", true));
}
|
protected boolean isValidRequestor(HttpServletRequest request, Configuration conf)
throws IOException {
UserGroupInformation ugi = getUGI(request, conf);
if (LOG.isDebugEnabled()) {
LOG.debug("Validating request made by " + ugi.getUserName() +
" / " + ugi.getShortUserName() + ". This user is: " +
UserGroupInformation.getLoginUser());
}
Set<String> validRequestors = new HashSet<String>();
validRequestors.addAll(DFSUtil.getAllNnPrincipals(conf));
try {
validRequestors.add(
SecurityUtil.getServerPrincipal(conf
.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
SecondaryNameNode.getHttpAddress(conf).getHostName()));
} catch (Exception e) {
// Don't halt if SecondaryNameNode principal could not be added.
LOG.debug("SecondaryNameNode principal could not be added", e);
String msg = String.format(
"SecondaryNameNode principal not considered, %s = %s, %s = %s",
DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
conf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
LOG.warn(msg);
}
// Check the full principal name of all the configured valid requestors.
for (String v : validRequestors) {
if (LOG.isDebugEnabled())
LOG.debug("isValidRequestor is comparing to valid requestor: " + v);
if (v != null && v.equals(ugi.getUserName())) {
if (LOG.isDebugEnabled())
LOG.debug("isValidRequestor is allowing: " + ugi.getUserName());
return true;
}
}
// Additionally, we compare the short name of the requestor to this JN's
// username, because we want to allow requests from other JNs during
// recovery, but we can't enumerate the full list of JNs.
if (ugi.getShortUserName().equals(
UserGroupInformation.getLoginUser().getShortUserName())) {
if (LOG.isDebugEnabled())
LOG.debug("isValidRequestor is allowing other JN principal: " +
ugi.getUserName());
return true;
}
if (LOG.isDebugEnabled())
LOG.debug("isValidRequestor is rejecting: " + ugi.getUserName());
return false;
}
|
@Test
public void testRequestNameNode() throws IOException, ServletException {
// Test: Make a request from a namenode
HttpServletRequest request = mock(HttpServletRequest.class);
when(request.getParameter(UserParam.NAME)).thenReturn("nn/[email protected]");
boolean isValid = SERVLET.isValidRequestor(request, CONF);
assertThat(isValid).isTrue();
}
|
public Plan validateReservationUpdateRequest(
ReservationSystem reservationSystem, ReservationUpdateRequest request)
throws YarnException {
ReservationId reservationId = request.getReservationId();
Plan plan = validateReservation(reservationSystem, reservationId,
AuditConstants.UPDATE_RESERVATION_REQUEST);
validateReservationDefinition(reservationId,
request.getReservationDefinition(), plan,
AuditConstants.UPDATE_RESERVATION_REQUEST);
return plan;
}
|
@Test
public void testUpdateReservationInvalidRecurrenceExpression() {
// first check recurrence expression
ReservationUpdateRequest request =
createSimpleReservationUpdateRequest(1, 1, 1, 5, 3, "123abc");
plan = null;
try {
plan =
rrValidator.validateReservationUpdateRequest(rSystem, request);
Assert.fail();
} catch (YarnException e) {
Assert.assertNull(plan);
String message = e.getMessage();
Assert.assertTrue(message
.startsWith("Invalid period "));
LOG.info(message);
}
// now check duration
request =
createSimpleReservationUpdateRequest(1, 1, 1, 50, 3, "10");
plan = null;
try {
plan =
rrValidator.validateReservationUpdateRequest(rSystem, request);
Assert.fail();
} catch (YarnException e) {
Assert.assertNull(plan);
String message = e.getMessage();
Assert.assertTrue(message
.startsWith("Duration of the requested reservation:"));
LOG.info(message);
}
}
|
@Udf
public <T> Map<String, T> union(
@UdfParameter(description = "first map to union") final Map<String, T> map1,
@UdfParameter(description = "second map to union") final Map<String, T> map2) {
final List<Map<String, T>> nonNullInputs =
Stream.of(map1, map2)
.filter(Objects::nonNull)
.collect(Collectors.toList());
if (nonNullInputs.size() == 0) {
return null;
}
final Map<String, T> output = new HashMap<>();
nonNullInputs
.forEach(output::putAll);
return output;
}
|
@Test
public void shouldHandleComplexValueTypes() {
final Map<String, List<Double>> input1 = Maps.newHashMap();
input1.put("apple", Arrays.asList(Double.valueOf(12.34), Double.valueOf(56.78)));
input1.put("banana", Arrays.asList(Double.valueOf(43.21), Double.valueOf(87.65)));
final Map<String, List<Double>> input2 = Maps.newHashMap();
input2.put("foo", Arrays.asList(Double.valueOf(123.456)));
final Map<String, List<Double>> result = udf.union(input1, input2);
assertThat(result.size(), is(3));
assertThat(result.get("banana"), contains(Double.valueOf(43.21), Double.valueOf(87.65)));
assertThat(result.keySet(), containsInAnyOrder("foo", "banana", "apple"));
}
|
@Override
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
if (executor.isShutdown()) {
return;
}
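// Pull the oldest queued task to make room, try to re-queue the rejected task, then run the
// evicted task on the caller's thread; if re-queuing failed, resubmit the rejected task.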
BlockingQueue<Runnable> workQueue = executor.getQueue();
Runnable firstWork = workQueue.poll();
boolean newTaskAdd = workQueue.offer(r);
if (firstWork != null) {
firstWork.run();
}
if (!newTaskAdd) {
executor.execute(r);
}
}
|
@Test
public void testRejectedExecutionWhenExecutorIsShutDown() {
when(threadPoolExecutor.isShutdown()).thenReturn(true);
runsOldestTaskPolicy.rejectedExecution(runnable, threadPoolExecutor);
verify(threadPoolExecutor, never()).execute(runnable);
verify(runnable, never()).run();
}
|
@Override
public Cursor<Tuple> zScan(byte[] key, ScanOptions options) {
return new KeyBoundCursor<Tuple>(key, 0, options) {
private RedisClient client;
@Override
protected ScanIteration<Tuple> doScan(byte[] key, long cursorId, ScanOptions options) {
if (isQueueing() || isPipelined()) {
throw new UnsupportedOperationException("'ZSCAN' cannot be called in pipeline / transaction mode.");
}
List<Object> args = new ArrayList<Object>();
args.add(key);
args.add(Long.toUnsignedString(cursorId));
if (options.getPattern() != null) {
args.add("MATCH");
args.add(options.getPattern());
}
if (options.getCount() != null) {
args.add("COUNT");
args.add(options.getCount());
}
RFuture<ListScanResult<Tuple>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, ZSCAN, args.toArray());
ListScanResult<Tuple> res = syncFuture(f);
client = res.getRedisClient();
return new ScanIteration<Tuple>(Long.parseUnsignedLong(res.getPos()), res.getValues());
}
}.open();
}
|
@Test
public void testZScan() {
connection.zAdd("key".getBytes(), 1, "value1".getBytes());
connection.zAdd("key".getBytes(), 2, "value2".getBytes());
Cursor<RedisZSetCommands.Tuple> t = connection.zScan("key".getBytes(), ScanOptions.scanOptions().build());
assertThat(t.hasNext()).isTrue();
assertThat(t.next().getValue()).isEqualTo("value1".getBytes());
assertThat(t.hasNext()).isTrue();
assertThat(t.next().getValue()).isEqualTo("value2".getBytes());
}
|
@Deprecated
public RemotingCommand getConsumeStatus(ChannelHandlerContext ctx,
RemotingCommand request) throws RemotingCommandException {
final RemotingCommand response = RemotingCommand.createResponseCommand(null);
final GetConsumerStatusRequestHeader requestHeader =
(GetConsumerStatusRequestHeader) request.decodeCommandCustomHeader(GetConsumerStatusRequestHeader.class);
Map<MessageQueue, Long> offsetTable = this.mqClientFactory.getConsumerStatus(requestHeader.getTopic(), requestHeader.getGroup());
GetConsumerStatusBody body = new GetConsumerStatusBody();
body.setMessageQueueTable(offsetTable);
response.setBody(body.encode());
response.setCode(ResponseCode.SUCCESS);
return response;
}
|
@Test
public void testGetConsumeStatus() throws Exception {
ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
RemotingCommand request = mock(RemotingCommand.class);
when(request.getCode()).thenReturn(RequestCode.GET_CONSUMER_STATUS_FROM_CLIENT);
GetConsumerStatusRequestHeader requestHeader = new GetConsumerStatusRequestHeader();
when(request.decodeCommandCustomHeader(GetConsumerStatusRequestHeader.class)).thenReturn(requestHeader);
assertNotNull(processor.processRequest(ctx, request));
}
|
@Override
public KsMaterializedQueryResult<WindowedRow> get(
final GenericKey key,
final int partition,
final Range<Instant> windowStartBounds,
final Range<Instant> windowEndBounds,
final Optional<Position> position
) {
try {
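// Collapse the window-start and window-end bounds into a single window-start scan range for the store query.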
final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
final WindowKeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query =
WindowKeyQuery.withKeyAndWindowStartRange(key, lower, upper);
StateQueryRequest<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> request =
inStore(stateStore.getStateStoreName()).withQuery(query);
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final KafkaStreams streams = stateStore.getKafkaStreams();
final StateQueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> result =
streams.query(request);
final QueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> queryResult =
result.getPartitionResults().get(partition);
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
}
if (queryResult.getResult() == null) {
return KsMaterializedQueryResult.rowIteratorWithPosition(
Collections.emptyIterator(), queryResult.getPosition());
}
try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it
= queryResult.getResult()) {
final Builder<WindowedRow> builder = ImmutableList.builder();
while (it.hasNext()) {
final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
final Instant windowStart = Instant.ofEpochMilli(next.key);
if (!windowStartBounds.contains(windowStart)) {
continue;
}
final Instant windowEnd = windowStart.plus(windowSize);
if (!windowEndBounds.contains(windowEnd)) {
continue;
}
final TimeWindow window =
new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
final WindowedRow row = WindowedRow.of(
stateStore.schema(),
new Windowed<>(key, window),
next.value.value(),
next.value.timestamp()
);
builder.add(row);
}
return KsMaterializedQueryResult.rowIteratorWithPosition(
builder.build().iterator(), queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
}
|
@Test
@SuppressWarnings("unchecked")
public void shouldCloseIterator() {
// When:
final StateQueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> partitionResult = new StateQueryResult<>();
final QueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> result = QueryResult.forResult(fetchIterator);
result.setPosition(POSITION);
partitionResult.addResult(PARTITION, result);
when(kafkaStreams.query(any(StateQueryRequest.class))).thenReturn(partitionResult);
when(fetchIterator.hasNext()).thenReturn(false);
table.get(A_KEY, PARTITION, WINDOW_START_BOUNDS, WINDOW_END_BOUNDS);
// Then:
verify(fetchIterator).close();
}
|
@Override
public GroupAssignment assign(
GroupSpec groupSpec,
SubscribedTopicDescriber subscribedTopicDescriber
) throws PartitionAssignorException {
if (groupSpec.memberIds().isEmpty()) {
return new GroupAssignment(Collections.emptyMap());
} else if (groupSpec.subscriptionType() == SubscriptionType.HOMOGENEOUS) {
return assignHomogeneousGroup(groupSpec, subscribedTopicDescriber);
} else {
return assignHeterogeneousGroup(groupSpec, subscribedTopicDescriber);
}
}
|
@Test
public void testOneMemberNoTopic() {
SubscribedTopicDescriberImpl subscribedTopicMetadata = new SubscribedTopicDescriberImpl(
Collections.singletonMap(
topic1Uuid,
new TopicMetadata(
topic1Uuid,
topic1Name,
3,
Collections.emptyMap()
)
)
);
Map<String, MemberSubscriptionAndAssignmentImpl> members = Collections.singletonMap(
memberA,
new MemberSubscriptionAndAssignmentImpl(
Optional.empty(),
Optional.empty(),
Collections.emptySet(),
Assignment.EMPTY
)
);
GroupSpec groupSpec = new GroupSpecImpl(
members,
HOMOGENEOUS,
Collections.emptyMap()
);
GroupAssignment groupAssignment = assignor.assign(
groupSpec,
subscribedTopicMetadata
);
Map<String, MemberAssignment> expectedAssignment = Collections.singletonMap(
memberA,
new MemberAssignmentImpl(Collections.emptyMap())
);
assertEquals(expectedAssignment, groupAssignment.members());
}
|
public static byte[] getValue(byte[] raw) {
try (final Asn1InputStream is = new Asn1InputStream(raw)) {
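// Skip the leading tag and length octets and return only the value bytes.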
is.readTag();
return is.read(is.readLength());
}
}
|
@Test
public void getValueShouldSkipTagAndLength() {
assertArrayEquals(new byte[] { 0x31 }, Asn1Utils.getValue(new byte[] { 0x10, 1, 0x31}));
}
|
@VisibleForTesting
void startKsql(final KsqlConfig ksqlConfigWithPort) {
cleanupOldState();
initialize(ksqlConfigWithPort);
}
|
@Test
public void shouldRecordStartLatency() {
// When:
app.startKsql(ksqlConfig);
// Then:
long duration = Duration.between(start, Instant.now()).toMillis();
final Metric metric = metricCollectors.getMetrics().metric(
metricCollectors.getMetrics().metricName("startup-time-ms", "ksql-rest-application")
);
assertThat(metric, not(nullValue()));
// compare start time recorded to time around startKsql. add a second for clock jitter
assertThat((Double) metric.metricValue(), lessThanOrEqualTo((double) (duration + 1000)));
}
|
public static void smooth(PointList pointList, double maxElevationDelta) {
internSmooth(pointList, 0, pointList.size() - 1, maxElevationDelta);
}
|
@Test
public void smoothRamerNoMaximumFound() {
PointList pl2 = new PointList(3, true);
pl2.add(60.03307, 20.82262, 5.35);
pl2.add(60.03309, 20.82269, 5.42);
pl2.add(60.03307, 20.82262, 5.35);
EdgeElevationSmoothingRamer.smooth(pl2, 10);
assertEquals(3, pl2.size());
assertEquals(5.35, pl2.getEle(0), 0.01);
assertEquals(5.35, pl2.getEle(1), 0.01);
assertEquals(5.35, pl2.getEle(2), 0.01);
}
|
@Override
protected void doStart() throws Exception {
super.doStart();
LOG.debug("Creating connection to Azure ServiceBus");
client = getEndpoint().getServiceBusClientFactory().createServiceBusProcessorClient(getConfiguration(),
this::processMessage, this::processError);
client.start();
}
|
@Test
void synchronizationCallsExceptionHandlerOnFailure() throws Exception {
try (ServiceBusConsumer consumer = new ServiceBusConsumer(endpoint, processor)) {
consumer.setExceptionHandler(exceptionHandler);
consumer.doStart();
verify(client).start();
verify(clientFactory).createServiceBusProcessorClient(any(), any(), any());
when(messageContext.getMessage()).thenReturn(message);
processMessageCaptor.getValue().accept(messageContext);
verify(processor).process(any(Exchange.class), any(AsyncCallback.class));
Exchange exchange = exchangeCaptor.getValue();
assertThat(exchange).isNotNull();
final Exception testException = new Exception("Test exception");
exchange.setException(testException);
Synchronization synchronization = exchange.getExchangeExtension().handoverCompletions().get(0);
synchronization.onFailure(exchange);
verify(exceptionHandler).handleException(anyString(), eq(exchange), eq(testException));
}
}
|
public String toBaseMessageIdString(Object messageId) {
if (messageId == null) {
return null;
} else if (messageId instanceof String) {
String stringId = (String) messageId;
// If the given string has a type encoding prefix,
// we need to escape it as an encoded string (even if
// the existing encoding prefix was also for string)
if (hasTypeEncodingPrefix(stringId)) {
return AMQP_STRING_PREFIX + stringId;
} else {
return stringId;
}
} else if (messageId instanceof UUID) {
return AMQP_UUID_PREFIX + messageId.toString();
} else if (messageId instanceof UnsignedLong) {
return AMQP_ULONG_PREFIX + messageId.toString();
} else if (messageId instanceof Binary) {
ByteBuffer dup = ((Binary) messageId).asByteBuffer();
byte[] bytes = new byte[dup.remaining()];
dup.get(bytes);
String hex = convertBinaryToHexString(bytes);
return AMQP_BINARY_PREFIX + hex;
} else {
throw new IllegalArgumentException("Unsupported type provided: " + messageId.getClass());
}
}
|
@Test
public void testToBaseMessageIdStringWithStringBeginningWithEncodingPrefixForBinary() {
String binaryStringMessageId = AMQPMessageIdHelper.AMQP_BINARY_PREFIX + "0123456789ABCDEF";
String expected = AMQPMessageIdHelper.AMQP_STRING_PREFIX + binaryStringMessageId;
String baseMessageIdString = messageIdHelper.toBaseMessageIdString(binaryStringMessageId);
assertNotNull("null string should not have been returned", baseMessageIdString);
assertEquals("expected base id string was not returned", expected, baseMessageIdString);
}
|
public void isTrue() {
if (actual == null) {
isEqualTo(true); // fails
} else if (!actual) {
failWithoutActual(simpleFact("expected to be true"));
}
}
|
@Test
public void nullIsTrueFailing() {
expectFailureWhenTestingThat(null).isTrue();
assertFailureKeys("expected", "but was");
assertFailureValue("expected", "true");
assertFailureValue("but was", "null");
}
|
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
|
@Test
public void testPrivateFinishBundle() throws Exception {
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("finishBundle()");
thrown.expectMessage("Must be public");
thrown.expectMessage(getClass().getName() + "$");
DoFnSignatures.getSignature(
new DoFn<String, String>() {
@ProcessElement
public void processElement() {}
@FinishBundle
void finishBundle() {}
}.getClass());
}
|
public List<ScimGroupDto> findAll(DbSession dbSession) {
return mapper(dbSession).findAll();
}
|
@Test
void findAll_ifNoData_returnsEmptyList() {
assertThat(scimGroupDao.findAll(db.getSession())).isEmpty();
}
|
@Override
protected Map<String, Object> getPredictedValues(PMML4Result pmml4Result, DMNResult dmnr) {
Map<String, Object> toReturn = new HashMap<>();
String resultName = pmml4Result.getResultObjectName();
Object value = pmml4Result.getResultVariables().get(resultName);
toReturn.put(resultName, NumberEvalHelper.coerceNumber(value));
return toReturn;
}
|
@Test
void getPredictedValues() {
List<Object> values = getValues();
values.forEach(value -> {
PMML4Result result = getPMML4Result(value);
Map<String, Object> retrieved = dmnKiePMMLTrustyInvocationEvaluator.getPredictedValues(result, null);
assertThat(retrieved).containsKey(result.getResultObjectName());
Object retObject = retrieved.get(result.getResultObjectName());
Object expected = NumberEvalHelper.coerceNumber(value);
assertThat(retObject).isEqualTo(expected);
});
}
|
@Override
public Long createMailTemplate(MailTemplateSaveReqVO createReqVO) {
// Validate that the template code is unique
validateCodeUnique(null, createReqVO.getCode());
// Insert the template
MailTemplateDO template = BeanUtils.toBean(createReqVO, MailTemplateDO.class)
.setParams(parseTemplateContentParams(createReqVO.getContent()));
mailTemplateMapper.insert(template);
return template.getId();
}
|
@Test
public void testCreateMailTemplate_success() {
// Prepare the request parameters
MailTemplateSaveReqVO reqVO = randomPojo(MailTemplateSaveReqVO.class)
.setId(null); // prevent the id from being pre-assigned
// Invoke
Long mailTemplateId = mailTemplateService.createMailTemplate(reqVO);
// Assert
assertNotNull(mailTemplateId);
// Verify the attributes of the persisted record are correct
MailTemplateDO mailTemplate = mailTemplateMapper.selectById(mailTemplateId);
assertPojoEquals(reqVO, mailTemplate, "id");
}
|
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
log.info("Starting to validate internal topics {}.", topicConfigs.keySet());
final long now = time.milliseconds();
final long deadline = now + retryTimeoutMs;
final ValidationResult validationResult = new ValidationResult();
final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
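// Topics are checked on two tracks: descriptions (partition count) and configs (cleanup policy);
// loop until both sets drain or the retry deadline passes.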
while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
if (!topicDescriptionsStillToValidate.isEmpty()) {
final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topicDescriptionsStillToValidate);
descriptionsForTopic = describeTopicsResult.topicNameValues();
}
Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
if (!topicConfigsStillToValidate.isEmpty()) {
final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
topicConfigsStillToValidate.stream()
.map(topic -> new ConfigResource(Type.TOPIC, topic))
.collect(Collectors.toSet())
);
configsForTopic = describeConfigsResult.values().entrySet().stream()
.collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
}
while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
if (!descriptionsForTopic.isEmpty()) {
doValidateTopic(
validationResult,
descriptionsForTopic,
topicConfigs,
topicDescriptionsStillToValidate,
(streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide)
);
}
if (!configsForTopic.isEmpty()) {
doValidateTopic(
validationResult,
configsForTopic,
topicConfigs,
topicConfigsStillToValidate,
(streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide)
);
}
maybeThrowTimeoutException(
Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
deadline,
String.format("Could not validate internal topics within %d milliseconds. " +
"This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs)
);
if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
Utils.sleep(100);
}
}
maybeSleep(
Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
deadline,
"validated"
);
}
log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
return validationResult;
}
|
@Test
public void shouldValidateSuccessfullyWithEmptyInternalTopics() {
setupTopicInMockAdminClient(topic1, repartitionTopicConfig());
final ValidationResult validationResult = internalTopicManager.validate(Collections.emptyMap());
assertThat(validationResult.missingTopics(), empty());
assertThat(validationResult.misconfigurationsForTopics(), anEmptyMap());
}
|
protected static Number findMultiplicationPattern(BigDecimal[] numbers) {
if ( numbers == null || numbers.length < MIN_NUMBER_OF_RESTRICTIONS ) {
return null;
}
try {
BigDecimal gap;
Number missingNumber = null;
BigDecimal a = numbers[0];
BigDecimal b = numbers[1];
BigDecimal c = numbers[2];
BigDecimal d = numbers[3];
// Uses first four numbers to check if there is a pattern and to
// calculate the gap between them. One missing value is allowed.
if ( b.divide( a ).equals( c.divide( b ) ) ) {
gap = b.divide( a );
} else if ( c.divide( b ).equals( d.divide( c ) ) ) {
gap = c.divide( b );
} else if ( b.divide( a ).equals( d.divide( c ) ) ) {
gap = b.divide( a );
} else {
// No pattern found.
return null;
}
BigDecimal first;
BigDecimal second;
for ( int i = 0; i < (numbers.length - 1); i++ ) {
first = numbers[i];
second = numbers[i + 1];
if (!second.divide( first ).equals( gap )) {
if (missingNumber == null) {
missingNumber = first.multiply(gap);
} else {
// Happens if no pattern is found, or there is more than one
// missing number.
return null;
}
}
}
return missingNumber;
} catch ( Exception e ) {
return null;
}
}
|
@Test
void testFindMultiplicationPattern() {
// Multiplication
// *2 missing number 4
assertThat(FindMissingNumber.findMultiplicationPattern(
new BigDecimal[]{BigDecimal.valueOf(2),
BigDecimal.valueOf(8), BigDecimal.valueOf(16),
BigDecimal.valueOf(32), BigDecimal.valueOf(64)})
.doubleValue() == 4).isTrue();
// *17 missing number 383214
assertThat(FindMissingNumber.findMultiplicationPattern(
new BigDecimal[]{BigDecimal.valueOf(78),
BigDecimal.valueOf(1326), BigDecimal.valueOf(22542),
BigDecimal.valueOf(6514638)}).doubleValue() == 383214).isTrue();
// *1,23 missing number 2016.6957
assertThat(FindMissingNumber.findMultiplicationPattern(
new BigDecimal[]{BigDecimal.valueOf(1333),
BigDecimal.valueOf(1639.59),
BigDecimal.valueOf(2480.535711),
BigDecimal.valueOf(3051.05892453)}).doubleValue() == 2016.6957).isTrue();
// Division
// /2 (*0.5) missing number 128
assertThat(FindMissingNumber.findMultiplicationPattern(
new BigDecimal[]{BigDecimal.valueOf(256),
BigDecimal.valueOf(64), BigDecimal.valueOf(32),
BigDecimal.valueOf(16), BigDecimal.valueOf(8),
BigDecimal.valueOf(4), BigDecimal.valueOf(2)})
.doubleValue() == 128).isTrue();
// /10 (*0.1) missing number 1
assertThat(FindMissingNumber.findMultiplicationPattern(
new BigDecimal[]{BigDecimal.valueOf(10000),
BigDecimal.valueOf(1000), BigDecimal.valueOf(100),
BigDecimal.valueOf(10), BigDecimal.valueOf(0.1),
BigDecimal.valueOf(0.01)}).doubleValue() == 1).isTrue();
// Not valid
// Not in pattern.
assertThat(FindMissingNumber.findMultiplicationPattern(new BigDecimal[]{
BigDecimal.valueOf(111.2), BigDecimal.valueOf(3323),
BigDecimal.valueOf(234.434), BigDecimal.valueOf(44343),
BigDecimal.valueOf(434)}) == null).isTrue();
assertThat(FindMissingNumber.findMultiplicationPattern(new BigDecimal[]{
BigDecimal.valueOf(1), BigDecimal.valueOf(2),
BigDecimal.valueOf(3), BigDecimal.valueOf(4),
BigDecimal.valueOf(5), BigDecimal.valueOf(6),
BigDecimal.valueOf(7), BigDecimal.valueOf(5),
BigDecimal.valueOf(4), BigDecimal.valueOf(3),
BigDecimal.valueOf(2), BigDecimal.valueOf(1),
BigDecimal.valueOf(1), BigDecimal.valueOf(1)}) == null).isTrue();
}
|
@GetMapping("/export")
@RequiresPermissions("system:manager:exportConfig")
public ResponseEntity<byte[]> exportConfigs(final HttpServletResponse response) {
ShenyuAdminResult result = configsService.configsExport();
if (!Objects.equals(CommonErrorCode.SUCCESSFUL, result.getCode())) {
throw new ShenyuException(result.getMessage());
}
HttpHeaders headers = new HttpHeaders();
String fileName = generateFileName();
response.setHeader("Access-Control-Expose-Headers", "Content-Disposition");
headers.add("Content-Disposition", "attachment;filename=" + fileName);
return new ResponseEntity<>((byte[]) result.getData(), headers, HttpStatus.OK);
}
|
@Test
public void testExportConfigs() throws Exception {
when(this.configsService.configsExport()).thenReturn(
ShenyuAdminResult.success(ShenyuResultMessage.SUCCESS));
// Run the test
final MockHttpServletResponse response = mockMvc.perform(get("/configs/export")
.accept(MediaType.APPLICATION_JSON))
.andExpect(status().isOk())
.andReturn().getResponse();
// Verify the results
assertThat(response.getStatus()).isEqualTo(HttpStatus.OK.value());
}
|
public static String initNamespaceForNaming(NacosClientProperties properties) {
String tmpNamespace = null;
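// Resolution order: cloud (ans) tenant, ALIBABA_ALIWARE_NAMESPACE, the JVM 'namespace' property,
// the client 'namespace' property, then the default namespace id.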
String isUseCloudNamespaceParsing = properties.getProperty(PropertyKeyConst.IS_USE_CLOUD_NAMESPACE_PARSING,
properties.getProperty(SystemPropertyKeyConst.IS_USE_CLOUD_NAMESPACE_PARSING,
String.valueOf(Constants.DEFAULT_USE_CLOUD_NAMESPACE_PARSING)));
if (Boolean.parseBoolean(isUseCloudNamespaceParsing)) {
tmpNamespace = TenantUtil.getUserTenantForAns();
LogUtils.NAMING_LOGGER.info("initializer namespace from ans.namespace attribute : {}", tmpNamespace);
tmpNamespace = TemplateUtils.stringEmptyAndThenExecute(tmpNamespace, () -> {
String namespace = properties.getProperty(PropertyKeyConst.SystemEnv.ALIBABA_ALIWARE_NAMESPACE);
LogUtils.NAMING_LOGGER.info("initializer namespace from ALIBABA_ALIWARE_NAMESPACE attribute :" + namespace);
return namespace;
});
}
tmpNamespace = TemplateUtils.stringEmptyAndThenExecute(tmpNamespace, () -> {
String namespace = properties.getPropertyFrom(SourceType.JVM, PropertyKeyConst.NAMESPACE);
LogUtils.NAMING_LOGGER.info("initializer namespace from namespace attribute :" + namespace);
return namespace;
});
if (StringUtils.isEmpty(tmpNamespace)) {
tmpNamespace = properties.getProperty(PropertyKeyConst.NAMESPACE);
}
tmpNamespace = TemplateUtils.stringEmptyAndThenExecute(tmpNamespace, () -> UtilAndComs.DEFAULT_NAMESPACE_ID);
return tmpNamespace;
}
|
@Test
void testInitNamespaceFromDefaultNamespaceWithCloudParsing() {
final NacosClientProperties properties = NacosClientProperties.PROTOTYPE.derive();
properties.setProperty(PropertyKeyConst.IS_USE_CLOUD_NAMESPACE_PARSING, "true");
String actual = InitUtils.initNamespaceForNaming(properties);
assertEquals(UtilAndComs.DEFAULT_NAMESPACE_ID, actual);
}
|
public Set<MessageQueue> lockBatchMQ(
final String addr,
final LockBatchRequestBody requestBody,
final long timeoutMillis) throws RemotingException, MQBrokerException, InterruptedException {
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.LOCK_BATCH_MQ, new LockBatchMqRequestHeader());
request.setBody(requestBody.encode());
RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
request, timeoutMillis);
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
LockBatchResponseBody responseBody = LockBatchResponseBody.decode(response.getBody(), LockBatchResponseBody.class);
Set<MessageQueue> messageQueues = responseBody.getLockOKMQSet();
return messageQueues;
}
default:
break;
}
throw new MQBrokerException(response.getCode(), response.getRemark(), addr);
}
|
@Test
public void assertLockBatchMQ() throws RemotingException, InterruptedException, MQBrokerException {
mockInvokeSync();
LockBatchRequestBody responseBody = new LockBatchRequestBody();
setResponseBody(responseBody);
Set<MessageQueue> actual = mqClientAPI.lockBatchMQ(defaultBrokerAddr, responseBody, defaultTimeout);
assertNotNull(actual);
assertEquals(0, actual.size());
}
|
@Override
public int size() {
return partitionMaps.values().stream().mapToInt(Map::size).sum();
}
|
@Test
public void testSize() {
PartitionMap<String> map = PartitionMap.create(SPECS);
map.put(UNPARTITIONED_SPEC.specId(), null, "v1");
map.put(BY_DATA_SPEC.specId(), Row.of("aaa"), "v2");
map.put(BY_DATA_SPEC.specId(), Row.of("bbb"), "v3");
map.put(BY_DATA_CATEGORY_BUCKET_SPEC.specId(), Row.of("ccc", 2), "v4");
assertThat(map).isNotEmpty();
assertThat(map).hasSize(4);
}
|
public static String indexToColName(int index) {
if (index < 0) {
return null;
}
final StringBuilder colName = StrUtil.builder();
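// Build the Excel-style column name using bijective base-26 (0 -> "A", 25 -> "Z", 26 -> "AA").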
do {
if (colName.length() > 0) {
index--;
}
int remainder = index % 26;
colName.append((char) (remainder + 'A'));
index = (index - remainder) / 26;
} while (index > 0);
return colName.reverse().toString();
}
|
@Test
public void indexToColNameTest() {
assertEquals("A", ExcelUtil.indexToColName(0));
assertEquals("B", ExcelUtil.indexToColName(1));
assertEquals("C", ExcelUtil.indexToColName(2));
assertEquals("AA", ExcelUtil.indexToColName(26));
assertEquals("AB", ExcelUtil.indexToColName(27));
assertEquals("AC", ExcelUtil.indexToColName(28));
assertEquals("AAA", ExcelUtil.indexToColName(702));
assertEquals("AAB", ExcelUtil.indexToColName(703));
assertEquals("AAC", ExcelUtil.indexToColName(704));
}
|
static QueryId buildId(
final Statement statement,
final EngineContext engineContext,
final QueryIdGenerator idGenerator,
final OutputNode outputNode,
final boolean createOrReplaceEnabled,
final Optional<String> withQueryId) {
if (withQueryId.isPresent()) {
final String queryId = withQueryId.get().toUpperCase();
validateWithQueryId(queryId);
return new QueryId(queryId);
}
if (statement instanceof CreateTable && ((CreateTable) statement).isSource()) {
// Use the CST name as part of the QueryID
final String suffix = ((CreateTable) statement).getName().text().toUpperCase()
+ "_" + idGenerator.getNext().toUpperCase();
return new QueryId(ReservedQueryIdsPrefixes.CST + suffix);
}
if (!outputNode.getSinkName().isPresent()) {
final String prefix =
"transient_" + outputNode.getSource().getLeftmostSourceNode().getAlias().text() + "_";
return new QueryId(prefix + Math.abs(ThreadLocalRandom.current().nextLong()));
}
final KsqlStructuredDataOutputNode structured = (KsqlStructuredDataOutputNode) outputNode;
if (!structured.createInto()) {
return new QueryId(ReservedQueryIdsPrefixes.INSERT + idGenerator.getNext());
}
final SourceName sink = outputNode.getSinkName().get();
final Set<QueryId> queriesForSink = engineContext.getQueryRegistry().getQueriesWithSink(sink);
if (queriesForSink.size() > 1) {
throw new KsqlException("REPLACE for sink " + sink + " is not supported because there are "
+ "multiple queries writing into it: " + queriesForSink);
} else if (!queriesForSink.isEmpty()) {
if (!createOrReplaceEnabled) {
final String type = outputNode.getNodeOutputType().getKsqlType().toLowerCase();
throw new UnsupportedOperationException(
String.format(
"Cannot add %s '%s': A %s with the same name already exists",
type,
sink.text(),
type));
}
return Iterables.getOnlyElement(queriesForSink);
}
final String suffix = outputNode.getId().toString().toUpperCase()
+ "_" + idGenerator.getNext().toUpperCase();
return new QueryId(
outputNode.getNodeOutputType() == DataSourceType.KTABLE
? ReservedQueryIdsPrefixes.CTAS + suffix
: ReservedQueryIdsPrefixes.CSAS + suffix
);
}
|
@Test
public void shouldComputeQueryIdCorrectlyForNewSourceTable() {
// Given:
final CreateTable createTableStmt = mock(CreateTable.class);
when(createTableStmt.getName()).thenReturn(SourceName.of("FOO"));
when(createTableStmt.isSource()).thenReturn(true);
when(idGenerator.getNext()).thenReturn("1");
// When:
final QueryId queryId = QueryIdUtil.buildId(createTableStmt, engineContext, idGenerator, plan,
false, Optional.empty());
// Then:
assertThat(queryId, is(new QueryId("CST_FOO_1")));
}
|
@Override
public ObjectNode encode(KubevirtFloatingIp fip, CodecContext context) {
checkNotNull(fip, "Kubevirt floating IP cannot be null");
ObjectNode result = context.mapper().createObjectNode()
.put(ID, fip.id())
.put(ROUTER_NAME, fip.routerName())
.put(NETWORK_NAME, fip.networkName())
.put(FLOATING_IP, fip.floatingIp().toString());
if (fip.podName() != null) {
result.put(POD_NAME, fip.podName());
}
if (fip.vmName() != null) {
result.put(VM_NAME, fip.vmName());
}
if (fip.fixedIp() != null) {
result.put(FIXED_IP, fip.fixedIp().toString());
}
return result;
}
|
@Test
public void testKubevirtFloatingIpEncode() {
KubevirtFloatingIp floatingIp = DefaultKubevirtFloatingIp.builder()
.id("fip-id")
.routerName("router-1")
.networkName("flat-1")
.floatingIp(IpAddress.valueOf("10.10.10.10"))
.podName("pod-1")
.vmName("vm-1")
.fixedIp(IpAddress.valueOf("20.20.20.20"))
.build();
ObjectNode floatingIpJson = kubevirtFloatingIpCodec.encode(floatingIp, context);
assertThat(floatingIpJson, matchesKubevirtFloatingIp(floatingIp));
}
|
@Override
public V load(K key) {
awaitSuccessfulInit();
try (SqlResult queryResult = sqlService.execute(queries.load(), key)) {
Iterator<SqlRow> it = queryResult.iterator();
V value = null;
if (it.hasNext()) {
SqlRow sqlRow = it.next();
if (it.hasNext()) {
throw new IllegalStateException("multiple matching rows for a key " + key);
}
// If there is a single column as the value, return that column as the value
if (queryResult.getRowMetadata().getColumnCount() == 2 && genericMapStoreProperties.singleColumnAsValue) {
value = sqlRow.getObject(1);
} else {
//noinspection unchecked
value = (V) toGenericRecord(sqlRow, genericMapStoreProperties);
}
}
return value;
}
}
|
@Test
public void givenRow_whenLoad_thenReturnSingleColumn() {
ObjectSpec spec = objectProvider.createObject(mapName);
objectProvider.insertItems(spec, 1);
mapLoaderSingleColumn = createMapLoaderSingleColumn();
String name = mapLoaderSingleColumn.load(0);
assertThat(name).isEqualTo("name-0");
}
|
public Optional<Group> takeGroup(Set<Integer> rejectedGroups) {
synchronized (this) {
Optional<GroupStatus> best = scheduler.takeNextGroup(rejectedGroups);
if (best.isPresent()) {
GroupStatus gs = best.get();
gs.allocate();
Group ret = gs.group;
log.fine(() -> "Offering <" + ret + "> for query connection");
return Optional.of(ret);
} else {
return Optional.empty();
}
}
}
|
@Test
void requireThatLoadBalancerServesMultiGroupSetups() {
Node n1 = new Node("test", 0, "test-node1", 0);
Node n2 = new Node("test", 1, "test-node2", 1);
LoadBalancer lb = new LoadBalancer(List.of(new Group(0, List.of(n1)), new Group(1,List.of(n2))), LoadBalancer.Policy.ROUNDROBIN);
Optional<Group> grp = lb.takeGroup(null);
Group group = grp.orElseThrow(() -> new IllegalStateException("Expected a SearchCluster.Group"));
assertEquals(1, group.nodes().size());
}
|
public static Key of(String key, ApplicationId appId) {
return new StringKey(key, appId);
}
|
@Test
public void stringKeyCompare() {
Key stringKey1 = Key.of(KEY_1, NetTestTools.APP_ID);
Key copyOfStringKey1 = Key.of(KEY_1, NetTestTools.APP_ID);
Key stringKey2 = Key.of(KEY_2, NetTestTools.APP_ID);
Key copyOfStringKey2 = Key.of(KEY_2, NetTestTools.APP_ID);
Key stringKey3 = Key.of(KEY_3, NetTestTools.APP_ID);
Key copyOfStringKey3 = Key.of(KEY_3, NetTestTools.APP_ID);
assertThat(stringKey1, comparesEqualTo(copyOfStringKey1));
assertThat(stringKey1, lessThan(stringKey2));
assertThat(stringKey1, lessThan(stringKey3));
assertThat(stringKey2, greaterThan(stringKey1));
assertThat(stringKey2, comparesEqualTo(copyOfStringKey2));
assertThat(stringKey2, lessThan(stringKey3));
assertThat(stringKey3, greaterThan(stringKey1));
assertThat(stringKey3, greaterThan(stringKey2));
assertThat(stringKey3, comparesEqualTo(copyOfStringKey3));
}
|
@Override
protected int rsv(WebSocketFrame msg) {
return msg instanceof TextWebSocketFrame || msg instanceof BinaryWebSocketFrame?
msg.rsv() | WebSocketExtension.RSV1 : msg.rsv();
}
|
@Test
public void testFragmentedFrame() {
EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerMessageDeflateEncoder(9, 15, false,
NEVER_SKIP));
EmbeddedChannel decoderChannel = new EmbeddedChannel(
ZlibCodecFactory.newZlibDecoder(ZlibWrapper.NONE));
// initialize
byte[] payload1 = new byte[100];
random.nextBytes(payload1);
byte[] payload2 = new byte[100];
random.nextBytes(payload2);
byte[] payload3 = new byte[100];
random.nextBytes(payload3);
BinaryWebSocketFrame frame1 = new BinaryWebSocketFrame(false,
WebSocketExtension.RSV3,
Unpooled.wrappedBuffer(payload1));
ContinuationWebSocketFrame frame2 = new ContinuationWebSocketFrame(false,
WebSocketExtension.RSV3,
Unpooled.wrappedBuffer(payload2));
ContinuationWebSocketFrame frame3 = new ContinuationWebSocketFrame(true,
WebSocketExtension.RSV3,
Unpooled.wrappedBuffer(payload3));
// execute
assertTrue(encoderChannel.writeOutbound(frame1));
assertTrue(encoderChannel.writeOutbound(frame2));
assertTrue(encoderChannel.writeOutbound(frame3));
BinaryWebSocketFrame compressedFrame1 = encoderChannel.readOutbound();
ContinuationWebSocketFrame compressedFrame2 = encoderChannel.readOutbound();
ContinuationWebSocketFrame compressedFrame3 = encoderChannel.readOutbound();
// test
assertNotNull(compressedFrame1);
assertNotNull(compressedFrame2);
assertNotNull(compressedFrame3);
assertEquals(WebSocketExtension.RSV1 | WebSocketExtension.RSV3, compressedFrame1.rsv());
assertEquals(WebSocketExtension.RSV3, compressedFrame2.rsv());
assertEquals(WebSocketExtension.RSV3, compressedFrame3.rsv());
assertFalse(compressedFrame1.isFinalFragment());
assertFalse(compressedFrame2.isFinalFragment());
assertTrue(compressedFrame3.isFinalFragment());
assertTrue(decoderChannel.writeInbound(compressedFrame1.content()));
ByteBuf uncompressedPayload1 = decoderChannel.readInbound();
byte[] finalPayload1 = new byte[100];
uncompressedPayload1.readBytes(finalPayload1);
assertArrayEquals(finalPayload1, payload1);
uncompressedPayload1.release();
assertTrue(decoderChannel.writeInbound(compressedFrame2.content()));
ByteBuf uncompressedPayload2 = decoderChannel.readInbound();
byte[] finalPayload2 = new byte[100];
uncompressedPayload2.readBytes(finalPayload2);
assertArrayEquals(finalPayload2, payload2);
uncompressedPayload2.release();
assertTrue(decoderChannel.writeInbound(compressedFrame3.content()));
assertTrue(decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL.duplicate()));
ByteBuf uncompressedPayload3 = decoderChannel.readInbound();
byte[] finalPayload3 = new byte[100];
uncompressedPayload3.readBytes(finalPayload3);
assertArrayEquals(finalPayload3, payload3);
uncompressedPayload3.release();
}
|
@Override
public Note get(String noteId, String notePath, AuthenticationInfo subject) throws IOException {
BlobId blobId = makeBlobId(noteId, notePath);
byte[] contents;
try {
contents = storage.readAllBytes(blobId);
} catch (StorageException se) {
throw new IOException("Could not read " + blobId.toString() + ": " + se.getMessage(), se);
}
try {
return noteParser.fromJson(noteId, new String(contents, encoding));
} catch (CorruptedNoteException jpe) {
throw new IOException(
"Could not parse as json " + blobId.toString() + ": " + jpe.getMessage(), jpe);
}
}
|
@Test
void testGet_nonexistent() throws Exception {
zConf.setProperty(ConfVars.ZEPPELIN_NOTEBOOK_GCS_STORAGE_DIR.getVarName(), DEFAULT_URL);
this.notebookRepo = new GCSNotebookRepo(zConf, noteParser, storage);
assertThrows(IOException.class, () -> {
notebookRepo.get("id", "", AUTH_INFO);
});
}
|
@Override
public boolean isReachable(DeviceId deviceId) {
SnmpDevice snmpDevice = controller.getDevice(deviceId);
if (snmpDevice == null) {
log.warn("BAD REQUEST: the requested device id: "
+ deviceId.toString()
+ " is not associated to any SNMP Device");
return false;
}
return snmpDevice.isReachable();
}
|
@Test
public void eventNotRelevant() {
assertFalse("Event should not be relevant", provider.cfgLister.isRelevant(deviceAddedIrrelevantEvent));
assertFalse("Device should not be reachable", provider.isReachable(wrongDeviceId));
}
|
@RequestMapping("/error")
public ModelAndView handleError(HttpServletRequest request) {
Object status = request.getAttribute(RequestDispatcher.ERROR_STATUS_CODE);
ModelAndView modelAndView = new ModelAndView();
if (status != null) {
int statusCode = Integer.parseInt(status.toString());
if (statusCode == HttpStatus.NOT_FOUND.value()) {
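// Unknown routes are forwarded to the UI entry point with a 200 so the front-end app can handle them.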
modelAndView.setStatus(HttpStatus.OK);
modelAndView.setViewName("forward:/ui/index.html");
return modelAndView;
}
modelAndView.setStatus(HttpStatus.valueOf(statusCode));
}
return modelAndView;
}
|
@Test
void handleError_ReturnsModelAndViewWithNullStatus() {
HttpServletRequest request = mock(HttpServletRequest.class);
when(request.getAttribute(RequestDispatcher.ERROR_STATUS_CODE)).thenReturn(null);
ModelAndView modelAndView = new FrontendRedirector().handleError(request);
assertNull(modelAndView.getStatus());
assertNull(modelAndView.getViewName());
}
|
public static MySQLCommandPacket newInstance(final MySQLCommandPacketType commandPacketType, final MySQLPacketPayload payload,
final ConnectionSession connectionSession) {
switch (commandPacketType) {
case COM_QUIT:
return new MySQLComQuitPacket();
case COM_INIT_DB:
return new MySQLComInitDbPacket(payload);
case COM_FIELD_LIST:
return new MySQLComFieldListPacket(payload);
case COM_QUERY:
return new MySQLComQueryPacket(payload);
case COM_STMT_PREPARE:
return new MySQLComStmtPreparePacket(payload);
case COM_STMT_EXECUTE:
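// Peek the statement ID at the current reader index (without consuming it) to look up
// the prepared statement and its parameter count.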
MySQLServerPreparedStatement serverPreparedStatement =
connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(payload.getByteBuf().getIntLE(payload.getByteBuf().readerIndex()));
return new MySQLComStmtExecutePacket(payload, serverPreparedStatement.getSqlStatementContext().getSqlStatement().getParameterCount());
case COM_STMT_SEND_LONG_DATA:
return new MySQLComStmtSendLongDataPacket(payload);
case COM_STMT_RESET:
return new MySQLComStmtResetPacket(payload);
case COM_STMT_CLOSE:
return new MySQLComStmtClosePacket(payload);
case COM_SET_OPTION:
return new MySQLComSetOptionPacket(payload);
case COM_PING:
return new MySQLComPingPacket();
case COM_RESET_CONNECTION:
return new MySQLComResetConnectionPacket();
default:
return new MySQLUnsupportedCommandPacket(commandPacketType);
}
}
|
@Test
void assertNewInstanceWithComDaemonPacket() {
assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_DAEMON, payload, connectionSession), instanceOf(MySQLUnsupportedCommandPacket.class));
}
|
@Override
public List<AwsEndpoint> getClusterEndpoints() {
List<AwsEndpoint> result = new ArrayList<>();
EurekaHttpClient client = null;
try {
client = clientFactory.newClient();
EurekaHttpResponse<Applications> response = client.getVip(vipAddress);
if (validResponse(response)) {
Applications applications = response.getEntity();
if (applications != null) {
applications.shuffleInstances(true); // filter out non-UP instances
List<InstanceInfo> validInstanceInfos = applications.getInstancesByVirtualHostName(vipAddress);
for (InstanceInfo instanceInfo : validInstanceInfos) {
AwsEndpoint endpoint = ResolverUtils.instanceInfoToEndpoint(clientConfig, transportConfig, instanceInfo);
if (endpoint != null) {
result.add(endpoint);
}
}
logger.debug("Retrieved endpoint list {}", result);
return result;
}
}
} catch (Exception e) {
logger.error("Error contacting server for endpoints with vipAddress:{}", vipAddress, e);
} finally {
if (client != null) {
client.shutdown();
}
}
logger.info("Returning empty endpoint list");
return Collections.emptyList();
}
|
@Test
public void testErrorResponseFromRemoteServer() {
when(httpClient.getVip(vipAddress)).thenReturn(EurekaHttpResponse.anEurekaHttpResponse(500, (Applications)null).build());
List<AwsEndpoint> endpoints = resolver.getClusterEndpoints();
assertThat(endpoints.isEmpty(), is(true));
verify(httpClient, times(1)).shutdown();
}
|
public ServiceInfo processServiceInfo(String json) {
ServiceInfo serviceInfo = JacksonUtils.toObj(json, ServiceInfo.class);
serviceInfo.setJsonFromServer(json);
return processServiceInfo(serviceInfo);
}
|
@Test
void testProcessServiceInfo2() {
String json = "{\"groupName\":\"a\",\"name\":\"b\",\"clusters\":\"c\"}";
ServiceInfo actual = holder.processServiceInfo(json);
ServiceInfo expect = new ServiceInfo("a@@b@@c");
expect.setJsonFromServer(json);
assertEquals(expect.getKey(), actual.getKey());
}
|
@Override
public boolean add(E element) {
return add(element, element.hashCode());
}
|
@Test
public void testIsEmpty() {
final OAHashSet<Integer> set = new OAHashSet<>(8);
assertTrue("Set should be empty", set.isEmpty());
set.add(1);
assertFalse("Set should not be empty", set.isEmpty());
}
|
public String doLayout(ILoggingEvent event) {
if (!isStarted()) {
return CoreConstants.EMPTY_STRING;
}
return writeLoopOnConverters(event);
}
|
@Test
public void mdcWithDefaultValue() {
String pattern = "%msg %mdc{foo} %mdc{bar:-[null]}";
pl.setPattern(OptionHelper.substVars(pattern, lc));
pl.start();
MDC.put("foo", "foo");
try {
String val = pl.doLayout(getEventObject());
assertEquals("Some message foo [null]", val);
} finally {
MDC.remove("foo");
}
}
|
@Override
protected void parse(final ProtocolFactory protocols, final Local file) throws AccessDeniedException {
try (final BufferedReader in = new BufferedReader(new InputStreamReader(file.getInputStream(), StandardCharsets.UTF_8))) {
Host current = null;
String line;
while((line = in.readLine()) != null) {
if(line.startsWith("[")) {
if(current != null) {
this.add(current);
}
current = new Host(protocols.forScheme(Scheme.ftp));
current.getCredentials().setUsername(
PreferencesFactory.get().getProperty("connection.login.anon.name"));
Pattern pattern = Pattern.compile("\\[(.*)\\]");
Matcher matcher = pattern.matcher(line);
if(matcher.matches()) {
current.setNickname(matcher.group(1));
}
}
else {
if(null == current) {
log.warn("Failed to detect start of bookmark");
continue;
}
final Scanner scanner = new Scanner(line);
scanner.useDelimiter("=");
if(!scanner.hasNext()) {
log.warn("Missing key in line:" + line);
continue;
}
final String name = scanner.next().toLowerCase(Locale.ROOT);
if(!scanner.hasNext()) {
log.warn("Missing value in line:" + line);
continue;
}
final String value = scanner.next();
switch(name) {
case "host":
current.setHostname(value);
break;
case "directory":
current.setDefaultPath(value);
break;
case "username":
current.getCredentials().setUsername(value);
break;
default:
log.warn(String.format("Ignore property %s", name));
break;
}
}
}
if(current != null) {
this.add(current);
}
}
catch(IOException e) {
throw new AccessDeniedException(e.getMessage(), e);
}
}
|
@Test(expected = AccessDeniedException.class)
public void testParseNotFound() throws Exception {
new TotalCommanderBookmarkCollection().parse(new ProtocolFactory(Collections.emptySet()), new Local(System.getProperty("java.io.tmpdir"), "f"));
}
|
public static <K, V> AsMultimap<K, V> asMultimap() {
return new AsMultimap<>(false);
}
|
@Test
@Category(ValidatesRunner.class)
public void testMultimapAsEntrySetSideInput() {
final PCollectionView<Map<String, Iterable<Integer>>> view =
pipeline
.apply(
"CreateSideInput",
Create.of(KV.of("a", 1), KV.of("a", 1), KV.of("a", 2), KV.of("b", 3)))
.apply(View.asMultimap());
PCollection<KV<String, Integer>> output =
pipeline
.apply("CreateMainInput", Create.of(2 /* size */))
.apply(
"OutputSideInputs",
ParDo.of(
new DoFn<Integer, KV<String, Integer>>() {
@ProcessElement
public void processElement(ProcessContext c) {
assertEquals((int) c.element(), c.sideInput(view).size());
assertEquals((int) c.element(), c.sideInput(view).entrySet().size());
for (Entry<String, Iterable<Integer>> entry :
c.sideInput(view).entrySet()) {
for (Integer value : entry.getValue()) {
c.output(KV.of(entry.getKey(), value));
}
}
}
})
.withSideInputs(view));
PAssert.that(output)
.containsInAnyOrder(KV.of("a", 1), KV.of("a", 1), KV.of("a", 2), KV.of("b", 3));
pipeline.run();
}
|
public String formatSourceAndFixImports(String input) throws FormatterException {
input = ImportOrderer.reorderImports(input, options.style());
input = RemoveUnusedImports.removeUnusedImports(input);
String formatted = formatSource(input);
formatted = StringWrapper.wrap(formatted, this);
return formatted;
}
|
@Test
public void importsFixedIfRequested() throws FormatterException {
String input =
"package com.google.example;\n"
+ UNORDERED_IMPORTS
+ "\npublic class ExampleTest {\n"
+ " @Nullable List<?> xs;\n"
+ "}\n";
String output = new Formatter().formatSourceAndFixImports(input);
String expect =
"package com.google.example;\n\n"
+ "import java.util.List;\n"
+ "import javax.annotation.Nullable;\n\n"
+ "public class ExampleTest {\n"
+ " @Nullable List<?> xs;\n"
+ "}\n";
assertThat(output).isEqualTo(expect);
}
|
@Override
public ClientHttpResponse intercept(HttpRequest request, byte[] body,
ClientHttpRequestExecution execution) throws IOException {
if (traceContext.tracer() != null) {
request = traceContext.tracer().inject(request);
}
ClientHttpResponse response = execution.execute(request, body);
return response;
}
|
@Test
public void testInterceptor() throws IOException {
ClientHttpRequestExecution execution = Mockito.mock(ClientHttpRequestExecution.class);
HttpRequest request = Mockito.mock(HttpRequest.class);
byte[] body = new byte[]{};
ApolloAuditTracer tracer = Mockito.mock(ApolloAuditTracer.class);
HttpRequest mockInjected = Mockito.mock(HttpRequest.class);
Mockito.when(traceContext.tracer()).thenReturn(tracer);
Mockito.when(tracer.inject(Mockito.eq(request)))
.thenReturn(mockInjected);
interceptor.intercept(request, body, execution);
Mockito.verify(execution, Mockito.times(1))
.execute(Mockito.eq(mockInjected), Mockito.eq(body));
}
|
@Nonnull
public static List<JetSqlRow> evaluate(
@Nullable Expression<Boolean> predicate,
@Nullable List<Expression<?>> projection,
@Nonnull Stream<JetSqlRow> rows,
@Nonnull ExpressionEvalContext context
) {
return rows
.map(row -> evaluate(predicate, projection, row, context))
.filter(Objects::nonNull)
.collect(Collectors.toList());
}
|
@Test
public void test_evaluateWithPredicate() {
List<Object[]> rows = asList(new Object[]{0, "a"}, new Object[]{1, "b"}, new Object[]{2, "c"});
Expression<Boolean> predicate = new FunctionalPredicateExpression(row -> {
int value = row.get(0);
return value != 1;
});
List<JetSqlRow> evaluated = ExpressionUtil.evaluate(predicate, null, rows.stream().map(v -> new JetSqlRow(TEST_SS, v)), createExpressionEvalContext());
assertThat(toList(evaluated, JetSqlRow::getValues)).containsExactly(new Object[]{0, "a"}, new Object[]{2, "c"});
}
|
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
try {
if (statement.getStatement() instanceof CreateAsSelect) {
registerForCreateAs((ConfiguredStatement<? extends CreateAsSelect>) statement);
} else if (statement.getStatement() instanceof CreateSource) {
registerForCreateSource((ConfiguredStatement<? extends CreateSource>) statement);
}
} catch (final KsqlStatementException e) {
throw e;
} catch (final KsqlException e) {
throw new KsqlStatementException(
ErrorMessageUtil.buildErrorMessage(e),
statement.getMaskedStatementText(),
e.getCause());
}
// Remove schema id from SessionConfig
return stripSchemaIdConfig(statement);
}
|
@Test
public void shouldPropagateErrorOnFailureToPlanQuery() {
// Given:
givenStatement("CREATE STREAM sink WITH(value_format='AVRO') AS SELECT * FROM SOURCE;");
doThrow(new KsqlException("fail!")).when(executionSandbox).plan(any(), eq(statement));
// When:
final Exception e = assertThrows(
KsqlStatementException.class,
() -> injector.inject(statement)
);
// Then:
assertThat(e.getMessage(), containsString(
"Could not determine output schema for query due to error: fail!"));
}
|
static CommandLineOptions parse(Iterable<String> options) {
CommandLineOptions.Builder optionsBuilder = CommandLineOptions.builder();
List<String> expandedOptions = new ArrayList<>();
expandParamsFiles(options, expandedOptions);
Iterator<String> it = expandedOptions.iterator();
while (it.hasNext()) {
String option = it.next();
if (!option.startsWith("-")) {
optionsBuilder.filesBuilder().add(option).addAll(it);
break;
}
String flag;
String value;
int idx = option.indexOf('=');
if (idx >= 0) {
flag = option.substring(0, idx);
value = option.substring(idx + 1);
} else {
flag = option;
value = null;
}
// NOTE: update usage information in UsageException when new flags are added
switch (flag) {
case "-i":
case "-r":
case "-replace":
case "--replace":
optionsBuilder.inPlace(true);
break;
case "--lines":
case "-lines":
case "--line":
case "-line":
parseRangeSet(optionsBuilder.linesBuilder(), getValue(flag, it, value));
break;
case "--offset":
case "-offset":
optionsBuilder.addOffset(parseInteger(it, flag, value));
break;
case "--length":
case "-length":
optionsBuilder.addLength(parseInteger(it, flag, value));
break;
case "--aosp":
case "-aosp":
case "-a":
optionsBuilder.aosp(true);
break;
case "--version":
case "-version":
case "-v":
optionsBuilder.version(true);
break;
case "--help":
case "-help":
case "-h":
optionsBuilder.help(true);
break;
case "--fix-imports-only":
optionsBuilder.fixImportsOnly(true);
break;
case "--skip-sorting-imports":
optionsBuilder.sortImports(false);
break;
case "--skip-removing-unused-imports":
optionsBuilder.removeUnusedImports(false);
break;
case "--skip-reflowing-long-strings":
optionsBuilder.reflowLongStrings(false);
break;
case "--skip-javadoc-formatting":
optionsBuilder.formatJavadoc(false);
break;
case "-":
optionsBuilder.stdin(true);
break;
case "-n":
case "--dry-run":
optionsBuilder.dryRun(true);
break;
case "--set-exit-if-changed":
optionsBuilder.setExitIfChanged(true);
break;
case "-assume-filename":
case "--assume-filename":
optionsBuilder.assumeFilename(getValue(flag, it, value));
break;
default:
throw new IllegalArgumentException("unexpected flag: " + flag);
}
}
return optionsBuilder.build();
}
|
@Test
public void stdin() {
assertThat(CommandLineOptionsParser.parse(Arrays.asList("-")).stdin()).isTrue();
}
|
public static JsonNode buildTypeSchemaFromJson(String jsonText) throws IOException {
return buildTypeSchemaFromJson(JsonSchemaValidator.getJsonNode(jsonText));
}
|
@Test
void testBuildTypeSchemaFromJson() {
JsonNode jsonNode = null;
ObjectMapper mapper = new ObjectMapper();
String jsonText = "{\"foo\": \"bar\", \"flag\": true, " + "\"list\": [1, 2], " + "\"obj\": {\"fizz\": \"bar\"}, "
+ "\"objList\": [{\"number\": 1}, {\"number\": 2}]}";
String expectedSchema = "{\"type\":\"object\",\"properties\":{\"foo\":{\"type\":\"string\"},\"flag\":{\"type\":\"boolean\"},\"list\":{\"type\":\"array\",\"items\":{\"type\":\"number\"}},\"obj\":{\"type\":\"object\",\"properties\":{\"fizz\":{\"type\":\"string\"}}},\"objList\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"number\":{\"type\":\"number\"}}}}}}";
try {
jsonNode = mapper.readTree(jsonText);
} catch (Exception e) {
fail("Exception should not occur");
}
JsonNode schemaNode = OpenAPISchemaBuilder.buildTypeSchemaFromJson(jsonNode);
try {
String actual = mapper.writeValueAsString(schemaNode);
assertEquals(expectedSchema, actual);
} catch (JsonProcessingException e) {
fail("No exception should be thrown");
}
}
|
@Override
public Iterator<IndexKeyEntries> getSqlRecordIteratorBatch(@Nonnull Comparable value, boolean descending) {
return getSqlRecordIteratorBatch(value, descending, null);
}
|
@Test
public void getSqlRecordIteratorBatchLeftExcludedRightIncludedAscending() {
var expectedKeyOrder = List.of(1, 4, 7);
var result = store.getSqlRecordIteratorBatch(0, false, 1, true, false);
assertResult(expectedKeyOrder, result);
}
|
public boolean createJob(BusinessJobConfig jobConfig) {
        if (null == jobConfig) {
            log.info("Parameter jobConfig must not be null!");
            return false;
        } else if (!StringUtils.isBlank(jobConfig.getServiceName()) && !StringUtils.isBlank(jobConfig.getMethodName()) && !StringUtils.isBlank(jobConfig.getCronExpression())) {
            if (null == jobConfig.getParams()) {
                jobConfig.setParams(new HashMap<>());
            }
            if (this.jobManager.createJob(jobConfig)) {
                log.info("Job created successfully -> service name:" + jobConfig.getServiceName() + " method name:" + jobConfig.getMethodName() + " cron expression:" + jobConfig.getCronExpression() + " id:" + jobConfig.getId());
                return true;
            } else {
                log.info("Job creation failed -> job already exists, service name:" + jobConfig.getServiceName() + " method name:" + jobConfig.getMethodName() + " id:" + jobConfig.getId());
                return false;
            }
        } else {
            log.info("Job creation failed because the service name, method name or cron expression is blank. Service name:" + jobConfig.getServiceName() + " method name:" + jobConfig.getMethodName() + " cron expression:" + jobConfig.getCronExpression() + " id:" + jobConfig.getId());
            return false;
        }
    }
|
@Test
public void should_create_job() throws Exception {
BusinessJobConfig jobConfig = new BusinessJobConfig();
jobConfig.setServiceName("testApp");
jobConfig.setMethodName("run");
jobConfig.setId("testApp");
jobConfig.setCronExpression("0/5 * * * * ?");
jobConfig.setStatus("1");
jobConfig.setRunAsAdmin(false);
Map<String, Object> params = new HashMap<>();
params.put("jobName", "testApp");
params.put("messageId", "testApp");
jobConfig.setParams(params);
jobService.createJob(jobConfig);
for (int i = 0; i < 10; i++) {
Thread.sleep(10000);
}
}
|
@Override
    public OAuth2AccessTokenDO refreshAccessToken(String refreshToken, String clientId) {
        // Look up the refresh token
        OAuth2RefreshTokenDO refreshTokenDO = oauth2RefreshTokenMapper.selectByRefreshToken(refreshToken);
        if (refreshTokenDO == null) {
            throw exception0(GlobalErrorCodeConstants.BAD_REQUEST.getCode(), "Invalid refresh token");
        }
        // Verify that the client matches
        OAuth2ClientDO clientDO = oauth2ClientService.validOAuthClientFromCache(clientId);
        if (ObjectUtil.notEqual(clientId, refreshTokenDO.getClientId())) {
            throw exception0(GlobalErrorCodeConstants.BAD_REQUEST.getCode(), "Client id of the refresh token does not match");
        }
        // Remove the associated access tokens
        List<OAuth2AccessTokenDO> accessTokenDOs = oauth2AccessTokenMapper.selectListByRefreshToken(refreshToken);
        if (CollUtil.isNotEmpty(accessTokenDOs)) {
            oauth2AccessTokenMapper.deleteBatchIds(convertSet(accessTokenDOs, OAuth2AccessTokenDO::getId));
            oauth2AccessTokenRedisDAO.deleteList(convertSet(accessTokenDOs, OAuth2AccessTokenDO::getAccessToken));
        }
        // If the refresh token has expired, delete it
        if (DateUtils.isExpired(refreshTokenDO.getExpiresTime())) {
            oauth2RefreshTokenMapper.deleteById(refreshTokenDO.getId());
            throw exception0(GlobalErrorCodeConstants.UNAUTHORIZED.getCode(), "Refresh token has expired");
        }
        // Create a new access token
        return createOAuth2AccessToken(refreshTokenDO, clientDO);
    }
|
@Test
    public void testRefreshAccessToken_null() {
        // Prepare parameters
        String refreshToken = randomString();
        String clientId = randomString();
        // Mock methods (none required)
        // Invoke and assert
        assertServiceException(() -> oauth2TokenService.refreshAccessToken(refreshToken, clientId),
                new ErrorCode(400, "Invalid refresh token"));
    }
|
public static double shuffleCompressionRatio(
SparkSession spark, FileFormat outputFileFormat, String outputCodec) {
if (outputFileFormat == FileFormat.ORC || outputFileFormat == FileFormat.PARQUET) {
return columnarCompression(shuffleCodec(spark), outputCodec);
} else if (outputFileFormat == FileFormat.AVRO) {
return rowBasedCompression(shuffleCodec(spark), outputCodec);
} else {
return 1.0;
}
}
|
@Test
public void testNullFileCodec() {
configureShuffle("lz4", true);
double ratio1 = shuffleCompressionRatio(PARQUET, null);
assertThat(ratio1).isEqualTo(2.0);
double ratio2 = shuffleCompressionRatio(ORC, null);
assertThat(ratio2).isEqualTo(2.0);
double ratio3 = shuffleCompressionRatio(AVRO, null);
assertThat(ratio3).isEqualTo(1.0);
}
|
protected void validateMessageType(Type expected) {
if (expected != messageType) {
throw new IllegalArgumentException("Message type is expected to be "
+ expected + " but got " + messageType);
}
}
|
@Test(expected = IllegalArgumentException.class)
public void testValidateMessageException() {
RpcMessage msg = getRpcMessage(0, RpcMessage.Type.RPC_CALL);
msg.validateMessageType(RpcMessage.Type.RPC_REPLY);
}
|
public int doWork()
{
final long nowNs = nanoClock.nanoTime();
cachedNanoClock.update(nowNs);
dutyCycleTracker.measureAndUpdate(nowNs);
final int workCount = commandQueue.drain(CommandProxy.RUN_TASK, Configuration.COMMAND_DRAIN_LIMIT);
final long shortSendsBefore = shortSends.get();
final int bytesSent = doSend(nowNs);
int bytesReceived = 0;
if (0 == bytesSent ||
++dutyCycleCounter >= dutyCycleRatio ||
(controlPollDeadlineNs - nowNs < 0) ||
shortSendsBefore < shortSends.get())
{
bytesReceived = controlTransportPoller.pollTransports();
dutyCycleCounter = 0;
controlPollDeadlineNs = nowNs + statusMessageReadTimeoutNs;
}
if (reResolutionCheckIntervalNs > 0 && (reResolutionDeadlineNs - nowNs) < 0)
{
reResolutionDeadlineNs = nowNs + reResolutionCheckIntervalNs;
controlTransportPoller.checkForReResolutions(nowNs, conductorProxy);
}
return workCount + bytesSent + bytesReceived;
}
|
@Test
void shouldNotBeAbleToSendAfterUsingUpYourWindow()
{
final UnsafeBuffer buffer = new UnsafeBuffer(ByteBuffer.allocateDirect(PAYLOAD.length));
buffer.putBytes(0, PAYLOAD);
appendUnfragmentedMessage(rawLog, 0, INITIAL_TERM_ID, 0, headerWriter, buffer, 0, PAYLOAD.length);
final StatusMessageFlyweight msg = mock(StatusMessageFlyweight.class);
when(msg.consumptionTermId()).thenReturn(INITIAL_TERM_ID);
when(msg.consumptionTermOffset()).thenReturn(0);
when(msg.receiverWindowLength()).thenReturn(ALIGNED_FRAME_LENGTH);
publication.onStatusMessage(msg, rcvAddress, mockDriverConductorProxy);
sender.doWork();
assertThat(receivedFrames.size(), is(1));
dataHeader.wrap(new UnsafeBuffer(receivedFrames.remove()));
assertThat(dataHeader.frameLength(), is(FRAME_LENGTH));
assertThat(dataHeader.termId(), is(INITIAL_TERM_ID));
assertThat(dataHeader.streamId(), is(STREAM_ID));
assertThat(dataHeader.sessionId(), is(SESSION_ID));
assertThat(dataHeader.termOffset(), is(offsetOfMessage(1)));
assertThat(dataHeader.headerType(), is(HeaderFlyweight.HDR_TYPE_DATA));
assertThat(dataHeader.flags(), is(DataHeaderFlyweight.BEGIN_AND_END_FLAGS));
assertThat(dataHeader.version(), is((short)HeaderFlyweight.CURRENT_VERSION));
appendUnfragmentedMessage(rawLog, 0, INITIAL_TERM_ID, 0, headerWriter, buffer, 0, PAYLOAD.length);
sender.doWork();
assertThat(receivedFrames.size(), is(0));
}
|
@Override
public HttpResponseOutputStream<StorageObject> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final S3Object object = this.getDetails(file, status);
final DelayedHttpEntityCallable<StorageObject> command = new DelayedHttpEntityCallable<StorageObject>(file) {
@Override
public StorageObject call(final HttpEntity entity) throws BackgroundException {
try {
final RequestEntityRestStorageService client = session.getClient();
final Path bucket = containerService.getContainer(file);
client.putObjectWithRequestEntityImpl(
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), object, entity, status.getParameters());
if(log.isDebugEnabled()) {
log.debug(String.format("Saved object %s with checksum %s", file, object.getETag()));
}
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Upload {0} failed", e, file);
}
return object;
}
@Override
public long getContentLength() {
return status.getLength();
}
};
return this.write(file, status, command);
}
|
@Test
public void testWritePublicReadCannedAcl() throws Exception {
final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory));
final Path test = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final TransferStatus status = new TransferStatus();
final byte[] content = RandomUtils.nextBytes(1033);
status.setChecksum(new SHA256ChecksumCompute().compute(new ByteArrayInputStream(content), status));
status.setLength(content.length);
status.setAcl(Acl.CANNED_PUBLIC_READ);
final HttpResponseOutputStream<StorageObject> out = new S3WriteFeature(session, new S3AccessControlListFeature(session)).write(test, status, new DisabledConnectionCallback());
new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out);
out.close();
assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(test));
assertTrue(new S3AccessControlListFeature(session)
.getPermission(test).asList().contains(new Acl.UserAndRole(new Acl.GroupUser(Acl.GroupUser.EVERYONE), new Acl.Role(Acl.Role.READ))));
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public static <T> List<T> list(boolean isLinked) {
return ListUtil.list(isLinked);
}
|
@Test
public void listTest2() {
final List<String> list1 = CollUtil.list(false, "a", "b", "c");
final List<String> list2 = CollUtil.list(true, "a", "b", "c");
assertEquals("[a, b, c]", list1.toString());
assertEquals("[a, b, c]", list2.toString());
}
|
@Override
public StatusOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
try {
if(!session.getClient().setFileType(FTPClient.BINARY_FILE_TYPE)) {
throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString());
}
final OutputStream out = new DataConnectionActionExecutor(session).data(new DataConnectionAction<OutputStream>() {
@Override
public OutputStream execute() throws BackgroundException {
try {
if(status.isAppend()) {
if(!status.isExists()) {
log.warn(String.format("Allocate %d bytes for file %s", status.getOffset(), file));
session.getClient().allocate((int) status.getOffset());
}
return session.getClient().appendFileStream(file.getAbsolute());
}
else {
return session.getClient().storeFileStream(file.getAbsolute());
}
}
catch(IOException e) {
throw new FTPExceptionMappingService().map(e);
}
}
});
return new ReadReplyOutputStream(out, status);
}
catch(IOException e) {
throw new FTPExceptionMappingService().map("Upload {0} failed", e, file);
}
}
|
@Test
public void testWriteContentRange() throws Exception {
final FTPWriteFeature feature = new FTPWriteFeature(session);
final Path test = new Path(new FTPWorkdirService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final byte[] content = RandomUtils.nextBytes(64000);
{
final TransferStatus status = new TransferStatus();
status.setLength(1024L);
final OutputStream out = feature.write(test, status, new DisabledConnectionCallback());
// Write first 1024
new StreamCopier(status, status).withOffset(0L).withLimit(status.getLength()).transfer(new ByteArrayInputStream(content), out);
out.close();
}
assertTrue(new DefaultFindFeature(session).find(test));
assertEquals(1024L, new DefaultAttributesFinderFeature(session).find(test).getSize());
{
// Remaining chunked transfer with offset
final TransferStatus status = new TransferStatus();
status.setLength(content.length - 1024L);
status.setOffset(1024L);
status.setAppend(true);
final OutputStream out = feature.write(test, status, new DisabledConnectionCallback());
new StreamCopier(status, status).withOffset(status.getOffset()).withLimit(status.getLength()).transfer(new ByteArrayInputStream(content), out);
out.close();
}
final ByteArrayOutputStream out = new ByteArrayOutputStream(content.length);
IOUtils.copy(new FTPReadFeature(session).read(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()), out);
assertArrayEquals(content, out.toByteArray());
new FTPDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@VisibleForTesting
static Optional<DoubleRange> calculateRange(Type type, List<HiveColumnStatistics> columnStatistics)
{
if (!isRangeSupported(type)) {
return Optional.empty();
}
return columnStatistics.stream()
.map(statistics -> createRange(type, statistics))
.filter(Optional::isPresent)
.map(Optional::get)
.reduce(DoubleRange::union);
}
|
@Test
public void testCalculateRange()
{
assertEquals(calculateRange(VARCHAR, ImmutableList.of()), Optional.empty());
assertEquals(calculateRange(VARCHAR, ImmutableList.of(integerRange(OptionalLong.empty(), OptionalLong.empty()))), Optional.empty());
assertEquals(calculateRange(VARCHAR, ImmutableList.of(integerRange(1, 2))), Optional.empty());
assertEquals(calculateRange(BIGINT, ImmutableList.of(integerRange(1, 2))), Optional.of(new DoubleRange(1, 2)));
assertEquals(calculateRange(BIGINT, ImmutableList.of(integerRange(Long.MIN_VALUE, Long.MAX_VALUE))), Optional.of(new DoubleRange(Long.MIN_VALUE, Long.MAX_VALUE)));
assertEquals(calculateRange(INTEGER, ImmutableList.of(integerRange(Long.MIN_VALUE, Long.MAX_VALUE))), Optional.of(new DoubleRange(Integer.MIN_VALUE, Integer.MAX_VALUE)));
assertEquals(calculateRange(SMALLINT, ImmutableList.of(integerRange(Long.MIN_VALUE, Long.MAX_VALUE))), Optional.of(new DoubleRange(Short.MIN_VALUE, Short.MAX_VALUE)));
assertEquals(calculateRange(TINYINT, ImmutableList.of(integerRange(Long.MIN_VALUE, Long.MAX_VALUE))), Optional.of(new DoubleRange(Byte.MIN_VALUE, Byte.MAX_VALUE)));
assertEquals(calculateRange(BIGINT, ImmutableList.of(integerRange(1, 5), integerRange(3, 7))), Optional.of(new DoubleRange(1, 7)));
assertEquals(calculateRange(BIGINT, ImmutableList.of(integerRange(OptionalLong.empty(), OptionalLong.empty()), integerRange(3, 7))), Optional.of(new DoubleRange(3, 7)));
assertEquals(calculateRange(BIGINT, ImmutableList.of(integerRange(OptionalLong.empty(), OptionalLong.of(8)), integerRange(3, 7))), Optional.of(new DoubleRange(3, 7)));
assertEquals(calculateRange(DOUBLE, ImmutableList.of(integerRange(1, 2))), Optional.empty());
assertEquals(calculateRange(REAL, ImmutableList.of(integerRange(1, 2))), Optional.empty());
assertEquals(calculateRange(DOUBLE, ImmutableList.of(doubleRange(OptionalDouble.empty(), OptionalDouble.empty()))), Optional.empty());
assertEquals(calculateRange(DOUBLE, ImmutableList.of(doubleRange(0.1, 0.2))), Optional.of(new DoubleRange(0.1, 0.2)));
assertEquals(calculateRange(BIGINT, ImmutableList.of(doubleRange(0.1, 0.2))), Optional.empty());
assertEquals(calculateRange(DOUBLE, ImmutableList.of(doubleRange(0.1, 0.2), doubleRange(0.15, 0.25))), Optional.of(new DoubleRange(0.1, 0.25)));
assertEquals(calculateRange(REAL, ImmutableList.of(doubleRange(0.1, 0.2), doubleRange(0.15, 0.25))), Optional.of(new DoubleRange(0.1, 0.25)));
assertEquals(calculateRange(REAL, ImmutableList.of(doubleRange(OptionalDouble.empty(), OptionalDouble.of(0.2)), doubleRange(0.15, 0.25))), Optional.of(new DoubleRange(0.15, 0.25)));
assertEquals(calculateRange(DOUBLE, ImmutableList.of(doubleRange(NaN, 0.2))), Optional.empty());
assertEquals(calculateRange(DOUBLE, ImmutableList.of(doubleRange(0.1, NaN))), Optional.empty());
assertEquals(calculateRange(DOUBLE, ImmutableList.of(doubleRange(NaN, NaN))), Optional.empty());
assertEquals(calculateRange(DOUBLE, ImmutableList.of(doubleRange(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY))), Optional.of(new DoubleRange(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY)));
assertEquals(calculateRange(REAL, ImmutableList.of(doubleRange(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY))), Optional.of(new DoubleRange(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY)));
assertEquals(calculateRange(DOUBLE, ImmutableList.of(doubleRange(Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY))), Optional.of(new DoubleRange(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY)));
assertEquals(calculateRange(DOUBLE, ImmutableList.of(doubleRange(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY), doubleRange(0.1, 0.2))), Optional.of(new DoubleRange(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY)));
assertEquals(calculateRange(DATE, ImmutableList.of(doubleRange(0.1, 0.2))), Optional.empty());
assertEquals(calculateRange(DATE, ImmutableList.of(dateRange("1970-01-01", "1970-01-02"))), Optional.of(new DoubleRange(0, 1)));
assertEquals(calculateRange(DATE, ImmutableList.of(dateRange(Optional.empty(), Optional.empty()))), Optional.empty());
assertEquals(calculateRange(DATE, ImmutableList.of(dateRange(Optional.of("1970-01-01"), Optional.empty()))), Optional.empty());
assertEquals(calculateRange(DATE, ImmutableList.of(dateRange("1970-01-01", "1970-01-05"), dateRange("1970-01-03", "1970-01-07"))), Optional.of(new DoubleRange(0, 6)));
assertEquals(calculateRange(DECIMAL, ImmutableList.of(doubleRange(0.1, 0.2))), Optional.empty());
assertEquals(calculateRange(DECIMAL, ImmutableList.of(decimalRange(BigDecimal.valueOf(1), BigDecimal.valueOf(5)))), Optional.of(new DoubleRange(1, 5)));
assertEquals(calculateRange(DECIMAL, ImmutableList.of(decimalRange(Optional.empty(), Optional.empty()))), Optional.empty());
assertEquals(calculateRange(DECIMAL, ImmutableList.of(decimalRange(Optional.of(BigDecimal.valueOf(1)), Optional.empty()))), Optional.empty());
assertEquals(calculateRange(DECIMAL, ImmutableList.of(decimalRange(BigDecimal.valueOf(1), BigDecimal.valueOf(5)), decimalRange(BigDecimal.valueOf(3), BigDecimal.valueOf(7)))), Optional.of(new DoubleRange(1, 7)));
}
|
@Override
public boolean rejoinNeededOrPending() {
if (!subscriptions.hasAutoAssignedPartitions())
return false;
// we need to rejoin if we performed the assignment and metadata has changed;
        // also drop any owned-but-no-longer-existing partitions as lost
if (assignmentSnapshot != null && !assignmentSnapshot.matches(metadataSnapshot)) {
final String fullReason = String.format("cached metadata has changed from %s at the beginning of the rebalance to %s",
assignmentSnapshot, metadataSnapshot);
requestRejoinIfNecessary("cached metadata has changed", fullReason);
return true;
}
// we need to join if our subscription has changed since the last join
if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) {
final String fullReason = String.format("subscription has changed from %s at the beginning of the rebalance to %s",
joinedSubscription, subscriptions.subscription());
requestRejoinIfNecessary("subscription has changed", fullReason);
return true;
}
return super.rejoinNeededOrPending();
}
|
@Test
public void testPatternJoinGroupFollower() {
final Set<String> subscription = Utils.mkSet(topic1, topic2);
final List<TopicPartition> owned = Collections.emptyList();
final List<TopicPartition> assigned = Arrays.asList(t1p, t2p);
subscriptions.subscribe(Pattern.compile("test.*"), Optional.of(rebalanceListener));
        // partially update the metadata with one topic first,
        // letting the leader refresh the metadata during assignment
client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic1, 1)));
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// normal join group
client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
client.prepareResponse(body -> {
SyncGroupRequest sync = (SyncGroupRequest) body;
return sync.data().memberId().equals(consumerId) &&
sync.data().generationId() == 1 &&
sync.groupAssignments().isEmpty();
}, syncGroupResponse(assigned, Errors.NONE));
        // expect the client to force a metadata update; when it does, give it both topics
client.prepareMetadataUpdate(metadataResponse);
coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
assertFalse(coordinator.rejoinNeededOrPending());
assertEquals(assigned.size(), subscriptions.numAssignedPartitions());
assertEquals(subscription, subscriptions.subscription());
assertEquals(0, rebalanceListener.revokedCount);
assertNull(rebalanceListener.revoked);
assertEquals(1, rebalanceListener.assignedCount);
assertEquals(getAdded(owned, assigned), rebalanceListener.assigned);
}
|
public static IntrinsicMapTaskExecutor withSharedCounterSet(
List<Operation> operations,
CounterSet counters,
ExecutionStateTracker executionStateTracker) {
return new IntrinsicMapTaskExecutor(operations, counters, executionStateTracker);
}
|
@Test
public void testExceptionInStartAbortsAllOperations() throws Exception {
Operation o1 = Mockito.mock(Operation.class);
Operation o2 = Mockito.mock(Operation.class);
Operation o3 = Mockito.mock(Operation.class);
Mockito.doThrow(new Exception("in start")).when(o2).start();
ExecutionStateTracker stateTracker = ExecutionStateTracker.newForTest();
try (IntrinsicMapTaskExecutor executor =
IntrinsicMapTaskExecutor.withSharedCounterSet(
Arrays.<Operation>asList(o1, o2, o3), counterSet, stateTracker)) {
executor.execute();
fail("Should have thrown");
} catch (Exception e) {
InOrder inOrder = Mockito.inOrder(o1, o2, o3);
inOrder.verify(o3).start();
inOrder.verify(o2).start();
// Order of abort doesn't matter
Mockito.verify(o1).abort();
Mockito.verify(o2).abort();
Mockito.verify(o3).abort();
Mockito.verifyNoMoreInteractions(o1, o2, o3);
}
}
|
public DecoderResult decode(AztecDetectorResult detectorResult) throws FormatException {
ddata = detectorResult;
BitMatrix matrix = detectorResult.getBits();
boolean[] rawbits = extractBits(matrix);
CorrectedBitsResult correctedBits = correctBits(rawbits);
byte[] rawBytes = convertBoolArrayToByteArray(correctedBits.correctBits);
String result = getEncodedData(correctedBits.correctBits);
DecoderResult decoderResult =
new DecoderResult(rawBytes, result, null, String.format("%d%%", correctedBits.ecLevel));
decoderResult.setNumBits(correctedBits.correctBits.length);
decoderResult.setErrorsCorrected(correctedBits.errorsCorrected);
return decoderResult;
}
|
@Test
public void testAztecResult() throws FormatException {
BitMatrix matrix = BitMatrix.parse(
"X X X X X X X X X X X X X X \n" +
"X X X X X X X X X X X X X X X \n" +
" X X X X X X X X X X X X \n" +
" X X X X X X X X X X \n" +
" X X X X X X X X \n" +
" X X X X X X X X X X X X X X X X X X \n" +
" X X X X X X X X X \n" +
" X X X X X X X X X X X X X X X X X \n" +
" X X X X X X X X X \n" +
" X X X X X X X X X X X X X X X X \n" +
" X X X X X X X X X X X X \n" +
" X X X X X X X X X X X \n" +
" X X X X X X X X X X X X \n" +
" X X X X X X X X X X X X X X X X X \n" +
"X X X X X X X X X X X \n" +
" X X X X X X X X X X X X X X \n" +
" X X X X X X X X \n" +
" X X X X X X X X X X X X X X X X X X X \n" +
"X X X X X X X X X \n" +
"X X X X X X X X X X X X X X X \n" +
"X X X X X X X X X X X X \n" +
"X X X X X X X X X X X X X X \n" +
" X X X X X X X X X X X X X \n",
"X ", " ");
AztecDetectorResult r = new AztecDetectorResult(matrix, NO_POINTS, false, 30, 2);
DecoderResult result = new Decoder().decode(r);
assertEquals("88888TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT", result.getText());
assertArrayEquals(
new byte[] {-11, 85, 85, 117, 107, 90, -42, -75, -83, 107,
90, -42, -75, -83, 107, 90, -42, -75, -83, 107,
90, -42, -80},
result.getRawBytes());
assertEquals(180, result.getNumBits());
}
|