focal_method | test_case
---|---
@Override
public EntityExcerpt createExcerpt(NotificationDto nativeEntity) {
return EntityExcerpt.builder()
.id(ModelId.of(nativeEntity.id()))
.type(ModelTypes.NOTIFICATION_V1)
.title(nativeEntity.title())
.build();
}
|
@Test
@MongoDBFixtures("NotificationFacadeTest.json")
public void createExcerpt() {
final Optional<NotificationDto> notificationDto = notificationService.get(
"5d4d33753d27460ad18e0c4d");
assertThat(notificationDto).isPresent();
final EntityExcerpt excerpt = facade.createExcerpt(notificationDto.get());
assertThat(excerpt.title()).isEqualTo("title");
assertThat(excerpt.id()).isEqualTo(ModelId.of("5d4d33753d27460ad18e0c4d"));
assertThat(excerpt.type()).isEqualTo(ModelTypes.NOTIFICATION_V1);
}
|
@Override
public void checkBeforeUpdate(final CreateBroadcastTableRuleStatement sqlStatement) {
ShardingSpherePreconditions.checkNotEmpty(database.getResourceMetaData().getStorageUnits(), () -> new EmptyStorageUnitException(database.getName()));
if (!sqlStatement.isIfNotExists()) {
checkDuplicate(sqlStatement);
}
}
|
@Test
void assertCheckSQLStatementWithEmptyStorageUnit() {
BroadcastRuleConfiguration currentConfig = mock(BroadcastRuleConfiguration.class);
when(currentConfig.getTables()).thenReturn(Collections.singleton("t_address"));
ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
when(database.getResourceMetaData().getStorageUnits()).thenReturn(Collections.emptyMap());
executor.setDatabase(database);
BroadcastRule rule = mock(BroadcastRule.class);
when(rule.getConfiguration()).thenReturn(currentConfig);
executor.setRule(rule);
CreateBroadcastTableRuleStatement sqlStatement = new CreateBroadcastTableRuleStatement(false, Collections.singleton("t_address"));
assertThrows(EmptyStorageUnitException.class, () -> executor.checkBeforeUpdate(sqlStatement));
}
|
static void handleJvmOptions(String[] args, String lsJavaOpts) {
final JvmOptionsParser parser = new JvmOptionsParser(args[0]);
final String jvmOpts = args.length == 2 ? args[1] : null;
try {
Optional<Path> jvmOptions = parser.lookupJvmOptionsFile(jvmOpts);
parser.handleJvmOptions(jvmOptions, lsJavaOpts);
} catch (JvmOptionsFileParserException pex) {
System.err.printf(Locale.ROOT,
"encountered [%d] error%s parsing [%s]",
pex.invalidLines().size(),
pex.invalidLines().size() == 1 ? "" : "s",
pex.jvmOptionsFile());
int errorCounter = 0;
for (final Map.Entry<Integer, String> entry : pex.invalidLines().entrySet()) {
errorCounter++;
System.err.printf(Locale.ROOT,
"[%d]: encountered improperly formatted JVM option in [%s] on line number [%d]: [%s]",
errorCounter,
pex.jvmOptionsFile(),
entry.getKey(),
entry.getValue());
}
} catch (IOException ex) {
System.err.println("Error accessing jvm.options file");
System.exit(1);
}
}
|
@Test
public void test_LS_JAVA_OPTS_isUsedWhenNoJvmOptionsIsAvailable() {
JvmOptionsParser.handleJvmOptions(new String[] {temp.toString()}, "-Xblabla");
// Verify
final String output = outputStreamCaptor.toString();
assertTrue("Output MUST contains the options present in LS_JAVA_OPTS", output.contains("-Xblabla"));
}
|
@Override
public void pre(SpanAdapter span, Exchange exchange, Endpoint endpoint) {
super.pre(span, exchange, endpoint);
span.setTag(TagConstants.MESSAGE_BUS_DESTINATION, getDestination(exchange, endpoint));
String messageId = getMessageId(exchange);
if (messageId != null) {
span.setTag(TagConstants.MESSAGE_ID, messageId);
}
}
|
@Test
public void testPreMessageBusDestination() {
Endpoint endpoint = Mockito.mock(Endpoint.class);
Mockito.when(endpoint.getEndpointUri()).thenReturn("jms://MyQueue?hello=world");
SpanDecorator decorator = new AbstractMessagingSpanDecorator() {
@Override
public String getComponent() {
return null;
}
@Override
public String getComponentClassName() {
return null;
}
};
MockSpanAdapter span = new MockSpanAdapter();
decorator.pre(span, null, endpoint);
assertEquals("MyQueue", span.tags().get(TagConstants.MESSAGE_BUS_DESTINATION));
}
|
static TypeName buildTypeName(String typeDeclaration) throws ClassNotFoundException {
return buildTypeName(typeDeclaration, false);
}
|
@Test
public void testBuildTypeName() throws Exception {
assertEquals(buildTypeName("uint256"), (ClassName.get(Uint256.class)));
assertEquals(buildTypeName("uint64"), (ClassName.get(Uint64.class)));
assertEquals(buildTypeName("string"), (ClassName.get(Utf8String.class)));
assertEquals(
buildTypeName("uint256[]"),
(ParameterizedTypeName.get(DynamicArray.class, Uint256.class)));
assertEquals(
buildTypeName("uint256[] storage"),
(ParameterizedTypeName.get(DynamicArray.class, Uint256.class)));
assertEquals(
buildTypeName("uint256[] memory"),
(ParameterizedTypeName.get(DynamicArray.class, Uint256.class)));
assertEquals(
buildTypeName("uint256[10]"),
(ParameterizedTypeName.get(StaticArray10.class, Uint256.class)));
assertEquals(
buildTypeName("uint256[33]"),
(ParameterizedTypeName.get(StaticArray.class, Uint256.class)));
assertEquals(
buildTypeName("uint256[10][3]"),
(ParameterizedTypeName.get(
ClassName.get(StaticArray3.class),
ParameterizedTypeName.get(StaticArray10.class, Uint256.class))));
assertEquals(
buildTypeName("uint256[2][]"),
(ParameterizedTypeName.get(
ClassName.get(DynamicArray.class),
ParameterizedTypeName.get(StaticArray2.class, Uint256.class))));
assertEquals(
buildTypeName("uint256[33][]"),
(ParameterizedTypeName.get(
ClassName.get(DynamicArray.class),
ParameterizedTypeName.get(StaticArray.class, Uint256.class))));
assertEquals(
buildTypeName("uint256[][]"),
(ParameterizedTypeName.get(
ClassName.get(DynamicArray.class),
ParameterizedTypeName.get(DynamicArray.class, Uint256.class))));
}
|
public static String getSystemInformation() {
String ret = System.getProperty("java.vendor");
ret += " " + System.getProperty("java.version");
ret += " on " + System.getProperty("os.name");
ret += " " + System.getProperty("os.version");
return ret;
}
|
@Test
public void testGetSystemInformation() {
String result = Tools.getSystemInformation();
assertTrue(result.trim().length() > 0);
}
|
@Override
public KsMaterializedQueryResult<WindowedRow> get(
final GenericKey key,
final int partition,
final Range<Instant> windowStartBounds,
final Range<Instant> windowEndBounds,
final Optional<Position> position
) {
try {
final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
final WindowKeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query =
WindowKeyQuery.withKeyAndWindowStartRange(key, lower, upper);
StateQueryRequest<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> request =
inStore(stateStore.getStateStoreName()).withQuery(query);
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final KafkaStreams streams = stateStore.getKafkaStreams();
final StateQueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> result =
streams.query(request);
final QueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> queryResult =
result.getPartitionResults().get(partition);
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
}
if (queryResult.getResult() == null) {
return KsMaterializedQueryResult.rowIteratorWithPosition(
Collections.emptyIterator(), queryResult.getPosition());
}
try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it
= queryResult.getResult()) {
final Builder<WindowedRow> builder = ImmutableList.builder();
while (it.hasNext()) {
final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
final Instant windowStart = Instant.ofEpochMilli(next.key);
if (!windowStartBounds.contains(windowStart)) {
continue;
}
final Instant windowEnd = windowStart.plus(windowSize);
if (!windowEndBounds.contains(windowEnd)) {
continue;
}
final TimeWindow window =
new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
final WindowedRow row = WindowedRow.of(
stateStore.schema(),
new Windowed<>(key, window),
next.value.value(),
next.value.timestamp()
);
builder.add(row);
}
return KsMaterializedQueryResult.rowIteratorWithPosition(
builder.build().iterator(), queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
}
|
@Test
@SuppressWarnings("unchecked")
public void shouldCloseIterator_fetchAll() {
// When:
final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>>> partitionResult = new StateQueryResult<>();
final QueryResult<KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>>> queryResult = QueryResult.forResult(keyValueIterator);
queryResult.setPosition(POSITION);
partitionResult.addResult(PARTITION, queryResult);
when(kafkaStreams.query(any(StateQueryRequest.class))).thenReturn(partitionResult);
when(keyValueIterator.hasNext()).thenReturn(false);
Streams.stream((table.get(PARTITION, WINDOW_START_BOUNDS, WINDOW_END_BOUNDS)
.getRowIterator()))
.collect(Collectors.toList());
// Then:
verify(keyValueIterator).close();
}
|
public static <T> Read<T> readMessage() {
return new AutoValue_JmsIO_Read.Builder<T>()
.setMaxNumRecords(Long.MAX_VALUE)
.setCloseTimeout(DEFAULT_CLOSE_TIMEOUT)
.setRequiresDeduping(false)
.build();
}
|
@Test
public void testReadBytesMessages() throws Exception {
long count = 1L;
// produce message
produceTestMessages(count, JmsIOTest::createBytesMessage);
// read from the queue
PCollection<String> output =
pipeline.apply(
JmsIO.<String>readMessage()
.withConnectionFactory(connectionFactory)
.withQueue(QUEUE)
.withUsername(USERNAME)
.withPassword(PASSWORD)
.withMaxNumRecords(1)
.withCoder(SerializableCoder.of(String.class))
.withMessageMapper(new CommonJms.BytesMessageToStringMessageMapper()));
PAssert.thatSingleton(output.apply("Count", Count.<String>globally())).isEqualTo(count);
pipeline.run();
assertQueueIsEmpty();
}
|
@Override
public void collectSizeStats(StateObjectSizeStatsCollector collector) {
streamSubHandles().forEach(handle -> handle.collectSizeStats(collector));
}
|
@Test
void testCollectSizeStats() {
IncrementalRemoteKeyedStateHandle handle = create(ThreadLocalRandom.current());
StateObject.StateObjectSizeStatsCollector statsCollector =
StateObject.StateObjectSizeStatsCollector.create();
handle.collectSizeStats(statsCollector);
Assertions.assertEquals(
new HashMap<StateObject.StateObjectLocation, Long>() {
{
// Location is LOCAL_MEMORY even though the handle is called "remote",
// because we test against a local file system
put(StateObject.StateObjectLocation.LOCAL_MEMORY, handle.getStateSize());
}
},
statsCollector.getStats());
}
|
public static boolean regionMatches(
CharSequence expected, CharSequence input, int beginIndex, int endIndex) {
if (expected == null) throw new NullPointerException("expected == null");
if (input == null) throw new NullPointerException("input == null");
int regionLength = regionLength(input.length(), beginIndex, endIndex);
if (expected.length() > regionLength) return false;
for (int i = 0, inputIndex = beginIndex; i < regionLength; i++, inputIndex++) {
if (expected.charAt(i) != input.charAt(inputIndex)) return false;
}
return true;
}
|
@Test void regionMatches_badParameters() {
assertThatThrownBy(() -> CharSequences.regionMatches(null, "b3", 0, 0))
.isInstanceOf(NullPointerException.class)
.hasMessage("expected == null");
assertThatThrownBy(() -> CharSequences.regionMatches("b3", null, 0, 0))
.isInstanceOf(NullPointerException.class)
.hasMessage("input == null");
assertThatThrownBy(() -> CharSequences.regionMatches("b3", "a", -1, 1))
.isInstanceOf(IndexOutOfBoundsException.class)
.hasMessage("beginIndex < 0");
assertThatThrownBy(() -> CharSequences.regionMatches("b3", "a", 0, -1))
.isInstanceOf(IndexOutOfBoundsException.class)
.hasMessage("endIndex < 0");
assertThatThrownBy(() -> CharSequences.regionMatches("b3", "a", 1, 0))
.isInstanceOf(IndexOutOfBoundsException.class)
.hasMessage("beginIndex > endIndex");
assertThatThrownBy(() -> CharSequences.regionMatches("b3", "a", 0, 2))
.isInstanceOf(IndexOutOfBoundsException.class)
.hasMessage("endIndex > input");
}
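A few illustrative positive and negative cases (hypothetical additions in the same helper and AssertJ style; not part of the original suite):
@Test void regionMatches_goodParameters() {
    assertThat(CharSequences.regionMatches("b3", "b3=1,es=2", 0, 2)).isTrue();
    assertThat(CharSequences.regionMatches("es", "b3=1,es=2", 5, 7)).isTrue();
    assertThat(CharSequences.regionMatches("b3", "b3=1,es=2", 5, 7)).isFalse(); // 'e' != 'b'
}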
|
public ExitStatus(Options options) {
this.options = options;
}
|
@Test
void wip_with_skipped_scenarios() {
createNonWipExitStatus();
bus.send(testCaseFinishedWithStatus(Status.SKIPPED));
assertThat(exitStatus.exitStatus(), is(equalTo((byte) 0x0)));
}
|
@Override
public Optional<String> canUpgradeTo(final DataSource other) {
final List<String> issues = PROPERTIES.stream()
.filter(prop -> !prop.isCompatible(this, other))
.map(prop -> getCompatMessage(other, prop))
.collect(Collectors.toList());
checkSchemas(getSchema(), other.getSchema())
.map(s -> getCompatMessage(other, SCHEMA_PROP) + ". (" + s + ")")
.ifPresent(issues::add);
final String err = String.join("\n\tAND ", issues);
return err.isEmpty() ? Optional.empty() : Optional.of(err);
}
|
@Test
public void shouldEnforceSameTopic() {
// Given:
final KsqlStream<String> streamA = new KsqlStream<>(
"sql",
SourceName.of("A"),
SOME_SCHEMA,
Optional.empty(),
true,
topic,
false
);
final KsqlStream<String> streamB = new KsqlStream<>(
"sql",
SourceName.of("A"),
SOME_SCHEMA,
Optional.empty(),
true,
topic2,
false
);
// When:
final Optional<String> err = streamA.canUpgradeTo(streamB);
// Then:
assertThat(err.isPresent(), is(true));
assertThat(err.get(), containsString("has topic = topic which is not upgradeable to topic2"));
}
|
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
return getMapReturnTypes(mapInterface, inType, null, false);
}
|
@SuppressWarnings({"unchecked", "rawtypes"})
@Test
void testMultiDimensionalArray() {
// tuple array
MapFunction<?, ?> function =
new MapFunction<Tuple2<Integer, Double>[][], Tuple2<Integer, Double>[][]>() {
private static final long serialVersionUID = 1L;
@Override
public Tuple2<Integer, Double>[][] map(Tuple2<Integer, Double>[][] value)
throws Exception {
return null;
}
};
TypeInformation<?> ti =
TypeExtractor.getMapReturnTypes(
(MapFunction) function,
TypeInformation.of(new TypeHint<Tuple2<Integer, Double>[][]>() {}));
assertThat(ti.toString())
.isEqualTo(
"ObjectArrayTypeInfo<ObjectArrayTypeInfo<Java Tuple2<Integer, Double>>>");
// primitive array
function =
new MapFunction<int[][][], int[][][]>() {
private static final long serialVersionUID = 1L;
@Override
public int[][][] map(int[][][] value) throws Exception {
return null;
}
};
ti =
TypeExtractor.getMapReturnTypes(
(MapFunction) function, TypeInformation.of(new TypeHint<int[][][]>() {}));
assertThat(ti).hasToString("ObjectArrayTypeInfo<ObjectArrayTypeInfo<int[]>>");
// basic array
function =
new MapFunction<Integer[][][], Integer[][][]>() {
private static final long serialVersionUID = 1L;
@Override
public Integer[][][] map(Integer[][][] value) throws Exception {
return null;
}
};
ti =
TypeExtractor.getMapReturnTypes(
(MapFunction) function,
TypeInformation.of(new TypeHint<Integer[][][]>() {}));
assertThat(ti.toString())
.isEqualTo("ObjectArrayTypeInfo<ObjectArrayTypeInfo<BasicArrayTypeInfo<Integer>>>");
// pojo array
function =
new MapFunction<CustomType[][][], CustomType[][][]>() {
private static final long serialVersionUID = 1L;
@Override
public CustomType[][][] map(CustomType[][][] value) throws Exception {
return null;
}
};
ti =
TypeExtractor.getMapReturnTypes(
(MapFunction) function,
TypeInformation.of(new TypeHint<CustomType[][][]>() {}));
assertThat(ti.toString())
.isEqualTo(
"ObjectArrayTypeInfo<ObjectArrayTypeInfo<ObjectArrayTypeInfo<"
+ "PojoType<org.apache.flink.api.java.typeutils.TypeExtractorTest$CustomType, fields = [myField1: String, myField2: Integer]>"
+ ">>>");
// generic array
ti =
TypeExtractor.getMapReturnTypes(
(MapFunction) new MapperWithMultiDimGenericArray<String>(),
TypeInformation.of(new TypeHint<String[][][]>() {}));
assertThat(ti.toString())
.isEqualTo(
"ObjectArrayTypeInfo<ObjectArrayTypeInfo<ObjectArrayTypeInfo<Java Tuple1<String>>>>");
}
|
@Override
public void execute(EventNotificationContext ctx) throws EventNotificationException {
final SlackEventNotificationConfig config = (SlackEventNotificationConfig) ctx.notificationConfig();
LOG.debug("SlackEventNotification backlog size in method execute is [{}]", config.backlogSize());
try {
SlackMessage slackMessage = createSlackMessage(ctx, config);
slackClient.send(slackMessage, config.webhookUrl());
} catch (JsonProcessingException ex) {
String errorMessage = String.format(Locale.ENGLISH, "Error serializing Slack message object while sending the SlackEventNotification :: %s", ex.getMessage());
LOG.error(errorMessage, ex);
final Notification systemNotification = notificationService.buildNow()
.addNode(nodeId.getNodeId())
.addType(Notification.Type.GENERIC)
.addSeverity(Notification.Severity.URGENT)
.addDetail("title", "SlackEventNotification Failed")
.addDetail("description", errorMessage);
notificationService.publishIfFirst(systemNotification);
throw new EventNotificationException("There was an error serializing the Slack message object when sending the SlackEventNotification", ex);
} catch (TemporaryEventNotificationException exp) {
//scheduler needs to retry a TemporaryEventNotificationException
throw exp;
} catch (PermanentEventNotificationException exp) {
String errorMessage = String.format(Locale.ENGLISH, "Error sending the SlackEventNotification :: %s", exp.getMessage());
final Notification systemNotification = notificationService.buildNow()
.addNode(nodeId.getNodeId())
.addType(Notification.Type.GENERIC)
.addSeverity(Notification.Severity.URGENT)
.addDetail("title", "SlackEventNotification Failed")
.addDetail("description", errorMessage);
notificationService.publishIfFirst(systemNotification);
throw exp;
} catch (Exception exp) {
throw new EventNotificationException("There was an exception triggering the SlackEventNotification", exp);
}
}
|
@Test(expected = EventNotificationException.class)
public void executeWithNullEventTimerange() throws EventNotificationException {
EventNotificationContext yetAnotherContext = getEventNotificationContextToSimulateNullPointerException();
assertThat(yetAnotherContext.event().timerangeStart().isPresent()).isFalse();
assertThat(yetAnotherContext.event().timerangeEnd().isPresent()).isFalse();
assertThat(yetAnotherContext.notificationConfig().type()).isEqualTo(SlackEventNotificationConfig.TYPE_NAME);
slackEventNotification.execute(yetAnotherContext);
}
|
@VisibleForTesting
String normalizeArchitecture(String architecture) {
// Create mapping based on https://docs.docker.com/engine/install/#supported-platforms
if (architecture.equals("x86_64")) {
return "amd64";
} else if (architecture.equals("aarch64")) {
return "arm64";
}
return architecture;
}
|
@Test
public void testNormalizeArchitecture_arm() {
assertThat(stepsRunner.normalizeArchitecture("arm")).isEqualTo("arm");
}
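The test above only exercises the pass-through branch; a hypothetical companion test (same stepsRunner under test) would pin down the two mapped values from the Docker platform table:
@Test
public void testNormalizeArchitecture_mapped() {
    assertThat(stepsRunner.normalizeArchitecture("x86_64")).isEqualTo("amd64");
    assertThat(stepsRunner.normalizeArchitecture("aarch64")).isEqualTo("arm64");
}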
|
public GrantDTO ensure(GRN grantee, Capability capability, GRN target, String creatorUsername) {
final List<GrantDTO> existingGrants = getForTargetAndGrantee(target, grantee);
if (existingGrants.isEmpty()) {
return create(grantee, capability, target, creatorUsername);
}
// This should never happen
Preconditions.checkState(existingGrants.size() == 1);
final GrantDTO grantDTO = existingGrants.get(0);
// Only upgrade capabilities: VIEW < MANAGE < OWNER
if (capability.priority() > grantDTO.capability().priority()) {
final GrantDTO grantUpdate = grantDTO.toBuilder().capability(capability).build();
return save(grantUpdate);
}
return grantDTO;
}
|
@Test
@MongoDBFixtures("grants.json")
public void ensure() {
final GRN jane = grnRegistry.parse("grn::::user:jane");
final GRN stream1 = grnRegistry.parse("grn::::stream:54e3deadbeefdeadbeef0000");
final GRN newStream = grnRegistry.parse("grn::::stream:54e3deadbeefdeadbeef0888");
// Matches existing grant. Returns original
final GrantDTO stream1Grant = dbService.getForTargetAndGrantee(stream1, jane).get(0);
GrantDTO result = dbService.ensure(jane, Capability.VIEW, stream1, "admin");
assertThat(result).isEqualTo(stream1Grant);
// Updates to a higher capability
result = dbService.ensure(jane, Capability.MANAGE, stream1, "admin");
assertThat(result.capability()).isEqualTo(Capability.MANAGE);
// Don't downgrade to a lower capability
result = dbService.ensure(jane, Capability.VIEW, stream1, "admin");
assertThat(result.capability()).isEqualTo(Capability.MANAGE);
// Create a new grant
assertThat(dbService.ensure(jane, Capability.MANAGE, newStream, "admin")).isNotNull();
assertThat(dbService.getForTarget(newStream)).satisfies(grantDTOS -> {
assertThat(grantDTOS.size()).isEqualTo(1);
assertThat(grantDTOS.get(0).grantee()).isEqualTo(jane);
assertThat(grantDTOS.get(0).capability()).isEqualTo(Capability.MANAGE);
assertThat(grantDTOS.get(0).target()).isEqualTo(newStream);
});
}
|
@Override
public long getReadTimeout() {
return safelyParseLongValue(READ_TIMEOUT_PROPERTY).orElse(DEFAULT_TIMEOUT);
}
|
@Test
@UseDataProvider("notALongPropertyValues")
public void getReadTimeout_returns_30_000_when_property_is_not_a_long(String notALong) {
settings.setProperty("sonar.alm.timeout.read", notALong);
assertThat(underTest.getReadTimeout()).isEqualTo(30_000L);
}
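A sketch of the counterpart for a parseable value, assuming "sonar.alm.timeout.read" is the key behind READ_TIMEOUT_PROPERTY, as the test above implies:
@Test
public void getReadTimeout_returns_value_when_property_is_a_long() {
    settings.setProperty("sonar.alm.timeout.read", "5000");
    assertThat(underTest.getReadTimeout()).isEqualTo(5_000L);
}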
|
@Override
public boolean tryProcess() {
return tryProcessInternal(null);
}
|
@Test
public void when_idleTimeout_then_idleMessageAfterTimeout() throws Exception {
// We can't inject MockClock to EventTimeMapper inside the InsertWatermarkP, so we use real time.
// We send no events and expect, that after 100 ms WM will be emitted.
createProcessor(100);
// let's process some event and expect real WM to be emitted
resultToCheck.clear();
long start = System.nanoTime();
doAndDrain(() -> p.tryProcess(0, item(10)));
assertEquals(asList(wm(10 - LAG), item(10)), resultToCheck);
// when no more activity occurs, IDLE_MESSAGE should be emitted again
resultToCheck.clear();
long elapsedMs;
do {
assertTrue(p.tryProcess());
elapsedMs = NANOSECONDS.toMillis(System.nanoTime() - start);
outbox.drainQueueAndReset(0, resultToCheck, false);
if (elapsedMs < 100) {
assertTrue("outbox should be empty, elapsedMs=" + elapsedMs, resultToCheck.isEmpty());
} else if (!resultToCheck.isEmpty()) {
System.out.println("WM emitted after " + elapsedMs + "ms (shortly after 100 was expected)");
assertEquals(singletonList(IDLE_MESSAGE), resultToCheck);
break;
}
LockSupport.parkNanos(MILLISECONDS.toNanos(1));
} while (elapsedMs < 1000);
}
|
public void setAbout(Attribute about) throws BadFieldValueException
{
if (XmpConstants.RDF_NAMESPACE.equals(about.getNamespace())
&& XmpConstants.ABOUT_NAME.equals(about.getName()))
{
setAttribute(about);
return;
}
throw new BadFieldValueException("Attribute 'about' must be named 'rdf:about' or 'about'");
}
|
@Test
void testBadRdfAbout() throws Exception
{
assertThrows(BadFieldValueException.class, () -> {
schem.setAbout(new Attribute(null, "about", ""));
});
}
|
@Override
public KGroupedStream<K, V> groupByKey() {
return groupByKey(Grouped.with(keySerde, valueSerde));
}
|
@Test
public void shouldNotAllowNullGroupedOnGroupByKey() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.groupByKey((Grouped<String, String>) null));
assertThat(exception.getMessage(), equalTo("grouped can't be null"));
}
|
@Override
public List<Node> getChildren()
{
ImmutableList.Builder<Node> result = ImmutableList.<Node>builder()
.add(value)
.add(pattern);
escape.ifPresent(result::add);
return result.build();
}
|
@Test
public void testGetChildren()
{
StringLiteral value = new StringLiteral("a");
StringLiteral pattern = new StringLiteral("b");
StringLiteral escape = new StringLiteral("c");
assertEquals(new LikePredicate(value, pattern, escape).getChildren(), ImmutableList.of(value, pattern, escape));
assertEquals(new LikePredicate(value, pattern, Optional.empty()).getChildren(), ImmutableList.of(value, pattern));
}
|
public SearchJob executeSync(String searchId, SearchUser searchUser, ExecutionState executionState) {
return searchDomain.getForUser(searchId, searchUser)
.map(s -> executeSync(s, searchUser, executionState))
.orElseThrow(() -> new NotFoundException("No search found with id <" + searchId + ">."));
}
|
@Test
public void appliesSearchExecutionState() {
final Search search = makeSearch();
final SearchUser searchUser = TestSearchUser.builder()
.withUser(testUser -> testUser.withUsername("frank-drebin"))
.build();
when(searchDomain.getForUser(eq("search1"), eq(searchUser))).thenReturn(Optional.of(search));
final AbsoluteRange absoluteRange = AbsoluteRange.create("2022-05-18T10:00:00.000Z", "2022-05-19T10:00:00.000Z");
final ExecutionState executionState = ExecutionState.builder()
.setGlobalOverride(ExecutionStateGlobalOverride.builder().timerange(absoluteRange).build())
.build();
this.searchExecutor.executeSync("search1", searchUser, executionState);
verify(queryEngine, times(1)).execute(searchJobCaptor.capture(), anySet(), any());
final SearchJob executedJob = searchJobCaptor.getValue();
assertThat(executedJob.getSearch().queries())
.are(new Condition<>(query -> query.timerange().equals(absoluteRange), "timeranges are applied through execution state"));
}
|
@Override
public SelType call(String methodName, SelType[] args) {
if (args.length == 0) {
if ("getAsText".equals(methodName)) {
return SelString.of(val.getAsText(Locale.US));
} else if ("withMinimumValue".equals(methodName)) {
return SelJodaDateTime.of(val.withMinimumValue());
} else if ("withMaximumValue".equals(methodName)) {
return SelJodaDateTime.of(val.withMaximumValue());
} else if ("get".equals(methodName)) {
return SelLong.of((long) val.get());
}
}
throw new UnsupportedOperationException(
type()
+ " DO NOT support calling method: "
+ methodName
+ " with args: "
+ Arrays.toString(args));
}
|
@Test(expected = UnsupportedOperationException.class)
public void testInvalidCallArg() {
one.call("getAsText", new SelType[] {SelType.NULL});
}
|
@Override
public String getAcknowledgmentType() {
return "CE";
}
|
@Test
public void testGetAcknowledgmentType() {
instance = new MllpCommitErrorAcknowledgementException(HL7_MESSAGE_BYTES, HL7_ACKNOWLEDGEMENT_BYTES, LOG_PHI_TRUE);
assertEquals("CE", instance.getAcknowledgmentType());
}
|
public static Type convertType(TypeInfo typeInfo) {
switch (typeInfo.getOdpsType()) {
case BIGINT:
return Type.BIGINT;
case INT:
return Type.INT;
case SMALLINT:
return Type.SMALLINT;
case TINYINT:
return Type.TINYINT;
case FLOAT:
return Type.FLOAT;
case DECIMAL:
DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo;
return ScalarType.createUnifiedDecimalType(decimalTypeInfo.getPrecision(), decimalTypeInfo.getScale());
case DOUBLE:
return Type.DOUBLE;
case CHAR:
CharTypeInfo charTypeInfo = (CharTypeInfo) typeInfo;
return ScalarType.createCharType(charTypeInfo.getLength());
case VARCHAR:
VarcharTypeInfo varcharTypeInfo = (VarcharTypeInfo) typeInfo;
return ScalarType.createVarcharType(varcharTypeInfo.getLength());
case STRING:
case JSON:
return ScalarType.createDefaultCatalogString();
case BINARY:
return Type.VARBINARY;
case BOOLEAN:
return Type.BOOLEAN;
case DATE:
return Type.DATE;
case TIMESTAMP:
case DATETIME:
return Type.DATETIME;
case MAP:
MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo;
return new MapType(convertType(mapTypeInfo.getKeyTypeInfo()),
convertType(mapTypeInfo.getValueTypeInfo()));
case ARRAY:
ArrayTypeInfo arrayTypeInfo = (ArrayTypeInfo) typeInfo;
return new ArrayType(convertType(arrayTypeInfo.getElementTypeInfo()));
case STRUCT:
StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
List<Type> fieldTypeList =
structTypeInfo.getFieldTypeInfos().stream().map(EntityConvertUtils::convertType)
.collect(Collectors.toList());
return new StructType(fieldTypeList);
default:
return Type.VARCHAR;
}
}
|
@Test
public void testConvertTypeCaseTimestampAndDatetime() {
TypeInfo typeInfo = TypeInfoFactory.TIMESTAMP;
Type result = EntityConvertUtils.convertType(typeInfo);
assertEquals(Type.DATETIME, result);
}
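A sketch of a companion check for the parameterized DECIMAL branch, assuming the ODPS TypeInfoFactory exposes getDecimalTypeInfo(precision, scale) alongside the TIMESTAMP constant used above:
@Test
public void testConvertTypeCaseDecimal() {
    TypeInfo typeInfo = TypeInfoFactory.getDecimalTypeInfo(10, 2);
    Type result = EntityConvertUtils.convertType(typeInfo);
    assertEquals(ScalarType.createUnifiedDecimalType(10, 2), result);
}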
|
@Override
public void uploadPart(RefCountedFSOutputStream file) throws IOException {
// this is to guarantee that nobody is
// writing to the file we are uploading.
checkState(file.isClosed());
final CompletableFuture<PartETag> future = new CompletableFuture<>();
uploadsInProgress.add(future);
final long partLength = file.getPos();
currentUploadInfo.registerNewPart(partLength);
file.retain(); // keep the file while the async upload still runs
uploadThreadPool.execute(new UploadTask(s3AccessHelper, currentUploadInfo, file, future));
}
|
@Test
public void multiplePartAndObjectUploadsShouldBeIncluded() throws IOException {
final byte[] firstCompletePart = bytesOf("hello world");
final byte[] secondCompletePart = bytesOf("hello again");
final byte[] thirdIncompletePart = bytesOf("!!!");
uploadPart(firstCompletePart);
uploadPart(secondCompletePart);
uploadObject(thirdIncompletePart);
assertThat(
stubMultiPartUploader,
allOf(
hasMultiPartUploadWithPart(1, firstCompletePart),
hasMultiPartUploadWithPart(2, secondCompletePart),
hasUploadedObject(thirdIncompletePart)));
}
|
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
}
|
@Test
public void testPreferredReadReplicaOffsetError() {
buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(),
Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis());
subscriptions.assignFromUser(singleton(tp0));
client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false));
subscriptions.seek(tp0, 0);
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L,
FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.of(1)));
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
fetchRecords();
Node selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
assertEquals(selected.id(), 1);
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
// Return an error, should unset the preferred read replica
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.OFFSET_OUT_OF_RANGE, 100L,
FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.empty()));
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
fetchRecords();
selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
assertEquals(selected.id(), -1);
}
|
public void grant(GrantPrivilegeStmt stmt) throws DdlException {
try {
if (stmt.getRole() != null) {
grantToRole(
stmt.getObjectType(),
stmt.getPrivilegeTypes(),
stmt.getObjectList(),
stmt.isWithGrantOption(),
stmt.getRole());
} else {
grantToUser(
stmt.getObjectType(),
stmt.getPrivilegeTypes(),
stmt.getObjectList(),
stmt.isWithGrantOption(),
stmt.getUserIdentity());
}
} catch (PrivilegeException e) {
throw new DdlException("failed to grant: " + e.getMessage(), e);
}
}
|
@Test
public void testSysTypeError() throws Exception {
GlobalStateMgr masterGlobalStateMgr = ctx.getGlobalStateMgr();
AuthorizationMgr masterManager = masterGlobalStateMgr.getAuthorizationMgr();
UtFrameUtils.PseudoJournalReplayer.resetFollowerJournalQueue();
setCurrentUserAndRoles(ctx, UserIdentity.ROOT);
DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
"create user user_for_system", ctx), ctx);
String sql = "GRANT OPERATE ON SYSTEM TO USER user_for_system";
GrantPrivilegeStmt grantStmt = (GrantPrivilegeStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx);
masterManager.grant(grantStmt);
UtFrameUtils.PseudoImage emptyImage = new UtFrameUtils.PseudoImage();
saveRBACPrivilege(masterGlobalStateMgr, emptyImage.getImageWriter());
loadRBACPrivilege(masterGlobalStateMgr, emptyImage.getJsonReader());
sql = "show grants for user_for_system";
ShowGrantsStmt showStreamLoadStmt = (ShowGrantsStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx);
ShowResultSet resultSet = ShowExecutor.execute(showStreamLoadStmt, ctx);
}
|
@Description("converts the string to upper case")
@ScalarFunction
@LiteralParameters("x")
@SqlType("varchar(x)")
public static Slice upper(@SqlType("varchar(x)") Slice slice)
{
return toUpperCase(slice);
}
|
@Test
public void testUpper()
{
assertFunction("UPPER('')", createVarcharType(0), "");
assertFunction("UPPER('Hello World')", createVarcharType(11), "HELLO WORLD");
assertFunction("UPPER('what!!')", createVarcharType(6), "WHAT!!");
assertFunction("UPPER('\u00D6sterreich')", createVarcharType(10), upperByCodePoint("\u00D6") + "STERREICH");
assertFunction("UPPER('From\uD801\uDC2DTo')", createVarcharType(7), "FROM" + upperByCodePoint("\uD801\uDC2D") + "TO");
assertFunction("CAST(UPPER(utf8(from_hex('CE'))) AS VARBINARY)", VARBINARY, new SqlVarbinary(new byte[] {(byte) 0xCE}));
assertFunction("CAST(UPPER('hello' || utf8(from_hex('CE'))) AS VARBINARY)", VARBINARY, new SqlVarbinary(new byte[] {'H', 'E', 'L', 'L', 'O', (byte) 0xCE}));
assertFunction("CAST(UPPER(utf8(from_hex('CE')) || 'hello') AS VARBINARY)", VARBINARY, new SqlVarbinary(new byte[] {(byte) 0xCE, 'H', 'E', 'L', 'L', 'O'}));
}
|
public static <T extends Throwable> void checkContainsKey(final Map<?, ?> map, final Object key, final Supplier<T> exceptionSupplierIfUnexpected) throws T {
if (!map.containsKey(key)) {
throw exceptionSupplierIfUnexpected.get();
}
}
|
@Test
void assertCheckContainsKeyToThrowsException() {
assertThrows(SQLException.class, () -> ShardingSpherePreconditions.checkContainsKey(Collections.singletonMap("foo", "value"), "bar", SQLException::new));
}
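For completeness, the happy path (an illustrative extra case, not in the original suite): no exception is thrown when the key is present.
@Test
void assertCheckContainsKeyToNotThrowException() {
    assertDoesNotThrow(() -> ShardingSpherePreconditions.checkContainsKey(Collections.singletonMap("foo", "value"), "foo", SQLException::new));
}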
|
public static CharSequence withoutSubSequence(CharSequence input, int beginIndex, int endIndex) {
if (input == null) throw new NullPointerException("input == null");
int length = input.length();
// Exit early if the region is empty or the entire input
int skippedRegionLength = regionLength(length, beginIndex, endIndex);
if (skippedRegionLength == 0) return input;
if (beginIndex == 0 && endIndex == length) return "";
// Exit early if the region ends on a boundary.
// This doesn't use input.subSequence as it might allocate a String
if (beginIndex == 0) return new SubSequence(input, endIndex, length);
if (endIndex == length) return new SubSequence(input, 0, beginIndex);
// Otherwise, the region to skip is in the middle
return new WithoutSubSequence(input, 0, beginIndex, endIndex, length);
}
|
@Test void withoutSubSequence_length() {
String input = "b3=1,es=2";
for (CharSequence sequence : asList(
CharSequences.withoutSubSequence(input, 0, 2),
CharSequences.withoutSubSequence(input, 2, 4),
CharSequences.withoutSubSequence(input, 4, 6),
CharSequences.withoutSubSequence(input, 6, 8),
CharSequences.withoutSubSequence(input, 1, 9),
CharSequences.withoutSubSequence(input, 0, 8))) {
String string = sequence.toString(); // we know this is ok as it is tested above
assertThat(sequence.length()).isEqualTo(string.length());
}
}
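A minimal usage sketch, reusing the input from the test above. Per the comment in the method, the returned views avoid allocating a String until toString() is called:
CharSequence withoutEs = CharSequences.withoutSubSequence("b3=1,es=2", 4, 9);
// the skipped region ",es=2" ends on the input boundary, so a single SubSequence view suffices
assert "b3=1".contentEquals(withoutEs);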
|
public static <EventT> Write<EventT> write() {
return new AutoValue_JmsIO_Write.Builder<EventT>().build();
}
|
@Test
public void testWriteMessageToStaticTopicWithoutRetryPolicy() throws Exception {
Instant now = Instant.now();
String messageText = now.toString();
List<String> data = Collections.singletonList(messageText);
Connection connection = connectionFactory.createConnection(USERNAME, PASSWORD);
connection.start();
Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
MessageConsumer consumer = session.createConsumer(session.createTopic(TOPIC));
WriteJmsResult<String> output =
pipeline
.apply(Create.of(data))
.apply(
JmsIO.<String>write()
.withConnectionFactory(connectionFactory)
.withValueMapper(new TextMessageMapper())
.withTopic(TOPIC)
.withUsername(USERNAME)
.withPassword(PASSWORD));
PAssert.that(output.getFailedMessages()).empty();
pipeline.run();
Message message = consumer.receive(1000);
assertNotNull(message);
assertNull(consumer.receiveNoWait());
}
|
public static NearCacheConfig copyWithInitializedDefaultMaxSizeForOnHeapMaps(NearCacheConfig nearCacheConfig) {
if (nearCacheConfig == null) {
return null;
}
EvictionConfig evictionConfig = nearCacheConfig.getEvictionConfig();
if (nearCacheConfig.getInMemoryFormat() == InMemoryFormat.NATIVE
|| evictionConfig.sizeConfigured) {
return nearCacheConfig;
}
// create copy of eviction config
EvictionConfig copyEvictionConfig = new EvictionConfig(evictionConfig)
.setSize(MapConfig.DEFAULT_MAX_SIZE);
// create copy of nearCache config and set eviction config
return new NearCacheConfig(nearCacheConfig)
.setEvictionConfig(copyEvictionConfig);
}
|
@Test
public void testCopyInitDefaultMaxSizeForOnHeapMaps_whenNull_thenDoNothing() {
NearCacheConfigAccessor.copyWithInitializedDefaultMaxSizeForOnHeapMaps(null);
}
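A sketch of the non-null path (assumed Hazelcast config API: a BINARY-format config with no explicit eviction size), showing that the original is left untouched and only the copy gets the map default:
NearCacheConfig original = new NearCacheConfig("foo");
NearCacheConfig adjusted = NearCacheConfigAccessor.copyWithInitializedDefaultMaxSizeForOnHeapMaps(original);
assertNotSame(original, adjusted); // a copy is returned, not the input
assertEquals(MapConfig.DEFAULT_MAX_SIZE, adjusted.getEvictionConfig().getSize());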
|
public static <T> GoConfigClassLoader<T> classParser(Element e, Class<T> aClass, ConfigCache configCache, GoCipher goCipher, final ConfigElementImplementationRegistry registry, ConfigReferenceElements configReferenceElements) {
return new GoConfigClassLoader<>(e, aClass, configCache, goCipher, registry, configReferenceElements);
}
|
@Test
public void shouldErrorOutWhenConfigClassHasAttributeAwareConfigTagAnnotationButAttributeValueIsNotMatching() {
final Element element = new Element("example");
element.setAttribute("type", "foo-bar");
final GoConfigClassLoader<ConfigWithAttributeAwareConfigTagAnnotation> loader = GoConfigClassLoader.classParser(element, ConfigWithAttributeAwareConfigTagAnnotation.class, configCache, goCipher, registry, referenceElements);
assertThatThrownBy(loader::parse)
.isInstanceOf(RuntimeException.class)
.hasMessageContaining("Unable to determine type to generate. Type: com.thoughtworks.go.config.parser.ConfigWithAttributeAwareConfigTagAnnotation Element: \n" +
"\t<example type=\"foo-bar\" />");
}
|
public boolean contains(String headerName) {
String normalName = HeaderName.normalize(Objects.requireNonNull(headerName, "headerName"));
return findNormal(normalName) != ABSENT;
}
|
@Test
void contains() {
Headers headers = new Headers();
headers.add("Via", "duct");
headers.add("COOKie", "this=that");
headers.add("cookIE", "frizzle=frazzle");
headers.add("Soup", "salad");
assertTrue(headers.contains("CoOkIe"));
assertTrue(headers.contains(new HeaderName("CoOkIe")));
assertTrue(headers.contains("cookie"));
assertTrue(headers.contains(new HeaderName("cookie")));
assertTrue(headers.contains("Cookie"));
assertTrue(headers.contains(new HeaderName("Cookie")));
assertTrue(headers.contains("COOKIE"));
assertTrue(headers.contains(new HeaderName("COOKIE")));
assertFalse(headers.contains("Monkey"));
assertFalse(headers.contains(new HeaderName("Monkey")));
headers.remove("cookie");
assertFalse(headers.contains("cookie"));
assertFalse(headers.contains(new HeaderName("cookie")));
}
|
public static List<String> split(String str, String splitter) {
if (str == null) {
return null;
}
if (StringUtils.isBlank(str)) {
return Collections.EMPTY_LIST;
}
String[] addrArray = str.split(splitter);
return Arrays.asList(addrArray);
}
|
@Test
public void testSplit() {
List<String> list = Arrays.asList("groupA=DENY", "groupB=PUB|SUB", "groupC=SUB");
String comma = ",";
assertEquals(list, UtilAll.split("groupA=DENY,groupB=PUB|SUB,groupC=SUB", comma));
assertEquals(null, UtilAll.split(null, comma));
assertEquals(Collections.EMPTY_LIST, UtilAll.split("", comma));
}
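One caveat worth a hypothetical extra assertion: String.split treats the splitter as a regular expression, so metacharacters such as "|" or "." must be quoted by the caller:
List<String> parts = UtilAll.split("a|b|c", java.util.regex.Pattern.quote("|"));
assertEquals(Arrays.asList("a", "b", "c"), parts);
// an unquoted "|" is the empty-alternation regex and would split between every character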
|
protected void removeBadMatches(Dependency dependency) {
final Set<Identifier> toRemove = new HashSet<>();
/* TODO - can we utilize the pom's groupId and artifactId to filter??? Most of
 * these are due to low-quality data. Another idea would be to say any CPE
 * found based on LOW confidence evidence should have a different CPE type (this
 * might be a better solution than just removing the URL for "best-guess" matches).
 */
//Set<Evidence> groupId = dependency.getVendorEvidence().getEvidence("pom", "groupid");
//Set<Evidence> artifactId = dependency.getVendorEvidence().getEvidence("pom", "artifactid");
for (Identifier i : dependency.getVulnerableSoftwareIdentifiers()) {
//TODO move this startsWith expression to the base suppression file
if (i instanceof CpeIdentifier) {
final CpeIdentifier cpeId = (CpeIdentifier) i;
final Cpe cpe = cpeId.getCpe();
if ((cpe.getProduct().matches(".*c\\+\\+.*")
|| ("file".equals(cpe.getVendor()) && "file".equals(cpe.getProduct()))
|| ("mozilla".equals(cpe.getVendor()) && "mozilla".equals(cpe.getProduct()))
|| ("cvs".equals(cpe.getVendor()) && "cvs".equals(cpe.getProduct()))
|| ("ftp".equals(cpe.getVendor()) && "ftp".equals(cpe.getProduct()))
|| ("tcp".equals(cpe.getVendor()) && "tcp".equals(cpe.getProduct()))
|| ("ssh".equals(cpe.getVendor()) && "ssh".equals(cpe.getProduct()))
|| ("lookup".equals(cpe.getVendor()) && "lookup".equals(cpe.getProduct())))
&& (dependency.getFileName().toLowerCase().endsWith(".jar")
|| dependency.getFileName().toLowerCase().endsWith("pom.xml")
|| dependency.getFileName().toLowerCase().endsWith(".dll")
|| dependency.getFileName().toLowerCase().endsWith(".exe")
|| dependency.getFileName().toLowerCase().endsWith(".nuspec")
|| dependency.getFileName().toLowerCase().endsWith(".zip")
|| dependency.getFileName().toLowerCase().endsWith(".sar")
|| dependency.getFileName().toLowerCase().endsWith(".apk")
|| dependency.getFileName().toLowerCase().endsWith(".tar")
|| dependency.getFileName().toLowerCase().endsWith(".gz")
|| dependency.getFileName().toLowerCase().endsWith(".tgz")
|| dependency.getFileName().toLowerCase().endsWith(".rpm")
|| dependency.getFileName().toLowerCase().endsWith(".ear")
|| dependency.getFileName().toLowerCase().endsWith(".war"))) {
toRemove.add(i);
} else if ((("jquery".equals(cpe.getVendor()) && "jquery".equals(cpe.getProduct()))
|| ("prototypejs".equals(cpe.getVendor()) && "prototype".equals(cpe.getProduct()))
|| ("yahoo".equals(cpe.getVendor()) && "yui".equals(cpe.getProduct())))
&& (dependency.getFileName().toLowerCase().endsWith(".jar")
|| dependency.getFileName().toLowerCase().endsWith("pom.xml")
|| dependency.getFileName().toLowerCase().endsWith(".dll")
|| dependency.getFileName().toLowerCase().endsWith(".exe"))) {
toRemove.add(i);
} else if ((("microsoft".equals(cpe.getVendor()) && "excel".equals(cpe.getProduct()))
|| ("microsoft".equals(cpe.getVendor()) && "word".equals(cpe.getProduct()))
|| ("microsoft".equals(cpe.getVendor()) && "visio".equals(cpe.getProduct()))
|| ("microsoft".equals(cpe.getVendor()) && "powerpoint".equals(cpe.getProduct()))
|| ("microsoft".equals(cpe.getVendor()) && "office".equals(cpe.getProduct()))
|| ("core_ftp".equals(cpe.getVendor()) && "core_ftp".equals(cpe.getProduct())))
&& (dependency.getFileName().toLowerCase().endsWith(".jar")
|| dependency.getFileName().toLowerCase().endsWith(".ear")
|| dependency.getFileName().toLowerCase().endsWith(".war")
|| dependency.getFileName().toLowerCase().endsWith("pom.xml"))) {
toRemove.add(i);
} else if (("apache".equals(cpe.getVendor()) && "maven".equals(cpe.getProduct()))
&& !dependency.getFileName().toLowerCase().matches("maven-core-[\\d.]+\\.jar")) {
toRemove.add(i);
} else if (("m-core".equals(cpe.getVendor()) && "m-core".equals(cpe.getProduct()))) {
boolean found = false;
for (Evidence e : dependency.getEvidence(EvidenceType.PRODUCT)) {
if ("m-core".equalsIgnoreCase(e.getValue())) {
found = true;
break;
}
}
if (!found) {
for (Evidence e : dependency.getEvidence(EvidenceType.VENDOR)) {
if ("m-core".equalsIgnoreCase(e.getValue())) {
found = true;
break;
}
}
}
if (!found) {
toRemove.add(i);
}
} else if (("jboss".equals(cpe.getVendor()) && "jboss".equals(cpe.getProduct()))
&& !dependency.getFileName().toLowerCase().matches("jboss-?[\\d.-]+(GA)?\\.jar")) {
toRemove.add(i);
} else if ("java-websocket_project".equals(cpe.getVendor())
&& "java-websocket".equals(cpe.getProduct())) {
boolean found = false;
for (Identifier si : dependency.getSoftwareIdentifiers()) {
if (si.getValue().toLowerCase().contains("org.java-websocket/java-websocket")) {
found = true;
break;
}
}
if (!found) {
toRemove.add(i);
}
}
}
}
toRemove.forEach(dependency::removeVulnerableSoftwareIdentifier);
}
|
@Test
public void testRemoveBadMatches() throws Exception {
Dependency dependency = new Dependency();
dependency.setFileName("some.jar");
dependency.setFilePath("some.jar");
Cpe cpe = builder.part(Part.APPLICATION).vendor("m-core").product("m-core").build();
CpeIdentifier id = new CpeIdentifier(cpe, Confidence.HIGHEST);
dependency.addVulnerableSoftwareIdentifier(id);
assertEquals(1, dependency.getVulnerableSoftwareIdentifiers().size());
FalsePositiveAnalyzer instance = new FalsePositiveAnalyzer();
instance.removeBadMatches(dependency);
assertEquals(0, dependency.getVulnerableSoftwareIdentifiers().size());
dependency.addVulnerableSoftwareIdentifier(id);
dependency.addEvidence(EvidenceType.PRODUCT, "test", "name", "m-core", Confidence.HIGHEST);
instance.removeBadMatches(dependency);
assertEquals(1, dependency.getVulnerableSoftwareIdentifiers().size());
}
|
public static void saveExistingErrors(
final File markFile,
final AtomicBuffer errorBuffer,
final PrintStream logger,
final String errorFilePrefix)
{
try
{
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
final int observations = printErrorLog(errorBuffer, new PrintStream(baos, false, "US-ASCII"));
if (observations > 0)
{
final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss-SSSZ");
final File errorLogFile = new File(
markFile.getParentFile(), errorFilePrefix + '-' + dateFormat.format(new Date()) + "-error.log");
if (null != logger)
{
logger.println("WARNING: existing errors saved to: " + errorLogFile);
}
try (FileOutputStream out = new FileOutputStream(errorLogFile))
{
baos.writeTo(out);
}
}
}
catch (final Exception ex)
{
LangUtil.rethrowUnchecked(ex);
}
}
|
@Test
void saveExistingErrorsCreatesErrorFileInTheSameDirectoryAsTheCorrespondingMarkFile()
{
final File markFile = tempDir.resolve("mark.dat").toFile();
final DistinctErrorLog errorLog =
new DistinctErrorLog(new UnsafeBuffer(allocateDirect(10 * 1024)), SystemEpochClock.INSTANCE);
assertTrue(errorLog.record(new Exception("Just to test")));
final PrintStream logger = mock(PrintStream.class);
final String errorFilePrefix = "my-file-";
CommonContext.saveExistingErrors(markFile, errorLog.buffer(), logger, errorFilePrefix);
final File[] files = tempDir.toFile().listFiles(
(dir, name) -> name.endsWith("-error.log") && name.startsWith(errorFilePrefix));
assertNotNull(files);
assertEquals(1, files.length);
verify(logger).println(and(startsWith("WARNING: existing errors saved to: "), endsWith("-error.log")));
verifyNoMoreInteractions(logger);
}
|
public static Format of(final FormatInfo formatInfo) {
final Format format = fromName(formatInfo.getFormat().toUpperCase());
format.validateProperties(formatInfo.getProperties());
return format;
}
|
@Test
public void shouldCreateFromString() {
assertThat(FormatFactory.of(FormatInfo.of("JsoN")), is(FormatFactory.JSON));
assertThat(FormatFactory.of(FormatInfo.of("AvRo")), is(FormatFactory.AVRO));
assertThat(FormatFactory.of(FormatInfo.of("Delimited")), is(FormatFactory.DELIMITED));
assertThat(FormatFactory.of(FormatInfo.of("PrOtObUf")), is(FormatFactory.PROTOBUF));
assertThat(FormatFactory.of(FormatInfo.of("PrOtObUf_nOsR")), is(FormatFactory.PROTOBUF_NOSR));
}
|
public static <T> TransportResponse<T> success(T response)
{
return new TransportResponseImpl<>(response, null, new TreeMap<>(String.CASE_INSENSITIVE_ORDER));
}
|
@Test
public void testSuccessResponse()
{
doTestSuccessResponse(TransportResponseImpl.success(RESPONSE));
doTestSuccessResponse(TransportResponseImpl.success(RESPONSE, CASE_SENSITIVE_WIRE_ATTRIBUTES));
}
|
public long floorFrameTs(long timestamp) {
return subtractClamped(timestamp, floorMod(
(timestamp >= Long.MIN_VALUE + frameOffset ? timestamp : timestamp + frameSize) - frameOffset,
frameSize
));
}
|
@Test
public void when_floorOutOfRange_then_minValue() {
definition = new SlidingWindowPolicy(4, 3, 10);
assertEquals(Long.MIN_VALUE, definition.floorFrameTs(Long.MIN_VALUE + 2));
assertEquals(Long.MAX_VALUE, definition.floorFrameTs(Long.MAX_VALUE));
}
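A standalone sketch of the floor logic (hypothetical helper; subtractClamped is emulated with an overflow check), using the layout from the test above, frameSize=4 and frameOffset=3, so frames start at ..., 3, 7, 11, ...:
static long floorFrame(long timestamp, long frameSize, long frameOffset) {
    long shifted = timestamp >= Long.MIN_VALUE + frameOffset ? timestamp : timestamp + frameSize;
    long excess = Math.floorMod(shifted - frameOffset, frameSize);
    long result = timestamp - excess;
    // emulate subtractClamped: if the subtraction wrapped around, clamp to Long.MIN_VALUE
    return result > timestamp ? Long.MIN_VALUE : result;
}
// floorFrame(10, 4, 3)                 -> 7 (the frame [7, 11) contains timestamp 10)
// floorFrame(Long.MIN_VALUE + 2, 4, 3) -> Long.MIN_VALUE (clamped, as the test expects)
// floorFrame(Long.MAX_VALUE, 4, 3)     -> Long.MAX_VALUE (already on a frame boundary)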
|
@SuppressWarnings("unchecked")
public <T> T convert(DocString docString, Type targetType) {
if (DocString.class.equals(targetType)) {
return (T) docString;
}
List<DocStringType> docStringTypes = docStringTypeRegistry.lookup(docString.getContentType(), targetType);
if (docStringTypes.isEmpty()) {
if (docString.getContentType() == null) {
throw new CucumberDocStringException(format(
"It appears you did not register docstring type for %s",
targetType.getTypeName()));
}
throw new CucumberDocStringException(format(
"It appears you did not register docstring type for '%s' or %s",
docString.getContentType(),
targetType.getTypeName()));
}
if (docStringTypes.size() > 1) {
List<String> suggestedContentTypes = suggestedContentTypes(docStringTypes);
if (docString.getContentType() == null) {
throw new CucumberDocStringException(format(
"Multiple converters found for type %s, add one of the following content types to your docstring %s",
targetType.getTypeName(),
suggestedContentTypes));
}
throw new CucumberDocStringException(format(
"Multiple converters found for type %s, and the content type '%s' did not match any of the registered types %s. Change the content type of the docstring or register a docstring type for '%s'",
targetType.getTypeName(),
docString.getContentType(),
suggestedContentTypes,
docString.getContentType()));
}
return (T) docStringTypes.get(0).transform(docString.getContent());
}
|
@Test
void throws_when_no_converter_available_for_type() {
DocString docString = DocString.create("{\"hello\":\"world\"}");
CucumberDocStringException exception = assertThrows(
CucumberDocStringException.class,
() -> converter.convert(docString, JsonNode.class));
assertThat(exception.getMessage(), is("" +
"It appears you did not register docstring type for com.fasterxml.jackson.databind.JsonNode"));
}
|
@Override
public Set<DiscreteResource> values() {
return map.values().stream()
.flatMap(x -> x.values(parent.id()).stream())
.collect(Collectors.toCollection(LinkedHashSet::new));
}
|
@Test
public void testValues() {
DiscreteResource res1 = Resources.discrete(DeviceId.deviceId("a"), PortNumber.portNumber(1)).resource();
DiscreteResource res2 = Resources.discrete(DeviceId.deviceId("a"), PortNumber.portNumber(2)).resource();
DiscreteResources sut = EncodableDiscreteResources.of(ImmutableSet.of(res1, res2));
assertThat(sut.values(), is(ImmutableSet.of(res1, res2)));
}
|
@SuppressWarnings("ConstantConditions")
public boolean replaceActions(@NonNull Class<? extends Action> clazz, @NonNull Action a) {
if (clazz == null) {
throw new IllegalArgumentException("Action type must be non-null");
}
if (a == null) {
throw new IllegalArgumentException("Action must be non-null");
}
// CopyOnWriteArrayList does not support Iterator.remove, so need to do it this way:
List<Action> old = new ArrayList<>();
List<Action> current = getActions();
boolean found = false;
for (Action a1 : current) {
if (!found) {
if (a.equals(a1)) {
found = true;
} else if (clazz.isInstance(a1)) {
old.add(a1);
}
} else if (clazz.isInstance(a1) && !a.equals(a1)) {
old.add(a1);
}
}
current.removeAll(old);
if (!found) {
addAction(a);
}
return !(old.isEmpty() && found);
}
|
@SuppressWarnings("deprecation")
@Test
public void replaceActions() {
CauseAction a1 = new CauseAction();
ParametersAction a2 = new ParametersAction();
thing.addAction(a1);
thing.addAction(a2);
CauseAction a3 = new CauseAction();
assertTrue(thing.replaceActions(CauseAction.class, a3));
assertEquals(Arrays.asList(a2, a3), thing.getActions());
assertFalse(thing.replaceActions(CauseAction.class, a3));
assertEquals(Arrays.asList(a2, a3), thing.getActions());
}
|
@Override
public ResourceAllocationResult tryFulfillRequirements(
Map<JobID, Collection<ResourceRequirement>> missingResources,
TaskManagerResourceInfoProvider taskManagerResourceInfoProvider,
BlockedTaskManagerChecker blockedTaskManagerChecker) {
final ResourceAllocationResult.Builder resultBuilder = ResourceAllocationResult.builder();
final List<InternalResourceInfo> registeredResources =
getAvailableResources(
taskManagerResourceInfoProvider, resultBuilder, blockedTaskManagerChecker);
final List<InternalResourceInfo> pendingResources =
getPendingResources(taskManagerResourceInfoProvider, resultBuilder);
ResourceProfile totalCurrentResources =
Stream.concat(registeredResources.stream(), pendingResources.stream())
.map(internalResourceInfo -> internalResourceInfo.totalProfile)
.reduce(ResourceProfile.ZERO, ResourceProfile::merge);
for (Map.Entry<JobID, Collection<ResourceRequirement>> resourceRequirements :
missingResources.entrySet()) {
final JobID jobId = resourceRequirements.getKey();
final Collection<ResourceRequirement> unfulfilledJobRequirements =
tryFulfillRequirementsForJobWithResources(
jobId, resourceRequirements.getValue(), registeredResources);
if (!unfulfilledJobRequirements.isEmpty()) {
totalCurrentResources =
totalCurrentResources.merge(
tryFulfillRequirementsForJobWithPendingResources(
jobId,
unfulfilledJobRequirements,
pendingResources,
resultBuilder));
}
}
// Unlike tryFulfillRequirementsForJobWithPendingResources, which updates pendingResources
// to the latest state after a new PendingTaskManager is created,
// tryFulFillRequiredResources will not update pendingResources even after new
// PendingTaskManagers are created.
// This is because the pendingResources are no longer needed afterward.
tryFulFillRequiredResources(
registeredResources, pendingResources, totalCurrentResources, resultBuilder);
return resultBuilder.build();
}
|
@Test
void testUnfulfillableRequirement() {
final TaskManagerInfo taskManager =
new TestingTaskManagerInfo(
DEFAULT_SLOT_RESOURCE.multiply(NUM_OF_SLOTS),
DEFAULT_SLOT_RESOURCE.multiply(NUM_OF_SLOTS),
DEFAULT_SLOT_RESOURCE);
final JobID jobId = new JobID();
final List<ResourceRequirement> requirements = new ArrayList<>();
final ResourceProfile unfulfillableResource = DEFAULT_SLOT_RESOURCE.multiply(8);
final TaskManagerResourceInfoProvider taskManagerResourceInfoProvider =
TestingTaskManagerResourceInfoProvider.newBuilder()
.setRegisteredTaskManagersSupplier(() -> Collections.singleton(taskManager))
.build();
requirements.add(ResourceRequirement.create(unfulfillableResource, 1));
final ResourceAllocationResult result =
ANY_MATCHING_STRATEGY.tryFulfillRequirements(
Collections.singletonMap(jobId, requirements),
taskManagerResourceInfoProvider,
resourceID -> false);
assertThat(result.getUnfulfillableJobs()).containsExactly(jobId);
assertThat(result.getPendingTaskManagersToAllocate()).isEmpty();
}
|
public static int getTag(byte[] raw) {
try (final Asn1InputStream is = new Asn1InputStream(raw)) {
return is.readTag();
}
}
|
@Test
public void getTagSingleByte() {
assertEquals(0x30, Asn1Utils.getTag(new byte[] { 0x30, 0}));
}
|
@Override
public void execute(final ConnectionSession connectionSession) throws SQLException {
Map<String, String> sessionVariables = extractSessionVariables();
validateSessionVariables(sessionVariables.keySet());
new CharsetSetExecutor(databaseType, connectionSession).set(sessionVariables);
new SessionVariableRecordExecutor(databaseType, connectionSession).recordVariable(sessionVariables);
executeSetGlobalVariablesIfPresent(connectionSession);
}
|
@Test
void assertExecute() throws SQLException {
SetStatement setStatement = prepareSetStatement();
MySQLSetVariableAdminExecutor executor = new MySQLSetVariableAdminExecutor(setStatement);
ConnectionSession connectionSession = mock(ConnectionSession.class);
when(connectionSession.getAttributeMap()).thenReturn(new DefaultAttributeMap());
when(connectionSession.getUsedDatabaseName()).thenReturn("foo_db");
ConnectionContext connectionContext = mockConnectionContext();
when(connectionSession.getConnectionContext()).thenReturn(connectionContext);
ProxyDatabaseConnectionManager databaseConnectionManager = mock(ProxyDatabaseConnectionManager.class);
when(connectionSession.getDatabaseConnectionManager()).thenReturn(databaseConnectionManager);
ContextManager contextManager = mockContextManager();
when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
try (MockedConstruction<DatabaseConnector> mockConstruction = mockConstruction(DatabaseConnector.class)) {
executor.execute(connectionSession);
verify(mockConstruction.constructed().get(0)).execute();
}
assertThat(connectionSession.getAttributeMap().attr(CommonConstants.CHARSET_ATTRIBUTE_KEY).get(), is(StandardCharsets.UTF_8));
}
|
@Override
public SchemaResult getValueSchema(
final Optional<String> topicName,
final Optional<Integer> schemaId,
final FormatInfo expectedFormat,
final SerdeFeatures serdeFeatures
) {
return getSchema(topicName, schemaId, expectedFormat, serdeFeatures, false);
}
|
@Test
public void shouldPassRightSchemaToFormat() {
// When:
supplier.getValueSchema(Optional.of(TOPIC_NAME),
Optional.empty(), expectedFormat, SerdeFeatures.of());
// Then:
verify(format).getSchemaTranslator(formatProperties);
verify(schemaTranslator).toColumns(parsedSchema, SerdeFeatures.of(), false);
}
|
@Override
public void trash(final Local file) throws LocalAccessDeniedException {
if(log.isDebugEnabled()) {
log.debug(String.format("Move %s to Trash", file));
}
final ObjCObjectByReference error = new ObjCObjectByReference();
if(!NSFileManager.defaultManager().trashItemAtURL_resultingItemURL_error(
NSURL.fileURLWithPath(file.getAbsolute()), null, error)) {
final NSError f = error.getValueAs(NSError.class);
if(null == f) {
throw new LocalAccessDeniedException(file.getAbsolute());
}
throw new LocalAccessDeniedException(String.format("%s", f.localizedDescription()));
}
}
|
@Test
public void testTrash() throws Exception {
Local l = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
new DefaultLocalTouchFeature().touch(l);
assertTrue(l.exists());
new FileManagerTrashFeature().trash(l);
assertFalse(l.exists());
}
|
@Override
public <T> int score(List<T> left, List<T> right) {
if (left.isEmpty() && right.isEmpty()) {
return 0;
}
int distance = levenshteinDistance(left, right);
return (int) (100 * (1.0 - ((double) distance) / (max(left.size(), right.size()))));
}
|
@Test
public void finding_threshold_in_line_count_to_go_below_85_score() {
assertThat(underTest.score(listOf(100), listOf(115))).isEqualTo(86);
assertThat(underTest.score(listOf(100), listOf(116))).isEqualTo(86);
assertThat(underTest.score(listOf(100), listOf(117))).isEqualTo(85); // 85.47% - 117%
assertThat(underTest.score(listOf(100), listOf(118))).isEqualTo(84); // 84.74% - 118%
assertThat(underTest.score(listOf(50), listOf(58))).isEqualTo(86); // 86.20% - 116%
assertThat(underTest.score(listOf(50), listOf(59))).isEqualTo(84); // 84.74% - 118%
assertThat(underTest.score(listOf(25), listOf(29))).isEqualTo(86); // 86.20% - 116%
assertThat(underTest.score(listOf(25), listOf(30))).isEqualTo(83); // 83.33% - 120%
assertThat(underTest.score(listOf(12), listOf(14))).isEqualTo(85); // 85.71% - 116.67%
assertThat(underTest.score(listOf(12), listOf(15))).isEqualTo(80); // 80.00% - 125%
assertThat(underTest.score(listOf(10), listOf(11))).isEqualTo(90); // 90.90% - 110%
assertThat(underTest.score(listOf(10), listOf(12))).isEqualTo(83); // 83.33% - 120%
assertThat(underTest.score(listOf(5), listOf(5))).isEqualTo(100); // 100% - 100%
assertThat(underTest.score(listOf(5), listOf(6))).isEqualTo(83); // 83.33% - 120%
assertThat(underTest.score(listOf(200), listOf(234))).isEqualTo(85); // 85.47% - 117%
assertThat(underTest.score(listOf(200), listOf(236))).isEqualTo(84); // 84.75% - 118%
assertThat(underTest.score(listOf(300), listOf(352))).isEqualTo(85); // 85.23% - 117.33%
assertThat(underTest.score(listOf(300), listOf(354))).isEqualTo(84); // 84.74% - 118%
assertThat(underTest.score(listOf(400), listOf(470))).isEqualTo(85); // 85.10% - 117.50%
assertThat(underTest.score(listOf(400), listOf(471))).isEqualTo(84); // 84.92% - 117.75%
assertThat(underTest.score(listOf(500), listOf(588))).isEqualTo(85); // 85.03% - 117.60%
assertThat(underTest.score(listOf(500), listOf(589))).isEqualTo(84); // 84.88% - 117.80%
}
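/*
 * Illustration only: the score is 100 * (1 - distance / max(sizes)), truncated
 * by the int cast. For the 100-vs-117-line case above the distance is the 17
 * inserted lines, so 100 * (1 - 17 / 117.0) = 85.47 -> 85.
 */
static int scoreSketch(int distance, int leftSize, int rightSize) {
    return (int) (100 * (1.0 - ((double) distance) / Math.max(leftSize, rightSize)));
}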
|
@Override
public boolean checkCredentials(String username, String password) {
if (username == null || password == null) {
return false;
}
Credentials credentials = new Credentials(username, password);
if (validCredentialsCache.contains(credentials)) {
return true;
} else if (invalidCredentialsCache.contains(credentials)) {
return false;
}
boolean isValid =
this.username.equals(username)
&& this.passwordHash.equals(
generatePasswordHash(
algorithm, salt, iterations, keyLength, password));
if (isValid) {
validCredentialsCache.add(credentials);
} else {
invalidCredentialsCache.add(credentials);
}
return isValid;
}
|
@Test
public void testPBKDF2WithHmacSHA256_lowerCaseWithoutColon() throws Exception {
String algorithm = "PBKDF2WithHmacSHA256";
int iterations = 1000;
int keyLength = 128;
String hash =
"B6:9C:5C:8A:10:3E:41:7B:BA:18:FC:E1:F2:0C:BC:D9:65:70:D3:53:AB:97:EE:2F:3F:A8:88:AF:43:EA:E6:D7:FB"
+ ":70:14:23:F9:51:29:5C:3A:9F:65:C3:20:EE:09:C9:C6:8A:B7:D3:0A:E1:F3:10:2B:9B:36:3F:1F:B6:1D:52:A7"
+ ":9C:CB:AD:55:25:46:C5:73:09:6C:38:9C:F2:FD:82:7F:90:E5:31:EF:7E:3E:6B:B2:0C:38:77:23:EC:3A:CF:29"
+ ":F7:E5:4D:4E:CC:35:7A:C2:E5:CB:E3:B3:E5:09:2B:CC:B9:40:26:A4:28:E9:5F:2D:18:B2:14:41:E7:4D:5B";
hash = hash.toLowerCase().replace(":", "");
        PBKDF2Authenticator authenticator =
                new PBKDF2Authenticator(
                        "/", VALID_USERNAME, hash, algorithm, SALT, iterations, keyLength);
for (String username : TEST_USERNAMES) {
for (String password : TEST_PASSWORDS) {
boolean expectedIsAuthenticated =
VALID_USERNAME.equals(username) && VALID_PASSWORD.equals(password);
                boolean actualIsAuthenticated =
                        authenticator.checkCredentials(username, password);
assertEquals(expectedIsAuthenticated, actualIsAuthenticated);
}
}
}
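/*
 * Illustration only: generatePasswordHash is not shown in this pair. A minimal
 * sketch of the standard JDK PBKDF2 derivation it presumably wraps; the hex
 * formatting of the stored hash is an assumption. keyLength is in bits, as in
 * the test's 128.
 */
static byte[] pbkdf2Sketch(String algorithm, byte[] salt, int iterations, int keyLengthBits,
        String password) throws java.security.GeneralSecurityException {
    javax.crypto.spec.PBEKeySpec spec =
            new javax.crypto.spec.PBEKeySpec(password.toCharArray(), salt, iterations, keyLengthBits);
    // derive the key bytes that would then be encoded and compared to the stored hash
    return javax.crypto.SecretKeyFactory.getInstance(algorithm).generateSecret(spec).getEncoded();
}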
|
public static long retainToDay4MinuteBucket(long minuteTimeBucket) {
if (isMinuteBucket(minuteTimeBucket)) {
return minuteTimeBucket / 10000 * 10000;
} else {
throw new UnexpectedException("Current time bucket is not a minute time bucket");
}
}
|
@Test
public void testRetainToDay4MinuteBucket() {
Assertions.assertEquals(202407110000L, TimeBucket.retainToDay4MinuteBucket(202407112218L));
}
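/*
 * Illustration only: minute buckets use the yyyyMMddHHmm layout, so integer
 * division by 10000 drops the HHmm digits and multiplying back pads the day
 * bucket with four zeros: 202407112218 / 10000 * 10000 == 202407110000.
 */
static long toDayBucketSketch(long minuteBucket) {
    return minuteBucket / 10_000 * 10_000; // e.g. 202407112218L -> 202407110000L
}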
|
@Override
public void runMigrations() {
createMigrationsIndexIfNotExists();
super.runMigrations();
}
|
@Test
void testValidateIndicesWithIndexPrefix() {
ElasticSearchDBCreator elasticSearchDBCreator = new ElasticSearchDBCreator(elasticSearchStorageProviderMock, elasticSearchClient(), "my_index_prefix_");
assertThatThrownBy(elasticSearchDBCreator::validateIndices)
.isInstanceOf(JobRunrException.class)
.hasMessage("Not all required indices are available by JobRunr!");
elasticSearchDBCreator.runMigrations();
assertThatCode(elasticSearchDBCreator::validateIndices).doesNotThrowAnyException();
}
|
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
try {
if(0L == status.getLength()) {
return new NullInputStream(0L);
}
final Storage.Objects.Get request = session.getClient().objects().get(
containerService.getContainer(file).getName(), containerService.getKey(file));
if(containerService.getContainer(file).attributes().getCustom().containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
request.setUserProject(session.getHost().getCredentials().getUsername());
}
final VersioningConfiguration versioning = null != session.getFeature(Versioning.class) ? session.getFeature(Versioning.class).getConfiguration(
containerService.getContainer(file)
) : VersioningConfiguration.empty();
if(versioning.isEnabled()) {
if(StringUtils.isNotBlank(file.attributes().getVersionId())) {
request.setGeneration(Long.parseLong(file.attributes().getVersionId()));
}
}
if(status.isAppend()) {
final HttpRange range = HttpRange.withStatus(status);
final String header;
if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
header = String.format("bytes=%d-", range.getStart());
}
else {
header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
}
if(log.isDebugEnabled()) {
log.debug(String.format("Add range header %s for file %s", header, file));
}
final HttpHeaders headers = request.getRequestHeaders();
headers.setRange(header);
// Disable compression
headers.setAcceptEncoding("identity");
}
return request.executeMediaAsInputStream();
}
catch(IOException e) {
throw new GoogleStorageExceptionMappingService().map("Download {0} failed", e, file);
}
}
|
@Test
public void testReadEmpty() throws Exception {
final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path directory = new GoogleStorageDirectoryFeature(session).mkdir(new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final Path file = new GoogleStorageTouchFeature(session).touch(new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
assertEquals(0, new GoogleStorageAttributesFinderFeature(session).find(file).getSize());
final CountingInputStream in = new CountingInputStream(new GoogleStorageReadFeature(session).read(file, new TransferStatus(), new DisabledConnectionCallback()));
in.close();
assertEquals(0L, in.getByteCount(), 0L);
new GoogleStorageDeleteFeature(session).delete(Arrays.asList(file, directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public static BsonTimestamp decodeTimestamp(BsonDocument resumeToken) {
BsonValue bsonValue =
Objects.requireNonNull(resumeToken, "Missing ResumeToken.").get(DATA_FIELD);
final byte[] keyStringBytes;
// Resume Tokens format: https://www.mongodb.com/docs/manual/changeStreams/#resume-tokens
if (bsonValue.isBinary()) { // BinData
keyStringBytes = bsonValue.asBinary().getData();
} else if (bsonValue.isString()) { // Hex-encoded string (v0 or v1)
keyStringBytes = hexToUint8Array(bsonValue.asString().getValue());
} else {
throw new IllegalArgumentException(
"Unknown resume token format: " + resumeToken.toJson());
}
ByteBuffer buffer = ByteBuffer.wrap(keyStringBytes).order(ByteOrder.BIG_ENDIAN);
int kType = buffer.get() & 0xff;
if (kType != K_TIMESTAMP) {
throw new IllegalArgumentException("Unknown keyType of timestamp: " + kType);
}
int t = buffer.getInt();
int i = buffer.getInt();
return new BsonTimestamp(t, i);
}
|
@Test
public void testDecodeHexFormatV0() {
BsonDocument resumeToken =
BsonDocument.parse(
" {\"_data\": \"826357B0840000000129295A1004461ECCED47A6420D9713A5135650360746645F696400646357B05F35C6AE07E1E6C7390004\"}");
BsonTimestamp expected = new BsonTimestamp(1666691204, 1);
BsonTimestamp actual = ResumeTokenUtils.decodeTimestamp(resumeToken);
assertEquals(expected, actual);
}
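/*
 * Illustration only: the KeyString payload the decoder walks is one type byte
 * followed by two big-endian ints (seconds, then increment). The concrete
 * K_TIMESTAMP tag value is taken as a parameter here because the focal
 * method's constant is not shown.
 */
static BsonTimestamp roundTripSketch(int kTimestamp, int seconds, int increment) {
    ByteBuffer buf = ByteBuffer.allocate(9).order(ByteOrder.BIG_ENDIAN);
    buf.put((byte) kTimestamp).putInt(seconds).putInt(increment);
    buf.flip();
    if ((buf.get() & 0xff) != kTimestamp) {
        throw new IllegalArgumentException("unexpected key type");
    }
    return new BsonTimestamp(buf.getInt(), buf.getInt()); // seconds, then increment
}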
|
public ImmutableList<PluginMatchingResult<PortScanner>> getPortScanners() {
return tsunamiPlugins.entrySet().stream()
.filter(entry -> entry.getKey().type().equals(PluginType.PORT_SCAN))
.map(
entry ->
PluginMatchingResult.<PortScanner>builder()
.setPluginDefinition(entry.getKey())
.setTsunamiPlugin((PortScanner) entry.getValue().get())
.build())
.collect(toImmutableList());
}
|
@Test
public void getPortScanners_whenMultiplePortScannersInstalled_returnsAllPortScanners() {
PluginManager pluginManager =
Guice.createInjector(
new FakePortScannerBootstrapModule(),
new FakePortScannerBootstrapModule2(),
new FakeServiceFingerprinterBootstrapModule(),
new FakeVulnDetectorBootstrapModule())
.getInstance(PluginManager.class);
ImmutableList<PluginMatchingResult<PortScanner>> portScanners = pluginManager.getPortScanners();
assertThat(
portScanners.stream()
.map(pluginMatchingResult -> pluginMatchingResult.tsunamiPlugin().getClass()))
.containsExactly(FakePortScanner.class, FakePortScanner2.class);
}
|
@Override
public AppResponse process(Flow flow, SessionDataRequest request) throws SharedServiceClientException {
return validateAmountOfApps(flow, appSession.getAccountId(), request)
.orElseGet(() -> validateSms(flow, appSession.getAccountId(), request.getSmscode())
.orElseGet(() -> confirmSession(flow, request)));
}
|
@Test
void processValidateAmountOfAppsTooManyDevices() throws SharedServiceClientException {
AppAuthenticator oldApp = new AppAuthenticator();
oldApp.setDeviceName("test_device");
oldApp.setLastSignInAt(ZonedDateTime.now());
when(sharedServiceClientMock.getSSConfigInt("Maximum_aantal_DigiD_apps_eindgebruiker")).thenReturn(5);
when(appAuthenticatorService.countByAccountIdAndInstanceIdNot(ACCOUNT_ID, SESSION_DATA_REQUEST_INSTANCE_ID)).thenReturn(6);
when(appAuthenticatorService.findLeastRecentApp(anyLong())).thenReturn(oldApp);
AppResponse appResponse = sessionConfirmed.process(mockedActivateAppWithOtherAppFlow, mockedSessionDataRequest);
assertTrue(appResponse instanceof TooManyAppsResponse);
assertEquals(TOO_MANY_ACTIVE, ((TooManyAppsResponse) appResponse).getError());
assertEquals("test_device", ((TooManyAppsResponse) appResponse).getDeviceName());
}
|
public TurnToken retrieveFromCloudflare() throws IOException {
final List<String> cloudflareTurnComposedUrls;
try {
cloudflareTurnComposedUrls = dnsNameResolver.resolveAll(cloudflareTurnHostname).get().stream()
.map(i -> switch (i) {
case Inet6Address i6 -> "[" + i6.getHostAddress() + "]";
default -> i.getHostAddress();
})
.flatMap(i -> cloudflareTurnUrlsWithIps.stream().map(u -> u.formatted(i)))
.toList();
} catch (Exception e) {
throw new IOException(e);
}
final HttpResponse<String> response;
try {
response = cloudflareTurnClient.sendAsync(request, HttpResponse.BodyHandlers.ofString()).join();
} catch (CompletionException e) {
logger.warn("failed to make http request to Cloudflare Turn: {}", e.getMessage());
throw new IOException(ExceptionUtils.unwrap(e));
}
if (response.statusCode() != Response.Status.CREATED.getStatusCode()) {
logger.warn("failure request credentials from Cloudflare Turn (code={}): {}", response.statusCode(), response);
throw new IOException("Cloudflare Turn http failure : " + response.statusCode());
}
final CloudflareTurnResponse cloudflareTurnResponse = SystemMapper.jsonMapper()
.readValue(response.body(), CloudflareTurnResponse.class);
return new TurnToken(cloudflareTurnResponse.iceServers().username(),
cloudflareTurnResponse.iceServers().credential(),
cloudflareTurnUrls, cloudflareTurnComposedUrls, cloudflareTurnHostname);
}
|
@Test
public void testSuccess() throws IOException, CancellationException, ExecutionException, InterruptedException {
wireMock.stubFor(post(urlEqualTo(GET_CREDENTIALS_PATH))
.willReturn(aResponse().withStatus(201).withHeader("Content-Type", new String[]{"application/json"}).withBody("{\"iceServers\":{\"urls\":[\"turn:cloudflare.example.com:3478?transport=udp\"],\"username\":\"ABC\",\"credential\":\"XYZ\"}}")));
when(dnsResult.get())
.thenReturn(List.of(InetAddress.getByName("127.0.0.1"), InetAddress.getByName("::1")));
when(dnsResolver.resolveAll(TURN_HOSTNAME))
.thenReturn(dnsResult);
TurnToken token = cloudflareTurnCredentialsManager.retrieveFromCloudflare();
assertThat(token.username()).isEqualTo("ABC");
assertThat(token.password()).isEqualTo("XYZ");
assertThat(token.hostname()).isEqualTo("localhost");
    assertThat(token.urlsWithIps()).containsAll(List.of("turn:127.0.0.1", "turn:127.0.0.1:80?transport=tcp", "turns:127.0.0.1:443?transport=tcp", "turn:[0:0:0:0:0:0:0:1]", "turn:[0:0:0:0:0:0:0:1]:80?transport=tcp", "turns:[0:0:0:0:0:0:0:1]:443?transport=tcp"));
assertThat(token.urls()).isEqualTo(List.of("turn:cf.example.com"));
}
|
@Override
public Checksum upload(final Path file, final Local local, final BandwidthThrottle throttle,
final StreamListener listener, final TransferStatus status,
final ConnectionCallback callback) throws BackgroundException {
try {
final IRODSFileSystemAO fs = session.getClient();
final IRODSFile f = fs.getIRODSFileFactory().instanceIRODSFile(file.getAbsolute());
final TransferControlBlock block = DefaultTransferControlBlock.instance(StringUtils.EMPTY,
new HostPreferences(session.getHost()).getInteger("connection.retry"));
final TransferOptions options = new DefaultTransferOptionsConfigurer().configure(new TransferOptions());
if(Host.TransferType.unknown.equals(session.getHost().getTransferType())) {
options.setUseParallelTransfer(Host.TransferType.valueOf(PreferencesFactory.get().getProperty("queue.transfer.type")).equals(Host.TransferType.concurrent));
}
else {
options.setUseParallelTransfer(session.getHost().getTransferType().equals(Host.TransferType.concurrent));
}
block.setTransferOptions(options);
final DataTransferOperations transfer = fs.getIRODSAccessObjectFactory().getDataTransferOperations(fs.getIRODSAccount());
transfer.putOperation(new File(local.getAbsolute()), f, new DefaultTransferStatusCallbackListener(
status, listener, block
), block);
if(status.isComplete()) {
final DataObjectChecksumUtilitiesAO checksum = fs
.getIRODSAccessObjectFactory()
.getDataObjectChecksumUtilitiesAO(fs.getIRODSAccount());
final ChecksumValue value = checksum.computeChecksumOnDataObject(f);
final Checksum fingerprint = Checksum.parse(value.getChecksumStringValue());
if(null == fingerprint) {
log.warn(String.format("Unsupported checksum algorithm %s", value.getChecksumEncoding()));
}
else {
if(file.getType().contains(Path.Type.encrypted)) {
log.warn(String.format("Skip checksum verification for %s with client side encryption enabled", file));
}
else {
final Checksum expected = ChecksumComputeFactory.get(fingerprint.algorithm).compute(local.getInputStream(), new TransferStatus(status));
if(!expected.equals(fingerprint)) {
throw new ChecksumException(MessageFormat.format(LocaleFactory.localizedString("Upload {0} failed", "Error"), file.getName()),
MessageFormat.format("Mismatch between {0} hash {1} of uploaded data and ETag {2} returned by the server",
fingerprint.algorithm.toString(), expected, fingerprint.hash));
}
}
}
return fingerprint;
}
return null;
}
catch(JargonException e) {
throw new IRODSExceptionMappingService().map(e);
}
}
|
@Test
public void testInterruptStatus() throws Exception {
final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol())));
final Profile profile = new ProfilePlistReader(factory).read(
this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile"));
final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials(
PROPERTIES.get("irods.key"), PROPERTIES.get("irods.secret")
));
final IRODSSession session = new IRODSSession(host);
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
final int length = 32770;
final byte[] content = RandomUtils.nextBytes(length);
final OutputStream out = local.getOutputStream(false);
IOUtils.write(content, out);
out.close();
final Path test = new Path(new IRODSHomeFinderService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
final TransferStatus status = new TransferStatus().withLength(content.length);
final Checksum checksum = new IRODSUploadFeature(session).upload(
test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener() {
@Override
public void sent(final long bytes) {
super.sent(bytes);
status.setCanceled();
}
},
status,
new DisabledConnectionCallback());
try {
status.validate();
fail();
}
catch(ConnectionCanceledException e) {
//
}
assertFalse(status.isComplete());
session.close();
}
|
static int getEncryptedPacketLength(ByteBuf buffer, int offset) {
int packetLength = 0;
// SSLv3 or TLS - Check ContentType
boolean tls;
switch (buffer.getUnsignedByte(offset)) {
case SSL_CONTENT_TYPE_CHANGE_CIPHER_SPEC:
case SSL_CONTENT_TYPE_ALERT:
case SSL_CONTENT_TYPE_HANDSHAKE:
case SSL_CONTENT_TYPE_APPLICATION_DATA:
case SSL_CONTENT_TYPE_EXTENSION_HEARTBEAT:
tls = true;
break;
default:
// SSLv2 or bad data
tls = false;
}
if (tls) {
// SSLv3 or TLS or GMSSLv1.0 or GMSSLv1.1 - Check ProtocolVersion
int majorVersion = buffer.getUnsignedByte(offset + 1);
int version = buffer.getShort(offset + 1);
if (majorVersion == 3 || version == GMSSL_PROTOCOL_VERSION) {
// SSLv3 or TLS or GMSSLv1.0 or GMSSLv1.1
packetLength = unsignedShortBE(buffer, offset + 3) + SSL_RECORD_HEADER_LENGTH;
if (packetLength <= SSL_RECORD_HEADER_LENGTH) {
// Neither SSLv3 or TLSv1 (i.e. SSLv2 or bad data)
tls = false;
}
} else if (version == DTLS_1_0 || version == DTLS_1_2 || version == DTLS_1_3) {
if (buffer.readableBytes() < offset + DTLS_RECORD_HEADER_LENGTH) {
return NOT_ENOUGH_DATA;
}
// length is the last 2 bytes in the 13 byte header.
packetLength = unsignedShortBE(buffer, offset + DTLS_RECORD_HEADER_LENGTH - 2) +
DTLS_RECORD_HEADER_LENGTH;
} else {
// Neither SSLv3 or TLSv1 (i.e. SSLv2 or bad data)
tls = false;
}
}
if (!tls) {
// SSLv2 or bad data - Check the version
int headerLength = (buffer.getUnsignedByte(offset) & 0x80) != 0 ? 2 : 3;
int majorVersion = buffer.getUnsignedByte(offset + headerLength + 1);
if (majorVersion == 2 || majorVersion == 3) {
// SSLv2
packetLength = headerLength == 2 ?
(shortBE(buffer, offset) & 0x7FFF) + 2 : (shortBE(buffer, offset) & 0x3FFF) + 3;
if (packetLength <= headerLength) {
return NOT_ENOUGH_DATA;
}
} else {
return NOT_ENCRYPTED;
}
}
return packetLength;
}
|
@Test
public void shouldGetPacketLengthOfGmsslProtocolFromByteBuffer() {
int bodyLength = 65;
ByteBuf buf = Unpooled.buffer()
.writeByte(SslUtils.SSL_CONTENT_TYPE_HANDSHAKE)
.writeShort(SslUtils.GMSSL_PROTOCOL_VERSION)
.writeShort(bodyLength);
int packetLength = getEncryptedPacketLength(new ByteBuffer[] { buf.nioBuffer() }, 0);
assertEquals(bodyLength + SslUtils.SSL_RECORD_HEADER_LENGTH, packetLength);
buf.release();
}
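/*
 * Illustration only: a TLS/GMSSL record header is five bytes --
 * [content type:1][version:2][length:2] -- so the expected packet length in
 * the test above is the big-endian body length plus
 * SSL_RECORD_HEADER_LENGTH: 65 + 5 = 70.
 */
static int recordPacketLengthSketch(byte lenHi, byte lenLo) {
    int bodyLength = ((lenHi & 0xff) << 8) | (lenLo & 0xff); // unsigned big-endian short
    return bodyLength + 5;                                   // body + record header
}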
|
public static Write write() {
return new AutoValue_RedisIO_Write.Builder()
.setConnectionConfiguration(RedisConnectionConfiguration.create())
.setMethod(Write.Method.APPEND)
.build();
}
|
@Test
public void testWriteWithMethodLPush() {
String key = "testWriteWithMethodLPush";
String value = "value";
client.lpush(key, value);
String newValue = "newValue";
PCollection<KV<String, String>> write = p.apply(Create.of(KV.of(key, newValue)));
write.apply(RedisIO.write().withEndpoint(REDIS_HOST, port).withMethod(Method.LPUSH));
p.run();
List<String> values = client.lrange(key, 0, -1);
assertEquals(newValue + value, String.join("", values));
}
|
@Override
@ManagedOperation(description = "Remove the key from the store")
public boolean remove(String key) {
return setOperations.remove(repositoryName, key) != null;
}
|
@Test
public void shouldRemoveKey() {
idempotentRepository.remove(KEY);
verify(setOperations).remove(REPOSITORY, KEY);
}
|
public ConfigRepoConfig getConfigRepo(MaterialConfig config) {
for (ConfigRepoConfig repoConfig : this) {
if (repoConfig.hasSameMaterial(config)) {
return repoConfig;
}
}
return null;
}
|
@Test
public void shouldReturnNullWhenConfigRepoWithSpecifiedIdIsNotPresent() {
assertNull(repos.getConfigRepo("repo1"));
}
|
public static UBinary create(Kind binaryOp, UExpression lhs, UExpression rhs) {
checkArgument(
OP_CODES.containsKey(binaryOp), "%s is not a supported binary operation", binaryOp);
return new AutoValue_UBinary(binaryOp, lhs, rhs);
}
|
@Test
public void unsignedRightShift() {
assertUnifiesAndInlines(
"4 >>> 17",
UBinary.create(Kind.UNSIGNED_RIGHT_SHIFT, ULiteral.intLit(4), ULiteral.intLit(17)));
}
|
public <T> void register(MeshRuleListener subscriber) {
if (ruleMapHolder != null) {
List<Map<String, Object>> rule = ruleMapHolder.get(subscriber.ruleSuffix());
if (rule != null) {
subscriber.onRuleChange(appName, rule);
}
}
meshRuleDispatcher.register(subscriber);
}
|
@Test
void register() {
MeshAppRuleListener meshAppRuleListener = new MeshAppRuleListener("demo-route");
StandardMeshRuleRouter standardMeshRuleRouter1 = Mockito.spy(new StandardMeshRuleRouter(URL.valueOf("")));
StandardMeshRuleRouter standardMeshRuleRouter2 = Mockito.spy(new StandardMeshRuleRouter(URL.valueOf("")));
meshAppRuleListener.register(standardMeshRuleRouter1);
Assertions.assertEquals(
1,
meshAppRuleListener
.getMeshRuleDispatcher()
.getListenerMap()
.get(MeshRuleConstants.STANDARD_ROUTER_KEY)
.size());
meshAppRuleListener.receiveConfigInfo(rule1 + "---\n" + rule2);
meshAppRuleListener.register(standardMeshRuleRouter2);
Assertions.assertEquals(
2,
meshAppRuleListener
.getMeshRuleDispatcher()
.getListenerMap()
.get(MeshRuleConstants.STANDARD_ROUTER_KEY)
.size());
ArgumentCaptor<String> appCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<List<Map<String, Object>>> ruleCaptor = ArgumentCaptor.forClass(List.class);
verify(standardMeshRuleRouter1, times(1)).onRuleChange(appCaptor.capture(), ruleCaptor.capture());
List<Map<String, Object>> rulesReceived = ruleCaptor.getValue();
assertEquals(2, rulesReceived.size());
Yaml yaml = new Yaml(new SafeConstructor(new LoaderOptions()));
Assertions.assertTrue(rulesReceived.contains(yaml.load(rule1)));
Assertions.assertTrue(rulesReceived.contains(yaml.load(rule2)));
Assertions.assertEquals("demo-route", appCaptor.getValue());
verify(standardMeshRuleRouter2, times(1)).onRuleChange(appCaptor.capture(), ruleCaptor.capture());
rulesReceived = ruleCaptor.getValue();
assertEquals(2, rulesReceived.size());
Assertions.assertTrue(rulesReceived.contains(yaml.load(rule1)));
Assertions.assertTrue(rulesReceived.contains(yaml.load(rule2)));
Assertions.assertEquals("demo-route", appCaptor.getValue());
}
|
@Override
public boolean consume(CodeReader code, TokenQueue output) {
if (code.popTo(matcher, tmpBuilder) > 0) {
// see SONAR-2499
Cursor previousCursor = code.getPreviousCursor();
if (normalizationValue != null) {
output.add(new Token(normalizationValue, previousCursor.getLine(), previousCursor.getColumn()));
} else {
output.add(new Token(tmpBuilder.toString(), previousCursor.getLine(), previousCursor.getColumn()));
}
// Godin: note that other channels use method delete in order to do the same thing
tmpBuilder.setLength(0);
return true;
}
return false;
}
|
@Test
public void shouldNormalize() {
TokenChannel channel = new TokenChannel("ABC", "normalized");
TokenQueue output = mock(TokenQueue.class);
CodeReader codeReader = new CodeReader("ABCD");
assertThat(channel.consume(codeReader, output), is(true));
ArgumentCaptor<Token> token = ArgumentCaptor.forClass(Token.class);
verify(output).add(token.capture());
assertThat(token.getValue(), is(new Token("normalized", 1, 0)));
verifyNoMoreInteractions(output);
assertThat(codeReader.getLinePosition(), is(1));
assertThat(codeReader.getColumnPosition(), is(3));
}
|
public static PDImageXObject createFromByteArray(PDDocument document, byte[] byteArray)
throws IOException
{
// copy stream
ByteArrayInputStream byteStream = new ByteArrayInputStream(byteArray);
Dimensions meta = retrieveDimensions(byteStream);
PDColorSpace colorSpace;
switch (meta.numComponents)
{
case 1:
colorSpace = PDDeviceGray.INSTANCE;
break;
case 3:
colorSpace = PDDeviceRGB.INSTANCE;
break;
case 4:
colorSpace = PDDeviceCMYK.INSTANCE;
break;
default:
throw new UnsupportedOperationException("number of data elements not supported: " +
meta.numComponents);
}
// create PDImageXObject from stream
PDImageXObject pdImage = new PDImageXObject(document, byteStream,
COSName.DCT_DECODE, meta.width, meta.height, 8, colorSpace);
if (colorSpace instanceof PDDeviceCMYK)
{
            // DCT-encoded CMYK is stored inverted (Adobe convention); a [1 0]
            // decode range per component flips each of the four channels back
            COSArray decode = new COSArray();
            for (int i = 0; i < 4; i++)
            {
                decode.add(COSInteger.ONE);
                decode.add(COSInteger.ZERO);
            }
            pdImage.setDecode(decode);
}
return pdImage;
}
|
@Test
void testPDFBox5137() throws IOException
{
byte[] ba = Files.readAllBytes(Paths.get("target/imgs", "PDFBOX-5196-lotus.jpg"));
PDDocument document = new PDDocument();
PDImageXObject ximage = JPEGFactory.createFromByteArray(document, ba);
validate(ximage, 8, 500, 500, "jpg", PDDeviceRGB.INSTANCE.getName());
doWritePDF(document, ximage, TESTRESULTSDIR, "PDFBOX-5196-lotus.pdf");
checkJpegStream(TESTRESULTSDIR, "PDFBOX-5196-lotus.pdf", new ByteArrayInputStream(ba));
}
|
@Override
public ByteBuf duplicate() {
ensureAccessible();
return new UnpooledDuplicatedByteBuf(this);
}
|
@Test
public void testDuplicateRelease() {
ByteBuf buf = newBuffer(8);
assertEquals(1, buf.refCnt());
assertTrue(buf.duplicate().release());
assertEquals(0, buf.refCnt());
}
|
public static void main(String[] args) {
var facade = new DwarvenGoldmineFacade();
facade.startNewDay();
facade.digOutGold();
facade.endDay();
}
|
@Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
}
|
public List<Pdu> getPdus() {
return mPdus;
}
|
@Test
public void testCanParsePdusFromOtherBeacon() {
byte[] bytes = hexStringToByteArray("0201060303aafe1516aafe00e72f234454f4911ba9ffa600000000000100000c09526164426561636f6e2047000000000000000000000000000000000000");
BleAdvertisement bleAdvert = new BleAdvertisement(bytes);
assertEquals("An otherBeacon advert should find four PDUs", 4, bleAdvert.getPdus().size());
assertEquals("First PDU should be flags type 1", 1, bleAdvert.getPdus().get(0).getType());
assertEquals("Second PDU should be services type 3", 3, bleAdvert.getPdus().get(1).getType());
assertEquals("Third PDU should be serivce type 0x16", 0x16, bleAdvert.getPdus().get(2).getType());
assertEquals("fourth PDU should be scan response type 9", 9, bleAdvert.getPdus().get(3).getType());
}
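/*
 * Illustration only: BLE advertising data is a sequence of
 * [length:1][type:1][payload:length-1] structures, with zero bytes padding the
 * 31-byte advert before the scan response. A minimal parser sketch (the real
 * BleAdvertisement parser may differ) that yields the four PDU types the test
 * expects: flags 0x01, services 0x03, service data 0x16, scan response 0x09.
 */
static List<Integer> pduTypesSketch(byte[] bytes) {
    List<Integer> types = new ArrayList<>();
    int i = 0;
    while (i < bytes.length) {
        int length = bytes[i] & 0xff;
        if (length == 0) {
            i++;                         // zero padding between advert and scan response
            continue;
        }
        if (i + 1 >= bytes.length) {
            break;                       // truncated structure
        }
        types.add(bytes[i + 1] & 0xff);  // type byte follows the length byte
        i += length + 1;                 // skip length byte plus structure body
    }
    return types;
}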
|
@Override
public String toString(final RouteUnit routeUnit) {
return identifier.getQuoteCharacter().wrap(getCursorValue(routeUnit));
}
|
@Test
void assertToString() {
CursorToken cursorToken = new CursorToken(0, 0,
new IdentifierValue("t_order_cursor"), mock(CursorStatementContext.class, RETURNS_DEEP_STUBS), mock(ShardingRule.class));
RouteUnit routeUnit = mock(RouteUnit.class);
when(routeUnit.getTableMappers()).thenReturn(Collections.singletonList(new RouteMapper("t_order", "t_order_0")));
when(routeUnit.getDataSourceMapper()).thenReturn(new RouteMapper(DefaultDatabase.LOGIC_NAME, "ds_0"));
assertThat(cursorToken.toString(routeUnit), is("t_order_cursor_t_order_0"));
}
|
@Override
public List<RecognisedObject> recognise(InputStream stream, ContentHandler handler,
Metadata metadata, ParseContext context)
throws IOException, SAXException, TikaException {
INDArray image = preProcessImage(imageLoader.asImageMatrix(stream, false).getImage());
INDArray scores = graph.outputSingle(image);
List<RecognisedObject> result = new ArrayList<>();
for (int i = 0; i < scores.length(); i++) {
if (scores.getDouble(i) > minConfidence) {
String label = labelMap.get(i);
String id = i + "";
result.add(new RecognisedObject(label, labelLang, id, scores.getDouble(i)));
LOG.debug("Found Object {}", label);
}
}
return result;
}
|
@Test
public void recognise() throws Exception {
assumeFalse(SystemUtils.OS_ARCH.equals("aarch64"), "doesn't yet work on aarch64");
TikaConfig config = null;
try (InputStream is = getClass().getResourceAsStream("dl4j-inception3-config.xml")) {
config = new TikaConfig(is);
} catch (Exception e) {
if (e.getMessage() != null && (e.getMessage().contains("Connection refused") ||
e.getMessage().contains("connect timed out") || e.getMessage().contains("403 for URL"))) {
assumeTrue(false, "skipping test because of connection issue");
}
throw e;
}
assumeTrue(config != null, "something went wrong loading tika config");
Tika tika = new Tika(config);
Metadata md = new Metadata();
try (InputStream is = getClass().getResourceAsStream("cat.jpg")) {
tika.parse(is, md);
}
String[] objects = md.getValues("OBJECT");
boolean found = false;
for (String object : objects) {
if (object.contains("_cat")) {
found = true;
break;
}
}
assertTrue(found);
}
|
public String findMsh18(byte[] hl7Message, Charset charset) {
String answer = "";
if (hl7Message != null && hl7Message.length > 0) {
List<Integer> fieldSeparatorIndexes = findFieldSeparatorIndicesInSegment(hl7Message, 0);
if (fieldSeparatorIndexes.size() > 17) {
            // MSH-1 is the field separator itself, so MSH-18 starts right after
            // the 17th '|' (index 16) and ends before the 18th (index 17)
            int startOfMsh18 = fieldSeparatorIndexes.get(16) + 1;
            int length = fieldSeparatorIndexes.get(17) - fieldSeparatorIndexes.get(16) - 1;
if (length > 0) {
                answer = new String(hl7Message, startOfMsh18, length, charset);
}
}
}
return answer;
}
|
@Test
public void testFindMsh18WhenExistsWithoutTrailingPipe() {
final String testMessage = MSH_SEGMENT + "||||||8859/1" + '\r' + REMAINING_SEGMENTS;
assertEquals("8859/1", hl7util.findMsh18(testMessage.getBytes(), charset));
}
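/*
 * Illustration only: because MSH-1 is the '|' separator itself, splitting an
 * MSH segment on '|' puts MSH-n (n >= 2) at parts[n - 1]; the focal method's
 * indices 16/17 bracket exactly that 18th field (Character Set). For the test
 * message above, mshFieldSketch(segment, 18) returns "8859/1".
 */
static String mshFieldSketch(String mshSegment, int fieldNumber) {
    String[] parts = mshSegment.split("\\|", -1); // -1 keeps trailing empty fields
    return parts.length > fieldNumber - 1 ? parts[fieldNumber - 1] : "";
}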
|
static Timestamp toTimestamp(final JsonNode object) {
if (object instanceof NumericNode) {
return new Timestamp(object.asLong());
}
if (object instanceof TextNode) {
try {
return new Timestamp(Long.parseLong(object.textValue()));
} catch (final NumberFormatException e) {
throw failedStringCoercionException(SqlBaseType.TIMESTAMP);
}
}
throw invalidConversionException(object, SqlBaseType.TIMESTAMP);
}
|
@Test(expected = IllegalArgumentException.class)
public void shouldFailWhenConvertingIncompatibleTimestamp() {
JsonSerdeUtils.toTimestamp(JsonNodeFactory.instance.booleanNode(false));
}
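/*
 * Illustration only: the three coercion paths of the focal method, driven with
 * Jackson's JsonNodeFactory (the same node types it checks for).
 */
static void timestampCoercionSketch() {
    JsonNodeFactory f = JsonNodeFactory.instance;
    Timestamp fromNumber = JsonSerdeUtils.toTimestamp(f.numberNode(1577836800000L)); // NumericNode -> epoch millis
    Timestamp fromText = JsonSerdeUtils.toTimestamp(f.textNode("1577836800000"));    // numeric TextNode -> parsed
    // f.booleanNode(false) is neither numeric nor textual and throws, as the test asserts
}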
|
protected static boolean isSingleQuoted(String input) {
if (input == null || input.isBlank()) {
return false;
}
return input.matches("(^" + QUOTE_CHAR + "{1}([^" + QUOTE_CHAR + "]+)" + QUOTE_CHAR + "{1})");
}
|
@Test
public void testEmptySingleQuotedNegative2() {
assertFalse(isSingleQuoted("\""));
}
|
@VisibleForTesting
static int checkJar(Path file) throws Exception {
final URI uri = file.toUri();
int numSevereIssues = 0;
try (final FileSystem fileSystem =
FileSystems.newFileSystem(
new URI("jar:file", uri.getHost(), uri.getPath(), uri.getFragment()),
Collections.emptyMap())) {
if (isTestJarAndEmpty(file, fileSystem.getPath("/"))) {
return 0;
}
if (!noticeFileExistsAndIsValid(fileSystem.getPath("META-INF", "NOTICE"), file)) {
numSevereIssues++;
}
if (!licenseFileExistsAndIsValid(fileSystem.getPath("META-INF", "LICENSE"), file)) {
numSevereIssues++;
}
numSevereIssues +=
getNumLicenseFilesOutsideMetaInfDirectory(file, fileSystem.getPath("/"));
numSevereIssues += getFilesWithIncompatibleLicenses(file, fileSystem.getPath("/"));
}
return numSevereIssues;
}
|
@Test
void testIgnoreFtlFiles(@TempDir Path tempDir) throws Exception {
assertThat(
JarFileChecker.checkJar(
createJar(
tempDir,
Entry.fileEntry(VALID_NOTICE_CONTENTS, VALID_NOTICE_PATH),
Entry.fileEntry(VALID_LICENSE_CONTENTS, VALID_LICENSE_PATH),
Entry.fileEntry(
"content", Arrays.asList("SomeLicenseFile.ftl")))))
.isEqualTo(0);
}
|
@Override
public CompletableFuture<Void> updateOrCreateSubscriptionGroup(String address, SubscriptionGroupConfig config,
long timeoutMillis) {
CompletableFuture<Void> future = new CompletableFuture<>();
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.UPDATE_AND_CREATE_SUBSCRIPTIONGROUP, null);
byte[] body = RemotingSerializable.encode(config);
request.setBody(body);
remotingClient.invoke(address, request, timeoutMillis).thenAccept(response -> {
if (response.getCode() == ResponseCode.SUCCESS) {
future.complete(null);
} else {
log.warn("updateOrCreateSubscriptionGroup getResponseCommand failed, {} {}, header={}", response.getCode(), response.getRemark(), config);
future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark()));
}
});
return future;
}
|
@Test
public void assertUpdateOrCreateSubscriptionGroupWithSuccess() throws Exception {
setResponseSuccess(null);
SubscriptionGroupConfig config = mock(SubscriptionGroupConfig.class);
CompletableFuture<Void> actual = mqClientAdminImpl.updateOrCreateSubscriptionGroup(defaultBrokerAddr, config, defaultTimeout);
assertNull(actual.get());
}
|
public Blade disableSession() {
this.sessionManager = null;
return this;
}
|
@Test
public void testDisableSession() {
Blade blade = Blade.create().disableSession();
Assert.assertNull(blade.sessionManager());
}
|
void updateOffer(ItemComposition offerItem, BufferedImage itemImage, @Nullable GrandExchangeOffer newOffer)
{
if (newOffer == null || newOffer.getState() == EMPTY)
{
return;
}
else
{
cardLayout.show(container, FACE_CARD);
itemName.setText(offerItem.getMembersName());
itemIcon.setIcon(new ImageIcon(itemImage));
boolean buying = newOffer.getState() == BOUGHT
|| newOffer.getState() == BUYING
|| newOffer.getState() == CANCELLED_BUY;
String offerState = (buying ? "Bought " : "Sold ")
+ QuantityFormatter.quantityToRSDecimalStack(newOffer.getQuantitySold()) + " / "
+ QuantityFormatter.quantityToRSDecimalStack(newOffer.getTotalQuantity());
offerInfo.setText(offerState);
itemPrice.setText(htmlLabel("Price each: ", QuantityFormatter.formatNumber(newOffer.getPrice())));
String action = buying ? "Spent: " : "Received: ";
offerSpent.setText(htmlLabel(action, QuantityFormatter.formatNumber(newOffer.getSpent()) + " / "
+ QuantityFormatter.formatNumber(newOffer.getPrice() * newOffer.getTotalQuantity())));
progressBar.setForeground(getProgressColor(newOffer));
progressBar.setMaximumValue(newOffer.getTotalQuantity());
progressBar.setValue(newOffer.getQuantitySold());
final JPopupMenu popupMenu = new JPopupMenu();
popupMenu.setBorder(new EmptyBorder(5, 5, 5, 5));
final JMenuItem openGeLink = new JMenuItem("Open Grand Exchange website");
openGeLink.addActionListener(e -> grandExchangePlugin.openGeLink(offerItem.getMembersName(), offerItem.getId()));
popupMenu.add(openGeLink);
/* Couldn't set the tooltip for the container panel as the children override it, so I'm setting
* the tooltips on the children instead. */
for (Component c : container.getComponents())
{
if (c instanceof JPanel)
{
JPanel panel = (JPanel) c;
panel.setToolTipText(htmlTooltip(((int) progressBar.getPercentage()) + "%"));
panel.setComponentPopupMenu(popupMenu);
}
}
}
revalidate();
}
|
@Test
public void testUpdateOffer()
{
when(offer.getState()).thenReturn(GrandExchangeOfferState.CANCELLED_BUY);
GrandExchangeOfferSlot offerSlot = new GrandExchangeOfferSlot(mock(GrandExchangePlugin.class));
offerSlot.updateOffer(mock(ItemComposition.class), mock(AsyncBufferedImage.class), offer);
}
|
public URI toUri() { return uri; }
|
@Test (timeout = 30000)
public void testPathToUriConversion() throws URISyntaxException, IOException {
// Path differs from URI in that it ignores the query part..
assertEquals("? mark char in to URI",
new URI(null, null, "/foo?bar", null, null),
new Path("/foo?bar").toUri());
assertEquals("escape slashes chars in to URI",
new URI(null, null, "/foo\"bar", null, null),
new Path("/foo\"bar").toUri());
assertEquals("spaces in chars to URI",
new URI(null, null, "/foo bar", null, null),
new Path("/foo bar").toUri());
// therefore "foo?bar" is a valid Path, so a URI created from a Path
// has path "foo?bar" where in a straight URI the path part is just "foo"
assertEquals("/foo?bar",
new Path("http://localhost/foo?bar").toUri().getPath());
assertEquals("/foo", new URI("http://localhost/foo?bar").getPath());
// The path part handling in Path is equivalent to URI
assertEquals(new URI("/foo;bar").getPath(), new Path("/foo;bar").toUri().getPath());
assertEquals(new URI("/foo;bar"), new Path("/foo;bar").toUri());
assertEquals(new URI("/foo+bar"), new Path("/foo+bar").toUri());
assertEquals(new URI("/foo-bar"), new Path("/foo-bar").toUri());
assertEquals(new URI("/foo=bar"), new Path("/foo=bar").toUri());
assertEquals(new URI("/foo,bar"), new Path("/foo,bar").toUri());
}
|
@Override
protected boolean notExist() {
return Stream.of(DefaultPathConstants.PLUGIN_PARENT, DefaultPathConstants.APP_AUTH_PARENT, DefaultPathConstants.META_DATA).noneMatch(zkClient::isExist);
}
|
@Test
public void testNotExist() {
ZookeeperDataChangedInit zookeeperDataChangedInit = new ZookeeperDataChangedInit(zkClient);
when(zkClient.isExist(DefaultPathConstants.PLUGIN_PARENT)).thenReturn(true);
boolean pluginExist = zookeeperDataChangedInit.notExist();
assertFalse(pluginExist, "plugin exist.");
when(zkClient.isExist(DefaultPathConstants.PLUGIN_PARENT)).thenReturn(false);
when(zkClient.isExist(DefaultPathConstants.APP_AUTH_PARENT)).thenReturn(true);
boolean appAuthExist = zookeeperDataChangedInit.notExist();
assertFalse(appAuthExist, "app auth exist.");
when(zkClient.isExist(DefaultPathConstants.APP_AUTH_PARENT)).thenReturn(false);
when(zkClient.isExist(DefaultPathConstants.META_DATA)).thenReturn(true);
boolean metaDataExist = zookeeperDataChangedInit.notExist();
assertFalse(metaDataExist, "metadata exist.");
when(zkClient.isExist(DefaultPathConstants.META_DATA)).thenReturn(false);
boolean metaDataNotExist = zookeeperDataChangedInit.notExist();
assertTrue(metaDataNotExist, "metadata not exist.");
}
|
public Span nextSpan(ConsumerRecord<?, ?> record) {
// Even though the type is ConsumerRecord, this is not a (remote) consumer span. Only "poll"
// events create consumer spans. Since this is a processor span, we use the normal sampler.
TraceContextOrSamplingFlags extracted =
extractAndClearTraceIdHeaders(processorExtractor, record.headers(), record.headers());
Span result = tracer.nextSpan(extracted);
if (extracted.context() == null && !result.isNoop()) {
addTags(record, result);
}
return result;
}
|
@Test void nextSpan_prefers_b3_header() {
consumerRecord.headers().add("b3", B3SingleFormat.writeB3SingleFormatAsBytes(incoming));
Span child;
try (Scope scope = tracing.currentTraceContext().newScope(parent)) {
child = kafkaTracing.nextSpan(consumerRecord);
}
child.finish();
assertThat(spans.get(0).id()).isEqualTo(child.context().spanIdString());
assertChildOf(spans.get(0), incoming);
}
|
public static IpPrefix valueOf(int address, int prefixLength) {
return new IpPrefix(IpAddress.valueOf(address), prefixLength);
}
|
@Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfShortArrayIPv6() {
IpPrefix ipPrefix;
byte[] value;
value = new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9};
ipPrefix = IpPrefix.valueOf(IpAddress.Version.INET6, value, 120);
}
|
public static Set<String> parseDeployOutput(File buildResult) throws IOException {
try (Stream<String> linesStream = Files.lines(buildResult.toPath())) {
return parseDeployOutput(linesStream);
}
}
|
@Test
void testParseDeployOutputDetectsSkippedDeployments() {
assertThat(
DeployParser.parseDeployOutput(
Stream.of(
"[INFO] --- maven-deploy-plugin:2.8.2:deploy (default-deploy) @ flink-parent ---",
"[INFO] Skipping artifact deployment")))
.isEmpty();
}
|
public static Object project(Schema source, Object record, Schema target) throws SchemaProjectorException {
checkMaybeCompatible(source, target);
if (source.isOptional() && !target.isOptional()) {
if (target.defaultValue() != null) {
if (record != null) {
return projectRequiredSchema(source, record, target);
} else {
return target.defaultValue();
}
} else {
throw new SchemaProjectorException("Writer schema is optional, however, target schema does not provide a default value.");
}
} else {
if (record != null) {
return projectRequiredSchema(source, record, target);
} else {
return null;
}
}
}
|
@Test
public void testArrayProjection() {
Schema source = SchemaBuilder.array(Schema.INT32_SCHEMA).build();
Object projected = SchemaProjector.project(source, Arrays.asList(1, 2, 3), source);
assertEquals(Arrays.asList(1, 2, 3), projected);
Schema optionalSource = SchemaBuilder.array(Schema.INT32_SCHEMA).optional().build();
Schema target = SchemaBuilder.array(Schema.INT32_SCHEMA).defaultValue(Arrays.asList(1, 2, 3)).build();
projected = SchemaProjector.project(optionalSource, Arrays.asList(4, 5), target);
assertEquals(Arrays.asList(4, 5), projected);
projected = SchemaProjector.project(optionalSource, null, target);
assertEquals(Arrays.asList(1, 2, 3), projected);
Schema promotedTarget = SchemaBuilder.array(Schema.INT64_SCHEMA).defaultValue(Arrays.asList(1L, 2L, 3L)).build();
projected = SchemaProjector.project(optionalSource, Arrays.asList(4, 5), promotedTarget);
List<Long> expectedProjected = Arrays.asList(4L, 5L);
assertEquals(expectedProjected, projected);
projected = SchemaProjector.project(optionalSource, null, promotedTarget);
assertEquals(Arrays.asList(1L, 2L, 3L), projected);
Schema noDefaultValueTarget = SchemaBuilder.array(Schema.INT32_SCHEMA).build();
assertThrows(SchemaProjectorException.class, () -> SchemaProjector.project(optionalSource, null,
noDefaultValueTarget), "Target schema does not provide a default value.");
Schema nonPromotableTarget = SchemaBuilder.array(Schema.BOOLEAN_SCHEMA).build();
assertThrows(SchemaProjectorException.class,
() -> SchemaProjector.project(optionalSource, null, nonPromotableTarget),
"Neither source type matches target type nor source type can be promoted to target type");
}
|
@SuppressWarnings("unchecked")
public Output run(RunContext runContext) throws Exception {
Logger logger = runContext.logger();
try (HttpClient client = this.client(runContext, this.method)) {
HttpRequest<String> request = this.request(runContext);
HttpResponse<String> response;
try {
response = client
.toBlocking()
.exchange(request, Argument.STRING, Argument.STRING);
// check that the string is a valid Unicode string
if (response.getBody().isPresent()) {
OptionalInt illegalChar = response.body().chars().filter(c -> !Character.isDefined(c)).findFirst();
if (illegalChar.isPresent()) {
throw new IllegalArgumentException("Illegal unicode code point in request body: " + illegalChar.getAsInt() +
", the Request task only support valid Unicode strings as body.\n" +
"You can try using the Download task instead.");
}
}
} catch (HttpClientResponseException e) {
if (!allowFailed) {
throw e;
}
//noinspection unchecked
response = (HttpResponse<String>) e.getResponse();
}
logger.debug("Request '{}' with the response code '{}'", request.getUri(), response.getStatus().getCode());
return this.output(runContext, request, response);
}
}
|
@Test
void failed() throws Exception {
try (
ApplicationContext applicationContext = ApplicationContext.run();
EmbeddedServer server = applicationContext.getBean(EmbeddedServer.class).start();
) {
Request task = Request.builder()
.id(RequestTest.class.getSimpleName())
.type(RequestTest.class.getName())
.uri(server.getURL().toString() + "/hello417")
.allowFailed(true)
.build();
RunContext runContext = TestsUtils.mockRunContext(this.runContextFactory, task, ImmutableMap.of());
Request.Output output = task.run(runContext);
assertThat(output.getBody(), is("{ \"hello\": \"world\" }"));
assertThat(output.getCode(), is(417));
}
}
|
public static List<String> revertForbid(List<String> forbid, Set<URL> subscribed) {
if (CollectionUtils.isNotEmpty(forbid)) {
List<String> newForbid = new ArrayList<>();
for (String serviceName : forbid) {
if (StringUtils.isNotContains(serviceName, ':') && StringUtils.isNotContains(serviceName, '/')) {
for (URL url : subscribed) {
if (serviceName.equals(url.getServiceInterface())) {
newForbid.add(url.getServiceKey());
break;
}
}
} else {
newForbid.add(serviceName);
}
}
return newForbid;
}
return forbid;
}
|
@Test
void testRevertForbid3() {
String service1 = "dubbo.test.api.HelloService:1.0.0";
String service2 = "dubbo.test.api.HelloService:2.0.0";
List<String> forbid = new ArrayList<String>();
forbid.add(service1);
forbid.add(service2);
List<String> newForbid = UrlUtils.revertForbid(forbid, null);
assertEquals(forbid, newForbid);
}
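/*
 * Illustration only: the expansion rule above keys off ':' (version) and '/'
 * (group). Both entries in this test contain ':', so each is kept verbatim and
 * the input list is returned unchanged; a bare name like
 * "dubbo.test.api.HelloService" would instead be replaced by the service key
 * of the matching subscribed URL.
 */
static boolean needsExpansionSketch(String serviceName) {
    return serviceName.indexOf(':') < 0 && serviceName.indexOf('/') < 0;
}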
|
public static PTransformMatcher flattenWithDuplicateInputs() {
return new PTransformMatcher() {
@Override
public boolean matches(AppliedPTransform<?, ?, ?> application) {
if (application.getTransform() instanceof Flatten.PCollections) {
Set<PValue> observed = new HashSet<>();
for (PValue pvalue : application.getInputs().values()) {
boolean firstInstance = observed.add(pvalue);
if (!firstInstance) {
return true;
}
}
}
return false;
}
@Override
public String toString() {
return MoreObjects.toStringHelper("FlattenWithDuplicateInputsMatcher").toString();
}
};
}
|
@Test
public void flattenWithDuplicateInputsWithoutDuplicates() {
AppliedPTransform application =
AppliedPTransform.of(
"Flatten",
Collections.singletonMap(
new TupleTag<Integer>(),
PCollection.createPrimitiveOutputInternal(
p, WindowingStrategy.globalDefault(), IsBounded.BOUNDED, VarIntCoder.of())),
Collections.singletonMap(
new TupleTag<Integer>(),
PCollection.createPrimitiveOutputInternal(
p, WindowingStrategy.globalDefault(), IsBounded.BOUNDED, VarIntCoder.of())),
Flatten.pCollections(),
ResourceHints.create(),
p);
assertThat(PTransformMatchers.flattenWithDuplicateInputs().matches(application), is(false));
}
|
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
if (data.size() < 3) {
onInvalidDataReceived(device, data);
return;
}
final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 0);
final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 1);
final int status = data.getIntValue(Data.FORMAT_UINT8, 2);
if (responseCode != SC_OP_CODE_RESPONSE_CODE) {
onInvalidDataReceived(device, data);
return;
}
if (status != SC_RESPONSE_SUCCESS) {
onSCOperationError(device, requestCode, status);
return;
}
if (requestCode == SC_OP_CODE_REQUEST_SUPPORTED_SENSOR_LOCATIONS) {
final int size = data.size() - 3;
final int[] locations = new int[size];
for (int i = 0; i < size; ++i) {
locations[i] = data.getIntValue(Data.FORMAT_UINT8, 3 + i);
}
onSupportedSensorLocationsReceived(device, locations);
} else {
onSCOperationCompleted(device, requestCode);
}
}
|
@Test
public void onInvalidDataReceived() {
final MutableData data = new MutableData(new byte[] { 0x01, 0x01, 0x00, 0x00, 0x00});
response.onDataReceived(null, data);
assertFalse(success);
assertEquals(0, errorCode);
assertFalse(response.isValid());
assertEquals(0, requestCode);
assertNull(locations);
}
|
public static Write write() {
// 1000 for batch size is good enough in many cases,
// ex: if document size is large, around 10KB, the request's size will be around 10MB
// if document size is small, around 1KB, the request's size will be around 1MB
return new AutoValue_SolrIO_Write.Builder().setMaxBatchSize(1000).build();
}
|
@Test
public void testBatchSize() {
SolrIO.Write write1 =
SolrIO.write()
.withConnectionConfiguration(connectionConfiguration)
.withMaxBatchSize(BATCH_SIZE);
    assertEquals(BATCH_SIZE, write1.getMaxBatchSize());
SolrIO.Write write2 = SolrIO.write().withConnectionConfiguration(connectionConfiguration);
    assertEquals(DEFAULT_BATCH_SIZE, write2.getMaxBatchSize());
}
|
@Override
public void request(Payload grpcRequest, StreamObserver<Payload> responseObserver) {
traceIfNecessary(grpcRequest, true);
String type = grpcRequest.getMetadata().getType();
long startTime = System.nanoTime();
//server is on starting.
if (!ApplicationUtils.isStarted()) {
Payload payloadResponse = GrpcUtils.convert(
ErrorResponse.build(NacosException.INVALID_SERVER_STATUS, "Server is starting,please try later."));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.INVALID_SERVER_STATUS, null, null, System.nanoTime() - startTime);
return;
}
// server check.
if (ServerCheckRequest.class.getSimpleName().equals(type)) {
Payload serverCheckResponseP = GrpcUtils.convert(new ServerCheckResponse(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get(), true));
traceIfNecessary(serverCheckResponseP, false);
responseObserver.onNext(serverCheckResponseP);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, true,
0, null, null, System.nanoTime() - startTime);
return;
}
RequestHandler requestHandler = requestHandlerRegistry.getByRequestType(type);
//no handler found.
if (requestHandler == null) {
Loggers.REMOTE_DIGEST.warn(String.format("[%s] No handler for request type : %s :", "grpc", type));
Payload payloadResponse = GrpcUtils
.convert(ErrorResponse.build(NacosException.NO_HANDLER, "RequestHandler Not Found"));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.NO_HANDLER, null, null, System.nanoTime() - startTime);
return;
}
//check connection status.
String connectionId = GrpcServerConstants.CONTEXT_KEY_CONN_ID.get();
boolean requestValid = connectionManager.checkValid(connectionId);
if (!requestValid) {
            Loggers.REMOTE_DIGEST
                    .warn("[{}] Invalid connection id, connection [{}] is unregistered", "grpc", connectionId);
Payload payloadResponse = GrpcUtils
.convert(ErrorResponse.build(NacosException.UN_REGISTER, "Connection is unregistered."));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.UN_REGISTER, null, null, System.nanoTime() - startTime);
return;
}
Object parseObj = null;
try {
parseObj = GrpcUtils.parse(grpcRequest);
} catch (Exception e) {
            Loggers.REMOTE_DIGEST
                    .warn("[{}] Invalid request received from connection [{}], error={}", "grpc", connectionId, e);
Payload payloadResponse = GrpcUtils.convert(ErrorResponse.build(NacosException.BAD_GATEWAY, e.getMessage()));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.BAD_GATEWAY, e.getClass().getSimpleName(), null, System.nanoTime() - startTime);
return;
}
if (parseObj == null) {
Loggers.REMOTE_DIGEST.warn("[{}] Invalid request receive ,parse request is null", connectionId);
Payload payloadResponse = GrpcUtils
.convert(ErrorResponse.build(NacosException.BAD_GATEWAY, "Invalid request"));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.BAD_GATEWAY, null, null, System.nanoTime() - startTime);
return;
}
if (!(parseObj instanceof Request)) {
            Loggers.REMOTE_DIGEST
                    .warn("[{}] Invalid request received, parsed payload is not a request, parseObj={}", connectionId,
                            parseObj);
Payload payloadResponse = GrpcUtils
.convert(ErrorResponse.build(NacosException.BAD_GATEWAY, "Invalid request"));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
NacosException.BAD_GATEWAY, null, null, System.nanoTime() - startTime);
return;
}
Request request = (Request) parseObj;
try {
Connection connection = connectionManager.getConnection(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get());
RequestMeta requestMeta = new RequestMeta();
requestMeta.setClientIp(connection.getMetaInfo().getClientIp());
requestMeta.setConnectionId(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get());
requestMeta.setClientVersion(connection.getMetaInfo().getVersion());
requestMeta.setLabels(connection.getMetaInfo().getLabels());
requestMeta.setAbilityTable(connection.getAbilityTable());
connectionManager.refreshActiveTime(requestMeta.getConnectionId());
prepareRequestContext(request, requestMeta, connection);
Response response = requestHandler.handleRequest(request, requestMeta);
Payload payloadResponse = GrpcUtils.convert(response);
traceIfNecessary(payloadResponse, false);
if (response.getErrorCode() == NacosException.OVER_THRESHOLD) {
RpcScheduledExecutor.CONTROL_SCHEDULER.schedule(() -> {
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
}, 1000L, TimeUnit.MILLISECONDS);
} else {
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
}
MetricsMonitor.recordGrpcRequestEvent(type, response.isSuccess(),
response.getErrorCode(), null, request.getModule(), System.nanoTime() - startTime);
} catch (Throwable e) {
            Loggers.REMOTE_DIGEST
                    .error("[{}] Failed to handle request from connection [{}], error message: {}", "grpc", connectionId,
                            e);
Payload payloadResponse = GrpcUtils.convert(ErrorResponse.build(e));
traceIfNecessary(payloadResponse, false);
responseObserver.onNext(payloadResponse);
responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false,
ResponseCode.FAIL.getCode(), e.getClass().getSimpleName(), request.getModule(), System.nanoTime() - startTime);
} finally {
RequestContextHolder.removeContext();
}
}
|
@Test
void testApplicationUnStarted() {
RequestMeta metadata = new RequestMeta();
metadata.setClientIp("127.0.0.1");
metadata.setConnectionId(connectId);
ServerCheckRequest serverCheckRequest = new ServerCheckRequest();
serverCheckRequest.setRequestId(requestId);
Payload request = GrpcUtils.convert(serverCheckRequest, metadata);
StreamObserver<Payload> streamObserver = new StreamObserver<Payload>() {
@Override
public void onNext(Payload payload) {
System.out.println("Receive data from server: " + payload);
Object res = GrpcUtils.parse(payload);
assertTrue(res instanceof ErrorResponse);
ErrorResponse errorResponse = (ErrorResponse) res;
assertEquals(NacosException.INVALID_SERVER_STATUS, errorResponse.getErrorCode());
}
@Override
public void onError(Throwable throwable) {
fail(throwable.getMessage());
}
@Override
public void onCompleted() {
System.out.println("complete");
}
};
streamStub.request(request, streamObserver);
}
|
public boolean isCheckpointingEnabled() {
if (snapshotSettings == null) {
return false;
}
return snapshotSettings.getCheckpointCoordinatorConfiguration().isCheckpointingEnabled();
}
|
@Test
public void checkpointingIsEnabledIfIntervalIsSetAndLegal() {
final JobGraph jobGraph =
JobGraphBuilder.newStreamingJobGraphBuilder()
.setJobCheckpointingSettings(createCheckpointSettingsWithInterval(10))
.build();
assertTrue(jobGraph.isCheckpointingEnabled());
}
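The test relies on a createCheckpointSettingsWithInterval helper that this excerpt does not show. A minimal sketch, assuming Flink's CheckpointCoordinatorConfiguration builder and no default state backend:
private static JobCheckpointingSettings createCheckpointSettingsWithInterval(long checkpointInterval) {
    // Only the interval matters for isCheckpointingEnabled(); everything else keeps its default.
    CheckpointCoordinatorConfiguration configuration = CheckpointCoordinatorConfiguration.builder()
            .setCheckpointInterval(checkpointInterval)
            .build();
    return new JobCheckpointingSettings(configuration, null);
}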
|
static Speed mean(Speed... speeds) {
double n = speeds.length;
Speed spd = Speed.ZERO;
for (Speed speed : speeds) {
spd = spd.plus(speed);
}
return spd.times(1.0 / n);
}
|
@Test
public void meanSpeedIsComputedCorrectly() {
Speed negativeTenKnots = Speed.of(-10, KNOTS);
Speed oneKnot = Speed.of(1, KNOTS);
Speed fiveKnots = Speed.of(5, KNOTS);
Speed nineKnots = Speed.of(9, KNOTS);
assertThat(
mean(oneKnot, nineKnots, fiveKnots).toString(5), //avg of 1, 5, 9
is(Speed.of(5.0, KNOTS).toString(5))
);
assertThat(
mean(oneKnot, nineKnots, negativeTenKnots).toString(5), //avg of 1, 9, -10
is(Speed.of(0.0, KNOTS).toString(5))
);
}
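One caveat: called with zero arguments, mean(...) multiplies Speed.ZERO by 1.0 / 0 and silently yields NaN. A guarded variant (a sketch, not part of the original API) fails fast instead:
static Speed meanOfAtLeastOne(Speed... speeds) {
    if (speeds.length == 0) {
        // 0 * (1.0 / 0) would produce NaN; reject the empty call instead.
        throw new IllegalArgumentException("mean requires at least one Speed");
    }
    Speed sum = Speed.ZERO;
    for (Speed speed : speeds) {
        sum = sum.plus(speed);
    }
    return sum.times(1.0 / speeds.length);
}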
|
@ConstantFunction(name = "minutes_add", argTypes = {DATETIME, INT}, returnType = DATETIME, isMonotonic = true)
public static ConstantOperator minutesAdd(ConstantOperator date, ConstantOperator minute) {
return ConstantOperator.createDatetimeOrNull(date.getDatetime().plusMinutes(minute.getInt()));
}
|
@Test
public void minutesAdd() {
assertEquals("2015-03-23T09:33:55",
ScalarOperatorFunctions.minutesAdd(O_DT_20150323_092355, O_INT_10).getDatetime().toString());
}
|
int getStrength(long previousDuration, long currentDuration, int strength) {
if (isPreviousDurationCloserToGoal(previousDuration, currentDuration)) {
return strength - 1;
} else {
return strength;
}
}
|
@Test
void getStrengthShouldReturnPreviousStrengthIfPreviousDurationCloserToGoal() {
// given
// when
int actual = bcCryptWorkFactorService.getStrength(980, 1021, 5);
// then
assertThat(actual).isEqualTo(4);
}
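getStrength defers to an isPreviousDurationCloserToGoal helper that this excerpt omits. A sketch consistent with the test above (the one-second goal is an assumption):
// Hypothetical target: a bcrypt hash should take roughly one second.
private static final long GOAL_MILLIS = 1_000L;

boolean isPreviousDurationCloserToGoal(long previousDuration, long currentDuration) {
    // In the test, 980 ms is 20 ms from the goal and 1021 ms is 21 ms away,
    // so the previous duration wins and the strength drops from 5 to 4.
    return Math.abs(GOAL_MILLIS - previousDuration) < Math.abs(GOAL_MILLIS - currentDuration);
}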
|
@Nullable
public static ValueReference of(Object value) {
if (value instanceof Boolean) {
return of((Boolean) value);
} else if (value instanceof Double) {
return of((Double) value);
} else if (value instanceof Float) {
return of((Float) value);
} else if (value instanceof Integer) {
return of((Integer) value);
} else if (value instanceof Long) {
return of((Long) value);
} else if (value instanceof String) {
return of((String) value);
} else if (value instanceof Enum) {
return of((Enum) value);
} else if (value instanceof EncryptedValue encryptedValue) {
return of(encryptedValue);
} else {
return null;
}
}
|
@Test
public void serializeFloat() throws IOException {
assertJsonEqualsNonStrict(objectMapper.writeValueAsString(ValueReference.of(1.0f)), "{\"@type\":\"float\",\"@value\":1.0}");
assertJsonEqualsNonStrict(objectMapper.writeValueAsString(ValueReference.of(42.4f)), "{\"@type\":\"float\",\"@value\":42.4}");
}
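Because the Object overload returns null for unsupported types, callers should treat its result as optional. A short usage sketch:
ValueReference number = ValueReference.of((Object) 42);        // dispatches to the Integer overload
ValueReference text = ValueReference.of((Object) "hello");     // dispatches to the String overload
ValueReference unsupported = ValueReference.of(new Object());  // falls through to null
assert number != null && text != null;
assert unsupported == null;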
|
@Override
public Boolean isUsedInFetchArtifact(PipelineConfig pipelineConfig) {
return Boolean.FALSE;
}
|
@Test
void shouldReturnFalseForIsUsedInFetchArtifact() {
PackageMaterial material = new PackageMaterial();
assertThat(material.isUsedInFetchArtifact(new PipelineConfig())).isFalse();
}
|
static CatalogLoader createCatalogLoader(
String name, Map<String, String> properties, Configuration hadoopConf) {
String catalogImpl = properties.get(CatalogProperties.CATALOG_IMPL);
if (catalogImpl != null) {
String catalogType = properties.get(ICEBERG_CATALOG_TYPE);
Preconditions.checkArgument(
catalogType == null,
"Cannot create catalog %s, both catalog-type and catalog-impl are set: catalog-type=%s, catalog-impl=%s",
name,
catalogType,
catalogImpl);
return CatalogLoader.custom(name, properties, hadoopConf, catalogImpl);
}
String catalogType = properties.getOrDefault(ICEBERG_CATALOG_TYPE, ICEBERG_CATALOG_TYPE_HIVE);
switch (catalogType.toLowerCase(Locale.ENGLISH)) {
case ICEBERG_CATALOG_TYPE_HIVE:
// The properties 'uri', 'warehouse' and 'hive-conf-dir' may be null; in that case the
// values fall back to the hadoop configuration loaded from the classpath.
String hiveConfDir = properties.get(HIVE_CONF_DIR);
String hadoopConfDir = properties.get(HADOOP_CONF_DIR);
Configuration newHadoopConf = mergeHiveConf(hadoopConf, hiveConfDir, hadoopConfDir);
return CatalogLoader.hive(name, newHadoopConf, properties);
case ICEBERG_CATALOG_TYPE_HADOOP:
return CatalogLoader.hadoop(name, hadoopConf, properties);
case ICEBERG_CATALOG_TYPE_REST:
return CatalogLoader.rest(name, hadoopConf, properties);
default:
throw new UnsupportedOperationException(
"Unknown catalog-type: " + catalogType + " (Must be 'hive', 'hadoop' or 'rest')");
}
}
|
@Test
public void testCreateCatalogHive() {
String catalogName = "hiveCatalog";
props.put(
FlinkCatalogFactory.ICEBERG_CATALOG_TYPE, FlinkCatalogFactory.ICEBERG_CATALOG_TYPE_HIVE);
Catalog catalog =
FlinkCatalogFactory.createCatalogLoader(catalogName, props, new Configuration())
.loadCatalog();
assertThat(catalog).isNotNull().isInstanceOf(HiveCatalog.class);
}
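For comparison, the hadoop branch only needs a warehouse location. A usage sketch (catalog name and path are illustrative assumptions):
Map<String, String> props = new HashMap<>();
props.put(FlinkCatalogFactory.ICEBERG_CATALOG_TYPE, FlinkCatalogFactory.ICEBERG_CATALOG_TYPE_HADOOP);
props.put(CatalogProperties.WAREHOUSE_LOCATION, "file:///tmp/iceberg-warehouse");
Catalog catalog = FlinkCatalogFactory.createCatalogLoader("hadoopCatalog", props, new Configuration())
        .loadCatalog();
assertThat(catalog).isInstanceOf(HadoopCatalog.class);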
|
@Override
@Nullable
public V get(@Nullable Object key) {
Reference<K, V> ref = getReference(key, Restructure.WHEN_NECESSARY);
Entry<K, V> entry = (ref != null ? ref.get() : null);
return (entry != null ? entry.getValue() : null);
}
|
@Test
void shouldGetWithNoItems() {
assertNull(this.map.get(123));
}
|
@Override
public HttpResponseOutputStream<Chunk> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final String uploadUri;
final String resourceId;
if(null == status.getUrl()) {
if(status.isExists()) {
resourceId = fileid.getFileId(file);
uploadUri = EueUploadHelper.updateResource(session, resourceId, status, UploadType.SIMPLE).getUploadURI();
}
else {
final ResourceCreationResponseEntry uploadResourceCreationResponseEntry = EueUploadHelper
.createResource(session, fileid.getFileId(file.getParent()), file.getName(),
status, UploadType.SIMPLE);
resourceId = EueResourceIdProvider.getResourceIdFromResourceUri(uploadResourceCreationResponseEntry.getHeaders().getLocation());
uploadUri = uploadResourceCreationResponseEntry.getEntity().getUploadURI();
}
}
else {
uploadUri = status.getUrl();
resourceId = status.getParameters().get(RESOURCE_ID);
}
final HttpResponseOutputStream<Chunk> stream = this.write(file, status,
new DelayedHttpEntityCallable<Chunk>(file) {
@Override
public Chunk call(final HttpEntity entity) throws BackgroundException {
try {
final HttpResponse response;
final StringBuilder uploadUriWithParameters = new StringBuilder(uploadUri);
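// The upload service accepts integrity (x_cdash64), size (x_size) and chunk offset
// (x_offset) hints as query parameters appended to the upload URI below.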
if(!Checksum.NONE.equals(status.getChecksum())) {
uploadUriWithParameters.append(String.format("&x_cdash64=%s",
new ChunkListSHA256ChecksumCompute().compute(status.getLength(), Hex.decodeHex(status.getChecksum().hash))));
}
if(status.getLength() != -1) {
uploadUriWithParameters.append(String.format("&x_size=%d", status.getLength()));
}
if(status.isSegment()) {
// Chunked upload from large upload service
uploadUriWithParameters.append(String.format("&x_offset=%d",
new HostPreferences(session.getHost()).getLong("eue.upload.multipart.size") * (status.getPart() - 1)));
final HttpPut request = new HttpPut(uploadUriWithParameters.toString());
request.setEntity(entity);
response = session.getClient().execute(request);
}
else {
final HttpPost request = new HttpPost(uploadUriWithParameters.toString());
request.setEntity(entity);
request.setHeader(HttpHeaders.CONTENT_TYPE, MimeTypeService.DEFAULT_CONTENT_TYPE);
response = session.getClient().execute(request);
}
try {
if(response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) {
return new Chunk(resourceId, status.getPart(), status.getLength(), status.getChecksum());
}
EntityUtils.updateEntity(response, new BufferedHttpEntity(response.getEntity()));
throw new EueExceptionMappingService().map(response);
}
finally {
EntityUtils.consume(response.getEntity());
}
}
catch(HttpResponseException e) {
throw new DefaultHttpResponseExceptionMappingService().map("Upload {0} failed", e, file);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
}
catch(DecoderException e) {
throw new ChecksumException(LocaleFactory.localizedString("Checksum failure", "Error"), e);
}
}
@Override
public long getContentLength() {
return status.getLength();
}
}
);
fileid.cache(file, resourceId);
return stream;
}
|
@Test(expected = QuotaException.class)
public void testMaxResourceSizeFailure() throws Exception {
// If the file is already created, but not completely uploaded yet, the entry can be overwritten by setting "forceOverwrite" to true.
final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
final EueWriteFeature feature = new EueWriteFeature(session, fileid);
final Path file = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final TransferStatus status = new TransferStatus().withLength(Long.MAX_VALUE);
try {
feature.write(file, status, new DisabledConnectionCallback());
}
catch(QuotaException e) {
assertEquals("LIMIT_MAX_RESOURCE_SIZE. Please contact your web hosting service provider for assistance.", e.getDetail());
throw e;
}
}
|