focal_method | test_case |
---|---|
@Override
public ScopeModel getScopeModel() {
return Optional.ofNullable(RpcContext.getServiceContext().getConsumerUrl())
.map(URL::getScopeModel)
.orElse(super.getScopeModel());
}
|
@Test
void test2() {
RpcServiceContext.getServiceContext().setConsumerUrl(null);
Assertions.assertNull(instanceURL.getScopeModel());
ModuleModel moduleModel = Mockito.mock(ModuleModel.class);
RpcServiceContext.getServiceContext().setConsumerUrl(URL.valueOf("").setScopeModel(moduleModel));
Assertions.assertEquals(moduleModel, instanceURL.getScopeModel());
}
|
public static MySQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) {
Preconditions.checkArgument(BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType), "Cannot find MySQL type '%s' in column type when process binary protocol value", binaryColumnType);
return BINARY_PROTOCOL_VALUES.get(binaryColumnType);
}
|
@Test
void assertGetBinaryProtocolValueWithMySQLTypeNull() {
assertNull(MySQLBinaryProtocolValueFactory.getBinaryProtocolValue(MySQLBinaryColumnType.NULL));
}
|
List<Endpoint> endpoints() {
try {
String urlString = String.format("%s/api/v1/namespaces/%s/pods", kubernetesMaster, namespace);
return enrichWithPublicAddresses(parsePodsList(callGet(urlString)));
} catch (RestClientException e) {
return handleKnownException(e);
}
}
|
@Test
public void portValuesForPodWithMultipleContainer() throws JsonProcessingException {
PodList podList = new PodList();
List<Pod> pods = new ArrayList<>();
for (int i = 0; i <= 1; i++) {
Pod pod = new PodBuilder()
.withMetadata(new ObjectMetaBuilder()
.withName("hazelcast-" + i)
.build())
.withSpec(new PodSpecBuilder()
.withContainers(new ContainerBuilder()
.withName("hazelcast")
.withPorts(new ContainerPortBuilder()
.withContainerPort(5701)
.withName("hazelcast")
.build())
.build(),
new ContainerBuilder()
.withName("proxy")
.withPorts(new ContainerPortBuilder()
.withContainerPort(5701)
.withName("proxy")
.build())
.build())
.build())
.withStatus(new PodStatusBuilder()
.withContainerStatuses(new ContainerStatusBuilder().withReady().build())
.withPodIP(String.format("172.17.%d.5", i))
.build())
.build();
pods.add(pod);
}
podList.setItems(pods);
ObjectMapper mapper = new ObjectMapper();
String prodsJsonResponse = mapper.writeValueAsString(podList);
stub(String.format("/api/v1/namespaces/%s/pods", NAMESPACE), prodsJsonResponse);
List<Endpoint> result = kubernetesClient.endpoints();
assertThat(formatPrivate(result)).containsExactlyInAnyOrder(ready("172.17.0.5", 5701), ready("172.17.1.5", 5701));
}
|
public static List<String> filterMatches(@Nullable List<String> candidates,
@Nullable Pattern[] positivePatterns,
@Nullable Pattern[] negativePatterns) {
if (candidates == null || candidates.isEmpty()) {
return Collections.emptyList();
}
final Pattern[] positive = (positivePatterns == null || positivePatterns.length == 0) ?
MATCH_ALL_PATTERN : positivePatterns;
final Pattern[] negative = negativePatterns == null ? EMPTY_PATTERN : negativePatterns;
return candidates.stream()
.filter(c -> Arrays.stream(positive).anyMatch(p -> p.matcher(c).matches()))
.filter(c -> Arrays.stream(negative).noneMatch(p -> p.matcher(c).matches()))
.collect(Collectors.toList());
}
|
@Test
public void filterMatchesEmpty() {
List<String> candidates = ImmutableList.of("foo", "bar");
assertThat(filterMatches(candidates, null, null), is(candidates));
assertThat(filterMatches(null, null, null), is(Collections.emptyList()));
}
|
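A minimal, JDK-only sketch of the filter rule shown above; the class name and MATCH_ALL constant are illustrative, not taken from the original codebase. A candidate is kept only if it matches at least one positive pattern and no negative pattern; empty or null positive patterns mean match-all, a null negative array means exclude nothing.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

// Standalone sketch of the positive/negative pattern filtering semantics.
public final class PatternFilterSketch {
    private static final Pattern[] MATCH_ALL = {Pattern.compile(".*")};

    static List<String> filter(List<String> candidates, Pattern[] positive, Pattern[] negative) {
        if (candidates == null || candidates.isEmpty()) {
            return Collections.emptyList();
        }
        Pattern[] pos = (positive == null || positive.length == 0) ? MATCH_ALL : positive;
        Pattern[] neg = (negative == null) ? new Pattern[0] : negative;
        return candidates.stream()
                .filter(c -> Arrays.stream(pos).anyMatch(p -> p.matcher(c).matches()))
                .filter(c -> Arrays.stream(neg).noneMatch(p -> p.matcher(c).matches()))
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<String> in = Arrays.asList("foo", "bar", "baz");
        // Positive "ba.*" keeps bar/baz, negative ".*z" then drops baz.
        System.out.println(filter(in, new Pattern[]{Pattern.compile("ba.*")},
                new Pattern[]{Pattern.compile(".*z")})); // [bar]
    }
}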
@Nullable
public Object sanitize(String key, @Nullable Object value) {
for (Pattern pattern : sanitizeKeysPatterns) {
if (pattern.matcher(key).matches()) {
return SANITIZED_VALUE;
}
}
return value;
}
|
@Test
void obfuscateCredentialsWithDefinedPatterns() {
final var sanitizer = new KafkaConfigSanitizer(true, Arrays.asList("kafka.ui", ".*test.*"));
assertThat(sanitizer.sanitize("consumer.kafka.ui", "secret")).isEqualTo("******");
assertThat(sanitizer.sanitize("this.is.test.credentials", "secret")).isEqualTo("******");
assertThat(sanitizer.sanitize("this.is.not.credential", "not.credential"))
.isEqualTo("not.credential");
assertThat(sanitizer.sanitize("database.password", "no longer credential"))
.isEqualTo("no longer credential");
}
|
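A self-contained sketch of the key-pattern sanitization idea exercised above, assuming a caller-supplied regex list and a fixed mask value; it is not the KafkaConfigSanitizer implementation.

import java.util.List;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

// Sketch: mask a value whenever its key matches any configured pattern.
public final class ConfigSanitizerSketch {
    private static final String MASKED = "******";
    private final List<Pattern> keyPatterns;

    ConfigSanitizerSketch(List<String> regexes) {
        // Patterns are matched against the full key, mirroring Pattern#matches above.
        this.keyPatterns = regexes.stream().map(Pattern::compile).collect(Collectors.toList());
    }

    Object sanitize(String key, Object value) {
        for (Pattern p : keyPatterns) {
            if (p.matcher(key).matches()) {
                return MASKED;
            }
        }
        return value;
    }

    public static void main(String[] args) {
        ConfigSanitizerSketch s = new ConfigSanitizerSketch(List.of(".*password.*", ".*test.*"));
        System.out.println(s.sanitize("database.password", "secret")); // ******
        System.out.println(s.sanitize("plain.setting", "visible"));    // visible
    }
}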
public static OffsetBasedPagination forStartRowNumber(int startRowNumber, int pageSize) {
checkArgument(startRowNumber >= 1, "startRowNumber must be >= 1");
checkArgument(pageSize >= 1, "page size must be >= 1");
return new OffsetBasedPagination(startRowNumber - 1, pageSize);
}
|
@Test
void equals_whenSameObjects_shouldBeTrue() {
OffsetBasedPagination offsetBasedPagination = OffsetBasedPagination.forStartRowNumber(15, 20);
Assertions.assertThat(offsetBasedPagination).isEqualTo(offsetBasedPagination);
}
|
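A tiny sketch of the 1-based start row to 0-based offset conversion validated above; class and field names are illustrative.

// Sketch: pagination expressed as a 0-based offset, built from a 1-based start row.
public final class OffsetPaginationSketch {
    final int offset;
    final int pageSize;

    private OffsetPaginationSketch(int offset, int pageSize) {
        this.offset = offset;
        this.pageSize = pageSize;
    }

    static OffsetPaginationSketch forStartRowNumber(int startRowNumber, int pageSize) {
        if (startRowNumber < 1) {
            throw new IllegalArgumentException("startRowNumber must be >= 1");
        }
        if (pageSize < 1) {
            throw new IllegalArgumentException("page size must be >= 1");
        }
        // Row 1 corresponds to offset 0, row 15 to offset 14, and so on.
        return new OffsetPaginationSketch(startRowNumber - 1, pageSize);
    }

    public static void main(String[] args) {
        OffsetPaginationSketch p = OffsetPaginationSketch.forStartRowNumber(15, 20);
        System.out.println(p.offset + " " + p.pageSize); // 14 20
    }
}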
@Override
public <T> T clone(T object) {
if (object instanceof String) {
return object;
} else if (object instanceof Collection) {
Object firstElement = findFirstNonNullElement((Collection) object);
if (firstElement != null && !(firstElement instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
} else if (object instanceof Map) {
Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
if (firstEntry != null) {
Object key = firstEntry.getKey();
Object value = firstEntry.getValue();
if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
}
} else if (object instanceof JsonNode) {
return (T) ((JsonNode) object).deepCopy();
}
if (object instanceof Serializable) {
try {
return (T) SerializationHelper.clone((Serializable) object);
} catch (SerializationException e) {
//it is possible that object itself implements java.io.Serializable, but underlying structure does not
//in this case we switch to the other JSON marshaling strategy which doesn't use the Java serialization
}
}
return jsonClone(object);
}
|
@Test
public void should_clone_serializable_complex_object_with_serializable_nested_object() {
Map<String, List<SerializableObject>> map = new LinkedHashMap<>();
map.put("key1", Lists.newArrayList(new SerializableObject("name1")));
map.put("key2", Lists.newArrayList(
new SerializableObject("name2"),
new SerializableObject("name3")
));
Object original = new SerializableComplexObject(map);
Object cloned = serializer.clone(original);
assertEquals(original, cloned);
assertNotSame(original, cloned);
}
|
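The Serializable branch above relies on a Java-serialization round-trip for deep copying. A JDK-only sketch of that round-trip (not the library's SerializationHelper):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

// Sketch of the Serializable fallback: deep-copy by writing the object graph
// to a byte array and reading it back.
public final class SerializationCloneSketch {
    @SuppressWarnings("unchecked")
    static <T extends Serializable> T deepClone(T object) {
        try (ByteArrayOutputStream bytes = new ByteArrayOutputStream()) {
            try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
                out.writeObject(object);
            }
            try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                return (T) in.readObject();
            }
        } catch (Exception e) {
            // A non-serializable member anywhere in the graph ends up here, which is
            // why the focal method falls back to a JSON-based clone in that case.
            throw new IllegalStateException("Deep clone via Java serialization failed", e);
        }
    }

    public static void main(String[] args) {
        java.util.ArrayList<String> original = new java.util.ArrayList<>(java.util.List.of("a", "b"));
        java.util.ArrayList<String> copy = deepClone(original);
        System.out.println(copy.equals(original) && copy != original); // true
    }
}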
@Override
public int run(String[] args) throws Exception {
parseArgs(args);
// Disable using the RPC tailing mechanism for bootstrapping the standby
// since it is less efficient in this case; see HDFS-14806
conf.setBoolean(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY, false);
parseConfAndFindOtherNN();
NameNode.checkAllowFormat(conf);
InetSocketAddress myAddr = DFSUtilClient.getNNAddress(conf);
SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());
return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
@Override
public Integer run() {
try {
return doRun();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
});
}
|
@Test
public void testStandbyDirsAlreadyExist() throws Exception {
// Should not pass since standby dirs exist, force not given
int rc = BootstrapStandby.run(
new String[]{"-nonInteractive"},
cluster.getConfiguration(1));
assertEquals(BootstrapStandby.ERR_CODE_ALREADY_FORMATTED, rc);
// Should pass with -force
assertEquals(0, forceBootstrap(1));
}
|
@Override
public WanReplicationConfig setName(@Nonnull String name) {
this.name = checkNotNull(name, "Name must not be null");
return this;
}
|
@Test
public void testSerialization_withEmpyConfigs() {
config.setName("name");
SerializationService serializationService = new DefaultSerializationServiceBuilder().build();
Data serialized = serializationService.toData(config);
WanReplicationConfig deserialized = serializationService.toObject(serialized);
assertWanReplicationConfig(config, deserialized);
}
|
@Override
public void close() {
// Acquire the lock so that drain is guaranteed to complete the current batch
appendLock.lock();
List<CompletedBatch<T>> unwritten;
try {
unwritten = drain(Long.MAX_VALUE);
} finally {
appendLock.unlock();
}
unwritten.forEach(CompletedBatch::release);
}
|
@Test
public void testCloseWhenEmpty() {
int leaderEpoch = 17;
long baseOffset = 157;
int lingerMs = 50;
int maxBatchSize = 256;
BatchAccumulator<String> acc = buildAccumulator(
leaderEpoch,
baseOffset,
lingerMs,
maxBatchSize
);
acc.close();
Mockito.verifyNoInteractions(memoryPool);
}
|
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
return inject(statement, new TopicProperties.Builder());
}
|
@Test
public void shouldThrowIfRetentionConfigPresentInCreateTable() {
// Given:
givenStatement("CREATE TABLE foo_bar (FOO STRING PRIMARY KEY, BAR STRING) WITH (kafka_topic='doesntexist', partitions=2, format='avro', retention_ms=30000);");
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> injector.inject(statement, builder)
);
// Then:
assertThat(
e.getMessage(),
containsString("Invalid config variable in the WITH clause: RETENTION_MS."
+ " Non-windowed tables do not support retention."));
}
|
public void setWriteTimeout(int writeTimeout) {
this.writeTimeout = writeTimeout;
}
|
@Test
public void testThrowIOException() throws IOException {
when(mockLowLevelRequest.execute())
.thenThrow(new IOException("Fake Error"))
.thenReturn(mockLowLevelResponse);
when(mockLowLevelResponse.getStatusCode()).thenReturn(200);
Storage.Buckets.Get result = storage.buckets().get("test");
HttpResponse response = result.executeUnparsed();
assertNotNull(response);
verify(mockHttpResponseInterceptor).interceptResponse(any(HttpResponse.class));
verify(mockLowLevelRequest, atLeastOnce()).addHeader(anyString(), anyString());
verify(mockLowLevelRequest, times(2)).setTimeout(anyInt(), anyInt());
verify(mockLowLevelRequest, times(2)).setWriteTimeout(anyInt());
verify(mockLowLevelRequest, times(2)).execute();
verify(mockLowLevelResponse, atLeastOnce()).getStatusCode();
expectedLogs.verifyDebug("Request failed with IOException");
}
|
public ConvertCommand(Logger console) {
super(console);
}
|
@Test
public void testConvertCommand() throws IOException {
File file = toAvro(parquetFile());
ConvertCommand command = new ConvertCommand(createLogger());
command.targets = Arrays.asList(file.getAbsolutePath());
File output = new File(getTempFolder(), "converted.avro");
command.outputPath = output.getAbsolutePath();
command.setConf(new Configuration());
Assert.assertEquals(0, command.run());
Assert.assertTrue(output.exists());
}
|
Map<String, String> describeInstances(AwsCredentials credentials) {
Map<String, String> attributes = createAttributesDescribeInstances();
Map<String, String> headers = createHeaders(attributes, credentials);
String response = callAwsService(attributes, headers);
return parseDescribeInstances(response);
}
|
@Test
public void describeInstances() {
// given
String requestUrl = "/?Action=DescribeInstances"
+ "&Filter.1.Name=tag%3Aaws-test-cluster"
+ "&Filter.1.Value.1=cluster1"
+ "&Filter.2.Name=tag-key"
+ "&Filter.2.Value.1=another-tag-key"
+ "&Filter.3.Name=instance.group-name"
+ "&Filter.3.Value.1=hazelcast"
+ "&Filter.4.Name=instance-state-name&Filter.4.Value.1=running"
+ "&Version=2016-11-15";
//language=XML
String response = """
<?xml version="1.0" encoding="UTF-8"?>
<DescribeInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<reservationSet>
<item>
<instancesSet>
<item>
<privateIpAddress>10.0.1.25</privateIpAddress>
<ipAddress>54.93.121.213</ipAddress>
<tagSet>
<item>
<key>kubernetes.io/cluster/openshift-cluster</key>
<value>openshift-cluster-eu-central-1</value>
</item>
<item>
<key>Name</key>
<value>* OpenShift Node 1</value>
</item>
</tagSet>
</item>
</instancesSet>
</item>
<item>
<instancesSet>
<item>
<privateIpAddress>172.31.14.42</privateIpAddress>
<ipAddress>18.196.228.248</ipAddress>
<tagSet>
<item>
<key>Name</key>
<value>rafal-ubuntu-2</value>
</item>
</tagSet>
</item>
</instancesSet>
</item>
</reservationSet>
</DescribeInstancesResponse>""";
stubFor(get(urlEqualTo(requestUrl))
.withHeader("X-Amz-Date", equalTo("20200403T102518Z"))
.withHeader("Authorization", equalTo(AUTHORIZATION_HEADER))
.withHeader("X-Amz-Security-Token", equalTo(TOKEN))
.willReturn(aResponse().withStatus(HttpURLConnection.HTTP_OK).withBody(response)));
// when
Map<String, String> result = awsEc2Api.describeInstances(CREDENTIALS);
// then
assertEquals(2, result.size());
assertEquals("54.93.121.213", result.get("10.0.1.25"));
assertEquals("18.196.228.248", result.get("172.31.14.42"));
}
|
public RMNode selectAnyNode(Set<String> blacklist, Resource request) {
List<NodeId> nodeIds = getCandidatesForSelectAnyNode();
int size = nodeIds.size();
if (size <= 0) {
return null;
}
Random rand = new Random();
int startIndex = rand.nextInt(size);
for (int i = 0; i < size; ++i) {
int index = i + startIndex;
index %= size;
NodeId nodeId = nodeIds.get(index);
if (nodeId != null && !blacklist.contains(nodeId.getHost())) {
ClusterNode node = clusterNodes.get(nodeId);
if (node != null && comparator.compareAndIncrement(
node, 1, request)) {
return nodeByHostName.get(nodeId.getHost());
}
}
}
return null;
}
|
@Test
public void testSelectAnyNode() {
NodeQueueLoadMonitor selector = new NodeQueueLoadMonitor(
NodeQueueLoadMonitor.LoadComparator.QUEUE_LENGTH);
RMNode h1 = createRMNode("h1", 1, "rack1", -1, 2, 5);
RMNode h2 = createRMNode("h2", 2, "rack2", -1, 5, 5);
RMNode h3 = createRMNode("h3", 3, "rack2", -1, 4, 10);
selector.addNode(null, h1);
selector.addNode(null, h2);
selector.addNode(null, h3);
selector.updateNode(h1);
selector.updateNode(h2);
selector.updateNode(h3);
selector.computeTask.run();
Assert.assertEquals(2, selector.getSortedNodes().size());
// basic test for selecting node which has queue length
// less than queue capacity.
Set<String> blacklist = new HashSet<>();
RMNode node = selector.selectAnyNode(blacklist, defaultResourceRequested);
Assert.assertTrue(node.getHostName().equals("h1") ||
node.getHostName().equals("h3"));
// if node has been added to blacklist
blacklist.add("h1");
node = selector.selectAnyNode(blacklist, defaultResourceRequested);
Assert.assertEquals("h3", node.getHostName());
blacklist.add("h3");
node = selector.selectAnyNode(blacklist, defaultResourceRequested);
Assert.assertNull(node);
}
|
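A JDK-only sketch of the selection loop above: pick a random start index, scan the candidate list circularly, and skip blacklisted hosts. Types are simplified to strings for illustration.

import java.util.List;
import java.util.Random;
import java.util.Set;

// Sketch of random-start circular selection with a blacklist.
public final class RandomStartSelectorSketch {
    static String selectAny(List<String> hosts, Set<String> blacklist) {
        int size = hosts.size();
        if (size == 0) {
            return null;
        }
        int startIndex = new Random().nextInt(size);
        for (int i = 0; i < size; i++) {
            String host = hosts.get((startIndex + i) % size);
            if (host != null && !blacklist.contains(host)) {
                return host;
            }
        }
        return null; // every candidate was blacklisted
    }

    public static void main(String[] args) {
        System.out.println(selectAny(List.of("h1", "h2", "h3"), Set.of("h1", "h3"))); // h2
    }
}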
public static L3ModificationInstruction modL3IPv6Dst(IpAddress addr) {
checkNotNull(addr, "Dst l3 IPv6 address cannot be null");
return new ModIPInstruction(L3SubType.IPV6_DST, addr);
}
|
@Test
public void testModL3IPv6DstMethod() {
final Instruction instruction = Instructions.modL3IPv6Dst(ip61);
final L3ModificationInstruction.ModIPInstruction modIPInstruction =
checkAndConvert(instruction,
Instruction.Type.L3MODIFICATION,
L3ModificationInstruction.ModIPInstruction.class);
assertThat(modIPInstruction.ip(), is(equalTo(ip61)));
assertThat(modIPInstruction.subtype(),
is(equalTo(L3ModificationInstruction.L3SubType.IPV6_DST)));
}
|
@Override
public Date getStartedAt() {
String dateString = settings.get(CoreProperties.SERVER_STARTTIME).orElseThrow(() -> new IllegalStateException("Mandatory"));
return DateUtils.parseDateTime(dateString);
}
|
@Test(expected = RuntimeException.class)
public void invalid_startup_date_throws_exception() {
MapSettings settings = new MapSettings();
settings.setProperty(CoreProperties.SERVER_STARTTIME, "invalid");
DefaultScannerWsClient client = mock(DefaultScannerWsClient.class);
DefaultServer metadata = new DefaultServer(settings.asConfig(), client, null);
metadata.getStartedAt();
}
|
@Override
public boolean syncVerifyData(DistroData verifyData, String targetServer) {
if (isNoExistTarget(targetServer)) {
return true;
}
// replace the target server with this node's own address so that the target can call back.
verifyData.getDistroKey().setTargetServer(memberManager.getSelf().getAddress());
DistroDataRequest request = new DistroDataRequest(verifyData, DataOperation.VERIFY);
Member member = memberManager.find(targetServer);
if (checkTargetServerStatusUnhealthy(member)) {
Loggers.DISTRO
.warn("[DISTRO] Cancel distro verify caused by target server {} unhealthy, key: {}", targetServer,
verifyData.getDistroKey());
return false;
}
try {
Response response = clusterRpcClientProxy.sendRequest(member, request);
return checkResponse(response);
} catch (NacosException e) {
Loggers.DISTRO.error("[DISTRO-FAILED] Verify distro data failed! key: {} ", verifyData.getDistroKey(), e);
}
return false;
}
|
@Test
void testSyncVerifyDataFailure() throws NacosException {
DistroData verifyData = new DistroData();
verifyData.setDistroKey(new DistroKey());
when(memberManager.hasMember(member.getAddress())).thenReturn(true);
when(memberManager.find(member.getAddress())).thenReturn(member);
member.setState(NodeState.UP);
response.setErrorInfo(ResponseCode.FAIL.getCode(), "TEST");
when(clusterRpcClientProxy.isRunning(member)).thenReturn(true);
assertFalse(transportAgent.syncVerifyData(verifyData, member.getAddress()));
}
|
public boolean isAlwaysFalse() {
if (conditions.isEmpty()) {
return false;
}
for (ShardingCondition each : conditions) {
if (!(each instanceof AlwaysFalseShardingCondition)) {
return false;
}
}
return true;
}
|
@Test
void assertIsAlwaysFalse() {
ShardingConditions shardingConditions = new ShardingConditions(Collections.emptyList(), mock(SQLStatementContext.class), mock(ShardingRule.class));
assertFalse(shardingConditions.isAlwaysFalse());
}
|
public Set<MapperConfig> load(InputStream inputStream) throws IOException {
final PrometheusMappingConfig config = ymlMapper.readValue(inputStream, PrometheusMappingConfig.class);
return config.metricMappingConfigs()
.stream()
.flatMap(this::mapMetric)
.collect(Collectors.toSet());
}
|
@Test
void unknownType() {
final Map<String, ImmutableList<Serializable>> config = Collections.singletonMap("metric_mappings",
ImmutableList.of(
ImmutableMap.of(
"type", "unknown",
"metric_name", "test1",
"unknown_property", "foo.bar"
)));
assertThatThrownBy(() -> configLoader.load(new ByteArrayInputStream(objectMapper.writeValueAsBytes(config))))
.hasMessageContaining("Could not resolve type id 'unknown'");
}
|
@PostMapping(path = "/proxyGateway")
public void proxyGateway(@RequestBody @Valid final ProxyGatewayDTO proxyGatewayDTO,
final HttpServletRequest request,
final HttpServletResponse response) throws IOException {
sandboxService.requestProxyGateway(proxyGatewayDTO, request, response);
}
|
@Test
public void testProxyGateway() throws Exception {
AppAuthDO appAuthDO = buildAppAuthDO();
ProxyGatewayDTO proxyGatewayDTO = buildProxyGatewayDTO();
when(this.appAuthService.findByAppKey(any())).thenReturn(appAuthDO);
MockHttpServletResponse response = mockMvc.perform(MockMvcRequestBuilders.post("/sandbox/proxyGateway")
.contentType(MediaType.APPLICATION_JSON)
.content(GsonUtils.getInstance().toJson(proxyGatewayDTO)))
.andReturn().getResponse();
// Verify the results
assertThat(response.getStatus()).isEqualTo(HttpStatus.OK.value());
}
|
@Override
public <T> T clone(T object) {
if (object instanceof String) {
return object;
} else if (object instanceof Collection) {
Object firstElement = findFirstNonNullElement((Collection) object);
if (firstElement != null && !(firstElement instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
} else if (object instanceof Map) {
Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
if (firstEntry != null) {
Object key = firstEntry.getKey();
Object value = firstEntry.getValue();
if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
}
} else if (object instanceof JsonNode) {
return (T) ((JsonNode) object).deepCopy();
}
if (object instanceof Serializable) {
try {
return (T) SerializationHelper.clone((Serializable) object);
} catch (SerializationException e) {
//it is possible that object itself implements java.io.Serializable, but underlying structure does not
//in this case we switch to the other JSON marshaling strategy which doesn't use the Java serialization
}
}
return jsonClone(object);
}
|
@Test
public void should_clone_collection_of_non_serializable_object() {
List<NonSerializableObject> original = new ArrayList<>();
original.add(new NonSerializableObject("value"));
List<NonSerializableObject> cloned = serializer.clone(original);
assertEquals(original, cloned);
assertNotSame(original, cloned);
}
|
@Bean("EsClient")
public EsClient provide(Configuration config) {
Settings.Builder esSettings = Settings.builder();
// mandatory property defined by bootstrap process
esSettings.put("cluster.name", config.get(CLUSTER_NAME.getKey()).get());
boolean clusterEnabled = config.getBoolean(CLUSTER_ENABLED.getKey()).orElse(false);
boolean searchNode = !clusterEnabled || SEARCH.equals(NodeType.parse(config.get(CLUSTER_NODE_TYPE.getKey()).orElse(null)));
List<HttpHost> httpHosts;
if (clusterEnabled && !searchNode) {
httpHosts = getHttpHosts(config);
LOGGER.info("Connected to remote Elasticsearch: [{}]", displayedAddresses(httpHosts));
} else {
// defaults provided in:
// * in org.sonar.process.ProcessProperties.Property.SEARCH_HOST
// * in org.sonar.process.ProcessProperties.Property.SEARCH_PORT
HostAndPort host = HostAndPort.fromParts(config.get(SEARCH_HOST.getKey()).get(), config.getInt(SEARCH_PORT.getKey()).get());
httpHosts = Collections.singletonList(toHttpHost(host, config));
LOGGER.info("Connected to local Elasticsearch: [{}]", displayedAddresses(httpHosts));
}
return new EsClient(config.get(CLUSTER_SEARCH_PASSWORD.getKey()).orElse(null),
config.get(CLUSTER_ES_HTTP_KEYSTORE.getKey()).orElse(null),
config.get(CLUSTER_ES_HTTP_KEYSTORE_PASSWORD.getKey()).orElse(null),
httpHosts.toArray(new HttpHost[0]));
}
|
@Test
public void provide_whenHttpEncryptionEnabled_shouldUseHttps() throws GeneralSecurityException, IOException {
settings.setProperty(CLUSTER_ENABLED.getKey(), true);
Path keyStorePath = temp.newFile("keystore.p12").toPath();
EsClientTest.createCertificate("localhost", keyStorePath, "password");
settings.setProperty(CLUSTER_ES_HTTP_KEYSTORE.getKey(), keyStorePath.toString());
settings.setProperty(CLUSTER_NODE_TYPE.getKey(), "application");
settings.setProperty(CLUSTER_SEARCH_HOSTS.getKey(), format("%s,%s:8081", localhostHostname, localhostHostname));
EsClient client = underTest.provide(settings.asConfig());
RestHighLevelClient nativeClient = client.nativeClient();
Node node = nativeClient.getLowLevelClient().getNodes().get(0);
assertThat(node.getHost().getSchemeName()).isEqualTo("https");
assertThat(logTester.logs(Level.INFO))
.has(new Condition<>(s -> s.contains("Connected to remote Elasticsearch: [https://" + localhostHostname + ":9001, https://" + localhostHostname + ":8081]"), ""));
}
|
public static SnapshotRef fromJson(String json) {
Preconditions.checkArgument(
json != null && !json.isEmpty(), "Cannot parse snapshot ref from invalid JSON: %s", json);
return JsonUtil.parse(json, SnapshotRefParser::fromJson);
}
|
@Test
public void testTagFromJsonAllFields() {
String json = "{\"snapshot-id\":1,\"type\":\"tag\",\"max-ref-age-ms\":1}";
SnapshotRef ref = SnapshotRef.tagBuilder(1L).maxRefAgeMs(1L).build();
assertThat(SnapshotRefParser.fromJson(json))
.as("Should be able to deserialize tag with all fields")
.isEqualTo(ref);
}
|
@Override
public Mono<GetCurrencyConversionsResponse> getCurrencyConversions(final GetCurrencyConversionsRequest request) {
AuthenticationUtil.requireAuthenticatedDevice();
final CurrencyConversionEntityList currencyConversionEntityList = currencyManager
.getCurrencyConversions()
.orElseThrow(Status.UNAVAILABLE::asRuntimeException);
final List<GetCurrencyConversionsResponse.CurrencyConversionEntity> currencyConversionEntities = currencyConversionEntityList
.getCurrencies()
.stream()
.map(cce -> GetCurrencyConversionsResponse.CurrencyConversionEntity.newBuilder()
.setBase(cce.getBase())
.putAllConversions(transformBigDecimalsToStrings(cce.getConversions()))
.build())
.toList();
return Mono.just(GetCurrencyConversionsResponse.newBuilder()
.addAllCurrencies(currencyConversionEntities).setTimestamp(currencyConversionEntityList.getTimestamp())
.build());
}
|
@Test
public void testUnauthenticated() throws Exception {
assertStatusException(Status.UNAUTHENTICATED, () -> unauthenticatedServiceStub().getCurrencyConversions(
GetCurrencyConversionsRequest.newBuilder().build()));
}
|
public FEELFnResult<BigDecimal> invoke(@ParameterName("from") String from, @ParameterName("grouping separator") String group, @ParameterName("decimal separator") String decimal) {
if ( from == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null"));
}
if ( group != null && !group.equals( " " ) && !group.equals( "." ) && !group.equals( "," ) ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "group", "not a valid one, can only be one of: dot ('.'), comma (','), space (' ') "));
}
if ( decimal != null ) {
if (!decimal.equals( "." ) && !decimal.equals( "," )) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "decimal", "not a valid one, can only be one of: dot ('.'), comma (',') "));
} else if (group != null && decimal.equals( group )) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "decimal", "cannot be the same as parameter 'group' "));
}
}
if ( group != null ) {
from = from.replaceAll( "\\" + group, "" );
}
if ( decimal != null ) {
from = from.replaceAll( "\\" + decimal, "." );
}
BigDecimal result = NumberEvalHelper.getBigDecimalOrNull(from );
if( from != null && result == null ) {
// conversion failed
return FEELFnResult.ofError( new InvalidParametersEvent(Severity.ERROR, "unable to calculate final number result" ) );
} else {
return FEELFnResult.ofResult( result );
}
}
|
@Test
void invokeEmptyGroup() {
FunctionTestUtil.assertResultError(numberFunction.invoke("1 000", "", null), InvalidParametersEvent.class);
}
|
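A JDK-only sketch of the separator normalization performed above, using literal String.replace instead of regex; it omits the FEEL-specific separator validation.

import java.math.BigDecimal;

// Sketch: strip the grouping separator, rewrite the decimal separator to '.',
// then parse with BigDecimal; return null when the conversion fails.
public final class NumberParseSketch {
    static BigDecimal parse(String from, String group, String decimal) {
        if (from == null) {
            return null;
        }
        if (group != null) {
            from = from.replace(group, "");   // literal replace, no regex escaping needed
        }
        if (decimal != null) {
            from = from.replace(decimal, ".");
        }
        try {
            return new BigDecimal(from);
        } catch (NumberFormatException e) {
            return null;                      // conversion failed
        }
    }

    public static void main(String[] args) {
        System.out.println(parse("1 000 000,01", " ", ","));  // 1000000.01
        System.out.println(parse("1,234.5", ",", "."));       // 1234.5
    }
}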
@Nonnull
@Override
public Collection<DataConnectionResource> listResources() {
try (AdminClient client = AdminClient.create(getConfig().getProperties())) {
return client.listTopics().names().get()
.stream()
.sorted()
.map(n -> new DataConnectionResource("topic", n))
.collect(Collectors.toList());
} catch (ExecutionException | InterruptedException e) {
throw new HazelcastException("Could not get list of topics for DataConnection " + getConfig().getName(), e);
}
}
|
@Test
public void list_resources_should_return_empty_list_for_no_topics() {
kafkaDataConnection = createKafkaDataConnection(kafkaTestSupport);
Collection<DataConnectionResource> resources = kafkaDataConnection.listResources();
List<DataConnectionResource> withoutConfluent =
resources.stream().filter(r -> !Arrays.toString(r.name()).contains("__confluent")).collect(toList());
assertThat(withoutConfluent).isEmpty();
}
|
public void useModules(String... names) {
checkNotNull(names, "names cannot be null");
Set<String> deduplicateNames = new HashSet<>();
for (String name : names) {
if (!loadedModules.containsKey(name)) {
throw new ValidationException(
String.format("No module with name '%s' exists", name));
}
if (!deduplicateNames.add(name)) {
throw new ValidationException(
String.format("Module '%s' appears more than once", name));
}
}
usedModules.clear();
usedModules.addAll(Arrays.asList(names));
}
|
@Test
void testUseModules() {
ModuleMock x = new ModuleMock("x");
ModuleMock y = new ModuleMock("y");
ModuleMock z = new ModuleMock("z");
manager.loadModule(x.getType(), x);
manager.loadModule(y.getType(), y);
manager.loadModule(z.getType(), z);
assertThat(manager.getUsedModules())
.containsSequence(CoreModuleFactory.IDENTIFIER, "x", "y", "z");
// test order for used modules
manager.useModules("z", CoreModuleFactory.IDENTIFIER);
assertThat(manager.getUsedModules()).containsSequence("z", CoreModuleFactory.IDENTIFIER);
// test unmentioned modules are still loaded
Map<String, Module> expectedLoadedModules = new HashMap<>();
expectedLoadedModules.put(CoreModuleFactory.IDENTIFIER, CoreModule.INSTANCE);
expectedLoadedModules.put("x", x);
expectedLoadedModules.put("y", y);
expectedLoadedModules.put("z", z);
assertThat(manager.getLoadedModules()).isEqualTo(expectedLoadedModules);
}
|
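A JDK-only sketch of the validation and ordering semantics above: every name must already be loaded, duplicates are rejected, and the used-module order is replaced wholesale by the argument order.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Sketch of useModules-style validation and reordering.
public final class UseModulesSketch {
    private final Map<String, Object> loadedModules;
    private final List<String> usedModules = new ArrayList<>();

    UseModulesSketch(Map<String, Object> loadedModules) {
        this.loadedModules = loadedModules;
    }

    void useModules(String... names) {
        Set<String> seen = new HashSet<>();
        for (String name : names) {
            if (!loadedModules.containsKey(name)) {
                throw new IllegalArgumentException(String.format("No module with name '%s' exists", name));
            }
            if (!seen.add(name)) {
                throw new IllegalArgumentException(String.format("Module '%s' appears more than once", name));
            }
        }
        usedModules.clear();
        usedModules.addAll(List.of(names));
    }

    public static void main(String[] args) {
        UseModulesSketch m = new UseModulesSketch(Map.of("core", new Object(), "x", new Object()));
        m.useModules("x", "core");
        System.out.println(m.usedModules); // [x, core]
    }
}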
public boolean eval(ContentFile<?> file) {
// TODO: detect the case where a column is missing from the file using file's max field id.
return new MetricsEvalVisitor().eval(file);
}
|
@Test
public void testIntegerIn() {
boolean shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, in("id", INT_MIN_VALUE - 25, INT_MIN_VALUE - 24))
.eval(FILE);
assertThat(shouldRead).as("Should not read: id below lower bound (5 < 30, 6 < 30)").isFalse();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, in("id", INT_MIN_VALUE - 2, INT_MIN_VALUE - 1))
.eval(FILE);
assertThat(shouldRead).as("Should not read: id below lower bound (28 < 30, 29 < 30)").isFalse();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, in("id", INT_MIN_VALUE - 1, INT_MIN_VALUE))
.eval(FILE);
assertThat(shouldRead).as("Should read: id equal to lower bound (30 == 30)").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, in("id", INT_MAX_VALUE - 4, INT_MAX_VALUE - 3))
.eval(FILE);
assertThat(shouldRead)
.as("Should read: id between lower and upper bounds (30 < 75 < 79, 30 < 76 < 79)")
.isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, in("id", INT_MAX_VALUE, INT_MAX_VALUE + 1))
.eval(FILE);
assertThat(shouldRead).as("Should read: id equal to upper bound (79 == 79)").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, in("id", INT_MAX_VALUE + 1, INT_MAX_VALUE + 2))
.eval(FILE);
assertThat(shouldRead).as("Should not read: id above upper bound (80 > 79, 81 > 79)").isFalse();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, in("id", INT_MAX_VALUE + 6, INT_MAX_VALUE + 7))
.eval(FILE);
assertThat(shouldRead).as("Should not read: id above upper bound (85 > 79, 86 > 79)").isFalse();
shouldRead = new InclusiveMetricsEvaluator(SCHEMA, in("all_nulls", "abc", "def")).eval(FILE);
assertThat(shouldRead).as("Should skip: in on all nulls column").isFalse();
shouldRead = new InclusiveMetricsEvaluator(SCHEMA, in("some_nulls", "abc", "def")).eval(FILE);
assertThat(shouldRead).as("Should read: in on some nulls column").isTrue();
shouldRead = new InclusiveMetricsEvaluator(SCHEMA, in("no_nulls", "abc", "def")).eval(FILE);
assertThat(shouldRead).as("Should read: in on no nulls column").isTrue();
// should read as the number of elements in the in expression is too big
List<Integer> ids = Lists.newArrayListWithExpectedSize(400);
for (int id = -400; id <= 0; id++) {
ids.add(id);
}
shouldRead = new InclusiveMetricsEvaluator(SCHEMA, in("id", ids)).eval(FILE);
assertThat(shouldRead).as("Should read: large in expression").isTrue();
}
|
public boolean couldHoldIgnoringSharedMemory(NormalizedResources other, double thisTotalMemoryMb, double otherTotalMemoryMb) {
if (this.cpu < other.getTotalCpu()) {
return false;
}
return couldHoldIgnoringSharedMemoryAndCpu(other, thisTotalMemoryMb, otherTotalMemoryMb);
}
|
@Test
public void testCouldHoldWithTooLittleMemory() {
NormalizedResources resources = new NormalizedResources(normalize(Collections.singletonMap(gpuResourceName, 1)));
NormalizedResources resourcesToCheck = new NormalizedResources(normalize(Collections.singletonMap(gpuResourceName, 1)));
boolean couldHold = resources.couldHoldIgnoringSharedMemory(resourcesToCheck, 100, 200);
assertThat(couldHold, is(false));
}
|
public String createAndUploadPolicyExample(IamClient iam, String accountID, String policyName) {
// Build the policy.
IamPolicy policy = IamPolicy.builder() // 'version' defaults to "2012-10-17".
.addStatement(IamStatement.builder()
.effect(IamEffect.ALLOW)
.addAction("dynamodb:PutItem")
.addResource("arn:aws:dynamodb:us-east-1:" + accountID
+ ":table/exampleTableName")
.build())
.build();
// Upload the policy.
iam.createPolicy(r -> r.policyName(policyName).policyDocument(policy.toJson()));
return policy.toJson(IamPolicyWriter.builder().prettyPrint(true).build());
}
|
@Test
@Tag("IntegrationTest")
void createAndUploadPolicyExample() {
String accountId = examples.getAccountID();
String policyName = "AllowPutItemToExampleTable";
String jsonPolicy = examples.createAndUploadPolicyExample(iam, accountId, policyName);
logger.info(jsonPolicy);
GetPolicyResponse putItemPolicy = iam
.getPolicy(b -> b.policyArn("arn:aws:iam::" + accountId + ":policy/" + policyName));
iam.deletePolicy(b -> b.policyArn(putItemPolicy.policy().arn()));
logger.info("Policy [{}] deleted", putItemPolicy.policy().arn());
}
|
public static void verifyChunkedSums(int bytesPerSum, int checksumType,
ByteBuffer sums, ByteBuffer data, String fileName, long basePos)
throws ChecksumException {
nativeComputeChunkedSums(bytesPerSum, checksumType,
sums, sums.position(),
data, data.position(), data.remaining(),
fileName, basePos, true);
}
|
@Test
public void testVerifyChunkedSumsSuccessOddSize() throws ChecksumException {
// Test checksum with an odd number of bytes. This is a corner case that
// is often broken in checksum calculation, because there is a loop which
// handles an even multiple of 4 or 8 bytes and then some additional code
// to finish the few odd bytes at the end. This code can often be broken
// but is never tested because we are always calling it with an even value
// such as 512.
bytesPerChecksum--;
allocateDirectByteBuffers();
fillDataAndValidChecksums();
NativeCrc32.verifyChunkedSums(bytesPerChecksum, checksumType.id,
checksums, data, fileName, BASE_POSITION);
bytesPerChecksum++;
}
|
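A plain-Java illustration of chunked checksumming, one CRC32 per bytesPerSum-sized chunk; this is only a sketch of the idea, not the native Hadoop CRC path the test exercises.

import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

// Sketch: compute one CRC32 per chunk, then verify by recomputing chunk by chunk,
// including the odd-sized tail chunk the test above is concerned with.
public final class ChunkedCrcSketch {
    static long[] chunkedSums(byte[] data, int bytesPerSum) {
        int chunks = (data.length + bytesPerSum - 1) / bytesPerSum;
        long[] sums = new long[chunks];
        for (int i = 0; i < chunks; i++) {
            CRC32 crc = new CRC32();
            int from = i * bytesPerSum;
            crc.update(data, from, Math.min(bytesPerSum, data.length - from));
            sums[i] = crc.getValue();
        }
        return sums;
    }

    static void verify(byte[] data, int bytesPerSum, long[] expected) {
        long[] actual = chunkedSums(data, bytesPerSum);
        for (int i = 0; i < expected.length; i++) {
            if (actual[i] != expected[i]) {
                throw new IllegalStateException("Checksum mismatch at chunk " + i);
            }
        }
    }

    public static void main(String[] args) {
        byte[] data = "odd-sized payload!".getBytes(StandardCharsets.UTF_8); // 18 bytes
        verify(data, 7, chunkedSums(data, 7)); // passes, including the 4-byte tail chunk
        System.out.println("ok");
    }
}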
public static HazelcastInstance newHazelcastInstance(Config config) {
if (config == null) {
config = Config.load();
}
return newHazelcastInstance(
config,
config.getInstanceName(),
new DefaultNodeContext()
);
}
|
@Test(expected = ExpectedRuntimeException.class)
public void test_NewInstance_failed_afterNodeStart() throws Exception {
NodeContext context = new TestNodeContext() {
@Override
public NodeExtension createNodeExtension(Node node) {
NodeExtension nodeExtension = super.createNodeExtension(node);
doThrow(new ExpectedRuntimeException()).when(nodeExtension).afterStart();
return nodeExtension;
}
};
Config config = new Config();
config.getNetworkConfig().getJoin().getAutoDetectionConfig().setEnabled(false);
hazelcastInstance = HazelcastInstanceFactory.newHazelcastInstance(config, randomString(), context);
}
|
public void shutdown(final Callback<None> callback)
{
_managerStarted = false;
for (ZooKeeperAnnouncer server : _servers)
{
server.shutdown();
}
Callback<None> zkCloseCallback = new CallbackAdapter<None, None>(callback)
{
@Override
protected None convertResponse(None none) throws Exception
{
_zkConnection.shutdown();
return none;
}
};
if (_store != null)
{
_store.shutdown(zkCloseCallback);
}
else
{
zkCloseCallback.onSuccess(None.none());
}
}
|
@Test
public void testMarkDownAndUpDuringDisconnection()
throws Exception
{
ZooKeeperAnnouncer announcer = getZooKeeperAnnouncer(_cluster, _uri, WEIGHT);
ZooKeeperConnectionManager manager = createManager(true, announcer);
ZooKeeperEphemeralStore<UriProperties> store = createAndStartUriStore();
UriProperties properties = store.get(_cluster);
assertNotNull(properties);
assertEquals(properties.getPartitionDataMap(URI.create(_uri)).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), WEIGHT);
assertEquals(properties.Uris().size(), 1);
_zkServer.shutdown(false);
FutureCallback<None> markDownCallback = new FutureCallback<>();
announcer.markDown(markDownCallback);
FutureCallback<None> markUpCallback = new FutureCallback<>();
announcer.markUp(markUpCallback);
// ugly, but we need to wait for a while just so that Disconnect event is propagated
// to the caller before we restart the zk server.
Thread.sleep(1000);
_zkServer.restart();
markUpCallback.get(10, TimeUnit.SECONDS);
try
{
markDownCallback.get();
Assert.fail("mark down should have thrown CancellationException.");
}
catch (ExecutionException e)
{
Assert.assertTrue(e.getCause() instanceof CancellationException);
}
properties = store.get(_cluster);
assertNotNull(properties);
assertEquals(properties.getPartitionDataMap(URI.create(_uri)).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), WEIGHT);
assertEquals(properties.Uris().size(), 1);
shutdownManager(manager);
}
|
@Udf(description = "Converts a string representation of a date in the given format"
+ " into the number of days since 1970-01-01 00:00:00 UTC/GMT.")
public int stringToDate(
@UdfParameter(
description = "The string representation of a date.") final String formattedDate,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
// NB: We do not perform a null check here, preferring to throw an exception as
// there is no sentinel value for a "null" Date.
try {
final DateTimeFormatter formatter = formatters.get(formatPattern);
return ((int)LocalDate.parse(formattedDate, formatter).toEpochDay());
} catch (final ExecutionException | RuntimeException e) {
throw new KsqlFunctionException("Failed to parse date '" + formattedDate
+ "' with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
}
|
@Test
public void shouldThrowIfParseFails() {
// When:
final Exception e = assertThrows(
KsqlFunctionException.class,
() -> udf.stringToDate("invalid", "yyyy-MM-dd")
);
// Then:
assertThat(e.getMessage(), containsString("Failed to parse date 'invalid' with formatter 'yyyy-MM-dd'"));
}
|
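A JDK-only sketch of the string-to-epoch-days conversion above; the real UDF caches formatters, whereas this sketch builds one per call.

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;

// Sketch: parse a date string with a caller-supplied pattern and return the
// number of days since 1970-01-01, wrapping parse failures in a descriptive error.
public final class StringToEpochDaysSketch {
    static int stringToDate(String formattedDate, String formatPattern) {
        try {
            DateTimeFormatter formatter = DateTimeFormatter.ofPattern(formatPattern);
            return (int) LocalDate.parse(formattedDate, formatter).toEpochDay();
        } catch (DateTimeParseException | IllegalArgumentException e) {
            throw new IllegalArgumentException("Failed to parse date '" + formattedDate
                    + "' with formatter '" + formatPattern + "': " + e.getMessage(), e);
        }
    }

    public static void main(String[] args) {
        System.out.println(stringToDate("1970-01-02", "yyyy-MM-dd")); // 1
        System.out.println(stringToDate("2020-09-08", "yyyy-MM-dd")); // 18513
    }
}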
@Override
public Map<String, Table> getTables() {
return ImmutableMap.copyOf(tables);
}
|
@Test
public void testGetTables() throws Exception {
store.createTable(mockTable("hello"));
store.createTable(mockTable("world"));
assertEquals(2, store.getTables().size());
assertThat(store.getTables(), Matchers.hasValue(mockTable("hello")));
assertThat(store.getTables(), Matchers.hasValue(mockTable("world")));
}
|
@Override
public boolean storesMixedCaseQuotedIdentifiers() {
return false;
}
|
@Test
void assertStoresMixedCaseQuotedIdentifiers() {
assertFalse(metaData.storesMixedCaseQuotedIdentifiers());
}
|
@Override
public void execute(Context context) {
context.setResolution(resolution);
}
|
@Test
public void execute() {
SetResolution function = new SetResolution("FIXED");
Function.Context context = mock(Function.Context.class);
function.execute(context);
verify(context, times(1)).setResolution("FIXED");
}
|
@Nonnull
public static ToConverter getToConverter(QueryDataType type) {
if (type.getTypeFamily() == QueryDataTypeFamily.OBJECT) {
// User-defined types are subject to the same conversion rules as ordinary OBJECT.
type = QueryDataType.OBJECT;
}
return Objects.requireNonNull(CONVERTERS.get(type), "missing converter for " + type);
}
|
@Test
public void test_dateConversion() {
OffsetDateTime time = OffsetDateTime.of(2020, 9, 8, 11, 4, 0, 123, UTC);
Object converted = getToConverter(TIMESTAMP_WITH_TZ_DATE).convert(time);
assertThat(converted).isEqualTo(Date.from(time.toInstant()));
}
|
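A small sketch of the timestamp-with-timezone to java.util.Date conversion the test above checks, going through Instant so the offset is honored.

import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.Date;

// Sketch: convert an OffsetDateTime to java.util.Date via Instant.
public final class OffsetDateTimeToDateSketch {
    static Date toDate(OffsetDateTime time) {
        return Date.from(time.toInstant());
    }

    public static void main(String[] args) {
        OffsetDateTime time = OffsetDateTime.of(2020, 9, 8, 11, 4, 0, 123, ZoneOffset.UTC);
        // Prints 2020-09-08T11:04:00Z; nanoseconds below 1 ms are dropped by java.util.Date.
        System.out.println(toDate(time).toInstant());
    }
}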
@Override
public <T> CompletableFuture<T> submit(Callable<T> callable) {
final CompletableFuture<T> promise = new CompletableFuture<>();
try {
CompletableFuture.supplyAsync(ContextPropagator.decorateSupplier(config.getContextPropagator(),() -> {
try {
publishBulkheadEvent(() -> new BulkheadOnCallPermittedEvent(name));
return callable.call();
} catch (CompletionException e) {
throw e;
} catch (Exception e){
throw new CompletionException(e);
}
}), executorService).whenComplete((result, throwable) -> {
publishBulkheadEvent(() -> new BulkheadOnCallFinishedEvent(name));
if (throwable != null) {
promise.completeExceptionally(throwable);
} else {
promise.complete(result);
}
});
} catch (RejectedExecutionException rejected) {
publishBulkheadEvent(() -> new BulkheadOnCallRejectedEvent(name));
throw BulkheadFullException.createBulkheadFullException(this);
}
return promise;
}
|
@Test
public void testSupplierThreadLocalContextPropagator() {
TestThreadLocalContextHolder.put("ValueShouldCrossThreadBoundary");
CompletableFuture<Object> future = fixedThreadPoolBulkhead
.submit(() -> TestThreadLocalContextHolder.get().orElse(null));
waitAtMost(5, TimeUnit.SECONDS).until(matches(() ->
assertThat(future).isCompletedWithValue("ValueShouldCrossThreadBoundary")));
}
|
@Override
public synchronized CompletableFuture<MastershipEvent> setMaster(NetworkId networkId,
NodeId nodeId, DeviceId deviceId) {
Map<DeviceId, NodeId> masterMap = getMasterMap(networkId);
MastershipRole role = getRole(networkId, nodeId, deviceId);
switch (role) {
case MASTER:
// no-op
return CompletableFuture.completedFuture(null);
case STANDBY:
case NONE:
NodeId prevMaster = masterMap.put(deviceId, nodeId);
incrementTerm(networkId, deviceId);
removeFromBackups(networkId, deviceId, nodeId);
addToBackup(networkId, deviceId, prevMaster);
break;
default:
log.warn("unknown Mastership Role {}", role);
return null;
}
return CompletableFuture.completedFuture(
new MastershipEvent(MASTER_CHANGED, deviceId, getMastership(networkId, deviceId)));
}
|
@Test
public void setMaster() {
put(VNID1, VDID1, N1, false, false);
assertEquals("wrong event", MASTER_CHANGED,
Futures.getUnchecked(sms.setMaster(VNID1, N1, VDID1)).type());
assertEquals("wrong role", MASTER, sms.getRole(VNID1, N1, VDID1));
//set node that's already master - should be ignored
assertNull("wrong event",
Futures.getUnchecked(sms.setMaster(VNID1, N1, VDID1)));
//set STANDBY to MASTER
put(VNID1, VDID2, N1, false, true);
assertEquals("wrong role", STANDBY, sms.getRole(VNID1, N1, VDID2));
assertEquals("wrong event", MASTER_CHANGED,
Futures.getUnchecked(sms.setMaster(VNID1, N1, VDID2)).type());
assertEquals("wrong role", MASTER, sms.getRole(VNID1, N1, VDID2));
}
|
public void writeEncodedValue(EncodedValue encodedValue) throws IOException {
switch (encodedValue.getValueType()) {
case ValueType.BOOLEAN:
writer.write(Boolean.toString(((BooleanEncodedValue) encodedValue).getValue()));
break;
case ValueType.BYTE:
writer.write(
String.format("0x%x", ((ByteEncodedValue)encodedValue).getValue()));
break;
case ValueType.CHAR:
writer.write(
String.format("0x%x", (int)((CharEncodedValue)encodedValue).getValue()));
break;
case ValueType.SHORT:
writer.write(
String.format("0x%x", ((ShortEncodedValue)encodedValue).getValue()));
break;
case ValueType.INT:
writer.write(
String.format("0x%x", ((IntEncodedValue)encodedValue).getValue()));
break;
case ValueType.LONG:
writer.write(
String.format("0x%x", ((LongEncodedValue)encodedValue).getValue()));
break;
case ValueType.FLOAT:
writer.write(Float.toString(((FloatEncodedValue)encodedValue).getValue()));
break;
case ValueType.DOUBLE:
writer.write(Double.toString(((DoubleEncodedValue)encodedValue).getValue()));
break;
case ValueType.ANNOTATION:
writeAnnotation((AnnotationEncodedValue)encodedValue);
break;
case ValueType.ARRAY:
writeArray((ArrayEncodedValue)encodedValue);
break;
case ValueType.STRING:
writeQuotedString(((StringEncodedValue)encodedValue).getValue());
break;
case ValueType.FIELD:
writeFieldDescriptor(((FieldEncodedValue)encodedValue).getValue());
break;
case ValueType.ENUM:
writeFieldDescriptor(((EnumEncodedValue)encodedValue).getValue());
break;
case ValueType.METHOD:
writeMethodDescriptor(((MethodEncodedValue)encodedValue).getValue());
break;
case ValueType.TYPE:
writeType(((TypeEncodedValue)encodedValue).getValue());
break;
case ValueType.METHOD_TYPE:
writeMethodProtoDescriptor(((MethodTypeEncodedValue)encodedValue).getValue());
break;
case ValueType.METHOD_HANDLE:
writeMethodHandle(((MethodHandleEncodedValue)encodedValue).getValue());
break;
case ValueType.NULL:
writer.write("null");
break;
default:
throw new IllegalArgumentException("Unknown encoded value type");
}
}
|
@Test
public void testWriteEncodedValue_double() throws IOException {
DexFormattedWriter writer = new DexFormattedWriter(output);
writer.writeEncodedValue(new ImmutableDoubleEncodedValue(12.34));
Assert.assertEquals("12.34", output.toString());
}
|
@Override
public double rand() {
if (MathEx.random() < q) {
return 0;
} else {
return 1;
}
}
|
@Test
public void testSd() {
System.out.println("sd");
BernoulliDistribution instance = new BernoulliDistribution(0.3);
instance.rand();
assertEquals(Math.sqrt(0.21), instance.sd(), 1E-7);
}
|
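The test asserts sd == sqrt(p * (1 - p)) for p = 0.3. A minimal sketch of Bernoulli sampling and its standard deviation:

import java.util.Random;

// Sketch: a Bernoulli(p) draw returns 1 with probability p and 0 otherwise;
// its standard deviation is sqrt(p * (1 - p)), i.e. sqrt(0.21) for p = 0.3.
public final class BernoulliSketch {
    private final double p;
    private final Random random = new Random();

    BernoulliSketch(double p) {
        this.p = p;
    }

    double rand() {
        return random.nextDouble() < (1.0 - p) ? 0 : 1;
    }

    double sd() {
        return Math.sqrt(p * (1 - p));
    }

    public static void main(String[] args) {
        BernoulliSketch b = new BernoulliSketch(0.3);
        b.rand();
        System.out.println(b.sd()); // ~0.4583, equal to sqrt(0.21)
    }
}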
public int getConnectionCount() {
return connectionCount;
}
|
@Test
public void test_constructor_connectionCountDefaultBehavior() {
ClientTpcConfig config = new ClientTpcConfig();
assertEquals(1, config.getConnectionCount());
System.setProperty("hazelcast.client.tpc.connectionCount", "5");
config = new ClientTpcConfig();
assertEquals(5, config.getConnectionCount());
}
|
@Override
public List<Integer> applyTransforms(List<Integer> originalGlyphIds)
{
LOG.warn(
"{} class does not perform actual GSUB substitutions. Perhaps the selected language is not yet supported by the FontBox library.",
getClass().getSimpleName());
// Make the result read-only to prevent accidental modifications of the source list
return Collections.unmodifiableList(originalGlyphIds);
}
|
@Test
@DisplayName("Transformation result is actually a read-only version of the argument")
void applyTransforms()
{
// given
DefaultGsubWorker sut = new DefaultGsubWorker();
List<Integer> originalGlyphIds = Arrays.asList(1, 2, 3, 4, 5);
// when
List<Integer> pseudoTransformedIds = sut.applyTransforms(originalGlyphIds);
Executable modification = pseudoTransformedIds::clear;
// then
assertEquals(originalGlyphIds, pseudoTransformedIds);
assertThrows(UnsupportedOperationException.class, modification);
}
|
public Response request(Request request) throws NacosException {
return request(request, rpcClientConfig.timeOutMills());
}
|
@Test
void testRequestWhenTimeoutThenThrowException() throws NacosException {
assertThrows(NacosException.class, () -> {
rpcClient.rpcClientStatus.set(RpcClientStatus.RUNNING);
rpcClient.currentConnection = connection;
doReturn(null).when(connection).request(any(), anyLong());
rpcClient.request(null, 10000);
});
}
|
@Operation(
summary = "Monitor the given search keys in the key transparency log",
description = """
Enforced unauthenticated endpoint. Return proofs proving that the log tree
has been constructed correctly in later entries for each of the given search keys.
"""
)
@ApiResponse(responseCode = "200", description = "All search keys exist in the log", useReturnTypeSchema = true)
@ApiResponse(responseCode = "404", description = "At least one search key lookup did not find the key")
@ApiResponse(responseCode = "413", description = "Ratelimited")
@ApiResponse(responseCode = "422", description = "Invalid request format")
@POST
@Path("/monitor")
@RateLimitedByIp(RateLimiters.For.KEY_TRANSPARENCY_MONITOR_PER_IP)
@Produces(MediaType.APPLICATION_JSON)
public KeyTransparencyMonitorResponse monitor(
@ReadOnly @Auth final Optional<AuthenticatedDevice> authenticatedAccount,
@NotNull @Valid final KeyTransparencyMonitorRequest request) {
// Disallow clients from making authenticated requests to this endpoint
requireNotAuthenticated(authenticatedAccount);
try {
final List<MonitorKey> monitorKeys = new ArrayList<>(List.of(
createMonitorKey(getFullSearchKeyByteString(ACI_PREFIX, request.aci().toCompactByteArray()),
request.aciPositions())
));
request.usernameHash().ifPresent(usernameHash ->
monitorKeys.add(createMonitorKey(getFullSearchKeyByteString(USERNAME_PREFIX, usernameHash),
request.usernameHashPositions().get()))
);
request.e164().ifPresent(e164 ->
monitorKeys.add(
createMonitorKey(getFullSearchKeyByteString(E164_PREFIX, e164.getBytes(StandardCharsets.UTF_8)),
request.e164Positions().get()))
);
return new KeyTransparencyMonitorResponse(keyTransparencyServiceClient.monitor(
monitorKeys,
request.lastNonDistinguishedTreeHeadSize(),
request.lastDistinguishedTreeHeadSize(),
KEY_TRANSPARENCY_RPC_TIMEOUT).join());
} catch (final CancellationException exception) {
LOGGER.error("Unexpected cancellation from key transparency service", exception);
throw new ServerErrorException(Response.Status.SERVICE_UNAVAILABLE, exception);
} catch (final CompletionException exception) {
handleKeyTransparencyServiceError(exception);
}
// This is unreachable
return null;
}
|
@Test
void monitorAuthenticated() {
final Invocation.Builder request = resources.getJerseyTest()
.target("/v1/key-transparency/monitor")
.request()
.header(HttpHeaders.AUTHORIZATION, AuthHelper.getAuthHeader(AuthHelper.VALID_UUID, AuthHelper.VALID_PASSWORD));
try (Response response = request.post(
Entity.json(createMonitorRequestJson(ACI, List.of(3L), Optional.empty(), Optional.empty(),
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty())))) {
assertEquals(400, response.getStatus());
verify(keyTransparencyServiceClient, never()).monitor(any(), any(), any(), any());
}
}
|
@Override
public boolean canRescaleMaxParallelism(int desiredMaxParallelism) {
// Technically a valid parallelism value, but one that cannot be rescaled to
if (desiredMaxParallelism == JobVertex.MAX_PARALLELISM_DEFAULT) {
return false;
}
return !rescaleMaxValidator
.apply(normalizeAndCheckMaxParallelism(desiredMaxParallelism))
.isPresent();
}
|
@Test
void canRescaleMaxDefault() {
DefaultVertexParallelismInfo info = new DefaultVertexParallelismInfo(1, 1, ALWAYS_VALID);
assertThat(info.canRescaleMaxParallelism(JobVertex.MAX_PARALLELISM_DEFAULT)).isFalse();
}
|
@Override
public void merge(Accumulator<Integer, Integer> other) {
this.min = Math.min(this.min, other.getLocalValue());
}
|
@Test
void testMerge() {
IntMinimum min1 = new IntMinimum();
min1.add(1234);
IntMinimum min2 = new IntMinimum();
min2.add(5678);
min2.merge(min1);
assertThat(min2.getLocalValue().intValue()).isEqualTo(1234);
min1.merge(min2);
assertThat(min1.getLocalValue().intValue()).isEqualTo(1234);
}
|
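A tiny sketch of the min-accumulator merge semantics exercised above: merging keeps the smaller local value, so merge order does not matter.

// Sketch of a commutative minimum accumulator.
public final class IntMinimumSketch {
    private int min = Integer.MAX_VALUE;

    void add(int value) {
        min = Math.min(min, value);
    }

    void merge(IntMinimumSketch other) {
        this.min = Math.min(this.min, other.min);
    }

    int getLocalValue() {
        return min;
    }

    public static void main(String[] args) {
        IntMinimumSketch a = new IntMinimumSketch();
        a.add(1234);
        IntMinimumSketch b = new IntMinimumSketch();
        b.add(5678);
        b.merge(a);
        a.merge(b);
        System.out.println(a.getLocalValue() + " " + b.getLocalValue()); // 1234 1234
    }
}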
public boolean eval(ContentFile<?> file) {
// TODO: detect the case where a column is missing from the file using file's max field id.
return new MetricsEvalVisitor().eval(file);
}
|
@Test
public void testCaseInsensitiveIntegerNotEqRewritten() {
boolean shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, not(equal("ID", INT_MIN_VALUE - 25)), false)
.eval(FILE);
assertThat(shouldRead).as("Should read: id below lower bound").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, not(equal("ID", INT_MIN_VALUE - 1)), false)
.eval(FILE);
assertThat(shouldRead).as("Should read: id below lower bound").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, not(equal("ID", INT_MIN_VALUE)), false).eval(FILE);
assertThat(shouldRead).as("Should read: id equal to lower bound").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, not(equal("ID", INT_MAX_VALUE - 4)), false)
.eval(FILE);
assertThat(shouldRead).as("Should read: id between lower and upper bounds").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, not(equal("ID", INT_MAX_VALUE)), false).eval(FILE);
assertThat(shouldRead).as("Should read: id equal to upper bound").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, not(equal("ID", INT_MAX_VALUE + 1)), false)
.eval(FILE);
assertThat(shouldRead).as("Should read: id above upper bound").isTrue();
shouldRead =
new InclusiveMetricsEvaluator(SCHEMA, not(equal("ID", INT_MAX_VALUE + 6)), false)
.eval(FILE);
assertThat(shouldRead).as("Should read: id above upper bound").isTrue();
}
|
@Override
public CompletableFuture<Boolean> checkAndUpdateConfigMap(
String configMapName,
Function<KubernetesConfigMap, Optional<KubernetesConfigMap>> updateFunction) {
return FutureUtils.retry(
() -> attemptCheckAndUpdateConfigMap(configMapName, updateFunction),
maxRetryAttempts,
// Only KubernetesClientException is retryable
t -> ExceptionUtils.findThrowable(t, KubernetesClientException.class).isPresent(),
kubeClientExecutorService);
}
|
@Test
void testCheckAndUpdateConfigMap() throws Exception {
this.flinkKubeClient.createConfigMap(buildTestingConfigMap());
// Checker pass
final boolean updated =
this.flinkKubeClient
.checkAndUpdateConfigMap(
TESTING_CONFIG_MAP_NAME,
c -> {
c.getData()
.put(
TESTING_CONFIG_MAP_KEY,
TESTING_CONFIG_MAP_NEW_VALUE);
return Optional.of(c);
})
.get();
assertThat(updated).isTrue();
final Optional<KubernetesConfigMap> configMapOpt =
this.flinkKubeClient.getConfigMap(TESTING_CONFIG_MAP_NAME);
assertThat(configMapOpt).isPresent();
assertThat(configMapOpt.get().getData())
.containsEntry(TESTING_CONFIG_MAP_KEY, TESTING_CONFIG_MAP_NEW_VALUE);
}
|
SortedReplicas(Broker broker,
Set<Function<Replica, Boolean>> selectionFuncs,
List<Function<Replica, Integer>> priorityFuncs,
Function<Replica, Double> scoreFunction) {
this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true);
}
|
@Test
public void testPriorityFunction() {
Broker broker = generateBroker(NUM_REPLICAS);
new SortedReplicasHelper().addPriorityFunc(PRIORITY_FUNC)
.setScoreFunc(SCORE_FUNC)
.trackSortedReplicasFor(SORT_NAME, broker);
SortedReplicas sr = broker.trackedSortedReplicas(SORT_NAME);
assertEquals(NUM_REPLICAS, sr.sortedReplicas(false).size());
verifySortedReplicas(sr);
}
|
@Override
public byte getByte(int index) {
checkIndex(index);
return _getByte(index);
}
|
@Test
public void getByteBoundaryCheck1() {
assertThrows(IndexOutOfBoundsException.class, new Executable() {
@Override
public void execute() {
buffer.getByte(-1);
}
});
}
|
@Override
protected void init() throws ServiceException {
Configuration hConf = new Configuration(false);
ConfigurationUtils.copy(getServiceConfig(), hConf);
hGroups = new org.apache.hadoop.security.Groups(hConf);
}
|
@Test(expected = RuntimeException.class)
@TestDir
public void invalidGroupsMapping() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName())));
conf.set("server.groups.hadoop.security.group.mapping", String.class.getName());
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
|
public CompletableFuture<ForwardMessageToDeadLetterQueueResponse> forwardMessageToDeadLetterQueue(ProxyContext ctx,
ForwardMessageToDeadLetterQueueRequest request) {
CompletableFuture<ForwardMessageToDeadLetterQueueResponse> future = new CompletableFuture<>();
try {
validateTopicAndConsumerGroup(request.getTopic(), request.getGroup());
String group = request.getGroup().getName();
String handleString = request.getReceiptHandle();
MessageReceiptHandle messageReceiptHandle = messagingProcessor.removeReceiptHandle(ctx, grpcChannelManager.getChannel(ctx.getClientID()), group, request.getMessageId(), request.getReceiptHandle());
if (messageReceiptHandle != null) {
handleString = messageReceiptHandle.getReceiptHandleStr();
}
ReceiptHandle receiptHandle = ReceiptHandle.decode(handleString);
return this.messagingProcessor.forwardMessageToDeadLetterQueue(
ctx,
receiptHandle,
request.getMessageId(),
request.getGroup().getName(),
request.getTopic().getName()
).thenApply(result -> convertToForwardMessageToDeadLetterQueueResponse(ctx, result));
} catch (Throwable t) {
future.completeExceptionally(t);
}
return future;
}
|
@Test
public void testForwardMessageToDeadLetterQueueWhenHasMappingHandle() throws Throwable {
ArgumentCaptor<ReceiptHandle> receiptHandleCaptor = ArgumentCaptor.forClass(ReceiptHandle.class);
when(this.messagingProcessor.forwardMessageToDeadLetterQueue(any(), receiptHandleCaptor.capture(), anyString(), anyString(), anyString()))
.thenReturn(CompletableFuture.completedFuture(RemotingCommand.createResponseCommand(ResponseCode.SUCCESS, "")));
String savedHandleStr = buildReceiptHandle("topic", System.currentTimeMillis(),3000);
when(messagingProcessor.removeReceiptHandle(any(), any(), anyString(), anyString(), anyString()))
.thenReturn(new MessageReceiptHandle("group", "topic", 0, savedHandleStr, "msgId", 0, 0));
ForwardMessageToDeadLetterQueueResponse response = this.forwardMessageToDLQActivity.forwardMessageToDeadLetterQueue(
createContext(),
ForwardMessageToDeadLetterQueueRequest.newBuilder()
.setTopic(Resource.newBuilder().setName("topic").build())
.setGroup(Resource.newBuilder().setName("group").build())
.setMessageId(MessageClientIDSetter.createUniqID())
.setReceiptHandle(buildReceiptHandle("topic", System.currentTimeMillis(), 3000))
.build()
).get();
assertEquals(Code.OK, response.getStatus().getCode());
assertEquals(savedHandleStr, receiptHandleCaptor.getValue().getReceiptHandle());
}
|
public static FusedPipeline fuse(Pipeline p) {
return new GreedyPipelineFuser(p).fusedPipeline;
}
|
@Test
public void sanitizedTransforms() throws Exception {
PCollection flattenOutput = pc("flatten.out");
PCollection read1Output = pc("read1.out");
PCollection read2Output = pc("read2.out");
PCollection impulse1Output = pc("impulse1.out");
PCollection impulse2Output = pc("impulse2.out");
PTransform flattenTransform =
PTransform.newBuilder()
.setUniqueName("Flatten")
.putInputs(read1Output.getUniqueName(), read1Output.getUniqueName())
.putInputs(read2Output.getUniqueName(), read2Output.getUniqueName())
.putOutputs(flattenOutput.getUniqueName(), flattenOutput.getUniqueName())
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.FLATTEN_TRANSFORM_URN)
.setPayload(
WindowIntoPayload.newBuilder()
.setWindowFn(FunctionSpec.newBuilder())
.build()
.toByteString()))
.setEnvironmentId("py")
.build();
PTransform read1Transform =
PTransform.newBuilder()
.setUniqueName("read1")
.putInputs(impulse1Output.getUniqueName(), impulse1Output.getUniqueName())
.putOutputs(read1Output.getUniqueName(), read1Output.getUniqueName())
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.setPayload(
WindowIntoPayload.newBuilder()
.setWindowFn(FunctionSpec.newBuilder())
.build()
.toByteString()))
.setEnvironmentId("py")
.build();
PTransform read2Transform =
PTransform.newBuilder()
.setUniqueName("read2")
.putInputs(impulse2Output.getUniqueName(), impulse2Output.getUniqueName())
.putOutputs(read2Output.getUniqueName(), read2Output.getUniqueName())
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.setPayload(
WindowIntoPayload.newBuilder()
.setWindowFn(FunctionSpec.newBuilder())
.build()
.toByteString()))
.setEnvironmentId("py")
.build();
PTransform impulse1Transform =
PTransform.newBuilder()
.setUniqueName("impulse1")
.putOutputs(impulse1Output.getUniqueName(), impulse1Output.getUniqueName())
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.IMPULSE_TRANSFORM_URN)
.setPayload(
WindowIntoPayload.newBuilder()
.setWindowFn(FunctionSpec.newBuilder())
.build()
.toByteString()))
.build();
PTransform impulse2Transform =
PTransform.newBuilder()
.setUniqueName("impulse2")
.putOutputs(impulse2Output.getUniqueName(), impulse2Output.getUniqueName())
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.IMPULSE_TRANSFORM_URN)
.setPayload(
WindowIntoPayload.newBuilder()
.setWindowFn(FunctionSpec.newBuilder())
.build()
.toByteString()))
.build();
Pipeline impulse =
Pipeline.newBuilder()
.addRootTransformIds(impulse1Transform.getUniqueName())
.addRootTransformIds(impulse2Transform.getUniqueName())
.addRootTransformIds(flattenTransform.getUniqueName())
.setComponents(
Components.newBuilder()
.putCoders("coder", Coder.newBuilder().build())
.putCoders("windowCoder", Coder.newBuilder().build())
.putWindowingStrategies(
"ws",
WindowingStrategy.newBuilder().setWindowCoderId("windowCoder").build())
.putEnvironments("py", Environments.createDockerEnvironment("py"))
.putPcollections(flattenOutput.getUniqueName(), flattenOutput)
.putTransforms(flattenTransform.getUniqueName(), flattenTransform)
.putPcollections(read1Output.getUniqueName(), read1Output)
.putTransforms(read1Transform.getUniqueName(), read1Transform)
.putPcollections(read2Output.getUniqueName(), read2Output)
.putTransforms(read2Transform.getUniqueName(), read2Transform)
.putPcollections(impulse1Output.getUniqueName(), impulse1Output)
.putTransforms(impulse1Transform.getUniqueName(), impulse1Transform)
.putPcollections(impulse2Output.getUniqueName(), impulse2Output)
.putTransforms(impulse2Transform.getUniqueName(), impulse2Transform)
.build())
.build();
FusedPipeline fused = GreedyPipelineFuser.fuse(impulse);
assertThat(fused.getRunnerExecutedTransforms(), hasSize(2));
assertThat(fused.getFusedStages(), hasSize(2));
assertThat(
fused.getFusedStages(),
containsInAnyOrder(
ExecutableStageMatcher.withInput(impulse1Output.getUniqueName())
.withTransforms(flattenTransform.getUniqueName(), read1Transform.getUniqueName()),
ExecutableStageMatcher.withInput(impulse2Output.getUniqueName())
.withTransforms(flattenTransform.getUniqueName(), read2Transform.getUniqueName())));
assertThat(
fused.getFusedStages().stream()
.flatMap(
s ->
s.getComponents().getTransformsOrThrow(flattenTransform.getUniqueName())
.getInputsMap().values().stream())
.collect(Collectors.toList()),
containsInAnyOrder(read1Output.getUniqueName(), read2Output.getUniqueName()));
}
|
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
log.info("Starting to validate internal topics {}.", topicConfigs.keySet());
final long now = time.milliseconds();
final long deadline = now + retryTimeoutMs;
final ValidationResult validationResult = new ValidationResult();
final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
if (!topicDescriptionsStillToValidate.isEmpty()) {
final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topicDescriptionsStillToValidate);
descriptionsForTopic = describeTopicsResult.topicNameValues();
}
Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
if (!topicConfigsStillToValidate.isEmpty()) {
final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
topicConfigsStillToValidate.stream()
.map(topic -> new ConfigResource(Type.TOPIC, topic))
.collect(Collectors.toSet())
);
configsForTopic = describeConfigsResult.values().entrySet().stream()
.collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
}
while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
if (!descriptionsForTopic.isEmpty()) {
doValidateTopic(
validationResult,
descriptionsForTopic,
topicConfigs,
topicDescriptionsStillToValidate,
(streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide)
);
}
if (!configsForTopic.isEmpty()) {
doValidateTopic(
validationResult,
configsForTopic,
topicConfigs,
topicConfigsStillToValidate,
(streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide)
);
}
maybeThrowTimeoutException(
Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
deadline,
String.format("Could not validate internal topics within %d milliseconds. " +
"This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs)
);
if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
Utils.sleep(100);
}
}
maybeSleep(
Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
deadline,
"validated"
);
}
log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
return validationResult;
}
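For example, with retryTimeoutMs = 5000 the loop keeps re-describing whatever topics are still unvalidated, sleeping 100 ms between passes, and maybeThrowTimeoutException aborts with the timeout message above once the deadline computed at the start (now + 5000 ms) has passed.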
|
@Test
public void shouldReportMissingTopics() {
final String missingTopic1 = "missingTopic1";
final String missingTopic2 = "missingTopic2";
setupTopicInMockAdminClient(topic1, repartitionTopicConfig());
final InternalTopicConfig internalTopicConfig1 = setupRepartitionTopicConfig(topic1, 1);
final InternalTopicConfig internalTopicConfig2 = setupRepartitionTopicConfig(missingTopic1, 1);
final InternalTopicConfig internalTopicConfig3 = setupRepartitionTopicConfig(missingTopic2, 1);
final ValidationResult validationResult = internalTopicManager.validate(mkMap(
mkEntry(topic1, internalTopicConfig1),
mkEntry(missingTopic1, internalTopicConfig2),
mkEntry(missingTopic2, internalTopicConfig3)
));
final Set<String> missingTopics = validationResult.missingTopics();
assertThat(missingTopics.size(), is(2));
assertThat(missingTopics, hasItem(missingTopic1));
assertThat(missingTopics, hasItem(missingTopic2));
assertThat(validationResult.misconfigurationsForTopics(), anEmptyMap());
}
|
@Override
public boolean offer(final T element) {
return offer(element, 1);
}
|
@Test
public void testStreamSummary() {
ConcurrentStreamSummary<String> vs = new ConcurrentStreamSummary<String>(3);
String[] stream = {"X", "X", "Y", "Z", "A", "B", "C", "X", "X", "A", "A", "A"};
for (String i : stream) {
vs.offer(i);
/*
for(String s : vs.poll(3))
System.out.print(s+" ");
*/
System.out.println(vs);
}
}
|
@Override
public int size()
{
return _size;
}
|
@Test
public void testRemoveLast()
{
List<Integer> control = new ArrayList<>(Arrays.asList(1, 2, 3));
LinkedDeque<Integer> q = new LinkedDeque<>(control);
Assert.assertEquals(q.removeLast(), control.remove(control.size() - 1));
Assert.assertEquals(q, control);
}
|
public static boolean isDirectory(URL resourceURL) throws URISyntaxException {
final String protocol = resourceURL.getProtocol();
switch (protocol) {
case "jar":
try {
final JarURLConnection jarConnection = (JarURLConnection) resourceURL.openConnection();
final JarEntry entry = jarConnection.getJarEntry();
if (entry.isDirectory()) {
return true;
}
// WARNING! Heuristics ahead.
// It turns out that JarEntry#isDirectory() really just tests whether the filename ends in a '/'.
// If you try to open the same URL without a trailing '/', it'll succeed — but the result won't be
// what you want. We try to get around this by calling getInputStream() on the file inside the jar.
// This seems to return null for directories (though that behavior is undocumented as far as I
// can tell). If you have a better idea, please improve this.
final String relativeFilePath = entry.getName();
final JarFile jarFile = jarConnection.getJarFile();
final ZipEntry zipEntry = jarFile.getEntry(relativeFilePath);
final InputStream inputStream = jarFile.getInputStream(zipEntry);
return inputStream == null;
} catch (IOException e) {
throw new ResourceNotFoundException(e);
}
case "file":
return new File(resourceURL.toURI()).isDirectory();
default:
throw new IllegalArgumentException("Unsupported protocol " + resourceURL.getProtocol() +
" for resource " + resourceURL);
}
}
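A minimal sketch of the heuristic described in the comments above, assuming a hypothetical jar path and entry name; it contrasts the trailing-slash check of ZipEntry#isDirectory() with the null-stream probe the method falls back on.

import java.io.InputStream;
import java.util.jar.JarFile;
import java.util.zip.ZipEntry;

public class JarDirectoryProbe {
    public static void main(String[] args) throws Exception {
        // Hypothetical jar and entry name, used purely for illustration.
        try (JarFile jar = new JarFile("/tmp/example.jar")) {
            ZipEntry entry = jar.getEntry("some/dir");
            if (entry != null) {
                // isDirectory() merely tests whether the stored name ends in '/'.
                System.out.println("name ends in '/': " + entry.isDirectory());
                // The method above instead treats a null stream as "this entry is a directory".
                try (InputStream in = jar.getInputStream(entry)) {
                    System.out.println("stream is null: " + (in == null));
                }
            }
        }
    }
}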
|
@Test
void isDirectoryReturnsTrueForURLEncodedDirectoriesInJarsWithoutTrailingSlashes() throws Exception {
final URL url = new URL("jar:" + resourceJar.toExternalForm() + "!/dir%20with%20space");
assertThat(url.getProtocol()).isEqualTo("jar");
assertThat(ResourceURL.isDirectory(url)).isTrue();
}
|
@Override
public HadoopConf buildHadoopConfWithReadOnlyConfig(ReadonlyConfig readonlyConfig) {
Configuration configuration = loadHiveBaseHadoopConfig(readonlyConfig);
Config config = fillBucket(readonlyConfig, configuration);
config =
config.withValue(
CosConfigOptions.SECRET_ID.key(),
ConfigValueFactory.fromAnyRef(
configuration.get(CosConfigOptions.SECRET_ID.key())));
config =
config.withValue(
CosConfigOptions.SECRET_KEY.key(),
ConfigValueFactory.fromAnyRef(
configuration.get(CosConfigOptions.SECRET_KEY.key())));
config =
config.withValue(
CosConfigOptions.REGION.key(),
ConfigValueFactory.fromAnyRef(
configuration.get(CosConfigOptions.REGION.key())));
HadoopConf hadoopConf = CosConf.buildWithConfig(config);
Map<String, String> propsInConfiguration =
configuration.getPropsWithPrefix(StringUtils.EMPTY);
hadoopConf.setExtraOptions(propsInConfiguration);
return hadoopConf;
}
|
@Test
void fillBucketInHadoopConfPath() throws URISyntaxException {
URL resource = CosStorageTest.class.getResource("/cos");
String filePath = Paths.get(resource.toURI()).toString();
HashMap<String, Object> map = new HashMap<>();
map.put("hive.hadoop.conf-path", filePath);
map.putAll(COS.toMap());
ReadonlyConfig readonlyConfig = ReadonlyConfig.fromMap(map);
COSStorage cosStorage = new COSStorage();
HadoopConf hadoopConf = cosStorage.buildHadoopConfWithReadOnlyConfig(readonlyConfig);
assertHadoopConf(hadoopConf);
}
|
NettyPartitionRequestClient createPartitionRequestClient(ConnectionID connectionId)
throws IOException, InterruptedException {
// We map the input ConnectionID to a new value to restrict the number of tcp connections
connectionId =
new ConnectionID(
connectionId.getResourceID(),
connectionId.getAddress(),
connectionId.getConnectionIndex() % maxNumberOfConnections);
while (true) {
final CompletableFuture<NettyPartitionRequestClient> newClientFuture =
new CompletableFuture<>();
final CompletableFuture<NettyPartitionRequestClient> clientFuture =
clients.putIfAbsent(connectionId, newClientFuture);
final NettyPartitionRequestClient client;
if (clientFuture == null) {
try {
client = connectWithRetries(connectionId);
} catch (Throwable e) {
newClientFuture.completeExceptionally(
new IOException("Could not create Netty client.", e));
clients.remove(connectionId, newClientFuture);
throw e;
}
newClientFuture.complete(client);
} else {
try {
client = clientFuture.get();
} catch (ExecutionException e) {
ExceptionUtils.rethrowIOException(ExceptionUtils.stripExecutionException(e));
return null;
}
}
// Make sure to increment the reference count before handing a client
// out to ensure correct bookkeeping for channel closing.
if (client.validateClientAndIncrementReferenceCounter()) {
return client;
} else if (client.canBeDisposed()) {
client.closeConnection();
} else {
destroyPartitionRequestClient(connectionId, client);
}
}
}
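For illustration: with maxNumberOfConnections = 2, the remapping above sends connection indices 0, 1, 2 and 3 to 0, 1, 0 and 1, so at most two distinct ConnectionIDs (and hence at most two Netty clients) exist per remote address, and later requests simply increment the reference count of an existing client.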
|
@TestTemplate
void testNettyClientConnectRetryFailure() throws Exception {
NettyTestUtil.NettyServerAndClient serverAndClient = createNettyServerAndClient();
UnstableNettyClient unstableNettyClient =
new UnstableNettyClient(serverAndClient.client(), 3);
try {
PartitionRequestClientFactory factory =
new PartitionRequestClientFactory(
unstableNettyClient, 2, 1, connectionReuseEnabled);
assertThatThrownBy(
() ->
factory.createPartitionRequestClient(
serverAndClient.getConnectionID(RESOURCE_ID, 0)))
.isInstanceOf(IOException.class);
} finally {
shutdown(serverAndClient);
}
}
|
@ApiOperation(value = "Get a user’s picture", produces = "application/octet-stream", tags = {
"Users" }, notes = "The response body contains the raw picture data, representing the user’s picture. The Content-type of the response corresponds to the mimeType that was set when creating the picture.")
@ApiResponses(value = {
@ApiResponse(code = 200, message = "Indicates the user was found and has a picture, which is returned in the body."),
@ApiResponse(code = 404, message = "Indicates the requested user was not found or the user does not have a profile picture. Status-description contains additional information about the error.")
})
@GetMapping(value = "/identity/users/{userId}/picture")
public ResponseEntity<byte[]> getUserPicture(@ApiParam(name = "userId") @PathVariable String userId) {
User user = getUserFromRequest(userId);
Picture userPicture = identityService.getUserPicture(user.getId());
if (userPicture == null) {
throw new FlowableObjectNotFoundException("The user with id '" + user.getId() + "' does not have a picture.", Picture.class);
}
HttpHeaders responseHeaders = new HttpHeaders();
if (userPicture.getMimeType() != null) {
responseHeaders.set("Content-Type", userPicture.getMimeType());
} else {
responseHeaders.set("Content-Type", "image/jpeg");
}
try {
return new ResponseEntity<>(IOUtils.toByteArray(userPicture.getInputStream()), responseHeaders, HttpStatus.OK);
} catch (Exception e) {
throw new FlowableException("Error exporting picture: " + e.getMessage(), e);
}
}
|
@Test
public void testUpdatePictureWithCustomMimeType() throws Exception {
User savedUser = null;
try {
User newUser = identityService.newUser("testuser");
newUser.setFirstName("Fred");
newUser.setLastName("McDonald");
newUser.setEmail("[email protected]");
identityService.saveUser(newUser);
savedUser = newUser;
Map<String, String> additionalFields = new HashMap<>();
additionalFields.put("mimeType", MediaType.IMAGE_PNG.toString());
HttpPut httpPut = new HttpPut(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_USER_PICTURE, newUser.getId()));
httpPut.setEntity(HttpMultipartHelper
.getMultiPartEntity("myPicture.png", "image/png", new ByteArrayInputStream("this is the picture raw byte stream".getBytes()),
additionalFields));
closeResponse(executeBinaryRequest(httpPut, HttpStatus.SC_NO_CONTENT));
Picture picture = identityService.getUserPicture(newUser.getId());
assertThat(picture).isNotNull();
assertThat(picture.getMimeType()).isEqualTo("image/png");
assertThat(new String(picture.getBytes())).isEqualTo("this is the picture raw byte stream");
} finally {
// Delete user after test passes or fails
if (savedUser != null) {
identityService.deleteUser(savedUser.getId());
}
}
}
|
@Override
public ObjectiveTranslation doTranslate(NextObjective obj)
throws FabricPipelinerException {
final ObjectiveTranslation.Builder resultBuilder =
ObjectiveTranslation.builder();
switch (obj.type()) {
case SIMPLE:
simpleNext(obj, resultBuilder, false);
break;
case HASHED:
hashedNext(obj, resultBuilder);
break;
case BROADCAST:
if (isXconnect(obj)) {
xconnectNext(obj, resultBuilder);
} else {
multicastNext(obj, resultBuilder);
}
break;
default:
log.warn("Unsupported NextObjective type '{}'", obj);
return ObjectiveTranslation.ofError(ObjectiveError.UNSUPPORTED);
}
if (!isGroupModifyOp(obj)) {
// Generate next MPLS and VLAN rules.
nextMpls(obj, resultBuilder);
nextVlan(obj, resultBuilder);
}
return resultBuilder.build();
}
|
@Test
public void testMplsHashedOutput() throws Exception {
TrafficTreatment treatment1 = DefaultTrafficTreatment.builder()
.setEthSrc(ROUTER_MAC)
.setEthDst(SPINE1_MAC)
.pushMpls()
.copyTtlOut()
.setMpls(MPLS_10)
.popVlan()
.setOutput(PORT_1)
.build();
TrafficTreatment treatment2 = DefaultTrafficTreatment.builder()
.setEthSrc(ROUTER_MAC)
.setEthDst(SPINE2_MAC)
.pushMpls()
.copyTtlOut()
.setMpls(MPLS_10)
.popVlan()
.setOutput(PORT_2)
.build();
NextObjective nextObjective = DefaultNextObjective.builder()
.withId(NEXT_ID_1)
.withPriority(PRIORITY)
.withMeta(VLAN_META)
.addTreatment(treatment1)
.addTreatment(treatment2)
.withType(NextObjective.Type.HASHED)
.makePermanent()
.fromApp(APP_ID)
.add();
ObjectiveTranslation actualTranslation = translatorHashed.doTranslate(nextObjective);
// Expected hashed table flow rule.
PiCriterion nextIdCriterion = PiCriterion.builder()
.matchExact(FabricConstants.HDR_NEXT_ID, NEXT_ID_1)
.build();
TrafficSelector nextIdSelector = DefaultTrafficSelector.builder()
.matchPi(nextIdCriterion)
.build();
PiActionProfileGroupId actionGroupId = PiActionProfileGroupId.of(NEXT_ID_1);
TrafficTreatment treatment = DefaultTrafficTreatment.builder()
.piTableAction(actionGroupId)
.build();
FlowRule expectedFlowRule = DefaultFlowRule.builder()
.forDevice(DEVICE_ID)
.fromApp(APP_ID)
.makePermanent()
// FIXME: currently next objective doesn't support priority, ignore this
.withPriority(0)
.forTable(FabricConstants.FABRIC_INGRESS_NEXT_HASHED)
.withSelector(nextIdSelector)
.withTreatment(treatment)
.build();
// First egress rule - port1
PortNumber outPort = outputPort(treatment1);
PiCriterion egressVlanTableMatch = PiCriterion.builder()
.matchExact(FabricConstants.HDR_EG_PORT, outPort.toLong())
.build();
TrafficSelector selectorForEgressVlan = DefaultTrafficSelector.builder()
.matchPi(egressVlanTableMatch)
.matchVlanId(VLAN_100)
.build();
PiAction piActionForEgressVlan = PiAction.builder()
.withId(FabricConstants.FABRIC_EGRESS_EGRESS_NEXT_POP_VLAN)
.build();
TrafficTreatment treatmentForEgressVlan = DefaultTrafficTreatment.builder()
.piTableAction(piActionForEgressVlan)
.build();
FlowRule expectedEgressVlanPopRule1 = DefaultFlowRule.builder()
.withSelector(selectorForEgressVlan)
.withTreatment(treatmentForEgressVlan)
.forTable(FabricConstants.FABRIC_EGRESS_EGRESS_NEXT_EGRESS_VLAN)
.makePermanent()
.withPriority(nextObjective.priority())
.forDevice(DEVICE_ID)
.fromApp(APP_ID)
.build();
// Second egress rule - port2
outPort = outputPort(treatment2);
egressVlanTableMatch = PiCriterion.builder()
.matchExact(FabricConstants.HDR_EG_PORT, outPort.toLong())
.build();
selectorForEgressVlan = DefaultTrafficSelector.builder()
.matchPi(egressVlanTableMatch)
.matchVlanId(VLAN_100)
.build();
FlowRule expectedEgressVlanPopRule2 = DefaultFlowRule.builder()
.withSelector(selectorForEgressVlan)
.withTreatment(treatmentForEgressVlan)
.forTable(FabricConstants.FABRIC_EGRESS_EGRESS_NEXT_EGRESS_VLAN)
.makePermanent()
.withPriority(nextObjective.priority())
.forDevice(DEVICE_ID)
.fromApp(APP_ID)
.build();
// Expected group
PiAction piAction1 = PiAction.builder()
.withId(FabricConstants.FABRIC_INGRESS_NEXT_ROUTING_HASHED)
.withParameter(new PiActionParam(
FabricConstants.SMAC, ROUTER_MAC.toBytes()))
.withParameter(new PiActionParam(
FabricConstants.DMAC, SPINE1_MAC.toBytes()))
.withParameter(new PiActionParam(
FabricConstants.PORT_NUM, PORT_1.toLong()))
.build();
PiAction piAction2 = PiAction.builder()
.withId(FabricConstants.FABRIC_INGRESS_NEXT_ROUTING_HASHED)
.withParameter(new PiActionParam(
FabricConstants.SMAC, ROUTER_MAC.toBytes()))
.withParameter(new PiActionParam(
FabricConstants.DMAC, SPINE2_MAC.toBytes()))
.withParameter(new PiActionParam(
FabricConstants.PORT_NUM, PORT_2.toLong()))
.build();
treatment1 = DefaultTrafficTreatment.builder()
.piTableAction(piAction1)
.build();
treatment2 = DefaultTrafficTreatment.builder()
.piTableAction(piAction2)
.build();
List<TrafficTreatment> treatments = ImmutableList.of(treatment1, treatment2);
List<GroupBucket> buckets = treatments.stream()
.map(DefaultGroupBucket::createSelectGroupBucket)
.collect(Collectors.toList());
GroupBuckets groupBuckets = new GroupBuckets(buckets);
PiGroupKey groupKey = new PiGroupKey(FabricConstants.FABRIC_INGRESS_NEXT_HASHED,
FabricConstants.FABRIC_INGRESS_NEXT_HASHED_SELECTOR,
NEXT_ID_1);
GroupDescription expectedGroup = new DefaultGroupDescription(
DEVICE_ID,
GroupDescription.Type.SELECT,
groupBuckets,
groupKey,
NEXT_ID_1,
APP_ID
);
ObjectiveTranslation expectedTranslation = ObjectiveTranslation.builder()
.addFlowRule(expectedFlowRule)
.addFlowRule(vlanMetaFlowRule)
.addFlowRule(mplsFlowRule)
.addGroup(expectedGroup)
.addFlowRule(expectedEgressVlanPopRule1)
.addFlowRule(expectedEgressVlanPopRule2)
.build();
assertEquals(expectedTranslation, actualTranslation);
}
|
public static Select select(String fieldName) {
return new Select(fieldName);
}
|
@Test
void select_specific_fields() {
String q = Q.select("f1", "f2")
.from("sd1")
.where("f1").contains("v1")
.build();
assertEquals(q, "yql=select f1, f2 from sd1 where f1 contains \"v1\"");
}
|
public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, Timer timer) {
return beginningOrEndOffset(partitions, ListOffsetsRequest.LATEST_TIMESTAMP, timer);
}
|
@Test
public void testEndOffsetsMultipleTopicPartitions() {
buildFetcher();
Map<TopicPartition, Long> expectedOffsets = new HashMap<>();
expectedOffsets.put(tp0, 5L);
expectedOffsets.put(tp1, 7L);
expectedOffsets.put(tp2, 9L);
assignFromUser(expectedOffsets.keySet());
client.prepareResponse(listOffsetResponse(expectedOffsets, Errors.NONE, ListOffsetsRequest.LATEST_TIMESTAMP, ListOffsetsResponse.UNKNOWN_EPOCH));
assertEquals(expectedOffsets, offsetFetcher.endOffsets(asList(tp0, tp1, tp2), time.timer(5000L)));
}
|
public PrepareAndActivateResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams) {
DeployHandlerLogger logger = DeployHandlerLogger.forPrepareParams(prepareParams);
File tempDir = uncheck(() -> Files.createTempDirectory("deploy")).toFile();
ThreadLockStats threadLockStats = LockStats.getForCurrentThread();
PrepareAndActivateResult result;
try {
threadLockStats.startRecording("deploy of " + prepareParams.getApplicationId().serializedForm());
result = deploy(decompressApplication(in, tempDir), prepareParams, logger);
} finally {
threadLockStats.stopRecording();
cleanupTempDirectory(tempDir, logger);
}
return result;
}
|
@Test
public void testResolveConfigForMultipleApps() {
Version vespaVersion = VespaModelFactory.createTestFactory().version();
applicationRepository.deploy(app1, new PrepareParams.Builder()
.applicationId(applicationId())
.vespaVersion(vespaVersion)
.build());
ApplicationId appId2 = new ApplicationId.Builder()
.tenant(tenant1)
.applicationName("myapp2")
.instanceName("default")
.build();
applicationRepository.deploy(app2, new PrepareParams.Builder()
.applicationId(appId2)
.vespaVersion(vespaVersion)
.build());
SimpletypesConfig config = resolve(applicationId(), vespaVersion);
assertEquals(1337, config.intval());
SimpletypesConfig config2 = resolve(appId2, vespaVersion);
assertEquals(1330, config2.intval());
RequestHandler requestHandler = getRequestHandler(applicationId());
assertTrue(requestHandler.hasApplication(applicationId(), Optional.of(vespaVersion)));
assertNull(requestHandler.resolveApplicationId("doesnotexist"));
assertEquals(new ApplicationId.Builder().tenant(tenant1).applicationName("testapp").build(),
requestHandler.resolveApplicationId("mytesthost")); // Host set in application package.
}
|
public static CharSequence escapeCsv(CharSequence value) {
return escapeCsv(value, false);
}
|
@Test
public void escapeCsvWithSingleCarriageReturn() {
CharSequence value = "\r";
CharSequence expected = "\"\r\"";
escapeCsv(value, expected);
}
|
@Override
protected Result check() throws Exception {
return timeBoundHealthCheck.check(() -> {
try (Handle handle = jdbi.open()) {
if (validationQuery.isPresent()) {
handle.execute(validationQuery.get());
} else if (!handle.getConnection().isValid(validationQueryTimeout)) {
return Result.unhealthy("Connection::isValid returned false.");
}
return Result.healthy();
}
}
);
}
|
@Test
void testHealthyAfterWhenMissingValidationQuery() throws Exception {
when(connection.isValid(anyInt())).thenReturn(true);
HealthCheck.Result result = healthCheck().check();
assertThat(result.isHealthy()).isTrue();
verify(connection).isValid(anyInt());
}
|
@Override
public Mono<SetRegistrationLockResponse> setRegistrationLock(final SetRegistrationLockRequest request) {
final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedPrimaryDevice();
if (request.getRegistrationLock().isEmpty()) {
throw Status.INVALID_ARGUMENT.withDescription("Registration lock secret must not be empty").asRuntimeException();
}
return Mono.fromFuture(() -> accountsManager.getByAccountIdentifierAsync(authenticatedDevice.accountIdentifier()))
.map(maybeAccount -> maybeAccount.orElseThrow(Status.UNAUTHENTICATED::asRuntimeException))
.flatMap(account -> {
// In the previous REST-based API, clients would send hex strings directly. For backward compatibility, we
// convert the registration lock secret to a lowercase hex string before turning it into a salted hash.
final SaltedTokenHash credentials =
SaltedTokenHash.generateFor(HexFormat.of().withLowerCase().formatHex(request.getRegistrationLock().toByteArray()));
return Mono.fromFuture(() -> accountsManager.updateAsync(account,
a -> a.setRegistrationLock(credentials.hash(), credentials.salt())));
})
.map(ignored -> SetRegistrationLockResponse.newBuilder().build());
}
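A minimal sketch of the hex-encoding step mentioned in the comment above; the byte values are illustrative stand-ins for the registration lock secret.

import java.util.HexFormat;

public class RegistrationLockHexDemo {
    public static void main(String[] args) {
        byte[] secret = {0x0A, (byte) 0xFF, 0x10};                      // illustrative bytes
        String hex = HexFormat.of().withLowerCase().formatHex(secret);  // lowercase hex, as in the method above
        System.out.println(hex);                                        // prints "0aff10"
    }
}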
|
@Test
void setRegistrationLockEmptySecret() {
//noinspection ResultOfMethodCallIgnored
GrpcTestUtils.assertStatusException(Status.INVALID_ARGUMENT,
() -> authenticatedServiceStub().setRegistrationLock(SetRegistrationLockRequest.newBuilder()
.build()));
verify(accountsManager, never()).updateAsync(any(), any());
}
|
public Matrix transpose() {
return transpose(true);
}
|
@Test
public void testTranspose() {
Matrix t = matrix.transpose();
assertEquals(Layout.COL_MAJOR, matrix.layout());
assertEquals(Layout.ROW_MAJOR, t.layout());
assertEquals(3, t.nrow());
assertEquals(3, t.ncol());
assertEquals(0.9, matrix.get(0, 0), 1E-7);
assertEquals(0.8, matrix.get(2, 2), 1E-7);
assertEquals(0.5, matrix.get(1, 1), 1E-7);
assertEquals(0.0, matrix.get(0, 2), 1E-7);
assertEquals(0.0, matrix.get(2, 0), 1E-7);
assertEquals(0.4, matrix.get(1, 0), 1E-7);
}
|
@Override
public void remove(NamedNode master) {
connection.sync(RedisCommands.SENTINEL_REMOVE, master.getName());
}
|
@Test
public void testRemove() {
Collection<RedisServer> masters = connection.masters();
connection.remove(masters.iterator().next());
}
|
@Override
public ChannelFuture writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding,
boolean endStream, ChannelPromise promise) {
return writeHeaders0(ctx, streamId, headers, false, 0, (short) 0, false, padding, endStream, promise);
}
|
@Test
public void headersWithNoPriority() {
writeAllFlowControlledFrames();
final int streamId = 6;
ChannelPromise promise = newPromise();
encoder.writeHeaders(ctx, streamId, EmptyHttp2Headers.INSTANCE, 0, false, promise);
verify(writer).writeHeaders(eq(ctx), eq(streamId), eq(EmptyHttp2Headers.INSTANCE),
eq(0), eq(false), eq(promise));
}
|
@Override
public void run() {
try {
backgroundJobServer.getJobSteward().notifyThreadOccupied();
MDCMapper.loadMDCContextFromJob(job);
performJob();
} catch (Exception e) {
if (isJobDeletedWhileProcessing(e)) {
// nothing to do anymore as Job is deleted
return;
} else if (isJobServerStopped(e)) {
updateJobStateToFailedAndRunJobFilters("Job processing was stopped as background job server has stopped", e);
Thread.currentThread().interrupt();
} else if (isJobNotFoundException(e)) {
updateJobStateToFailedAndRunJobFilters("Job method not found", e);
} else {
updateJobStateToFailedAndRunJobFilters("An exception occurred during the performance of the job", e);
}
} finally {
backgroundJobServer.getJobSteward().notifyThreadIdle();
MDC.clear();
}
}
|
@Test
void onFailureAfterAllRetriesExceptionIsLoggedToError() {
Job job = aFailedJobWithRetries().withEnqueuedState(Instant.now()).build();
when(backgroundJobServer.getBackgroundJobRunner(job)).thenReturn(null);
BackgroundJobPerformer backgroundJobPerformer = new BackgroundJobPerformer(backgroundJobServer, job);
final ListAppender<ILoggingEvent> logger = LoggerAssert.initFor(backgroundJobPerformer);
backgroundJobPerformer.run();
assertThat(logAllStateChangesFilter.getStateChanges(job)).containsExactly("ENQUEUED->PROCESSING", "PROCESSING->FAILED");
assertThat(logger)
.hasNoWarnLogMessages()
.hasErrorMessage(String.format("Job(id=%s, jobName='failed job') processing failed: An exception occurred during the performance of the job", job.getId()));
}
|
public void deleteAfnemersindicatie(String aNummer) {
AfnemersberichtAanDGL afnemersberichtAanDGL = afnemersberichtAanDGLFactory.createAfnemersberichtAanDGL(dglMessageFactory.createAv01(aNummer));
Afnemersbericht afnemersbericht = new Afnemersbericht();
afnemersbericht.setANummer(aNummer);
afnemersbericht.setType(Afnemersbericht.Type.Av01);
afnemersbericht.setOnzeReferentie(afnemersberichtAanDGL.getBerichtHeader().getKenmerkVerstrekker());
afnemersbericht.setStatus(Afnemersbericht.Status.INITIAL);
afnemersberichtRepository.save(afnemersbericht);
dglSendService.sendAfnemersBerichtAanDGL(afnemersberichtAanDGL, afnemersbericht);
}
|
@Test
public void testDeleteAfnemersindicatie(){
Av01 av01 = new Av01();
when(dglMessageFactory.createAv01(anyString())).thenReturn(av01);
AfnemersberichtAanDGL afnemersberichtAanDGL = new AfnemersberichtAanDGL();
BerichtHeaderType berichtHeader = new BerichtHeaderType();
berichtHeader.setKenmerkVerstrekker("TestKernMerkVerstrekker");
afnemersberichtAanDGL.setBerichtHeader(berichtHeader);
when(afnemersberichtAanDGLFactory.createAfnemersberichtAanDGL(any(Av01.class))).thenReturn(afnemersberichtAanDGL);
classUnderTest.deleteAfnemersindicatie("SSSSSSSSSS");
verify(afnemersberichtRepository, times(1)).save(any(Afnemersbericht.class));
verify(dglSendService, times(1)).sendAfnemersBerichtAanDGL(any(AfnemersberichtAanDGL.class),any(Afnemersbericht.class));
}
|
public static BadRequestException create(String... errorMessages) {
return create(asList(errorMessages));
}
|
@Test
public void fail_when_creating_exception_with_one_empty_element() {
assertThatThrownBy(() -> BadRequestException.create(asList("error", "")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Message cannot be empty");
}
|
@Override
public Integer doCall() throws Exception {
JsonObject pluginConfig = loadConfig();
JsonObject plugins = pluginConfig.getMap("plugins");
Object plugin = plugins.remove(name);
if (plugin != null) {
printer().printf("Plugin %s removed%n", name);
saveConfig(pluginConfig);
} else {
printer().printf("Plugin %s not found in configuration%n", name);
}
return 0;
}
|
@Test
public void shouldDeletePlugin() throws Exception {
PluginHelper.enable(PluginType.CAMEL_K);
PluginDelete command = new PluginDelete(new CamelJBangMain().withPrinter(printer));
command.name = "camel-k";
command.doCall();
Assertions.assertEquals("Plugin camel-k removed", printer.getOutput());
Assertions.assertEquals("{\"plugins\":{}}", PluginHelper.getOrCreatePluginConfig().toJson());
}
|
public static String encodeMessage(String raw) {
if (StringUtils.isEmpty(raw)) {
return "";
}
return encodeComponent(raw);
}
|
@Test
void encodeMessage() {
Assertions.assertTrue(TriRpcStatus.encodeMessage(null).isEmpty());
Assertions.assertTrue(TriRpcStatus.encodeMessage("").isEmpty());
}
|
public FEELFnResult<Boolean> invoke(@ParameterName( "point" ) Comparable point, @ParameterName( "range" ) Range range) {
if ( point == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null"));
}
if ( range == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null"));
}
try {
boolean result = ( range.getHighBoundary() == Range.RangeBoundary.CLOSED && point.compareTo( range.getHighEndPoint() ) == 0 );
return FEELFnResult.ofResult( result );
} catch( Exception e ) {
// points are not comparable
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range"));
}
}
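Worked example: for point 10 and a range whose upper bound is closed and ends at 10 the function returns true; an open upper bound or a different endpoint yields false; and a point that cannot be compared to the endpoints (for example a String against a numeric range) lands in the catch block and is reported as an InvalidParametersEvent.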
|
@Test
void invokeParamIsNull() {
FunctionTestUtil.assertResultError(finishesFunction.invoke((Comparable) null, new RangeImpl()), InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(finishesFunction.invoke("a", null), InvalidParametersEvent.class);
}
|
@Override
public Set<ConfigOption<?>> requiredOptions() {
return Collections.singleton(FlinkOptions.PATH);
}
|
@Test
void testRequiredOptions() {
ResolvedSchema schema1 = SchemaBuilder.instance()
.field("f0", DataTypes.INT().notNull())
.field("f1", DataTypes.VARCHAR(20))
.field("f2", DataTypes.TIMESTAMP(3))
.build();
final MockContext sourceContext1 = MockContext.getInstance(this.conf, schema1, "f2");
// createDynamicTableSource doesn't call sanity check, will not throw exception
assertDoesNotThrow(() -> new HoodieTableFactory().createDynamicTableSource(sourceContext1));
// miss pk and precombine key will throw exception when create sink
assertThrows(HoodieValidationException.class, () -> new HoodieTableFactory().createDynamicTableSink(sourceContext1));
// append mode does not throw
this.conf.set(FlinkOptions.OPERATION, "insert");
final MockContext sourceContext11 = MockContext.getInstance(this.conf, schema1, "f2");
assertDoesNotThrow(() -> new HoodieTableFactory().createDynamicTableSource(sourceContext11));
assertDoesNotThrow(() -> new HoodieTableFactory().createDynamicTableSink(sourceContext11));
//miss the pre combine key will be ok
HoodieTableSink tableSink11 = (HoodieTableSink) new HoodieTableFactory().createDynamicTableSink(sourceContext11);
assertThat(tableSink11.getConf().getString(FlinkOptions.PRECOMBINE_FIELD), is(FlinkOptions.NO_PRE_COMBINE));
this.conf.set(FlinkOptions.OPERATION, FlinkOptions.OPERATION.defaultValue());
// a non-exists precombine key will throw exception
ResolvedSchema schema2 = SchemaBuilder.instance()
.field("f0", DataTypes.INT().notNull())
.field("f1", DataTypes.VARCHAR(20))
.field("f2", DataTypes.TIMESTAMP(3))
.build();
this.conf.setString(FlinkOptions.PRECOMBINE_FIELD, "non_exist_field");
final MockContext sourceContext2 = MockContext.getInstance(this.conf, schema2, "f2");
// createDynamicTableSource doesn't call sanity check, will not throw exception
assertDoesNotThrow(() -> new HoodieTableFactory().createDynamicTableSource(sourceContext2));
assertThrows(HoodieValidationException.class, () -> new HoodieTableFactory().createDynamicTableSink(sourceContext2));
this.conf.setString(FlinkOptions.PRECOMBINE_FIELD, FlinkOptions.PRECOMBINE_FIELD.defaultValue());
// given the pk but miss the pre combine key will be ok
ResolvedSchema schema3 = SchemaBuilder.instance()
.field("f0", DataTypes.INT().notNull())
.field("f1", DataTypes.VARCHAR(20))
.field("f2", DataTypes.TIMESTAMP(3))
.primaryKey("f0")
.build();
final MockContext sourceContext3 = MockContext.getInstance(this.conf, schema3, "f2");
HoodieTableSource tableSource = (HoodieTableSource) new HoodieTableFactory().createDynamicTableSource(sourceContext3);
HoodieTableSink tableSink = (HoodieTableSink) new HoodieTableFactory().createDynamicTableSink(sourceContext3);
// the precombine field is overwritten
assertThat(tableSink.getConf().getString(FlinkOptions.PRECOMBINE_FIELD), is(FlinkOptions.NO_PRE_COMBINE));
// precombine field not specified, use the default payload clazz
assertThat(tableSource.getConf().getString(FlinkOptions.PAYLOAD_CLASS_NAME), is(FlinkOptions.PAYLOAD_CLASS_NAME.defaultValue()));
assertThat(tableSink.getConf().getString(FlinkOptions.PAYLOAD_CLASS_NAME), is(FlinkOptions.PAYLOAD_CLASS_NAME.defaultValue()));
// append mode given the pk but miss the pre combine key will be ok
this.conf.set(FlinkOptions.OPERATION, "insert");
HoodieTableSink tableSink3 = (HoodieTableSink) new HoodieTableFactory().createDynamicTableSink(sourceContext3);
assertThat(tableSink3.getConf().getString(FlinkOptions.PRECOMBINE_FIELD), is(FlinkOptions.NO_PRE_COMBINE));
this.conf.set(FlinkOptions.OPERATION, FlinkOptions.OPERATION.defaultValue());
this.conf.setString(FlinkOptions.PAYLOAD_CLASS_NAME, DefaultHoodieRecordPayload.class.getName());
final MockContext sourceContext4 = MockContext.getInstance(this.conf, schema3, "f2");
// createDynamicTableSource doesn't call sanity check, will not throw exception
assertDoesNotThrow(() -> new HoodieTableFactory().createDynamicTableSource(sourceContext4));
// given pk but miss the pre combine key with DefaultHoodieRecordPayload should throw
assertThrows(HoodieValidationException.class, () -> new HoodieTableFactory().createDynamicTableSink(sourceContext4));
this.conf.setString(FlinkOptions.PAYLOAD_CLASS_NAME, FlinkOptions.PAYLOAD_CLASS_NAME.defaultValue());
// given pk and pre combine key will be ok
ResolvedSchema schema4 = SchemaBuilder.instance()
.field("f0", DataTypes.INT().notNull())
.field("f1", DataTypes.VARCHAR(20))
.field("f2", DataTypes.TIMESTAMP(3))
.field("ts", DataTypes.TIMESTAMP(3))
.primaryKey("f0")
.build();
final MockContext sourceContext5 = MockContext.getInstance(this.conf, schema4, "f2");
assertDoesNotThrow(() -> new HoodieTableFactory().createDynamicTableSource(sourceContext5));
assertDoesNotThrow(() -> new HoodieTableFactory().createDynamicTableSink(sourceContext5));
// precombine field specified(default ts), use DefaultHoodieRecordPayload as payload clazz
HoodieTableSource tableSource5 = (HoodieTableSource) new HoodieTableFactory().createDynamicTableSource(sourceContext5);
HoodieTableSink tableSink5 = (HoodieTableSink) new HoodieTableFactory().createDynamicTableSink(sourceContext5);
assertThat(tableSource5.getConf().getString(FlinkOptions.PAYLOAD_CLASS_NAME), is(EventTimeAvroPayload.class.getName()));
assertThat(tableSink5.getConf().getString(FlinkOptions.PAYLOAD_CLASS_NAME), is(EventTimeAvroPayload.class.getName()));
// given pk and set pre combine key to no_precombine will be ok
ResolvedSchema schema5 = SchemaBuilder.instance()
.field("f0", DataTypes.INT().notNull())
.field("f1", DataTypes.VARCHAR(20))
.field("f2", DataTypes.TIMESTAMP(3))
.field("ts", DataTypes.TIMESTAMP(3))
.primaryKey("f0")
.build();
this.conf.setString(FlinkOptions.PRECOMBINE_FIELD, FlinkOptions.NO_PRE_COMBINE);
final MockContext sourceContext6 = MockContext.getInstance(this.conf, schema5, "f2");
assertDoesNotThrow(() -> new HoodieTableFactory().createDynamicTableSource(sourceContext6));
assertDoesNotThrow(() -> new HoodieTableFactory().createDynamicTableSink(sourceContext6));
}
|
private void fail(final ChannelHandlerContext ctx, int length) {
fail(ctx, String.valueOf(length));
}
|
@Test
public void testNotFailFast() throws Exception {
EmbeddedChannel ch = new EmbeddedChannel(new LineBasedFrameDecoder(2, false, false));
assertFalse(ch.writeInbound(wrappedBuffer(new byte[] { 0, 1, 2 })));
assertFalse(ch.writeInbound(wrappedBuffer(new byte[]{ 3, 4 })));
try {
ch.writeInbound(wrappedBuffer(new byte[] { '\n' }));
fail();
} catch (TooLongFrameException expected) {
// Expected once we received a full frame.
}
assertFalse(ch.writeInbound(wrappedBuffer(new byte[] { '5' })));
assertTrue(ch.writeInbound(wrappedBuffer(new byte[] { '\n' })));
ByteBuf expected = wrappedBuffer(new byte[] { '5', '\n' });
ByteBuf buffer = ch.readInbound();
assertEquals(expected, buffer);
expected.release();
buffer.release();
assertFalse(ch.finish());
}
|
@Override
public boolean containsLong(K name, long value) {
return false;
}
|
@Test
public void testContainsLong() {
assertFalse(HEADERS.containsLong("name1", 1));
}
|
@Override
protected void processOptions(LinkedList<String> args)
throws IOException {
CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
OPTION_PATHONLY, OPTION_DIRECTORY, OPTION_HUMAN,
OPTION_HIDENONPRINTABLE, OPTION_RECURSIVE, OPTION_REVERSE,
OPTION_MTIME, OPTION_SIZE, OPTION_ATIME, OPTION_ECPOLICY);
cf.parse(args);
pathOnly = cf.getOpt(OPTION_PATHONLY);
dirRecurse = !cf.getOpt(OPTION_DIRECTORY);
setRecursive(cf.getOpt(OPTION_RECURSIVE) && dirRecurse);
humanReadable = cf.getOpt(OPTION_HUMAN);
hideNonPrintable = cf.getOpt(OPTION_HIDENONPRINTABLE);
orderReverse = cf.getOpt(OPTION_REVERSE);
orderTime = cf.getOpt(OPTION_MTIME);
orderSize = !orderTime && cf.getOpt(OPTION_SIZE);
useAtime = cf.getOpt(OPTION_ATIME);
displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
if (args.isEmpty()) args.add(Path.CUR_DIR);
initialiseOrderComparator();
}
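Note the precedence encoded above: orderSize is only set when orderTime is false, so when both the mtime and size orderings are requested, modification-time ordering wins; similarly the recursive option is ignored when the directory option is given, because setRecursive(...) requires dirRecurse to be true.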
|
@Test
public void processPathFiles() throws IOException {
TestFile testfile01 = new TestFile("testDir01", "testFile01");
TestFile testfile02 = new TestFile("testDir02", "testFile02");
TestFile testfile03 = new TestFile("testDir03", "testFile03");
TestFile testfile04 = new TestFile("testDir04", "testFile04");
TestFile testfile05 = new TestFile("testDir05", "testFile05");
TestFile testfile06 = new TestFile("testDir06", "testFile06");
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testfile01.getPathData());
pathData.add(testfile02.getPathData());
pathData.add(testfile03.getPathData());
pathData.add(testfile04.getPathData());
pathData.add(testfile05.getPathData());
pathData.add(testfile06.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
|
public static <T> List<List<T>> groupPartitions(List<T> elements, int numGroups) {
if (numGroups <= 0)
throw new IllegalArgumentException("Number of groups must be positive.");
List<List<T>> result = new ArrayList<>(numGroups);
// Each group has either n+1 or n raw partitions
int perGroup = elements.size() / numGroups;
int leftover = elements.size() - (numGroups * perGroup);
int assigned = 0;
for (int group = 0; group < numGroups; group++) {
int numThisGroup = group < leftover ? perGroup + 1 : perGroup;
List<T> groupList = new ArrayList<>(numThisGroup);
for (int i = 0; i < numThisGroup; i++) {
groupList.add(elements.get(assigned));
assigned++;
}
result.add(groupList);
}
return result;
}
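For example, splitting 5 elements into 3 groups gives perGroup = 1 and leftover = 2, so the first two groups receive 2 elements each and the last receives 1, which is exactly the [1, 2], [3, 4], [5] split asserted in the test below.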
|
@Test
public void testGroupPartitions() {
List<List<Integer>> grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 1);
assertEquals(Collections.singletonList(FIVE_ELEMENTS), grouped);
grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 2);
assertEquals(Arrays.asList(Arrays.asList(1, 2, 3), Arrays.asList(4, 5)), grouped);
grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 3);
assertEquals(Arrays.asList(Arrays.asList(1, 2),
Arrays.asList(3, 4),
Collections.singletonList(5)), grouped);
grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 5);
assertEquals(Arrays.asList(Collections.singletonList(1),
Collections.singletonList(2),
Collections.singletonList(3),
Collections.singletonList(4),
Collections.singletonList(5)), grouped);
grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 7);
assertEquals(Arrays.asList(Collections.singletonList(1),
Collections.singletonList(2),
Collections.singletonList(3),
Collections.singletonList(4),
Collections.singletonList(5),
Collections.emptyList(),
Collections.emptyList()), grouped);
}
|
public static void verifyMessage(final Address address,
final String message,
final String signatureBase64) throws SignatureException {
try {
final ScriptType scriptType = address.getOutputScriptType();
switch (scriptType) {
case P2PKH:
case P2WPKH:
comparePubKeyHash(address, message, signatureBase64);
break;
case P2SH:
compareP2SHScriptHashDerivedFromPubKey((LegacyAddress) address, message, signatureBase64);
break;
default:
throw new SignatureException(SIGNATURE_FAILED_ERROR_MESSAGE);
}
} catch (final SignatureException se) {
throw se;
} catch (final Exception e) {
log.warn("verifying of message signature failed with exception", e);
throw new SignatureException(SIGNATURE_FAILED_ERROR_MESSAGE);
}
}
|
@Test
public void testMessageSignatureVerification() {
final AddressParser addressParser = AddressParser.getDefault(testVector.networkParameters.network());
try {
MessageVerifyUtils.verifyMessage(
addressParser.parseAddress(testVector.address),
testVector.message,
testVector.signature
);
if (!testVector.shouldVerify) {
fail("verification should have failed, but succeed: " + testVector + "\n");
}
} catch (Exception e) {
if (testVector.shouldVerify) {
fail("verification should have succeeded, but failed: " + testVector + "\n");
}
}
}
|
@Override
public ListenableFuture<SplitBatch> getNextBatch(ConnectorPartitionHandle partitionHandle, Lifespan lifespan, int maxSize)
{
checkArgument(maxSize > 0, "Cannot fetch a batch of zero size");
return GetNextBatch.fetchNextBatchAsync(source, Math.min(bufferSize, maxSize), maxSize, partitionHandle, lifespan);
}
|
@Test
public void testFailImmediate()
{
MockSplitSource mockSource = new MockSplitSource()
.setBatchSize(1)
.atSplitCompletion(FAIL);
try (SplitSource source = new BufferingSplitSource(mockSource, 100)) {
assertFutureFailsWithMockFailure(getNextBatch(source, 200));
assertEquals(mockSource.getNextBatchInvocationCount(), 1);
}
}
|
@Override
public String getName() {
return ANALYZER_NAME;
}
|
@Test
public void testGetName() {
assertEquals("Pipfile Analyzer", analyzer.getName());
}
|
@Override
public Proxy find(final String target) {
final String route = this.findNative(target);
if(null == route) {
if(log.isInfoEnabled()) {
log.info(String.format("No proxy configuration found for target %s", target));
}
// Direct
return Proxy.DIRECT;
}
final URI proxy;
try {
proxy = new URI(route);
try {
// User info is never populated. Would have to lookup in keychain but we are unaware of the username
return new Proxy(Proxy.Type.valueOf(StringUtils.upperCase(proxy.getScheme())),
proxy.getHost(), proxy.getPort());
}
catch(IllegalArgumentException e) {
log.warn(String.format("Unsupported scheme for proxy %s", proxy));
}
}
catch(URISyntaxException e) {
log.warn(String.format("Invalid proxy configuration %s", route));
}
return Proxy.DIRECT;
}
|
@Test
public void testFind() {
final SystemConfigurationProxy proxy = new SystemConfigurationProxy();
assertEquals(Proxy.Type.DIRECT, proxy.find("http://cyberduck.io").getType());
assertEquals(Proxy.Type.DIRECT, proxy.find("sftp://cyberduck.io").getType());
assertEquals(Proxy.Type.DIRECT, proxy.find("ftp://cyberduck.io").getType());
assertEquals(Proxy.Type.DIRECT, proxy.find("ftps://cyberduck.io").getType());
}
|
@Override
public TimestampedSegment getOrCreateSegmentIfLive(final long segmentId,
final ProcessorContext context,
final long streamTime) {
final TimestampedSegment segment = super.getOrCreateSegmentIfLive(segmentId, context, streamTime);
cleanupExpiredSegments(streamTime);
return segment;
}
|
@Test
public void futureEventsShouldNotCauseSegmentRoll() {
updateStreamTimeAndCreateSegment(0);
verifyCorrectSegments(0, 1);
updateStreamTimeAndCreateSegment(1);
verifyCorrectSegments(0, 2);
updateStreamTimeAndCreateSegment(2);
verifyCorrectSegments(0, 3);
updateStreamTimeAndCreateSegment(3);
verifyCorrectSegments(0, 4);
final long streamTime = updateStreamTimeAndCreateSegment(4);
verifyCorrectSegments(0, 5);
segments.getOrCreateSegmentIfLive(5, context, streamTime);
verifyCorrectSegments(0, 6);
segments.getOrCreateSegmentIfLive(6, context, streamTime);
verifyCorrectSegments(0, 7);
}
|
public void upgrade() {
for (final Document document : collection.find()) {
LOG.debug("Migrate view sharing: {}", document);
final ObjectId sharingId = document.getObjectId("_id");
final String sharingType = document.get("type", String.class);
final String viewId = document.get("view_id", String.class);
try {
switch (sharingType) {
case "users":
//noinspection unchecked
migrateUsers(viewId, (Collection<String>) document.get("users", Collection.class));
break;
case "roles":
//noinspection unchecked
migrateRoles(viewId, (Collection<String>) document.get("roles", Collection.class));
break;
case "all_of_instance":
migrateAllOfInstance(viewId);
break;
default:
LOG.warn("Skipping unknown view sharing type: {}", sharingType);
continue; // Continue here so we don't delete the sharing document
}
// The view sharing document should be removed after successful migration
deleteViewSharing(sharingId);
} catch (Exception e) {
LOG.error("Couldn't migrate view sharing: {}", document, e);
}
}
}
|
@Test
@DisplayName("migrate all-of-instance shares")
void migrateAllOfInstanceShares() throws Exception {
final GRN everyone = GRNRegistry.GLOBAL_USER_GRN;
when(roleService.load(anyString())).thenThrow(new NotFoundException());
final GRN dashboard2 = GRNTypes.DASHBOARD.toGRN("54e3deadbeefdeadbeef0003");
assertThat(grantService.hasGrantFor(everyone, Capability.VIEW, dashboard2)).isFalse();
migration.upgrade();
assertThat(grantService.hasGrantFor(everyone, Capability.VIEW, dashboard2)).isTrue();
assertThat(grantService.hasGrantFor(everyone, Capability.OWN, dashboard2)).isFalse();
assertThat(grantService.hasGrantFor(everyone, Capability.MANAGE, dashboard2)).isFalse();
assertDeletedViewSharing("54e3deadbeefdeadbeef0003");
}
|
@VisibleForTesting
public void validateDictTypeExists(String type) {
DictTypeDO dictType = dictTypeService.getDictType(type);
if (dictType == null) {
throw exception(DICT_TYPE_NOT_EXISTS);
}
if (!CommonStatusEnum.ENABLE.getStatus().equals(dictType.getStatus())) {
throw exception(DICT_TYPE_NOT_ENABLE);
}
}
|
@Test
public void testValidateDictTypeExists_success() {
// Mock the method to return a dict type for the given type
String type = randomString();
when(dictTypeService.getDictType(eq(type))).thenReturn(randomDictTypeDO(type));
// Call it; expect success
dictDataService.validateDictTypeExists(type);
}
|
@Override
public GlobalBeginRequestProto convert2Proto(GlobalBeginRequest globalBeginRequest) {
final short typeCode = globalBeginRequest.getTypeCode();
final AbstractMessageProto abstractMessage = AbstractMessageProto.newBuilder().setMessageType(
MessageTypeProto.forNumber(typeCode)).build();
final AbstractTransactionRequestProto abstractTransactionRequestProto = AbstractTransactionRequestProto
.newBuilder().setAbstractMessage(abstractMessage).build();
GlobalBeginRequestProto result = GlobalBeginRequestProto.newBuilder().setTimeout(
globalBeginRequest.getTimeout()).setTransactionName(globalBeginRequest.getTransactionName())
.setAbstractTransactionRequest(abstractTransactionRequestProto).build();
return result;
}
|
@Test
public void convert2Proto() {
GlobalBeginRequest globalBeginRequest = new GlobalBeginRequest();
globalBeginRequest.setTimeout(3000);
globalBeginRequest.setTransactionName("taa");
GlobalBeginRequestConvertor convertor = new GlobalBeginRequestConvertor();
GlobalBeginRequestProto proto = convertor.convert2Proto(globalBeginRequest);
GlobalBeginRequest real = convertor.convert2Model(proto);
assertThat(real.getTypeCode()).isEqualTo(globalBeginRequest.getTypeCode());
assertThat(real.getTimeout()).isEqualTo(globalBeginRequest.getTimeout());
assertThat(real.getTransactionName()).isEqualTo(globalBeginRequest.getTransactionName());
}
|
@Override
public String generateSqlType(Dialect dialect) {
return switch (dialect.getId()) {
case PostgreSql.ID -> "SMALLINT";
case Oracle.ID -> "NUMBER(3)";
case MsSql.ID, H2.ID -> "TINYINT";
default -> throw new UnsupportedOperationException(String.format("Unknown dialect '%s'", dialect.getId()));
};
}
|
@Test
public void fail_with_UOE_to_generate_sql_type_when_unknown_dialect() {
assertThatThrownBy(() -> {
TinyIntColumnDef def = new TinyIntColumnDef.Builder()
.setColumnName("foo")
.setIsNullable(true)
.build();
Dialect dialect = mock(Dialect.class);
when(dialect.getId()).thenReturn("unknown");
def.generateSqlType(dialect);
})
.isInstanceOf(UnsupportedOperationException.class)
.hasMessage("Unknown dialect 'unknown'");
}
|
@Override
public DataSink createDataSink(Context context) {
// Validate the configuration
FactoryHelper.createFactoryHelper(this, context).validate();
// Get the configuration directly from the context
Configuration configuration =
Configuration.fromMap(context.getFactoryConfiguration().toMap());
// Validate required options
validateRequiredOptions(configuration);
ZoneId zoneId = determineZoneId(context);
ElasticsearchSinkOptions sinkOptions = buildSinkConnectorOptions(configuration);
return new ElasticsearchDataSink(sinkOptions, zoneId);
}
|
@Test
void testPrefixedRequiredOption() {
DataSinkFactory sinkFactory = getElasticsearchDataSinkFactory();
Configuration conf =
Configuration.fromMap(
ImmutableMap.<String, String>builder()
.put("hosts", "localhost:9200")
.put("batch.size.max", "500")
.put("inflight.requests.max", "5")
.put("version", "7") // Added version to the test configuration
.build());
// Log to confirm we are exercising the prefixed required option
System.out.println("Testing prefixed required option");
DataSink dataSink = createDataSink(sinkFactory, conf);
Assertions.assertThat(dataSink).isInstanceOf(ElasticsearchDataSink.class);
}
|
@Override
public void trackInstallation(String eventName, JSONObject properties, boolean disableCallback) {
}
|
@Test
public void testTrackInstallation() {
mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
@Override
public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
Assert.fail();
return false;
}
});
mSensorsAPI.trackInstallation("AppInstall", new JSONObject());
}
|
@Override
public ConfigErrors errors() {
return errors;
}
|
@Test
public void shouldValidateCorrectPipelineLabelWithTruncationSyntax() {
String labelFormat = "pipeline-${COUNT}-${git[:7]}-alpha";
PipelineConfig pipelineConfig = createAndValidatePipelineLabel(labelFormat);
assertThat(pipelineConfig.errors().on(PipelineConfig.LABEL_TEMPLATE), is(nullValue()));
}
|
public void visit(Entry entry) {
final AFreeplaneAction action = new EntryAccessor().getAction(entry);
if (action != null) {
entries.unregisterEntry(action, entry);
if(! entries.contains(action))
acceleratorMap.removeActionAccelerator(freeplaneActions, action);
}
}
|
@Test
public void unregistersEntryWithAction() {
Entry actionEntry = new Entry();
final AFreeplaneAction action = mock(AFreeplaneAction.class);
IAcceleratorMap acceleratorMap= mock(IAcceleratorMap.class);
FreeplaneActions freeplaneActions= mock(FreeplaneActions.class);
new EntryAccessor().setAction(actionEntry, action);
EntriesForAction entries = mock(EntriesForAction.class);
final AcceleratorDestroyer acceleratorDestroyer = new AcceleratorDestroyer(freeplaneActions, acceleratorMap, entries);
acceleratorDestroyer.visit(actionEntry);
Mockito.verify(entries).unregisterEntry(action, actionEntry);
}
|
@Override
public final void isEqualTo(@Nullable Object other) {
super.isEqualTo(other);
}
|
@Test
@GwtIncompatible("Math.nextAfter")
public void testFloatConstants_matchNextAfter() {
assertThat(Math.nextAfter(Float.MAX_VALUE, 0.0f)).isEqualTo(NEARLY_MAX);
assertThat(Math.nextAfter(-1.0f * Float.MAX_VALUE, 0.0f)).isEqualTo(NEGATIVE_NEARLY_MAX);
assertThat(Math.nextAfter(Float.MIN_VALUE, 1.0f)).isEqualTo(JUST_OVER_MIN);
assertThat(Math.nextAfter(-1.0f * Float.MIN_VALUE, -1.0f)).isEqualTo(JUST_UNDER_NEGATIVE_MIN);
assertThat(1.23f).isEqualTo(GOLDEN);
assertThat(Math.nextAfter(1.23f, Float.POSITIVE_INFINITY)).isEqualTo(JUST_OVER_GOLDEN);
}
|
@Override
public void bounce(final Local file) {
synchronized(NSWorkspace.class) {
NSDistributedNotificationCenter.defaultCenter().postNotification(
NSNotification.notificationWithName("com.apple.DownloadFileFinished", file.getAbsolute())
);
}
}
|
@Test
public void testBounce() throws Exception {
new WorkspaceApplicationLauncher().bounce(new NullLocal("t"));
final NullLocal file = new NullLocal(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
LocalTouchFactory.get().touch(file);
new WorkspaceApplicationLauncher().bounce(file);
file.delete();
}
|
public CompletableFuture<Void> commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets) {
if (offsets.isEmpty()) {
log.debug("Skipping commit of empty offsets");
return CompletableFuture.completedFuture(null);
}
OffsetCommitRequestState commitRequest = createOffsetCommitRequest(offsets, Long.MAX_VALUE);
pendingRequests.addOffsetCommitRequest(commitRequest);
CompletableFuture<Void> asyncCommitResult = new CompletableFuture<>();
commitRequest.future.whenComplete((committedOffsets, error) -> {
if (error != null) {
asyncCommitResult.completeExceptionally(commitAsyncExceptionForError(error));
} else {
asyncCommitResult.complete(null);
}
});
return asyncCommitResult;
}
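A minimal, generic sketch (hypothetical names) of the future-adapting pattern used above: the internal request future's outcome is copied onto a caller-facing future, wrapping failures before completing it exceptionally.

import java.util.concurrent.CompletableFuture;

public class FutureAdapterSketch {
    static CompletableFuture<Void> adapt(CompletableFuture<?> internal) {
        CompletableFuture<Void> external = new CompletableFuture<>();
        internal.whenComplete((result, error) -> {
            if (error != null) {
                // Wrap the failure, analogous to commitAsyncExceptionForError above.
                external.completeExceptionally(new RuntimeException("commit failed", error));
            } else {
                external.complete(null);
            }
        });
        return external;
    }

    public static void main(String[] args) {
        CompletableFuture<String> internal = new CompletableFuture<>();
        CompletableFuture<Void> external = adapt(internal);
        internal.complete("ok");
        System.out.println(external.isDone()); // true once the internal future completes
    }
}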
|
@Test
public void testPollSkipIfCoordinatorUnknown() {
CommitRequestManager commitRequestManager = create(false, 0);
assertPoll(false, 0, commitRequestManager);
Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
offsets.put(new TopicPartition("t1", 0), new OffsetAndMetadata(0));
commitRequestManager.commitAsync(offsets);
assertPoll(false, 0, commitRequestManager);
}
|