focal_method | test_case |
---|---|
@Override
public void add(McastRoute route) {
checkNotNull(route, "Route cannot be null");
store.storeRoute(route, McastStore.Type.ADD);
}
|
@Test
public void testAdd() {
manager.add(r1);
validateEvents(McastEvent.Type.ROUTE_ADDED);
}
|
public void append(ByteBuffer record, DataType dataType) throws InterruptedException {
if (dataType.isEvent()) {
writeEvent(record, dataType);
} else {
writeRecord(record, dataType);
}
}
|
@Test
void testAppendDataRequestBuffer() throws Exception {
CompletableFuture<Void> requestBufferFuture = new CompletableFuture<>();
HsMemoryDataManagerOperation memoryDataManagerOperation =
TestingMemoryDataManagerOperation.builder()
.setRequestBufferFromPoolSupplier(
() -> {
requestBufferFuture.complete(null);
return createBufferBuilder(bufferSize);
})
.build();
HsSubpartitionMemoryDataManager subpartitionMemoryDataManager =
createSubpartitionMemoryDataManager(memoryDataManagerOperation);
subpartitionMemoryDataManager.append(createRecord(0), DataType.DATA_BUFFER);
assertThat(requestBufferFuture).isCompleted();
}
|
public synchronized long nextId() {
long timestamp = timeGen();
// leap second (clock moved backwards)
if (timestamp < lastTimestamp) {
long offset = lastTimestamp - timestamp;
if (offset <= 5) {
try {
wait(offset << 1);
timestamp = timeGen();
if (timestamp < lastTimestamp) {
throw new RuntimeException(String.format("Clock moved backwards. Refusing to generate id for %d milliseconds", offset));
}
} catch (Exception e) {
throw new RuntimeException(e);
}
} else {
throw new RuntimeException(String.format("Clock moved backwards. Refusing to generate id for %d milliseconds", offset));
}
}
if (lastTimestamp == timestamp) {
// same millisecond: increment the sequence
sequence = (sequence + 1) & sequenceMask;
if (sequence == 0) {
// the sequence for this millisecond is exhausted; block until the next millisecond
timestamp = tilNextMillis(lastTimestamp);
}
} else {
// different millisecond: reset the sequence to a random value of 1 or 2
sequence = ThreadLocalRandom.current().nextLong(1, 3);
}
lastTimestamp = timestamp;
// timestamp part | datacenter part | worker id part | sequence part
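// (a common Snowflake layout is 41 bits of timestamp, 5 datacenter bits, 5 worker bits and 12 sequence bits; the exact widths here depend on the configured shift constants)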
return ((timestamp - twepoch) << timestampLeftShift)
| (datacenterId << datacenterIdShift)
| (workerId << workerIdShift)
| sequence;
}
|
@Test
void nextId() {
Sequence sequence = new Sequence(null);
long id = sequence.nextId();
LocalDateTime now = LocalDateTime.now();
System.out.println(sequence.nextId() + "---" + now);
long timestamp = Sequence.parseIdTimestamp(id);
Instant instant = Instant.ofEpochMilli(timestamp);
ZoneId zone = ZoneId.systemDefault();
LocalDateTime time = LocalDateTime.ofInstant(instant, zone);
System.out.println(timestamp + "---" + time);
assertThat(now).isAfter(time);
}
|
@Override
protected MessageFormat resolveCode(String code, Locale locale) {
List<JsonObject> langs = getLanguageMap(locale);
String value = getValue(code, langs);
if (value == null) {
// if we haven't found anything, try the default locale
langs = getLanguageMap(fallbackLocale);
value = getValue(code, langs);
}
if (value == null) {
// if it's still null, return null
return null;
} else {
// otherwise format the message
return new MessageFormat(value, locale);
}
}
|
@Test
public void verifyWhenLocaleDoesNotExist_cannotResolveCode() {
MessageFormat mf = jsonMessageSource.resolveCode("test", localeThatDoesNotHaveAFile);
assertNull(mf);
}
|
@Override
public JsonSchema convert(URI basePath, Descriptors.Descriptor schema) {
Map<String, FieldSchema> definitions = new HashMap<>();
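// Registers the root message and every message it references into the "definitions" map, returning a $ref to the root (see the expected schema in the test below).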
RefFieldSchema rootRef = registerObjectAndReturnRef(schema, definitions);
return JsonSchema.builder()
.id(basePath.resolve(schema.getFullName()))
.type(new SimpleJsonType(JsonType.Type.OBJECT))
.rootRef(rootRef.getRef())
.definitions(definitions)
.build();
}
|
@Test
void testSchemaConvert() throws Exception {
String protoSchema = """
syntax = "proto3";
package test;
import "google/protobuf/timestamp.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";
message TestMsg {
string string_field = 1;
int32 int32_field = 2;
bool bool_field = 3;
SampleEnum enum_field = 4;
enum SampleEnum {
ENUM_V1 = 0;
ENUM_V2 = 1;
}
google.protobuf.Timestamp ts_field = 5;
google.protobuf.Struct struct_field = 6;
google.protobuf.ListValue lst_v_field = 7;
google.protobuf.Duration duration_field = 8;
oneof some_oneof1 {
google.protobuf.Value v1 = 9;
google.protobuf.Value v2 = 10;
}
// wrapper fields:
google.protobuf.Int64Value int64_w_field = 11;
google.protobuf.Int32Value int32_w_field = 12;
google.protobuf.UInt64Value uint64_w_field = 13;
google.protobuf.UInt32Value uint32_w_field = 14;
google.protobuf.StringValue string_w_field = 15;
google.protobuf.BoolValue bool_w_field = 16;
google.protobuf.DoubleValue double_w_field = 17;
google.protobuf.FloatValue float_w_field = 18;
//embedded msg
EmbeddedMsg emb = 19;
repeated EmbeddedMsg emb_list = 20;
message EmbeddedMsg {
int32 emb_f1 = 1;
TestMsg outer_ref = 2;
EmbeddedMsg self_ref = 3;
}
map<int32, string> intToStringMap = 21;
map<string, EmbeddedMsg> strToObjMap = 22;
}""";
String expectedJsonSchema = """
{
"$id": "http://example.com/test.TestMsg",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"type": "object",
"definitions":
{
"test.TestMsg":
{
"type": "object",
"properties":
{
"enum_field": {
"enum":
[
"ENUM_V1",
"ENUM_V2"
],
"type": "string"
},
"string_w_field": { "type": "string" },
"ts_field": { "type": "string", "format": "date-time" },
"emb_list": {
"type": "array",
"items": { "$ref": "#/definitions/test.TestMsg.EmbeddedMsg" }
},
"float_w_field": { "type": "number" },
"lst_v_field": {
"type": "array",
"items": { "type":[ "number", "string", "object", "array", "boolean", "null" ] }
},
"struct_field": { "type": "object", "properties": {} },
"string_field": { "type": "string" },
"double_w_field": { "type": "number" },
"bool_field": { "type": "boolean" },
"int32_w_field": { "type": "integer", "maximum": 2147483647, "minimum": -2147483648 },
"duration_field": { "type": "string" },
"int32_field": { "type": "integer", "maximum": 2147483647, "minimum": -2147483648 },
"int64_w_field": {
"type": "integer",
"maximum": 9223372036854775807, "minimum": -9223372036854775808
},
"v1": { "type": [ "number", "string", "object", "array", "boolean", "null" ] },
"emb": { "$ref": "#/definitions/test.TestMsg.EmbeddedMsg" },
"v2": { "type": [ "number", "string", "object", "array", "boolean", "null" ] },
"uint32_w_field": { "type": "integer", "maximum": 4294967295, "minimum": 0 },
"bool_w_field": { "type": "boolean" },
"uint64_w_field": { "type": "integer", "maximum": 18446744073709551615, "minimum": 0 },
"strToObjMap": { "type": "object", "additionalProperties": true },
"intToStringMap": { "type": "object", "additionalProperties": true }
}
},
"test.TestMsg.EmbeddedMsg": {
"type": "object",
"properties":
{
"emb_f1": { "type": "integer", "maximum": 2147483647, "minimum": -2147483648 },
"outer_ref": { "$ref": "#/definitions/test.TestMsg" },
"self_ref": { "$ref": "#/definitions/test.TestMsg.EmbeddedMsg" }
}
}
},
"$ref": "#/definitions/test.TestMsg"
}""";
ProtobufSchemaConverter converter = new ProtobufSchemaConverter();
ProtobufSchema protobufSchema = new ProtobufSchema(protoSchema);
URI basePath = new URI("http://example.com/");
JsonSchema converted = converter.convert(basePath, protobufSchema.toDescriptor());
assertJsonEqual(expectedJsonSchema, converted.toJson());
}
|
@Override
public void close() {
JOrphanUtils.closeQuietly(isr);
JOrphanUtils.closeQuietly(fis);
JOrphanUtils.closeQuietly(reader);
}
|
@Test
public void testClose() {
CsvSampleReader reader = new CsvSampleReader(tempCsv, metadata);
reader.close();
try {
reader.readSample();
fail("Stream should be closed.");
} catch (SampleException expected) {
// All is well
}
}
|
public static GcsPath fromComponents(@Nullable String bucket, @Nullable String object) {
return new GcsPath(null, bucket, object);
}
|
@Test(expected = IllegalArgumentException.class)
public void testInvalidBucket() {
GcsPath.fromComponents("invalid/", "");
}
|
@Override
public void onMsg(TbContext ctx, TbMsg msg) throws TbNodeException {
ctx.tellNext(msg, checkMatches(msg) ? TbNodeConnectionType.TRUE : TbNodeConnectionType.FALSE);
}
|
@Test
void givenTypePolygonAndConfigWithPolygonDefined_whenOnMsg_thenFalse() throws TbNodeException {
// GIVEN
var config = new TbGpsGeofencingFilterNodeConfiguration().defaultConfiguration();
config.setFetchPerimeterInfoFromMessageMetadata(false);
config.setPolygonsDefinition(GeoUtilTest.SIMPLE_RECT);
node.init(ctx, new TbNodeConfiguration(JacksonUtil.valueToTree(config)));
DeviceId deviceId = new DeviceId(UUID.randomUUID());
TbMsg msg = getTbMsg(deviceId, TbMsgMetaData.EMPTY,
GeoUtilTest.POINT_OUTSIDE_SIMPLE_RECT.getLatitude(), GeoUtilTest.POINT_OUTSIDE_SIMPLE_RECT.getLongitude());
// WHEN
node.onMsg(ctx, msg);
// THEN
ArgumentCaptor<TbMsg> newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class);
verify(ctx, times(1)).tellNext(newMsgCaptor.capture(), eq(TbNodeConnectionType.FALSE));
verify(ctx, never()).tellFailure(any(), any());
TbMsg newMsg = newMsgCaptor.getValue();
assertThat(newMsg).isNotNull();
assertThat(newMsg).isSameAs(msg);
}
|
@Override
public Set<DeviceId> getPhysicalDevices(NetworkId networkId, DeviceId deviceId) {
checkNotNull(networkId, "Network ID cannot be null");
checkNotNull(deviceId, "Virtual device ID cannot be null");
Set<VirtualPort> virtualPortSet = getVirtualPorts(networkId, deviceId);
Set<DeviceId> physicalDeviceSet = new HashSet<>();
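// A virtual port may be realized by a physical connect point; collect the physical device behind each realized port.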
virtualPortSet.forEach(virtualPort -> {
if (virtualPort.realizedBy() != null) {
physicalDeviceSet.add(virtualPort.realizedBy().deviceId());
}
});
return ImmutableSet.copyOf(physicalDeviceSet);
}
|
@Test
public void testGetPhysicalDevices() {
manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
manager.registerTenantId(TenantId.tenantId(tenantIdValue2));
VirtualNetwork virtualNetwork1 =
manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
VirtualNetwork virtualNetwork2 =
manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue2));
// two virtual device in first virtual network
VirtualDevice vDevice1InVnet1 =
manager.createVirtualDevice(virtualNetwork1.id(), DID1);
VirtualDevice vDevice2InVnet1 =
manager.createVirtualDevice(virtualNetwork1.id(), DID2);
// Two virtual device in second virtual network
VirtualDevice vDevice1InVnet2 =
manager.createVirtualDevice(virtualNetwork2.id(), DID1);
VirtualDevice vDevice2InVnet2 =
manager.createVirtualDevice(virtualNetwork2.id(), DID2);
// Connection Point from each physical device
// Virtual network 1
ConnectPoint cp1InVnet1 =
new ConnectPoint(PHYDID1, PortNumber.portNumber(10));
ConnectPoint cp2InVnet1 =
new ConnectPoint(PHYDID2, PortNumber.portNumber(20));
ConnectPoint cp3InVnet1 =
new ConnectPoint(PHYDID3, PortNumber.portNumber(30));
ConnectPoint cp4InVnet1 =
new ConnectPoint(PHYDID4, PortNumber.portNumber(40));
// Virtual network 2
ConnectPoint cp1InVnet2 =
new ConnectPoint(PHYDID1, PortNumber.portNumber(10));
ConnectPoint cp2InVnet2 =
new ConnectPoint(PHYDID2, PortNumber.portNumber(20));
ConnectPoint cp3InVnet2 =
new ConnectPoint(PHYDID3, PortNumber.portNumber(30));
ConnectPoint cp4InVnet2 =
new ConnectPoint(PHYDID4, PortNumber.portNumber(40));
// Make simple BigSwitch by mapping two phyDevice to one vDevice
// First vDevice in first virtual network
manager.createVirtualPort(virtualNetwork1.id(),
vDevice1InVnet1.id(), PortNumber.portNumber(1), cp1InVnet1);
manager.createVirtualPort(virtualNetwork1.id(),
vDevice1InVnet1.id(), PortNumber.portNumber(2), cp2InVnet1);
// Second vDevice in first virtual network
manager.createVirtualPort(virtualNetwork1.id(),
vDevice2InVnet1.id(), PortNumber.portNumber(1), cp3InVnet1);
manager.createVirtualPort(virtualNetwork1.id(),
vDevice2InVnet1.id(), PortNumber.portNumber(2), cp4InVnet1);
// First vDevice in second virtual network
manager.createVirtualPort(virtualNetwork2.id(),
vDevice1InVnet2.id(), PortNumber.portNumber(1), cp1InVnet2);
manager.createVirtualPort(virtualNetwork2.id(),
vDevice1InVnet2.id(), PortNumber.portNumber(2), cp2InVnet2);
// Second vDevice in second virtual network
manager.createVirtualPort(virtualNetwork2.id(),
vDevice2InVnet2.id(), PortNumber.portNumber(1), cp3InVnet2);
manager.createVirtualPort(virtualNetwork2.id(),
vDevice2InVnet2.id(), PortNumber.portNumber(2), cp4InVnet2);
Set<DeviceId> physicalDeviceSet;
Set<DeviceId> testSet = new HashSet<>();
physicalDeviceSet = manager.getPhysicalDevices(virtualNetwork1.id(), vDevice1InVnet1.id());
testSet.add(PHYDID1);
testSet.add(PHYDID2);
assertEquals("The physical devices 1 did not match", testSet, physicalDeviceSet);
testSet.clear();
physicalDeviceSet = manager.getPhysicalDevices(virtualNetwork1.id(), vDevice2InVnet1.id());
testSet.add(PHYDID3);
testSet.add(PHYDID4);
assertEquals("The physical devices 2 did not match", testSet, physicalDeviceSet);
testSet.clear();
physicalDeviceSet = manager.getPhysicalDevices(virtualNetwork2.id(), vDevice1InVnet2.id());
testSet.add(PHYDID1);
testSet.add(PHYDID2);
assertEquals("The physical devices 1 did not match", testSet, physicalDeviceSet);
testSet.clear();
physicalDeviceSet = manager.getPhysicalDevices(virtualNetwork2.id(), vDevice2InVnet2.id());
testSet.add(PHYDID3);
testSet.add(PHYDID4);
assertEquals("The physical devices 2 did not match", testSet, physicalDeviceSet);
testSet.clear();
}
|
public static UserAgent parse(String userAgentString) {
return UserAgentParser.parse(userAgentString);
}
|
@Test
public void parseMiui10WithChromeTest() {
final String uaStr = "Mozilla/5.0 (Linux; Android 9; MIX 3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.80 Mobile Safari/537.36";
final UserAgent ua = UserAgentUtil.parse(uaStr);
assertEquals("Chrome", ua.getBrowser().toString());
assertEquals("70.0.3538.80", ua.getVersion());
assertEquals("Webkit", ua.getEngine().toString());
assertEquals("537.36", ua.getEngineVersion());
assertEquals("Android", ua.getOs().toString());
assertEquals("9", ua.getOsVersion());
assertEquals("Android", ua.getPlatform().toString());
assertTrue(ua.isMobile());
}
|
@VisibleForTesting
Set<String> extractFields(List<ResultMessage> hits) {
Set<String> filteredFields = Sets.newHashSet();
hits.forEach(hit -> {
final Message message = hit.getMessage();
for (String field : message.getFieldNames()) {
if (!Message.FILTERED_FIELDS.contains(field)) {
filteredFields.add(field);
}
}
});
return filteredFields;
}
|
@Test
public void extractFieldsForTwoMessagesContainingDifferentFields() throws Exception {
final ResultMessage r1 = mock(ResultMessage.class);
final Message m1 = mock(Message.class);
when(m1.getFieldNames()).thenReturn(ImmutableSet.of(
"message",
"source",
"timestamp",
"http_response",
"gl2_source_node",
"_index"
));
when(r1.getMessage()).thenReturn(m1);
final ResultMessage r2 = mock(ResultMessage.class);
final Message m2 = mock(Message.class);
when(m2.getFieldNames()).thenReturn(ImmutableSet.of(
"message",
"source",
"timestamp",
"took_ms",
"gl2_source_collector"
));
when(r2.getMessage()).thenReturn(m2);
final Set<String> result = searchResult.extractFields(ImmutableList.of(r1, r2));
assertThat(result)
.isNotNull()
.isNotEmpty()
.hasSize(5)
.containsExactlyInAnyOrder(
"message",
"source",
"timestamp",
"http_response",
"took_ms"
);
}
|
public @NonNull String fastTail(int numChars, Charset cs) throws IOException {
try (RandomAccessFile raf = new RandomAccessFile(file, "r")) {
long len = raf.length();
// err on the safe side and assume each char occupies 4 bytes
// an additional 1024-byte margin brings us back in sync in case we started reading from a non-char boundary.
long pos = Math.max(0, len - (numChars * 4 + 1024));
raf.seek(pos);
byte[] tail = new byte[(int) (len - pos)];
raf.readFully(tail);
String tails = cs.decode(java.nio.ByteBuffer.wrap(tail)).toString();
return tails.substring(Math.max(0, tails.length() - numChars));
}
}
|
@Test
public void shortTail() throws Exception {
File f = tmp.newFile();
Files.writeString(f.toPath(), "hello", Charset.defaultCharset());
TextFile t = new TextFile(f);
assertEquals("hello", t.fastTail(35));
}
|
public static long toLong(String str, long defaultValue) {
if (str == null) {
return defaultValue;
}
try {
return Long.parseLong(str);
} catch (NumberFormatException nfe) {
return defaultValue;
}
}
|
@Test
void testToLong() {
assertEquals(1L, NumberUtils.toLong(null, 1L));
assertEquals(1L, NumberUtils.toLong("", 1L));
assertEquals(1L, NumberUtils.toLong("1", 0L));
}
|
public static String reformatParams(Object[] params) {
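// Note: reformatParam(..) renders a null parameter as "[null]" (see the test below).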
StringBuilder sb = new StringBuilder();
for (int i = 0; i < params.length; i++) {
if (i > 0) {
sb.append(", ");
}
sb.append(reformatParam(params[i]));
}
return sb.toString();
}
|
@Test
public void reformatParams() {
String formattedParams = SqlLogFormatter.reformatParams(new Object[] {"foo", 42, null, true});
assertThat(formattedParams).isEqualTo("foo, 42, [null], true");
}
|
public OpenAPI read(Class<?> cls) {
return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
}
|
@Test
public void test4483Response() {
Reader reader = new Reader(new OpenAPI());
OpenAPI openAPI = reader.read(Ticket4483Resource.class);
String yaml = "openapi: 3.0.1\n" +
"tags:\n" +
"- name: Dummy\n" +
" description: Dummy resource for testing setup\n" +
"paths:\n" +
" /test:\n" +
" get:\n" +
" tags:\n" +
" - Dummy\n" +
" description: Dummy GET\n" +
" operationId: dummy\n" +
" responses:\n" +
" \"401\":\n" +
" description: Authentication is required\n" +
" content:\n" +
" application/json:\n" +
" schema:\n" +
" type: array\n" +
" items:\n" +
" $ref: '#/components/schemas/LocalizedError'\n" +
" \"200\":\n" +
" description: test\n" +
" content:\n" +
" application/json:\n" +
" schema:\n" +
" type: object\n" +
" additionalProperties:\n" +
" type: boolean\n" +
" /test/opresp:\n" +
" get:\n" +
" tags:\n" +
" - Dummy\n" +
" operationId: dummyopresp\n" +
" responses:\n" +
" \"401\":\n" +
" description: Authentication is required\n" +
" content:\n" +
" application/json:\n" +
" schema:\n" +
" type: array\n" +
" items:\n" +
" $ref: '#/components/schemas/LocalizedError'\n" +
" \"200\":\n" +
" description: Dummy GET opresp\n" +
" content:\n" +
" application/json:\n" +
" schema:\n" +
" type: object\n" +
" additionalProperties:\n" +
" type: boolean\n" +
" /test/oprespnodesc:\n" +
" get:\n" +
" tags:\n" +
" - Dummy\n" +
" operationId: oprespnodesc\n" +
" responses:\n" +
" \"401\":\n" +
" description: Authentication is required\n" +
" content:\n" +
" application/json:\n" +
" schema:\n" +
" type: array\n" +
" items:\n" +
" $ref: '#/components/schemas/LocalizedError'\n" +
"components:\n" +
" schemas:\n" +
" LocalizedError:\n" +
" type: object\n" +
" properties:\n" +
" code:\n" +
" type: string\n" +
" message:\n" +
" type: string\n";
SerializationMatchers.assertEqualsToYaml(openAPI, yaml);
}
|
@Override
@TpsControl(pointName = "ConfigPublish")
@Secured(action = ActionTypes.WRITE, signType = SignType.CONFIG)
@ExtractorManager.Extractor(rpcExtractor = ConfigRequestParamExtractor.class)
public ConfigPublishResponse handle(ConfigPublishRequest request, RequestMeta meta) throws NacosException {
try {
String dataId = request.getDataId();
String group = request.getGroup();
String content = request.getContent();
final String tenant = request.getTenant();
final String srcIp = meta.getClientIp();
final String requestIpApp = request.getAdditionParam("requestIpApp");
final String tag = request.getAdditionParam("tag");
final String appName = request.getAdditionParam("appName");
final String type = request.getAdditionParam("type");
final String srcUser = request.getAdditionParam("src_user");
final String encryptedDataKey = request.getAdditionParam("encryptedDataKey");
// check tenant
ParamUtils.checkParam(dataId, group, "datumId", content);
ParamUtils.checkParam(tag);
Map<String, Object> configAdvanceInfo = new HashMap<>(10);
MapUtil.putIfValNoNull(configAdvanceInfo, "config_tags", request.getAdditionParam("config_tags"));
MapUtil.putIfValNoNull(configAdvanceInfo, "desc", request.getAdditionParam("desc"));
MapUtil.putIfValNoNull(configAdvanceInfo, "use", request.getAdditionParam("use"));
MapUtil.putIfValNoNull(configAdvanceInfo, "effect", request.getAdditionParam("effect"));
MapUtil.putIfValNoNull(configAdvanceInfo, "type", type);
MapUtil.putIfValNoNull(configAdvanceInfo, "schema", request.getAdditionParam("schema"));
ParamUtils.checkParam(configAdvanceInfo);
if (AggrWhitelist.isAggrDataId(dataId)) {
Loggers.REMOTE_DIGEST.warn("[aggr-conflict] {} attempt to publish single data, {}, {}", srcIp, dataId,
group);
throw new NacosException(NacosException.NO_RIGHT, "dataId:" + dataId + " is aggr");
}
ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
configInfo.setMd5(request.getCasMd5());
configInfo.setType(type);
configInfo.setEncryptedDataKey(encryptedDataKey);
String betaIps = request.getAdditionParam("betaIps");
ConfigOperateResult configOperateResult = null;
String persistEvent = ConfigTraceService.PERSISTENCE_EVENT;
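// Three publish paths follow: plain (blank betaIps and tag), tag, and beta; each supports a CAS variant when casMd5 is set.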
if (StringUtils.isBlank(betaIps)) {
if (StringUtils.isBlank(tag)) {
if (StringUtils.isNotBlank(request.getCasMd5())) {
configOperateResult = configInfoPersistService.insertOrUpdateCas(srcIp, srcUser, configInfo,
configAdvanceInfo);
if (!configOperateResult.isSuccess()) {
return ConfigPublishResponse.buildFailResponse(ResponseCode.FAIL.getCode(),
"Cas publish fail,server md5 may have changed.");
}
} else {
configOperateResult = configInfoPersistService.insertOrUpdate(srcIp, srcUser, configInfo,
configAdvanceInfo);
}
ConfigChangePublisher.notifyConfigChange(new ConfigDataChangeEvent(false, dataId, group, tenant,
configOperateResult.getLastModified()));
} else {
if (StringUtils.isNotBlank(request.getCasMd5())) {
configOperateResult = configInfoTagPersistService.insertOrUpdateTagCas(configInfo, tag, srcIp,
srcUser);
if (!configOperateResult.isSuccess()) {
return ConfigPublishResponse.buildFailResponse(ResponseCode.FAIL.getCode(),
"Cas publish tag config fail,server md5 may have changed.");
}
} else {
configOperateResult = configInfoTagPersistService.insertOrUpdateTag(configInfo, tag, srcIp,
srcUser);
}
persistEvent = ConfigTraceService.PERSISTENCE_EVENT_TAG + "-" + tag;
ConfigChangePublisher.notifyConfigChange(
new ConfigDataChangeEvent(false, dataId, group, tenant, tag,
configOperateResult.getLastModified()));
}
} else {
// beta publish
if (StringUtils.isNotBlank(request.getCasMd5())) {
configOperateResult = configInfoBetaPersistService.insertOrUpdateBetaCas(configInfo, betaIps, srcIp,
srcUser);
if (!configOperateResult.isSuccess()) {
return ConfigPublishResponse.buildFailResponse(ResponseCode.FAIL.getCode(),
"Cas publish beta config fail,server md5 may have changed.");
}
} else {
configOperateResult = configInfoBetaPersistService.insertOrUpdateBeta(configInfo, betaIps, srcIp,
srcUser);
}
persistEvent = ConfigTraceService.PERSISTENCE_EVENT_BETA;
ConfigChangePublisher.notifyConfigChange(
new ConfigDataChangeEvent(true, dataId, group, tenant, configOperateResult.getLastModified()));
}
ConfigTraceService.logPersistenceEvent(dataId, group, tenant, requestIpApp,
configOperateResult.getLastModified(), srcIp, persistEvent, ConfigTraceService.PERSISTENCE_TYPE_PUB,
content);
return ConfigPublishResponse.buildSuccessResponse();
} catch (Exception e) {
Loggers.REMOTE_DIGEST.error("[ConfigPublishRequestHandler] publish config error, request={}", request, e);
return ConfigPublishResponse.buildFailResponse(
(e instanceof NacosException) ? ((NacosException) e).getErrCode() : ResponseCode.FAIL.getCode(),
e.getMessage());
}
}
|
@Test
void testBetaPublishCas() throws NacosException, InterruptedException {
String dataId = "testBetaPublishCas";
String group = "group";
String tenant = "tenant";
String content = "content";
ConfigPublishRequest configPublishRequest = new ConfigPublishRequest();
configPublishRequest.setDataId(dataId);
configPublishRequest.setGroup(group);
configPublishRequest.setTenant(tenant);
configPublishRequest.setContent(content);
configPublishRequest.setCasMd5("12314532");
Map<String, String> keyMap = new HashMap<>();
String srcUser = "src_user111";
keyMap.put("src_user", srcUser);
String betaIps = "127.0.0.1,127.0.0.2";
keyMap.put("betaIps", betaIps);
configPublishRequest.setAdditionMap(keyMap);
RequestMeta requestMeta = new RequestMeta();
String clientIp = "127.0.0.1";
requestMeta.setClientIp(clientIp);
AtomicReference<ConfigDataChangeEvent> reference = new AtomicReference<>();
NotifyCenter.registerSubscriber(new Subscriber() {
@Override
public void onEvent(Event event) {
ConfigDataChangeEvent event1 = (ConfigDataChangeEvent) event;
if (event1.dataId.equals(dataId)) {
reference.set((ConfigDataChangeEvent) event);
}
}
@Override
public Class<? extends Event> subscribeType() {
return ConfigDataChangeEvent.class;
}
});
ConfigOperateResult configOperateResult = new ConfigOperateResult(true);
long timestamp = System.currentTimeMillis();
long id = timestamp / 1000;
configOperateResult.setId(id);
configOperateResult.setLastModified(timestamp);
when(configInfoBetaPersistService.insertOrUpdateBetaCas(any(ConfigInfo.class), eq(betaIps), eq(requestMeta.getClientIp()),
eq(srcUser))).thenReturn(configOperateResult);
ConfigPublishResponse response = configPublishRequestHandler.handle(configPublishRequest, requestMeta);
assertEquals(ResponseCode.SUCCESS.getCode(), response.getResultCode());
Thread.sleep(500L);
assertNotNull(reference.get());
assertEquals(dataId, reference.get().dataId);
assertEquals(group, reference.get().group);
assertEquals(tenant, reference.get().tenant);
assertEquals(timestamp, reference.get().lastModifiedTs);
assertFalse(reference.get().isBatch);
assertTrue(reference.get().isBeta);
}
|
static int calculateChecksum(ByteBuf data) {
return calculateChecksum(data, data.readerIndex(), data.readableBytes());
}
|
@Test
public void testCalculateChecksum() {
ByteBuf input = Unpooled.wrappedBuffer(new byte[] {
'n', 'e', 't', 't', 'y'
});
assertEquals(maskChecksum(0xd6cb8b55L), calculateChecksum(input));
input.release();
}
|
public Exchange unmarshallExchange(CamelContext camelContext, byte[] buffer, String deserializationFilter)
throws IOException, ClassNotFoundException {
return unmarshallExchange(camelContext, new ByteArrayInputStream(buffer), deserializationFilter);
}
|
@Test
public void shouldFailWithRejected() throws IOException, ClassNotFoundException {
Employee emp = new Employee("Mickey", "Mouse");
ByteArrayOutputStream baos = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(baos);
oos.writeObject(emp);
oos.flush();
oos.close();
InputStream is = new ByteArrayInputStream(baos.toByteArray());
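// The filter string uses the JDK serialization-filter (JEP 290) pattern syntax: java.** and org.apache.camel.** are allowed, and "!*" rejects everything else, so Employee is REJECTED.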
InvalidClassException thrown = Assertions.assertThrows(InvalidClassException.class, () -> {
codec.unmarshallExchange(context, is, "java.**;org.apache.camel.**;!*");
});
Assertions.assertEquals("filter status: REJECTED", thrown.getMessage());
}
|
public static boolean contains(int[] replicas, int value) {
for (int replica : replicas) {
if (replica == value) return true;
}
return false;
}
|
@Test
public void testContains() {
assertTrue(Replicas.contains(new int[] {3, 0, 1}, 0));
assertFalse(Replicas.contains(new int[] {}, 0));
assertTrue(Replicas.contains(new int[] {1}, 1));
}
|
public static boolean parse(final String str, ResTable_config out) {
return parse(str, out, true);
}
|
@Test
public void parse_keysHidden_keysexposed() {
ResTable_config config = new ResTable_config();
ConfigDescription.parse("keysexposed", config);
assertThat(config.inputFlags).isEqualTo(KEYSHIDDEN_NO);
}
|
@SuppressWarnings("unchecked")
public DataObjectToObjectCache<V> clone() throws CloneNotSupportedException
{
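// Shallow-clone the object, then clone the backing map so the copy tracks its entries independently (values are still shared).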
DataObjectToObjectCache<V> cloned = (DataObjectToObjectCache<V>) super.clone();
cloned._cache = (HashMap<DataObjectKey, V>) _cache.clone();
return cloned;
}
|
@Test
public void testClone() throws CloneNotSupportedException
{
IdentityHashMap<Object, DataTemplate<?>> controlCache = new IdentityHashMap<>();
DataObjectToObjectCache<DataTemplate<?>> testCache = new DataObjectToObjectCache<>();
populateTestData(controlCache, testCache);
testCache = testCache.clone();
crossCheckTestData(controlCache, testCache);
}
|
public static void checkArgument(boolean expression, Object errorMessage) {
if (Objects.isNull(errorMessage)) {
throw new IllegalArgumentException("errorMessage cannot be null.");
}
if (!expression) {
throw new IllegalArgumentException(String.valueOf(errorMessage));
}
}
|
@Test
void testCheckArgument2Args1true2null() {
assertThrows(IllegalArgumentException.class, () -> {
Preconditions.checkArgument(true, null);
});
}
|
public static ConnectedComponents findComponentsRecursive(Graph graph, EdgeTransitionFilter edgeTransitionFilter, boolean excludeSingleEdgeComponents) {
return new EdgeBasedTarjanSCC(graph, edgeTransitionFilter, excludeSingleEdgeComponents).findComponentsRecursive();
}
|
@Test
public void oneWayBridges() {
// 0 - 1 -> 2 - 3
// | |
// 4 - 5 -> 6 - 7
g.edge(0, 1).setDistance(1).set(speedEnc, 10, 10);
g.edge(1, 2).setDistance(1).set(speedEnc, 10, 0);
g.edge(2, 3).setDistance(1).set(speedEnc, 10, 10);
g.edge(2, 4).setDistance(1).set(speedEnc, 10, 10);
g.edge(3, 5).setDistance(1).set(speedEnc, 10, 10);
g.edge(4, 5).setDistance(1).set(speedEnc, 10, 10);
g.edge(5, 6).setDistance(1).set(speedEnc, 10, 0);
g.edge(6, 7).setDistance(1).set(speedEnc, 10, 10);
ConnectedComponents result = EdgeBasedTarjanSCC.findComponentsRecursive(g, fwdAccessFilter, false);
assertEquals(16, result.getEdgeKeys());
assertEquals(7, result.getTotalComponents());
// 0-1, 2-3-5-4-2 and 6-7
assertEquals(3, result.getComponents().size());
// 1->2, 2->1 and 5->6, 6->5
assertEquals(4, result.getSingleEdgeComponents().cardinality());
assertEquals(result.getComponents().get(1), result.getBiggestComponent());
}
|
public boolean isFiller() {
if(filler == null) return false;
return !this.getFiller().equals(Filler.NONE);
}
|
@Test
void isFiller_false() {
product.setFiller(Filler.NONE);
assertFalse(product.isFiller());
}
|
public List<PropertyMetadata<?>> getSessionProperties()
{
return sessionProperties;
}
|
@Test
public void testEmptyConfigNodeSelectionStrategyConfig()
{
ConnectorSession connectorSession = new TestingConnectorSession(
new HiveCommonSessionProperties(
new HiveCommonClientConfig().setNodeSelectionStrategy(NodeSelectionStrategy.valueOf("NO_PREFERENCE"))
).getSessionProperties());
assertEquals(getNodeSelectionStrategy(connectorSession), NO_PREFERENCE);
}
|
@Override
public long sum() {
return get(sumAsync(60, TimeUnit.SECONDS));
}
|
@Test
public void testSum() {
RLongAdder adder1 = redisson.getLongAdder("test1");
RLongAdder adder2 = redisson.getLongAdder("test1");
RLongAdder adder3 = redisson.getLongAdder("test1");
adder1.add(2);
adder2.add(4);
adder3.add(1);
Assertions.assertThat(adder1.sum()).isEqualTo(7);
Assertions.assertThat(adder2.sum()).isEqualTo(7);
Assertions.assertThat(adder3.sum()).isEqualTo(7);
adder1.destroy();
adder2.destroy();
adder3.destroy();
}
|
@Override
public void checkBeforeUpdate(final SetDefaultSingleTableStorageUnitStatement sqlStatement) {
checkStorageUnitExist(sqlStatement);
}
|
@Test
void assertCheckWithLogicDataSource() {
ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
DataSourceMapperRuleAttribute ruleAttribute = mock(DataSourceMapperRuleAttribute.class, RETURNS_DEEP_STUBS);
when(ruleAttribute.getDataSourceMapper().keySet()).thenReturn(Collections.singleton("logic_ds"));
when(database.getRuleMetaData().getAttributes(any())).thenReturn(Collections.singleton(ruleAttribute));
executor.setDatabase(database);
assertDoesNotThrow(() -> executor.checkBeforeUpdate(new SetDefaultSingleTableStorageUnitStatement("logic_ds")));
}
|
public synchronized void setLevel(Level newLevel) {
if (level == newLevel) {
// nothing to do;
return;
}
if (newLevel == null && isRootLogger()) {
throw new IllegalArgumentException(
"The level of the root logger cannot be set to null");
}
level = newLevel;
if (newLevel == null) {
effectiveLevelInt = parent.effectiveLevelInt;
newLevel = parent.getEffectiveLevel();
} else {
effectiveLevelInt = newLevel.levelInt;
}
if (childrenList != null) {
int len = childrenList.size();
for (int i = 0; i < len; i++) {
Logger child = (Logger) childrenList.get(i);
// tell child to handle parent levelInt change
child.handleParentLevelChange(effectiveLevelInt);
}
}
// inform listeners
loggerContext.fireOnLevelChange(this, newLevel);
}
|
@Test
public void testEnabled_Debug() throws Exception {
root.setLevel(Level.DEBUG);
checkLevelThreshold(loggerTest, Level.DEBUG);
}
|
@Override
public void check(final String databaseName, final ShardingRuleConfiguration ruleConfig, final Map<String, DataSource> dataSourceMap, final Collection<ShardingSphereRule> builtRules) {
checkShardingAlgorithms(ruleConfig.getShardingAlgorithms().values());
checkKeyGeneratorAlgorithms(ruleConfig.getKeyGenerators().values());
Collection<String> keyGenerators = ruleConfig.getKeyGenerators().keySet();
Collection<String> auditors = ruleConfig.getAuditors().keySet();
Collection<String> shardingAlgorithms = ruleConfig.getShardingAlgorithms().keySet();
checkTables(databaseName, ruleConfig.getTables(), ruleConfig.getAutoTables(), keyGenerators, auditors, shardingAlgorithms);
checkKeyGenerateStrategy(databaseName, ruleConfig.getDefaultKeyGenerateStrategy(), keyGenerators);
checkAuditStrategy(databaseName, ruleConfig.getDefaultAuditStrategy(), auditors);
checkShardingStrategy(databaseName, ruleConfig.getDefaultDatabaseShardingStrategy(), shardingAlgorithms);
checkShardingStrategy(databaseName, ruleConfig.getDefaultTableShardingStrategy(), shardingAlgorithms);
}
|
@SuppressWarnings("unchecked")
@Test
void assertCheckTableConfigurationFailed() {
ShardingRuleConfiguration ruleConfig = createRuleConfiguration();
ruleConfig.setTables(Collections.singletonList(createShardingTableRuleConfiguration(null, null, null)));
ruleConfig.setAutoTables(Collections.singleton(createShardingAutoTableRuleConfiguration(null, null, null)));
RuleConfigurationChecker<ShardingRuleConfiguration> checker = OrderedSPILoader.getServicesByClass(
RuleConfigurationChecker.class, Collections.singleton(ruleConfig.getClass())).get(ruleConfig.getClass());
assertThrows(MissingRequiredShardingConfigurationException.class, () -> checker.check("foo_db", ruleConfig, Collections.emptyMap(), Collections.emptyList()));
}
|
public Set<String> getValues() {
return values;
}
|
@Test
void requireThatValueSetIsMutable() {
FeatureSet node = new FeatureSet("key");
node.getValues().add("valueA");
assertValues(List.of("valueA"), node);
node = new FeatureSet("key", "valueA");
node.getValues().add("valueB");
assertValues(List.of("valueA", "valueB"), node);
}
|
public abstract WriteOperation<DestinationT, OutputT> createWriteOperation();
|
@Test
public void testFileBasedWriterWithWritableByteChannelFactory() throws Exception {
final String testUid = "testId";
ResourceId root = getBaseOutputDirectory();
WriteOperation<Void, String> writeOp =
SimpleSink.makeSimpleSink(
root, "file", "-SS-of-NN", "txt", new DrunkWritableByteChannelFactory())
.createWriteOperation();
final Writer<Void, String> writer = writeOp.createWriter();
final ResourceId expectedFile =
writeOp
.getTempDirectory()
.resolve(Writer.spreadUid(testUid), StandardResolveOptions.RESOLVE_FILE);
final List<String> expected = new ArrayList<>();
expected.add("header");
expected.add("header");
expected.add("a");
expected.add("a");
expected.add("b");
expected.add("b");
expected.add("footer");
expected.add("footer");
writer.open(testUid);
writer.write("a");
writer.write("b");
writer.close();
assertEquals(expectedFile, writer.getOutputFile());
assertFileContains(expected, expectedFile);
}
|
@Bean
public CorsFilter corsFilter() {
UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource();
CorsConfiguration config = jHipsterProperties.getCors();
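// CORS mappings are registered only when allowed origins or origin patterns are configured; otherwise the returned filter has an empty source and is effectively a no-op.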
if (!CollectionUtils.isEmpty(config.getAllowedOrigins()) || !CollectionUtils.isEmpty(config.getAllowedOriginPatterns())) {
log.debug("Registering CORS filter");
source.registerCorsConfiguration("/api/**", config);
source.registerCorsConfiguration("/management/**", config);
source.registerCorsConfiguration("/v3/api-docs", config);
source.registerCorsConfiguration("/swagger-ui/**", config);
}
return new CorsFilter(source);
}
|
@Test
void shouldCorsFilterDeactivatedForNullAllowedOrigins() throws Exception {
props.getCors().setAllowedOrigins(null);
MockMvc mockMvc = MockMvcBuilders.standaloneSetup(new WebConfigurerTestController()).addFilters(webConfigurer.corsFilter()).build();
mockMvc
.perform(get("/api/test-cors").header(HttpHeaders.ORIGIN, "other.domain.com"))
.andExpect(status().isOk())
.andExpect(header().doesNotExist(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN));
}
|
public AuditEventProcessor(PluginMgr pluginMgr) {
this.pluginMgr = pluginMgr;
}
|
@Test
public void testAuditEventProcessor() throws IOException {
AuditEventProcessor processor = GlobalStateMgr.getCurrentState().getAuditEventProcessor();
long start = System.currentTimeMillis();
for (int i = 0; i < 10000; i++) {
AuditEvent event = new AuditEvent.AuditEventBuilder().setEventType(EventType.AFTER_QUERY)
.setTimestamp(System.currentTimeMillis())
.setClientIp("127.0.0.1")
.setUser("user1")
.setAuthorizedUser("user2")
.setDb("db1")
.setState("EOF")
.setQueryTime(2000)
.setScanBytes(100000)
.setScanRows(200000)
.setReturnRows(i)
.setStmtId(1234)
.setStmt("select * from tbl1").build();
processor.handleAuditEvent(event);
}
long total = System.currentTimeMillis() - start;
System.out.println("total(ms): " + total + ", avg: " + total / 10000.0);
}
|
byte[] removeEscapedEnclosures( byte[] field, int nrEnclosuresFound ) {
byte[] result = new byte[field.length - nrEnclosuresFound];
int resultIndex = 0;
for ( int i = 0; i < field.length; i++ ) {
result[resultIndex++] = field[i];
if ( field[i] == enclosure[0] && i + 1 < field.length && field[i + 1] == enclosure[0] ) {
// Skip the escaped enclosure after adding the first one
i++;
}
}
return result;
}
|
@Test
public void testRemoveEscapedEnclosuresWithTwoByThemselves() {
CsvInputData csvInputData = new CsvInputData();
csvInputData.enclosure = "\"".getBytes();
String result = new String( csvInputData.removeEscapedEnclosures( "\"\"\"\"".getBytes(), 2 ) );
assertEquals( "\"\"", result );
}
|
public List<Stream> match(Message message) {
final Set<Stream> result = Sets.newHashSet();
final Set<String> blackList = Sets.newHashSet();
for (final Rule rule : rulesList) {
if (blackList.contains(rule.getStreamId())) {
continue;
}
final StreamRule streamRule = rule.getStreamRule();
final StreamRuleType streamRuleType = streamRule.getType();
final Stream.MatchingType matchingType = rule.getMatchingType();
if (!ruleTypesNotNeedingFieldPresence.contains(streamRuleType)
&& !message.hasField(streamRule.getField())) {
if (matchingType == Stream.MatchingType.AND) {
result.remove(rule.getStream());
// blacklist stream because it can't match anymore
blackList.add(rule.getStreamId());
}
continue;
}
final Stream stream;
if (streamRuleType != StreamRuleType.REGEX) {
stream = rule.match(message);
} else {
stream = rule.matchWithTimeOut(message, streamProcessingTimeout, TimeUnit.MILLISECONDS);
}
if (stream == null) {
if (matchingType == Stream.MatchingType.AND) {
result.remove(rule.getStream());
// blacklist stream because it can't match anymore
blackList.add(rule.getStreamId());
}
} else {
result.add(stream);
if (matchingType == Stream.MatchingType.OR) {
// blacklist stream because it is already matched
blackList.add(rule.getStreamId());
}
}
}
final Stream defaultStream = defaultStreamProvider.get();
boolean alreadyRemovedDefaultStream = false;
for (Stream stream : result) {
if (stream.getRemoveMatchesFromDefaultStream()) {
if (alreadyRemovedDefaultStream || message.removeStream(defaultStream)) {
alreadyRemovedDefaultStream = true;
if (LOG.isTraceEnabled()) {
LOG.trace("Successfully removed default stream <{}> from message <{}>", defaultStream.getId(), message.getId());
}
} else {
// A previously executed message processor (or Illuminate) has likely already removed the
// default stream from the message. Now, the message has matched a stream in the Graylog
// MessageFilterChain, and the matching stream is also set to remove the default stream.
// This is usually from user-defined stream rules, and is generally not a problem.
cannotRemoveDefaultMeter.inc();
if (LOG.isTraceEnabled()) {
LOG.trace("Couldn't remove default stream <{}> from message <{}>", defaultStream.getId(), message.getId());
}
}
}
}
return ImmutableList.copyOf(result);
}
|
@Test
public void testExactMatch() throws Exception {
final StreamMock stream = getStreamMock("test");
final StreamRuleMock rule = new StreamRuleMock(ImmutableMap.of(
"_id", new ObjectId(),
"field", "testfield",
"value", "testvalue",
"type", StreamRuleType.EXACT.toInteger(),
"stream_id", stream.getId()
));
stream.setStreamRules(Lists.newArrayList(rule));
final StreamRouterEngine engine = newEngine(Lists.newArrayList(stream));
final Message message = getMessage();
// With wrong value for field.
message.addField("testfield", "no-testvalue");
assertTrue(engine.match(message).isEmpty());
// With matching value for field.
message.addField("testfield", "testvalue");
assertEquals(Lists.newArrayList(stream), engine.match(message));
}
|
public Map<String, ParamDefinition> generatedStaticWorkflowParamDefs(Workflow workflow) {
Map<String, ParamDefinition> allParamDefs = new LinkedHashMap<>();
Map<String, ParamDefinition> defaultWorkflowParams =
defaultParamManager.getDefaultWorkflowParams();
// merge default workflow params
ParamsMergeHelper.mergeParams(
allParamDefs,
defaultWorkflowParams,
ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.SYSTEM_DEFAULT, false));
// merge defined workflow params
if (workflow.getParams() != null) {
ParamsMergeHelper.mergeParams(
allParamDefs,
workflow.getParams(),
ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.DEFINITION, false));
}
return allParamDefs;
}
|
@Test
public void testStaticWorkflowParamMerge() {
workflow.getParams().put("p1", ParamDefinition.buildParamDefinition("p1", "d1"));
when(defaultParamManager.getDefaultWorkflowParams())
.thenReturn(singletonMap("p2", ParamDefinition.buildParamDefinition("p2", "d2")));
Map<String, ParamDefinition> mergedWorkflowParamDefs =
paramsManager.generatedStaticWorkflowParamDefs(workflow);
Assert.assertEquals("d1", mergedWorkflowParamDefs.get("p1").asStringParamDef().getValue());
Assert.assertEquals("d2", mergedWorkflowParamDefs.get("p2").asStringParamDef().getValue());
}
|
public void wakeup() {
commandConsumer.wakeup();
}
|
@Test
public void shouldWakeUp() {
// When:
commandTopic.wakeup();
//Then:
verify(commandConsumer).wakeup();
}
|
@Override
public ImportResult importItem(UUID jobId, IdempotentImportExecutor idempotentExecutor,
AD authData, MediaContainerResource data) throws Exception {
PhotosContainerResource photosResource = MediaContainerResource.mediaToPhoto(data);
ImportResult photosResult = photosImporter
.importItem(jobId, idempotentExecutor, authData, photosResource);
VideosContainerResource videosResource = MediaContainerResource.mediaToVideo(data);
ImportResult videosResult = videosImporter
.importItem(jobId, idempotentExecutor, authData, videosResource);
return ImportResult.merge(photosResult, videosResult);
}
|
@Test
public void shouldHandleVariousInputs() throws Exception {
assertEquals(ImportResult.OK,
mediaImporter.importItem(null, null, null, new MediaContainerResource(null, null, null)));
assertEquals(ImportResult.OK,
mediaImporter
.importItem(null, null, null, new MediaContainerResource(albums, null, videos)));
assertEquals(ImportResult.OK,
mediaImporter
.importItem(null, null, null, new MediaContainerResource(albums, photos, null)));
assertEquals(ImportResult.OK,
mediaImporter
.importItem(null, null, null, new MediaContainerResource(albums, photos, videos)));
}
|
static AnnotatedClusterState generatedStateFrom(final Params params) {
final ContentCluster cluster = params.cluster;
final ClusterState workingState = ClusterState.emptyState();
final Map<Node, NodeStateReason> nodeStateReasons = new HashMap<>();
for (final NodeInfo nodeInfo : cluster.getNodeInfos()) {
final NodeState nodeState = computeEffectiveNodeState(nodeInfo, params, nodeStateReasons);
workingState.setNodeState(nodeInfo.getNode(), nodeState);
}
takeDownGroupsWithTooLowAvailability(workingState, nodeStateReasons, params);
final Optional<ClusterStateReason> reasonToBeDown = clusterDownReason(workingState, params);
if (reasonToBeDown.isPresent()) {
workingState.setClusterState(State.DOWN);
}
workingState.setDistributionBits(inferDistributionBitCount(cluster, workingState, params));
return new AnnotatedClusterState(workingState, reasonToBeDown, nodeStateReasons);
}
|
@Test
void distributor_nodes_are_not_implicitly_transitioned_to_maintenance_mode() {
final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp();
final ClusterStateGenerator.Params params = fixture.generatorParams()
.currentTimeInMillis(10_000)
.transitionTimes(2000);
fixture.reportDistributorNodeState(2, State.DOWN);
final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.DISTRIBUTOR, 2));
nodeInfo.setTransitionTime(9000);
final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
assertThat(state.toString(), equalTo("distributor:5 .2.s:d storage:5"));
assertThat(state.getNodeStateReasons(),
not(hasStateReasonForNode(distributorNode(1), NodeStateReason.NODE_NOT_BACK_UP_WITHIN_GRACE_PERIOD)));
}
|
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) {
return decoder.decodeFunctionResult(rawInput, outputParameters);
}
|
@Test
public void testDecodeMultipleDynamicStruct2() {
String rawInput =
"0x00000000000000000000000000000000000000000000000000000000000000c0"
+ "0000000000000000000000000000000000000000000000000000000000000180"
+ "0000000000000000000000000000000000000000000000000000000000000001"
+ "000000000000000000000000000000000000000000000000000000000000000a"
+ "0000000000000000000000000000000000000000000000000000000000000002"
+ "000000000000000000000000000000000000000000000000000000000000000b"
+ "0000000000000000000000000000000000000000000000000000000000000040"
+ "0000000000000000000000000000000000000000000000000000000000000080"
+ "0000000000000000000000000000000000000000000000000000000000000002"
+ "6964000000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000000004"
+ "6e616d6500000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000000040"
+ "0000000000000000000000000000000000000000000000000000000000000080"
+ "0000000000000000000000000000000000000000000000000000000000000002"
+ "6964000000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000000004"
+ "6e616d6500000000000000000000000000000000000000000000000000000000";
assertEquals(
FunctionReturnDecoder.decode(
rawInput, AbiV2TestFixture.getFooFooBarBarFunction.getOutputParameters()),
Arrays.asList(
new AbiV2TestFixture.Foo("id", "name"),
new AbiV2TestFixture.Foo("id", "name"),
new AbiV2TestFixture.Bar(BigInteger.ONE, BigInteger.TEN),
new AbiV2TestFixture.Bar(BigInteger.valueOf(2), BigInteger.valueOf(11))));
}
|
@Override
public MatchType convert(@NotNull String type) {
if (type.contains(DELIMITER)) {
String[] matchType = type.split(DELIMITER);
return new MatchType(RateLimitType.valueOf(matchType[0].toUpperCase()), matchType[1]);
}
return new MatchType(RateLimitType.valueOf(type.toUpperCase()), null);
}
|
@Test
@SuppressWarnings("deprecation")
public void testConvertStringTypeMethodOnly() {
MatchType matchType = target.convert("httpmethod");
assertThat(matchType).isNotNull();
assertThat(matchType.getType()).isEqualByComparingTo(RateLimitType.HTTPMETHOD);
assertThat(matchType.getMatcher()).isNull();
}
|
public void ensureFolder(String parentPath, String name)
throws IOException, InvalidTokenException {
Map<String, Object> rawFolder = new LinkedHashMap<>();
rawFolder.put("name", name);
String url;
try {
url =
getUriBuilder()
.setPath(API_PATH_PREFIX + "/mounts/primary/files/folder")
.setParameter("path", parentPath)
.build()
.toString();
} catch (URISyntaxException e) {
throw new IllegalStateException("Could not produce url.", e);
}
Request.Builder requestBuilder = getRequestBuilder(url);
requestBuilder.post(
RequestBody.create(
MediaType.parse("application/json"), objectMapper.writeValueAsString(rawFolder)));
try (Response response = getResponse(requestBuilder)) {
int code = response.code();
// 409 response code means that the folder already exists
if ((code < 200 || code > 299) && code != 409) {
throw new KoofrClientIOException(response);
}
}
}
|
@Test
public void testEnsureFolderTokenExpired() throws Exception {
when(credentialFactory.refreshCredential(credential))
.then(
(InvocationOnMock invocation) -> {
final Credential cred = invocation.getArgument(0);
cred.setAccessToken("acc1");
return cred;
});
server.enqueue(new MockResponse().setResponseCode(401));
server.enqueue(new MockResponse().setResponseCode(200));
client.ensureFolder("/path/to/folder", "name");
assertEquals(2, server.getRequestCount());
RecordedRequest recordedRequest = server.takeRequest();
assertEquals("POST", recordedRequest.getMethod());
assertEquals(
"/api/v2/mounts/primary/files/folder?path=%2Fpath%2Fto%2Ffolder",
recordedRequest.getPath());
assertEquals("Bearer acc", recordedRequest.getHeader("Authorization"));
assertEquals("2.1", recordedRequest.getHeader("X-Koofr-Version"));
assertEquals(
"application/json; charset=utf-8", recordedRequest.getHeader("Content-Type"));
assertEquals("{\"name\":\"name\"}", recordedRequest.getBody().readUtf8());
recordedRequest = server.takeRequest();
assertEquals("POST", recordedRequest.getMethod());
assertEquals(
"/api/v2/mounts/primary/files/folder?path=%2Fpath%2Fto%2Ffolder",
recordedRequest.getPath());
assertEquals("Bearer acc1", recordedRequest.getHeader("Authorization"));
assertEquals("2.1", recordedRequest.getHeader("X-Koofr-Version"));
assertEquals(
"application/json; charset=utf-8", recordedRequest.getHeader("Content-Type"));
assertEquals("{\"name\":\"name\"}", recordedRequest.getBody().readUtf8());
}
|
private Function<KsqlConfig, Kudf> getUdfFactory(
final Method method,
final UdfDescription udfDescriptionAnnotation,
final String functionName,
final FunctionInvoker invoker,
final String sensorName
) {
return ksqlConfig -> {
final Object actualUdf = FunctionLoaderUtils.instantiateFunctionInstance(
method.getDeclaringClass(), udfDescriptionAnnotation.name());
if (actualUdf instanceof Configurable) {
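// Run the user-supplied configure() inside the extension security manager's UDF scope.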
ExtensionSecurityManager.INSTANCE.pushInUdf();
try {
((Configurable) actualUdf)
.configure(ksqlConfig.getKsqlFunctionsConfigProps(functionName));
} finally {
ExtensionSecurityManager.INSTANCE.popOutUdf();
}
}
final PluggableUdf theUdf = new PluggableUdf(invoker, actualUdf);
return metrics.<Kudf>map(m -> new UdfMetricProducer(
m.getSensor(sensorName),
theUdf,
Time.SYSTEM
)).orElse(theUdf);
};
}
|
@Test
public void shouldSupportUdfParameterAnnotation() {
final UdfFactory substring = FUNC_REG.getUdfFactory(FunctionName.of("somefunction"));
final KsqlScalarFunction function = substring.getFunction(
ImmutableList.of(
SqlArgument.of(SqlTypes.STRING),
SqlArgument.of(SqlTypes.STRING),
SqlArgument.of(SqlTypes.STRING)));
final List<ParameterInfo> arguments = function.parameterInfo();
assertThat(arguments.get(0).name(), is("justValue"));
assertThat(arguments.get(0).description(), is(""));
assertThat(arguments.get(1).name(), is("valueAndDescription"));
assertThat(arguments.get(1).description(), is("Some description"));
// NB: Is the below failing?
// Then you need to add `-parameters` to your IDE's java compiler settings.
assertThat(arguments.get(2).name(), is("noValue"));
assertThat(arguments.get(2).description(), is(""));
}
|
@ProtoFactory
public static MediaType fromString(String tree) {
if (tree == null || tree.isEmpty()) throw CONTAINER.missingMediaType();
Matcher matcher = TREE_PATTERN.matcher(tree);
return parseSingleMediaType(tree, matcher, false);
}
|
@Test(expected = EncodingException.class)
public void testWrongQuoting() {
MediaType.fromString("application/json;charset= \"UTF-8");
}
|
public @NonNull String fastTail(int numChars, Charset cs) throws IOException {
try (RandomAccessFile raf = new RandomAccessFile(file, "r")) {
long len = raf.length();
// err on the safe side and assume each char occupies 4 bytes
// an additional 1024-byte margin brings us back in sync in case we started reading from a non-char boundary.
long pos = Math.max(0, len - (numChars * 4 + 1024));
raf.seek(pos);
byte[] tail = new byte[(int) (len - pos)];
raf.readFully(tail);
String tails = cs.decode(java.nio.ByteBuffer.wrap(tail)).toString();
return tails.substring(Math.max(0, tails.length() - numChars));
}
}
|
@Test
public void tail() throws Exception {
File f = tmp.newFile();
FileUtils.copyURLToFile(getClass().getResource("ascii.txt"), f);
String whole = Files.readString(f.toPath(), Charset.defaultCharset());
TextFile t = new TextFile(f);
String tailStr = whole.substring(whole.length() - 34);
assertEquals(tailStr, t.fastTail(tailStr.length()));
}
|
@Udf(description = "Converts a string representation of a date in the given format"
+ " into a DATE value.")
public Date parseDate(
@UdfParameter(
description = "The string representation of a date.") final String formattedDate,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.text.SimpleDateFormat.") final String formatPattern) {
if (formattedDate == null || formatPattern == null) {
return null;
}
try {
final long time = formatters.get(formatPattern).parse(formattedDate).getTime();
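// A DATE has no time-of-day component, so the parsed instant must be midnight-aligned; any remainder within the day means the pattern captured time fields.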
if (time % MILLIS_IN_DAY != 0) {
throw new KsqlFunctionException("Date format contains time field.");
}
return new Date(time);
} catch (final ExecutionException | RuntimeException | ParseException e) {
throw new KsqlFunctionException("Failed to parse date '" + formattedDate
+ "' with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
}
|
@Test
public void shouldThrowOnUnsupportedFields() {
// When:
final Exception e = assertThrows(
KsqlFunctionException.class,
() -> udf.parseDate("2021-12-01 05:40:34", "yyyy-MM-dd HH:mm:ss"));
// Then:
assertThat(e.getMessage(), is("Failed to parse date '2021-12-01 05:40:34' with formatter 'yyyy-MM-dd HH:mm:ss': Date format contains time field."));
}
|
static boolean isMulticastAddress(final String hostAndPort)
{
ParseResult result = tryParseIpV4(hostAndPort);
if (null != result)
{
final String host = result.host;
for (int i = 0, dotIndex = 0, end = host.length() - 1; i <= end; i++)
{
final char c = host.charAt(i);
if ('.' == c || end == i)
{
final int length = end == i ? i - dotIndex : i - 1 - dotIndex;
if (length <= 0 || length > 3)
{
return false;
}
if (0 == dotIndex)
{
final int firstByte = AsciiEncoding.parseIntAscii(host, 0, i);
// IPv4 multicast addresses are defined by the most-significant bit pattern of 1110
if (firstByte > 0xFF || 0xE0 != (firstByte & 0xF0))
{
return false;
}
}
dotIndex = i;
}
else if (c < '0' || c > '9')
{
return false;
}
}
return true;
}
else
{
result = tryParseIpV6(hostAndPort);
if (null != result)
{
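// IPv6 multicast addresses fall within ff00::/8, so the first two hex digits must be "ff".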
final String firstByte = result.host.substring(0, 2);
return "ff".equalsIgnoreCase(firstByte);
}
throw new IllegalArgumentException("invalid format: " + hostAndPort);
}
}
|
@Test
void shouldThrowNullPointerExceptionIfValueIsNull()
{
assertThrowsExactly(NullPointerException.class, () -> SocketAddressParser.isMulticastAddress(null));
}
|
@Override
public ConnectHeaders duplicate() {
return new ConnectHeaders(this);
}
|
@Test
public void shouldDuplicateAndAlwaysReturnEquivalentButDifferentObject() {
assertEquals(headers, headers.duplicate());
assertNotSame(headers, headers.duplicate());
}
|
@Override
public String name() {
return name;
}
|
@Test
public void testNotExposeTableProperties() {
Configuration conf = new Configuration();
conf.set("iceberg.hive.table-property-max-size", "0");
HiveTableOperations ops =
new HiveTableOperations(conf, null, null, catalog.name(), DB_NAME, "tbl");
TableMetadata metadata = mock(TableMetadata.class);
Map<String, String> parameters = Maps.newHashMap();
parameters.put(CURRENT_SNAPSHOT_SUMMARY, "summary");
parameters.put(CURRENT_SNAPSHOT_ID, "snapshotId");
parameters.put(CURRENT_SNAPSHOT_TIMESTAMP, "timestamp");
parameters.put(CURRENT_SCHEMA, "schema");
parameters.put(DEFAULT_PARTITION_SPEC, "partitionSpec");
parameters.put(DEFAULT_SORT_ORDER, "sortOrder");
ops.setSnapshotStats(metadata, parameters);
assertThat(parameters)
.doesNotContainKey(CURRENT_SNAPSHOT_SUMMARY)
.doesNotContainKey(CURRENT_SNAPSHOT_ID)
.doesNotContainKey(CURRENT_SNAPSHOT_TIMESTAMP);
ops.setSchema(metadata.schema(), parameters);
assertThat(parameters).doesNotContainKey(CURRENT_SCHEMA);
ops.setPartitionSpec(metadata, parameters);
assertThat(parameters).doesNotContainKey(DEFAULT_PARTITION_SPEC);
ops.setSortOrder(metadata, parameters);
assertThat(parameters).doesNotContainKey(DEFAULT_SORT_ORDER);
}
|
@Override
public void replay(
long offset,
long producerId,
short producerEpoch,
CoordinatorRecord record
) throws RuntimeException {
ApiMessageAndVersion key = record.key();
ApiMessageAndVersion value = record.value();
switch (key.version()) {
case 0:
case 1:
offsetMetadataManager.replay(
offset,
producerId,
(OffsetCommitKey) key.message(),
(OffsetCommitValue) Utils.messageOrNull(value)
);
break;
case 2:
groupMetadataManager.replay(
(GroupMetadataKey) key.message(),
(GroupMetadataValue) Utils.messageOrNull(value)
);
break;
case 3:
groupMetadataManager.replay(
(ConsumerGroupMetadataKey) key.message(),
(ConsumerGroupMetadataValue) Utils.messageOrNull(value)
);
break;
case 4:
groupMetadataManager.replay(
(ConsumerGroupPartitionMetadataKey) key.message(),
(ConsumerGroupPartitionMetadataValue) Utils.messageOrNull(value)
);
break;
case 5:
groupMetadataManager.replay(
(ConsumerGroupMemberMetadataKey) key.message(),
(ConsumerGroupMemberMetadataValue) Utils.messageOrNull(value)
);
break;
case 6:
groupMetadataManager.replay(
(ConsumerGroupTargetAssignmentMetadataKey) key.message(),
(ConsumerGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value)
);
break;
case 7:
groupMetadataManager.replay(
(ConsumerGroupTargetAssignmentMemberKey) key.message(),
(ConsumerGroupTargetAssignmentMemberValue) Utils.messageOrNull(value)
);
break;
case 8:
groupMetadataManager.replay(
(ConsumerGroupCurrentMemberAssignmentKey) key.message(),
(ConsumerGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value)
);
break;
case 9:
groupMetadataManager.replay(
(ShareGroupPartitionMetadataKey) key.message(),
(ShareGroupPartitionMetadataValue) Utils.messageOrNull(value)
);
break;
case 10:
groupMetadataManager.replay(
(ShareGroupMemberMetadataKey) key.message(),
(ShareGroupMemberMetadataValue) Utils.messageOrNull(value)
);
break;
case 11:
groupMetadataManager.replay(
(ShareGroupMetadataKey) key.message(),
(ShareGroupMetadataValue) Utils.messageOrNull(value)
);
break;
case 12:
groupMetadataManager.replay(
(ShareGroupTargetAssignmentMetadataKey) key.message(),
(ShareGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value)
);
break;
case 13:
groupMetadataManager.replay(
(ShareGroupTargetAssignmentMemberKey) key.message(),
(ShareGroupTargetAssignmentMemberValue) Utils.messageOrNull(value)
);
break;
case 14:
groupMetadataManager.replay(
(ShareGroupCurrentMemberAssignmentKey) key.message(),
(ShareGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value)
);
break;
default:
throw new IllegalStateException("Received an unknown record type " + key.version()
+ " in " + record);
}
}
|
@Test
public void testReplayGroupMetadataWithNullValue() {
GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
new LogContext(),
groupMetadataManager,
offsetMetadataManager,
Time.SYSTEM,
new MockCoordinatorTimer<>(Time.SYSTEM),
mock(GroupCoordinatorConfig.class),
coordinatorMetrics,
metricsShard
);
GroupMetadataKey key = new GroupMetadataKey();
coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord(
new ApiMessageAndVersion(key, (short) 2),
null
));
verify(groupMetadataManager, times(1)).replay(key, null);
}
|
@Override
public void register(ConnectRestExtensionContext restPluginContext) {
log.trace("Registering JAAS basic auth filter");
restPluginContext.configurable().register(new JaasBasicAuthFilter(configuration.get()));
log.trace("Finished registering JAAS basic auth filter");
}
|
@SuppressWarnings("unchecked")
@Test
public void testJaasConfigurationNotOverwritten() {
ArgumentCaptor<JaasBasicAuthFilter> jaasFilter = ArgumentCaptor.forClass(JaasBasicAuthFilter.class);
Configurable<? extends Configurable<?>> configurable = mock(Configurable.class);
when(configurable.register(jaasFilter.capture())).thenReturn(null);
ConnectRestExtensionContext context = mock(ConnectRestExtensionContext.class);
when(context.configurable()).thenReturn((Configurable) configurable);
BasicAuthSecurityRestExtension extension = new BasicAuthSecurityRestExtension();
Configuration overwrittenConfiguration = mock(Configuration.class);
Configuration.setConfiguration(overwrittenConfiguration);
extension.register(context);
assertNotEquals(overwrittenConfiguration, jaasFilter.getValue().configuration,
"Overwritten JAAS configuration should not be used by basic auth REST extension");
}
|
@Override
@DSTransactional // multiple data sources: use @DSTransactional to guarantee the local transaction and the data-source switch
public void updateTenant(TenantSaveReqVO updateReqVO) {
    // validate that the tenant exists
    TenantDO tenant = validateUpdateTenant(updateReqVO.getId());
    // validate that the tenant name is not duplicated
    validTenantNameDuplicate(updateReqVO.getName(), updateReqVO.getId());
    // validate that the tenant website is not duplicated
    validTenantWebsiteDuplicate(updateReqVO.getWebsite(), updateReqVO.getId());
    // validate that the tenant package is not disabled
    TenantPackageDO tenantPackage = tenantPackageService.validTenantPackage(updateReqVO.getPackageId());
    // update the tenant
    TenantDO updateObj = BeanUtils.toBean(updateReqVO, TenantDO.class);
    tenantMapper.updateById(updateObj);
    // if the package changed, update the permissions of its roles
    if (ObjectUtil.notEqual(tenant.getPackageId(), updateReqVO.getPackageId())) {
        updateTenantRoleMenu(tenant.getId(), tenantPackage.getMenuIds());
    }
}
|
@Test
public void testUpdateTenant_notExists() {
    // prepare parameters
    TenantSaveReqVO reqVO = randomPojo(TenantSaveReqVO.class);
    // invoke and assert the exception
    assertServiceException(() -> tenantService.updateTenant(reqVO), TENANT_NOT_EXISTS);
}
|
private synchronized boolean validateClientAcknowledgement(long h) {
if (h < 0) {
throw new IllegalArgumentException("Argument 'h' cannot be negative, but was: " + h);
}
if (h > MASK) {
throw new IllegalArgumentException("Argument 'h' cannot be larger than 2^32 -1, but was: " + h);
}
final long oldH = clientProcessedStanzas.get();
final Long lastUnackedX = unacknowledgedServerStanzas.isEmpty() ? null : unacknowledgedServerStanzas.getLast().x;
return validateClientAcknowledgement(h, oldH, lastUnackedX);
}
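// Note (hedged, derived from the rollover test below): 'h' is the stream-management
// acknowledgement counter (XEP-0198), bounded by MASK = 2^32 - 1. Because the counter
// wraps around, the three-argument overload must accept an 'h' that is numerically
// smaller than the previously observed value when a rollover has occurred.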
|
@Test
public void testValidateClientAcknowledgement_rollover_edgecase5() throws Exception
{
// Setup test fixture.
final long MAX = new BigInteger( "2" ).pow( 32 ).longValue() - 1;
final long h = 3;
final long oldH = MAX - 2;
final Long lastUnackedX = 4L;
// Execute system under test.
final boolean result = StreamManager.validateClientAcknowledgement(h, oldH, lastUnackedX);
// Verify results.
assertTrue(result);
}
|
@Override
public Collection<DelayMeasurementStatHistory> getDmHistoricalStats(
MdId mdName, MaIdShort maName, MepId mepId, SoamId dmId)
throws SoamConfigException, CfmConfigException {
MepEntry mep = cfmMepService.getMep(mdName, maName, mepId);
if (mep == null || mep.deviceId() == null) {
throw new CfmConfigException("MEP :"
+ mdName + "/" + maName + "/" + mepId + " does not exist");
} else if (deviceService.getDevice(mep.deviceId()) == null) {
throw new CfmConfigException("Device " + mep.deviceId() + " from MEP :"
+ mdName + "/" + maName + "/" + mepId + " does not exist");
} else if (!deviceService.getDevice(mep.deviceId()).is(SoamDmProgrammable.class)) {
throw new CfmConfigException("Device " + mep.deviceId() + " from MEP :"
+ mdName + "/" + maName + "/" + mepId +
" does not implement SoamDmProgrammable");
}
log.debug("Retrieving History Stats for DM {} in MD {}, MA {}, MEP {} "
+ "on Device {}", dmId, mdName, maName, mepId, mep.deviceId());
return deviceService.getDevice(mep.deviceId())
.as(SoamDmProgrammable.class).getDmHistoricalStats(mdName, maName, mepId, dmId);
}
|
@Test
public void testGetDmHistoryStats() throws CfmConfigException, SoamConfigException {
expect(deviceService.getDevice(DEVICE_ID1)).andReturn(device1).anyTimes();
replay(deviceService);
expect(mepService.getMep(MDNAME1, MANAME1, MEPID1)).andReturn(mep1).anyTimes();
replay(mepService);
expect(driverService.getDriver(DEVICE_ID1)).andReturn(testDriver).anyTimes();
replay(driverService);
Collection<DelayMeasurementStatHistory> dmHistoricalStats =
soamManager.getDmHistoricalStats(MDNAME1, MANAME1, MEPID1, DMID101);
assertNotNull(dmHistoricalStats);
assertEquals(2, dmHistoricalStats.size());
}
|
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response createNetwork(InputStream input) throws IOException {
log.trace(String.format(MESSAGE, "CREATE"));
String inputStr = IOUtils.toString(input, REST_UTF8);
if (!haService.isActive()
&& !DEFAULT_ACTIVE_IP_ADDRESS.equals(haService.getActiveIp())) {
return syncPost(haService, NETWORKS, inputStr);
}
final NeutronNetwork net = (NeutronNetwork)
jsonToModelEntity(inputStr, NeutronNetwork.class);
adminService.createNetwork(net);
UriBuilder locationBuilder = uriInfo.getBaseUriBuilder()
.path(NETWORKS)
.path(net.getId());
return created(locationBuilder.build()).build();
}
|
@Test
public void testCreateNetworkWithDuplicatedId() {
mockOpenstackNetworkAdminService.createNetwork(anyObject());
expectLastCall().andThrow(new IllegalArgumentException());
replay(mockOpenstackNetworkAdminService);
expect(mockOpenstackHaService.isActive()).andReturn(true).anyTimes();
replay(mockOpenstackHaService);
final WebTarget wt = target();
InputStream jsonStream = OpenstackNetworkWebResourceTest.class
.getResourceAsStream("openstack-network.json");
Response response = wt.path(PATH).request(MediaType.APPLICATION_JSON_TYPE)
.post(Entity.json(jsonStream));
final int status = response.getStatus();
assertThat(status, is(400));
verify(mockOpenstackNetworkAdminService);
}
|
public MyNewIssuesNotification newMyNewIssuesNotification(Map<String, UserDto> assigneesByUuid) {
verifyAssigneesByUuid(assigneesByUuid);
return new MyNewIssuesNotification(new DetailsSupplierImpl(assigneesByUuid));
}
|
@Test
public void newMyNewIssuesNotification_DetailsSupplier_getRuleDefinitionByRuleKey_fails_with_NPE_if_ruleKey_is_null() {
MyNewIssuesNotification underTest = this.underTest.newMyNewIssuesNotification(emptyMap());
DetailsSupplier detailsSupplier = readDetailsSupplier(underTest);
assertThatThrownBy(() -> detailsSupplier.getRuleDefinitionByRuleKey(null))
.isInstanceOf(NullPointerException.class)
.hasMessage("ruleKey can't be null");
}
|
public Set<String> heartbeatTopics() throws InterruptedException {
return listTopics().stream()
.filter(this::isHeartbeatTopic)
.collect(Collectors.toSet());
}
|
@Test
public void heartbeatTopicsTest() throws InterruptedException {
MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "heartbeats",
"source1.heartbeats", "source2.source1.heartbeats", "source3.heartbeats"));
Set<String> heartbeatTopics = client.heartbeatTopics();
assertEquals(heartbeatTopics, new HashSet<>(Arrays.asList("heartbeats", "source1.heartbeats",
"source2.source1.heartbeats", "source3.heartbeats")));
}
|
public void start() {
heartbeatExecutor.scheduleAtFixedRate(
new Runnable() {
@Override
public void run() {
                    // Setting a consul check to "pass" triggers a write operation on the
                    // consul server, so heartbeating too frequently would hurt consul
                    // performance. The heartbeat therefore uses a long cycle while the
                    // switcher check uses a short cycle: the switcher is checked several
                    // times before one heartbeat is sent to the consul server.
                    // TODO: change to a switcher-listener approach.
try {
boolean switcherStatus = isHeartbeatOpen();
                        if (isSwitcherChange(switcherStatus)) { // heartbeat switcher status changed
                            processHeartbeat(switcherStatus);
                        } else { // heartbeat switcher status not changed
                            if (switcherStatus) { // switcher is on: after MAX_SWITCHER_CHECK_TIMES checks, send one heartbeat
switcherCheckTimes++;
if (switcherCheckTimes >= ConsulConstants.MAX_SWITCHER_CHECK_TIMES) {
processHeartbeat(true);
switcherCheckTimes = 0;
}
}
}
} catch (Exception e) {
logger.error("consul heartbeat executor err:",
e);
}
}
}, ConsulConstants.SWITCHER_CHECK_CIRCLE,
ConsulConstants.SWITCHER_CHECK_CIRCLE, TimeUnit.MILLISECONDS);
}
|
@Test
public void testStart() throws InterruptedException {
heartbeatManager.start();
Map<String, Long> mockServices = new HashMap<String, Long>();
int serviceNum = 5;
for (int i = 0; i < serviceNum; i++) {
String serviceid = "service" + i;
mockServices.put(serviceid, 0L);
heartbeatManager.addHeartbeatServcieId(serviceid);
}
// switch on heart beat
setHeartbeatSwitcher(true);
checkHeartbeat(mockServices, true, serviceNum);
// switch off heart beat
setHeartbeatSwitcher(false);
Thread.sleep(100);
checkHeartbeat(mockServices, false, serviceNum);
}
|
public static InMemorySorter create(Options options) {
return new InMemorySorter(options);
}
|
@Test
public void testManySorters() throws Exception {
SorterTestUtils.testRandom(
() -> InMemorySorter.create(new InMemorySorter.Options()), 1000000, 10);
}
|
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return PathAttributes.EMPTY;
}
if(containerService.isContainer(file)) {
return PathAttributes.EMPTY;
}
return this.toAttributes(this.details(file));
}
|
@Test
public void testFindFile() throws Exception {
final Path container = new SpectraDirectoryFeature(session, new SpectraWriteFeature(session)).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path test = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new SpectraTouchFeature(session).touch(test, new TransferStatus());
final SpectraAttributesFinderFeature f = new SpectraAttributesFinderFeature(session);
final PathAttributes attributes = f.find(test);
assertEquals(0L, attributes.getSize());
assertEquals("d41d8cd98f00b204e9800998ecf8427e", attributes.getChecksum().hash);
// Missing support for modification date
assertEquals(-1L, attributes.getModificationDate());
// Test wrong type
try {
f.find(new Path(test.getAbsolute(), EnumSet.of(Path.Type.directory, Path.Type.placeholder)));
fail();
}
catch(NotfoundException e) {
// Expected
}
new SpectraDeleteFeature(session).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
public Collection<Integer> getOutboundPorts(EndpointQualifier endpointQualifier) {
final AdvancedNetworkConfig advancedNetworkConfig = node.getConfig().getAdvancedNetworkConfig();
if (advancedNetworkConfig.isEnabled()) {
EndpointConfig endpointConfig = advancedNetworkConfig.getEndpointConfigs().get(endpointQualifier);
final Collection<Integer> outboundPorts = endpointConfig != null
? endpointConfig.getOutboundPorts() : Collections.emptyList();
final Collection<String> outboundPortDefinitions = endpointConfig != null
? endpointConfig.getOutboundPortDefinitions() : Collections.emptyList();
return AddressUtil.getOutboundPorts(outboundPorts, outboundPortDefinitions);
}
final NetworkConfig networkConfig = node.getConfig().getNetworkConfig();
final Collection<Integer> outboundPorts = networkConfig.getOutboundPorts();
final Collection<String> outboundPortDefinitions = networkConfig.getOutboundPortDefinitions();
return AddressUtil.getOutboundPorts(outboundPorts, outboundPortDefinitions);
}
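// Note (derived from the test below): outbound port definitions are parsed by
// AddressUtil and may list several ports in one definition separated by a space,
// e.g. "29000 29001".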
|
@Test
public void testGetOutboundPorts_acceptsSpaceAsASeparator() {
networkConfig.addOutboundPortDefinition("29000 29001");
Collection<Integer> outboundPorts = serverContext.getOutboundPorts(MEMBER);
assertThat(outboundPorts).hasSize(2);
assertThat(outboundPorts).containsExactlyInAnyOrder(29000, 29001);
}
|
@Override
public String getSQLListOfSchemas( DatabaseMeta databaseMeta ) {
String databaseName = getDatabaseName();
if ( databaseMeta != null ) {
databaseName = databaseMeta.environmentSubstitute( databaseName );
}
return "SELECT SCHEMA_NAME AS \"name\" FROM " + databaseName + ".INFORMATION_SCHEMA.SCHEMATA";
}
|
@Test
public void testGetSQLListOfSchemasWithoutParameter() {
SnowflakeHVDatabaseMeta snowflakeHVDatabaseMeta = spy( new SnowflakeHVDatabaseMeta() );
snowflakeHVDatabaseMeta.getSQLListOfSchemas();
verify( snowflakeHVDatabaseMeta ).getSQLListOfSchemas( null );
}
|
public String format(DataTable table) {
StringBuilder result = new StringBuilder();
formatTo(table, result);
return result.toString();
}
|
@Test
void should_print() {
DataTable table = tableOf("hello");
assertEquals("| hello |\n", formatter.format(table));
}
|
@Override
public boolean shouldHandle(Request request)
{
// we don't check the method here because we want to return 405 if it is anything but POST
return MUX_URI_PATH.equals(request.getURI().getPath());
}
|
@Test(dataProvider = "multiplexerConfigurations")
public void testIsMultiplexedRequest(MultiplexerRunMode multiplexerRunMode) throws Exception
{
MultiplexedRequestHandlerImpl multiplexer = createMultiplexer(null, multiplexerRunMode);
RestRequest request = fakeMuxRestRequest();
assertTrue(multiplexer.shouldHandle(request));
}
|
public void setStayDuration(double stayDuration) {
this.stayDuration = stayDuration;
}
|
@Test
public void setStayDuration() {
SAExposureConfig saExposureConfig = new SAExposureConfig(1,1,true);
saExposureConfig.setStayDuration(2);
assertEquals(2, saExposureConfig.getStayDuration(), 0.2);
}
|
public IndicesStatsResponse indicesStats(String... indices) {
return execute(() -> {
Request request = new Request("GET", "/" + (indices.length > 0 ? (String.join(",", indices) + "/") : "") + "_stats");
request.addParameter("level", "shards");
Response response = restHighLevelClient.getLowLevelClient().performRequest(request);
return IndicesStatsResponse.toIndicesStatsResponse(gson.fromJson(EntityUtils.toString(response.getEntity()), JsonObject.class));
}, () -> computeDetailsAsString(indices));
}
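// Request paths produced by the string concatenation above (illustrative):
//   indicesStats()                  -> GET /_stats?level=shards
//   indicesStats("logs", "metrics") -> GET /logs,metrics/_stats?level=shards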
|
@Test
public void should_rethrow_ex_on_indices_stat_fail() throws Exception {
when(restClient.performRequest(argThat(new RawRequestMatcher(
"GET",
"/_stats"))))
.thenThrow(IOException.class);
assertThatThrownBy(() -> underTest.indicesStats())
.isInstanceOf(ElasticsearchException.class);
}
|
@Override
@CheckForNull
public EmailMessage format(Notification notif) {
if (!(notif instanceof ChangesOnMyIssuesNotification)) {
return null;
}
ChangesOnMyIssuesNotification notification = (ChangesOnMyIssuesNotification) notif;
if (notification.getChange() instanceof AnalysisChange) {
checkState(!notification.getChangedIssues().isEmpty(), "changedIssues can't be empty");
return formatAnalysisNotification(notification.getChangedIssues().keySet().iterator().next(), notification);
}
return formatMultiProject(notification);
}
|
@Test
public void formats_returns_html_message_for_multiple_issues_of_same_rule_on_same_project_on_master_when_analysis_change() {
Project project = newProject("1");
String ruleName = randomAlphabetic(8);
String host = randomAlphabetic(15);
Rule rule = newRule(ruleName, randomRuleTypeHotspotExcluded());
String issueStatus = randomValidStatus();
List<ChangedIssue> changedIssues = IntStream.range(0, 2 + new Random().nextInt(5))
.mapToObj(i -> newChangedIssue("issue_" + i, issueStatus, project, rule))
.collect(toList());
AnalysisChange analysisChange = newAnalysisChange();
when(emailSettings.getServerBaseURL()).thenReturn(host);
EmailMessage emailMessage = underTest.format(new ChangesOnMyIssuesNotification(analysisChange, ImmutableSet.copyOf(changedIssues)));
String expectedHref = host + "/project/issues?id=" + project.getKey()
+ "&issues=" + changedIssues.stream().map(ChangedIssue::getKey).collect(joining("%2C"));
String expectedLinkText = "See all " + changedIssues.size() + " issues";
HtmlFragmentAssert.assertThat(emailMessage.getMessage())
.hasParagraph().hasParagraph() // skip header
.hasParagraph() // skip title based on status
.hasList("Rule " + ruleName + " - " + expectedLinkText)
.withLink(expectedLinkText, expectedHref)
.hasParagraph().hasParagraph() // skip footer
.noMoreBlock();
}
|
@Override
public boolean login() throws LoginException {
Callback[] callbacks = new Callback[2];
callbacks[0] = new NameCallback("User name");
callbacks[1] = new PasswordCallback("Password", false);
try {
handler.handle(callbacks);
} catch (IOException | UnsupportedCallbackException ioe) {
throw (LoginException)new LoginException().initCause(ioe);
}
String password;
String username = ((NameCallback)callbacks[0]).getName();
if (username == null)
return false;
if (((PasswordCallback)callbacks[1]).getPassword() != null)
password = new String(((PasswordCallback)callbacks[1]).getPassword());
else
password="";
// authenticate will throw LoginException
// in case of failed authentication
authenticate(username, password);
user = new UserPrincipal(username);
succeeded = true;
return true;
}
|
@Test
public void testLogin() throws LoginException {
LoginContext context = new LoginContext("LDAPLogin", new CallbackHandler() {
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
for (int i = 0; i < callbacks.length; i++) {
if (callbacks[i] instanceof NameCallback) {
((NameCallback) callbacks[i]).setName("first");
} else if (callbacks[i] instanceof PasswordCallback) {
((PasswordCallback) callbacks[i]).setPassword("secret".toCharArray());
} else {
throw new UnsupportedCallbackException(callbacks[i]);
}
}
}
});
context.login();
context.logout();
}
|
public String removeComments( String script ) {
if ( script == null ) {
return null;
}
StringBuilder result = new StringBuilder();
MODE mode = MODE.SQL;
char currentStringChar = 0;
for ( int i = 0; i < script.length(); i++ ) {
char ch = script.charAt( i );
char nextCh = i < script.length() - 1 ? script.charAt( i + 1 ) : 0;
char nextPlusOneCh = i < script.length() - 2 ? script.charAt( i + 2 ) : 0;
switch ( mode ) {
case SQL:
switch ( ch ) {
case '/':
if ( nextCh == '*' && nextPlusOneCh != '+' ) {
mode = MODE.BLOCK_COMMENT;
i++;
ch = 0;
}
break;
case '-':
if ( nextCh == '-' ) {
mode = MODE.LINE_COMMENT;
i++;
ch = 0;
}
break;
case '\'':
case '"':
mode = MODE.STRING;
currentStringChar = ch;
break;
}
break;
case BLOCK_COMMENT:
if ( ch == '*' ) {
if ( nextCh == '/' ) {
mode = MODE.SQL;
i++;
}
}
ch = 0;
break;
case LINE_COMMENT:
if ( ch == '\n' || ch == '\r' ) {
mode = MODE.SQL;
} else {
ch = 0;
}
break;
case STRING:
if ( ch == '\\' && nextCh == currentStringChar && usingBackslashAsEscapeCharForQuotation ) {
            /*
             * The user is hard-coding a quote character into the string.
             * Pass the hard-coded quote character through, and skip over the quote on the next loop.
             */
            /*
             * usingBackslashAsEscapeCharForQuotation
             * PDI-16224.
             *
             * The ANSI standard specifies that using the backslash character (\) to escape single (')
             * or double (") quotation marks is invalid. For example, the following attempt to find a
             * quotation mark does not conform to ANSI standards:
             *   where col1 = '\''
             * A construction such as '\'|| is valid for Oracle but not for other DBs (e.g. MySQL).
             */
result.append( ch );
result.append( nextCh );
ch = 0;
i++;
} else if ( ch == currentStringChar ) {
mode = MODE.SQL;
}
break;
}
if ( ch != 0 ) {
result.append( ch );
}
}
return result.toString();
}
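// Illustrative behaviour (mirrors the tests below): line and block comments are
// stripped, while string literals and Oracle optimizer hints (/*+ ... */) pass through:
//   removeComments("SELECT col1 FROM test --end comment")   -> "SELECT col1 FROM test "
//   removeComments("SELECT /*+ ORACLE hint*/ col1 FROM t")  -> unchanged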
|
@Test
public void testRemoveComments() {
assertEquals( null, sqlScriptParser.removeComments( null ) );
assertEquals( "", sqlScriptParser.removeComments( "" ) );
assertEquals( "SELECT col1 FROM test", sqlScriptParser.removeComments( "SELECT col1 FROM test" ) );
assertEquals( "SELECT col1 FROM test ", sqlScriptParser.removeComments( "SELECT col1 FROM test --end comment" ) );
assertEquals( "SELECT \n col1, col2\n FROM \n test", sqlScriptParser.removeComments( "SELECT \n col1, col2\n FROM \n test" ) );
assertEquals( "SELECT \n \"col1\", col2\n FROM \n test", sqlScriptParser.removeComments( "SELECT \n \"col1\", col2\n FROM --test\n test" ) );
assertEquals( "SELECT col1 FROM account", sqlScriptParser.removeComments( "SELECT /* \"my_column'\" */ col1 FROM /* 'my_table' */ account" ) );
assertEquals( "SELECT '/' as col1, '*/*' as regex ", sqlScriptParser.removeComments( "SELECT '/' as col1, '*/*' as regex " ) );
assertEquals( "SELECT INSTR('/loader/*/*.txt', '/') - INSTR('/loader/*/*.txt', '/') ",
sqlScriptParser.removeComments( "SELECT INSTR('/loader/*/*.txt', '/') - INSTR('/loader/*/*.txt', '/') " ) );
assertEquals( "SELECT col1, col2, col3 FROM account WHERE name = 'Pentaho'",
sqlScriptParser.removeComments( "SELECT /* my data*/ col1, col2, col3 FROM account WHERE name = 'Pentaho'" ) );
assertEquals( "SELECT /*+ ORACLE hint*/ col1, col2, col3 FROM account WHERE name = 'Pentaho'",
sqlScriptParser.removeComments( "SELECT /*+ ORACLE hint*/ col1, col2, col3 FROM account WHERE name = 'Pentaho'" ) );
assertEquals( "SELECT \n/*+ ORACLE hint*/ col1, col2, col3 FROM account WHERE name = 'Pentaho'",
sqlScriptParser.removeComments( "SELECT \n/*+ ORACLE hint*/ col1, col2, col3 FROM account WHERE name = 'Pentaho'" ) );
assertEquals( "SELECT \n/*+ ORACLE hint*/\n col1, col2, col3 FROM account WHERE name = 'Pentaho'",
sqlScriptParser.removeComments( "SELECT \n/*+ ORACLE hint*/\n col1, col2, col3 FROM account WHERE name = 'Pentaho'" ) );
assertEquals( "SELECT \"hello\\\"world\" FROM dual", sqlScriptParser.removeComments( "SELECT \"hello\\\"world\" FROM dual" ) );
assertEquals( "CREATE TABLE test1 (col1 STRING) TBLPROPERTIES (\"prop1\" = \"my\\\"value\")",
sqlScriptParser.removeComments( "CREATE TABLE test1 (col1 STRING) TBLPROPERTIES (\"prop1\" = \"my\\\"value\")" ) );
assertEquals( "CREATE TABLE test1 (col1 STRING) TBLPROPERTIES ('prop1' = 'my\\\"value')",
sqlScriptParser.removeComments( "CREATE TABLE test1 (col1 STRING) TBLPROPERTIES ('prop1' = 'my\\\"value')" ) );
//PDI-16224
assertEquals( "SELECT 1 from test where t='\\'||t=a", oracleSqlScriptParser.removeComments( "SELECT 1 from test where t='\\'/* comment */||t=a" ) );
}
|
String zone(String podName) {
String nodeUrlString = String.format("%s/api/v1/nodes/%s", kubernetesMaster, nodeName(podName));
return extractZone(callGet(nodeUrlString));
}
|
@Test
public void zoneFailureDomain() throws JsonProcessingException {
// given
String podName = "pod-name";
stub(String.format("/api/v1/namespaces/%s/pods/%s", NAMESPACE, podName), pod("hazelcast-0", NAMESPACE, "node-name"));
//language=JSON
String nodeResponse = """
{
"kind": "Node",
"metadata": {
"labels": {
"failure-domain.beta.kubernetes.io/region": "deprecated-region",
"failure-domain.beta.kubernetes.io/zone": "deprecated-zone",
"failure-domain.kubernetes.io/region": "us-central1",
"failure-domain.kubernetes.io/zone": "us-central1-a"
}
}
}""";
stub("/api/v1/nodes/node-name", nodeResponse);
// when
String zone = kubernetesClient.zone(podName);
// then
assertEquals("us-central1-a", zone);
}
|
@Override
public InputStream getTaskStateFile(String state, String name) throws IOException {
return getTaskStateFile(state, name, false, true);
}
|
@Test
void shouldGetTaskStateFileFromTriggerContext() throws IOException {
// Given
StorageInterface storageInterface = Mockito.mock(StorageInterface.class);
InputStream is = new ByteArrayInputStream(new byte[0]);
Mockito.when(storageInterface.get(any(), eq(URI.create("/namespace/states/state/name")))).thenReturn(is);
InternalStorage storage = new InternalStorage(StorageContext.forTrigger(null,
"namespace",
"flowid",
"executionId",
"triggerId"), storageInterface);
// When
InputStream result = storage.getTaskStateFile("state", "name", true, true);
// Then
Assertions.assertEquals(result, is);
}
|
public static void removeDupes(
final List<CharSequence> suggestions, List<CharSequence> stringsPool) {
if (suggestions.size() < 2) return;
int i = 1;
// Don't cache suggestions.size(), since we may be removing items
while (i < suggestions.size()) {
final CharSequence cur = suggestions.get(i);
// Compare each suggestion with each previous suggestion
for (int j = 0; j < i; j++) {
CharSequence previous = suggestions.get(j);
if (TextUtils.equals(cur, previous)) {
removeSuggestion(suggestions, i, stringsPool);
i--;
break;
}
}
i++;
}
}
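// Illustrative behaviour (mirrors the test below): later duplicates are removed in
// place, keeping the first occurrence and the relative order of the survivors:
//   ["typed", "typed", "typed"] -> ["typed"]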
|
@Test
public void testRemoveDupesOnlyDupes() throws Exception {
ArrayList<CharSequence> list =
new ArrayList<>(Arrays.<CharSequence>asList("typed", "typed", "typed", "typed", "typed"));
IMEUtil.removeDupes(list, mStringPool);
Assert.assertEquals(1, list.size());
Assert.assertEquals("typed", list.get(0));
}
|
public ColumnConstraints getConstraints() {
return constraints;
}
|
@Test
public void shouldReturnEmptyConstraints() {
// Given:
final TableElement valueElement = new TableElement(NAME, new Type(SqlTypes.STRING));
// Then:
assertThat(valueElement.getConstraints(), is(NO_COLUMN_CONSTRAINTS));
}
|
@Override
@Deprecated
public <VR> KStream<K, VR> transformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier<? super V, ? extends VR> valueTransformerSupplier,
final String... stateStoreNames) {
Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null");
return doTransformValues(
toValueTransformerWithKeySupplier(valueTransformerSupplier),
NamedInternal.empty(),
stateStoreNames);
}
|
@Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullValueTransformerSupplierOnTransformValuesWithNamedAndStores() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.transformValues(
(org.apache.kafka.streams.kstream.ValueTransformerSupplier<Object, Object>) null,
Named.as("valueTransformer"),
"storeName"));
assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null"));
}
|
public boolean isSameShardingCondition() {
Collection<String> hintStrategyTables = findHintStrategyTables(sqlStatementContext);
return 1 == hintStrategyTables.size() || subqueryContainsShardingCondition && 1 == conditions.size();
}
|
@Test
void assertIsSameShardingConditionFalse() {
ShardingConditions shardingConditions = createMultipleShardingConditions();
assertFalse(shardingConditions.isSameShardingCondition());
}
|
public long lastAppliedOffset() {
return metrics.lastAppliedOffset();
}
|
@Test
public void testCreateAndClose() throws Exception {
MockFaultHandler faultHandler = new MockFaultHandler("testCreateAndClose");
try (MetadataLoader loader = new MetadataLoader.Builder().
setFaultHandler(faultHandler).
setHighWaterMarkAccessor(OptionalLong::empty).
build()) {
assertEquals(-1L, loader.lastAppliedOffset());
}
faultHandler.maybeRethrowFirstException();
}
|
@JsonProperty
public URI getUri()
{
return uri;
}
|
@Test
public void testQueryDividedIntoSplitsShouldHaveCorrectSpacingBetweenTimes()
{
Instant now = LocalDateTime.of(2019, 10, 2, 7, 26, 56, 0).toInstant(UTC);
PrometheusConnectorConfig config = getCommonConfig(prometheusHttpServer.resolve("/prometheus-data/prometheus-metrics.json"));
PrometheusClient client = new PrometheusClient(config, METRIC_CODEC, TYPE_MANAGER);
PrometheusTable table = client.getTable("default", "up");
PrometheusTableHandle tableHandle = new PrometheusTableHandle("default", table.getName());
TupleDomain<ColumnHandle> columnConstraints = TupleDomain.withColumnDomains(
ImmutableMap.of(
new PrometheusColumnHandle("value", BIGINT, 1), Domain.all(VARCHAR),
new PrometheusColumnHandle("text", createUnboundedVarcharType(), 0), Domain.all(VARCHAR)));
PrometheusTableLayoutHandle tableLayoutHandle = new PrometheusTableLayoutHandle(tableHandle, columnConstraints);
PrometheusSplitManager splitManager = new PrometheusSplitManager(client, fixedClockAt(now), config);
ConnectorSplitSource splits = splitManager.getSplits(
null,
null,
tableLayoutHandle,
null);
PrometheusSplit split1 = (PrometheusSplit) splits.getNextBatch(NOT_PARTITIONED, 1).getNow(null).getSplits().get(0);
Map<String, String> paramsMap1 = new HashMap<>();
String[] splitKV1 = split1.getUri().getQuery().split("&");
paramsMap1.put("query", splitKV1[0].split("=")[1]);
paramsMap1.put("time", splitKV1[1].split("=")[1]);
assertEquals(paramsMap1.get("query"), "up[1d]");
PrometheusSplit split2 = (PrometheusSplit) splits.getNextBatch(NOT_PARTITIONED, 1).getNow(null).getSplits().get(0);
Map<String, String> paramsMap2 = new HashMap<>();
String[] splitKV2 = split2.getUri().getQuery().split("&");
paramsMap2.put("query", splitKV2[0].split("=")[1]);
paramsMap2.put("time", splitKV2[1].split("=")[1]);
assertEquals(paramsMap2.get("query"), "up[1d]");
long diff = Double.valueOf(paramsMap2.get("time")).longValue() - Double.valueOf(paramsMap1.get("time")).longValue();
assertEquals(config.getQueryChunkSizeDuration().getValue(TimeUnit.SECONDS), diff, 0.0001);
}
|
public FEELFnResult<BigDecimal> invoke(@ParameterName("string") String string) {
if ( string == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "string", "cannot be null"));
} else {
return FEELFnResult.ofResult(NumberEvalHelper.getBigDecimalOrNull(string.codePointCount(0, string.length())));
}
}
|
@Test
void invokeNull() {
FunctionTestUtil.assertResultError(stringLengthFunction.invoke(null), InvalidParametersEvent.class);
}
|
public Set<DatabaseTableName> getCachedTableNames() {
    // Use the partition cache to get all cached table names because it is more accurate;
    // the table cache is only populated when the user runs a `use catalog.db` command.
return partitionCache.asMap().keySet().stream().map(hivePartitionName ->
DatabaseTableName.of(hivePartitionName.getDatabaseName(), hivePartitionName.getTableName())).collect(
Collectors.toSet());
}
|
@Test
public void testGetCachedName() {
CachingHiveMetastore cachingHiveMetastore = new CachingHiveMetastore(
metastore, executor, expireAfterWriteSec, refreshAfterWriteSec, 1000, false);
HiveCacheUpdateProcessor processor = new HiveCacheUpdateProcessor(
"hive_catalog", cachingHiveMetastore, null, null, false, false);
Assert.assertTrue(processor.getCachedTableNames().isEmpty());
processor = new HiveCacheUpdateProcessor("hive_catalog", metastore, null, null, false, false);
Assert.assertTrue(processor.getCachedTableNames().isEmpty());
}
|
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
}
|
@Test
public void testShowMaterializedViewPattern() throws AnalysisException, DdlException {
ctx.setCurrentUserIdentity(UserIdentity.ROOT);
ctx.setCurrentRoleIds(Sets.newHashSet(PrivilegeBuiltinConstants.ROOT_ROLE_ID));
ShowMaterializedViewsStmt stmt = new ShowMaterializedViewsStmt("testDb", "bcd%");
ShowResultSet resultSet = ShowExecutor.execute(stmt, ctx);
Assert.assertFalse(resultSet.next());
stmt = new ShowMaterializedViewsStmt("testDb", "%test%");
resultSet = ShowExecutor.execute(stmt, ctx);
verifyShowMaterializedViewResult(resultSet);
}
|
public Category name(String name) {
this.name = name;
return this;
}
|
@Test
public void nameTest() {
// TODO: test name
}
|
public int capacity()
{
return capacity;
}
|
@Test
void shouldCalculateCapacityForBuffer()
{
assertThat(broadcastReceiver.capacity(), is(CAPACITY));
}
|
public static Catalog loadIcebergCatalog(SparkSession spark, String catalogName) {
CatalogPlugin catalogPlugin = spark.sessionState().catalogManager().catalog(catalogName);
Preconditions.checkArgument(
catalogPlugin instanceof HasIcebergCatalog,
String.format(
"Cannot load Iceberg catalog from catalog %s because it does not contain an Iceberg Catalog. "
+ "Actual Class: %s",
catalogName, catalogPlugin.getClass().getName()));
return ((HasIcebergCatalog) catalogPlugin).icebergCatalog();
}
|
@Test
public void testLoadIcebergCatalog() throws Exception {
spark.conf().set("spark.sql.catalog.test_cat", SparkCatalog.class.getName());
spark.conf().set("spark.sql.catalog.test_cat.type", "hive");
Catalog catalog = Spark3Util.loadIcebergCatalog(spark, "test_cat");
Assert.assertTrue(
"Should retrieve underlying catalog class", catalog instanceof CachingCatalog);
}
|
@Override
public int compare(final List<String> o1, final List<String> o2) {
if (o1.size() < o2.size()) {
return -1;
} else if (o1.size() > o2.size()) {
return 1;
} else {
int index = 0;
while (index < o1.size()) {
String item1 = o1.get(index);
String item2 = o2.get(index++);
final int comparisonResult = item1.compareToIgnoreCase(item2);
if (comparisonResult != 0) {
return comparisonResult;
}
}
return 0;
}
}
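// Illustrative ordering (consistent with the test below): shorter lists sort first;
// equal-length lists are compared element by element, ignoring case:
//   compare(["Mum"], ["mum"])  -> 0
//   compare(["a"], ["a", "b"]) -> negative (shorter first)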
|
@Test
void testListsWithSameElementsIgnoringCaseAreEqual() {
assertEquals(0, toTest.compare(List.of("Mum"), List.of("mum")));
assertEquals(0, toTest.compare(List.of("mum", "Dad"), List.of("mum", "dad")));
}
|
@VisibleForTesting
String importSingleAlbum(UUID jobId, TokensAndUrlAuthData authData, PhotoAlbum inputAlbum)
throws IOException, InvalidTokenException, PermissionDeniedException, UploadErrorException {
// Set up album
GoogleAlbum googleAlbum = new GoogleAlbum();
googleAlbum.setTitle(GooglePhotosImportUtils.cleanAlbumTitle(inputAlbum.getName()));
GoogleAlbum responseAlbum =
getOrCreatePhotosInterface(jobId, authData).createAlbum(googleAlbum);
return responseAlbum.getId();
}
|
@Test
public void importAlbum() throws Exception {
// Set up
String albumName = "Album Name";
String albumDescription = "Album description";
PhotoAlbum albumModel = new PhotoAlbum(OLD_ALBUM_ID, albumName, albumDescription);
GoogleAlbum responseAlbum = new GoogleAlbum();
responseAlbum.setId(NEW_ALBUM_ID);
Mockito.when(googlePhotosInterface.createAlbum(any(GoogleAlbum.class)))
.thenReturn(responseAlbum);
// Run test
googlePhotosImporter.importSingleAlbum(uuid, null, albumModel);
// Check results
ArgumentCaptor<GoogleAlbum> albumArgumentCaptor = ArgumentCaptor.forClass(GoogleAlbum.class);
Mockito.verify(googlePhotosInterface).createAlbum(albumArgumentCaptor.capture());
assertEquals(albumArgumentCaptor.getValue().getTitle(), albumName);
assertNull(albumArgumentCaptor.getValue().getId());
}
|
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
if (data.size() < 7) {
onInvalidDataReceived(device, data);
return;
}
// First byte: flags
int offset = 0;
final int flags = data.getIntValue(Data.FORMAT_UINT8, offset++);
// See UNIT_* for unit options
final int unit = (flags & 0x01) == UNIT_mmHg ? UNIT_mmHg : UNIT_kPa;
final boolean timestampPresent = (flags & 0x02) != 0;
final boolean pulseRatePresent = (flags & 0x04) != 0;
final boolean userIdPresent = (flags & 0x08) != 0;
final boolean measurementStatusPresent = (flags & 0x10) != 0;
if (data.size() < 7
+ (timestampPresent ? 7 : 0) + (pulseRatePresent ? 2 : 0)
+ (userIdPresent ? 1 : 0) + (measurementStatusPresent ? 2 : 0)) {
onInvalidDataReceived(device, data);
return;
}
// Following bytes - systolic, diastolic and mean arterial pressure
final float systolic = data.getFloatValue(Data.FORMAT_SFLOAT, offset);
final float diastolic = data.getFloatValue(Data.FORMAT_SFLOAT, offset + 2);
final float meanArterialPressure = data.getFloatValue(Data.FORMAT_SFLOAT, offset + 4);
offset += 6;
// Parse timestamp if present
Calendar calendar = null;
if (timestampPresent) {
calendar = DateTimeDataCallback.readDateTime(data, offset);
offset += 7;
}
// Parse pulse rate if present
Float pulseRate = null;
if (pulseRatePresent) {
pulseRate = data.getFloatValue(Data.FORMAT_SFLOAT, offset);
offset += 2;
}
// Read user id if present
Integer userId = null;
if (userIdPresent) {
userId = data.getIntValue(Data.FORMAT_UINT8, offset);
offset += 1;
}
// Read measurement status if present
BPMStatus status = null;
if (measurementStatusPresent) {
final int measurementStatus = data.getIntValue(Data.FORMAT_UINT16_LE, offset);
// offset += 2;
status = new BPMStatus(measurementStatus);
}
onBloodPressureMeasurementReceived(device, systolic, diastolic, meanArterialPressure, unit, pulseRate, userId, status, calendar);
}
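// Packet layout sketch (derived from the parsing above):
//   byte 0      flags: bit0 unit (0 = mmHg, 1 = kPa), bit1 timestamp present,
//               bit2 pulse rate present, bit3 user id present, bit4 status present
//   bytes 1-6   systolic, diastolic, mean arterial pressure (one SFLOAT each)
//   then, when flagged: timestamp (7 bytes), pulse rate (SFLOAT),
//   user id (UINT8), measurement status (UINT16, little endian)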
|
@Test
public void onBloodPressureMeasurementReceived_minimal() {
final DataReceivedCallback callback = new BloodPressureMeasurementDataCallback() {
@Override
public void onBloodPressureMeasurementReceived(@NonNull final BluetoothDevice device,
final float systolic, final float diastolic, final float meanArterialPressure, final int unit,
@Nullable final Float pulseRate, @Nullable final Integer userID,
@Nullable final BPMStatus status, @Nullable final Calendar calendar) {
assertEquals("Systolic", 18.9, systolic, 0.01);
assertEquals("Diastolic", 11.0, diastolic, 0);
assertEquals("Mean AP", 15.9, meanArterialPressure, 0.01);
assertEquals("Unit: mmHg", 0, unit);
assertNull("Pulse rate not set", pulseRate);
assertNull("User ID not set", userID);
assertNull("Status not set", status);
assertNull("Calendar not set", calendar);
}
@Override
public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
assertEquals("Correct BPM reported as invalid", 1, 2);
}
};
final MutableData data = new MutableData(new byte[7]);
// Flags
data.setByte((byte) 0b00000, 0);
// Systolic, diastolic and mean AP in mmHg
data.setValue(189, -1, Data.FORMAT_SFLOAT, 1);
data.setValue(11, 0, Data.FORMAT_SFLOAT, 3);
data.setValue(159, -1, Data.FORMAT_SFLOAT, 5);
assertArrayEquals(
new byte[] { 0x00, (byte) 0xBD, (byte) 0xF0, 0xB, 0x0, (byte) 0x9F, (byte) 0xF0 },
data.getValue()
);
callback.onDataReceived(null, data);
}
|
@GetMapping
public DeferredResult<ResponseEntity<ApolloConfigNotification>> pollNotification(
@RequestParam(value = "appId") String appId,
@RequestParam(value = "cluster") String cluster,
@RequestParam(value = "namespace", defaultValue = ConfigConsts.NAMESPACE_APPLICATION) String namespace,
@RequestParam(value = "dataCenter", required = false) String dataCenter,
@RequestParam(value = "notificationId", defaultValue = "-1") long notificationId,
@RequestParam(value = "ip", required = false) String clientIp) {
//strip out .properties suffix
namespace = namespaceUtil.filterNamespaceName(namespace);
Set<String> watchedKeys = watchKeysUtil.assembleAllWatchKeys(appId, cluster, namespace, dataCenter);
DeferredResult<ResponseEntity<ApolloConfigNotification>> deferredResult =
new DeferredResult<>(TIMEOUT, NOT_MODIFIED_RESPONSE);
    // check whether the client is outdated
ReleaseMessage latest = releaseMessageService.findLatestReleaseMessageForMessages(watchedKeys);
    /**
     * Manually close the entity manager.
     * For async requests, Spring won't do so until the request is finished,
     * which is unacceptable here since we are doing long polling - it would mean
     * the db connection is held for a very long time.
     */
entityManagerUtil.closeEntityManager();
if (latest != null && latest.getId() != notificationId) {
deferredResult.setResult(new ResponseEntity<>(
new ApolloConfigNotification(namespace, latest.getId()), HttpStatus.OK));
} else {
//register all keys
for (String key : watchedKeys) {
this.deferredResults.put(key, deferredResult);
}
deferredResult
.onTimeout(() -> logWatchedKeys(watchedKeys, "Apollo.LongPoll.TimeOutKeys"));
deferredResult.onCompletion(() -> {
//unregister all keys
for (String key : watchedKeys) {
deferredResults.remove(key, deferredResult);
}
logWatchedKeys(watchedKeys, "Apollo.LongPoll.CompletedKeys");
});
logWatchedKeys(watchedKeys, "Apollo.LongPoll.RegisteredKeys");
logger.debug("Listening {} from appId: {}, cluster: {}, namespace: {}, datacenter: {}",
watchedKeys, appId, cluster, namespace, dataCenter);
}
return deferredResult;
}
|
@Test
public void testPollNotificationWithDefaultNamespaceAsFile() throws Exception {
String namespace = String.format("%s.%s", defaultNamespace, "properties");
when(namespaceUtil.filterNamespaceName(namespace)).thenReturn(defaultNamespace);
String someWatchKey = "someKey";
String anotherWatchKey = "anotherKey";
Set<String> watchKeys = Sets.newHashSet(someWatchKey, anotherWatchKey);
when(watchKeysUtil
.assembleAllWatchKeys(someAppId, someCluster, defaultNamespace,
someDataCenter)).thenReturn(
watchKeys);
DeferredResult<ResponseEntity<ApolloConfigNotification>>
deferredResult = controller
.pollNotification(someAppId, someCluster, namespace, someDataCenter,
someNotificationId, someClientIp);
assertEquals(watchKeys.size(), deferredResults.size());
for (String watchKey : watchKeys) {
assertTrue(deferredResults.get(watchKey).contains(deferredResult));
}
}
|
@SuppressWarnings("WeakerAccess")
public Map<String, Object> getAdminConfigs(final String clientId) {
final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(ADMIN_CLIENT_PREFIX, AdminClientConfig.configNames());
final Map<String, Object> props = new HashMap<>();
props.putAll(getClientCustomProps());
props.putAll(clientProvidedProps);
// add client id with stream client id prefix
props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
return props;
}
|
@Test
public void shouldSupportNonPrefixedAdminConfigs() {
props.put(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, 10);
final StreamsConfig streamsConfig = new StreamsConfig(props);
final Map<String, Object> configs = streamsConfig.getAdminConfigs(clientId);
assertEquals(10, configs.get(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG));
}
|
@Override
public boolean matches(Job localJob, Job storageProviderJob) {
if (storageProviderJob.getVersion() == localJob.getVersion() + 1
&& localJob.hasState(PROCESSING) && !storageProviderJob.hasState(PROCESSING)) {
return jobSteward.getThreadProcessingJob(localJob) == null;
}
return false;
}
|
@Test
void ifJobIsHavingConcurrentStateChangeAndStorageProviderJobIsAlsoProcessingItWillNotMatch() {
final Job localJob = aJobInProgress().withVersion(2).build();
final Job storageProviderJob = aCopyOf(localJob).withVersion(3).build();
lenient().when(jobSteward.getThreadProcessingJob(localJob)).thenReturn(null);
boolean matchesAllowedStateChange = allowedStateChange.matches(localJob, storageProviderJob);
assertThat(matchesAllowedStateChange).isFalse();
}
|
public CMap parsePredefined(String name) throws IOException
{
try (RandomAccessRead randomAccessRead = getExternalCMap(name))
{
// deactivate strict mode
strictMode = false;
return parse(randomAccessRead);
}
}
|
@Test
void testUniJIS_UCS2_H() throws IOException
{
CMap cMap = new CMapParser().parsePredefined("UniJIS-UCS2-H");
assertEquals(34, cMap.toCID(new byte[] { 0, 65 }), "UniJIS-UCS2-H CID 65 -> 34");
}
|
protected static boolean isDuplicate( List<? extends SharedObjectInterface> objects, SharedObjectInterface object ) {
String newName = object.getName();
for ( SharedObjectInterface soi : objects ) {
if ( soi.getName().equalsIgnoreCase( newName ) ) {
return true;
}
}
return false;
}
|
@Test
public void isDuplicate_DifferentCase() {
assertTrue( isDuplicate( singletonList( mockObject( "qwerty" ) ), mockObject( "Qwerty" ) ) );
}
|
public static Map<String, String> getClientMd5Map(String configKeysString) {
Map<String, String> md5Map = new HashMap<>(5);
if (null == configKeysString || "".equals(configKeysString)) {
return md5Map;
}
int start = 0;
List<String> tmpList = new ArrayList<>(3);
for (int i = start; i < configKeysString.length(); i++) {
char c = configKeysString.charAt(i);
if (c == WORD_SEPARATOR_CHAR) {
tmpList.add(configKeysString.substring(start, i));
start = i + 1;
if (tmpList.size() > 3) {
                // Malformed message: report a parameter error.
throw new IllegalArgumentException("invalid protocol,too much key");
}
} else if (c == LINE_SEPARATOR_CHAR) {
String endValue = "";
if (start + 1 <= i) {
endValue = configKeysString.substring(start, i);
}
start = i + 1;
            // In the old protocol the last field is the MD5; in the multi-tenant protocol the last field is the tenant.
if (tmpList.size() == 2) {
String groupKey = GroupKey2.getKey(tmpList.get(0), tmpList.get(1));
groupKey = StringPool.get(groupKey);
md5Map.put(groupKey, endValue);
} else {
String groupKey = GroupKey2.getKey(tmpList.get(0), tmpList.get(1), endValue);
groupKey = StringPool.get(groupKey);
md5Map.put(groupKey, tmpList.get(2));
}
tmpList.clear();
            // Protect against malformed messages.
if (md5Map.size() > 10000) {
throw new IllegalArgumentException("invalid protocol, too much listener");
}
}
}
return md5Map;
}
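// Wire format sketch (derived from the parsing logic above), with ^B standing for
// WORD_SEPARATOR_CHAR and ^A for LINE_SEPARATOR_CHAR:
//   old protocol:          dataId^Bgroup^Bmd5^A          -> key = dataId+group
//   multi-tenant protocol: dataId^Bgroup^Bmd5^Btenant^A  -> key = dataId+group+tenant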
|
@Test
void testGetClientMd5MapForNewProtocol() {
String configKeysString =
"test0" + MD5Util.WORD_SEPARATOR_CHAR + "test1" + MD5Util.WORD_SEPARATOR_CHAR + "test2" + MD5Util.WORD_SEPARATOR_CHAR
+ "test3" + MD5Util.LINE_SEPARATOR_CHAR;
Map<String, String> actualValueMap = MD5Util.getClientMd5Map(configKeysString);
assertEquals("test2", actualValueMap.get("test0+test1+test3"));
}
|
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = trees.length;
double[][] prediction = new double[ntrees][n];
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = b;
for (int i = 0; i < ntrees; i++) {
base += shrinkage * trees[i].predict(xj);
prediction[i][j] = base;
}
}
return prediction;
}
|
@Test
public void testPuma8nhLS() {
test(Loss.ls(), "puma8nh", Puma8NH.formula, Puma8NH.data, 3.2482);
}
|
@Override
public boolean isOperational() {
if (nodeOperational) {
return true;
}
boolean flag = false;
try {
flag = checkOperational();
} catch (InterruptedException e) {
LOG.trace("Interrupted while checking ES node is operational", e);
Thread.currentThread().interrupt();
} finally {
if (flag) {
esConnector.stop();
nodeOperational = true;
}
}
return nodeOperational;
}
|
@Test
public void isOperational_should_not_be_os_language_sensitive() {
EsConnector esConnector = mock(EsConnector.class);
when(esConnector.getClusterHealthStatus())
.thenThrow(new ElasticsearchException(new ExecutionException(new ConnectException("Connexion refusée"))));
EsManagedProcess underTest = new EsManagedProcess(mock(Process.class), ProcessId.ELASTICSEARCH, esConnector, WAIT_FOR_UP_TIMEOUT_LONG);
assertThat(underTest.isOperational()).isFalse();
}
|
public static PTransformMatcher emptyFlatten() {
return new PTransformMatcher() {
@Override
public boolean matches(AppliedPTransform<?, ?, ?> application) {
return (application.getTransform() instanceof Flatten.PCollections)
&& application.getInputs().isEmpty();
}
@Override
public String toString() {
return MoreObjects.toStringHelper("EmptyFlattenMatcher").toString();
}
};
}
|
@Test
public void emptyFlattenWithEmptyFlatten() {
AppliedPTransform application =
AppliedPTransform.of(
"EmptyFlatten",
Collections.emptyMap(),
Collections.singletonMap(
new TupleTag<Integer>(),
PCollection.createPrimitiveOutputInternal(
p, WindowingStrategy.globalDefault(), IsBounded.BOUNDED, VarIntCoder.of())),
Flatten.pCollections(),
ResourceHints.create(),
p);
assertThat(PTransformMatchers.emptyFlatten().matches(application), is(true));
}
|
public static <T> String render(ClassPluginDocumentation<T> classPluginDocumentation) throws IOException {
return render("task", JacksonMapper.toMap(classPluginDocumentation));
}
|
@SuppressWarnings("unchecked")
@Test
void state() throws IOException {
PluginScanner pluginScanner = new PluginScanner(ClassPluginDocumentationTest.class.getClassLoader());
RegisteredPlugin scan = pluginScanner.scan();
Class<Set> set = scan.findClass(Set.class.getName()).orElseThrow();
ClassPluginDocumentation<? extends Task> doc = ClassPluginDocumentation.of(jsonSchemaGenerator, scan, set, Task.class);
String render = DocumentationGenerator.render(doc);
assertThat(render, containsString("Set"));
assertThat(render, containsString("::alert{type=\"warning\"}\n"));
}
|
@Override
public CharSequence toXML(XmlEnvironment xmlEnvironment) {
XmlStringBuilder sb = new XmlStringBuilder(this, xmlEnvironment);
return sb.attribute(ELEM_URI, uri)
.optAttribute(ELEM_MEDIA_TYPE, mediaType)
.optAttribute(ELEM_WIDTH, width)
.optAttribute(ELEM_HEIGHT, height)
.closeEmptyElement();
}
|
@Test
public void testMinimal() {
ThumbnailElement minimal = new ThumbnailElement("cid:[email protected]");
assertXmlSimilar("<thumbnail xmlns='urn:xmpp:thumbs:1'\n" +
"uri='cid:[email protected]'/>",
minimal.toXML());
}
|
@Override
public Path move(final Path source, final Path target, final TransferStatus status, final Delete.Callback callback,
final ConnectionCallback connectionCallback) throws BackgroundException {
if(containerService.isContainer(source)) {
if(new SimplePathPredicate(source.getParent()).test(target.getParent())) {
// Rename only
return proxy.move(source, target, status, callback, connectionCallback);
}
}
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(source) ^ new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(target))) {
// Moving into or from an encrypted room
final Copy copy = new SDSDelegatingCopyFeature(session, nodeid, new SDSCopyFeature(session, nodeid));
if(log.isDebugEnabled()) {
log.debug(String.format("Move %s to %s using copy feature %s", source, target, copy));
}
final Path c = copy.copy(source, target, status, connectionCallback, new DisabledStreamListener());
// Delete source file after copy is complete
final Delete delete = new SDSDeleteFeature(session, nodeid);
if(delete.isSupported(source)) {
log.warn(String.format("Delete source %s copied to %s", source, target));
delete.delete(Collections.singletonMap(source, status), connectionCallback, callback);
}
return c;
}
else {
return proxy.move(source, target, status, callback, connectionCallback);
}
}
|
@Test
public void testMoveWithRenameDifferentDataRoomSameFilename() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room1 = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path room2 = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path test1 = new Path(room1, "A", EnumSet.of(Path.Type.file));
new SDSTouchFeature(session, nodeid).touch(test1, new TransferStatus());
final Path test2 = new Path(room2, "A", EnumSet.of(Path.Type.file));
new SDSTouchFeature(session, nodeid).touch(test2, new TransferStatus());
final Path target = new Path(room2, "A (2)", EnumSet.of(Path.Type.file));
new SDSDelegatingMoveFeature(session, nodeid, new SDSMoveFeature(session, nodeid)).move(test1, target, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
test1.attributes().setVersionId(null);
assertFalse(new SDSFindFeature(session, nodeid).find(test1));
assertTrue(new SDSFindFeature(session, nodeid).find(test2));
assertTrue(new SDSFindFeature(session, nodeid).find(target));
assertEquals(0, session.getMetrics().get(Copy.class));
new SDSDeleteFeature(session, nodeid).delete(Arrays.asList(room1, room2), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
public void createOrUpdate(final String path, final Object data) {
zkClient.createOrUpdate(path, data, CreateMode.PERSISTENT);
}
|
@Test
public void testOnAppAuthChangedCreate() {
AppAuthData appAuthData = AppAuthData.builder().appKey(MOCK_APP_KEY).appSecret(MOCK_APP_SECRET).build();
String appAuthPath = DefaultPathConstants.buildAppAuthPath(appAuthData.getAppKey());
zookeeperDataChangedListener.onAppAuthChanged(ImmutableList.of(appAuthData), DataEventTypeEnum.CREATE);
verify(zkClient, times(1)).createOrUpdate(appAuthPath, appAuthData, CreateMode.PERSISTENT);
}
|
@Override
public Range<T> span() {
if (rangeBitSetMap.isEmpty()) {
return null;
}
Entry<Long, BitSet> firstSet = rangeBitSetMap.firstEntry();
Entry<Long, BitSet> lastSet = rangeBitSetMap.lastEntry();
int first = firstSet.getValue().nextSetBit(0);
int last = lastSet.getValue().previousSetBit(lastSet.getValue().size());
return Range.openClosed(consumer.apply(firstSet.getKey(), first - 1), consumer.apply(lastSet.getKey(), last));
}
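// Note (hedged): the span is an open-closed range built from the first set bit of the
// first entry and the last set bit of the last entry in the bitset map; when no ranges
// are present, null is returned (exercised by the test below).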
|
@Test
public void testNPE() {
OpenLongPairRangeSet<LongPair> set = new OpenLongPairRangeSet<>(consumer);
assertNull(set.span());
}
|