Dataset columns: focal_method (string, lengths 13 to 60.9k) and test_case (string, lengths 25 to 109k).
@Override public ChannelFuture writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, boolean endStream, ChannelPromise promise) { return writeHeadersInternal(ctx, streamId, headers, padding, endStream, false, 0, (short) 0, false, promise); }
@Test public void writeHeaders() throws Exception { int streamId = 1; Http2Headers headers = new DefaultHttp2Headers() .method("GET").path("/").authority("foo.com").scheme("https"); frameWriter.writeHeaders(ctx, streamId, headers, 0, true, promise); byte[] expectedPayload = headerPayload(streamId, headers); byte[] expectedFrameBytes = { (byte) 0x00, (byte) 0x00, (byte) 0x0a, // payload length = 10 (byte) 0x01, // frame type = 1 (HEADERS) (byte) 0x05, // flags = END_STREAM (0x01) | END_HEADERS (0x04) (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x01 // stream id = 1 }; expectedOutbound = Unpooled.copiedBuffer(expectedFrameBytes, expectedPayload); assertEquals(expectedOutbound, outbound); }
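As a cross-check on the pair above, the nine expected bytes follow the generic HTTP/2 frame header layout from RFC 7540: 3-byte payload length, 1-byte frame type (0x1 = HEADERS), 1-byte flags, and a 4-byte stream id with the reserved bit masked off. A minimal plain-JDK decode sketch (a fragment, no Netty types):

// Decode the 9-byte frame header asserted in the test above.
byte[] h = {0x00, 0x00, 0x0a, 0x01, 0x05, 0x00, 0x00, 0x00, 0x01};
int payloadLength = ((h[0] & 0xff) << 16) | ((h[1] & 0xff) << 8) | (h[2] & 0xff); // 10
int frameType = h[3] & 0xff; // 0x1 = HEADERS
int flags = h[4] & 0xff;     // 0x5 = END_STREAM (0x1) | END_HEADERS (0x4)
int streamId = ((h[5] & 0x7f) << 24) | ((h[6] & 0xff) << 16) | ((h[7] & 0xff) << 8) | (h[8] & 0xff); // 1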
@Override protected TableRecords getUndoRows() { return super.getUndoRows(); }
@Test public void getUndoRows() { Assertions.assertEquals(executor.getUndoRows(), executor.getSqlUndoLog().getBeforeImage()); }
@Override public StatusOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { try { final java.nio.file.Path p = session.toPath(file); final Set<OpenOption> options = new HashSet<>(); options.add(StandardOpenOption.WRITE); if(status.isAppend()) { if(!status.isExists()) { options.add(StandardOpenOption.CREATE); } } else { if(status.isExists()) { if(file.isSymbolicLink()) { Files.delete(p); options.add(StandardOpenOption.CREATE); } else { options.add(StandardOpenOption.TRUNCATE_EXISTING); } } else { options.add(StandardOpenOption.CREATE_NEW); } } final FileChannel channel = FileChannel.open(session.toPath(file), options.stream().toArray(OpenOption[]::new)); channel.position(status.getOffset()); return new VoidStatusOutputStream(Channels.newOutputStream(channel)); } catch(IOException e) { throw new LocalExceptionMappingService().map("Upload {0} failed", e, file); } }
@Test public void testWriteSymlink() throws Exception { final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname())); if(session.isPosixFilesystem()) { session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()); session.login(new DisabledLoginCallback(), new DisabledCancelCallback()); final Path workdir = new LocalHomeFinderFeature().find(); final Path target = new Path(workdir, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); new LocalTouchFeature(session).touch(target, new TransferStatus()); assertTrue(new LocalFindFeature(session).find(target)); final String name = UUID.randomUUID().toString(); final Path symlink = new Path(workdir, name, EnumSet.of(Path.Type.file, AbstractPath.Type.symboliclink)); new LocalSymlinkFeature(session).symlink(symlink, target.getName()); assertTrue(new LocalFindFeature(session).find(symlink)); final TransferStatus status = new TransferStatus(); final int length = 1048576; final byte[] content = RandomUtils.nextBytes(length); status.setLength(content.length); status.setExists(true); final OutputStream out = new LocalWriteFeature(session).write(symlink, status, new DisabledConnectionCallback()); new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out); out.close(); { final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length); final InputStream in = new LocalReadFeature(session).read(symlink, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()); new StreamCopier(status, status).transfer(in, buffer); assertArrayEquals(content, buffer.toByteArray()); } { final byte[] buffer = new byte[0]; final InputStream in = new LocalReadFeature(session).read(target, new TransferStatus(), new DisabledConnectionCallback()); IOUtils.readFully(in, buffer); in.close(); assertArrayEquals(new byte[0], buffer); } final AttributedList<Path> list = new LocalListService(session).list(workdir, new DisabledListProgressListener()); assertTrue(list.contains(new Path(workdir, name, EnumSet.of(Path.Type.file)))); assertFalse(list.contains(symlink)); new LocalDeleteFeature(session).delete(Arrays.asList(target, symlink), new DisabledLoginCallback(), new Delete.DisabledCallback()); } }
public static void assertThatClassIsImmutable(Class<?> clazz) { final ImmutableClassChecker checker = new ImmutableClassChecker(); if (!checker.isImmutableClass(clazz, false)) { final Description toDescription = new StringDescription(); final Description mismatchDescription = new StringDescription(); checker.describeTo(toDescription); checker.describeMismatch(mismatchDescription); final String reason = "\n" + "Expected: is \"" + toDescription.toString() + "\"\n" + " but : was \"" + mismatchDescription.toString() + "\""; throw new AssertionError(reason); } }
@Test public void testFinalProtectedMember() throws Exception { boolean gotException = false; try { assertThatClassIsImmutable(FinalProtectedMember.class); } catch (AssertionError assertion) { assertThat(assertion.getMessage(), containsString("a field named 'x' that is not private")); gotException = true; } assertThat(gotException, is(true)); }
@Override public V get(K key) { return map.get(key); }
@Test public void testGet() { map.put(42, "foobar"); String result = adapter.get(42); assertEquals("foobar", result); }
public V put(final K key, final V value) { final Object val = mapNullValue(value); requireNonNull(val, "value cannot be null"); final Object[] entries = this.entries; final int mask = entries.length - 1; int keyIndex = Hashing.evenHash(key.hashCode(), mask); Object oldValue; while (null != (oldValue = entries[keyIndex + 1])) { if (Objects.equals(entries[keyIndex], key)) { break; } keyIndex = next(keyIndex, mask); } if (null == oldValue) { ++size; entries[keyIndex] = key; } entries[keyIndex + 1] = val; increaseCapacity(); return unmapNullValue(oldValue); }
@Test void shouldCopyConstructAndBeEqual() { final int[] testEntries = { 3, 1, 19, 7, 11, 12, 7 }; final Object2ObjectHashMap<String, Integer> map = new Object2ObjectHashMap<>(); for (final int testEntry : testEntries) { map.put(String.valueOf(testEntry), testEntry); } final Object2ObjectHashMap<String, Integer> mapCopy = new Object2ObjectHashMap<>(map); assertEquals(map, mapCopy); }
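The put above stores keys at even indices of a flat Object[] with each value in the odd slot directly after its key, probing forward to the next even slot on collision. A stripped-down sketch of that probe loop under the same even-slot convention (a hypothetical simplification of Hashing.evenHash; the real class also maps null values and grows the table):

// Linear probing over a flat entries array: key at even slot i, value at i + 1.
Object[] entries = new Object[16];            // capacity must be a power of two
int mask = entries.length - 1;
Object key = "k", value = "v";
int i = (key.hashCode() & (mask >> 1)) << 1;  // even starting slot
while (entries[i + 1] != null && !java.util.Objects.equals(entries[i], key)) {
    i = (i + 2) & mask;                       // probe the next even slot, wrapping around
}
entries[i] = key;
entries[i + 1] = value;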
@Override public AttributedList<Path> run(final Session<?> session) throws BackgroundException { // Run recursively final Search feature = session.getFeature(Search.class); if(log.isDebugEnabled()) { log.debug(String.format("Run with feature %s", feature)); } return this.search(feature, directory); }
@Test public void testRun() throws Exception { final PathCache cache = new PathCache(Integer.MAX_VALUE); final AttributedList<Path> root = new AttributedList<>(); root.add(new Path("/t1.png", EnumSet.of(Path.Type.file))); root.add(new Path("/t1.gif", EnumSet.of(Path.Type.file))); final Path folder = new Path("/folder", EnumSet.of(Path.Type.directory)); root.add(folder); root.add(new Path("/folder2", EnumSet.of(Path.Type.directory))); cache.put(new Path("/", EnumSet.of(Path.Type.directory)), root); final AttributedList<Path> folderContents = new AttributedList<>(); folderContents.add(new Path(folder, "/t2.png", EnumSet.of(Path.Type.file))); folderContents.add(new Path(folder, "/t2.gif", EnumSet.of(Path.Type.file))); final Path subfolder = new Path(folder, "/subfolder", EnumSet.of(Path.Type.directory)); folderContents.add(subfolder); cache.put(folder, folderContents); final AttributedList<Path> subfolderContents = new AttributedList<>(); subfolderContents.add(new Path(subfolder, "t2.png", EnumSet.of(Path.Type.file))); subfolderContents.add(new Path(subfolder, "t2.gif", EnumSet.of(Path.Type.file))); cache.put(subfolder, subfolderContents); final SearchWorker search = new SearchWorker(new Path("/", EnumSet.of(Path.Type.directory)), new SearchFilter(".png"), cache, new DisabledListProgressListener()); final AttributedList<Path> found = search.run(new NullSession(new Host(new TestProtocol()))); assertTrue(found.contains(new Path("/t1.png", EnumSet.of(Path.Type.file)))); assertFalse(found.contains(new Path("/t1.gif", EnumSet.of(Path.Type.file)))); assertFalse(found.contains(new Path("/t2.png", EnumSet.of(Path.Type.file)))); assertFalse(found.contains(new Path("/t2.gif", EnumSet.of(Path.Type.file)))); assertTrue(found.contains(folder)); assertTrue(found.contains(new Path(folder, "/t2.png", EnumSet.of(Path.Type.file)))); assertTrue(found.contains(subfolder)); assertTrue(found.contains(new Path(subfolder, "/t2.png", EnumSet.of(Path.Type.file)))); assertFalse(found.contains(new Path(new Path("/folder2", EnumSet.of(Path.Type.directory)), "/t2.gif", EnumSet.of(Path.Type.file)))); assertFalse(found.contains(new Path("/folder2", EnumSet.of(Path.Type.directory)))); }
@Override public boolean publishConfigCas(String key, String group, String content, Object ticket) { try { if (ticket != null && !(ticket instanceof Stat)) { throw new IllegalArgumentException("zookeeper publishConfigCas requires stat type ticket"); } String pathKey = buildPathKey(group, key); zkClient.createOrUpdate(pathKey, content, false, ticket == null ? 0 : ((Stat) ticket).getVersion()); return true; } catch (Exception e) { logger.warn(REGISTRY_ZOOKEEPER_EXCEPTION, "", "", "zookeeper publishConfigCas failed.", e); return false; } }
@Test void testPublishConfigCas() { String key = "user-service-cas"; String group = "org.apache.dubbo.service.UserService"; String content = "test"; ConfigItem configItem = configuration.getConfigItem(key, group); assertTrue(configuration.publishConfigCas(key, group, content, configItem.getTicket())); configItem = configuration.getConfigItem(key, group); assertEquals("test", configItem.getContent()); assertTrue(configuration.publishConfigCas(key, group, "newtest", configItem.getTicket())); assertFalse(configuration.publishConfigCas(key, group, "newtest2", configItem.getTicket())); assertEquals("newtest", configuration.getConfigItem(key, group).getContent()); }
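The compare-and-set the test exercises is ZooKeeper's versioned setData: the ticket carries a Stat whose version must still match the znode, otherwise the write is rejected, which is why the stale-ticket publish returns false. A bare-bones sketch with the raw ZooKeeper client (assumed path and client variables, not Dubbo's wrapper):

// Optimistic write: succeeds only while the znode's version still equals expectedVersion.
static boolean casWrite(org.apache.zookeeper.ZooKeeper zk, String path, byte[] data, int expectedVersion)
        throws org.apache.zookeeper.KeeperException, InterruptedException {
    try {
        zk.setData(path, data, expectedVersion);
        return true;
    } catch (org.apache.zookeeper.KeeperException.BadVersionException e) {
        return false; // a concurrent writer won, mirroring publishConfigCas returning false
    }
}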
public static String normalize(final String path) { return normalize(path, true); }
@Test public void testNormalize() { assertEquals(PathNormalizer.normalize("relative/path", false), "relative/path"); assertEquals(PathNormalizer.normalize("/absolute/path", true), "/absolute/path"); assertEquals(PathNormalizer.normalize("/absolute/path", false), "/absolute/path"); }
public boolean hasReadPermissionForWholeCollection(final Subject subject, final String collection) { return readPermissionForCollection(collection) .map(rp -> rp.equals(DbEntity.ALL_ALLOWED) || subject.isPermitted(rp + ":*")) .orElse(false); }
@Test void hasReadPermissionForWholeCollectionReturnsTrueWhenCatalogHasAllAllowedPermission() { doReturn(Optional.of( new DbEntityCatalogEntry("streams", "title", StreamImpl.class, DbEntity.ALL_ALLOWED)) ).when(catalog) .getByCollectionName("streams"); final boolean hasReadPermissions = toTest.hasReadPermissionForWholeCollection(subject, "streams"); assertTrue(hasReadPermissions); }
@Override @Deprecated @SuppressWarnings("unchecked") public <T extends Number> Counter<T> counter(String name, Class<T> type, Unit unit) { if (Integer.class.equals(type)) { return (Counter<T>) new DefaultCounter(unit).asIntCounter(); } if (Long.class.equals(type)) { return (Counter<T>) new DefaultCounter(unit).asLongCounter(); } throw new IllegalArgumentException( String.format("Counter for type %s is not supported", type.getName())); }
@Test public void longCounter() { MetricsContext metricsContext = new DefaultMetricsContext(); MetricsContext.Counter<Long> counter = metricsContext.counter("longCounter", Long.class, MetricsContext.Unit.COUNT); counter.increment(5L); assertThat(counter.value()).isEqualTo(5L); assertThat(counter.unit()).isEqualTo(MetricsContext.Unit.COUNT); }
public static ExternalSorter create(Options options) { return options.getSorterType() == Options.SorterType.HADOOP ? HadoopExternalSorter.create(options) : NativeExternalSorter.create(options); }
@Test public void testRandom() throws Exception { SorterTestUtils.testRandom( () -> ExternalSorter.create( new ExternalSorter.Options() .setTempLocation(getTmpLocation().toString()) .setSorterType(sorterType)), 1, 1000000); }
public Connection getConnection() { return getConfig().isShared() ? pooledConnection() : singleUseConnection(); }
@Test public void should_return_same_connection_when_shared() throws Exception { DataConnectionConfig config = new DataConnectionConfig(SHARED_DATA_CONNECTION_CONFIG) .setProperty("maximumPoolSize", "1"); jdbcDataConnection = new JdbcDataConnection(config); connection1 = jdbcDataConnection.getConnection(); assertThat(connection1).isNotNull(); Connection unwrapped1 = connection1.unwrap(Connection.class); connection1.close(); // used maximumPoolSize above, after closing it should return same connection connection2 = jdbcDataConnection.getConnection(); assertThat(connection2).isNotNull(); Connection unwrapped2 = connection2.unwrap(Connection.class); assertThat(unwrapped1).isSameAs(unwrapped2); }
@Override public Map<String, String> contextLabels() { return Collections.unmodifiableMap(contextLabels); }
@Test public void testCreationWithNullNamespaceAndLabels() { context = new KafkaMetricsContext(null, labels); assertEquals(2, context.contextLabels().size()); assertNull(context.contextLabels().get(MetricsContext.NAMESPACE)); assertEquals(LABEL_A_VALUE, context.contextLabels().get(LABEL_A_KEY)); }
public static void checkTdg(String tenant, String dataId, String group) throws NacosException { checkTenant(tenant); if (StringUtils.isBlank(dataId) || !ParamUtils.isValid(dataId)) { throw new NacosException(NacosException.CLIENT_INVALID_PARAM, DATAID_INVALID_MSG); } if (StringUtils.isBlank(group) || !ParamUtils.isValid(group)) { throw new NacosException(NacosException.CLIENT_INVALID_PARAM, GROUP_INVALID_MSG); } }
@Test void testCheckTdgFail1() throws NacosException { Throwable exception = assertThrows(NacosException.class, () -> { String tenant = "a"; String dataId = ""; String group = "c"; ParamUtils.checkTdg(tenant, dataId, group); }); assertTrue(exception.getMessage().contains("dataId invalid")); }
public ConfigCheckResult checkConfig() { Optional<Long> appId = getAppId(); if (appId.isEmpty()) { return failedApplicationStatus(INVALID_APP_ID_STATUS); } GithubAppConfiguration githubAppConfiguration = new GithubAppConfiguration(appId.get(), gitHubSettings.privateKey(), gitHubSettings.apiURLOrDefault()); return checkConfig(githubAppConfiguration); }
@Test public void checkConfig_whenInstallationsDoesntHaveOrgMembersPermissions_shouldReturnFailedAppAutoProvisioningCheck() { mockGithubConfiguration(); ArgumentCaptor<GithubAppConfiguration> appConfigurationCaptor = ArgumentCaptor.forClass(GithubAppConfiguration.class); mockGithubAppWithValidConfig(appConfigurationCaptor); mockOrganizationsWithoutPermissions(appConfigurationCaptor, "org1"); ConfigCheckResult checkResult = configValidator.checkConfig(); assertSuccessfulAppConfig(checkResult); assertThat(checkResult.installations()) .extracting(InstallationStatus::organization, InstallationStatus::autoProvisioning) .containsExactly(tuple("org1", ConfigStatus.failed(MISSING_ALL_AUTOPROVISIONNING_PERMISSIONS))); verifyAppConfiguration(appConfigurationCaptor.getValue()); }
static JSONArray parseFirstEntries(final InputStream inputStream, final int count) throws ExecutionException, InterruptedException { final SimpleDateFormat dateFormat = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss Z", Locale.ENGLISH); final JSONArray result = new JSONArray(); final Document document = SAXReaderUtil.readDocument(inputStream); final List<Node> nodes = document.selectNodes("/rss/channel/item"); for (final Node node : nodes) { if (!(node instanceof Element)) { continue; } try { final Element item = (Element) node; final JSONObject o = new JSONObject(); o.put("link", item.elementText("link")); o.put("title", item.elementText("title")); o.put("date", JiveGlobals.formatDate(dateFormat.parse(item.elementText("pubDate")))); result.put(o); } catch (Exception e) { Log.debug("Unable to parse element as RSS data: {}", node.asXML(), e); } if (result.length() >= count) { break; } } return result; }
@Test public void testRssParsing() throws Exception { // Setup test fixture. try (final InputStream rssStream = BlogPostServlet.class.getResourceAsStream("/rss/ignite-blog.rss")) { // Execute system under test. final JSONArray result = BlogPostServlet.parseFirstEntries(rssStream, 2); // Verify results. assertEquals(2, result.length()); assertEquals("https://discourse.igniterealtime.org/t/cve-2023-32315-openfire-vulnerability-update/93166", result.getJSONObject(0).getString("link")); assertEquals("CVE-2023-32315: Openfire vulnerability (update)", result.getJSONObject(0).getString("title")); assertEquals("https://discourse.igniterealtime.org/t/sparkweb-lives-again/93130", result.getJSONObject(1).getString("link")); assertEquals("SparkWeb lives again", result.getJSONObject(1).getString("title")); } }
public static <T> RestResult<T> failed() { return RestResult.<T>builder().withCode(500).build(); }
@Test void testFailedWithDefault() { RestResult<Object> restResult = RestResultUtils.failed(); assertRestResult(restResult, 500, null, null, false); }
@Override public String decrypt(String cipherText) throws CryptoException { return decrypt(cipherProvider.getKey(), cipherText); }
@Test public void shouldDecryptText() throws CryptoException { String plainText = desEncrypter.decrypt("mvcX9yrQsM4iPgm1tDxN1A=="); assertThat(plainText).isEqualTo("user-password!"); }
@Override public boolean putIfAbsent(K key, V value) { return map.putIfAbsent(key, value) == null; }
@Test public void testPutIfAbsent() { map.put(42, "oldValue"); assertTrue(adapter.putIfAbsent(23, "newValue")); assertFalse(adapter.putIfAbsent(42, "newValue")); assertEquals("newValue", map.get(23)); assertEquals("oldValue", map.get(42)); }
@Override public ServerSocketEndpointConfig setName(String name) { super.setName(name); return this; }
@Test public void testEndpointConfig_defaultConstructor() { endpointConfig = new ServerSocketEndpointConfig(); endpointConfig.setName(endpointName); assertEquals(endpointName, endpointConfig.getName()); // assertNull(endpointConfig.getMemberAddressProviderConfig()); assertNull(endpointConfig.getProtocolType()); }
@Override public KvMetadata resolveMetadata( boolean isKey, List<MappingField> resolvedFields, Map<String, String> options, InternalSerializationService serializationService ) { Map<QueryPath, MappingField> fieldsByPath = extractFields(resolvedFields, isKey); List<TableField> fields = new ArrayList<>(); for (Entry<QueryPath, MappingField> entry : fieldsByPath.entrySet()) { QueryPath path = entry.getKey(); QueryDataType type = entry.getValue().type(); String name = entry.getValue().name(); fields.add(new MapTableField(name, type, false, path)); } maybeAddDefaultField(isKey, resolvedFields, fields, QueryDataType.OBJECT); Schema schema = getSchema(fieldsByPath, options, isKey); if (schema == null) { String recordName = options.getOrDefault( isKey ? OPTION_KEY_AVRO_RECORD_NAME : OPTION_VALUE_AVRO_RECORD_NAME, "jet.sql"); schema = resolveSchema(recordName, getFields(fieldsByPath)); } return new KvMetadata( fields, AvroQueryTargetDescriptor.INSTANCE, new AvroUpsertTargetDescriptor(schema) ); }
@Test public void test_resolveMetadata() { KvMetadata metadata = INSTANCE.resolveMetadata( isKey, List.of( field("string", QueryDataType.VARCHAR), field("boolean", QueryDataType.BOOLEAN), field("byte", QueryDataType.TINYINT), field("short", QueryDataType.SMALLINT), field("int", QueryDataType.INT), field("long", QueryDataType.BIGINT), field("float", QueryDataType.REAL), field("double", QueryDataType.DOUBLE), field("decimal", QueryDataType.DECIMAL), field("time", QueryDataType.TIME), field("date", QueryDataType.DATE), field("timestamp", QueryDataType.TIMESTAMP), field("timestampTz", QueryDataType.TIMESTAMP_WITH_TZ_OFFSET_DATE_TIME), field("object", QueryDataType.OBJECT) ), emptyMap(), null ); assertThat(metadata.getFields()).containsExactly( new MapTableField("string", QueryDataType.VARCHAR, false, QueryPath.create(prefix + ".string")), new MapTableField("boolean", QueryDataType.BOOLEAN, false, QueryPath.create(prefix + ".boolean")), new MapTableField("byte", QueryDataType.TINYINT, false, QueryPath.create(prefix + ".byte")), new MapTableField("short", QueryDataType.SMALLINT, false, QueryPath.create(prefix + ".short")), new MapTableField("int", QueryDataType.INT, false, QueryPath.create(prefix + ".int")), new MapTableField("long", QueryDataType.BIGINT, false, QueryPath.create(prefix + ".long")), new MapTableField("float", QueryDataType.REAL, false, QueryPath.create(prefix + ".float")), new MapTableField("double", QueryDataType.DOUBLE, false, QueryPath.create(prefix + ".double")), new MapTableField("decimal", QueryDataType.DECIMAL, false, QueryPath.create(prefix + ".decimal")), new MapTableField("time", QueryDataType.TIME, false, QueryPath.create(prefix + ".time")), new MapTableField("date", QueryDataType.DATE, false, QueryPath.create(prefix + ".date")), new MapTableField("timestamp", QueryDataType.TIMESTAMP, false, QueryPath.create(prefix + ".timestamp")), new MapTableField("timestampTz", QueryDataType.TIMESTAMP_WITH_TZ_OFFSET_DATE_TIME, false, QueryPath.create(prefix + ".timestampTz")), new MapTableField("object", QueryDataType.OBJECT, false, QueryPath.create(prefix + ".object")), new MapTableField(prefix, QueryDataType.OBJECT, true, QueryPath.create(prefix)) ); assertThat(metadata.getQueryTargetDescriptor()).isEqualTo(AvroQueryTargetDescriptor.INSTANCE); assertThat(metadata.getUpsertTargetDescriptor()).isEqualToComparingFieldByField( new AvroUpsertTargetDescriptor(SchemaBuilder.record("jet.sql").fields() .optionalString("string") .optionalBoolean("boolean") .optionalInt("byte") .optionalInt("short") .optionalInt("int") .optionalLong("long") .optionalFloat("float") .optionalDouble("double") .optionalString("decimal") .optionalString("time") .optionalString("date") .optionalString("timestamp") .optionalString("timestampTz") .name("object").type(OBJECT_SCHEMA).withDefault(null) .endRecord())); }
@Override public Future<Void> notifyCheckpointAbortAsync( long checkpointId, long latestCompletedCheckpointId) { return notifyCheckpointOperation( () -> { if (latestCompletedCheckpointId > 0) { notifyCheckpointComplete(latestCompletedCheckpointId); } if (isCurrentSyncSavepoint(checkpointId)) { throw new FlinkRuntimeException("Stop-with-savepoint failed."); } subtaskCheckpointCoordinator.notifyCheckpointAborted( checkpointId, operatorChain, this::isRunning); }, String.format("checkpoint %d aborted", checkpointId)); }
@Test void testSavepointTerminateAbortedAsync() { assertThatThrownBy( () -> testSyncSavepointWithEndInput( (streamTask, abortCheckpointId) -> streamTask.notifyCheckpointAbortAsync( abortCheckpointId, 0), SavepointType.terminate(SavepointFormatType.CANONICAL), true)) .isInstanceOf(FlinkRuntimeException.class) .hasMessage("Stop-with-savepoint failed."); }
@Override public boolean removeProperties(Namespace namespace, Set<String> properties) throws NoSuchNamespaceException { if (!namespaceExists(namespace)) { throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace); } Preconditions.checkNotNull(properties, "Invalid properties to remove: null"); if (properties.isEmpty()) { return false; } return deleteProperties(namespace, properties); }
@Test public void testRemoveProperties() { Namespace testNamespace = Namespace.of("testDb", "ns1", "ns2"); Map<String, String> testMetadata = ImmutableMap.of( "key_1", "value_1", "key_2", "value_2", "key_3", "value_3", "key_4", "value_4"); catalog.createNamespace(testNamespace, testMetadata); Set<String> propertiesToRemove = ImmutableSet.of("key_2", "key_4"); catalog.removeProperties(testNamespace, propertiesToRemove); Map<String, String> remainderProperties = catalog.loadNamespaceMetadata(testNamespace); assertThat(remainderProperties) .hasSize(3) .containsKey("key_1") .containsKey("key_3") .containsKey("location"); // Remove the remaining user properties and verify this does not delete the namespace itself Set<String> allProperties = ImmutableSet.of("key_1", "key_3"); catalog.removeProperties(testNamespace, allProperties); assertThat(catalog.namespaceExists(testNamespace)).isTrue(); }
static void validateConnectors(KafkaMirrorMaker2 kafkaMirrorMaker2) { if (kafkaMirrorMaker2.getSpec() == null) { throw new InvalidResourceException(".spec section is required for KafkaMirrorMaker2 resource"); } else { if (kafkaMirrorMaker2.getSpec().getClusters() == null || kafkaMirrorMaker2.getSpec().getMirrors() == null) { throw new InvalidResourceException(".spec.clusters and .spec.mirrors sections are required in KafkaMirrorMaker2 resource"); } else { Set<String> existingClusterAliases = kafkaMirrorMaker2.getSpec().getClusters().stream().map(KafkaMirrorMaker2ClusterSpec::getAlias).collect(Collectors.toSet()); Set<String> errorMessages = new HashSet<>(); String connectCluster = kafkaMirrorMaker2.getSpec().getConnectCluster(); for (KafkaMirrorMaker2MirrorSpec mirror : kafkaMirrorMaker2.getSpec().getMirrors()) { if (mirror.getSourceCluster() == null) { errorMessages.add("Each MirrorMaker 2 mirror definition has to specify the source cluster alias"); } else if (!existingClusterAliases.contains(mirror.getSourceCluster())) { errorMessages.add("Source cluster alias " + mirror.getSourceCluster() + " is used in a mirror definition, but cluster with this alias does not exist in cluster definitions"); } if (mirror.getTargetCluster() == null) { errorMessages.add("Each MirrorMaker 2 mirror definition has to specify the target cluster alias"); } else if (!existingClusterAliases.contains(mirror.getTargetCluster())) { errorMessages.add("Target cluster alias " + mirror.getTargetCluster() + " is used in a mirror definition, but cluster with this alias does not exist in cluster definitions"); } if (mirror.getTargetCluster() != null && !mirror.getTargetCluster().equals(connectCluster)) { errorMessages.add("Connect cluster alias (currently set to " + connectCluster + ") has to be the same as the target cluster alias " + mirror.getTargetCluster()); } } if (!errorMessages.isEmpty()) { throw new InvalidResourceException("KafkaMirrorMaker2 resource validation failed: " + errorMessages); } } } }
@Test public void testValidation() { assertDoesNotThrow(() -> KafkaMirrorMaker2Connectors.validateConnectors(KMM2)); }
@Override @CacheEvict(cacheNames = RedisKeyConstants.OAUTH_CLIENT, allEntries = true) // allEntries evicts the entire cache, because id is not the direct cache key and targeted eviction is impractical public void deleteOAuth2Client(Long id) { // validate existence validateOAuth2ClientExists(id); // delete oauth2ClientMapper.deleteById(id); }
@Test public void testDeleteOAuth2Client_success() { // mock data OAuth2ClientDO dbOAuth2Client = randomPojo(OAuth2ClientDO.class); oauth2ClientMapper.insert(dbOAuth2Client);// @Sql: first insert an existing record // prepare parameters Long id = dbOAuth2Client.getId(); // invoke oauth2ClientService.deleteOAuth2Client(id); // verify the record no longer exists assertNull(oauth2ClientMapper.selectById(id)); }
public QProfileChangeDto toDto(@Nullable String userUuid) { QProfileChangeDto dto = new QProfileChangeDto(); dto.setChangeType(type.name()); dto.setRulesProfileUuid(getKey().getRuleProfileUuid()); dto.setUserUuid(userUuid); Map<String, String> data = new HashMap<>(); data.put("ruleUuid", getRuleUuid()); parameters.entrySet().stream() .filter(param -> !param.getKey().isEmpty()) .forEach(param -> data.put("param_" + param.getKey(), param.getValue())); if (StringUtils.isNotEmpty(severity)) { data.put("severity", severity); } dto.setData(data); return dto; }
@Test public void toDto() { QProfileDto profile = newQualityProfileDto(); ActiveRuleKey key = ActiveRuleKey.of(profile, RuleKey.of("P1", "R1")); String ruleUuid = Uuids.createFast(); ActiveRuleChange underTest = new ActiveRuleChange(ACTIVATED, key, new RuleDto().setUuid(ruleUuid)); QProfileChangeDto result = underTest.toDto(A_USER_UUID); assertThat(result.getChangeType()).isEqualTo(ACTIVATED.name()); assertThat(result.getRulesProfileUuid()).isEqualTo(profile.getRulesProfileUuid()); assertThat(result.getUserUuid()).isEqualTo(A_USER_UUID); assertThat(result.getDataAsMap()).containsEntry("ruleUuid", ruleUuid); }
public String stringify(boolean value) { throw new UnsupportedOperationException( "stringify(boolean) was called on a non-boolean stringifier: " + toString()); }
@Test public void testTimeStringifier() { for (PrimitiveStringifier stringifier : asList(TIME_STRINGIFIER, TIME_UTC_STRINGIFIER)) { String timezoneAmendment = (stringifier == TIME_STRINGIFIER ? "" : "+0000"); assertEquals(withZoneString("00:00:00.000", timezoneAmendment), stringifier.stringify(0)); assertEquals(withZoneString("00:00:00.000000", timezoneAmendment), stringifier.stringify(0l)); assertEquals(withZoneString("12:34:56.789", timezoneAmendment), stringifier.stringify((int) convert(MILLISECONDS, 12, 34, 56, 789))); assertEquals( withZoneString("12:34:56.789012", timezoneAmendment), stringifier.stringify(convert(MICROSECONDS, 12, 34, 56, 789012))); assertEquals(withZoneString("-12:34:56.789", timezoneAmendment), stringifier.stringify((int) convert(MILLISECONDS, -12, -34, -56, -789))); assertEquals( withZoneString("-12:34:56.789012", timezoneAmendment), stringifier.stringify(convert(MICROSECONDS, -12, -34, -56, -789012))); assertEquals(withZoneString("123:12:34.567", timezoneAmendment), stringifier.stringify((int) convert(MILLISECONDS, 123, 12, 34, 567))); assertEquals( withZoneString("12345:12:34.056789", timezoneAmendment), stringifier.stringify(convert(MICROSECONDS, 12345, 12, 34, 56789))); assertEquals(withZoneString("-123:12:34.567", timezoneAmendment), stringifier.stringify((int) convert(MILLISECONDS, -123, -12, -34, -567))); assertEquals( withZoneString("-12345:12:34.056789", timezoneAmendment), stringifier.stringify(convert(MICROSECONDS, -12345, -12, -34, -56789))); checkThrowingUnsupportedException(stringifier, Integer.TYPE, Long.TYPE); } }
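The convert(...) helper is test-fixture code not shown here; a plausible stand-in (an assumption, not the actual fixture) reduces signed hour/minute/second components plus a sub-second fraction to one count in the target unit, which is what the stringifier then formats:

import java.util.concurrent.TimeUnit;

// Hypothetical stand-in for the test's convert(...) fixture.
static long convert(TimeUnit unit, long hours, long minutes, long seconds, long fraction) {
    return unit.convert(hours, TimeUnit.HOURS)
            + unit.convert(minutes, TimeUnit.MINUTES)
            + unit.convert(seconds, TimeUnit.SECONDS)
            + fraction; // fraction already expressed in the target unit
}
// convert(MILLISECONDS, 12, 34, 56, 789) == 45_296_789, rendered back as "12:34:56.789"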
public synchronized boolean doChannelCloseEvent(final String remoteAddr, final Channel channel) { boolean removed = false; if (channel != null) { for (final Map.Entry<String, ConcurrentHashMap<Channel, ClientChannelInfo>> entry : this.groupChannelTable .entrySet()) { final String group = entry.getKey(); final ConcurrentHashMap<Channel, ClientChannelInfo> clientChannelInfoTable = entry.getValue(); final ClientChannelInfo clientChannelInfo = clientChannelInfoTable.remove(channel); if (clientChannelInfo != null) { clientChannelTable.remove(clientChannelInfo.getClientId()); removed = true; log.info( "NETTY EVENT: remove channel[{}][{}] from ProducerManager groupChannelTable, producer group: {}", clientChannelInfo.toString(), remoteAddr, group); callProducerChangeListener(ProducerGroupEvent.CLIENT_UNREGISTER, group, clientChannelInfo); if (clientChannelInfoTable.isEmpty()) { ConcurrentHashMap<Channel, ClientChannelInfo> oldGroupTable = this.groupChannelTable.remove(group); if (oldGroupTable != null) { log.info("unregister a producer group[{}] from groupChannelTable", group); callProducerChangeListener(ProducerGroupEvent.GROUP_UNREGISTER, group, null); } } } } } return removed; }
@Test public void doChannelCloseEvent() throws Exception { producerManager.registerProducer(group, clientInfo); AtomicReference<String> groupRef = new AtomicReference<>(); AtomicReference<ClientChannelInfo> clientChannelInfoRef = new AtomicReference<>(); producerManager.appendProducerChangeListener((event, group, clientChannelInfo) -> { switch (event) { case GROUP_UNREGISTER: groupRef.set(group); break; case CLIENT_UNREGISTER: clientChannelInfoRef.set(clientChannelInfo); break; default: break; } }); assertThat(producerManager.getGroupChannelTable().get(group).get(channel)).isNotNull(); assertThat(producerManager.findChannel("clientId")).isNotNull(); producerManager.doChannelCloseEvent("127.0.0.1", channel); assertThat(producerManager.getGroupChannelTable().get(group)).isNull(); assertThat(groupRef.get()).isEqualTo(group); assertThat(clientChannelInfoRef.get()).isSameAs(clientInfo); assertThat(producerManager.findChannel("clientId")).isNull(); }
@Override public Set<TransferItem> find(final CommandLine input, final TerminalAction action, final Path remote) { if(input.getOptionValues(action.name()).length == 2) { switch(action) { case download: return new DownloadTransferItemFinder().find(input, action, remote); case upload: case synchronize: return new UploadTransferItemFinder().find(input, action, remote); } } else { switch(action) { case upload: case synchronize: return Collections.emptySet(); } } // Relative to current working directory using prefix finder. return Collections.singleton( new TransferItem(remote, LocalFactory.get(prefixer.normalize(remote.getName()))) ); }
@Test public void testDownloadFileToDirectoryTarget() throws Exception { final CommandLineParser parser = new PosixParser(); final String temp = System.getProperty("java.io.tmpdir"); final CommandLine input = parser.parse(TerminalOptionsBuilder.options(), new String[]{"--download", "ftps://test.cyberduck.ch/remote/f", temp}); final Set<TransferItem> found = new SingleTransferItemFinder().find(input, TerminalAction.download, new Path("/remote/f", EnumSet.of(Path.Type.file))); assertFalse(found.isEmpty()); final Iterator<TransferItem> iter = found.iterator(); assertEquals(new TransferItem(new Path("/remote/f", EnumSet.of(Path.Type.file)), LocalFactory.get(String.format("%s/f", temp))), iter.next()); }
public static void main(final String[] args) { SpringApplication.run(App.class, args); }
@Test void shouldExecuteApplicationWithoutException() { assertDoesNotThrow(() -> App.main(new String[]{})); }
private List<MySQLPreparedStatementParameterType> getNewParameterTypes(final int paramCount) { List<MySQLPreparedStatementParameterType> result = new ArrayList<>(paramCount); for (int paramIndex = 0; paramIndex < paramCount; paramIndex++) { MySQLBinaryColumnType columnType = MySQLBinaryColumnType.valueOf(payload.readInt1()); int unsignedFlag = payload.readInt1(); result.add(new MySQLPreparedStatementParameterType(columnType, unsignedFlag)); } return result; }
@Test void assertNewWithoutParameter() { byte[] data = {0x01, 0x00, 0x00, 0x00, 0x09, 0x01, 0x00, 0x00, 0x00}; MySQLPacketPayload payload = new MySQLPacketPayload(Unpooled.wrappedBuffer(data), StandardCharsets.UTF_8); MySQLComStmtExecutePacket actual = new MySQLComStmtExecutePacket(payload, 0); assertThat(actual.getStatementId(), is(1)); assertNull(actual.getNewParametersBoundFlag()); assertTrue(actual.getNewParameterTypes().isEmpty()); }
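For orientation, the nine test bytes decode per the MySQL COM_STMT_EXECUTE layout (the command byte already stripped from the payload; reading the fifth byte as the flags byte is an inference from that layout): little-endian 4-byte statement id, 1-byte flags, little-endian 4-byte iteration count. A plain-JDK sketch:

java.nio.ByteBuffer buf = java.nio.ByteBuffer
        .wrap(new byte[] {0x01, 0x00, 0x00, 0x00, 0x09, 0x01, 0x00, 0x00, 0x00})
        .order(java.nio.ByteOrder.LITTLE_ENDIAN);
int statementId = buf.getInt();               // 1, matching the assertion above
int flags = buf.get() & 0xff;                 // 0x09
long iterations = buf.getInt() & 0xFFFFFFFFL; // 1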
@Override public void warn(String msg) { logger.warn(msg); logWarnToJobDashboard(msg); }
@Test void testWarnLoggingWithoutJob() { jobRunrDashboardLogger.warn("simple message"); verify(slfLogger).warn("simple message"); }
public String addFilterForChannel( final String id, final int priority, final Predicate predicate, final String endpoint, final String channel, final boolean update) { return addFilterForChannel(createFilter(id, priority, predicate, endpoint, new PrioritizedFilterStatistics(id)), channel, update); }
@Test void testUpdateFilterDoesNotExist() { String result = filterService.addFilterForChannel(prioritizedFilter, DYNAMIC_ROUTER_CHANNEL, true); assertEquals("Error: Filter could not be updated -- existing filter found with matching ID: false", result); }
@Override public MaterializedTable nonWindowed() { return new KsqlMaterializedTable(inner.nonWindowed()); }
@Test public void shouldFilterNonWindowed_fullScan() { // Given: final MaterializedTable table = materialization.nonWindowed(); givenNoopProject(); when(filter.apply(any(), any(), any())).thenReturn(Optional.empty()); // When: final Iterator<Row> result = table.get(partition); // Then: assertThat(result.hasNext(), is(false)); }
@Operation(summary = "Get single service") @GetMapping(value = "name/{name}", produces = "application/json") @ResponseBody public Service getByName(@PathVariable("name") String name) { return serviceService.getServiceByName(name); }
@Test public void serviceNameNotFound() { when(serviceServiceMock.getServiceByName(anyString())).thenThrow(NotFoundException.class); assertThrows(NotFoundException.class, () -> { controller.getByName("test"); }); }
@SuppressWarnings("unchecked") protected final <E> boolean emitFromTraverser(@Nonnull int[] ordinals, @Nonnull Traverser<E> traverser) { E item; if (pendingItem != null) { item = (E) pendingItem; pendingItem = null; } else { item = traverser.next(); } for (; item != null; item = traverser.next()) { if (!tryEmit(ordinals, item)) { pendingItem = item; return false; } } return true; }
@Test public void when_emitFromTraverserTo1_then_emittedTo1() { // Given Traverser<Object> trav = Traversers.traverseItems(MOCK_ITEM, MOCK_ITEM); boolean done; do { // When done = p.emitFromTraverser(ORDINAL_1, trav); // Then validateReceptionAtOrdinals(MOCK_ITEM, ORDINAL_1); } while (!done); }
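What makes emitFromTraverser safe to call repeatedly is the pendingItem field: an item refused by tryEmit is stashed and retried before the traverser is advanced again, so nothing is lost or duplicated under backpressure. The same pattern in isolation (hypothetical names, not Jet's API):

// Resumable drain: stash the item that failed to emit, retry it first next call.
private Object pending;
boolean drain(java.util.Iterator<Object> it, java.util.function.Predicate<Object> tryEmit) {
    Object item = pending != null ? pending : (it.hasNext() ? it.next() : null);
    pending = null;
    for (; item != null; item = it.hasNext() ? it.next() : null) {
        if (!tryEmit.test(item)) {
            pending = item; // back-pressured: remember where we stopped
            return false;   // caller retries drain(...) later
        }
    }
    return true; // everything emitted
}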
public static StreamExecutionEnvironment getExecutionEnvironment() { return getExecutionEnvironment(new Configuration()); }
@Test void testBufferTimeoutByDefault() { Configuration config = new Configuration(); StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); testBufferTimeout(config, env); }
@SqlNullable @Description("Return the closest points on the two geometries") @ScalarFunction("geometry_nearest_points") @SqlType("array(" + GEOMETRY_TYPE_NAME + ")") public static Block geometryNearestPoints(@SqlType(GEOMETRY_TYPE_NAME) Slice left, @SqlType(GEOMETRY_TYPE_NAME) Slice right) { Geometry leftGeometry = deserialize(left); Geometry rightGeometry = deserialize(right); if (leftGeometry.isEmpty() || rightGeometry.isEmpty()) { return null; } try { Coordinate[] nearestCoordinates = DistanceOp.nearestPoints(leftGeometry, rightGeometry); BlockBuilder blockBuilder = GEOMETRY.createBlockBuilder(null, 2); GEOMETRY.writeSlice(blockBuilder, serialize(createJtsPoint(nearestCoordinates[0]))); GEOMETRY.writeSlice(blockBuilder, serialize(createJtsPoint(nearestCoordinates[1]))); return blockBuilder.build(); } catch (TopologyException e) { throw new PrestoException(INVALID_FUNCTION_ARGUMENT, e.getMessage(), e); } }
@Test public void testGeometryNearestPoints() { assertNearestPoints("POINT (50 100)", "POINT (150 150)", "POINT (50 100)", "POINT (150 150)"); assertNearestPoints("MULTIPOINT (50 100, 50 200)", "POINT (50 100)", "POINT (50 100)", "POINT (50 100)"); assertNearestPoints("LINESTRING (50 100, 50 200)", "LINESTRING (10 10, 20 20)", "POINT (50 100)", "POINT (20 20)"); assertNearestPoints("MULTILINESTRING ((1 1, 5 1), (2 4, 4 4))", "LINESTRING (10 20, 20 50)", "POINT (4 4)", "POINT (10 20)"); assertNearestPoints("POLYGON ((1 1, 1 3, 3 3, 3 1, 1 1))", "POLYGON ((4 4, 4 5, 5 5, 5 4, 4 4))", "POINT (3 3)", "POINT (4 4)"); assertNearestPoints("MULTIPOLYGON (((1 1, 1 3, 3 3, 3 1, 1 1)), ((0 0, 0 2, 2 2, 2 0, 0 0)))", "POLYGON ((10 100, 30 10, 30 100, 10 100))", "POINT (3 3)", "POINT (30 10)"); assertNearestPoints("GEOMETRYCOLLECTION (POINT (0 0), LINESTRING (0 20, 20 0))", "POLYGON ((5 5, 5 6, 6 6, 6 5, 5 5))", "POINT (10 10)", "POINT (6 6)"); assertNoNearestPoints("POINT EMPTY", "POINT (150 150)"); assertNoNearestPoints("POINT (50 100)", "POINT EMPTY"); assertNoNearestPoints("POINT EMPTY", "POINT EMPTY"); assertNoNearestPoints("MULTIPOINT EMPTY", "POINT (50 100)"); assertNoNearestPoints("LINESTRING (50 100, 50 200)", "LINESTRING EMPTY"); assertNoNearestPoints("MULTILINESTRING EMPTY", "LINESTRING (10 20, 20 50)"); assertNoNearestPoints("POLYGON ((1 1, 1 3, 3 3, 3 1, 1 1))", "POLYGON EMPTY"); assertNoNearestPoints("MULTIPOLYGON EMPTY", "POLYGON ((10 100, 30 10, 30 100, 10 100))"); }
@Override public int diff(String... names) { return get(diffAsync(names)); }
@Test public void testDiff() { RSet<Integer> set = redisson.getSet("set"); set.add(5); set.add(6); RSet<Integer> set1 = redisson.getSet("set1"); set1.add(1); set1.add(2); set1.add(3); RSet<Integer> set2 = redisson.getSet("set2"); set2.add(3); set2.add(4); set2.add(5); assertThat(set.diff("set1", "set2")).isEqualTo(2); assertThat(set).containsOnly(1, 2); }
@Nullable public byte[] getValue() { return mValue; }
@Test public void setValue_SINT24_BE() { final MutableData data = new MutableData(new byte[3]); data.setValue(0xfefdfd, Data.FORMAT_UINT24_BE, 0); assertArrayEquals(new byte[] { (byte) 0xFE, (byte) 0xFD, (byte) 0xFD } , data.getValue()); }
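Note the test name says SINT24 while the constant used is FORMAT_UINT24_BE; for this value the byte layout is identical either way. A minimal plain-JDK sketch of the big-endian 24-bit packing being asserted (not the nRF Data API itself):

// Pack the low 24 bits of a value big-endian into three bytes.
int value = 0xfefdfd;
byte[] out = new byte[3];
out[0] = (byte) ((value >> 16) & 0xff); // 0xFE, most significant byte first
out[1] = (byte) ((value >> 8) & 0xff);  // 0xFD
out[2] = (byte) (value & 0xff);         // 0xFD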
public void configure(ReadableConfig configuration, ClassLoader classLoader) { configuration.getOptional(PipelineOptions.GENERIC_TYPES).ifPresent(this::setGenericTypes); configuration.getOptional(PipelineOptions.FORCE_KRYO).ifPresent(this::setForceKryo); configuration.getOptional(PipelineOptions.FORCE_AVRO).ifPresent(this::setForceAvro); configuration .getOptional(PipelineOptions.FORCE_KRYO_AVRO) .ifPresent(this::setForceKryoAvro); configuration .getOptional(PipelineOptions.KRYO_DEFAULT_SERIALIZERS) .map(s -> parseKryoSerializersWithExceptionHandling(classLoader, s)) .ifPresent(s -> this.defaultKryoSerializerClasses = s); configuration .getOptional(PipelineOptions.POJO_REGISTERED_CLASSES) .map(c -> loadClasses(c, classLoader, "Could not load pojo type to be registered.")) .ifPresent(c -> this.registeredPojoTypes = c); configuration .getOptional(PipelineOptions.KRYO_REGISTERED_CLASSES) .map(c -> loadClasses(c, classLoader, "Could not load kryo type to be registered.")) .ifPresent(c -> this.registeredKryoTypes = c); try { configuration .getOptional(PipelineOptions.SERIALIZATION_CONFIG) .ifPresent(c -> parseSerializationConfigWithExceptionHandling(classLoader, c)); } catch (Exception e) { if (!GlobalConfiguration.isStandardYaml()) { throw new UnsupportedOperationException( String.format( "%s is only supported with the standard YAML config parser, please use \"config.yaml\" as the config file.", PipelineOptions.SERIALIZATION_CONFIG.key())); } throw e; } }
@Test void testReadingDefaultConfig() { SerializerConfig config = new SerializerConfigImpl(); Configuration configuration = new Configuration(); // mutate config according to configuration config.configure(configuration, SerializerConfigImplTest.class.getClassLoader()); assertThat(config).isEqualTo(new SerializerConfigImpl()); }
public static Pair<List<RowData>, String[]> transposeColumnStatsIndex(List<RowData> colStats, String[] queryColumns, RowType tableSchema) { Map<String, LogicalType> tableFieldTypeMap = tableSchema.getFields().stream() .collect(Collectors.toMap(RowType.RowField::getName, RowType.RowField::getType)); // NOTE: We have to collect list of indexed columns to make sure we properly align the rows // w/in the transposed dataset: since some files might not have all the columns indexed // either due to the Column Stats Index config changes, schema evolution, etc. we have // to make sure that all the rows w/in transposed data-frame are properly padded (with null // values) for such file-column combinations Set<String> indexedColumns = colStats.stream().map(row -> row.getString(ORD_COL_NAME) .toString()).collect(Collectors.toSet()); // NOTE: We're sorting the columns to make sure final index schema matches layout // of the transposed table TreeSet<String> sortedTargetColumns = Arrays.stream(queryColumns).sorted() .filter(indexedColumns::contains) .collect(Collectors.toCollection(TreeSet::new)); final Map<LogicalType, AvroToRowDataConverters.AvroToRowDataConverter> converters = new ConcurrentHashMap<>(); Map<StringData, List<RowData>> fileNameToRows = colStats.stream().parallel() .filter(row -> sortedTargetColumns.contains(row.getString(ORD_COL_NAME).toString())) .map(row -> { if (row.isNullAt(ORD_MIN_VAL) && row.isNullAt(ORD_MAX_VAL)) { // Corresponding row could be null in either of the 2 cases // - Column contains only null values (in that case both min/max have to be nulls) // - This is a stubbed Column Stats record (used as a tombstone) return row; } else { String colName = row.getString(ORD_COL_NAME).toString(); LogicalType colType = tableFieldTypeMap.get(colName); return unpackMinMaxVal(row, colType, converters); } }).collect(Collectors.groupingBy(rowData -> rowData.getString(ORD_FILE_NAME))); return Pair.of(foldRowsByFiles(sortedTargetColumns, fileNameToRows), sortedTargetColumns.toArray(new String[0])); }
@Test void testTransposeColumnStatsIndex() throws Exception { final String path = tempFile.getAbsolutePath(); Configuration conf = TestConfigurations.getDefaultConf(path); conf.setBoolean(FlinkOptions.METADATA_ENABLED, true); conf.setBoolean(FlinkOptions.READ_DATA_SKIPPING_ENABLED, true); conf.setString("hoodie.metadata.index.column.stats.enable", "true"); HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder() .enable(true) .withMetadataIndexColumnStats(true) .build(); TestData.writeData(TestData.DATA_SET_INSERT, conf); // explicit query columns String[] queryColumns1 = {"uuid", "age"}; List<RowData> indexRows1 = ColumnStatsIndices.readColumnStatsIndex(path, metadataConfig, queryColumns1); Pair<List<RowData>, String[]> transposedIndexTable1 = ColumnStatsIndices .transposeColumnStatsIndex(indexRows1, queryColumns1, TestConfigurations.ROW_TYPE); assertThat("The schema columns should sort by natural order", Arrays.toString(transposedIndexTable1.getRight()), is("[age, uuid]")); List<RowData> transposed1 = filterOutFileNames(transposedIndexTable1.getLeft()); assertThat(transposed1.size(), is(4)); final String expected = "[" + "+I(2,18,20,0,id5,id6,0), " + "+I(2,23,33,0,id1,id2,0), " + "+I(2,31,53,0,id3,id4,0), " + "+I(2,44,56,0,id7,id8,0)]"; assertThat(transposed1.toString(), is(expected)); // no query columns, only for tests assertThrows(IllegalArgumentException.class, () -> ColumnStatsIndices.readColumnStatsIndex(path, metadataConfig, new String[0])); }
public static <T> T[] getCombinationAnnotations(AnnotatedElement annotationEle, Class<T> annotationType) { return getAnnotations(annotationEle, true, annotationType); }
@Test public void getCombinationAnnotationsWithClassTest(){ final AnnotationForTest[] annotations = AnnotationUtil.getCombinationAnnotations(ClassWithAnnotation.class, AnnotationForTest.class); assertNotNull(annotations); assertEquals(1, annotations.length); assertTrue(annotations[0].value().equals("测试") || annotations[0].value().equals("repeat-annotation")); }
public StreamsResponse getKinesisStreamNames(AWSRequest request) throws ExecutionException { LOG.debug("List Kinesis streams for region [{}]", request.region()); final KinesisClient kinesisClient = awsClientBuilderUtil.buildClient(kinesisClientBuilder, request); ListStreamsRequest streamsRequest = ListStreamsRequest.builder().limit(KINESIS_LIST_STREAMS_LIMIT).build(); final ListStreamsResponse listStreamsResponse = kinesisClient.listStreams(streamsRequest); final List<String> streamNames = new ArrayList<>(listStreamsResponse.streamNames()); final Retryer<Boolean> retryer = RetryerBuilder.<Boolean>newBuilder() .retryIfResult(b -> Objects.equals(b, Boolean.TRUE)) .retryIfExceptionOfType(LimitExceededException.class) .withStopStrategy(StopStrategies.stopAfterAttempt(KINESIS_LIST_STREAMS_MAX_ATTEMPTS)) .build(); if (listStreamsResponse.hasMoreStreams()) { try { retryer.call(() -> { LOG.debug("Requesting streams..."); final String lastStreamName = streamNames.get(streamNames.size() - 1); final ListStreamsRequest moreStreamsRequest = ListStreamsRequest.builder() .exclusiveStartStreamName(lastStreamName) .limit(KINESIS_LIST_STREAMS_LIMIT).build(); final ListStreamsResponse moreStreamsResponse = kinesisClient.listStreams(moreStreamsRequest); streamNames.addAll(moreStreamsResponse.streamNames()); // If more streams, then this will execute again. return moreStreamsResponse.hasMoreStreams(); }); } catch (RetryException e) { LOG.error("Failed to get all stream names after {} attempts. Proceeding to return currently obtained streams.", KINESIS_LIST_STREAMS_MAX_ATTEMPTS); } } LOG.debug("Kinesis streams queried: [{}]", streamNames); if (streamNames.isEmpty()) { throw new BadRequestException(String.format(Locale.ROOT, "No Kinesis streams were found in the [%s] region.", request.region())); } return StreamsResponse.create(streamNames, streamNames.size()); }
@Test public void testGetStreams() throws ExecutionException { when(awsClientBuilderUtil.buildClient(any(KinesisClientBuilder.class), any())).thenReturn(kinesisClient); // Test with two streams and one page. This is the most common case for most AWS accounts. when(kinesisClient.listStreams(isA(ListStreamsRequest.class))) .thenReturn(ListStreamsResponse.builder() .streamNames(TWO_TEST_STREAMS) .hasMoreStreams(false).build()); StreamsResponse streamsResponse = kinesisService.getKinesisStreamNames(AWSRequestImpl.builder() .region(TEST_REGION) .awsAccessKeyId("a-key") .awsSecretAccessKey(encryptedValue).build()); assertEquals(2, streamsResponse.total()); assertEquals(2, streamsResponse.streams().size()); // Test with stream paging functionality. This will be the case when a large number of Kinesis streams // are present on a particular AWS account. when(kinesisClient.listStreams(isA(ListStreamsRequest.class))) // First return a response with two streams indicating that there are more. .thenReturn(ListStreamsResponse.builder() .streamNames(TWO_TEST_STREAMS) .hasMoreStreams(true).build()) // Then return a response with two streams and indicate that all have been retrieved. .thenReturn(ListStreamsResponse.builder() .streamNames(TWO_TEST_STREAMS) .hasMoreStreams(false).build()); // Indicate no more streams. streamsResponse = kinesisService.getKinesisStreamNames(AWSRequestImpl.builder() .region(TEST_REGION) .awsAccessKeyId("a-key") .awsSecretAccessKey(encryptedValue).build()); // There should be 4 total streams (two from each page). assertEquals(4, streamsResponse.total()); assertEquals(4, streamsResponse.streams().size()); }
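Stripped of the Retryer machinery, the paging contract the mocks simulate is: request again with exclusiveStartStreamName set to the last name seen, until hasMoreStreams() is false. A plain-loop sketch using the same SDK v2 calls as the method above (kinesisClient assumed in scope; the limit value 30 is illustrative):

// Follow Kinesis stream-name pages until the service reports no more streams.
ListStreamsResponse page = kinesisClient.listStreams(ListStreamsRequest.builder().limit(30).build());
java.util.List<String> names = new java.util.ArrayList<>(page.streamNames());
while (page.hasMoreStreams()) {
    page = kinesisClient.listStreams(ListStreamsRequest.builder()
            .exclusiveStartStreamName(names.get(names.size() - 1)) // resume after the last name
            .limit(30)
            .build());
    names.addAll(page.streamNames());
}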
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay way, IntsRef relationFlags) { // tagging like maxweight:conditional=no/none @ destination/delivery/forestry/service String condValue = way.getTag("maxweight:conditional", ""); if (!condValue.isEmpty()) { String[] values = condValue.split("@"); if (values.length == 2) { String key = values[0].trim(); String value = values[1].trim(); if ("no".equals(key) || "none".equals(key)) { if (value.startsWith("(") && value.endsWith(")")) value = value.substring(1, value.length() - 1); mweEnc.setEnum(false, edgeId, edgeIntAccess, MaxWeightExcept.find(value)); return; } } } // For tagging like vehicle:conditional=destination @ (weight>3.5) AND maxweight=3.5 // For vehicle:conditional=no @ (weight>3.5) => NONE is used, which is consistent with max_weight being set to 3.5 in this case for (String restriction : HGV_RESTRICTIONS) { String value = way.getTag(restriction, ""); int atIndex = value.indexOf("@"); if (atIndex > 0) { double dec = OSMValueExtractor.conditionalWeightToTons(value); // set it only if the weight value is the same as in max_weight if (!Double.isNaN(dec) && (stringToTons(way.getTag("maxweight", "")) == dec || stringToTons(way.getTag("maxweightrating:hgv", "")) == dec || stringToTons(way.getTag("maxgcweight", "")) == dec)) { mweEnc.setEnum(false, edgeId, edgeIntAccess, MaxWeightExcept.find(value.substring(0, atIndex).trim())); break; } } } }
@Test public void testSimpleTags() { EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1); ReaderWay readerWay = new ReaderWay(1); readerWay.setTag("highway", "primary"); readerWay.setTag("maxweight:conditional", "none @delivery"); parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags); assertEquals(MaxWeightExcept.DELIVERY, mwEnc.getEnum(false, edgeId, edgeIntAccess)); edgeIntAccess = new ArrayEdgeIntAccess(1); readerWay.setTag("maxweight:conditional", "no@ (destination)"); parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags); assertEquals(MaxWeightExcept.DESTINATION, mwEnc.getEnum(false, edgeId, edgeIntAccess)); }
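The core of the conditional-tag handling above is a split on '@' with the parentheses stripped from the condition; a reduced sketch of just that step (the real parser additionally cross-checks the conditional weight against maxweight):

// Split "no @ (destination)" into the restriction value and its condition.
String tag = "no @ (destination)";
String[] parts = tag.split("@");
String restriction = parts[0].trim(); // "no"
String condition = parts[1].trim();   // "(destination)"
if (condition.startsWith("(") && condition.endsWith(")")) {
    condition = condition.substring(1, condition.length() - 1); // "destination"
}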
public static TaskExecutorMemoryConfiguration create(Configuration config) { return new TaskExecutorMemoryConfiguration( getConfigurationValue(config, FRAMEWORK_HEAP_MEMORY), getConfigurationValue(config, TASK_HEAP_MEMORY), getConfigurationValue(config, FRAMEWORK_OFF_HEAP_MEMORY), getConfigurationValue(config, TASK_OFF_HEAP_MEMORY), getConfigurationValue(config, NETWORK_MEMORY_MAX), getConfigurationValue(config, MANAGED_MEMORY_SIZE), getConfigurationValue(config, JVM_METASPACE), getConfigurationValue(config, JVM_OVERHEAD_MAX), calculateTotalFlinkMemoryFromComponents(config), calculateTotalProcessMemoryFromComponents(config)); }
@Test void testInitialization() { Configuration config = new Configuration(); config.set(FRAMEWORK_HEAP_MEMORY, new MemorySize(1)); config.set(TASK_HEAP_MEMORY, new MemorySize(2)); config.set(FRAMEWORK_OFF_HEAP_MEMORY, new MemorySize(3)); config.set(TASK_OFF_HEAP_MEMORY, new MemorySize(4)); config.set(NETWORK_MEMORY_MIN, new MemorySize(6)); config.set(NETWORK_MEMORY_MAX, new MemorySize(6)); config.set(NETWORK_MEMORY_FRACTION, 0.1f); config.set(MANAGED_MEMORY_SIZE, new MemorySize(7)); config.set(MANAGED_MEMORY_FRACTION, 0.2f); config.set(JVM_METASPACE, new MemorySize(8)); config.set(JVM_OVERHEAD_MIN, new MemorySize(10)); config.set(JVM_OVERHEAD_MAX, new MemorySize(10)); config.set(JVM_OVERHEAD_FRACTION, 0.3f); TaskExecutorMemoryConfiguration actual = TaskExecutorMemoryConfiguration.create(config); TaskExecutorMemoryConfiguration expected = new TaskExecutorMemoryConfiguration(1L, 2L, 3L, 4L, 6L, 7L, 8L, 10L, 23L, 41L); assertThat(actual).isEqualTo(expected); }
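The last two arguments of the expected object are derived rather than configured: total Flink memory sums the six component sizes, and total process memory adds metaspace and JVM overhead on top. With the values set in the test:

long totalFlink = 1 + 2 + 3 + 4 + 6 + 7; // heap + off-heap + network + managed = 23
long totalProcess = totalFlink + 8 + 10; // + metaspace + JVM overhead = 41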
@Override public void pre(SpanAdapter span, Exchange exchange, Endpoint endpoint) { super.pre(span, exchange, endpoint); String contentType = exchange.getIn().getHeader(CONTENT_TYPE, String.class); if (contentType != null) { span.setTag(SERVICEBUS_CONTENT_TYPE, contentType); } String correlationId = exchange.getIn().getHeader(CORRELATION_ID, String.class); if (correlationId != null) { span.setTag(SERVICEBUS_CORRELATION_ID, correlationId); } Long deliveryCount = exchange.getIn().getHeader(DELIVERY_COUNT, Long.class); if (deliveryCount != null) { span.setTag(SERVICEBUS_DELIVERY_COUNT, deliveryCount); } Long enqueuedSequenceNumber = exchange.getIn().getHeader(ENQUEUED_SEQUENCE_NUMBER, Long.class); if (enqueuedSequenceNumber != null) { span.setTag(SERVICEBUS_ENQUEUED_SEQUENCE_NUMBER, enqueuedSequenceNumber); } OffsetDateTime enqueuedTime = exchange.getIn().getHeader(ENQUEUED_TIME, OffsetDateTime.class); if (enqueuedTime != null) { span.setTag(SERVICEBUS_ENQUEUED_TIME, enqueuedTime.toString()); } OffsetDateTime expiresAt = exchange.getIn().getHeader(EXPIRES_AT, OffsetDateTime.class); if (expiresAt != null) { span.setTag(SERVICEBUS_EXPIRES_AT, expiresAt.toString()); } String partitionKey = exchange.getIn().getHeader(PARTITION_KEY, String.class); if (partitionKey != null) { span.setTag(SERVICEBUS_PARTITION_KEY, partitionKey); } String replyToSessionId = exchange.getIn().getHeader(REPLY_TO_SESSION_ID, String.class); if (replyToSessionId != null) { span.setTag(SERVICEBUS_REPLY_TO_SESSION_ID, replyToSessionId); } String sessionId = exchange.getIn().getHeader(SESSION_ID, String.class); if (sessionId != null) { span.setTag(SERVICEBUS_SESSION_ID, sessionId); } Duration timeToLive = exchange.getIn().getHeader(TIME_TO_LIVE, Duration.class); if (timeToLive != null) { span.setTag(SERVICEBUS_TIME_TO_LIVE, timeToLive.toString()); } }
@Test public void testPre() { String contentType = "application/json"; String correlationId = "1234"; Long deliveryCount = 27L; Long enqueuedSequenceNumber = 1L; OffsetDateTime enqueuedTime = OffsetDateTime.now(); OffsetDateTime expiresAt = OffsetDateTime.now(); String partitionKey = "MyPartitionKey"; String replyToSessionId = "MyReplyToSessionId"; String sessionId = "4321"; Duration ttl = Duration.ofDays(7); Endpoint endpoint = Mockito.mock(Endpoint.class); Exchange exchange = Mockito.mock(Exchange.class); Message message = Mockito.mock(Message.class); Mockito.when(endpoint.getEndpointUri()).thenReturn("azure-servicebus:topicOrQueueName"); Mockito.when(exchange.getIn()).thenReturn(message); Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.CONTENT_TYPE, String.class)).thenReturn(contentType); Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.CORRELATION_ID, String.class)).thenReturn(correlationId); Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.DELIVERY_COUNT, Long.class)).thenReturn(deliveryCount); Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.ENQUEUED_SEQUENCE_NUMBER, Long.class)) .thenReturn(enqueuedSequenceNumber); Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.ENQUEUED_TIME, OffsetDateTime.class)) .thenReturn(enqueuedTime); Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.EXPIRES_AT, OffsetDateTime.class)).thenReturn(expiresAt); Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.PARTITION_KEY, String.class)).thenReturn(partitionKey); Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.REPLY_TO_SESSION_ID, String.class)) .thenReturn(replyToSessionId); Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.SESSION_ID, String.class)).thenReturn(sessionId); Mockito.when(message.getHeader(AzureServiceBusSpanDecorator.TIME_TO_LIVE, Duration.class)).thenReturn(ttl); AbstractMessagingSpanDecorator decorator = new AzureServiceBusSpanDecorator(); MockSpanAdapter span = new MockSpanAdapter(); decorator.pre(span, exchange, endpoint); assertEquals(contentType, span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_CONTENT_TYPE)); assertEquals(correlationId, span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_CORRELATION_ID)); assertEquals(deliveryCount, span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_DELIVERY_COUNT)); assertEquals(enqueuedSequenceNumber, span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_ENQUEUED_SEQUENCE_NUMBER)); assertEquals(enqueuedTime.toString(), span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_ENQUEUED_TIME)); assertEquals(expiresAt.toString(), span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_EXPIRES_AT)); assertEquals(partitionKey, span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_PARTITION_KEY)); assertEquals(replyToSessionId, span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_REPLY_TO_SESSION_ID)); assertEquals(sessionId, span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_SESSION_ID)); assertEquals(ttl.toString(), span.tags().get(AzureServiceBusSpanDecorator.SERVICEBUS_TIME_TO_LIVE)); }
@Override public boolean matchToken(TokenQueue tokenQueue, List<Token> matchedTokenList) { do { Token token = tokenQueue.poll(); matchedTokenList.add(token); if (uptoMatchTokens.contains(token.getValue())) { return true; } } while (tokenQueue.peek() != null); return false; }
@Test public void shouldNotMatch() { Token t1 = new Token("a", 1, 1); Token t2 = new Token("b", 2, 1); TokenQueue tokenQueue = spy(new TokenQueue(Arrays.asList(t1, t2))); List<Token> output = mock(List.class); UptoTokenMatcher matcher = new UptoTokenMatcher(new String[] { ";" }); assertThat(matcher.matchToken(tokenQueue, output), is(false)); verify(tokenQueue, times(2)).poll(); verify(tokenQueue, times(2)).peek(); verifyNoMoreInteractions(tokenQueue); verify(output).add(t1); verify(output).add(t2); verifyNoMoreInteractions(output); }
ControllerResult<Map<String, ApiError>> updateFeatures( Map<String, Short> updates, Map<String, FeatureUpdate.UpgradeType> upgradeTypes, boolean validateOnly ) { TreeMap<String, ApiError> results = new TreeMap<>(); List<ApiMessageAndVersion> records = BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP); for (Entry<String, Short> entry : updates.entrySet()) { results.put(entry.getKey(), updateFeature(entry.getKey(), entry.getValue(), upgradeTypes.getOrDefault(entry.getKey(), FeatureUpdate.UpgradeType.UPGRADE), records)); } if (validateOnly) { return ControllerResult.of(Collections.emptyList(), results); } else { return ControllerResult.atomicOf(records, results); } }
@Test public void testCanUseSafeDowngradeIfMetadataDidNotChange() { FeatureControlManager manager = new FeatureControlManager.Builder(). setQuorumFeatures(features(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_0_IV0.featureLevel(), MetadataVersion.IBP_3_3_IV1.featureLevel())). setMetadataVersion(MetadataVersion.IBP_3_1_IV0). setMinimumBootstrapVersion(MetadataVersion.IBP_3_0_IV0). build(); assertEquals(ControllerResult.of(Collections.emptyList(), singletonMap(MetadataVersion.FEATURE_NAME, ApiError.NONE)), manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_0_IV1.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.SAFE_DOWNGRADE), true)); }
@Override public int multiplication(int n1, int n2) { return n1 * n2; }
@Test public void testMultiplication() { Controlador controlador = new Controlador(); int result = controlador.multiplication(2, 3); assertEquals(6, result); }
public Map<String, String> findTableNames(final Collection<ColumnSegment> columns, final ShardingSphereSchema schema) { if (1 == simpleTables.size()) { return findTableNameFromSingleTable(columns); } Map<String, String> result = new CaseInsensitiveMap<>(); Map<String, Collection<String>> ownerColumnNames = getOwnerColumnNames(columns); result.putAll(findTableNameFromSQL(ownerColumnNames)); Collection<String> noOwnerColumnNames = getNoOwnerColumnNames(columns); result.putAll(findTableNameFromMetaData(noOwnerColumnNames, schema)); result.putAll(findTableNameFromSubquery(columns, result)); return result; }
@Test void assertFindTableNameWhenColumnSegmentOwnerAbsentAndSchemaMetaDataContainsColumnInUpperCase() { SimpleTableSegment tableSegment1 = createTableSegment("TABLE_1", "TBL_1"); SimpleTableSegment tableSegment2 = createTableSegment("TABLE_2", "TBL_2"); ShardingSphereTable table = new ShardingSphereTable("TABLE_1", Collections.singletonList(new ShardingSphereColumn("COL", 0, false, false, true, true, false, false)), Collections.emptyList(), Collections.emptyList()); ShardingSphereSchema schema = new ShardingSphereSchema(DefaultDatabase.LOGIC_NAME, Stream.of(table).collect(Collectors.toMap(ShardingSphereTable::getName, value -> value)), Collections.emptyMap()); ColumnSegment columnSegment = createColumnSegment(null, "COL"); Map<String, String> actual = new TablesContext(Arrays.asList(tableSegment1, tableSegment2), TypedSPILoader.getService(DatabaseType.class, "FIXTURE"), DefaultDatabase.LOGIC_NAME).findTableNames(Collections.singletonList(columnSegment), schema); assertFalse(actual.isEmpty()); assertThat(actual.get("col"), is("TABLE_1")); }
public Iterator<T> getBookmark() { LinkedSetIterator toRet = new LinkedSetIterator(); toRet.next = this.bookmark.next; this.bookmark = toRet; return toRet; }
@Test(timeout=60000) public void testBookmarkAdvancesOnRemoveOfSameElement() { LOG.info("Test that the bookmark advances if we remove its element."); assertTrue(set.add(list.get(0))); assertTrue(set.add(list.get(1))); assertTrue(set.add(list.get(2))); Iterator<Integer> it = set.getBookmark(); assertEquals(it.next(), list.get(0)); set.remove(list.get(1)); it = set.getBookmark(); assertEquals(it.next(), list.get(2)); }
public static String toHexStringWithPrefixZeroPadded(BigInteger value, int size) { return toHexStringZeroPadded(value, size, true); }
@Test public void testToHexStringWithPrefixZeroPadded() { assertEquals(Numeric.toHexStringWithPrefixZeroPadded(BigInteger.ZERO, 5), ("0x00000")); assertEquals( Numeric.toHexStringWithPrefixZeroPadded( new BigInteger("01c52b08330e05d731e38c856c1043288f7d9744", 16), 40), ("0x01c52b08330e05d731e38c856c1043288f7d9744")); assertEquals( Numeric.toHexStringWithPrefixZeroPadded( new BigInteger("01c52b08330e05d731e38c856c1043288f7d9744", 16), 40), ("0x01c52b08330e05d731e38c856c1043288f7d9744")); }
protected FileStatus[] listStatus(JobConf job) throws IOException {
  Path[] dirs = getInputPaths(job);
  if (dirs.length == 0) {
    throw new IOException("No input paths specified in job");
  }

  // get tokens for all the required FileSystems..
  TokenCache.obtainTokensForNamenodes(job.getCredentials(), dirs, job);

  // Whether we need to recursively look into the directory structure
  boolean recursive = job.getBoolean(INPUT_DIR_RECURSIVE, false);

  // creates a MultiPathFilter with the hiddenFileFilter and the
  // user provided one (if any).
  List<PathFilter> filters = new ArrayList<PathFilter>();
  filters.add(hiddenFileFilter);
  PathFilter jobFilter = getInputPathFilter(job);
  if (jobFilter != null) {
    filters.add(jobFilter);
  }
  PathFilter inputFilter = new MultiPathFilter(filters);

  FileStatus[] result;
  int numThreads = job.getInt(
      org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS,
      org.apache.hadoop.mapreduce.lib.input.FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);

  StopWatch sw = new StopWatch().start();
  if (numThreads == 1) {
    List<FileStatus> locatedFiles = singleThreadedListStatus(job, dirs, inputFilter, recursive);
    result = locatedFiles.toArray(new FileStatus[locatedFiles.size()]);
  } else {
    Iterable<FileStatus> locatedFiles = null;
    try {
      LocatedFileStatusFetcher locatedFileStatusFetcher =
          new LocatedFileStatusFetcher(job, dirs, recursive, inputFilter, false);
      locatedFiles = locatedFileStatusFetcher.getFileStatuses();
    } catch (InterruptedException e) {
      throw (IOException) new InterruptedIOException("Interrupted while getting file statuses")
          .initCause(e);
    }
    result = Iterables.toArray(locatedFiles, FileStatus.class);
  }
  sw.stop();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Time taken to get FileStatuses: " + sw.now(TimeUnit.MILLISECONDS));
  }
  LOG.info("Total input files to process : " + result.length);
  return result;
}
@Test public void testListStatusNestedRecursive() throws IOException { Configuration conf = new Configuration(); conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads); List<Path> expectedPaths = org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat .configureTestNestedRecursive(conf, localFs); JobConf jobConf = new JobConf(conf); TextInputFormat fif = new TextInputFormat(); fif.configure(jobConf); FileStatus[] statuses = fif.listStatus(jobConf); org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat .verifyFileStatuses(expectedPaths, Lists.newArrayList(statuses), localFs); }
@CanIgnoreReturnValue public JsonElement set(int index, JsonElement element) { return elements.set(index, element == null ? JsonNull.INSTANCE : element); }
@Test public void testSet() { JsonArray array = new JsonArray(); assertThrows(IndexOutOfBoundsException.class, () -> array.set(0, new JsonPrimitive(1))); JsonPrimitive a = new JsonPrimitive("a"); array.add(a); JsonPrimitive b = new JsonPrimitive("b"); JsonElement oldValue = array.set(0, b); assertThat(oldValue).isEqualTo(a); assertThat(array.get(0).getAsString()).isEqualTo("b"); oldValue = array.set(0, null); assertThat(oldValue).isEqualTo(b); assertThat(array.get(0)).isEqualTo(JsonNull.INSTANCE); oldValue = array.set(0, new JsonPrimitive("c")); assertThat(oldValue).isEqualTo(JsonNull.INSTANCE); assertThat(array.get(0).getAsString()).isEqualTo("c"); assertThat(array).hasSize(1); }
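An illustrative usage sketch (an addition, not part of the source pair; it assumes Gson's standard com.google.gson types): set normalizes a null element to JsonNull.INSTANCE, so callers never read a raw null back.

import com.google.gson.JsonArray;
import com.google.gson.JsonNull;
import com.google.gson.JsonPrimitive;

class JsonArraySetSketch {
    public static void main(String[] args) {
        JsonArray array = new JsonArray();
        array.add(new JsonPrimitive("a"));
        JsonPrimitive old = (JsonPrimitive) array.set(0, null); // returns the displaced "a"
        System.out.println(old.getAsString());                  // a
        System.out.println(array.get(0) == JsonNull.INSTANCE);  // true: null was normalized
    }
}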
@Override
public List<String> readFilesWithRetries(Sleeper sleeper, BackOff backOff)
    throws IOException, InterruptedException {
  IOException lastException = null;
  do {
    try {
      // Match inputPath, which may contain a glob
      Collection<Metadata> files =
          Iterables.getOnlyElement(FileSystems.match(Collections.singletonList(filePattern)))
              .metadata();
      LOG.debug("Found {} file(s) by matching the path: {}", files.size(), filePattern);

      if (files.isEmpty() || !checkTotalNumOfFiles(files)) {
        continue;
      }

      // Read data from file paths
      return readLines(files);
    } catch (IOException e) {
      // Ignore and retry
      lastException = e;
      LOG.warn("Error in file reading. Ignore and retry.");
    }
  } while (BackOffUtils.next(sleeper, backOff));
  // Failed after max retries
  throw new IOException(
      String.format("Unable to read file(s) after retrying %d times", MAX_READ_RETRIES),
      lastException);
}
@Test
public void testReadCustomTemplate() throws Exception {
  String contents1 = "To be or not to be, ", contents2 = "it is not a question.";

  // Customized template: resultSSS-totalNNN
  File tmpFile1 = tmpFolder.newFile("result0-total2");
  File tmpFile2 = tmpFolder.newFile("result1-total2");
  Files.asCharSink(tmpFile1, StandardCharsets.UTF_8).write(contents1);
  Files.asCharSink(tmpFile2, StandardCharsets.UTF_8).write(contents2);

  Pattern customizedTemplate =
      Pattern.compile("(?x) result (?<shardnum>\\d+) - total (?<numshards>\\d+)");
  NumberedShardedFile shardedFile = new NumberedShardedFile(filePattern, customizedTemplate);

  assertThat(shardedFile.readFilesWithRetries(), containsInAnyOrder(contents1, contents2));
}
public List<String> getNamingAddrs() { String namingAddrsKey = String.join(FILE_CONFIG_SPLIT_CHAR, FILE_ROOT_REGISTRY, REGISTRY_TYPE, NAMING_SERVICE_URL_KEY); String urlListStr = FILE_CONFIG.getConfig(namingAddrsKey); if (urlListStr.isEmpty()) { throw new NamingRegistryException("Naming server url can not be null!"); } return Arrays.stream(urlListStr.split(",")).collect(Collectors.toList()); }
@Test public void getNamingAddrsTest() { NamingserverRegistryServiceImpl namingserverRegistryService = NamingserverRegistryServiceImpl.getInstance(); List<String> list = namingserverRegistryService.getNamingAddrs(); assertEquals(list.size(), 1); }
@Override public boolean start() throws IOException { LOG.info("Starting reader using {}", initCheckpoint); try { shardSubscribersPool = createPool(); shardSubscribersPool().start(initCheckpoint); return advance(); } catch (TransientKinesisException e) { throw new IOException(e); } }
@Test public void startReturnsTrueIfSomeDataAvailable() throws IOException { when(subscribersPool.getNextRecord()).thenReturn(a).thenReturn(null); assertThat(reader.start()).isTrue(); }
public Span nextSpan(Message message) {
  TraceContextOrSamplingFlags extracted =
      extractAndClearTraceIdProperties(processorExtractor, message, message);
  Span result = tracer.nextSpan(extracted); // Processor spans use the normal sampler.

  // When an upstream context was not present, lookup keys are unlikely added
  if (extracted.context() == null && !result.isNoop()) {
    // simplify code by re-using an existing MessagingRequest impl
    tagQueueOrTopic(new MessageConsumerRequest(message, destination(message)), result);
  }
  return result;
}
@Test void nextSpan_uses_current_context() { Span child; try (Scope scope = tracing.currentTraceContext().newScope(parent)) { child = jmsTracing.nextSpan(message); } assertChildOf(child.context(), parent); }
static Set<String> getExpiredTopologyIds(Set<String> toposToClean, Map<String, Object> conf) { Set<String> idleTopologies = new HashSet<>(); long topologyDeletionDelay = ObjectReader.getInt( conf.get(DaemonConfig.NIMBUS_TOPOLOGY_BLOBSTORE_DELETION_DELAY_MS), 5 * 60 * 1000); for (String topologyId : toposToClean) { if (Math.max(0, Time.currentTimeMillis() - getTopologyCleanupDetectedTime(topologyId)) >= topologyDeletionDelay) { idleTopologies.add(topologyId); } } rotateTopologyCleanupMap(topologyDeletionDelay); return idleTopologies; }
@Test public void uploadedBlobPersistsMinimumTime() { Set<String> idleTopologies = new HashSet<>(); idleTopologies.add("topology1"); Map<String, Object> conf = new HashMap<>(); conf.put(DaemonConfig.NIMBUS_TOPOLOGY_BLOBSTORE_DELETION_DELAY_MS, 300000); try (Time.SimulatedTime ignored = new Time.SimulatedTime(null)) { Set<String> toDelete = Nimbus.getExpiredTopologyIds(idleTopologies, conf); assertTrue(toDelete.isEmpty()); Time.advanceTime(10 * 60 * 1000L); toDelete = Nimbus.getExpiredTopologyIds(idleTopologies, conf); assertTrue(toDelete.contains("topology1")); assertEquals(1, toDelete.size()); } }
public void autoConfig() throws JoranException { autoConfig(Configurator.class.getClassLoader()); }
@Test public void autoConfigFromServiceLoaderJDK5() throws Exception { assumeTrue(isJDK5()); ClassLoader mockClassLoader = buildMockServiceLoader(this.getClass().getClassLoader()); assertNull(MockConfigurator.context); new ContextInitializer(loggerContext).autoConfig(mockClassLoader); assertNull(MockConfigurator.context); }
@GetMapping("/{id}/{namespaceId}") public ShenyuAdminResult detailSelector(@PathVariable("id") @Valid @Existed(provider = SelectorMapper.class, message = "selector is not existed") final String id, @PathVariable("namespaceId") @Valid @Existed(provider = NamespaceMapper.class, message = "namespaceId is not existed") final String namespaceId) { SelectorVO selectorVO = selectorService.findByIdAndNamespaceId(id, namespaceId); return ShenyuAdminResult.success(ShenyuResultMessage.DETAIL_SUCCESS, selectorVO); }
@Test public void detailSelector() throws Exception { SpringBeanUtils.getInstance().setApplicationContext(mock(ConfigurableApplicationContext.class)); when(SpringBeanUtils.getInstance().getBean(NamespaceMapper.class)).thenReturn(namespaceMapper); when(namespaceMapper.existed(SYS_DEFAULT_NAMESPACE_ID)).thenReturn(true); given(this.selectorService.findByIdAndNamespaceId("1", SYS_DEFAULT_NAMESPACE_ID)).willReturn(selectorVO); this.mockMvc.perform(MockMvcRequestBuilders.get("/selector/{id}/{namespaceId}", "1", SYS_DEFAULT_NAMESPACE_ID)) .andExpect(status().isOk()) .andExpect(jsonPath("$.message", is(ShenyuResultMessage.DETAIL_SUCCESS))) .andExpect(jsonPath("$.data.id", is(selectorVO.getId()))) .andReturn(); }
@Override public Endpoint<Http2RemoteFlowController> remote() { return remoteEndpoint; }
@Test public void removeAllStreamsWithJustOneRemoveStream() throws Exception { client.remote().createStream(2, false); testRemoveAllStreams(); }
public static StringUtils.Pair toParentAndLeaf(String path) { int pos = path.lastIndexOf('.'); int temp = path.lastIndexOf("['"); if (temp != -1 && temp > pos) { pos = temp - 1; } String right = path.substring(pos + 1); if (right.startsWith("[")) { pos = pos + 1; } String left = path.substring(0, pos == -1 ? 0 : pos); return StringUtils.pair(left, right); }
@Test void testParsingParentAndLeafName() { assertEquals(StringUtils.pair("", "$"), Json.toParentAndLeaf("$")); assertEquals(StringUtils.pair("$", "foo"), Json.toParentAndLeaf("$.foo")); assertEquals(StringUtils.pair("$", "['foo']"), Json.toParentAndLeaf("$['foo']")); assertEquals(StringUtils.pair("$.foo", "bar"), Json.toParentAndLeaf("$.foo.bar")); assertEquals(StringUtils.pair("$.foo", "['bar']"), Json.toParentAndLeaf("$.foo['bar']")); assertEquals(StringUtils.pair("$.foo", "bar[0]"), Json.toParentAndLeaf("$.foo.bar[0]")); assertEquals(StringUtils.pair("$.foo", "['bar'][0]"), Json.toParentAndLeaf("$.foo['bar'][0]")); assertEquals(StringUtils.pair("$.foo[2]", "bar[0]"), Json.toParentAndLeaf("$.foo[2].bar[0]")); assertEquals(StringUtils.pair("$.foo[2]", "['bar'][0]"), Json.toParentAndLeaf("$.foo[2]['bar'][0]")); assertEquals(StringUtils.pair("$.foo[2]", "bar"), Json.toParentAndLeaf("$.foo[2].bar")); assertEquals(StringUtils.pair("$.foo[2]", "['bar']"), Json.toParentAndLeaf("$.foo[2]['bar']")); }
public boolean addNewShard(final Shard shard) { var shardId = shard.getId(); if (!shardMap.containsKey(shardId)) { shardMap.put(shardId, shard); return true; } else { return false; } }
@Test void testAddNewShard() { try { var shard = new Shard(1); shardManager.addNewShard(shard); var field = ShardManager.class.getDeclaredField("shardMap"); field.setAccessible(true); var map = (Map<Integer, Shard>) field.get(shardManager); assertEquals(1, map.size()); assertEquals(shard, map.get(1)); } catch (NoSuchFieldException | IllegalAccessException e) { fail("Fail to modify field access."); } }
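A minimal sketch (added for illustration; it assumes ShardManager has a public no-arg constructor): addNewShard reports whether the shard id was actually new.

class ShardManagerSketch {
    public static void main(String[] args) {
        ShardManager shardManager = new ShardManager();
        System.out.println(shardManager.addNewShard(new Shard(1))); // true: id 1 not mapped yet
        System.out.println(shardManager.addNewShard(new Shard(1))); // false: id 1 already present
    }
}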
public static boolean isPojo(Class<?> cls) { return !ReflectUtils.isPrimitives(cls) && !Collection.class.isAssignableFrom(cls) && !Map.class.isAssignableFrom(cls); }
@Test void testIsPojo() throws Exception { assertFalse(PojoUtils.isPojo(boolean.class)); assertFalse(PojoUtils.isPojo(Map.class)); assertFalse(PojoUtils.isPojo(List.class)); assertTrue(PojoUtils.isPojo(Person.class)); }
@Override
public void deleteRewardActivity(Long id) {
    // verify that the activity exists
    RewardActivityDO dbRewardActivity = validateRewardActivityExists(id);
    if (!dbRewardActivity.getStatus().equals(PromotionActivityStatusEnum.CLOSE.getStatus())) {
        // an activity that has not been closed cannot be deleted
        throw exception(REWARD_ACTIVITY_DELETE_FAIL_STATUS_NOT_CLOSED);
    }
    // delete
    rewardActivityMapper.deleteById(id);
}
@Test
public void testDeleteRewardActivity_success() {
    // mock data
    RewardActivityDO dbRewardActivity = randomPojo(RewardActivityDO.class,
            o -> o.setStatus(PromotionActivityStatusEnum.CLOSE.getStatus()));
    rewardActivityMapper.insert(dbRewardActivity); // @Sql: insert an existing record first
    // prepare the parameter
    Long id = dbRewardActivity.getId();

    // invoke
    rewardActivityService.deleteRewardActivity(id);

    // verify the record no longer exists
    assertNull(rewardActivityMapper.selectById(id));
}
public static SchemaAndValue parseString(String value) { if (value == null) { return NULL_SCHEMA_AND_VALUE; } if (value.isEmpty()) { return new SchemaAndValue(Schema.STRING_SCHEMA, value); } ValueParser parser = new ValueParser(new Parser(value)); return parser.parse(false); }
@Test public void shouldParseNull() { SchemaAndValue schemaAndValue = Values.parseString("null"); assertNull(schemaAndValue); }
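A small sketch (added; the behavior is read directly off the focal method, assuming Kafka Connect's org.apache.kafka.connect.data package): null input short-circuits to the shared null SchemaAndValue constant, while an empty string comes back as a STRING-schema value.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.data.Values;

class ParseStringSketch {
    public static void main(String[] args) {
        SchemaAndValue forNull = Values.parseString(null);
        System.out.println(forNull.value());  // assumed: the shared constant wraps a null value
        SchemaAndValue forEmpty = Values.parseString("");
        System.out.println(forEmpty.schema() == Schema.STRING_SCHEMA); // true
        System.out.println("[" + forEmpty.value() + "]");              // []
    }
}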
public static List<UpdateRequirement> forUpdateTable( TableMetadata base, List<MetadataUpdate> metadataUpdates) { Preconditions.checkArgument(null != base, "Invalid table metadata: null"); Preconditions.checkArgument(null != metadataUpdates, "Invalid metadata updates: null"); Builder builder = new Builder(base, false); builder.require(new UpdateRequirement.AssertTableUUID(base.uuid())); metadataUpdates.forEach(builder::update); return builder.build(); }
@Test public void setDefaultSortOrderFailure() { int sortOrderId = SortOrder.unsorted().orderId(); when(updated.defaultSortOrderId()).thenReturn(sortOrderId + 1); List<UpdateRequirement> requirements = UpdateRequirements.forUpdateTable( metadata, ImmutableList.of(new MetadataUpdate.SetDefaultSortOrder(sortOrderId))); assertThatThrownBy(() -> requirements.forEach(req -> req.validate(updated))) .isInstanceOf(CommitFailedException.class) .hasMessage("Requirement failed: default sort order changed: expected id 0 != 1"); }
@Override public Hotspots.HotspotLite generateClosedIssueMessage(String uuid) { return Hotspots.HotspotLite.newBuilder() .setKey(uuid) .setClosed(true) .build(); }
@Test public void generateClosedIssueMessage_shouldMapClosedHotspotFields() { HotspotLite result = underTest.generateClosedIssueMessage("uuid"); assertThat(result).extracting(HotspotLite::getKey, HotspotLite::getClosed) .containsExactly("uuid", true); }
public static Cluster bootstrap(List<InetSocketAddress> addresses) { List<Node> nodes = new ArrayList<>(); int nodeId = -1; for (InetSocketAddress address : addresses) nodes.add(new Node(nodeId--, address.getHostString(), address.getPort())); return new Cluster(null, true, nodes, new ArrayList<>(0), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap()); }
@Test public void testBootstrap() { String ipAddress = "140.211.11.105"; String hostName = "www.example.com"; Cluster cluster = Cluster.bootstrap(Arrays.asList( new InetSocketAddress(ipAddress, 9002), new InetSocketAddress(hostName, 9002) )); Set<String> expectedHosts = Utils.mkSet(ipAddress, hostName); Set<String> actualHosts = new HashSet<>(); for (Node node : cluster.nodes()) actualHosts.add(node.host()); assertEquals(expectedHosts, actualHosts); }
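An illustrative sketch (added, not from the source; the host names are placeholders): bootstrap assigns synthetic negative node ids, decrementing from -1 per address, since real broker ids are unknown before the first metadata fetch.

import java.net.InetSocketAddress;
import java.util.Arrays;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;

class BootstrapSketch {
    public static void main(String[] args) {
        Cluster cluster = Cluster.bootstrap(Arrays.asList(
                new InetSocketAddress("broker1.example.com", 9092),
                new InetSocketAddress("broker2.example.com", 9092)));
        for (Node node : cluster.nodes()) {
            // ids are -1, -2, ... (one per bootstrap address)
            System.out.println(node.id() + " -> " + node.host() + ":" + node.port());
        }
    }
}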
public static Types.StructType selectNot(Types.StructType struct, Set<Integer> fieldIds) { Set<Integer> projectedIds = getIdsInternal(struct, false); projectedIds.removeAll(fieldIds); return project(struct, projectedIds); }
@Test
public void testSelectNot() {
  Schema schema =
      new Schema(
          Lists.newArrayList(
              required(1, "id", Types.LongType.get()),
              required(
                  2,
                  "location",
                  Types.StructType.of(
                      required(3, "lat", Types.DoubleType.get()),
                      required(4, "long", Types.DoubleType.get())))));

  Schema expectedNoPrimitive =
      new Schema(
          Lists.newArrayList(
              required(
                  2,
                  "location",
                  Types.StructType.of(
                      required(3, "lat", Types.DoubleType.get()),
                      required(4, "long", Types.DoubleType.get())))));
  Schema actualNoPrimitive = TypeUtil.selectNot(schema, Sets.newHashSet(1));
  assertThat(actualNoPrimitive.asStruct()).isEqualTo(expectedNoPrimitive.asStruct());

  // Expected legacy behavior is to completely remove structs if their elements are removed
  Schema expectedNoStructElements = new Schema(required(1, "id", Types.LongType.get()));
  Schema actualNoStructElements = TypeUtil.selectNot(schema, Sets.newHashSet(3, 4));
  assertThat(actualNoStructElements.asStruct()).isEqualTo(expectedNoStructElements.asStruct());

  // Expected legacy behavior is to ignore selectNot on struct elements.
  Schema actualNoStruct = TypeUtil.selectNot(schema, Sets.newHashSet(2));
  assertThat(actualNoStruct.asStruct()).isEqualTo(schema.asStruct());
}
public Map<TopicPartition, Long> endOffsets(Set<TopicPartition> partitions) {
    if (partitions == null || partitions.isEmpty()) {
        return Collections.emptyMap();
    }
    Map<TopicPartition, OffsetSpec> offsetSpecMap =
            partitions.stream().collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.latest()));
    ListOffsetsResult resultFuture =
            admin.listOffsets(offsetSpecMap, new ListOffsetsOptions(IsolationLevel.READ_UNCOMMITTED));
    // Get the individual result for each topic partition so we have better error messages
    Map<TopicPartition, Long> result = new HashMap<>();
    for (TopicPartition partition : partitions) {
        try {
            ListOffsetsResultInfo info = resultFuture.partitionResult(partition).get();
            result.put(partition, info.offset());
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            String topic = partition.topic();
            if (cause instanceof AuthorizationException) {
                String msg = String.format("Not authorized to get the end offsets for topic '%s' on brokers at %s", topic, bootstrapServers);
                throw new ConnectException(msg, cause);
            } else if (cause instanceof UnsupportedVersionException) {
                // Should theoretically never happen, because this method is the same as what the consumer uses and therefore
                // should exist in the broker since before the admin client was added
                String msg = String.format("API to get the end offsets for topic '%s' is unsupported on brokers at %s", topic, bootstrapServers);
                throw new UnsupportedVersionException(msg, cause);
            } else if (cause instanceof TimeoutException) {
                String msg = String.format("Timed out while waiting to get end offsets for topic '%s' on brokers at %s", topic, bootstrapServers);
                throw new TimeoutException(msg, cause);
            } else if (cause instanceof LeaderNotAvailableException) {
                String msg = String.format("Unable to get end offsets during leader election for topic '%s' on brokers at %s", topic, bootstrapServers);
                throw new LeaderNotAvailableException(msg, cause);
            } else if (cause instanceof org.apache.kafka.common.errors.RetriableException) {
                throw (org.apache.kafka.common.errors.RetriableException) cause;
            } else {
                String msg = String.format("Error while getting end offsets for topic '%s' on brokers at %s", topic, bootstrapServers);
                throw new ConnectException(msg, cause);
            }
        } catch (InterruptedException e) {
            Thread.interrupted();
            String msg = String.format("Interrupted while attempting to read end offsets for topic '%s' on brokers at %s", partition.topic(), bootstrapServers);
            throw new RetriableException(msg, e);
        }
    }
    return result;
}
@Test
public void endOffsetsShouldFailWithUnsupportedVersionWhenVersionUnsupportedErrorOccurs() {
    String topicName = "myTopic";
    TopicPartition tp1 = new TopicPartition(topicName, 0);
    Set<TopicPartition> tps = Collections.singleton(tp1);
    Long offset = null; // response should use error
    Cluster cluster = createCluster(1, topicName, 1);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        env.kafkaClient().prepareResponse(listOffsetsResultWithUnsupportedVersion(tp1, offset));
        TopicAdmin admin = new TopicAdmin(env.adminClient());
        assertThrows(UnsupportedVersionException.class, () -> admin.endOffsets(tps));
    }
}
@Override public String getBasePath() { ClassLoader prevClassLoader = Thread.currentThread().getContextClassLoader(); try { Thread.currentThread().setContextClassLoader(classLoader); return servlet.getBasePath(); } finally { Thread.currentThread().setContextClassLoader(prevClassLoader); } }
@Test public void testWrapper() throws Exception { AdditionalServlet servlet = mock(AdditionalServlet.class); NarClassLoader loader = mock(NarClassLoader.class); AdditionalServletWithClassLoader wrapper = new AdditionalServletWithClassLoader(servlet, loader); String basePath = "metrics/pulsar"; when(servlet.getBasePath()).thenReturn(basePath); assertEquals(basePath, wrapper.getBasePath()); verify(servlet, times(1)).getBasePath(); }
public String defaultBranchName() { return defaultBranchName; }
@Test public void defaultBranchName() { for (int i = 0; i <= nonMainBranches.size(); i++) { List<BranchInfo> branches = new ArrayList<>(nonMainBranches); branches.add(i, mainBranch); assertThat(new ProjectBranches(branches).defaultBranchName()).isEqualTo(mainBranch.name()); } }
public void startAsync() { try { udfLoader.load(); ProcessingLogServerUtils.maybeCreateProcessingLogTopic( serviceContext.getTopicClient(), processingLogConfig, ksqlConfig); if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) { log.warn("processing log auto-create is enabled, but this is not supported " + "for headless mode."); } rocksDBConfigSetterHandler.accept(ksqlConfig); processesQueryFile(readQueriesFile(queriesFile)); showWelcomeMessage(); final Properties properties = new Properties(); ksqlConfig.originals().forEach((key, value) -> { if (nonNull(value)) { properties.put(key, value.toString()); } }); versionChecker.start(KsqlModuleType.SERVER, properties); } catch (final Exception e) { log.error("Failed to start KSQL Server with query file: " + queriesFile, e); throw e; } }
@Test
public void shouldRunCtStatement() {
  // Given:
  final PreparedStatement<CreateTable> ct = PreparedStatement.of("CT",
      new CreateTable(SOME_NAME, SOME_ELEMENTS, false, false, JSON_PROPS, false));

  givenQueryFileParsesTo(ct);

  // When:
  standaloneExecutor.startAsync();

  // Then:
  verify(ksqlEngine).execute(serviceContext,
      ConfiguredStatement.of(ct, SessionConfig.of(ksqlConfig, emptyMap())));
}
@Override public ObjectNode encode(MappingAction action, CodecContext context) { EncodeMappingActionCodecHelper encoder = new EncodeMappingActionCodecHelper(action, context); return encoder.encode(); }
@Test public void forwardActionTest() { final ForwardMappingAction action = MappingActions.forward(); final ObjectNode actionJson = actionCodec.encode(action, context); assertThat(actionJson, matchesAction(action)); }
public ProviderCert getProviderConnectionConfig(URL localAddress, SocketAddress remoteAddress) { for (CertProvider certProvider : certProviders) { if (certProvider.isSupport(localAddress)) { ProviderCert cert = certProvider.getProviderConnectionConfig(localAddress); if (cert != null) { return cert; } } } return null; }
@Test void testGetProviderConnectionConfig() { CertManager certManager = new CertManager(frameworkModel); Assertions.assertNull(certManager.getProviderConnectionConfig(url, null)); ProviderCert providerCert1 = Mockito.mock(ProviderCert.class); FirstCertProvider.setProviderCert(providerCert1); Assertions.assertNull(certManager.getProviderConnectionConfig(url, null)); FirstCertProvider.setSupport(true); Assertions.assertEquals(providerCert1, certManager.getProviderConnectionConfig(url, null)); ProviderCert providerCert2 = Mockito.mock(ProviderCert.class); SecondCertProvider.setProviderCert(providerCert2); Assertions.assertEquals(providerCert1, certManager.getProviderConnectionConfig(url, null)); SecondCertProvider.setSupport(true); Assertions.assertEquals(providerCert1, certManager.getProviderConnectionConfig(url, null)); FirstCertProvider.setSupport(false); Assertions.assertEquals(providerCert2, certManager.getProviderConnectionConfig(url, null)); FirstCertProvider.setSupport(true); FirstCertProvider.setProviderCert(null); Assertions.assertEquals(providerCert2, certManager.getProviderConnectionConfig(url, null)); }
@Override public Lock lock() { return locker.lock(); }
@Test void whileLockedJobCannotBeLockedForOtherSaveAction() { Job job = anEnqueuedJob().build(); final AtomicBoolean atomicBoolean = new AtomicBoolean(); final Lock lock = job.lock(); await() .during(1, TimeUnit.SECONDS) .atMost(2, TimeUnit.SECONDS) .pollInterval(200, TimeUnit.MILLISECONDS) .untilAsserted(() -> { new Thread(() -> { job.lock(); atomicBoolean.set(true); }).start(); assertThat(lock.isLocked()).isTrue(); assertThat(atomicBoolean).isFalse(); }); }
protected void readLine(URL url, String line) {
    String[] aliasAndClassName = parseAliasAndClassName(line);
    if (aliasAndClassName == null || aliasAndClassName.length != 2) {
        return;
    }
    String alias = aliasAndClassName[0];
    String className = aliasAndClassName[1];
    // load the configured implementation class
    Class tmp;
    try {
        tmp = ClassUtils.forName(className, false);
    } catch (Throwable e) {
        if (LOGGER.isWarnEnabled()) {
            LOGGER.warn("Extension {} of extensible {} is disabled, cause by: {}",
                className, interfaceName, ExceptionUtils.toShortString(e, 2));
        }
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Extension " + className + " of extensible " + interfaceName + " is disabled.", e);
        }
        return;
    }
    loadExtension(alias, tmp, StringUtils.toString(url), className);
}
@Test
public void testReadLine() throws Exception {
    ExtensionLoader loader = new ExtensionLoader<Filter>(Filter.class, false, null);
    URL url = Filter.class.getResource("/META-INF/sofa-rpc/" + Filter.class.getName());
    try {
        loader.readLine(url, "com.alipay.sofa.rpc.ext.NotFilter");
    } catch (Throwable t) {
        LOGGER.error(t.getMessage());
    }
    Assert.assertTrue(loader.all.isEmpty());
    try {
        loader.readLine(url, null);
    } catch (Throwable t) {
        LOGGER.error(t.getMessage());
    }
    Assert.assertTrue(loader.all.isEmpty());
    try {
        loader.readLine(url, " ");
    } catch (Throwable t) {
        LOGGER.error(t.getMessage());
    }
    Assert.assertTrue(loader.all.isEmpty());
    loader.all.clear();
    try {
        loader.readLine(url, "com.alipay.sofa.rpc.ext.WrongFilter0");
    } catch (Throwable t) {
        LOGGER.error(t.getMessage());
    }
    try {
        loader.readLine(url, "com.alipay.sofa.rpc.ext.WrongFilter1");
    } catch (Throwable t) {
        LOGGER.error(t.getMessage());
    }
    try {
        loader.readLine(url, "com.alipay.sofa.rpc.ext.WrongFilter2");
    } catch (Throwable t) {
        LOGGER.error(t.getMessage());
    }
    try {
        loader.readLine(url, "com.alipay.sofa.rpc.ext.WrongFilter3");
    } catch (Throwable t) {
        LOGGER.error(t.getMessage());
    }
    try {
        loader.readLine(url, "w3=com.alipay.sofa.rpc.ext.WrongFilter4");
    } catch (Throwable t) {
        LOGGER.error(t.getMessage());
    }
    try {
        loader.readLine(url, "echo1=com.alipay.sofa.rpc.ext.ExtensionFilter");
    } catch (Throwable t) {
        LOGGER.error(t.getMessage());
    }
    Assert.assertTrue(loader.all.isEmpty());
    try {
        loader.readLine(url, "com.alipay.sofa.rpc.ext.RightFilter0");
    } catch (Throwable t) {
        LOGGER.error(t.getMessage());
    }
    Assert.assertFalse(loader.all.isEmpty());
    loader.all.clear();
    try {
        loader.readLine(url, "rightxx0=com.alipay.sofa.rpc.ext.RightFilter0");
    } catch (Throwable t) {
        LOGGER.error(t.getMessage());
    }
    Assert.assertFalse(loader.all.isEmpty());
    // load the same extension again (duplicate)
    boolean isOk = true;
    try {
        loader.readLine(url, "com.alipay.sofa.rpc.ext.RightFilter0");
    } catch (Throwable t) {
        LOGGER.error(t.getMessage());
        isOk = false;
    }
    Assert.assertFalse(isOk);
    Assert.assertFalse(loader.all.isEmpty());
    Assert.assertTrue(loader.all.size() == 1);

    ExtensionLoader loader2 = new ExtensionLoader<Protocol>(Protocol.class, false, null);
    URL url2 = Filter.class.getResource("/META-INF/sofa-rpc/" + Protocol.class.getName());
    try {
        loader2.readLine(url2, "com.alipay.sofa.rpc.ext.WrongProtocol");
    } catch (Throwable t) {
        LOGGER.error(t.getMessage());
    }
    Assert.assertTrue(loader2.all.isEmpty());
}
public static void checkValuesEqual( long value1, String value1Name, long value2, String value2Name) { checkArgument( value1 == value2, "'%s' (%s) must equal '%s' (%s).", value1Name, value1, value2Name, value2); }
@Test
public void testCheckValuesEqual() throws Exception {
  // Should not throw.
  Validate.checkValuesEqual(1, "arg1", 1, "arg2");

  // Verify it throws.
  intercept(
      IllegalArgumentException.class,
      "'arg1' (1) must equal 'arg2' (2)",
      () -> Validate.checkValuesEqual(1, "arg1", 2, "arg2"));
}
public static SerdeFeatures buildInternal(final Format keyFormat) { final ImmutableSet.Builder<SerdeFeature> builder = ImmutableSet.builder(); getKeyWrapping(true, keyFormat) .ifPresent(builder::add); return SerdeFeatures.from(builder.build()); }
@Test
public void shouldNotSetUnwrappedKeysIfInternalTopicHasKeyFormatsSupportsOnlyWrapping() {
  // When:
  final SerdeFeatures result = SerdeFeaturesFactory.buildInternal(PROTOBUF);

  // Then:
  assertThat(result.findAny(SerdeFeatures.WRAPPING_FEATURES), is(Optional.empty()));
}
public static Configuration unix() { return UnixHolder.UNIX; }
@Test public void testFileSystemWithDefaultWatchService() throws IOException { FileSystem fs = Jimfs.newFileSystem(Configuration.unix()); WatchService watchService = fs.newWatchService(); assertThat(watchService).isInstanceOf(PollingWatchService.class); PollingWatchService pollingWatchService = (PollingWatchService) watchService; assertThat(pollingWatchService.interval).isEqualTo(5); assertThat(pollingWatchService.timeUnit).isEqualTo(SECONDS); }
public String toTypeString() {
  // needs a map instead of switch because for some reason switch creates an
  // internal class with no annotations that messes up EntityTest
  return Optional.ofNullable(TO_TYPE_STRING.getOrDefault(type, si -> si.type.name()))
      .orElseThrow(NullPointerException::new).apply(this);
}
@Test
public void shouldCorrectlyFormatDecimalsWithoutParameters() {
  final SchemaInfo schemaInfo = new SchemaInfo(SqlBaseType.DECIMAL, null, null);
  assertThat(schemaInfo.toTypeString(), equalTo("DECIMAL"));
}
@Override public boolean acquirePermit(String nsId) { if (contains(nsId)) { return super.acquirePermit(nsId); } return super.acquirePermit(DEFAULT_NS); }
@Test
public void testAcquireTimeout() {
  Configuration conf = createConf(40);
  conf.setDouble(DFS_ROUTER_FAIR_HANDLER_PROPORTION_KEY_PREFIX + "ns1", 0.5);
  conf.setTimeDuration(DFS_ROUTER_FAIRNESS_ACQUIRE_TIMEOUT, 100, TimeUnit.MILLISECONDS);
  RouterRpcFairnessPolicyController routerRpcFairnessPolicyController =
      FederationUtil.newFairnessPolicyController(conf);

  // ns1 should have 20 permits allocated
  for (int i = 0; i < 20; i++) {
    assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns1"));
  }
  long acquireBeginTimeMs = Time.monotonicNow();
  assertFalse(routerRpcFairnessPolicyController.acquirePermit("ns1"));
  long acquireTimeMs = Time.monotonicNow() - acquireBeginTimeMs;
  // There are some other operations, so acquireTimeMs >= 100ms.
  assertTrue(acquireTimeMs >= 100);
}
public static void main(String[] args) { MmaBantamweightFighter fighter1 = new MmaBantamweightFighter("Joe", "Johnson", "The Geek", "Muay Thai"); MmaBantamweightFighter fighter2 = new MmaBantamweightFighter("Ed", "Edwards", "The Problem Solver", "Judo"); fighter1.fight(fighter2); MmaHeavyweightFighter fighter3 = new MmaHeavyweightFighter("Dave", "Davidson", "The Bug Smasher", "Kickboxing"); MmaHeavyweightFighter fighter4 = new MmaHeavyweightFighter("Jack", "Jackson", "The Pragmatic", "Brazilian Jiu-Jitsu"); fighter3.fight(fighter4); }
@Test void shouldExecuteApplicationWithoutException() { assertDoesNotThrow(() -> App.main(new String[]{})); }
public static Optional<String> convertRegexToLiteral(String s) { try { Pattern.compile(s); } catch (PatternSyntaxException e) { /* The string is a malformed regular expression which will throw an error at runtime. We will * preserve this behavior by not rewriting it. */ return Optional.empty(); } boolean inQuote = false; StringBuilder result = new StringBuilder(); int length = s.length(); for (int i = 0; i < length; ++i) { char current = s.charAt(i); if (!inQuote && UNESCAPED_CONSTRUCT.matches(current)) { /* If we see an unescaped regular expression control character then we can't match this as a * string-literal so give up */ return Optional.empty(); } else if (current == '\\') { /* There should be a character following the backslash. No need to check for string length * since we have already ascertained we have a well formed regex */ char escaped = s.charAt(++i); if (escaped == 'Q') { inQuote = true; } else if (escaped == 'E') { inQuote = false; } else { /* If not starting or ending a quotation (\Q...\E) backslashes can only be used to write * escaped constructs or to quote characters that would otherwise be interpreted as * unescaped constructs. * * If they are escaping a construct we can write as a literal string (i.e. one of \t \n * \f \r or \\) then we convert to a literal character. * * If they are escaping an unescaped construct we convert to the relevant character * * Everything else we can't represent in a literal string */ Character controlChar = REGEXCHAR_TO_LITERALCHAR.get(escaped); if (controlChar != null) { result.append(controlChar); } else if (escaped == '\\') { result.append('\\'); } else if (UNESCAPED_CONSTRUCT.matches(escaped)) { result.append(escaped); } else { return Optional.empty(); } } } else { /* Otherwise we have a literal character to match so keep going */ result.append(current); } } return Optional.of(result.toString()); }
@Test public void positive() { assertThat(Regexes.convertRegexToLiteral("hello")).hasValue("hello"); assertThat(Regexes.convertRegexToLiteral("\\t\\n\\f\\r")).hasValue("\t\n\f\r"); assertThat(Regexes.convertRegexToLiteral("\\Q.\\E")).hasValue("."); }
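A hedged negative-case sketch (added): a malformed pattern fails Pattern.compile and yields Optional.empty(); an unescaped '.' is assumed to be matched by the UNESCAPED_CONSTRUCT matcher, so it also yields empty, while the escaped form \. is quoted down to a literal dot.

class RegexesSketch {
    public static void main(String[] args) {
        // Malformed regex: Pattern.compile throws, so the method returns empty.
        System.out.println(Regexes.convertRegexToLiteral("("));    // Optional.empty
        // Assumed: '.' is an unescaped construct, so the string cannot match as a literal.
        System.out.println(Regexes.convertRegexToLiteral("a.b"));  // Optional.empty (assumed)
        // Escaping a construct quotes it down to the plain character.
        System.out.println(Regexes.convertRegexToLiteral("\\."));  // Optional[.]
    }
}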
public static LocalRetryableExecution executeLocallyWithRetry(NodeEngine nodeEngine, Operation operation) { if (operation.getOperationResponseHandler() != null) { throw new IllegalArgumentException("Operation must not have a response handler set"); } if (!operation.returnsResponse()) { throw new IllegalArgumentException("Operation must return a response"); } if (operation.validatesTarget()) { throw new IllegalArgumentException("Operation must not validate the target"); } final LocalRetryableExecution execution = new LocalRetryableExecution(nodeEngine, operation); execution.run(); return execution; }
@Test(expected = IllegalArgumentException.class) public void executeLocallyWithRetryFailsWhenOperationHandlerIsSet() { final Operation op = new Operation() { }; op.setOperationResponseHandler(new OperationResponseHandler() { @Override public void sendResponse(Operation op, Object response) { } }); executeLocallyWithRetry(null, op); }
@Override public PartitionContainer getPartitionContainer(int partitionId) { assert partitionId != GENERIC_PARTITION_ID : "Cannot be called with GENERIC_PARTITION_ID"; return partitionContainers[partitionId]; }
@Test(expected = AssertionError.class) @RequireAssertEnabled public void testGetPartitionContainer_withGenericPartitionId() { mapServiceContext.getPartitionContainer(GENERIC_PARTITION_ID); }
public static Predicate parse(String expression) { final Stack<Predicate> predicateStack = new Stack<>(); final Stack<Character> operatorStack = new Stack<>(); final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll(""); final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true); boolean isTokenMode = true; while (true) { final Character operator; final String token; if (isTokenMode) { if (tokenizer.hasMoreTokens()) { token = tokenizer.nextToken(); } else { break; } if (OPERATORS.contains(token)) { operator = token.charAt(0); } else { operator = null; } } else { operator = operatorStack.pop(); token = null; } isTokenMode = true; if (operator == null) { try { predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance()); } catch (ClassCastException e) { throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e); } catch (Exception e) { throw new RuntimeException(e); } } else { if (operatorStack.empty() || operator == '(') { operatorStack.push(operator); } else if (operator == ')') { while (operatorStack.peek() != '(') { evaluate(predicateStack, operatorStack); } operatorStack.pop(); } else { if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek())) { evaluate(predicateStack, operatorStack); isTokenMode = false; } operatorStack.push(operator); } } } while (!operatorStack.empty()) { evaluate(predicateStack, operatorStack); } if (predicateStack.size() > 1) { throw new RuntimeException("Invalid logical expression"); } return predicateStack.pop(); }
@Test public void testParen() { final Predicate parsed = PredicateExpressionParser.parse("(com.linkedin.data.it.AlwaysTruePredicate)"); Assert.assertEquals(parsed.getClass(), AlwaysTruePredicate.class); }
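A hedged usage sketch (added): besides bare class names and parentheses, the parser combines predicates with single-character operators; '&' is assumed here to be the conjunction operator in OPERATORS.

import com.linkedin.data.it.Predicate;

class PredicateParserSketch {
    public static void main(String[] args) {
        // Both operands must name classes implementing Predicate; '&' is an assumed operator.
        Predicate p = PredicateExpressionParser.parse(
                "com.linkedin.data.it.AlwaysTruePredicate & (com.linkedin.data.it.AlwaysTruePredicate)");
        System.out.println(p.getClass().getName()); // some composite predicate (implementation-defined)
    }
}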
public static MySQLBinlogProtocolValue getBinlogProtocolValue(final MySQLBinaryColumnType columnType) { Preconditions.checkArgument(BINLOG_PROTOCOL_VALUES.containsKey(columnType), "Cannot find MySQL type '%s' in column type when process binlog protocol value", columnType); return BINLOG_PROTOCOL_VALUES.get(columnType); }
@Test void assertGetBinlogProtocolValue() { assertThat(MySQLBinlogProtocolValueFactory.getBinlogProtocolValue(MySQLBinaryColumnType.TINY), instanceOf(MySQLTinyBinlogProtocolValue.class)); }
public static void notifyConfigChange(ConfigDataChangeEvent event) { if (DatasourceConfiguration.isEmbeddedStorage() && !EnvUtil.getStandaloneMode()) { return; } NotifyCenter.publishEvent(event); }
@Test
void testConfigChangeNotify() throws InterruptedException {
    AtomicReference<ConfigDataChangeEvent> reference = new AtomicReference<>();
    NotifyCenter.registerToPublisher(ConfigDataChangeEvent.class, NotifyCenter.ringBufferSize);
    NotifyCenter.registerSubscriber(new Subscriber() {
        @Override
        public void onEvent(Event event) {
            reference.set((ConfigDataChangeEvent) event);
        }

        @Override
        public Class<? extends Event> subscribeType() {
            return ConfigDataChangeEvent.class;
        }
    });

    // Nacos is in standalone mode and uses embedded storage
    EnvUtil.setIsStandalone(true);
    DatasourceConfiguration.setEmbeddedStorage(true);
    ConfigChangePublisher.notifyConfigChange(new ConfigDataChangeEvent("chuntaojun", "chuntaojun", System.currentTimeMillis()));
    Thread.sleep(2000);
    assertNotNull(reference.get());
    reference.set(null);

    // Nacos is in standalone mode and uses external storage
    EnvUtil.setIsStandalone(true);
    DatasourceConfiguration.setEmbeddedStorage(false);
    ConfigChangePublisher.notifyConfigChange(new ConfigDataChangeEvent("chuntaojun", "chuntaojun", System.currentTimeMillis()));
    Thread.sleep(2000);
    assertNotNull(reference.get());
    reference.set(null);

    // Nacos is in cluster mode and uses embedded storage
    EnvUtil.setIsStandalone(false);
    DatasourceConfiguration.setEmbeddedStorage(true);
    ConfigChangePublisher.notifyConfigChange(new ConfigDataChangeEvent("chuntaojun", "chuntaojun", System.currentTimeMillis()));
    Thread.sleep(2000);
    assertNull(reference.get());
    reference.set(null);

    // Nacos is in cluster mode and uses external storage
    EnvUtil.setIsStandalone(false);
    DatasourceConfiguration.setEmbeddedStorage(false);
    ConfigChangePublisher.notifyConfigChange(new ConfigDataChangeEvent("chuntaojun", "chuntaojun", System.currentTimeMillis()));
    Thread.sleep(2000);
    assertNotNull(reference.get());
    reference.set(null);
}
@Override public void onAddedJobGraph(JobID jobId) { runIfStateIs(State.RUNNING, () -> handleAddedJobGraph(jobId)); }
@Test
void onAddedJobGraph_failingRecovery_propagatesTheFailure() throws Exception {
    final FlinkException expectedFailure = new FlinkException("Expected failure");
    jobGraphStore =
            TestingJobGraphStore.newBuilder()
                    .setRecoverJobGraphFunction(
                            (ignoredA, ignoredB) -> {
                                throw expectedFailure;
                            })
                    .build();

    try (final SessionDispatcherLeaderProcess dispatcherLeaderProcess =
            createDispatcherLeaderProcess()) {
        dispatcherLeaderProcess.start();

        // wait first for the dispatcher service to be created
        dispatcherLeaderProcess.getDispatcherGateway().get();

        jobGraphStore.putJobGraph(JOB_GRAPH);
        dispatcherLeaderProcess.onAddedJobGraph(JOB_GRAPH.getJobID());

        assertThatFuture(fatalErrorHandler.getErrorFuture())
                .eventuallySucceeds()
                .extracting(FlinkAssertions::chainOfCauses, STREAM_THROWABLE)
                .contains(expectedFailure);

        assertThat(dispatcherLeaderProcess.getState())
                .isEqualTo(SessionDispatcherLeaderProcess.State.STOPPED);

        fatalErrorHandler.clearError();
    }
}
@Override public boolean databaseExists(SnowflakeIdentifier database) { Preconditions.checkArgument( database.type() == SnowflakeIdentifier.Type.DATABASE, "databaseExists requires a DATABASE identifier, got '%s'", database); final String finalQuery = "SHOW SCHEMAS IN DATABASE IDENTIFIER(?) LIMIT 1"; List<SnowflakeIdentifier> schemas; try { schemas = connectionPool.run( conn -> queryHarness.query( conn, finalQuery, SCHEMA_RESULT_SET_HANDLER, database.databaseName())); } catch (SQLException e) { if (DATABASE_NOT_FOUND_ERROR_CODES.contains(e.getErrorCode())) { return false; } throw new UncheckedSQLException(e, "Failed to check if database '%s' exists", database); } catch (InterruptedException e) { throw new UncheckedInterruptedException( e, "Interrupted while checking if database '%s' exists", database); } return !schemas.isEmpty(); }
@Test public void testDatabaseFailureWithOtherException() throws SQLException { Exception injectedException = new SQLException("Some other exception", "2000", 2, null); when(mockResultSet.next()).thenThrow(injectedException); assertThatExceptionOfType(UncheckedSQLException.class) .isThrownBy(() -> snowflakeClient.databaseExists(SnowflakeIdentifier.ofDatabase("DB_1"))) .withMessageContaining("Failed to check if database 'DATABASE: 'DB_1'' exists") .withCause(injectedException); }
public static <T extends Iterable<E>, E> T filter(T iter, Filter<E> filter) { if (null == iter) { return null; } filter(iter.iterator(), filter); return iter; }
@Test public void filterTest(){ final List<String> obj2 = ListUtil.toList("3"); final List<String> obj = ListUtil.toList("1", "3"); IterUtil.filter(obj.iterator(), obj2::contains); assertEquals(1, obj.size()); assertEquals("3", obj.get(0)); }
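A tiny sketch (added; reads straight off the focal method): a null iterable passes through as null, and filtering mutates the iterable in place, returning the same reference.

import java.util.List;

import cn.hutool.core.collection.IterUtil;
import cn.hutool.core.collection.ListUtil;

class IterFilterSketch {
    public static void main(String[] args) {
        List<String> keep = ListUtil.toList("3");
        List<String> list = ListUtil.toList("1", "3");
        List<String> same = IterUtil.filter(list, keep::contains); // drops "1" in place
        System.out.println(same == list); // true: the original list is returned
        System.out.println(IterUtil.filter((List<String>) null, keep::contains)); // null in, null out
    }
}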
public synchronized boolean memberLeave(Collection<Member> members) { Set<Member> set = new HashSet<>(allMembers()); set.removeAll(members); return memberChange(set); }
@Test void testMemberLeave() { Member member = Member.builder().ip("1.1.3.3").port(8848).state(NodeState.DOWN).build(); boolean joinResult = serverMemberManager.memberJoin(Collections.singletonList(member)); assertTrue(joinResult); List<String> ips = serverMemberManager.getServerListUnhealth(); assertEquals(1, ips.size()); boolean result = serverMemberManager.memberLeave(Collections.singletonList(member)); assertTrue(result); }
public synchronized TopologyDescription describe() { return internalTopologyBuilder.describe(); }
@Test public void streamStreamLeftJoinTopologyWithDefaultStoresNames() { final StreamsBuilder builder = new StreamsBuilder(); final KStream<Integer, String> stream1; final KStream<Integer, String> stream2; stream1 = builder.stream("input-topic1"); stream2 = builder.stream("input-topic2"); stream1.leftJoin( stream2, MockValueJoiner.TOSTRING_JOINER, JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(100)), StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String())); final TopologyDescription describe = builder.build().describe(); assertEquals( "Topologies:\n" + " Sub-topology: 0\n" + " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic1])\n" + " --> KSTREAM-WINDOWED-0000000002\n" + " Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic2])\n" + " --> KSTREAM-WINDOWED-0000000003\n" + " Processor: KSTREAM-WINDOWED-0000000002 (stores: [KSTREAM-JOINTHIS-0000000004-store])\n" + " --> KSTREAM-JOINTHIS-0000000004\n" + " <-- KSTREAM-SOURCE-0000000000\n" + " Processor: KSTREAM-WINDOWED-0000000003 (stores: [KSTREAM-OUTEROTHER-0000000005-store])\n" + " --> KSTREAM-OUTEROTHER-0000000005\n" + " <-- KSTREAM-SOURCE-0000000001\n" + " Processor: KSTREAM-JOINTHIS-0000000004 (stores: [KSTREAM-OUTEROTHER-0000000005-store, KSTREAM-OUTERSHARED-0000000004-store])\n" + " --> KSTREAM-MERGE-0000000006\n" + " <-- KSTREAM-WINDOWED-0000000002\n" + " Processor: KSTREAM-OUTEROTHER-0000000005 (stores: [KSTREAM-JOINTHIS-0000000004-store, KSTREAM-OUTERSHARED-0000000004-store])\n" + " --> KSTREAM-MERGE-0000000006\n" + " <-- KSTREAM-WINDOWED-0000000003\n" + " Processor: KSTREAM-MERGE-0000000006 (stores: [])\n" + " --> none\n" + " <-- KSTREAM-JOINTHIS-0000000004, KSTREAM-OUTEROTHER-0000000005\n\n", describe.toString()); }
@Override public RFuture<String> scriptLoadAsync(String luaScript) { List<CompletableFuture<String>> futures = commandExecutor.executeAllAsync(RedisCommands.SCRIPT_LOAD, luaScript); CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); CompletableFuture<String> s = f.thenApply(r -> futures.get(0).getNow(null)); return new CompletableFutureWrapper<>(s); }
@Test public void testScriptLoadAsync() { redisson.getBucket("foo").set("bar"); RFuture<String> r = redisson.getScript().scriptLoadAsync("return redis.call('get', 'foo')"); Assertions.assertEquals("282297a0228f48cd3fc6a55de6316f31422f5d17", r.toCompletableFuture().join()); String r1 = redisson.getScript().evalSha(Mode.READ_ONLY, "282297a0228f48cd3fc6a55de6316f31422f5d17", RScript.ReturnType.VALUE, Collections.emptyList()); Assertions.assertEquals("bar", r1); }
protected static boolean isValidHexQuantity(String value) { if (value == null) { return false; } if (value.length() < 3) { return false; } if (!value.startsWith(HEX_PREFIX)) { return false; } return value.matches("0[xX][0-9a-fA-F]+"); }
@Test void testIsValidHexQuantity() { assertEquals(true, Numeric.isValidHexQuantity("0x0")); assertEquals(true, Numeric.isValidHexQuantity("0x9")); assertEquals(true, Numeric.isValidHexQuantity("0x123f")); assertEquals(true, Numeric.isValidHexQuantity("0x419E")); assertEquals(true, Numeric.isValidHexQuantity("0x975d")); assertEquals(true, Numeric.isValidHexQuantity("0xDC449C1C16BA0")); assertEquals(true, Numeric.isValidHexQuantity("0x419E")); assertEquals(false, Numeric.isValidHexQuantity("419E")); assertEquals(false, Numeric.isValidHexQuantity("0419E")); assertEquals(false, Numeric.isValidHexQuantity("0x419Erf")); assertEquals(false, Numeric.isValidHexQuantity("0x419fg")); }
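A boundary-case sketch (added; derived from the guards in the focal method, and assuming HEX_PREFIX is the literal "0x"): the length check rejects anything shorter than three characters, and the case-sensitive prefix check rejects "0X…" before the regex, which would otherwise accept it, is consulted. The method is protected, so this assumes same-package access.

class HexQuantitySketch {
    static void demo() { // assumed to live in the same package as Numeric
        System.out.println(Numeric.isValidHexQuantity("0x"));  // false: fewer than 3 chars
        System.out.println(Numeric.isValidHexQuantity("0X1")); // false: prefix check is case-sensitive
        System.out.println(Numeric.isValidHexQuantity("0x1")); // true
    }
}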