Columns: focal_method (string, 13–60.9k chars), test_case (string, 25–109k chars)
@Override public void setConfigAttributes(Object attributes) { clear(); if (attributes == null) { return; } List<Map> attrList = (List<Map>) attributes; for (Map attrMap : attrList) { String type = (String) attrMap.get("artifactTypeValue"); if (TestArtifactConfig.TEST_PLAN_DISPLAY_NAME.equals(type) || BuildArtifactConfig.ARTIFACT_PLAN_DISPLAY_NAME.equals(type)) { String source = (String) attrMap.get(BuiltinArtifactConfig.SRC); String destination = (String) attrMap.get(BuiltinArtifactConfig.DEST); if (source.trim().isEmpty() && destination.trim().isEmpty()) { continue; } if (TestArtifactConfig.TEST_PLAN_DISPLAY_NAME.equals(type)) { this.add(new TestArtifactConfig(source, destination)); } else { this.add(new BuildArtifactConfig(source, destination)); } } else { String artifactId = (String) attrMap.get(PluggableArtifactConfig.ID); String storeId = (String) attrMap.get(PluggableArtifactConfig.STORE_ID); String pluginId = (String) attrMap.get("pluginId"); Map<String, Object> userSpecifiedConfiguration = (Map<String, Object>) attrMap.get("configuration"); PluggableArtifactConfig pluggableArtifactConfig = new PluggableArtifactConfig(artifactId, storeId); this.add(pluggableArtifactConfig); if (userSpecifiedConfiguration == null) { return; } if (StringUtils.isBlank(pluginId)) { Configuration configuration = pluggableArtifactConfig.getConfiguration(); for (String key : userSpecifiedConfiguration.keySet()) { Map<String, String> configurationMetadata = (Map<String, String>) userSpecifiedConfiguration.get(key); if (configurationMetadata != null) { boolean isSecure = Boolean.parseBoolean(configurationMetadata.get("isSecure")); if (configuration.getProperty(key) == null) { configuration.addNewConfiguration(key, isSecure); } if (isSecure) { configuration.getProperty(key).setEncryptedValue(new EncryptedConfigurationValue(configurationMetadata.get("value"))); } else { configuration.getProperty(key).setConfigurationValue(new ConfigurationValue(configurationMetadata.get("value"))); } } } } else { for (Map.Entry<String, Object> configuration : userSpecifiedConfiguration.entrySet()) { pluggableArtifactConfig.getConfiguration().addNewConfigurationWithValue(configuration.getKey(), String.valueOf(configuration.getValue()), false); } } } } }
@Test public void shouldLoadArtifactPlans() { HashMap<String, String> artifactPlan1 = new HashMap<>(); artifactPlan1.put(SRC, "blah"); artifactPlan1.put(DEST, "something"); artifactPlan1.put("artifactTypeValue", TestArtifactConfig.TEST_PLAN_DISPLAY_NAME); HashMap<String, String> artifactPlan2 = new HashMap<>(); artifactPlan2.put(SRC, "blah2"); artifactPlan2.put(DEST, "something2"); artifactPlan2.put("artifactTypeValue", BuildArtifactConfig.ARTIFACT_PLAN_DISPLAY_NAME); List<HashMap> artifactPlansList = new ArrayList<>(); artifactPlansList.add(artifactPlan1); artifactPlansList.add(artifactPlan2); ArtifactTypeConfigs artifactTypeConfigs = new ArtifactTypeConfigs(); artifactTypeConfigs.setConfigAttributes(artifactPlansList); assertThat(artifactTypeConfigs.size(), is(2)); TestArtifactConfig plan = new TestArtifactConfig(); plan.setSource("blah"); plan.setDestination("something"); assertThat(artifactTypeConfigs.get(0), is(plan)); assertThat(artifactTypeConfigs.get(1), is(new BuildArtifactConfig("blah2", "something2"))); }
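A hedged companion sketch for the skip branch of setConfigAttributes above, reusing the fixtures and assertion style of the previous test (the test name is illustrative): an entry whose source and destination are both blank should be ignored rather than added.

@Test
public void shouldIgnoreArtifactPlanWithBlankSourceAndDestination() {
    HashMap<String, String> blankPlan = new HashMap<>();
    blankPlan.put(SRC, " ");
    blankPlan.put(DEST, " ");
    blankPlan.put("artifactTypeValue", TestArtifactConfig.TEST_PLAN_DISPLAY_NAME);
    List<HashMap> artifactPlansList = new ArrayList<>();
    artifactPlansList.add(blankPlan);
    ArtifactTypeConfigs artifactTypeConfigs = new ArtifactTypeConfigs();
    artifactTypeConfigs.setConfigAttributes(artifactPlansList);
    // blank source + destination hits the 'continue' branch, so nothing is added
    assertThat(artifactTypeConfigs.size(), is(0));
}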
@Override public void enableTrackScreenOrientation(boolean enable) { }
@Test public void enableTrackScreenOrientation() { /* smoke test: the implementation under test is a no-op, so this only verifies the call completes without throwing */ mSensorsAPI.enableTrackScreenOrientation(true); }
public boolean isPacketDistinct(@NonNull String originMacAddress, @NonNull byte[] scanRecord) { byte[] macBytes = originMacAddress.getBytes(); ByteBuffer buffer = ByteBuffer.allocate(macBytes.length+scanRecord.length); buffer.put(macBytes); buffer.put(scanRecord); buffer.rewind(); // rewind puts position back to beginning so .equals and .hashCode work if (mDistinctPacketsDetected.size() == MAX_PACKETS_TO_TRACK) { return mDistinctPacketsDetected.contains(buffer); } else { return mDistinctPacketsDetected.add(buffer); } }
@Test public void testSecondNonDuplicatePacketIsDistinct() throws Exception { DistinctPacketDetector dpd = new DistinctPacketDetector(); dpd.isPacketDistinct("01:02:03:04:05:06", new byte[] {0x01, 0x02}); boolean secondResult = dpd.isPacketDistinct("01:02:03:04:05:06", new byte[] {0x03, 0x04}); assertTrue("second call with different packet should be distinct", secondResult); }
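A hedged counterpart for the duplicate case, using the same API as the test above: because mDistinctPacketsDetected is a Set, re-sending an identical MAC/scan-record pair makes add() return false, so the second call should not be distinct.

@Test
public void testDuplicatePacketIsNotDistinct() throws Exception {
    DistinctPacketDetector dpd = new DistinctPacketDetector();
    dpd.isPacketDistinct("01:02:03:04:05:06", new byte[] {0x01, 0x02});
    // identical MAC and scan record: Set.add returns false for an existing element
    boolean secondResult = dpd.isPacketDistinct("01:02:03:04:05:06", new byte[] {0x01, 0x02});
    assertFalse("second call with identical packet should not be distinct", secondResult);
}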
public boolean matches(String comment) { for (String escapedMatcher : escapeMatchers()) { Pattern pattern = Pattern.compile(String.join(escapedMatcher, "\\B", "\\B|\\b", "\\b")); if (pattern.matcher(comment).find()) { return true; } } return false; }
@Test void shouldMatchWordBoundaries() throws Exception { assertThat(new Matcher("!!").matches("!!")).isTrue(); assertThat(new Matcher("ja").matches(" ja")).isTrue(); assertThat(new Matcher("ja").matches("ja ")).isTrue(); assertThat(new Matcher("ja").matches(" ja ")).isTrue(); assertThat(new Matcher("ja").matches("ja:")).isTrue(); assertThat(new Matcher("[email protected]").matches("[[email protected]] i checkin")).isTrue(); assertThat(new Matcher("ja").matches("ja&jh")).isTrue(); }
@Override public DescriptiveUrl toDownloadUrl(final Path file, final Sharee sharee, final ShareCreationRequestModel options, final PasswordCallback callback) throws BackgroundException { return this.toGuestUrl(file, options, callback); }
@Test public void testDownloadUrlForContainer() throws Exception { final EueResourceIdProvider fileid = new EueResourceIdProvider(session); final Path sourceFolder = new EueDirectoryFeature(session, fileid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); final EueShareFeature feature = new EueShareFeature(session, fileid); final DescriptiveUrl url = feature.toDownloadUrl(sourceFolder, Share.Sharee.world, null, new DisabledPasswordCallback() { @Override public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) { return new Credentials(null, new AlphanumericRandomStringService().random()); } }); assertNotEquals(DescriptiveUrl.EMPTY, url); // Test returning same share assertEquals(url, feature.toDownloadUrl(sourceFolder, Share.Sharee.world, null, new DisabledPasswordCallback() { @Override public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) { return new Credentials(null, new AlphanumericRandomStringService().random()); } })); assertEquals(url, new EueShareUrlProvider(session.getHost(), session.userShares()).toUrl(sourceFolder).find(DescriptiveUrl.Type.signed)); new EueDeleteFeature(session, fileid).delete(Collections.singletonList(sourceFolder), new DisabledPasswordCallback(), new Delete.DisabledCallback()); }
@Override public QueuedCommandStatus enqueueCommand( final CommandId commandId, final Command command, final Producer<CommandId, Command> transactionalProducer ) { final CommandStatusFuture statusFuture = commandStatusMap.compute( commandId, (k, v) -> { if (v == null) { return new CommandStatusFuture(commandId); } // We should fail registration if a future is already registered, to prevent // a caller from receiving a future for a different statement. throw new IllegalStateException( String.format( "Another command with the same id (%s) is being executed.", commandId) ); } ); try { final ProducerRecord<CommandId, Command> producerRecord = new ProducerRecord<>( commandTopicName, COMMAND_TOPIC_PARTITION, commandId, command); final RecordMetadata recordMetadata = transactionalProducer.send(producerRecord).get(); return new QueuedCommandStatus(recordMetadata.offset(), statusFuture); } catch (final Exception e) { commandStatusMap.remove(commandId); throw new KsqlStatementException( "Could not write the statement into the command topic.", String.format( "Could not write the statement '%s' into the command topic.", QueryMask.getMaskedStatement(command.getStatement()) ), QueryMask.getMaskedStatement(command.getStatement()), KsqlStatementException.Problem.OTHER, e ); } }
@Test public void shouldIncludeCommandSequenceNumberInSuccessfulQueuedCommandStatus() { // When: final QueuedCommandStatus commandStatus = commandStore.enqueueCommand(commandId, command, transactionalProducer); // Then: assertThat(commandStatus.getCommandSequenceNumber(), equalTo(recordMetadata.offset())); }
@Override public DescriptiveUrl toDownloadUrl(final Path file, final Sharee sharee, final Object options, final PasswordCallback callback) throws BackgroundException { final Host bookmark = session.getHost(); final StringBuilder request = new StringBuilder(String.format("https://%s%s/apps/files_sharing/api/v1/shares?path=%s&shareType=%d&shareWith=%s", bookmark.getHostname(), new NextcloudHomeFeature(bookmark).find(NextcloudHomeFeature.Context.ocs).getAbsolute(), URIEncoder.encode(PathRelativizer.relativize(NextcloudHomeFeature.Context.files.home(bookmark).find().getAbsolute(), file.getAbsolute())), Sharee.world.equals(sharee) ? SHARE_TYPE_PUBLIC_LINK : SHARE_TYPE_USER, Sharee.world.equals(sharee) ? StringUtils.EMPTY : sharee.getIdentifier() )); final Credentials password = callback.prompt(bookmark, LocaleFactory.localizedString("Passphrase", "Cryptomator"), MessageFormat.format(LocaleFactory.localizedString("Create a passphrase required to access {0}", "Credentials"), file.getName()), new LoginOptions().anonymous(true).keychain(false).icon(bookmark.getProtocol().disk())); if(password.isPasswordAuthentication()) { request.append(String.format("&password=%s", URIEncoder.encode(password.getPassword()))); } final HttpPost resource = new HttpPost(request.toString()); resource.setHeader("OCS-APIRequest", "true"); resource.setHeader(HttpHeaders.ACCEPT, ContentType.APPLICATION_XML.getMimeType()); try { return session.getClient().execute(resource, new OcsDownloadShareResponseHandler()); } catch(HttpResponseException e) { throw new DefaultHttpResponseExceptionMappingService().map(e); } catch(IOException e) { throw new DefaultIOExceptionMappingService().map(e); } }
@Test public void testToDownloadUrlNoPassword() throws Exception { final Path home = new NextcloudHomeFeature(session.getHost()).find(); final Path file = new DAVTouchFeature(new NextcloudWriteFeature(session)).touch(new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus()); final DescriptiveUrl url = new NextcloudShareFeature(session).toDownloadUrl(file, Share.Sharee.world, null, new DisabledPasswordCallback()); assertNotSame(DescriptiveUrl.EMPTY, url); new DAVDeleteFeature(session).delete(Collections.singletonList(file), new DisabledPasswordCallback(), new Delete.DisabledCallback()); }
public synchronized boolean tryUpdatingPreferredReadReplica(TopicPartition tp, int preferredReadReplicaId, LongSupplier timeMs) { final TopicPartitionState state = assignedStateOrNull(tp); if (state != null) { state.updatePreferredReadReplica(preferredReadReplicaId, timeMs); return true; } return false; }
@Test public void testTryUpdatingPreferredReadReplica() { state.assignFromUser(Collections.singleton(tp0)); final TopicPartition unassignedPartition = new TopicPartition("unassigned", 0); final int preferredReadReplicaId = 10; final LongSupplier expirationTimeMs = () -> System.currentTimeMillis() + 60000L; assertTrue(state.tryUpdatingPreferredReadReplica(tp0, preferredReadReplicaId, expirationTimeMs)); assertEquals(Optional.of(preferredReadReplicaId), state.preferredReadReplica(tp0, System.currentTimeMillis())); assertFalse(state.tryUpdatingPreferredReadReplica(unassignedPartition, preferredReadReplicaId, expirationTimeMs)); assertEquals(Optional.empty(), state.preferredReadReplica(unassignedPartition, System.currentTimeMillis())); }
@Override public RecordSet getRecordSet(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorSplit split, List<? extends ColumnHandle> columns) { requireNonNull(split, "partitionChunk is null"); ExampleSplit exampleSplit = (ExampleSplit) split; checkArgument(exampleSplit.getConnectorId().equals(connectorId), "split is not for this connector"); ImmutableList.Builder<ExampleColumnHandle> handles = ImmutableList.builder(); for (ColumnHandle handle : columns) { handles.add((ExampleColumnHandle) handle); } return new ExampleRecordSet(exampleSplit, handles.build()); }
@Test public void testGetRecordSet() { ExampleRecordSetProvider recordSetProvider = new ExampleRecordSetProvider(new ExampleConnectorId("test")); RecordSet recordSet = recordSetProvider.getRecordSet(ExampleTransactionHandle.INSTANCE, SESSION, new ExampleSplit("test", "schema", "table", dataUri), ImmutableList.of( new ExampleColumnHandle("test", "text", createUnboundedVarcharType(), 0), new ExampleColumnHandle("test", "value", BIGINT, 1))); assertNotNull(recordSet, "recordSet is null"); RecordCursor cursor = recordSet.cursor(); assertNotNull(cursor, "cursor is null"); Map<String, Long> data = new LinkedHashMap<>(); while (cursor.advanceNextPosition()) { data.put(cursor.getSlice(0).toStringUtf8(), cursor.getLong(1)); } assertEquals(data, ImmutableMap.<String, Long>builder() .put("ten", 10L) .put("eleven", 11L) .put("twelve", 12L) .build()); }
@Override public long get(long key) { return super.get0(key, 0); }
@Test public void testClear() { final long key = random.nextLong(); insert(key); hsa.clear(); assertEquals(NULL_ADDRESS, hsa.get(key)); assertEquals(0, hsa.size()); }
@VisibleForTesting Entity exportNativeEntity(RuleDao ruleDao, EntityDescriptorIds entityDescriptorIds) { final PipelineRuleEntity ruleEntity = PipelineRuleEntity.create( ValueReference.of(ruleDao.title()), ValueReference.of(ruleDao.description()), ValueReference.of(ruleDao.source())); final JsonNode data = objectMapper.convertValue(ruleEntity, JsonNode.class); return EntityV1.builder() .id(ModelId.of(entityDescriptorIds.getOrThrow(ruleDao.id(), ModelTypes.PIPELINE_RULE_V1))) .type(ModelTypes.PIPELINE_RULE_V1) .data(data) .build(); }
@Test @MongoDBFixtures("PipelineRuleFacadeTest.json") public void exportNativeEntity() { final EntityDescriptor descriptor = EntityDescriptor.create("5adf25034b900a0fdb4e5338", ModelTypes.PIPELINE_RULE_V1); final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor); final Entity entity = facade.exportEntity(descriptor, entityDescriptorIds).orElseThrow(AssertionError::new); assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null))); assertThat(entity.type()).isEqualTo(ModelTypes.PIPELINE_RULE_V1); final EntityV1 entityV1 = (EntityV1) entity; final PipelineEntity pipelineEntity = objectMapper.convertValue(entityV1.data(), PipelineEntity.class); assertThat(pipelineEntity.title()).isEqualTo(ValueReference.of("debug")); assertThat(pipelineEntity.description()).isEqualTo(ValueReference.of("Debug")); assertThat(pipelineEntity.source().asString(Collections.emptyMap())).startsWith("rule \"debug\"\n"); }
@Override public Mono<Void> withoutFallback(final ServerWebExchange exchange, final Throwable throwable) { Object error; if (throwable instanceof DegradeException) { exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR); error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.SERVICE_RESULT_ERROR); } else if (throwable instanceof FlowException) { exchange.getResponse().setStatusCode(HttpStatus.TOO_MANY_REQUESTS); error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.TOO_MANY_REQUESTS); } else if (throwable instanceof BlockException) { exchange.getResponse().setStatusCode(HttpStatus.TOO_MANY_REQUESTS); error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.SENTINEL_BLOCK_ERROR); } else if (throwable instanceof SentinelPlugin.SentinelFallbackException) { return exchange.getAttribute(Constants.RESPONSE_MONO); } else { return Mono.error(throwable); } return WebFluxResultUtils.result(exchange, error); }
@Test public void testFlowException() { StepVerifier.create(fallbackHandler.withoutFallback(exchange, new FlowException(""))).expectSubscription().verifyComplete(); }
public static void uncheck(RunnableWithExceptions t) { try { t.run(); } catch (Exception exception) { throwAsUnchecked(exception); } }
@Test public void test_uncheck_exception_thrown_by_method() { /* exercises value-returning uncheck overloads (supplier/function variants) that complement the void Runnable form shown in the focal method */ Class clazz1 = uncheck(() -> Class.forName("java.lang.String")); Class clazz2 = uncheck(Class::forName, "java.lang.String"); }
@SuppressWarnings("ShouldNotSubclass") public final ThrowableSubject hasCauseThat() { // provides a more helpful error message if hasCauseThat() methods are chained too deep // e.g. assertThat(new Exception()).hCT().hCT().... // TODO(diamondm) in keeping with other subjects' behavior this should still NPE if the subject // *itself* is null, since there's no context to lose. See also b/37645583 if (actual == null) { check("getCause()") .withMessage("Causal chain is not deep enough - add a .isNotNull() check?") .fail(); return ignoreCheck() .that( new Throwable() { @Override @SuppressWarnings("UnsynchronizedOverridesSynchronized") public Throwable fillInStackTrace() { setStackTrace(new StackTraceElement[0]); // for old versions of Android return this; } }); } return check("getCause()").that(actual.getCause()); }
@Test public void hasCauseThat_instanceOf() { assertThat(new Exception("foobar", new IOException("barfoo"))) .hasCauseThat() .isInstanceOf(IOException.class); }
public static boolean isIP(String addr) { return isIPv4(addr) || isIPv6(addr); }
@Test void testIsIP() { assertTrue(InternetAddressUtil.isIP("[::1]")); assertTrue(InternetAddressUtil.isIP("127.0.0.1")); assertFalse(InternetAddressUtil.isIP("er34234")); assertFalse(InternetAddressUtil.isIP("127.100.19")); }
public static void checkValidWriteSchema(GroupType schema) { schema.accept(new TypeVisitor() { @Override public void visit(GroupType groupType) { if (groupType.getFieldCount() <= 0) { throw new InvalidSchemaException("Cannot write a schema with an empty group: " + groupType); } for (Type type : groupType.getFields()) { type.accept(this); } } @Override public void visit(MessageType messageType) { visit((GroupType) messageType); } @Override public void visit(PrimitiveType primitiveType) {} }); }
@Test public void testWriteCheckNestedGroupType() { TypeUtil.checkValidWriteSchema(Types.buildMessage() .repeatedGroup() .required(INT32) .named("a") .optional(BINARY) .as(UTF8) .named("b") .named("valid_group") .named("valid_message")); TestTypeBuilders.assertThrows( "Should complain about empty GroupType", InvalidSchemaException.class, (Callable<Void>) () -> { TypeUtil.checkValidWriteSchema(Types.buildMessage() .addField(new GroupType(REPEATED, "invalid_group")) .named("invalid_message")); return null; }); }
@Override protected void getInfo(List<List<Comparable>> infos) { String progress = FeConstants.NULL_STRING; if (jobState == JobState.RUNNING && getBatchTask() != null) { progress = getBatchTask().getFinishedTaskNum() + "/" + getBatchTask().getTaskNum(); } for (IndexSchemaInfo schemaInfo : schemaInfos) { List<Comparable> info = Lists.newArrayList(); info.add(jobId); info.add(tableName); info.add(TimeUtils.longToTimeString(createTimeMs)); info.add(TimeUtils.longToTimeString(finishedTimeMs)); info.add(schemaInfo.indexName); info.add(schemaInfo.indexId); info.add(schemaInfo.indexId); info.add(String.format("%d:0", schemaInfo.schemaInfo.getVersion())); // schema version and schema hash info.add(getWatershedTxnId()); info.add(jobState.name()); info.add(errMsg); info.add(progress); info.add(timeoutMs / 1000); Warehouse warehouse = GlobalStateMgr.getCurrentState().getWarehouseMgr().getWarehouseAllowNull(warehouseId); if (warehouse == null) { info.add("null"); } else { info.add(warehouse.getName()); } infos.add(info); } }
@Test public void testGetInfo() throws Exception { LakeTable table = createTable(connectContext, "CREATE TABLE t1(c0 INT) DUPLICATE KEY(c0) DISTRIBUTED BY HASH(c0) " + "BUCKETS 2 PROPERTIES('fast_schema_evolution'='true')"); AlterJobV2 job = mustAlterTable(table, "ALTER TABLE t1 ADD COLUMN c1 BIGINT"); List<List<Comparable>> infoList = new ArrayList<>(); job.getInfo(infoList); Assert.assertEquals(1, infoList.size()); List<Comparable> info = infoList.get(0); Assert.assertEquals(14, info.size()); Assert.assertEquals(job.getJobId(), info.get(0)); Assert.assertEquals(table.getName(), info.get(1)); Assert.assertEquals(TimeUtils.longToTimeString(job.createTimeMs), info.get(2)); Assert.assertEquals(TimeUtils.longToTimeString(job.finishedTimeMs), info.get(3)); Assert.assertEquals(table.getIndexNameById(table.getBaseIndexId()), info.get(4)); Assert.assertEquals(table.getBaseIndexId(), info.get(5)); Assert.assertEquals(table.getBaseIndexId(), info.get(6)); Assert.assertEquals(String.format("%d:0", table.getIndexIdToMeta().get(table.getBaseIndexId()).getSchemaVersion()), info.get(7)); Assert.assertEquals(job.getTransactionId().get(), info.get(8)); Assert.assertEquals(job.getJobState().name(), info.get(9)); Assert.assertEquals(job.errMsg, info.get(10)); Assert.assertEquals(job.getTimeoutMs() / 1000, info.get(12)); Assert.assertEquals("default_warehouse", info.get(13)); }
public boolean contains(K value) { return (includeLower ? low.compareTo(value) <= 0 : low.compareTo(value) < 0) && (includeUpper ? up.compareTo(value) >= 0 : up.compareTo(value) > 0); }
@Test public void testContains() { assertTrue(new Interval(Interval.<Integer>getMinusInf(), false, 1000, false).contains(20)); assertFalse(new Interval(Interval.<Integer>getMinusInf(), false, 1000, false).contains(1000)); assertFalse(new Interval(Interval.<Integer>getMinusInf(), false, 1000, false).contains(1001)); assertTrue(new Interval(1000, false, Interval.<Integer>getPlusInf(), false).contains(2000)); assertFalse(new Interval(1000, false, Interval.<Integer>getPlusInf(), false).contains(1000)); assertFalse(new Interval(1000, false, Interval.<Integer>getPlusInf(), false).contains(999)); }
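A hedged follow-up sketch covering the inclusive-bound branches of contains(), assuming the same (low, includeLower, up, includeUpper) constructor used in the test above.

@Test
public void testContainsInclusiveBounds() {
    // includeLower/includeUpper switch the comparisons from strict to non-strict
    assertTrue(new Interval(0, true, 1000, true).contains(0));
    assertTrue(new Interval(0, true, 1000, true).contains(1000));
    assertFalse(new Interval(0, false, 1000, false).contains(0));
    assertFalse(new Interval(0, false, 1000, false).contains(1000));
}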
@Override public Boolean mSet(Map<byte[], byte[]> tuple) { if (isQueueing() || isPipelined()) { for (Entry<byte[], byte[]> entry: tuple.entrySet()) { write(entry.getKey(), StringCodec.INSTANCE, RedisCommands.SET, entry.getKey(), entry.getValue()); } return true; } CommandBatchService es = new CommandBatchService(executorService); for (Entry<byte[], byte[]> entry: tuple.entrySet()) { es.writeAsync(entry.getKey(), StringCodec.INSTANCE, RedisCommands.SET, entry.getKey(), entry.getValue()); } es.execute(); return true; }
@Test public void testMSet() { Map<byte[], byte[]> map = new HashMap<>(); for (int i = 0; i < 10; i++) { map.put(("test" + i).getBytes(), ("test" + i*100).getBytes()); } connection.mSet(map); for (Map.Entry<byte[], byte[]> entry : map.entrySet()) { assertThat(connection.get(entry.getKey())).isEqualTo(entry.getValue()); } }
public boolean isAdmin(Admin admin) { return !isSecurityEnabled() || noAdminsConfigured() || adminsConfig.isAdmin(admin, rolesConfig.memberRoles(admin)); }
@Test public void shouldNotCareIfValidUserInRoleOrUser() throws Exception { SecurityConfig security = security(passwordFileAuthConfig(), admins(role("role2"))); assertThat(security.isAdmin(new AdminUser(new CaseInsensitiveString("chris"))), is(true)); assertThat(security.isAdmin(new AdminUser(new CaseInsensitiveString("jez"))), is(false)); security = security(passwordFileAuthConfig(), admins(role("role2"), user("jez"))); assertThat(security.isAdmin(new AdminUser(new CaseInsensitiveString("chris"))), is(true)); assertThat(security.isAdmin(new AdminUser(new CaseInsensitiveString("jez"))), is(true)); }
@Override public boolean equals(Object obj) { if (obj instanceof DateTimeStamp) { DateTimeStamp other = (DateTimeStamp) obj; if (this.hasDateStamp()) return this.getDateTime().equals(other.getDateTime()) && (this.getTimeStamp() == other.getTimeStamp()); else return getTimeStamp() == other.getTimeStamp(); } return false; }
@Test void testEquals() { DateTimeStamp a = new DateTimeStamp(0.586d); DateTimeStamp b = new DateTimeStamp(0.586d); assertEquals(a,b); assertEquals(b,a); b = new DateTimeStamp(.587); assertNotEquals(a, b); assertNotEquals(b,a); a = new DateTimeStamp("2018-04-04T09:10:00.586-0100"); b = new DateTimeStamp("2018-04-04T09:10:00.586-0100"); assertEquals(a, b); assertEquals(b,a); b = new DateTimeStamp("2018-04-04T09:10:00.587-0100"); assertNotEquals(a, b); assertNotEquals(b,a); a = new DateTimeStamp("2018-04-04T09:10:00.586-0100", 0.18); b = new DateTimeStamp("2018-04-04T09:10:00.586-0100", 0.18); assertEquals(a, b); assertEquals(b,a); b = new DateTimeStamp("2018-04-04T09:10:00.586-0100", 0.19); assertNotEquals(a, b); assertNotEquals(b,a); b = new DateTimeStamp("2018-04-04T09:10:00.586-0200", 0.18); assertNotEquals(a, b); assertNotEquals(b,a); b = new DateTimeStamp("2018-04-04T09:10:00.586-0200", 0.19); assertNotEquals(a, b); assertNotEquals(b,a); }
@Override public <OUT> ProcessConfigurableAndNonKeyedPartitionStream<OUT> process( OneInputStreamProcessFunction<T, OUT> processFunction) { validateStates( processFunction.usesStates(), new HashSet<>( Arrays.asList( StateDeclaration.RedistributionMode.NONE, StateDeclaration.RedistributionMode.IDENTICAL))); TypeInformation<OUT> outType = StreamUtils.getOutputTypeForOneInputProcessFunction(processFunction, getType()); ProcessOperator<T, OUT> operator = new ProcessOperator<>(processFunction); OneInputTransformation<T, OUT> outputTransform = StreamUtils.getOneInputTransformation("Process", this, outType, operator); environment.addOperator(outputTransform); return StreamUtils.wrapWithConfigureHandle( new NonKeyedPartitionStreamImpl<>(environment, outputTransform)); }
@Test void testStateErrorWithOneInputStream() throws Exception { ExecutionEnvironmentImpl env = StreamTestUtils.getEnv(); NonKeyedPartitionStreamImpl<Integer> stream = new NonKeyedPartitionStreamImpl<>( env, new TestingTransformation<>("t1", Types.INT, 1)); for (StateDeclaration stateDeclaration : Arrays.asList(modeNoneStateDeclaration, modeIdenticalStateDeclaration)) { assertThatThrownBy( () -> stream.process( new StreamTestUtils.NoOpOneInputStreamProcessFunction( new HashSet<>( Collections.singletonList( stateDeclaration))))) .isInstanceOf(IllegalRedistributionModeException.class); } }
int getMinLonForTile(double lon) { return (int) (Math.floor((180 + lon) / LON_DEGREE) * LON_DEGREE) - 180; }
@Test public void testMinLon() { assertEquals(-60, instance.getMinLonForTile(-59.9)); assertEquals(0, instance.getMinLonForTile(0.9)); }
public Stream<Hit> stream() { if (nPostingLists == 0) { return Stream.empty(); } return StreamSupport.stream(new PredicateSpliterator(), false); }
@Test void requireThatInsufficientIntervalCoveragePreventsMatch() { PredicateSearch search = createPredicateSearch( new byte[]{1, 1}, postingList(SubqueryBitmap.ALL_SUBQUERIES, entry(0, 0x00010001), entry(1, 0x000200ff))); assertEquals(List.of().toString(), search.stream().toList().toString()); }
public Set<MessageQueue> fetchMessageQueues(String topic) { Set<MessageQueue> mqSet = new HashSet<>(); TopicConfig topicConfig = selectTopicConfig(topic); if (topicConfig != null && topicConfig.getReadQueueNums() > 0) { for (int i = 0; i < topicConfig.getReadQueueNums(); i++) { MessageQueue mq = new MessageQueue(); mq.setTopic(topic); mq.setBrokerName(brokerController.getBrokerConfig().getBrokerName()); mq.setQueueId(i); mqSet.add(mq); } } return mqSet; }
@Test public void testFetchMessageQueues() { Set<MessageQueue> messageQueues = transactionBridge.fetchMessageQueues(TopicValidator.RMQ_SYS_TRANS_HALF_TOPIC); assertThat(messageQueues.size()).isEqualTo(1); }
@Override public Future<Void> notifyCheckpointAbortAsync( long checkpointId, long latestCompletedCheckpointId) { return notifyCheckpointOperation( () -> { if (latestCompletedCheckpointId > 0) { notifyCheckpointComplete(latestCompletedCheckpointId); } if (isCurrentSyncSavepoint(checkpointId)) { throw new FlinkRuntimeException("Stop-with-savepoint failed."); } subtaskCheckpointCoordinator.notifyCheckpointAborted( checkpointId, operatorChain, this::isRunning); }, String.format("checkpoint %d aborted", checkpointId)); }
@Test void testSavepointSuspendAbortedAsync() { assertThatThrownBy( () -> testSyncSavepointWithEndInput( (streamTask, abortCheckpointId) -> streamTask.notifyCheckpointAbortAsync( abortCheckpointId, 0), SavepointType.suspend(SavepointFormatType.CANONICAL), false)) .isInstanceOf(FlinkRuntimeException.class) .hasMessage("Stop-with-savepoint failed."); }
@Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((variables == null) ? 0 : variables.hashCode()); return result; }
@Test public void testHashCode() { UnmodifiableJMeterVariables otherUnmodifiables = new UnmodifiableJMeterVariables(vars); assertThat(unmodifiables.hashCode(), CoreMatchers.is(otherUnmodifiables.hashCode())); }
Selector unwrappedSelector() { return unwrappedSelector; }
@Test public void testInterruptEventLoopThread() throws Exception { EventLoopGroup group = new NioEventLoopGroup(1); final NioEventLoop loop = (NioEventLoop) group.next(); try { Selector selector = loop.unwrappedSelector(); assertTrue(selector.isOpen()); loop.submit(new Runnable() { @Override public void run() { // Interrupt the thread which should not end-up in a busy spin and // so the selector should not have been rebuild. Thread.currentThread().interrupt(); } }).syncUninterruptibly(); assertTrue(selector.isOpen()); final CountDownLatch latch = new CountDownLatch(2); loop.submit(new Runnable() { @Override public void run() { latch.countDown(); } }).syncUninterruptibly(); loop.schedule(new Runnable() { @Override public void run() { latch.countDown(); } }, 2, TimeUnit.SECONDS).syncUninterruptibly(); latch.await(); assertSame(selector, loop.unwrappedSelector()); assertTrue(selector.isOpen()); } finally { group.shutdownGracefully(); } }
public static TypeBuilder<Schema> builder() { return new TypeBuilder<>(new SchemaCompletion(), new NameContext()); }
@Test void string() { Schema.Type type = Schema.Type.STRING; Schema simple = SchemaBuilder.builder().stringType(); Schema expected = primitive(type, simple); Schema built1 = SchemaBuilder.builder().stringBuilder().prop("p", "v").endString(); assertEquals(expected, built1); }
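A brief hedged addition: with no props attached, the long builder form should equal the stringType() shorthand, since both produce a bare primitive schema.

@Test
void stringWithoutProps() {
    // no props: builder form and shorthand should yield equal schemas
    Schema built = SchemaBuilder.builder().stringBuilder().endString();
    assertEquals(SchemaBuilder.builder().stringType(), built);
}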
public void setProperty(String name, String value) { if (value == null) { return; } Method setter = aggregationAssessor.findSetterMethod(name); if (setter == null) { addWarn("No setter for property [" + name + "] in " + objClass.getName() + "."); } else { try { setProperty(setter, value); } catch (PropertySetterException ex) { addWarn("Failed to set property [" + name + "] to value \"" + value + "\". ", ex); } } }
@Test public void testFileSize() { setter.setProperty("fs", "2 kb"); assertEquals(2 * 1024, house.getFs().getSize()); }
public static KeyStore newStoreCopyContent(KeyStore originalKeyStore, char[] currentPassword, final char[] newPassword) throws GeneralSecurityException, IOException { if (newPassword == null) { throw new IllegalArgumentException("new password cannot be null"); } KeyStore newKeyStore = KeyStore.getInstance(PKCS12); newKeyStore.load(null, newPassword); final Enumeration<String> aliases = originalKeyStore.aliases(); while (aliases.hasMoreElements()) { String alias = aliases.nextElement(); if (originalKeyStore.entryInstanceOf(alias, KeyStore.PrivateKeyEntry.class)) { newKeyStore.setKeyEntry( alias, originalKeyStore.getKey(alias, currentPassword), newPassword, originalKeyStore.getCertificateChain(alias) ); } else if (originalKeyStore.entryInstanceOf(alias, KeyStore.TrustedCertificateEntry.class)) { newKeyStore.setCertificateEntry(alias, originalKeyStore.getCertificate(alias)); } else if (originalKeyStore.entryInstanceOf(alias, KeyStore.SecretKeyEntry.class)) { newKeyStore.setEntry(alias, originalKeyStore.getEntry(alias, new KeyStore.PasswordProtection(currentPassword)), new KeyStore.PasswordProtection(newPassword) ); } } return newKeyStore; }
@Test void testThrowsExceptionIfNewPasswordIsNull() throws Exception { KeyStore originalKeyStore = KeyStore.getInstance(PKCS12); assertThrows(IllegalArgumentException.class, () -> KeystoreUtils.newStoreCopyContent(originalKeyStore, "nvmd".toCharArray(), null)); }
@SuppressWarnings("unchecked") public static <T> boolean containsAny(T[] array, T... values) { for (T value : values) { if (contains(array, value)) { return true; } } return false; }
@Test public void containsAnyTest() { Integer[] a = {1, 2, 3, 4, 3, 6}; boolean contains = ArrayUtil.containsAny(a, 4, 10, 40); assertTrue(contains); contains = ArrayUtil.containsAny(a, 10, 40); assertFalse(contains); }
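A minimal hedged addition for the degenerate case: with an empty values varargs the loop body never executes, so containsAny is vacuously false.

@Test
public void containsAnyEmptyValuesTest() {
    Integer[] a = {1, 2, 3};
    // no candidate values: the loop never runs, so the result is false
    assertFalse(ArrayUtil.containsAny(a));
}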
@Override public File exportDumpOf(ProjectDescriptor descriptor) { String fileName = slugify(descriptor.getKey()) + DUMP_FILE_EXTENSION; return new File(exportDir, fileName); }
@Test public void exportDumpOf_slugifies_project_key() { assertThat(underTest.exportDumpOf(descriptorWithUglyKey)) .isEqualTo(new File(dataDir, "governance/project_dumps/export/so-me-a9c-key.zip")); }
@Override public T deserialize(final String topic, final byte[] bytes) { try { if (bytes == null) { return null; } // don't use the JsonSchemaConverter to read this data because // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS, // which is not currently available in the standard converters final JsonNode value = isJsonSchema ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class) : MAPPER.readTree(bytes); final Object coerced = enforceFieldType( "$", new JsonValueContext(value, schema) ); if (LOG.isTraceEnabled()) { LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced); } return SerdeUtils.castToTargetType(coerced, targetType); } catch (final Exception e) { // Clear location in order to avoid logging data, for security reasons if (e instanceof JsonParseException) { ((JsonParseException) e).clearLocation(); } throw new SerializationException( "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e); } }
@Test public void shouldCoerceFieldValues() { // Given: final Map<String, Object> anOrder = new HashMap<>(AN_ORDER); anOrder.put("orderId", 1); // <-- int, rather than required long in ORDER_SCHEMA. final byte[] bytes = serializeJson(anOrder); // When: final Object result = deserializer.deserialize(SOME_TOPIC, bytes); // Then: assertThat(result, is(expectedOrder)); }
public static String normalizeUri(String uri) throws URISyntaxException { // try to parse using the simpler and faster Camel URI parser String[] parts = CamelURIParser.fastParseUri(uri); if (parts != null) { // we optimized specially if an empty array is returned if (parts == URI_ALREADY_NORMALIZED) { return uri; } // use the faster and more simple normalizer return doFastNormalizeUri(parts); } else { // use the legacy normalizer as the uri is complex and may have unsafe URL characters return doComplexNormalizeUri(uri); } }
@Test public void testNormalizeEndpointUriWithUserInfoSpecialSign() throws Exception { String out1 = URISupport.normalizeUri("ftp://us%40r:t%st@localhost:21000/tmp3/camel?foo=us@r"); assertEquals("ftp://us%40r:t%25st@localhost:21000/tmp3/camel?foo=us%40r", out1); String out2 = URISupport.normalizeUri("ftp://us%40r:t%25st@localhost:21000/tmp3/camel?foo=us@r"); assertEquals("ftp://us%40r:t%25st@localhost:21000/tmp3/camel?foo=us%40r", out2); String out3 = URISupport.normalizeUri("ftp://us@r:t%st@localhost:21000/tmp3/camel?foo=us@r"); assertEquals("ftp://us%40r:t%25st@localhost:21000/tmp3/camel?foo=us%40r", out3); String out4 = URISupport.normalizeUri("ftp://us@r:t%25st@localhost:21000/tmp3/camel?foo=us@r"); assertEquals("ftp://us%40r:t%25st@localhost:21000/tmp3/camel?foo=us%40r", out4); }
@Override public PiAction mapTreatment(TrafficTreatment treatment, PiTableId piTableId) throws PiInterpreterException { if (FORWARDING_CTRL_TBLS.contains(piTableId)) { return treatmentInterpreter.mapForwardingTreatment(treatment, piTableId); } else if (PRE_NEXT_CTRL_TBLS.contains(piTableId)) { return treatmentInterpreter.mapPreNextTreatment(treatment, piTableId); } else if (ACL_CTRL_TBLS.contains(piTableId)) { return treatmentInterpreter.mapAclTreatment(treatment, piTableId); } else if (NEXT_CTRL_TBLS.contains(piTableId)) { return treatmentInterpreter.mapNextTreatment(treatment, piTableId); } else if (E_NEXT_CTRL_TBLS.contains(piTableId)) { return treatmentInterpreter.mapEgressNextTreatment(treatment, piTableId); } else { throw new PiInterpreterException(format( "Treatment mapping not supported for table '%s'", piTableId)); } }
@Test public void testNextTreatmentSimpleOutput() throws Exception { TrafficTreatment treatment = DefaultTrafficTreatment.builder() .setOutput(PORT_1) .build(); PiAction mappedAction = interpreter.mapTreatment( treatment, FabricConstants.FABRIC_INGRESS_NEXT_SIMPLE); PiActionParam param = new PiActionParam(FabricConstants.PORT_NUM, PORT_1.toLong()); PiAction expectedAction = PiAction.builder() .withId(FabricConstants.FABRIC_INGRESS_NEXT_OUTPUT_SIMPLE) .withParameter(param) .build(); assertEquals(expectedAction, mappedAction); }
public void maybeTriggerWakeup() { final AtomicBoolean throwWakeupException = new AtomicBoolean(false); pendingTask.getAndUpdate(task -> { if (task == null) { return null; } else if (task instanceof WakeupFuture) { throwWakeupException.set(true); return null; } else { return task; } }); if (throwWakeupException.get()) { throw new WakeupException(); } }
@Test public void testManualTriggerWhenWakeupNotCalled() { assertDoesNotThrow(() -> wakeupTrigger.maybeTriggerWakeup()); }
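A hedged companion sketch for the other branch: assuming the trigger exposes a wakeup() that registers a WakeupFuture as the pending task (that method is not shown above), a subsequent maybeTriggerWakeup() should throw.

@Test
public void testManualTriggerAfterWakeupCalled() {
    wakeupTrigger.wakeup(); // assumed API: registers a WakeupFuture as the pending task
    assertThrows(WakeupException.class, () -> wakeupTrigger.maybeTriggerWakeup());
}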
public static Map<TopicPartition, Long> parseSinkConnectorOffsets(Map<Map<String, ?>, Map<String, ?>> partitionOffsets) { Map<TopicPartition, Long> parsedOffsetMap = new HashMap<>(); for (Map.Entry<Map<String, ?>, Map<String, ?>> partitionOffset : partitionOffsets.entrySet()) { Map<String, ?> partitionMap = partitionOffset.getKey(); if (partitionMap == null) { throw new BadRequestException("The partition for a sink connector offset cannot be null or missing"); } if (!partitionMap.containsKey(KAFKA_TOPIC_KEY) || !partitionMap.containsKey(KAFKA_PARTITION_KEY)) { throw new BadRequestException(String.format("The partition for a sink connector offset must contain the keys '%s' and '%s'", KAFKA_TOPIC_KEY, KAFKA_PARTITION_KEY)); } if (partitionMap.get(KAFKA_TOPIC_KEY) == null) { throw new BadRequestException("Kafka topic names must be valid strings and may not be null"); } if (partitionMap.get(KAFKA_PARTITION_KEY) == null) { throw new BadRequestException("Kafka partitions must be valid numbers and may not be null"); } String topic = String.valueOf(partitionMap.get(KAFKA_TOPIC_KEY)); int partition; try { // We parse it this way because both "10" and 10 should be accepted as valid partition values in the REST API's // JSON request payload. If it throws an exception, we should propagate it since it's indicative of a badly formatted value. partition = Integer.parseInt(String.valueOf(partitionMap.get(KAFKA_PARTITION_KEY))); } catch (Exception e) { throw new BadRequestException("Failed to parse the following Kafka partition value in the provided offsets: '" + partitionMap.get(KAFKA_PARTITION_KEY) + "'. Partition values for sink connectors need " + "to be integers.", e); } TopicPartition tp = new TopicPartition(topic, partition); Map<String, ?> offsetMap = partitionOffset.getValue(); if (offsetMap == null) { // represents an offset reset parsedOffsetMap.put(tp, null); } else { if (!offsetMap.containsKey(KAFKA_OFFSET_KEY)) { throw new BadRequestException(String.format("The offset for a sink connector should either be null or contain " + "the key '%s'", KAFKA_OFFSET_KEY)); } long offset; try { // We parse it this way because both "1000" and 1000 should be accepted as valid offset values in the REST API's // JSON request payload. If it throws an exception, we should propagate it since it's indicative of a badly formatted value. offset = Long.parseLong(String.valueOf(offsetMap.get(KAFKA_OFFSET_KEY))); } catch (Exception e) { throw new BadRequestException("Failed to parse the following Kafka offset value in the provided offsets: '" + offsetMap.get(KAFKA_OFFSET_KEY) + "'. Offset values for sink connectors need " + "to be integers.", e); } parsedOffsetMap.put(tp, offset); } } return parsedOffsetMap; }
@Test public void testNullPartition() { Map<String, Object> offset = new HashMap<>(); offset.put(SinkUtils.KAFKA_OFFSET_KEY, 100); Map<Map<String, ?>, Map<String, ?>> partitionOffsets = new HashMap<>(); partitionOffsets.put(null, offset); ConnectException e = assertThrows(ConnectException.class, () -> SinkUtils.parseSinkConnectorOffsets(partitionOffsets)); assertThat(e.getMessage(), containsString("The partition for a sink connector offset cannot be null or missing")); Map<String, Object> partitionMap = new HashMap<>(); partitionMap.put(SinkUtils.KAFKA_TOPIC_KEY, "topic"); partitionMap.put(SinkUtils.KAFKA_PARTITION_KEY, null); partitionOffsets.clear(); partitionOffsets.put(partitionMap, offset); e = assertThrows(ConnectException.class, () -> SinkUtils.parseSinkConnectorOffsets(partitionOffsets)); assertThat(e.getMessage(), containsString("Kafka partitions must be valid numbers and may not be null")); }
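A hedged sketch of the offset-reset branch in parseSinkConnectorOffsets: per the focal method, a null offset map represents a reset and should be parsed into a null offset rather than rejected.

@Test
public void testNullOffsetMapMeansReset() {
    Map<String, Object> partitionMap = new HashMap<>();
    partitionMap.put(SinkUtils.KAFKA_TOPIC_KEY, "topic");
    partitionMap.put(SinkUtils.KAFKA_PARTITION_KEY, 10);
    Map<Map<String, ?>, Map<String, ?>> partitionOffsets = new HashMap<>();
    partitionOffsets.put(partitionMap, null);
    Map<TopicPartition, Long> parsed = SinkUtils.parseSinkConnectorOffsets(partitionOffsets);
    assertEquals(1, parsed.size());
    // a null offset map is recorded as a null offset (reset) for the partition
    assertNull(parsed.get(new TopicPartition("topic", 10)));
}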
public static CsvMapper createCsvMapper() { final CsvMapper csvMapper = new CsvMapper(); registerModules(csvMapper); return csvMapper; }
@Test void testCsvMapperDateTimeSupportedEnabled() throws Exception { final CsvMapper mapper = JacksonMapperFactory.createCsvMapper(); final String instantString = "2022-08-07T12:00:33.107787800Z"; final Instant instant = Instant.parse(instantString); final String instantCsv = String.format("\"%s\"\n", instantString); final ObjectWriter writer = mapper.writerWithSchemaFor(TypeWithInstant.class); assertThat(writer.writeValueAsString(new TypeWithInstant(instant))).isEqualTo(instantCsv); final ObjectReader reader = mapper.readerWithSchemaFor(TypeWithInstant.class); assertThat(reader.readValue(instantCsv, TypeWithInstant.class).data).isEqualTo(instant); }
@Override public String getName() { return ACTION_NAME; }
@Test void testGetName() { assertEquals("search_web", searchWebAction.getName()); }
public static Permission getPermission(String name, String serviceName, String... actions) { PermissionFactory permissionFactory = PERMISSION_FACTORY_MAP.get(serviceName); if (permissionFactory == null) { throw new IllegalArgumentException("No permissions found for service: " + serviceName); } return permissionFactory.create(name, actions); }
@Test public void getPermission_NamespaceService() { Permission permission = ActionConstants.getPermission("foo", UserCodeNamespaceService.SERVICE_NAME); assertNotNull(permission); assertTrue(permission instanceof UserCodeNamespacePermission); }
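A hedged counterpart for the failure branch of getPermission: an unregistered service name should raise IllegalArgumentException (the service name below is made up; assertThrows assumed available in the test's JUnit version).

@Test
public void getPermission_UnknownService() {
    assertThrows(IllegalArgumentException.class,
            () -> ActionConstants.getPermission("foo", "no-such-service"));
}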
@Override public Table getTable(String dbName, String tblName) { JDBCTableName jdbcTable = new JDBCTableName(null, dbName, tblName); return tableInstanceCache.get(jdbcTable, k -> { try (Connection connection = getConnection()) { ResultSet columnSet = schemaResolver.getColumns(connection, dbName, tblName); List<Column> fullSchema = schemaResolver.convertToSRTable(columnSet); List<Column> partitionColumns = Lists.newArrayList(); if (schemaResolver.isSupportPartitionInformation()) { partitionColumns = listPartitionColumns(dbName, tblName, fullSchema); } if (fullSchema.isEmpty()) { return null; } Integer tableId = tableIdCache.getPersistentCache(jdbcTable, j -> ConnectorTableId.CONNECTOR_ID_GENERATOR.getNextId().asInt()); return schemaResolver.getTable(tableId, tblName, fullSchema, partitionColumns, dbName, catalogName, properties); } catch (SQLException | DdlException e) { LOG.warn("get table for JDBC catalog fail!", e); return null; } }); }
@Test public void testCacheTableId() { try { JDBCMetadata jdbcMetadata = new JDBCMetadata(properties, "catalog", dataSource); Table table1 = jdbcMetadata.getTable("test", "tbl1"); columnResult.beforeFirst(); Table table2 = jdbcMetadata.getTable("test", "tbl1"); Assert.assertTrue(table1.getId() == table2.getId()); } catch (Exception e) { System.out.println(e.getMessage()); Assert.fail(); } }
public static IpPrefix valueOf(int address, int prefixLength) { return new IpPrefix(IpAddress.valueOf(address), prefixLength); }
@Test(expected = IllegalArgumentException.class) public void testInvalidValueOfIncorrectString() { IpPrefix ipPrefix; String fromString; fromString = "NoSuchIpPrefix"; ipPrefix = IpPrefix.valueOf(fromString); }
public synchronized String createDataset(String region) throws BigQueryResourceManagerException { // Check to see if dataset already exists, and throw error if it does if (dataset != null) { throw new IllegalStateException( "Dataset " + datasetId + " already exists for project " + projectId + "."); } LOG.info("Creating dataset {} in project {}.", datasetId, projectId); // Send the dataset request to Google Cloud try { DatasetInfo datasetInfo = DatasetInfo.newBuilder(datasetId).setLocation(region).build(); dataset = bigQuery.create(datasetInfo); LOG.info("Dataset {} created successfully", datasetId); return datasetId; } catch (Exception e) { throw new BigQueryResourceManagerException("Failed to create dataset.", e); } }
@Test public void testCreateDatasetShouldThrowErrorWhenDatasetCreateFails() { when(bigQuery.create(any(DatasetInfo.class))).thenThrow(RuntimeException.class); assertThrows( BigQueryResourceManagerException.class, () -> testManager.createDataset(DATASET_ID)); }
@Override public boolean accept(final Path source, final Local local, final TransferStatus parent) { return true; }
@Test public void testAcceptDirectoryNew() throws Exception { final HashMap<Path, Path> files = new HashMap<>(); final Path source = new Path("a", EnumSet.of(Path.Type.directory)); files.put(source, new Path("a", EnumSet.of(Path.Type.directory))); AbstractCopyFilter f = new OverwriteFilter(new NullSession(new Host(new TestProtocol())), new NullSession(new Host(new TestProtocol())), files); assertTrue(f.accept(source, null, new TransferStatus())); }
public Iterable<ConsumerRecord<byte[], byte[]>> getNewCommands(final Duration timeout) { final Iterable<ConsumerRecord<byte[], byte[]>> iterable = commandConsumer.poll(timeout); final List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>(); if (iterable != null) { for (ConsumerRecord<byte[], byte[]> record : iterable) { try { backupRecord(Optional.of(commandTopicBackup), record); } catch (final CommandTopicCorruptionException e) { log.warn("Backup is out of sync with the current command topic. " + "Backups will not work until the previous command topic is " + "restored or all backup files are deleted.", e); return records; } records.add(record); } } return records; }
@Test public void shouldNotGetCommandsWhenCommandTopicCorruptionWhenBackingUp() { // Given: when(commandConsumer.poll(any(Duration.class))).thenReturn(consumerRecords); doNothing().doThrow(new CommandTopicCorruptionException("error")).when(commandTopicBackup).writeRecord(any()); // When: final Iterable<ConsumerRecord<byte[], byte[]>> newCommands = commandTopic .getNewCommands(Duration.ofHours(1)); final List<ConsumerRecord<byte[], byte[]>> newCommandsList = ImmutableList.copyOf(newCommands); // Then: assertThat(newCommandsList.size(), is(1)); assertThat(newCommandsList, equalTo(ImmutableList.of(record1))); verify(commandTopicBackup, never()).writeRecord(record3); }
public boolean add(@Nonnull T toAdd) { addInternal(toAdd); return toAdd.getInternalIndex() == getHeadElementIndex(); }
@Test void testAdd() { HeapPriorityQueue<TestElement> priorityQueue = newPriorityQueue(1); final List<TestElement> testElements = Arrays.asList(new TestElement(4711L, 42L), new TestElement(815L, 23L)); testElements.sort( (l, r) -> getTestElementPriorityComparator().compare(r.priority, l.priority)); assertThat(priorityQueue.add(testElements.get(0))).isTrue(); assertThat(priorityQueue.size()).isEqualTo(1); assertThat(priorityQueue.add(testElements.get(1))).isTrue(); assertThat(priorityQueue.size()).isEqualTo(2); assertThat(priorityQueue.poll()).isEqualTo(testElements.get(1)); assertThat(priorityQueue.size()).isEqualTo(1); assertThat(priorityQueue.poll()).isEqualTo(testElements.get(0)); assertThat(priorityQueue.size()).isZero(); }
@Override public void write(int b) { ensureAvailable(1); buffer[pos++] = (byte) (b); }
@Test(expected = IndexOutOfBoundsException.class) public void testWriteForBOffLen_negativeLen() { out.write(TEST_DATA, 0, -3); }
@Override public void selectInstances(Map<Integer, List<InstanceConfig>> poolToInstanceConfigsMap, InstancePartitions instancePartitions) { int numPools = poolToInstanceConfigsMap.size(); Preconditions.checkState(numPools != 0, "No pool qualified for selection"); int tableNameHash = Math.abs(_tableNameWithType.hashCode()); List<Integer> pools = new ArrayList<>(poolToInstanceConfigsMap.keySet()); pools.sort(null); LOGGER.info("Starting instance replica-group/partition selection for table: {} with hash: {} from pools: {}, " + "minimize data movement: {}", _tableNameWithType, tableNameHash, pools, _minimizeDataMovement); if (_replicaGroupPartitionConfig.isReplicaGroupBased()) { if (_minimizeDataMovement) { replicaGroupBasedMinimumMovement(poolToInstanceConfigsMap, instancePartitions, pools, tableNameHash); } else { replicaGroupBasedSimple(poolToInstanceConfigsMap, instancePartitions, pools, tableNameHash); } } else { nonReplicaGroupBased(poolToInstanceConfigsMap, instancePartitions, pools, tableNameHash); } }
@Test public void testPoolsWhenOneMorePoolAddedAndOneMoreReplicaGroupsNeeded() throws JsonProcessingException { //@formatter:off String existingPartitionsJson = "{\n" + " \"instancePartitionsName\": \"0f97dac8-4123-47c6-9a4d-b8ce039c5ea5_OFFLINE\",\n" + " \"partitionToInstancesMap\": {\n" + " \"0_0\": [\n" + " \"Server_pinot-server-rg0-0.pinot-server-headless.pinot.svc.cluster.local_8098\",\n" + " \"Server_pinot-server-rg0-1.pinot-server-headless.pinot.svc.cluster.local_8098\"\n" + " ]\n" + " }\n" + "}"; //@formatter:on InstancePartitions existing = OBJECT_MAPPER.readValue(existingPartitionsJson, InstancePartitions.class); InstanceReplicaGroupPartitionConfig config = new InstanceReplicaGroupPartitionConfig(true, 0, 2, 2, 1, 2, true, null); InstanceReplicaGroupPartitionSelector selector = new InstanceReplicaGroupPartitionSelector(config, "tableNameBlah", existing, true); String[] serverNames = {"rg0-0", "rg0-1", "rg1-0", "rg1-1"}; String[] poolNumbers = {"0", "0", "1", "1"}; String[] poolNames = { "FirstHalfReplicationGroups", "FirstHalfReplicationGroups", "SecondHalfReplicationGroups", "SecondHalfReplicationGroups" }; Map<Integer, List<InstanceConfig>> poolToInstanceConfigsMap = new HashMap<>(); for (int i = 0; i < serverNames.length; i++) { Map<String, String> valuesMap = new HashMap<>(); valuesMap.put("serverName", serverNames[i]); valuesMap.put("pool", poolNumbers[i]); valuesMap.put("poolName", poolNames[i]); StringSubstitutor substitutor = new StringSubstitutor(valuesMap); String resolvedString = substitutor.replace(INSTANCE_CONFIG_TEMPLATE); ZNRecord znRecord = OBJECT_MAPPER.readValue(resolvedString, ZNRecord.class); int poolNumber = Integer.parseInt(poolNumbers[i]); poolToInstanceConfigsMap.computeIfAbsent(poolNumber, k -> new ArrayList<>()).add(new InstanceConfig(znRecord)); } InstancePartitions assignedPartitions = new InstancePartitions("0f97dac8-4123-47c6-9a4d-b8ce039c5ea5_OFFLINE"); selector.selectInstances(poolToInstanceConfigsMap, assignedPartitions); // Now that 1 more pool is added and 1 more RG is needed, a new set called "0_1" is generated, // and the instances from Pool 1 are assigned to this new replica. //@formatter:off String expectedInstancePartitions = "{\n" + " \"instancePartitionsName\": \"0f97dac8-4123-47c6-9a4d-b8ce039c5ea5_OFFLINE\",\n" + " \"partitionToInstancesMap\": {\n" + " \"0_0\": [\n" + " \"Server_pinot-server-rg0-0.pinot-server-headless.pinot.svc.cluster.local_8098\",\n" + " \"Server_pinot-server-rg0-1.pinot-server-headless.pinot.svc.cluster.local_8098\"\n" + " ],\n" + " \"0_1\": [\n" + " \"Server_pinot-server-rg1-0.pinot-server-headless.pinot.svc.cluster.local_8098\",\n" + " \"Server_pinot-server-rg1-1.pinot-server-headless.pinot.svc.cluster.local_8098\"\n" + " ]\n" + " }\n" + "}"; //@formatter:on InstancePartitions expectedPartitions = OBJECT_MAPPER.readValue(expectedInstancePartitions, InstancePartitions.class); assertEquals(assignedPartitions, expectedPartitions); }
public static String[] parseKey(String groupKey) { StringBuilder sb = new StringBuilder(); String dataId = null; String group = null; String tenant = null; for (int i = 0; i < groupKey.length(); ++i) { char c = groupKey.charAt(i); if ('+' == c) { if (null == dataId) { dataId = sb.toString(); sb.setLength(0); } else if (null == group) { group = sb.toString(); sb.setLength(0); } else { throw new IllegalArgumentException("invalid groupkey:" + groupKey); } } else if ('%' == c) { char next = groupKey.charAt(++i); char nextnext = groupKey.charAt(++i); if ('2' == next && 'B' == nextnext) { sb.append('+'); } else if ('2' == next && '5' == nextnext) { sb.append('%'); } else { throw new IllegalArgumentException("invalid groupkey:" + groupKey); } } else { sb.append(c); } } if (StringUtils.isBlank(group)) { group = sb.toString(); } else { tenant = sb.toString(); } if (group.length() == 0) { throw new IllegalArgumentException("invalid groupkey:" + groupKey); } return new String[] {dataId, group, tenant}; }
@Test void testParseKeyForPlusIllegalArgumentException() { assertThrows(IllegalArgumentException.class, () -> { GroupKey.parseKey("+"); // Method is not expected to return due to exception thrown }); }
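A hedged happy-path sketch for parseKey, assuming the two-part "dataId+group" key format implied by the parser above (no tenant segment, so the third element stays null).

@Test
void testParseKeyTwoParts() {
    String[] parts = GroupKey.parseKey("dataId+group");
    assertEquals("dataId", parts[0]);
    assertEquals("group", parts[1]);
    // no second '+' means no tenant segment
    assertNull(parts[2]);
}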
@Override public String resolve(Method method, Object[] arguments, String spelExpression) { if (StringUtils.isEmpty(spelExpression)) { return spelExpression; } if (spelExpression.matches(PLACEHOLDER_SPEL_REGEX) && stringValueResolver != null) { return stringValueResolver.resolveStringValue(spelExpression); } if (spelExpression.matches(METHOD_SPEL_REGEX)) { SpelRootObject rootObject = new SpelRootObject(method, arguments); MethodBasedEvaluationContext evaluationContext = new MethodBasedEvaluationContext(rootObject, method, arguments, parameterNameDiscoverer); Object evaluated = expressionParser.parseExpression(spelExpression).getValue(evaluationContext); return (String) evaluated; } if (spelExpression.matches(BEAN_SPEL_REGEX)) { SpelRootObject rootObject = new SpelRootObject(method, arguments); MethodBasedEvaluationContext evaluationContext = new MethodBasedEvaluationContext(rootObject, method, arguments, parameterNameDiscoverer); evaluationContext.setBeanResolver(new BeanFactoryResolver(this.beanFactory)); Object evaluated = expressionParser.parseExpression(spelExpression).getValue(evaluationContext); return (String) evaluated; } return spelExpression; }
@Test public void testRootArgs() throws Exception { String testExpression = "#root.args[0]"; String firstArgument = "test"; DefaultSpelResolverTest target = new DefaultSpelResolverTest(); Method testMethod = target.getClass().getMethod("testMethod", String.class); String result = sut.resolve(testMethod, new Object[]{firstArgument}, testExpression); assertThat(result).isEqualTo(firstArgument); }
@Override public void validate(final String methodName, final Class<?>[] parameterTypes, final Object[] arguments) throws Exception { List<Class<?>> groups = new ArrayList<>(); Class<?> methodClass = methodClass(methodName); if (Objects.nonNull(methodClass)) { groups.add(methodClass); } Set<ConstraintViolation<?>> violations = new HashSet<>(); Method method = clazz.getMethod(methodName, parameterTypes); Class<?>[] methodClasses; if (method.isAnnotationPresent(MethodValidated.class)) { methodClasses = method.getAnnotation(MethodValidated.class).value(); groups.addAll(Arrays.asList(methodClasses)); } // add into default group groups.add(0, Default.class); groups.add(1, clazz); // convert list to array Class<?>[] classGroups = new Class<?>[groups.size()]; classGroups = groups.toArray(classGroups); Object parameterBean = getMethodParameterBean(clazz, method, arguments); if (parameterBean != null) { violations.addAll(validator.validate(parameterBean, classGroups)); } for (Object arg : arguments) { validate(violations, arg, classGroups); } if (!violations.isEmpty()) { LOG.error("Failed to validate service: {}, method: {}, cause: {}", clazz.getName(), methodName, violations); StringBuilder validateError = new StringBuilder(); violations.forEach(each -> validateError.append(each.getMessage()).append(",")); throw new ValidationException(validateError.substring(0, validateError.length() - 1)); } }
@Test public void validate() throws Exception { URL url = URL.valueOf("dubbo://127.0.0.1:20880/org.apache.shenyu" + ".client.apache.dubbo.validation.service.TestService" + "?accepts=500&anyhost=true&application=shenyu-proxy" + "&bind.ip=127.0.0.1&bind.port=20880&deprecated=false" + "&dubbo=2.0.2&dynamic=true&generic=false" + "&interface=org.apache.shenyu.client.apache.dubbo.validation.service.TestService" + "&keep.alive=true&methods=test&pid=67352&qos.enable=false&release=2.7.0" + "&side=provider&threadpool=fixed&threads=500&timeout=20000" + "&timestamp=1608119259859&validation=shenyuValidation"); Validator apacheDubboClientValidator = new ApacheDubboClientValidation().getValidator(url); apacheDubboClientValidator.validate("test", new Class[]{TestService.TestObject.class}, new Object[]{new TestService.TestObject(1)}); }
public static Write<PubsubMessage> writeMessagesDynamic() { return Write.newBuilder() .setTopicProvider(null) .setTopicFunction(null) .setDynamicDestinations(true) .build(); }
@Test public void testBigMessageBounded() throws IOException { String bigMsg = IntStream.range(0, 100_000).mapToObj(_unused -> "x").collect(Collectors.joining("")); OutgoingMessage msg = OutgoingMessage.of( com.google.pubsub.v1.PubsubMessage.newBuilder() .setData(ByteString.copyFromUtf8(bigMsg)) .build(), 0, null, "projects/project/topics/topic1"); try (PubsubTestClientFactory factory = PubsubTestClient.createFactoryForPublish(null, ImmutableList.of(msg), ImmutableList.of())) { TimestampedValue<PubsubMessage> pubsubMsg = TimestampedValue.of( new PubsubMessage( msg.getMessage().getData().toByteArray(), Collections.emptyMap(), msg.recordId()) .withTopic(msg.topic()), Instant.ofEpochMilli(msg.getTimestampMsSinceEpoch())); PCollection<PubsubMessage> messages = pipeline.apply( Create.timestamped(ImmutableList.of(pubsubMsg)) .withCoder(PubsubMessageWithTopicCoder.of())); messages.setIsBoundedInternal(PCollection.IsBounded.BOUNDED); messages.apply(PubsubIO.writeMessagesDynamic().withClientFactory(factory)); pipeline.run(); } }
public Date parseString(String dateString) throws ParseException { if (dateString == null || dateString.isEmpty()) { return null; } Matcher xep82WoMillisMatcher = xep80DateTimeWoMillisPattern.matcher(dateString); Matcher xep82Matcher = xep80DateTimePattern.matcher(dateString); if (xep82WoMillisMatcher.matches() || xep82Matcher.matches()) { String rfc822Date; // Convert the ISO 8601 time zone string to a RFC822 compatible format // since SimpleDateFormat supports ISO8601 only with Java7 or higher if (dateString.charAt(dateString.length() - 1) == 'Z') { rfc822Date = dateString.replace("Z", "+0000"); } else { // If the time zone wasn't specified with 'Z', then it's in // ISO8601 format (i.e. '(+|-)HH:mm') // RFC822 needs a similar format just without the colon (i.e. // '(+|-)HHmm)'), so remove it int lastColon = dateString.lastIndexOf(':'); rfc822Date = dateString.substring(0, lastColon) + dateString.substring(lastColon + 1); } if (xep82WoMillisMatcher.matches()) { synchronized (dateTimeFormatWoMillies) { return dateTimeFormatWoMillies.parse(rfc822Date); } } else { // OF-898: Replace any number of millisecond-characters with at most three of them. rfc822Date = rfc822Date.replaceAll("(\\.[0-9]{3})[0-9]*", "$1"); synchronized (dateTimeFormat) { return dateTimeFormat.parse(rfc822Date); } } } throw new ParseException("Date String could not be parsed: \"" + dateString + "\"", 0); }
@Test public void testEmpty() throws Exception { // Setup fixture final String testValue = ""; // Execute system under test final Date result = xmppDateTimeFormat.parseString(testValue); // Verify results assertNull(result); }
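A worked example of the time-zone rewrite parseString performs above (the date values are illustrative):
// "2023-04-01T10:15:30Z" -> "2023-04-01T10:15:30+0000" ('Z' replaced for RFC822)
// "2023-04-01T10:15:30+02:00" -> "2023-04-01T10:15:30+0200" (last ':' dropped)
// "2023-04-01T10:15:30.123456Z" -> fractional seconds truncated to ".123" before parsing
Date date = xmppDateTimeFormat.parseString("2023-04-01T10:15:30+02:00");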
@Override public void invoke(NamingEvent event) { logInvoke(event); if (listener instanceof AbstractEventListener && ((AbstractEventListener) listener).getExecutor() != null) { ((AbstractEventListener) listener).getExecutor().execute(() -> listener.onEvent(event)); } else { listener.onEvent(event); } }
@Test public void testAbstractEventListener() { AbstractEventListener listener = mock(AbstractEventListener.class); NamingListenerInvoker listenerInvoker = new NamingListenerInvoker(listener); NamingEvent event = new NamingEvent("serviceName", Collections.emptyList()); listenerInvoker.invoke(event); verify(listener).getExecutor(); }
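A minimal listener sketch for the asynchronous branch of invoke above, assuming the Nacos AbstractEventListener contract as used in the method (the executor wiring is illustrative):
AbstractEventListener asyncListener = new AbstractEventListener() {
    private final Executor executor = Executors.newSingleThreadExecutor();
    @Override public Executor getExecutor() { return executor; } // non-null, so invoke() dispatches onEvent asynchronously
    @Override public void onEvent(Event event) { /* handle instance change */ }
};
new NamingListenerInvoker(asyncListener).invoke(new NamingEvent("serviceName", Collections.emptyList()));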
public int filterEntriesForConsumer(List<? extends Entry> entries, EntryBatchSizes batchSizes, SendMessageInfo sendMessageInfo, EntryBatchIndexesAcks indexesAcks, ManagedCursor cursor, boolean isReplayRead, Consumer consumer) { return filterEntriesForConsumer(null, 0, entries, batchSizes, sendMessageInfo, indexesAcks, cursor, isReplayRead, consumer); }
@Test public void testFilterEntriesForConsumerOfNullElement() { List<Entry> entries = new ArrayList<>(); entries.add(null); SendMessageInfo sendMessageInfo = SendMessageInfo.getThreadLocal(); EntryBatchSizes batchSizes = EntryBatchSizes.get(entries.size()); int size = this.helper.filterEntriesForConsumer(entries, batchSizes, sendMessageInfo, null, null, false, null); assertEquals(size, 0); }
static BlockStmt getSimpleSetPredicateVariableDeclaration(final String variableName, final SimpleSetPredicate simpleSetPredicate) { final MethodDeclaration methodDeclaration = SIMPLESET_PREDICATE_TEMPLATE.getMethodsByName(GETKIEPMMLSIMPLESETPREDICATE).get(0).clone(); final BlockStmt simpleSetPredicateBody = methodDeclaration.getBody().orElseThrow(() -> new KiePMMLException(String.format(MISSING_BODY_TEMPLATE, methodDeclaration))); final VariableDeclarator variableDeclarator = getVariableDeclarator(simpleSetPredicateBody, SIMPLESET_PREDICATE).orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_IN_BODY, SIMPLESET_PREDICATE, simpleSetPredicateBody))); variableDeclarator.setName(variableName); final BlockStmt toReturn = new BlockStmt(); final NodeList<Expression> arguments = new NodeList<>(); List<Object> values = getObjectsFromArray(simpleSetPredicate.getArray()); for (Object value : values) { arguments.add(getExpressionForObject(value)); } final ARRAY_TYPE arrayType = ARRAY_TYPE.byName(simpleSetPredicate.getArray().getType().value()); final NameExpr arrayTypeExpr = new NameExpr(ARRAY_TYPE.class.getName() + "." + arrayType.name()); final IN_NOTIN inNotIn = IN_NOTIN.byName(simpleSetPredicate.getBooleanOperator().value()); final NameExpr inNotInExpr = new NameExpr(IN_NOTIN.class.getName() + "." + inNotIn.name()); final MethodCallExpr initializer = variableDeclarator.getInitializer() .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_INITIALIZER_TEMPLATE, SIMPLESET_PREDICATE, simpleSetPredicateBody))) .asMethodCallExpr(); final MethodCallExpr builder = getChainedMethodCallExprFrom("builder", initializer); builder.setArgument(0, new StringLiteralExpr(simpleSetPredicate.getField())); builder.setArgument(2, arrayTypeExpr); builder.setArgument(3, inNotInExpr); getChainedMethodCallExprFrom("asList", initializer).setArguments(arguments); simpleSetPredicateBody.getStatements().forEach(toReturn::addStatement); return toReturn; }
@Test void getSimpleSetPredicateVariableDeclaration() throws IOException { String variableName = "variableName"; Array.Type arrayType = Array.Type.STRING; List<String> values = getStringObjects(arrayType, 4); SimpleSetPredicate simpleSetPredicate = getSimpleSetPredicate(values, arrayType, SimpleSetPredicate.BooleanOperator.IS_IN); String arrayTypeString = ARRAY_TYPE.class.getName() + "." + ARRAY_TYPE.byName(simpleSetPredicate.getArray().getType().value()); String booleanOperatorString = IN_NOTIN.class.getName() + "." + IN_NOTIN.byName(simpleSetPredicate.getBooleanOperator().value()); String valuesString = values.stream() .map(valueString -> "\"" + valueString + "\"") .collect(Collectors.joining(",")); DataField dataField = new DataField(); dataField.setName(simpleSetPredicate.getField()); dataField.setDataType(DataType.DOUBLE); DataDictionary dataDictionary = new DataDictionary(); dataDictionary.addDataFields(dataField); BlockStmt retrieved = KiePMMLSimpleSetPredicateFactory.getSimpleSetPredicateVariableDeclaration(variableName, simpleSetPredicate); String text = getFileContent(TEST_01_SOURCE); Statement expected = JavaParserUtils.parseBlock(String.format(text, variableName,simpleSetPredicate.getField(), arrayTypeString, booleanOperatorString, valuesString)); assertThat(JavaParserUtils.equalsNode(expected, retrieved)).isTrue(); List<Class<?>> imports = Arrays.asList(KiePMMLSimpleSetPredicate.class, Arrays.class, Collections.class); commonValidateCompilationWithImports(retrieved, imports); }
@VisibleForTesting public ProcessContinuation run( RestrictionTracker<OffsetRange, Long> tracker, OutputReceiver<PartitionRecord> receiver, ManualWatermarkEstimator<Instant> watermarkEstimator, InitialPipelineState initialPipelineState) throws Exception { LOG.debug("DNP: Watermark: " + watermarkEstimator.getState()); LOG.debug("DNP: CurrentTracker: " + tracker.currentRestriction().getFrom()); if (tracker.currentRestriction().getFrom() == 0L) { if (!tracker.tryClaim(0L)) { LOG.error( "Could not claim initial DetectNewPartition restriction. No partitions are outputted."); return ProcessContinuation.stop(); } watermarkEstimator.setWatermark(initialPipelineState.getStartTime()); if (initialPipelineState.isResume()) { resumeFromPreviousPipelineAction.run(receiver); } else { generateInitialPartitionsAction.run(receiver, initialPipelineState.getStartTime()); } return ProcessContinuation.resume(); } // Create a new partition reconciler every run to reset the state each time. partitionReconciler = new PartitionReconciler(metadataTableDao, metrics); orphanedMetadataCleaner = new OrphanedMetadataCleaner(); // Calculating the new value of watermark is a resource intensive process. We have to do a full // scan of the metadata table and then ensure we're not missing partitions and then calculate // the low watermark. This is usually a fairly fast process even with thousands of partitions. // However, sometimes this may take so long that the runner checkpoints before the watermark is // calculated. Because the checkpoint takes place before tryClaim, this forces the DoFn to // restart, wasting the resources spent calculating the watermark. On restart, we will try to // calculate the watermark again. The problem causing the slow watermark calculation can persist // leading to a crash loop. In order to ensure we persist the calculated watermark, we calculate // the watermark after successful tryClaim. Then we write to the metadata table the new // watermark. On the start of each run we read the watermark and update the DoFn's watermark. DetectNewPartitionsState detectNewPartitionsState = metadataTableDao.readDetectNewPartitionsState(); if (detectNewPartitionsState != null) { watermarkEstimator.setWatermark(detectNewPartitionsState.getWatermark()); } // Terminate if endTime <= watermark that means all partitions have read up to or beyond // watermark. We no longer need to manage splits and merges, we can terminate. if (endTime != null && !watermarkEstimator.currentWatermark().isBefore(endTime)) { tracker.tryClaim(tracker.currentRestriction().getTo()); return ProcessContinuation.stop(); } if (!tracker.tryClaim(tracker.currentRestriction().getFrom())) { LOG.warn("DNP: Checkpointing, stopping this run: " + tracker.currentRestriction()); return ProcessContinuation.stop(); } // Read StreamPartitions to calculate watermark. List<StreamPartitionWithWatermark> streamPartitionsWithWatermark = null; if (shouldUpdateWatermark(tracker.currentRestriction().getFrom(), detectNewPartitionsState)) { streamPartitionsWithWatermark = metadataTableDao.readStreamPartitionsWithWatermark(); } // Process NewPartitions and track the ones successfully outputted. List<NewPartition> newPartitions = metadataTableDao.readNewPartitions(); List<ByteStringRange> outputtedNewPartitions = new ArrayList<>(); for (NewPartition newPartition : newPartitions) { if (processNewPartitionsAction.processNewPartition(newPartition, receiver)) { outputtedNewPartitions.add(newPartition.getPartition()); } else if (streamPartitionsWithWatermark != null) { // streamPartitionsWithWatermark is not null on runs that we update watermark. We only run // reconciliation when we update watermark. Only add incompleteNewPartitions if // reconciliation is being run partitionReconciler.addIncompleteNewPartitions(newPartition); orphanedMetadataCleaner.addIncompleteNewPartitions(newPartition); } } // Process the watermark using read StreamPartitions and NewPartitions. if (streamPartitionsWithWatermark != null) { Optional<Instant> maybeWatermark = getNewWatermark(streamPartitionsWithWatermark, newPartitions); maybeWatermark.ifPresent(metadataTableDao::updateDetectNewPartitionWatermark); // Only start reconciling after the pipeline has been running for a while. if (tracker.currentRestriction().getFrom() > 50) { // Using NewPartitions and StreamPartitions, evaluate partitions that are possibly not being // streamed. This isn't perfect because there may be partitions moving between // StreamPartitions and NewPartitions while scanning the metadata table. Also, this does not // include NewPartitions marked as deleted from a previous DNP run not yet processed by // RCSP. List<ByteStringRange> existingPartitions = streamPartitionsWithWatermark.stream() .map(StreamPartitionWithWatermark::getPartition) .collect(Collectors.toList()); existingPartitions.addAll(outputtedNewPartitions); List<ByteStringRange> missingStreamPartitions = getMissingPartitionsFromEntireKeySpace(existingPartitions); orphanedMetadataCleaner.addMissingPartitions(missingStreamPartitions); partitionReconciler.addMissingPartitions(missingStreamPartitions); processReconcilerPartitions( receiver, watermarkEstimator, initialPipelineState.getStartTime()); cleanUpOrphanedMetadata(); } } return ProcessContinuation.resume().withResumeDelay(Duration.millis(100)); }
@Test public void testProcessMergeNewPartitionsMissingParent() throws Exception { // Avoid 0 and multiples of 2 so that we can specifically test just reading new partitions. OffsetRange offsetRange = new OffsetRange(1, Long.MAX_VALUE); when(tracker.currentRestriction()).thenReturn(offsetRange); when(tracker.tryClaim(offsetRange.getFrom())).thenReturn(true); // ["a, "b") and ["b", "c") merge into ["a", "c") but ["b", "c") is still processing. ByteStringRange parentPartitionAB = ByteStringRange.create("a", "b"); Instant watermarkAB = startTime; ChangeStreamContinuationToken tokenAB = ChangeStreamContinuationToken.create(parentPartitionAB, "ab"); ByteStringRange childPartitionAC = ByteStringRange.create("a", "c"); NewPartition newPartitionACFromAB = new NewPartition(childPartitionAC, Collections.singletonList(tokenAB), watermarkAB); // Write a new partition for every parent partition that merges into the child. metadataTableDao.writeNewPartition(newPartitionACFromAB); assertEquals( DoFn.ProcessContinuation.resume().withResumeDelay(Duration.millis(100)), action.run( tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false))); // No partitions are outputted because it's missing a parent still. verify(receiver, never()).outputWithTimestamp(any(), any()); assertEquals(1, metadataTableDao.readNewPartitions().size()); // On next iteration, ["b", "c") received CloseStream and writes to the metadata table. ByteStringRange parentPartitionBC = ByteStringRange.create("b", "c"); Instant watermarkBC = startTime.plus(Duration.millis(10)); ChangeStreamContinuationToken tokenBC = ChangeStreamContinuationToken.create(parentPartitionBC, "bc"); NewPartition newPartitionACFromBC = new NewPartition(childPartitionAC, Collections.singletonList(tokenBC), watermarkBC); metadataTableDao.writeNewPartition(newPartitionACFromBC); assertEquals( DoFn.ProcessContinuation.resume().withResumeDelay(Duration.millis(100)), action.run( tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false))); // The partition is outputted with watermark1 because that is the lowest of the 2 forming the // parent low watermark. verify(receiver, times(1)) .outputWithTimestamp(partitionRecordArgumentCaptor.capture(), eq(Instant.EPOCH)); assertEquals(childPartitionAC, partitionRecordArgumentCaptor.getValue().getPartition()); assertEquals(watermarkAB, partitionRecordArgumentCaptor.getValue().getParentLowWatermark()); assertEquals(endTime, partitionRecordArgumentCaptor.getValue().getEndTime()); assertThat( partitionRecordArgumentCaptor.getValue().getChangeStreamContinuationTokens(), Matchers.containsInAnyOrder(tokenAB, tokenBC)); assertTrue(metadataTableDao.readNewPartitions().isEmpty()); }
public void setProtectedTargets(String... targets) { handler.setProtectedTargets(Arrays.copyOf(targets, targets.length)); }
@Test void addsProtectedTargets() { environment.setProtectedTargets("/woo"); assertThat(handler.getProtectedTargets()).contains("/woo"); }
@Override public AppResponse process(Flow flow, AppSessionRequest request) { if (appSession.getRegistrationId() == null) { return new NokResponse(); } Map<String, String> result = digidClient.getExistingAccount(appSession.getRegistrationId(), appSession.getLanguage()); if (result.get(lowerUnderscore(STATUS)).equals("OK") && result.get(lowerUnderscore(ACCOUNT_ID)) != null) { appSession.setAccountId(Long.valueOf(result.get(lowerUnderscore(ACCOUNT_ID)))); digidClient.remoteLog("54", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId())); return new OkResponse(); } else if (result.get(lowerUnderscore(STATUS)).equals("PENDING")) { // switch state to require replace action appSession.setState(State.EXISTING_ACCOUNT_FOUND.name()); return new StatusResponse("PENDING"); } else { return new NokResponse(); } }
@Test void processExistingAccountTest(){ when(digidClientMock.getExistingAccount(1337L, "NL")).thenReturn(Map.of( lowerUnderscore(STATUS), "PENDING", lowerUnderscore(ACCOUNT_ID), "1" )); AppResponse appResponse = checkExistingAccount.process(flowMock, null); assertEquals(State.EXISTING_ACCOUNT_FOUND.name(), checkExistingAccount.getAppSession().getState()); assertTrue(appResponse instanceof StatusResponse); assertEquals("PENDING", ((StatusResponse) appResponse).getStatus()); }
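The status handling in process() above, summarized as a decision table taken directly from its branches:
// registrationId == null -> NokResponse
// status "OK" and account_id present -> accountId stored, remote log "54", OkResponse
// status "PENDING" -> session state set to EXISTING_ACCOUNT_FOUND, StatusResponse("PENDING")
// any other status -> NokResponse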
public Set<Long> calculateUsers(DelegateExecution execution, int level) { Assert.isTrue(level > 0, "level must be greater than 0"); // get the process initiator ProcessInstance processInstance = processInstanceService.getProcessInstance(execution.getProcessInstanceId()); Long startUserId = NumberUtils.parseLong(processInstance.getStartUserId()); // get the department at the given level DeptRespDTO dept = null; for (int i = 0; i < level; i++) { // resolve the department for this level if (dept == null) { dept = getStartUserDept(startUserId); if (dept == null) { // the initiator's department cannot be found, so this rule cannot be applied return emptySet(); } } else { DeptRespDTO parentDept = deptApi.getDept(dept.getParentId()).getCheckedData(); if (parentDept == null) { // no parent department found, so stop climbing; e.g. users high in the hierarchy have fewer department levels above them break; } dept = parentDept; } } return dept.getLeaderUserId() != null ? asSet(dept.getLeaderUserId()) : emptySet(); }
@Test public void testCalculateUsers_noDept() { // prepare parameters DelegateExecution execution = mockDelegateExecution(1L); // mock method (startUser) AdminUserRespDTO startUser = randomPojo(AdminUserRespDTO.class, o -> o.setDeptId(10L)); when(adminUserApi.getUser(eq(1L))).thenReturn(success(startUser)); // mock method (getStartUserDept): no department found when(deptApi.getDept(eq(10L))).thenReturn(success(null)); // invoke Set<Long> result = expression.calculateUsers(execution, 1); // assert assertEquals(0, result.size()); }
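A short worked example of the level semantics in calculateUsers above (department ids are illustrative assumptions):
// start user sits in dept 10, whose parent is dept 5, whose parent is dept 1:
// level 1 -> leader of dept 10 (the user's own department)
// level 2 -> leader of dept 5; level 3 -> leader of dept 1
// higher levels stop at dept 1 once getDept returns no parent
Set<Long> leader = expression.calculateUsers(execution, 2); // leader of the parent department, or the empty set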
@Override public String format(final Date input, final TimeZone zone) { return ISO8601Utils.format(input, true, zone); }
@Test public void testPrint() { assertEquals("2022-11-04T12:43:42.654+01:00", new ISO8601DateFormatter().format(1667562222654L, TimeZone.getTimeZone("Europe/Zurich"))); assertEquals("2022-11-04T11:43:42.654Z", new ISO8601DateFormatter().format(1667562222654L, TimeZone.getTimeZone("UTC"))); }
@Override protected boolean isTokenAboutToExpire() { if (tokenFetchTime == -1 || super.isTokenAboutToExpire()) { return true; } // In case of, any clock skew issues, refresh token. long elapsedTimeSinceLastTokenRefreshInMillis = System.currentTimeMillis() - tokenFetchTime; boolean expiring = elapsedTimeSinceLastTokenRefreshInMillis < 0; if (expiring) { // Clock Skew issue. Refresh token. LOG.debug("JWTToken: token renewing. Time elapsed since last token fetch:" + " {} milliseconds", elapsedTimeSinceLastTokenRefreshInMillis); } return expiring; }
@Test public void testTokenStartsAsExpired() { WorkloadIdentityTokenProvider provider = new WorkloadIdentityTokenProvider( AUTHORITY, TENANT_ID, CLIENT_ID, TOKEN_FILE); Assertions.assertThat(provider.isTokenAboutToExpire()) .describedAs("Token should start as expired") .isTrue(); }
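The refresh decision in isTokenAboutToExpire above, restated as a truth table:
// tokenFetchTime == -1 -> true (no token fetched yet)
// super.isTokenAboutToExpire() -> true (regular expiry window)
// System.currentTimeMillis() - tokenFetchTime < 0 -> true (clock moved backwards: refresh defensively against skew)
// otherwise -> false (token still considered fresh)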
void generateDecodeCodeForAMessage(Map<String, MessageDecoderMethod> msgDecodeCode, Queue<Descriptors.Descriptor> queue, Set<String> fieldsToRead) { Descriptors.Descriptor descriptor = queue.remove(); String fullyQualifiedMsgName = ProtoBufUtils.getFullJavaName(descriptor); int varNum = 1; if (msgDecodeCode.containsKey(fullyQualifiedMsgName)) { return; } StringBuilder code = new StringBuilder(); String methodNameOfDecoder = getDecoderMethodName(fullyQualifiedMsgName); int indent = 1; // Creates decoder method for a message type. Example method signature: // public static Map<String, Object> decodeSample_SampleRecordMessage(Sample.SampleRecord msg) code.append(addIndent( String.format("public static Map<String, Object> %s(%s msg) {", methodNameOfDecoder, fullyQualifiedMsgName), indent)); code.append(completeLine("Map<String, Object> msgMap = new HashMap<>()", ++indent)); List<Descriptors.FieldDescriptor> descriptorsToDerive = new ArrayList<>(); if (fieldsToRead != null && !fieldsToRead.isEmpty()) { for (String fieldName: fieldsToRead.stream().sorted().collect(Collectors.toList())) { if (null == descriptor.findFieldByName(fieldName)) { LOGGER.debug("Field " + fieldName + " not found in the descriptor"); } else { descriptorsToDerive.add(descriptor.findFieldByName(fieldName)); } } } else { descriptorsToDerive = descriptor.getFields(); } for (Descriptors.FieldDescriptor desc : descriptorsToDerive) { Descriptors.FieldDescriptor.Type type = desc.getType(); String fieldNameInCode = ProtobufInternalUtils.underScoreToCamelCase(desc.getName(), true); switch (type) { case STRING: case INT32: case INT64: case UINT64: case FIXED64: case FIXED32: case UINT32: case SFIXED32: case SFIXED64: case SINT32: case SINT64: case DOUBLE: case FLOAT: /* Generate code for scalar field extraction Example: If field has presence if (msg.hasEmail()) { msgMap.put("email", msg.getEmail()); } OR if no presence: msgMap.put("email", msg.getEmail()); OR if repeated: if (msg.getEmailCount() > 0) { msgMap.put("email", msg.getEmailList().toArray()); } */ code.append(codeForScalarFieldExtraction(desc, fieldNameInCode, indent)); break; case BOOL: /* Generate code for boolean field extraction Example: If field has presence if (msg.hasIsRegistered()) { msgMap.put("is_registered", String.valueOf(msg.getIsRegistered())); } OR if no presence: msgMap.put("is_registered", String.valueOf(msg.getIsRegistered())); OR if repeated: List<Object> list1 = new ArrayList<>(); for (String row: msg.getIsRegisteredList()) { list1.add(String.valueOf(row)); } if (!list1.isEmpty()) { msgMap.put("is_registered", list1.toArray()); } */ code.append(codeForComplexFieldExtraction( desc, fieldNameInCode, "String", indent, ++varNum, "String.valueOf", "")); break; case BYTES: /* Generate code for bytes field extraction Example: If field has presence if (msg.hasEmail()) { msgMap.put("email", msg.getEmail().toByteArray()); } OR if no presence: msgMap.put("email", msg.getEmail().toByteArray()); OR if repeated: List<Object> list1 = new ArrayList<>(); for (com.google.protobuf.ByteString row: msg.getEmailList()) { list1.add(row.toByteArray()); } if (!list1.isEmpty()) { msgMap.put("email", list1.toArray()); } */ code.append(codeForComplexFieldExtraction( desc, fieldNameInCode, "com.google.protobuf.ByteString", indent, ++varNum, "", ".toByteArray()")); break; case ENUM: /* Generate code for enum field extraction Example: If field has presence if (msg.hasStatus()) { msgMap.put("status", msg.getStatus().name()); } OR if no presence: msgMap.put("status", msg.getStatus().name()); OR if repeated: List<Object> list1 = new ArrayList<>(); for (Status row: msg.getStatusList()) { list1.add(row.name()); } if (!list1.isEmpty()) { msgMap.put("status", list1.toArray()); } */ code.append(codeForComplexFieldExtraction( desc, fieldNameInCode, ProtoBufUtils.getFullJavaNameForEnum(desc.getEnumType()), indent, ++varNum, "", ".name()")); break; case MESSAGE: String messageType = ProtoBufUtils.getFullJavaName(desc.getMessageType()); if (desc.isMapField()) { // Generated code for Map extraction. The key for the map is always a scalar object in Protobuf. Descriptors.FieldDescriptor valueDescriptor = desc.getMessageType().findFieldByName("value"); if (valueDescriptor.getType() == Descriptors.FieldDescriptor.Type.MESSAGE) { /* Generate code for map field extraction if the value type is a message Example: If field has presence if (msg.hasComplexMap()) { Map<Object, Map<String, Object>> map1 = new HashMap<>(); for (Map.Entry<String, ComplexTypes.TestMessage.NestedMessage> entry: msg.getComplexMapMap() .entrySet()) { map1.put(entry.getKey(), decodeComplexTypes_TestMessage_NestedMessageMessage(entry.getValue())); } msgMap.put("complex_map", map1); } OR if no presence: Map<Object, Map<String, Object>> map1 = new HashMap<>(); for (Map.Entry<String, ComplexTypes.TestMessage.NestedMessage> entry: msg.getComplexMapMap().entrySet()) { map1.put(entry.getKey(), decodeComplexTypes_TestMessage_NestedMessageMessage(entry.getValue())); } msgMap.put("complex_map", map1); */ String valueDescClassName = ProtoBufUtils.getFullJavaName(valueDescriptor.getMessageType()); if (!msgDecodeCode.containsKey(valueDescClassName)) { queue.add(valueDescriptor.getMessageType()); } code.append(codeForMapWithValueMessageType(desc, fieldNameInCode, valueDescClassName, indent, varNum)); break; } else { /* Generate code for map field extraction if the value type is a scalar msgMap.put("simple_map", msg.getSimpleMapMap()); */ code.append(completeLine(putFieldInMsgMapCode(desc.getName(), getProtoFieldMethodName(fieldNameInCode + "Map"), null, null), indent)); } } else { if (!msgDecodeCode.containsKey(messageType)) { queue.add(desc.getMessageType()); } code.append(codeForComplexFieldExtraction(desc, fieldNameInCode, messageType, indent, ++varNum, getDecoderMethodName(messageType), "")); } break; default: LOGGER.error(String.format("Protobuf type %s is not supported by pinot yet. Skipping this field %s", type, desc.getName())); break; } } code.append(completeLine("return msgMap", indent)); code.append(addIndent("}", --indent)); msgDecodeCode.put(fullyQualifiedMsgName, new MessageDecoderMethod(methodNameOfDecoder, code.toString())); }
@Test public void testGenerateDecodeCodeForAMessageForAllFieldsToRead() throws URISyntaxException, IOException { MessageCodeGen messageCodeGen = new MessageCodeGen(); Queue<Descriptors.Descriptor> queue = new ArrayDeque<>(); Map<String, MessageCodeGen.MessageDecoderMethod> msgDecodeCode = new HashMap<>(); Set<String> fieldsToRead = new HashSet<>(); queue.add(ComplexTypes.TestMessage.getDescriptor()); messageCodeGen.generateDecodeCodeForAMessage(msgDecodeCode, queue, fieldsToRead); Set<String> nameList = queue.stream() .map(Descriptors.Descriptor::getName) .collect(Collectors.toSet()); assertEquals(nameList, Set.of("NestedMessage")); assertEquals(msgDecodeCode.size(), 1); URL resource = getClass().getClassLoader().getResource("codegen_output/complex_type_all_method.txt"); String expectedCodeOutput = new String(Files.readAllBytes(Paths.get(resource.toURI()))); MessageCodeGen.MessageDecoderMethod messageDecoderMethod = msgDecodeCode.get("org.apache.pinot.plugin.inputformat.protobuf.ComplexTypes.TestMessage"); assertEquals(messageDecoderMethod.getCode(), expectedCodeOutput); assertEquals(messageDecoderMethod.getMethodName(), "decodeorg_apache_pinot_plugin_inputformat_protobuf_ComplexTypes_TestMessageMessage"); }
ProcessId processId() { return processId; }
@Test public void shouldSetProcessId() { assertEquals(PID_1, new ClientState(PID_1, 1).processId()); assertEquals(PID_2, new ClientState(PID_2, mkMap()).processId()); assertEquals(PID_3, new ClientState(PID_3, 1, mkMap()).processId()); assertNull(new ClientState().processId()); }
public static <T> Values<T> of(Iterable<T> elems) { return new Values<>(elems, Optional.absent(), Optional.absent(), false); }
@Test @Category(ValidatesRunner.class) public void testCreateWithVoidType() throws Exception { PCollection<Void> output = p.apply(Create.of((Void) null, (Void) null)); PAssert.that(output).containsInAnyOrder((Void) null, (Void) null); p.run(); }
public static ClusterHealthStatus isHealth(List<RemoteInstance> remoteInstances) { if (CollectionUtils.isEmpty(remoteInstances)) { return ClusterHealthStatus.unHealth("can't get the instance list"); } if (!CoreModuleConfig.Role.Receiver.equals(ROLE)) { List<RemoteInstance> selfInstances = remoteInstances.stream(). filter(remoteInstance -> remoteInstance.getAddress().isSelf()).collect(Collectors.toList()); if (CollectionUtils.isEmpty(selfInstances)) { return ClusterHealthStatus.unHealth("can't get itself"); } } if (remoteInstances.size() > 1 && hasIllegalNodeAddress(remoteInstances)) { return ClusterHealthStatus.unHealth("find illegal node in cluster mode such as 127.0.0.1, localhost"); } return ClusterHealthStatus.HEALTH; }
@Test public void healthWithSelfAndNodes() { List<RemoteInstance> remoteInstances = new ArrayList<>(); remoteInstances.add(new RemoteInstance(new Address("192.168.0.1", 8899, true))); remoteInstances.add(new RemoteInstance(new Address("192.168.0.2", 8899, false))); ClusterHealthStatus clusterHealthStatus = OAPNodeChecker.isHealth(remoteInstances); Assertions.assertTrue(clusterHealthStatus.isHealth()); }
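A sketch of the illegal-address branch of isHealth above, assuming hasIllegalNodeAddress flags loopback hosts as its name and error message suggest:
List<RemoteInstance> nodes = new ArrayList<>();
nodes.add(new RemoteInstance(new Address("127.0.0.1", 8899, true))); // loopback address in a multi-node cluster
nodes.add(new RemoteInstance(new Address("192.168.0.2", 8899, false)));
ClusterHealthStatus status = OAPNodeChecker.isHealth(nodes);
// -> unHealth("find illegal node in cluster mode such as 127.0.0.1, localhost")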
@Override // the mappedStatementId parameter is unused for now; later, caching could be keyed on mappedStatementId + DataPermission public List<DataPermissionRule> getDataPermissionRule(String mappedStatementId) { // 1. no data permission rules configured if (CollUtil.isEmpty(rules)) { return Collections.emptyList(); } // 2. no annotation in context: enabled by default DataPermission dataPermission = DataPermissionContextHolder.get(); if (dataPermission == null) { return rules; } // 3. annotated but disabled if (!dataPermission.enable()) { return Collections.emptyList(); } // 4. annotated, keep only the included rules if (ArrayUtil.isNotEmpty(dataPermission.includeRules())) { return rules.stream().filter(rule -> ArrayUtil.contains(dataPermission.includeRules(), rule.getClass())) .collect(Collectors.toList()); // rule lists are usually short, so a HashSet lookup is not worth it } // 5. annotated, drop only the excluded rules if (ArrayUtil.isNotEmpty(dataPermission.excludeRules())) { return rules.stream().filter(rule -> !ArrayUtil.contains(dataPermission.excludeRules(), rule.getClass())) .collect(Collectors.toList()); // rule lists are usually short, so a HashSet lookup is not worth it } // 6. annotated, all rules apply return rules; }
@Test public void testGetDataPermissionRule_02() { // prepare parameters String mappedStatementId = randomString(); // invoke List<DataPermissionRule> result = dataPermissionRuleFactory.getDataPermissionRule(mappedStatementId); // assert assertSame(rules, result); }
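How the branches above map to the annotation, as a hedged sketch (the rule class and method bodies are hypothetical; attribute names follow the method's own accesses):
@DataPermission(enable = false) // branch 3: every rule skipped
public List<Long> listAllIds() { return Collections.emptyList(); }
@DataPermission(includeRules = DeptDataPermissionRule.class) // branch 4: only the listed rule applies
public List<Long> listDeptIds() { return Collections.emptyList(); }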
@Override public void process(Exchange exchange) throws Exception { JsonElement json = getBodyAsJsonElement(exchange); String operation = exchange.getIn().getHeader(CouchDbConstants.HEADER_METHOD, String.class); if (ObjectHelper.isEmpty(operation)) { Response<DocumentResult> save = saveJsonElement(json); if (save == null) { throw new CouchDbException("Could not save document [unknown reason]", exchange); } if (LOG.isTraceEnabled()) { LOG.trace("Document saved [_id={}, _rev={}]", save.getResult().getId(), save.getResult().getRev()); } exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_REV, save.getResult().getRev()); exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_ID, save.getResult().getId()); } else { if (operation.equalsIgnoreCase(CouchDbOperations.DELETE.toString())) { Response<DocumentResult> delete = deleteJsonElement(json); if (delete == null) { throw new CouchDbException("Could not delete document [unknown reason]", exchange); } if (LOG.isTraceEnabled()) { LOG.trace("Document deleted [_id={}, _rev={}]", delete.getResult().getId(), delete.getResult().getRev()); } exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_REV, delete.getResult().getRev()); exchange.getIn().setHeader(CouchDbConstants.HEADER_DOC_ID, delete.getResult().getId()); } if (operation.equalsIgnoreCase(CouchDbOperations.GET.toString())) { String docId = exchange.getIn().getHeader(CouchDbConstants.HEADER_DOC_ID, String.class); if (docId == null) { throw new CouchDbException("Could not get document, document id is missing", exchange); } Object response = getElement(docId); if (LOG.isTraceEnabled()) { LOG.trace("Document retrieved [_id={}]", docId); } exchange.getIn().setBody(response); } } }
@Test void testStringBodyIsConvertedToJsonTree() throws Exception { when(msg.getMandatoryBody()).thenReturn("{ \"name\" : \"coldplay\" }"); when(client.save(any())).thenAnswer(new Answer<Response>() { @Override public Response answer(InvocationOnMock invocation) { assertTrue(invocation.getArguments()[0] instanceof Document, invocation.getArguments()[0].getClass() + " but wanted " + Document.class); DocumentResult documentResult = mock(DocumentResult.class); Response response = mock(Response.class); when(response.getResult()).thenReturn(documentResult); return response; } }); producer.process(exchange); verify(client).save(any(Document.class)); }
@Override public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) { SQLStatement sqlStatement = sqlStatementContext.getSqlStatement(); if (sqlStatement instanceof ShowStatement) { return Optional.of(new PostgreSQLShowVariableExecutor((ShowStatement) sqlStatement)); } return Optional.empty(); }
@Test void assertCreateWithSelectPgNamespaceAndPgClass() { SQLStatement sqlStatement = parseSQL(SELECT_PG_CLASS_AND_PG_NAMESPACE); SelectStatementContext selectStatementContext = mock(SelectStatementContext.class); when(selectStatementContext.getSqlStatement()).thenReturn((SelectStatement) sqlStatement); Optional<DatabaseAdminExecutor> actual = new PostgreSQLAdminExecutorCreator().create(selectStatementContext, SELECT_PG_CLASS_AND_PG_NAMESPACE, "", Collections.emptyList()); assertFalse(actual.isPresent()); }
private static SSLFactory createSSLFactory(TlsConfig tlsConfig, boolean insecureMode) { return createSSLFactory( tlsConfig.getKeyStoreType(), tlsConfig.getKeyStorePath(), tlsConfig.getKeyStorePassword(), tlsConfig.getTrustStoreType(), tlsConfig.getTrustStorePath(), tlsConfig.getTrustStorePassword(), null, null, true, tlsConfig.isInsecure() || insecureMode); }
@Test public void createSslFactoryInInsecureMode() { SecureRandom secureRandom = new SecureRandom(); SSLFactory sslFactory = RenewableTlsUtils.createSSLFactory(KEYSTORE_TYPE, TLS_KEYSTORE_FILE_PATH, PASSWORD, TRUSTSTORE_TYPE, TLS_TRUSTSTORE_FILE_PATH, PASSWORD, "TLS", secureRandom, false, true); X509ExtendedTrustManager x509ExtendedTrustManager = sslFactory.getTrustManager().get(); assertTrue(x509ExtendedTrustManager instanceof UnsafeX509ExtendedTrustManager); assertEquals(x509ExtendedTrustManager.getAcceptedIssuers().length, 0); sslFactory = RenewableTlsUtils.createSSLFactory(KEYSTORE_TYPE, TLS_KEYSTORE_FILE_PATH, PASSWORD, TRUSTSTORE_TYPE, TLS_TRUSTSTORE_FILE_PATH, PASSWORD, "TLS", secureRandom, true, true); ensurSslFactoryUseUnsafeTrustManager(sslFactory); }
@VisibleForTesting static Collection<FailureEnricher> filterInvalidEnrichers( final Set<FailureEnricher> failureEnrichers) { final Map<String, Set<Class<?>>> enrichersByKey = new HashMap<>(); failureEnrichers.forEach( enricher -> enricher.getOutputKeys() .forEach( enricherKey -> enrichersByKey .computeIfAbsent( enricherKey, ignored -> new HashSet<>()) .add(enricher.getClass()))); final Set<Class<?>> invalidEnrichers = enrichersByKey.entrySet().stream() .filter(entry -> entry.getValue().size() > 1) .flatMap( entry -> { LOG.warn( "Following enrichers have registered duplicate output key [{}] and will be ignored: {}.", entry.getKey(), entry.getValue().stream() .map(Class::getName) .collect(Collectors.joining(", "))); return entry.getValue().stream(); }) .collect(Collectors.toSet()); return failureEnrichers.stream() .filter(enricher -> !invalidEnrichers.contains(enricher.getClass())) .collect(Collectors.toList()); }
@Test public void testGetValidatedEnrichers() { // create two enrichers with non-overlapping keys final FailureEnricher firstEnricher = new TestEnricher("key1"); final FailureEnricher secondEnricher = new TestEnricher("key2"); final Set<FailureEnricher> enrichers = new HashSet<FailureEnricher>() { { add(firstEnricher); add(secondEnricher); } }; final Collection<FailureEnricher> validatedEnrichers = FailureEnricherUtils.filterInvalidEnrichers(enrichers); // expect both enrichers to be valid assertThat(validatedEnrichers).hasSize(2); assertThat(validatedEnrichers).contains(firstEnricher, secondEnricher); }
private Collector createCollector(List<MapperConfig> mapperConfigs) { return new DropwizardExports( metricRegistry, new PrometheusMetricFilter(mapperConfigs), new CustomMappingSampleBuilder(mapperConfigs) ); }
@Test void testCreateCollector() { when(prometheusMappingFilesHandlerProvider.get()).thenReturn(prometheusMappingFilesHandler); when(prometheusMappingFilesHandler.getMapperConfigs()).thenReturn(Collections.singletonList(new MapperConfig( "org.graylog2.plugin.streams.Stream.*.StreamRule.*.executionTime", "stream_rules_execution_time", Collections.emptyMap() ))); metricRegistry.timer(MetricRegistry.name(Stream.class, "stream-id", "StreamRule", "stream-rule-id", "executionTime")); metricRegistry.timer(MetricRegistry.name(JvmStats.class, "jvm-stats")); Collector collector = classUnderTest.createCollector(); assertThat(collector.collect()).hasSize(1); }
T getFunction(final List<SqlArgument> arguments) { // first try to get the candidates without any implicit casting Optional<T> candidate = findMatchingCandidate(arguments, false); if (candidate.isPresent()) { return candidate.get(); } else if (!supportsImplicitCasts) { throw createNoMatchingFunctionException(arguments); } // if none were found (candidate isn't present) try again with implicit casting candidate = findMatchingCandidate(arguments, true); if (candidate.isPresent()) { return candidate.get(); } throw createNoMatchingFunctionException(arguments); }
@Test public void shouldChooseCorrectLambdaForTypeSpecificCollections() { // Given: givenFunctions( function(EXPECTED, -1, MAP1, LAMBDA_BI_FUNCTION_STRING) ); // When: final KsqlScalarFunction fun1 = udfIndex.getFunction( ImmutableList.of( SqlArgument.of(MAP1_ARG), SqlArgument.of( SqlLambdaResolved.of( ImmutableList.of(SqlTypes.STRING, SqlTypes.STRING), SqlTypes.BOOLEAN)))); final KsqlScalarFunction fun2 = udfIndex.getFunction( ImmutableList.of( SqlArgument.of(MAP1_ARG), SqlArgument.of( SqlLambdaResolved.of( ImmutableList.of(SqlTypes.STRING, SqlTypes.STRING), INTEGER)))); final Exception e = assertThrows( Exception.class, () -> udfIndex.getFunction( ImmutableList.of( SqlArgument.of(MAP1_ARG), SqlArgument.of( SqlLambdaResolved.of( ImmutableList.of(SqlTypes.BOOLEAN, INTEGER), INTEGER)))) ); // Then: assertThat(fun1.name(), equalTo(EXPECTED)); assertThat(fun2.name(), equalTo(EXPECTED)); assertThat(e.getMessage(), containsString("does not accept parameters (" + "MAP<STRING, STRING>, LAMBDA (BOOLEAN, INTEGER) => INTEGER).")); assertThat(e.getMessage(), containsString("Valid alternatives are:" + lineSeparator() + "expected(MAP<VARCHAR, VARCHAR>, LAMBDA (VARCHAR, VARCHAR) => A)")); }
@BuildStep @Record(ExecutionTime.RUNTIME_INIT) void findRecurringJobAnnotationsAndScheduleThem(RecorderContext recorderContext, CombinedIndexBuildItem index, BeanContainerBuildItem beanContainer, JobRunrRecurringJobRecorder recorder, JobRunrBuildTimeConfiguration jobRunrBuildTimeConfiguration) throws NoSuchMethodException { if (jobRunrBuildTimeConfiguration.jobScheduler().enabled()) { new RecurringJobsFinder(recorderContext, index, beanContainer, recorder).findRecurringJobsAndScheduleThem(); } }
@Test void producesJobRunrRecurringJobsFinderIfJobSchedulerIsEnabled() throws NoSuchMethodException { RecorderContext recorderContext = mock(RecorderContext.class); CombinedIndexBuildItem combinedIndex = mock(CombinedIndexBuildItem.class); when(combinedIndex.getIndex()).thenReturn(mock(IndexView.class)); BeanContainerBuildItem beanContainer = mock(BeanContainerBuildItem.class); JobRunrRecurringJobRecorder recurringJobRecorder = mock(JobRunrRecurringJobRecorder.class); when(jobSchedulerConfiguration.enabled()).thenReturn(true); jobRunrExtensionProcessor.findRecurringJobAnnotationsAndScheduleThem(recorderContext, combinedIndex, beanContainer, recurringJobRecorder, jobRunrBuildTimeConfiguration); verify(recorderContext, times(2)).registerNonDefaultConstructor(any(), any()); }
int preferredLocalParallelism() { if (options.containsKey(SqlConnector.OPTION_PREFERRED_LOCAL_PARALLELISM)) { return Integer.parseInt(options.get(SqlConnector.OPTION_PREFERRED_LOCAL_PARALLELISM)); } return StreamKafkaP.PREFERRED_LOCAL_PARALLELISM; }
@Test @Parameters(method = "preferredLocalParallelisms") public void when_preferredLocalParallelism_isDefined_then_parseInt(String plp, Integer expected, boolean shouldThrow) { KafkaTable table = new KafkaTable( null, null, null, null, null, null, null, Map.of(OPTION_PREFERRED_LOCAL_PARALLELISM, plp), null, null, null, null, null ); if (shouldThrow) { assertThatThrownBy(() -> table.preferredLocalParallelism()) .isInstanceOf(NumberFormatException.class); } else { assertThat(table.preferredLocalParallelism()).isEqualTo(expected); } }
@SuppressWarnings("unchecked") public static <K, V> V getWithDefault(final Map<K, ? extends Object> map, final K key, final V defaultValue, final Class<V> valueClass) { final Object actualValue = map.get(key); if (actualValue == null) { return defaultValue; } if (valueClass.isAssignableFrom(actualValue.getClass())) { return (V) actualValue; } if (!(actualValue instanceof String)) { throw new IllegalArgumentException("Unsupported actual value type: " + actualValue.getClass()); } final String valStr = (String) actualValue; if (valueClass.equals(Double.class)) { return (V) Double.valueOf(Double.parseDouble(valStr)); } if (valueClass.equals(Float.class)) { return (V) Float.valueOf(Float.parseFloat(valStr)); } if (valueClass.equals(Long.class)) { return (V) Long.valueOf(Long.parseLong(valStr)); } if (valueClass.equals(Integer.class)) { return (V) Integer.valueOf(Integer.parseInt(valStr)); } if (valueClass.equals(Boolean.class)) { return (V) Boolean.valueOf(Boolean.parseBoolean(valStr)); } throw new IllegalArgumentException("Unsupported expected value type: " + valueClass + " and value = " + actualValue); }
@Test(expectedExceptions = IllegalArgumentException.class) public void testNullArgument() { MapUtil.getWithDefault(_subjectMap, "subMap", null); MapUtil.getWithDefault(_subjectMap, "subMap_default", null); }
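A minimal sketch of getWithDefault's coercion rules above (the map contents are illustrative):
Map<String, Object> map = new HashMap<>();
map.put("port", "8080"); // stored as a String
Integer port = MapUtil.getWithDefault(map, "port", 80, Integer.class); // "8080" parsed -> 8080
Integer absent = MapUtil.getWithDefault(map, "missing", 80, Integer.class); // key absent -> default 80
map.put("flag", Boolean.TRUE);
Boolean flag = MapUtil.getWithDefault(map, "flag", false, Boolean.class); // already assignable -> true, no parsing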
@Override public boolean equals( Object o ) { if ( this == o ) { return true; } if ( o == null || getClass() != o.getClass() ) { return false; } ActingPrincipal that = (ActingPrincipal) o; if ( isAnonymous() != that.isAnonymous() ) { return false; } return getName() != null ? getName().equals( that.getName() ) : that.getName() == null; }
@Test public void equals() throws Exception { principal1 = new ActingPrincipal( "suzy" ); principal2 = new ActingPrincipal( "joe" ); assertFalse( principal1.equals( principal2 ) ); assertFalse( principal1.equals( ActingPrincipal.ANONYMOUS ) ); principal2 = new ActingPrincipal( "suzy" ); assertTrue( principal1.equals( principal2 ) ); principal2 = ActingPrincipal.ANONYMOUS; assertTrue( principal2.equals( ActingPrincipal.ANONYMOUS ) ); }
@Override public String[] split(String text) { if (splitContraction) { text = WONT_CONTRACTION.matcher(text).replaceAll("$1ill not"); text = SHANT_CONTRACTION.matcher(text).replaceAll("$1ll not"); text = AINT_CONTRACTION.matcher(text).replaceAll("$1m not"); for (Pattern regexp : NOT_CONTRACTIONS) { text = regexp.matcher(text).replaceAll("$1 not"); } for (Pattern regexp : CONTRACTIONS2) { text = regexp.matcher(text).replaceAll("$1 $2"); } for (Pattern regexp : CONTRACTIONS3) { text = regexp.matcher(text).replaceAll("$1 $2 $3"); } } text = DELIMITERS[0].matcher(text).replaceAll(" $1 "); text = DELIMITERS[1].matcher(text).replaceAll(" $1"); text = DELIMITERS[2].matcher(text).replaceAll(" $1"); text = DELIMITERS[3].matcher(text).replaceAll(" . "); text = DELIMITERS[4].matcher(text).replaceAll(" $1 "); String[] words = WHITESPACE.split(text); if (words.length > 1 && words[words.length-1].equals(".")) { if (EnglishAbbreviations.contains(words[words.length-2])) { words[words.length-2] = words[words.length-2] + "."; } } ArrayList<String> result = new ArrayList<>(); for (String token : words) { if (!token.isEmpty()) { result.add(token); } } return result.toArray(new String[0]); }
@Test public void testTokenizeHyphen() { System.out.println("tokenize hyphen"); String text = "On a noncash basis for the quarter, the bank reported a " + "loss of $7.3 billion because of a $10.4 billion write-down " + "in the value of its credit card unit, attributed to federal " + "regulations that limit debit fees and other charges."; String[] expResult = {"On", "a", "noncash", "basis", "for", "the", "quarter", ",", "the", "bank", "reported", "a", "loss", "of", "$", "7.3", "billion", "because", "of", "a", "$", "10.4", "billion", "write-down", "in", "the", "value", "of", "its", "credit", "card", "unit", ",", "attributed", "to", "federal", "regulations", "that", "limit", "debit", "fees", "and", "other", "charges", "."}; SimpleTokenizer instance = new SimpleTokenizer(); String[] result = instance.split(text); assertEquals(expResult.length, result.length); for (int i = 0; i < result.length; i++) { assertEquals(expResult[i], result[i]); } }
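A contraction-splitting sketch for split above, assuming splitContraction is enabled via a constructor flag (the flag is not shown in this extract):
SimpleTokenizer tokenizer = new SimpleTokenizer(true); // assumed flag enabling splitContraction
String[] tokens = tokenizer.split("I won't go, you don't stay.");
// WONT_CONTRACTION rewrites "won't" -> "will not"; NOT_CONTRACTIONS rewrites "don't" -> "do not";
// the delimiter patterns then separate punctuation, giving roughly:
// ["I", "will", "not", "go", ",", "you", "do", "not", "stay", "."]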
public static Document parseXml(final InputStream is) throws Exception { return parseXml(is, null); }
@Test public void testParse() throws Exception { InputStream fis = Files.newInputStream(Paths.get("src/test/resources/org/apache/camel/util/camel-context.xml")); Document dom = XmlLineNumberParser.parseXml(fis); assertNotNull(dom); NodeList list = dom.getElementsByTagName("beans"); assertEquals(1, list.getLength()); Node node = list.item(0); String lineNumber = (String) node.getUserData(XmlLineNumberParser.LINE_NUMBER); String lineNumberEnd = (String) node.getUserData(XmlLineNumberParser.LINE_NUMBER_END); assertEquals("24", lineNumber); assertEquals("49", lineNumberEnd); }
public static ObjectNode generateAnnotObjectNode(ChartModel cm) { ObjectNode node = MAPPER.createObjectNode(); for (ChartModel.Annot a : cm.getAnnotations()) { node.put(a.key(), a.valueAsString()); } return node; }
@Test public void annot() { ChartModel cm = new ChartModel(FOO, BAR); cm.addAnnotation("dev1", "of:0000000000000001"); cm.addAnnotation("dev2", "of:0000000000000002"); ObjectNode node = ChartUtils.generateAnnotObjectNode(cm); Assert.assertEquals("wrong results", NODE_AS_STRING, node.toString()); }
@Override public ByteBuf writeZero(int length) { if (length == 0) { return this; } ensureWritable(length); int wIndex = writerIndex; checkIndex0(wIndex, length); int nLong = length >>> 3; int nBytes = length & 7; for (int i = nLong; i > 0; i --) { _setLong(wIndex, 0); wIndex += 8; } if (nBytes == 4) { _setInt(wIndex, 0); wIndex += 4; } else if (nBytes < 4) { for (int i = nBytes; i > 0; i --) { _setByte(wIndex, 0); wIndex++; } } else { _setInt(wIndex, 0); wIndex += 4; for (int i = nBytes - 4; i > 0; i --) { _setByte(wIndex, 0); wIndex++; } } writerIndex = wIndex; return this; }
@Test public void testWriteZero() { try { buffer.writeZero(-1); fail(); } catch (IllegalArgumentException e) { // Expected } buffer.clear(); while (buffer.isWritable()) { buffer.writeByte((byte) 0xFF); } buffer.clear(); for (int i = 0; i < buffer.capacity();) { int length = Math.min(buffer.capacity() - i, random.nextInt(32)); buffer.writeZero(length); i += length; } assertEquals(0, buffer.readerIndex()); assertEquals(buffer.capacity(), buffer.writerIndex()); for (int i = 0; i < buffer.capacity(); i ++) { assertEquals(0, buffer.getByte(i)); } }
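How writeZero above decomposes an odd length, as a worked example:
// writeZero(13): nLong = 13 >>> 3 = 1, nBytes = 13 & 7 = 5
// 1 x _setLong -> 8 zero bytes; nBytes > 4 -> one _setInt (4 bytes) plus one _setByte (1 byte)
// total 8 + 4 + 1 = 13 zero bytes, and writerIndex advances by 13
buffer.writeZero(13);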
public List<BackgroundDataWithIndex> getBackgroundDataWithIndex() { return toScesimDataWithIndex(BackgroundDataWithIndex::new); }
@Test public void getBackgroundDataWithIndex() { List<BackgroundDataWithIndex> backgroundDatas = background.getBackgroundDataWithIndex(); assertThat(backgroundDatas).hasSameSizeAs(background.getUnmodifiableData()); BackgroundDataWithIndex backgroundData = backgroundDatas.get(0); int index = backgroundData.getIndex(); assertThat(backgroundData.getScesimData()).isEqualTo(background.getDataByIndex(index - 1)); }
public CreateStreamCommand createStreamCommand(final KsqlStructuredDataOutputNode outputNode) { return new CreateStreamCommand( outputNode.getSinkName().get(), outputNode.getSchema(), outputNode.getTimestampColumn(), outputNode.getKsqlTopic().getKafkaTopicName(), Formats.from(outputNode.getKsqlTopic()), outputNode.getKsqlTopic().getKeyFormat().getWindowInfo(), Optional.of(outputNode.getOrReplace()), Optional.of(false) ); }
@SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT") @Test public void shouldNotThrowIfTopicDoesExist() { // Given: final CreateStream statement = new CreateStream(SOME_NAME, ONE_KEY_ONE_VALUE, false, true, withProperties, false); // When: createSourceFactory.createStreamCommand(statement, ksqlConfig); // Then: verify(topicClient).isTopicExists(TOPIC_NAME); }
public ApiMessageAndVersion toRecord(Uuid topicId, int partitionId, ImageWriterOptions options) { PartitionRecord record = new PartitionRecord(). setPartitionId(partitionId). setTopicId(topicId). setReplicas(Replicas.toList(replicas)). setIsr(Replicas.toList(isr)). setRemovingReplicas(Replicas.toList(removingReplicas)). setAddingReplicas(Replicas.toList(addingReplicas)). setLeader(leader). setLeaderRecoveryState(leaderRecoveryState.value()). setLeaderEpoch(leaderEpoch). setPartitionEpoch(partitionEpoch); if (options.metadataVersion().isElrSupported()) { // The following are tagged fields, we should only set them when there are some contents, in order to save // spaces. if (elr.length > 0) record.setEligibleLeaderReplicas(Replicas.toList(elr)); if (lastKnownElr.length > 0) record.setLastKnownElr(Replicas.toList(lastKnownElr)); } if (options.metadataVersion().isDirectoryAssignmentSupported()) { record.setDirectories(Uuid.toList(directories)); } else { for (Uuid directory : directories) { if (!DirectoryId.MIGRATING.equals(directory)) { options.handleLoss("the directory assignment state of one or more replicas"); break; } } } return new ApiMessageAndVersion(record, options.metadataVersion().partitionRecordVersion()); }
@Test public void testRecordRoundTrip() { PartitionRegistration registrationA = new PartitionRegistration.Builder(). setReplicas(new int[]{1, 2, 3}). setDirectories(DirectoryId.migratingArray(3)). setIsr(new int[]{1, 2}).setRemovingReplicas(new int[]{1}).setLeader(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED).setLeaderEpoch(0).setPartitionEpoch(0).build(); Uuid topicId = Uuid.fromString("OGdAI5nxT_m-ds3rJMqPLA"); int partitionId = 4; ApiMessageAndVersion record = registrationA.toRecord(topicId, partitionId, new ImageWriterOptions.Builder(). setMetadataVersion(MetadataVersion.IBP_3_7_IV0).build()); // highest MV for PartitionRecord v0 PartitionRegistration registrationB = new PartitionRegistration((PartitionRecord) record.message()); assertEquals(registrationA, registrationB); }
@Override public Iterator<Text> search(String term) { if (invertedFile.containsKey(term)) { ArrayList<Text> hits = new ArrayList<>(invertedFile.get(term)); return hits.iterator(); } else { return Collections.emptyIterator(); } }
@Test public void testSearchRomantic() { System.out.println("search 'romantic'"); Iterator<Relevance> hits = corpus.search(new BM25(), "romantic"); int n = 0; while (hits.hasNext()) { n++; Relevance hit = hits.next(); System.out.println(hit.text + "\t" + hit.score); } assertEquals(27, n); }
@CanIgnoreReturnValue @VisibleForTesting DirectoryEntry remove(Name name) { int index = bucketIndex(name, table.length); DirectoryEntry prev = null; DirectoryEntry entry = table[index]; while (entry != null) { if (name.equals(entry.name())) { if (prev != null) { prev.next = entry.next; } else { table[index] = entry.next; } entry.next = null; entryCount--; entry.file().decrementLinkCount(); return entry; } prev = entry; entry = entry.next; } throw new IllegalArgumentException("no entry matching '" + name + "' in this directory"); }
@Test public void testRemove() { dir.put(entry("foo")); dir.put(entry("bar")); dir.remove(Name.simple("foo")); assertThat(dir.entryCount()).isEqualTo(3); assertThat(ImmutableSet.copyOf(dir)) .containsExactly( entry("bar"), new DirectoryEntry(dir, Name.SELF, dir), new DirectoryEntry(dir, Name.PARENT, root)); assertThat(dir.get(Name.simple("foo"))).isNull(); assertThat(dir.get(Name.simple("bar"))).isEqualTo(entry("bar")); dir.remove(Name.simple("bar")); assertThat(dir.entryCount()).isEqualTo(2); dir.put(entry("bar")); dir.put(entry("foo")); // these should just succeed }
public BigtableConfig withInstanceId(ValueProvider<String> instanceId) { checkArgument(instanceId != null, "Instance Id of BigTable can not be null"); return toBuilder().setInstanceId(instanceId).build(); }
@Test public void testWithInstanceId() { assertEquals(INSTANCE_ID.get(), config.withInstanceId(INSTANCE_ID).getInstanceId().get()); thrown.expect(IllegalArgumentException.class); config.withInstanceId(null); }
void snapshot(final PendingServiceMessageTracker tracker, final ErrorHandler errorHandler) { final int length = MessageHeaderEncoder.ENCODED_LENGTH + PendingMessageTrackerEncoder.BLOCK_LENGTH; final long nextServiceSessionId = correctNextServiceSessionId(tracker, errorHandler); idleStrategy.reset(); while (true) { final long result = publication.tryClaim(length, bufferClaim); if (result > 0) { pendingMessageTrackerEncoder .wrapAndApplyHeader(bufferClaim.buffer(), bufferClaim.offset(), messageHeaderEncoder) .nextServiceSessionId(nextServiceSessionId) .logServiceSessionId(tracker.logServiceSessionId()) .pendingMessageCapacity(tracker.pendingMessages().size()) .serviceId(tracker.serviceId()); bufferClaim.commit(); break; } checkResultAndIdle(result); } tracker.pendingMessages().forEach(this, Integer.MAX_VALUE); }
@Test void snapshotPendingServiceMessageTracker() { final int offset = 108; final int length = MessageHeaderEncoder.ENCODED_LENGTH + PendingMessageTrackerEncoder.BLOCK_LENGTH; final int serviceId = 6; final PendingServiceMessageTracker pendingServiceMessageTracker = new PendingServiceMessageTracker( serviceId, mock(Counter.class), mock(LogPublisher.class), mock(ClusterClock.class)); pendingServiceMessageTracker.enqueueMessage(buffer, 32, 0); final int capacity = pendingServiceMessageTracker.size(); when(publication.tryClaim(eq(length), any())) .thenReturn(ADMIN_ACTION) .thenAnswer(mockTryClaim(offset)); when(publication.offer(any(), anyInt(), anyInt())) .thenReturn(BACK_PRESSURED, 9L); snapshotTaker.snapshot(pendingServiceMessageTracker, mock(ErrorHandler.class)); final InOrder inOrder = inOrder(idleStrategy, publication); inOrder.verify(idleStrategy).reset(); inOrder.verify(publication).tryClaim(anyInt(), any()); inOrder.verify(idleStrategy).idle(); inOrder.verify(publication).tryClaim(anyInt(), any()); inOrder.verify(idleStrategy).reset(); inOrder.verify(publication).offer(any(), anyInt(), anyInt()); inOrder.verify(idleStrategy).idle(); inOrder.verify(publication).offer(any(), anyInt(), anyInt()); inOrder.verifyNoMoreInteractions(); pendingMessageTrackerDecoder.wrapAndApplyHeader(buffer, offset + HEADER_LENGTH, messageHeaderDecoder); assertEquals(-8791026472627208190L, pendingMessageTrackerDecoder.nextServiceSessionId()); assertEquals(-8791026472627208192L, pendingMessageTrackerDecoder.logServiceSessionId()); assertEquals(capacity, pendingMessageTrackerDecoder.pendingMessageCapacity()); assertEquals(serviceId, pendingMessageTrackerDecoder.serviceId()); }
@Override public void updateIngress(Ingress ingress) { checkNotNull(ingress, ERR_NULL_INGRESS); checkArgument(!Strings.isNullOrEmpty(ingress.getMetadata().getUid()), ERR_NULL_INGRESS_UID); k8sIngressStore.updateIngress(ingress); log.info(String.format(MSG_INGRESS, ingress.getMetadata().getName(), MSG_UPDATED)); }
@Test(expected = IllegalArgumentException.class) public void testUpdateUnregisteredIngress() { target.updateIngress(INGRESS); }
@Override public void checkBeforeUpdate(final DropEncryptRuleStatement sqlStatement) { if (!sqlStatement.isIfExists()) { checkToBeDroppedEncryptTableNames(sqlStatement); } }
@Test void assertCheckSQLStatementWithoutToBeDroppedRule() { EncryptRule rule = mock(EncryptRule.class); when(rule.getConfiguration()).thenReturn(new EncryptRuleConfiguration(Collections.emptyList(), Collections.emptyMap())); executor.setRule(rule); assertThrows(MissingRequiredRuleException.class, () -> executor.checkBeforeUpdate(createSQLStatement("t_encrypt"))); }
public static String getVersion(Class<?> clazz) { String version = clazz.getPackage().getImplementationVersion(); if (version != null) return version; return getManifestAttributeValue(clazz, "Bundle-Version"); }
@Test void jobRunrVersion() { assertThat(JarUtils.getVersion(JobRunr.class)) .satisfiesAnyOf( val -> assertThat(val).isEqualTo("1.0.0-SNAPSHOT"), val -> assertThat(val).matches("(\\d)+.(\\d)+.(\\d)+(-.*)?") ); }
@Override public ProcessorSlotChain build() { ProcessorSlotChain chain = new DefaultProcessorSlotChain(); List<ProcessorSlot> sortedSlotList = SpiLoader.of(ProcessorSlot.class).loadInstanceListSorted(); for (ProcessorSlot slot : sortedSlotList) { if (!(slot instanceof AbstractLinkedProcessorSlot)) { RecordLog.warn("The ProcessorSlot(" + slot.getClass().getCanonicalName() + ") is not an instance of AbstractLinkedProcessorSlot, can't be added into ProcessorSlotChain"); continue; } chain.addLast((AbstractLinkedProcessorSlot<?>) slot); } return chain; }
@Test
public void testBuild() {
    DefaultSlotChainBuilder builder = new DefaultSlotChainBuilder();
    ProcessorSlotChain slotChain = builder.build();
    assertNotNull(slotChain);

    // Verify the order of slots
    AbstractLinkedProcessorSlot<?> next = slotChain.getNext();
    assertTrue(next instanceof NodeSelectorSlot);

    // Store the first NodeSelectorSlot instance
    NodeSelectorSlot nodeSelectorSlot = (NodeSelectorSlot) next;

    next = next.getNext();
    assertTrue(next instanceof ClusterBuilderSlot);
    next = next.getNext();
    assertTrue(next instanceof LogSlot);
    next = next.getNext();
    assertTrue(next instanceof StatisticSlot);
    next = next.getNext();
    assertTrue(next instanceof AuthoritySlot);
    next = next.getNext();
    assertTrue(next instanceof SystemSlot);
    next = next.getNext();
    assertTrue(next instanceof FlowSlot);
    next = next.getNext();
    assertTrue(next instanceof DefaultCircuitBreakerSlot);
    next = next.getNext();
    assertTrue(next instanceof DegradeSlot);
    next = next.getNext();
    assertNull(next);

    // Build again to verify different instances
    ProcessorSlotChain slotChain2 = builder.build();
    assertNotNull(slotChain2);

    // Verify the two ProcessorSlotChain instances are different
    assertNotSame(slotChain, slotChain2);

    next = slotChain2.getNext();
    assertTrue(next instanceof NodeSelectorSlot);

    // Store the second NodeSelectorSlot instance
    NodeSelectorSlot nodeSelectorSlot2 = (NodeSelectorSlot) next;

    // Verify the two NodeSelectorSlot instances are different
    assertNotSame(nodeSelectorSlot, nodeSelectorSlot2);
}
private <T> T accept(Expression<T> expr) {
    return expr.accept(this);
}
@Test
public void testLesser() throws Exception {
    assertThat(Expr.Lesser.create(
        Expr.NumberValue.create(1),
        Expr.NumberValue.create(2)
    ).accept(new BooleanNumberConditionsVisitor()))
        .isTrue();

    assertThat(Expr.Lesser.create(
        Expr.NumberValue.create(2),
        Expr.NumberValue.create(2)
    ).accept(new BooleanNumberConditionsVisitor()))
        .isFalse();

    assertThat(Expr.Lesser.create(
        Expr.NumberValue.create(3),
        Expr.NumberValue.create(2)
    ).accept(new BooleanNumberConditionsVisitor()))
        .isFalse();

    assertThat(loadCondition("condition-lesser.json").accept(new BooleanNumberConditionsVisitor()))
        .isTrue();
}
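The private accept helper above is double dispatch in miniature: the visitor hands itself to the expression node, and the node routes the call back to the matching visit method. A self-contained sketch of the pattern with illustrative names (not the project's actual Expr types):

// Illustrative double-dispatch sketch; names do not match the real Expr classes.
interface Expression<T> {
    T accept(Visitor visitor);
}

interface Visitor {
    Boolean visitLesser(double left, double right);
}

class Lesser implements Expression<Boolean> {
    private final double left;
    private final double right;

    Lesser(double left, double right) {
        this.left = left;
        this.right = right;
    }

    @Override
    public Boolean accept(Visitor visitor) {
        return visitor.visitLesser(left, right);
    }
}

class NumberConditionsVisitor implements Visitor {
    @Override
    public Boolean visitLesser(double left, double right) {
        return left < right; // strict comparison: equal operands evaluate to false, as the test asserts
    }
}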
@Override
public COMMIT3Response commit(XDR xdr, RpcInfo info) {
    SecurityHandler securityHandler = getSecurityHandler(info);
    RpcCall rpcCall = (RpcCall) info.header();
    int xid = rpcCall.getXid();
    SocketAddress remoteAddress = info.remoteAddress();
    return commit(xdr, info.channel(), xid, securityHandler, remoteAddress);
}
@Test(timeout = 120000)
public void testEncryptedReadWrite() throws Exception {
    final int len = 8192;
    final Path zone = new Path("/zone");
    hdfs.mkdirs(zone);
    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);

    final byte[] buffer = new byte[len];
    for (int i = 0; i < len; i++) {
        buffer[i] = (byte) i;
    }

    final String encFile1 = "/zone/myfile";
    createFileUsingNfs(encFile1, buffer);
    commit(encFile1, len);
    assertArrayEquals("encFile1 not equal",
        getFileContentsUsingNfs(encFile1, len),
        getFileContentsUsingDfs(encFile1, len));

    /*
     * Same thing except this time create the encrypted file using DFS.
     */
    final String encFile2 = "/zone/myfile2";
    final Path encFile2Path = new Path(encFile2);
    DFSTestUtil.createFile(hdfs, encFile2Path, len, (short) 1, 0xFEED);
    assertArrayEquals("encFile2 not equal",
        getFileContentsUsingNfs(encFile2, len),
        getFileContentsUsingDfs(encFile2, len));
}
public CompletableFuture<AckResult> changeInvisibleTime(ProxyContext ctx, ReceiptHandle handle,
    String messageId, String groupName, String topicName, long invisibleTime, long timeoutMillis) {
    CompletableFuture<AckResult> future = new CompletableFuture<>();
    try {
        this.validateReceiptHandle(handle);
        ChangeInvisibleTimeRequestHeader changeInvisibleTimeRequestHeader = new ChangeInvisibleTimeRequestHeader();
        changeInvisibleTimeRequestHeader.setConsumerGroup(groupName);
        changeInvisibleTimeRequestHeader.setTopic(handle.getRealTopic(topicName, groupName));
        changeInvisibleTimeRequestHeader.setQueueId(handle.getQueueId());
        changeInvisibleTimeRequestHeader.setExtraInfo(handle.getReceiptHandle());
        changeInvisibleTimeRequestHeader.setOffset(handle.getOffset());
        changeInvisibleTimeRequestHeader.setInvisibleTime(invisibleTime);
        long commitLogOffset = handle.getCommitLogOffset();
        future = this.serviceManager.getMessageService().changeInvisibleTime(
                ctx, handle, messageId, changeInvisibleTimeRequestHeader, timeoutMillis)
            .thenApplyAsync(ackResult -> {
                if (StringUtils.isNotBlank(ackResult.getExtraInfo())) {
                    AckResult result = new AckResult();
                    result.setStatus(ackResult.getStatus());
                    // Copy the pop time from the source result; the original line read the
                    // freshly constructed result back into itself, leaving popTime unset.
                    result.setPopTime(ackResult.getPopTime());
                    result.setExtraInfo(createHandle(ackResult.getExtraInfo(), commitLogOffset));
                    return result;
                } else {
                    return ackResult;
                }
            }, this.executor);
    } catch (Throwable t) {
        future.completeExceptionally(t);
    }
    return FutureUtils.addExecutor(future, this.executor);
}
@Test
public void testChangeInvisibleTime() throws Throwable {
    ReceiptHandle handle = create(createMessageExt(MixAll.RETRY_GROUP_TOPIC_PREFIX + TOPIC, "", 0, 3000));
    assertNotNull(handle);

    ArgumentCaptor<ChangeInvisibleTimeRequestHeader> requestHeaderArgumentCaptor =
        ArgumentCaptor.forClass(ChangeInvisibleTimeRequestHeader.class);
    AckResult innerAckResult = new AckResult();
    innerAckResult.setStatus(AckStatus.OK);
    when(this.messageService.changeInvisibleTime(any(), any(), anyString(), requestHeaderArgumentCaptor.capture(), anyLong()))
        .thenReturn(CompletableFuture.completedFuture(innerAckResult));

    AckResult ackResult = this.consumerProcessor.changeInvisibleTime(createContext(), handle,
        MessageClientIDSetter.createUniqID(), CONSUMER_GROUP, TOPIC, 1000, 3000).get();

    assertEquals(AckStatus.OK, ackResult.getStatus());
    assertEquals(KeyBuilder.buildPopRetryTopic(TOPIC, CONSUMER_GROUP, new BrokerConfig().isEnableRetryTopicV2()),
        requestHeaderArgumentCaptor.getValue().getTopic());
    assertEquals(CONSUMER_GROUP, requestHeaderArgumentCaptor.getValue().getConsumerGroup());
    assertEquals(1000, requestHeaderArgumentCaptor.getValue().getInvisibleTime().longValue());
    assertEquals(handle.getReceiptHandle(), requestHeaderArgumentCaptor.getValue().getExtraInfo());
}
@Override
public Map<String, List<TopicPartition>> assignPartitions(Map<String, List<PartitionInfo>> partitionsPerTopic,
                                                          Map<String, Subscription> subscriptions) {
    Map<String, List<TopicPartition>> assignments = super.assignPartitions(partitionsPerTopic, subscriptions);
    Map<TopicPartition, String> partitionsTransferringOwnership = super.partitionsTransferringOwnership == null
        ? computePartitionsTransferringOwnership(subscriptions, assignments)
        : super.partitionsTransferringOwnership;
    adjustAssignment(assignments, partitionsTransferringOwnership);
    return assignments;
}
@Test
public void testUniformSubscriptionTransferOwnershipListIsRight() {
    this.replicationFactor = 1;
    this.numBrokerRacks = 2;
    this.hasConsumerRack = true;
    Map<String, List<PartitionInfo>> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic1, partitionInfos(topic1, 4));
    subscriptions.put("c0", buildSubscriptionV2Above(topics(topic1),
        partitions(tp(topic1, 0), tp(topic1, 1)), generationId, 0));
    subscriptions.put("c1", buildSubscriptionV2Above(topics(topic1),
        partitions(tp(topic1, 2), tp(topic1, 3)), generationId, 1));
    assignor.assignPartitions(partitionsPerTopic, subscriptions);
    assertEquals(2, assignor.partitionsTransferringOwnership().size());
}
@Override
protected void removeRange(int fromIndex, int toIndex) {
    if (fromIndex == toIndex) {
        return;
    }

    notifyRemoval(fromIndex, toIndex - fromIndex);
    super.removeRange(fromIndex, toIndex);
}
@Test
public void testRemoveRange() {
    modelList.removeRange(0, 2);
    assertEquals(1, modelList.size());
    verify(observer).onItemRangeRemoved(0, 2);
}
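A self-contained sketch of the pattern this override relies on: an ArrayList subclass that notifies an observer before delegating to super.removeRange, so the observer can still inspect the items about to be removed. The ObservableList and Observer names are illustrative, not the actual classes under test:

import java.util.ArrayList;

// Illustrative names; the real list and observer types are not shown in this snippet.
class ObservableList<T> extends ArrayList<T> {
    interface Observer {
        void onItemRangeRemoved(int positionStart, int itemCount);
    }

    private final Observer observer;

    ObservableList(Observer observer) {
        this.observer = observer;
    }

    @Override
    protected void removeRange(int fromIndex, int toIndex) {
        if (fromIndex == toIndex) {
            return; // empty range: remove nothing and skip the notification
        }
        // Notify first so the observer sees the list before mutation.
        observer.onItemRangeRemoved(fromIndex, toIndex - fromIndex);
        super.removeRange(fromIndex, toIndex);
    }

    // ArrayList.removeRange is protected; expose it for external callers.
    public void removeItems(int fromIndex, int toIndex) {
        removeRange(fromIndex, toIndex);
    }
}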
public static long hash64(byte[] data) {
    return hash64(data, 1337);
}
@Test
@Disabled
public void bulkHashing64Test() {
    String[] strArray = getRandomStringArray();

    long startCity = System.currentTimeMillis();
    for (String s : strArray) {
        CityHash.hash64(s.getBytes());
    }
    long endCity = System.currentTimeMillis();

    long startMetro = System.currentTimeMillis();
    for (String s : strArray) {
        MetroHash.hash64(StrUtil.utf8Bytes(s));
    }
    long endMetro = System.currentTimeMillis();

    System.out.println("metroHash =============" + (endMetro - startMetro));
    System.out.println("cityHash =============" + (endCity - startCity));
}
@Override
public BulkheadConfig getBulkheadConfig() {
    return config;
}
@Test
public void testCreateWithDefaults() {
    Bulkhead bulkhead = Bulkhead.ofDefaults("test");

    assertThat(bulkhead).isNotNull();
    assertThat(bulkhead.getBulkheadConfig()).isNotNull();
    assertThat(bulkhead.getBulkheadConfig().getMaxConcurrentCalls())
        .isEqualTo(DEFAULT_MAX_CONCURRENT_CALLS);
    assertThat(bulkhead.getBulkheadConfig().isWritableStackTraceEnabled())
        .isEqualTo(DEFAULT_WRITABLE_STACK_TRACE_ENABLED);
    assertThat(bulkhead.getBulkheadConfig().isFairCallHandlingEnabled())
        .isEqualTo(DEFAULT_FAIR_CALL_HANDLING_STRATEGY_ENABLED);
}
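Beyond the defaults exercised above, resilience4j bulkheads accept a custom configuration. A short usage sketch (the limit and wait values are arbitrary examples, not defaults from the library):

import io.github.resilience4j.bulkhead.Bulkhead;
import io.github.resilience4j.bulkhead.BulkheadConfig;
import java.time.Duration;

// Arbitrary example values: at most 5 concurrent calls, wait up to 100 ms for a permit.
BulkheadConfig config = BulkheadConfig.custom()
        .maxConcurrentCalls(5)
        .maxWaitDuration(Duration.ofMillis(100))
        .build();
Bulkhead bulkhead = Bulkhead.of("backend", config);

// Calls beyond the limit wait up to maxWaitDuration, then fail with BulkheadFullException.
String result = bulkhead.executeSupplier(() -> "ok");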