focal_method: string (length 13 to 60.9k)
test_case: string (length 25 to 109k)
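Each row below pairs a production method (focal_method) with a unit test that exercises it (test_case). A minimal sketch of how such pairs might be iterated, assuming a hypothetical JSONL export with these two string fields; the file name pairs.jsonl and the use of Jackson are assumptions for illustration, not part of the dataset itself:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class ReadPairs {
    public static void main(String[] args) throws IOException {
        ObjectMapper mapper = new ObjectMapper();
        // Hypothetical export: one JSON object per line holding the two columns above.
        for (String line : Files.readAllLines(Path.of("pairs.jsonl"))) {
            JsonNode row = mapper.readTree(line);
            String focalMethod = row.get("focal_method").asText();
            String testCase = row.get("test_case").asText();
            // Lengths fall within the ranges listed in the header (13 to 60.9k / 25 to 109k).
            System.out.printf("focal=%d chars, test=%d chars%n", focalMethod.length(), testCase.length());
        }
    }
}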
@Override public void open() throws Exception { super.open(); forwardedInputSerializer = new RowDataSerializer(inputType); this.lastKeyDataStartPos = 0; windowBoundaryWithDataBaos = new ByteArrayOutputStreamWithPos(); windowBoundaryWithDataWrapper = new DataOutputViewStreamWrapper(windowBoundaryWithDataBaos); boundedRangeWindowBoundaries = new ArrayList<>(lowerBoundary.length); boundedRangeWindowIndex = new ArrayList<>(); for (int i = 0; i < lowerBoundary.length; i++) { // range window with bounded preceding or bounded following if (isRangeWindows[i] && (lowerBoundary[i] != Long.MIN_VALUE || upperBoundary[i] != Long.MAX_VALUE)) { boundedRangeWindowIndex.add(i); boundedRangeWindowBoundaries.add(new ArrayList<>()); } } }
@Test void testOverWindowAggregateFunction() throws Exception { OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = getTestHarness(new Configuration()); long initialTime = 0L; ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>(); testHarness.open(); testHarness.processElement( new StreamRecord<>(newBinaryRow(true, "c1", "c2", 0L, 0L), initialTime + 1)); testHarness.processElement( new StreamRecord<>(newBinaryRow(true, "c1", "c4", 1L, 0L), initialTime + 2)); testHarness.processElement( new StreamRecord<>(newBinaryRow(true, "c1", "c6", 2L, 10L), initialTime + 3)); testHarness.processElement( new StreamRecord<>(newBinaryRow(true, "c2", "c8", 3L, 0L), initialTime + 3)); testHarness.close(); expectedOutput.add(new StreamRecord<>(newRow(true, "c1", "c2", 0L, 0L, 0L))); expectedOutput.add(new StreamRecord<>(newRow(true, "c1", "c4", 1L, 0L, 0L))); expectedOutput.add(new StreamRecord<>(newRow(true, "c1", "c6", 2L, 10L, 2L))); expectedOutput.add(new StreamRecord<>(newRow(true, "c2", "c8", 3L, 0L, 3L))); assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput()); }
@Override public void deleteBrand(Long id) { // validate that the brand exists validateBrandExists(id); // delete it brandMapper.deleteById(id); }
@Test public void testDeleteBrand_success() { // mock data ProductBrandDO dbBrand = randomPojo(ProductBrandDO.class); brandMapper.insert(dbBrand); // @Sql: insert an existing record first // prepare parameters Long id = dbBrand.getId(); // invoke brandService.deleteBrand(id); // verify the record no longer exists assertNull(brandMapper.selectById(id)); }
void handleConfigDataChangeEvent(Event event) { if (event instanceof ConfigDataChangeEvent) { ConfigDataChangeEvent evt = (ConfigDataChangeEvent) event; long dumpTs = evt.lastModifiedTs; String dataId = evt.dataId; String group = evt.group; String tenant = evt.tenant; String tag = evt.tag; MetricsMonitor.incrementConfigChangeCount(tenant, group, dataId); Collection<Member> ipList = memberManager.allMembersWithoutSelf(); // In fact, any type of queue can be used here Queue<NotifySingleRpcTask> rpcQueue = new LinkedList<>(); for (Member member : ipList) { // grpc report data change only rpcQueue.add( new NotifySingleRpcTask(dataId, group, tenant, tag, dumpTs, evt.isBeta, evt.isBatch, member)); } if (!rpcQueue.isEmpty()) { ConfigExecutor.executeAsyncNotify(new AsyncRpcTask(rpcQueue)); } } }
@Test void testHandleConfigDataChangeEvent() { long timeStamp = System.currentTimeMillis(); List<Member> memberList = new ArrayList<>(); // member1 success Member member1 = new Member(); member1.setIp("testip1" + timeStamp); member1.setState(NodeState.UP); memberList.add(member1); // member2 exception Member member2 = new Member(); member2.setIp("testip2" + timeStamp); member2.setState(NodeState.UP); memberList.add(member2); // member3 unhealthy Member member3 = new Member(); member3.setIp("testip3" + timeStamp); member3.setState(NodeState.DOWN); memberList.add(member3); Mockito.when(serverMemberManager.allMembersWithoutSelf()).thenReturn(memberList); configExecutorMocked.when(() -> ConfigExecutor.scheduleAsyncNotify(any(Runnable.class), anyLong(), any(TimeUnit.class))) .thenAnswer(invocation -> null); String dataId = "testDataId" + timeStamp; String group = "testGroup"; AsyncNotifyService asyncNotifyService = new AsyncNotifyService(serverMemberManager); asyncNotifyService.handleConfigDataChangeEvent(new ConfigDataChangeEvent(dataId, group, System.currentTimeMillis())); // expect a single async RPC task to be dispatched covering all members. configExecutorMocked.verify(() -> ConfigExecutor.executeAsyncNotify(any(AsyncNotifyService.AsyncRpcTask.class)), times(1)); }
public static boolean hasPublicNullaryConstructor(Class<?> clazz) { return Arrays.stream(clazz.getConstructors()) .anyMatch(constructor -> constructor.getParameterCount() == 0); }
@Test void testHasNullaryConstructorFalse() { assertThat(InstantiationUtil.hasPublicNullaryConstructor(InstantiationUtil.class)) .isFalse(); }
public static Map<String, String> removeHeaders(Map<String, String> headers, Collection<String> headerNames) { if (headers == null || headerNames == null || headerNames.isEmpty()) { return headers; } Set<String> headersToRemove = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); headersToRemove.addAll(headerNames); Map<String, String> newHeaders = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); for(Map.Entry<String, String> header : headers.entrySet()) { String name = header.getKey(); if (!headersToRemove.contains(name)) { newHeaders.put(name, header.getValue()); } } return newHeaders; }
@Test public void testRemoveHeaders() { Map<String, String> headers = new HashMap<>(); headers.put("X-header1", "header1Value"); headers.put("X-header2", "header2Value"); headers.put("X-header3", "header3Value"); List<String> headersToRemove = Arrays.asList("x-header3", "x-header4"); Map<String, String> newHeader = HeaderUtil.removeHeaders(headers, headersToRemove); Assert.assertEquals(newHeader.size(), 2); Assert.assertEquals(newHeader.get("x-header1"), "header1Value"); Assert.assertEquals(newHeader.get("x-header2"), "header2Value"); }
@Override public ResultSetMetaData getMetaData() throws SQLException { validateState(); return new PinotResultMetadata(_totalColumns, _columns, _columnDataTypes); }
@Test public void testGetResultMetadata() throws Exception { ResultSetGroup resultSetGroup = getResultSet(TEST_RESULT_SET_RESOURCE); ResultSet resultSet = resultSetGroup.getResultSet(0); PinotResultSet pinotResultSet = new PinotResultSet(resultSet); ResultSetMetaData pinotResultSetMetadata = pinotResultSet.getMetaData(); for (int i = 0; i < resultSet.getColumnCount(); i++) { Assert.assertEquals(pinotResultSetMetadata.getColumnTypeName(i + 1), resultSet.getColumnDataType(i)); } }
public static RowExpression inlineVariables(Function<VariableReferenceExpression, ? extends RowExpression> mapping, RowExpression expression) { return RowExpressionTreeRewriter.rewriteWith(new RowExpressionVariableInliner(mapping), expression); }
@Test public void testInlineVariable() { assertEquals(RowExpressionVariableInliner.inlineVariables( ImmutableMap.of( variable("a"), variable("b")), variable("a")), variable("b")); }
public static StringBuilder appends(StringBuilder builder, CharSequence... charSequences) { if (builder == null) { return createBuilder(charSequences); } if (charSequences == null || charSequences.length == 0) { return builder; } for (CharSequence sequence : charSequences) { builder.append(sequence); } return builder; }
@Test public void appends() { StringBuilder sb1 = StringUtil.appends(null, "H", "ippo", "4j"); Assert.assertEquals("Hippo4j", sb1.toString()); StringBuilder sb2 = StringUtil.appends(StringUtil.createBuilder("To "), null); Assert.assertEquals("To ", sb2.toString()); StringBuilder sb3 = StringUtil.appends(StringUtil.createBuilder("To "), "H", "ippo", "4j"); Assert.assertEquals("To Hippo4j", sb3.toString()); }
public static Optional<Object> buildWithConstructor(String className, Class<?>[] paramsTypes, Object[] params) { final Class<?> clazz = loadClass(className).orElse(null); return buildWithConstructor(clazz, paramsTypes, params); }
@Test public void buildWithConstructor() { final Optional<Object> result = ReflectUtils.buildWithConstructor(TestReflect.class.getName(), null, null); Assert.assertTrue(result.isPresent() && result.get() instanceof TestReflect); final Optional<Object> paramsResult = ReflectUtils.buildWithConstructor(TestReflect.class.getName(), new Class[] {int.class, int.class}, new Object[] {1, 2}); Assert.assertTrue(paramsResult.isPresent() && paramsResult.get() instanceof TestReflect); final TestReflect reflect = (TestReflect) paramsResult.get(); Assert.assertEquals(reflect.x + reflect.y, 3); }
@ApiOperation(value = "Delete Tenant Profile (deleteTenantProfile)", notes = "Deletes the tenant profile. Referencing non-existing tenant profile Id will cause an error. Referencing profile that is used by the tenants will cause an error. " + SYSTEM_AUTHORITY_PARAGRAPH) @PreAuthorize("hasAuthority('SYS_ADMIN')") @RequestMapping(value = "/tenantProfile/{tenantProfileId}", method = RequestMethod.DELETE) @ResponseStatus(value = HttpStatus.OK) public void deleteTenantProfile(@Parameter(description = TENANT_PROFILE_ID_PARAM_DESCRIPTION) @PathVariable("tenantProfileId") String strTenantProfileId) throws ThingsboardException { checkParameter("tenantProfileId", strTenantProfileId); TenantProfileId tenantProfileId = new TenantProfileId(toUUID(strTenantProfileId)); TenantProfile profile = checkTenantProfileId(tenantProfileId, Operation.DELETE); tbTenantProfileService.delete(getTenantId(), profile); }
@Test public void testDeleteTenantProfile() throws Exception { loginSysAdmin(); TenantProfile tenantProfile = this.createTenantProfile("Tenant Profile"); TenantProfile savedTenantProfile = doPost("/api/tenantProfile", tenantProfile, TenantProfile.class); Mockito.reset(tbClusterService); doDelete("/api/tenantProfile/" + savedTenantProfile.getId().getId().toString()) .andExpect(status().isOk()); testBroadcastEntityStateChangeEventTimeManyTimeTenantProfile(savedTenantProfile, ComponentLifecycleEvent.DELETED, 1); doGet("/api/tenantProfile/" + savedTenantProfile.getId().getId().toString()) .andExpect(status().isNotFound()) .andExpect(statusReason(containsString(msgErrorNoFound("Tenant profile", savedTenantProfile.getId().getId().toString())))); }
public boolean appliesTo(Component project, @Nullable MetricEvaluationResult metricEvaluationResult) { return metricEvaluationResult != null && metricEvaluationResult.evaluationResult.level() != Measure.Level.OK && METRICS_TO_IGNORE_ON_SMALL_CHANGESETS.contains(metricEvaluationResult.condition.getMetric().getKey()) && config.getConfiguration().getBoolean(CoreProperties.QUALITY_GATE_IGNORE_SMALL_CHANGES).orElse(true) && isSmallChangeset(project); }
@Test public void should_not_change_for_bigger_changesets() { QualityGateMeasuresStep.MetricEvaluationResult metricEvaluationResult = generateEvaluationResult(NEW_COVERAGE_KEY, ERROR); Component project = generateNewRootProject(); measureRepository.addRawMeasure(PROJECT_REF, CoreMetrics.NEW_LINES_KEY, newMeasureBuilder().create(20)); boolean result = underTest.appliesTo(project, metricEvaluationResult); assertThat(result).isFalse(); }
public static Identifier parse(String stringValue) { return parse(stringValue, -1); }
@Test public void testParseIntegerMinInclusive() { Identifier.parse("0"); }
public void receiveMessage(ProxyContext ctx, ReceiveMessageRequest request, StreamObserver<ReceiveMessageResponse> responseObserver) { ReceiveMessageResponseStreamWriter writer = createWriter(ctx, responseObserver); try { Settings settings = this.grpcClientSettingsManager.getClientSettings(ctx); Subscription subscription = settings.getSubscription(); boolean fifo = subscription.getFifo(); int maxAttempts = settings.getBackoffPolicy().getMaxAttempts(); ProxyConfig config = ConfigurationManager.getProxyConfig(); Long timeRemaining = ctx.getRemainingMs(); long pollingTime; if (request.hasLongPollingTimeout()) { pollingTime = Durations.toMillis(request.getLongPollingTimeout()); } else { pollingTime = timeRemaining - Durations.toMillis(settings.getRequestTimeout()) / 2; } if (pollingTime < config.getGrpcClientConsumerMinLongPollingTimeoutMillis()) { pollingTime = config.getGrpcClientConsumerMinLongPollingTimeoutMillis(); } if (pollingTime > config.getGrpcClientConsumerMaxLongPollingTimeoutMillis()) { pollingTime = config.getGrpcClientConsumerMaxLongPollingTimeoutMillis(); } if (pollingTime > timeRemaining) { if (timeRemaining >= config.getGrpcClientConsumerMinLongPollingTimeoutMillis()) { pollingTime = timeRemaining; } else { final String clientVersion = ctx.getClientVersion(); Code code = null == clientVersion || ILLEGAL_POLLING_TIME_INTRODUCED_CLIENT_VERSION.compareTo(clientVersion) > 0 ? Code.BAD_REQUEST : Code.ILLEGAL_POLLING_TIME; writer.writeAndComplete(ctx, code, "The deadline time remaining is not enough" + " for polling, please check network condition"); return; } } validateTopicAndConsumerGroup(request.getMessageQueue().getTopic(), request.getGroup()); String topic = request.getMessageQueue().getTopic().getName(); String group = request.getGroup().getName(); long actualInvisibleTime = Durations.toMillis(request.getInvisibleDuration()); ProxyConfig proxyConfig = ConfigurationManager.getProxyConfig(); if (proxyConfig.isEnableProxyAutoRenew() && request.getAutoRenew()) { actualInvisibleTime = proxyConfig.getDefaultInvisibleTimeMills(); } else { validateInvisibleTime(actualInvisibleTime, ConfigurationManager.getProxyConfig().getMinInvisibleTimeMillsForRecv()); } FilterExpression filterExpression = request.getFilterExpression(); SubscriptionData subscriptionData; try { subscriptionData = FilterAPI.build(topic, filterExpression.getExpression(), GrpcConverter.getInstance().buildExpressionType(filterExpression.getType())); } catch (Exception e) { writer.writeAndComplete(ctx, Code.ILLEGAL_FILTER_EXPRESSION, e.getMessage()); return; } this.messagingProcessor.popMessage( ctx, new ReceiveMessageQueueSelector( request.getMessageQueue().getBroker().getName() ), group, topic, request.getBatchSize(), actualInvisibleTime, pollingTime, ConsumeInitMode.MAX, subscriptionData, fifo, new PopMessageResultFilterImpl(maxAttempts), request.hasAttemptId() ? 
request.getAttemptId() : null, timeRemaining ).thenAccept(popResult -> { if (proxyConfig.isEnableProxyAutoRenew() && request.getAutoRenew()) { if (PopStatus.FOUND.equals(popResult.getPopStatus())) { List<MessageExt> messageExtList = popResult.getMsgFoundList(); for (MessageExt messageExt : messageExtList) { String receiptHandle = messageExt.getProperty(MessageConst.PROPERTY_POP_CK); if (receiptHandle != null) { MessageReceiptHandle messageReceiptHandle = new MessageReceiptHandle(group, topic, messageExt.getQueueId(), receiptHandle, messageExt.getMsgId(), messageExt.getQueueOffset(), messageExt.getReconsumeTimes()); messagingProcessor.addReceiptHandle(ctx, grpcChannelManager.getChannel(ctx.getClientID()), group, messageExt.getMsgId(), messageReceiptHandle); } } } } writer.writeAndComplete(ctx, request, popResult); }) .exceptionally(t -> { writer.writeAndComplete(ctx, request, t); return null; }); } catch (Throwable t) { writer.writeAndComplete(ctx, request, t); } }
@Test public void testReceiveMessageIllegalFilter() { StreamObserver<ReceiveMessageResponse> receiveStreamObserver = mock(ServerCallStreamObserver.class); ArgumentCaptor<ReceiveMessageResponse> responseArgumentCaptor = ArgumentCaptor.forClass(ReceiveMessageResponse.class); doNothing().when(receiveStreamObserver).onNext(responseArgumentCaptor.capture()); when(this.grpcClientSettingsManager.getClientSettings(any())).thenReturn(Settings.newBuilder().getDefaultInstanceForType()); this.receiveMessageActivity.receiveMessage( createContext(), ReceiveMessageRequest.newBuilder() .setGroup(Resource.newBuilder().setName(CONSUMER_GROUP).build()) .setMessageQueue(MessageQueue.newBuilder().setTopic(Resource.newBuilder().setName(TOPIC).build()).build()) .setAutoRenew(true) .setFilterExpression(FilterExpression.newBuilder() .setType(FilterType.SQL) .setExpression("") .build()) .build(), receiveStreamObserver ); assertEquals(Code.ILLEGAL_FILTER_EXPRESSION, getResponseCodeFromReceiveMessageResponseList(responseArgumentCaptor.getAllValues())); }
public static boolean isValidValue(Map<String, Object> serviceSuppliedConfig, Map<String, Object> clientSuppliedServiceConfig, String propertyName) { // prevent clients from violating SLAs as published by the service if (propertyName.equals(PropertyKeys.HTTP_REQUEST_TIMEOUT)) { String clientSuppliedTimeout = (String)clientSuppliedServiceConfig.get(propertyName); String serviceSuppliedTimeout = (String)serviceSuppliedConfig.get(propertyName); try { return Integer.parseInt(clientSuppliedTimeout) >= Integer.parseInt(serviceSuppliedTimeout); } catch (NumberFormatException e) { _log.error("Failed to convert HTTP Request Timeout to an int. clientSuppliedTimeout is " + clientSuppliedTimeout + ". serviceSuppliedTimeout is " + serviceSuppliedTimeout, e); return false; } } return true; }
@Test public void testValidHttpRequestTimeout() { Map<String, Object> serviceSuppliedProperties = new HashMap<>(); serviceSuppliedProperties.put(PropertyKeys.HTTP_REQUEST_TIMEOUT, "1000"); Map<String, Object> clientSuppliedProperties = new HashMap<>(); clientSuppliedProperties.put(PropertyKeys.HTTP_REQUEST_TIMEOUT, "2000"); Assert.assertTrue(ClientServiceConfigValidator.isValidValue(serviceSuppliedProperties, clientSuppliedProperties, PropertyKeys.HTTP_REQUEST_TIMEOUT)); }
@Override public byte[] serialize(final String topic, final LeftOrRightValue<V1, V2> data) { if (data == null) { return null; } final byte[] rawValue = (data.getLeftValue() != null) ? leftSerializer.serialize(topic, data.getLeftValue()) : rightSerializer.serialize(topic, data.getRightValue()); if (rawValue == null) { return null; } return ByteBuffer .allocate(1 + rawValue.length) .put((byte) (data.getLeftValue() != null ? 1 : 0)) .put(rawValue) .array(); }
@Test public void shouldThrowIfSerializeOtherValueAsNull() { assertThrows(NullPointerException.class, () -> STRING_OR_INTEGER_SERDE.serializer().serialize(TOPIC, LeftOrRightValue.makeRightValue(null))); }
public static InetAddress fixScopeIdAndGetInetAddress(final InetAddress inetAddress) throws SocketException { if (!(inetAddress instanceof Inet6Address inet6Address)) { return inetAddress; } if (!inetAddress.isLinkLocalAddress() && !inetAddress.isSiteLocalAddress()) { return inetAddress; } if (inet6Address.getScopeId() > 0 || inet6Address.getScopedInterface() != null) { return inetAddress; } final Inet6Address resultInetAddress = findRealInet6Address(inet6Address); return resultInetAddress == null ? inetAddress : resultInetAddress; }
@Test public void testFixScopeIdAndGetInetAddress_whenLinkLocalAddress() throws SocketException, UnknownHostException { byte[] address = InetAddress.getByName(SOME_LINK_LOCAL_ADDRESS).getAddress(); Inet6Address inet6Address = Inet6Address.getByAddress(SOME_LINK_LOCAL_ADDRESS, address, 1); assertThat(inet6Address.isLinkLocalAddress()).isTrue(); InetAddress actual = AddressUtil.fixScopeIdAndGetInetAddress(inet6Address); assertEquals(inet6Address, actual); }
@Override public ConsumerBuilder<T> startPaused(boolean paused) { conf.setStartPaused(paused); return this; }
@Test public void testStartPaused() { consumerBuilderImpl.startPaused(true); verify(consumerBuilderImpl.getConf()).setStartPaused(true); }
SortMergeSubpartitionReader createSubpartitionReader( BufferAvailabilityListener availabilityListener, int targetSubpartition, PartitionedFile resultFile) throws IOException { synchronized (lock) { checkState(!isReleased, "Partition is already released."); PartitionedFileReader fileReader = createFileReader(resultFile, targetSubpartition); SortMergeSubpartitionReader subpartitionReader = new SortMergeSubpartitionReader(availabilityListener, fileReader); if (allReaders.isEmpty()) { bufferPool.registerRequester(this); } allReaders.add(subpartitionReader); sortedReaders.add(subpartitionReader); subpartitionReader .getReleaseFuture() .thenRun(() -> releaseSubpartitionReader(subpartitionReader)); mayTriggerReading(); return subpartitionReader; } }
@Test void testOnSubpartitionReaderError() throws Exception { SortMergeSubpartitionReader subpartitionReader = readScheduler.createSubpartitionReader( new NoOpBufferAvailablityListener(), 0, partitionedFile); subpartitionReader.releaseAllResources(); waitUntilReadFinish(); assertAllResourcesReleased(); }
public static Area getArea(Integer id) { return areas.get(id); }
@Test public void testGetArea() { // invoke: Beijing Area area = AreaUtils.getArea(110100); // assert assertEquals(area.getId(), 110100); assertEquals(area.getName(), "北京市"); assertEquals(area.getType(), AreaTypeEnum.CITY.getType()); assertEquals(area.getParent().getId(), 110000); assertEquals(area.getChildren().size(), 16); }
@Override public Num calculate(BarSeries series, Position position) { if (position == null || position.getEntry() == null || position.getExit() == null) { return series.zero(); } CashFlow cashFlow = new CashFlow(series, position); return calculateMaximumDrawdown(series, null, cashFlow); }
@Test public void withTradesThatSellBeforeBuying() { MockBarSeries series = new MockBarSeries(numFunction, 2, 1, 3, 5, 6, 3, 20); AnalysisCriterion mdd = getCriterion(); TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(1, series), Trade.buyAt(3, series), Trade.sellAt(4, series), Trade.sellAt(5, series), Trade.buyAt(6, series)); assertNumEquals(3.8d, mdd.calculate(series, tradingRecord)); }
@Override public byte[] evaluateResponse( @Nonnull final byte[] response ) throws SaslException { if ( isComplete() ) { throw new IllegalStateException( "Authentication exchange already completed." ); } // The value as sent to us in the 'from' attribute of the stream element sent by the remote server. final String defaultIdentity = session.getDefaultIdentity(); // RFC 6120 Section 4.7.1: // "Because a server is a "public entity" on the XMPP network, it MUST include the 'from' attribute after the // confidentiality and integrity of the stream are protected via TLS or an equivalent security layer." // // When doing SASL EXTERNAL, TLS must already have been negotiated, which means that the 'from' attribute must have been set. if (defaultIdentity == null || defaultIdentity.isEmpty()) { throw new SaslFailureException(Failure.NOT_AUTHORIZED, "Peer does not provide 'from' attribute value on stream."); } final String requestedId; if (response.length == 0 && session.getSessionData(SASLAuthentication.SASL_LAST_RESPONSE_WAS_PROVIDED_BUT_EMPTY) == null) { if (PROPERTY_SASL_EXTERNAL_SERVER_REQUIRE_AUTHZID.getValue()) { // No initial response. Send a challenge to get one, per RFC 4422 appendix-A. return new byte[0]; } else { requestedId = defaultIdentity; } } else { requestedId = new String( response, StandardCharsets.UTF_8 ); } complete = true; Log.trace("Completing handshake with '{}' using authzid value: '{}'", defaultIdentity, requestedId); // Added for backwards compatibility. Not required by XMPP, but versions of Openfire prior to 4.8.0 did require the authzid to be present. if (SASLAuthentication.EXTERNAL_S2S_REQUIRE_AUTHZID.getValue() && requestedId.isEmpty()) { throw new SaslFailureException(Failure.INVALID_AUTHZID, "Peer does not provide authzid, which is required by configuration."); } // When an authorization identity is provided, make sure that it matches the 'from' value from the session stream. if (!requestedId.isEmpty() && !requestedId.equals(defaultIdentity)) { throw new SaslFailureException(Failure.INVALID_AUTHZID, "Stream 'from' attribute value '" + defaultIdentity + "' does not equal SASL authzid '" + requestedId + "'"); } if (!SASLAuthentication.verifyCertificates(session.getConnection().getPeerCertificates(), defaultIdentity, true)) { throw new SaslFailureException(Failure.NOT_AUTHORIZED, "Server-to-Server certificate verification failed."); } authorizationID = defaultIdentity; Log.trace("Successfully authenticated '{}'", authorizationID); return null; // Success! }
@Test public void testEmptyInitialResponse() throws Exception { // Setup test fixture. final String streamID = "example.org"; when(session.getSessionData(SASLAuthentication.SASL_LAST_RESPONSE_WAS_PROVIDED_BUT_EMPTY)).thenReturn(true); when(session.getDefaultIdentity()).thenReturn(streamID); when(session.getConnection()).thenReturn(connection); saslAuthentication.when(() -> SASLAuthentication.verifyCertificates(any(), eq(streamID), anyBoolean())).thenReturn(true); final ExternalServerSaslServer server = new ExternalServerSaslServer(session); final byte[] input = new byte[]{}; // Execute system under test. final byte[] response = server.evaluateResponse(input); // Verify results. assertNull(response); // This asserts for successful authentication, rather than 'not a challenge'. }
public boolean containsShardingTable(final Collection<String> logicTableNames) { for (String each : logicTableNames) { if (isShardingTable(each)) { return true; } } return false; }
@Test void assertContainsShardingTableForMultipleTables() { assertTrue(createMaximumShardingRule().containsShardingTable(Arrays.asList("logic_table", "table_0"))); }
public QueryResult queryMessage(String topic, String key, int maxNum, long begin, long end) throws MQClientException, InterruptedException { return this.mQClientFactory.getMQAdminImpl().queryMessage(topic, key, maxNum, begin, end); }
@Test public void testQueryMessage() throws InterruptedException, MQClientException { assertNull(defaultMQPushConsumerImpl.queryMessage(defaultTopic, defaultKey, 1, 0, 1)); }
@Override public Mono<MatchResult> matches(ServerWebExchange exchange) { return isWebSocketUpgrade(exchange.getRequest().getHeaders()) ? match() : notMatch(); }
@Test void shouldMatchIfWebSocketProtocol() { var httpRequest = MockServerHttpRequest.get("") .header(HttpHeaders.CONNECTION, HttpHeaders.UPGRADE) .header(HttpHeaders.UPGRADE, "websocket") .build(); var wsExchange = MockServerWebExchange.from(httpRequest); var wsMatcher = new WebSocketServerWebExchangeMatcher(); StepVerifier.create(wsMatcher.matches(wsExchange)) .consumeNextWith(result -> assertTrue(result.isMatch())) .verifyComplete(); }
public static <T> Read<T> readAvrosWithBeamSchema(Class<T> clazz) { if (clazz.equals(GenericRecord.class)) { throw new IllegalArgumentException("For GenericRecord, please call readAvroGenericRecords"); } AvroCoder<T> coder = AvroCoder.of(clazz); org.apache.avro.Schema avroSchema = coder.getSchema(); Schema schema = AvroUtils.getSchema(clazz, avroSchema); return Read.newBuilder(parsePayloadUsingCoder(coder)) .setCoder( SchemaCoder.of( schema, TypeDescriptor.of(clazz), AvroUtils.getToRowFunction(clazz, avroSchema), AvroUtils.getFromRowFunction(clazz))) .build(); }
@Test public void testAvroSpecificRecord() { AvroCoder<AvroGeneratedUser> coder = AvroCoder.specific(AvroGeneratedUser.class); List<AvroGeneratedUser> inputs = ImmutableList.of( new AvroGeneratedUser("Bob", 256, null), new AvroGeneratedUser("Alice", 128, null), new AvroGeneratedUser("Ted", null, "white")); setupTestClient(inputs, coder); PCollection<AvroGeneratedUser> read = pipeline.apply( PubsubIO.readAvrosWithBeamSchema(AvroGeneratedUser.class) .fromSubscription(SUBSCRIPTION.getPath()) .withClock(CLOCK) .withClientFactory(clientFactory)); PAssert.that(read).containsInAnyOrder(inputs); pipeline.run(); }
@Override public Set<OAuth2AccessTokenEntity> getAllAccessTokens() { TypedQuery<OAuth2AccessTokenEntity> query = manager.createNamedQuery(OAuth2AccessTokenEntity.QUERY_ALL, OAuth2AccessTokenEntity.class); return new LinkedHashSet<>(query.getResultList()); }
@Test public void testGetAllAccessTokens(){ Set<OAuth2AccessTokenEntity> tokens = repository.getAllAccessTokens(); assertEquals(4, tokens.size()); }
public AlterSourceCommand create(final AlterSource statement) { final DataSource dataSource = metaStore.getSource(statement.getName()); final String dataSourceType = statement.getDataSourceType().getKsqlType(); if (dataSource != null && dataSource.isSource()) { throw new KsqlException( String.format("Cannot alter %s '%s': ALTER operations are not supported on source %s.", dataSourceType.toLowerCase(), statement.getName().text(), dataSourceType.toLowerCase() + "s")); } final List<Column> newColumns = statement .getAlterOptions() .stream() .map( alterOption -> Column.of( ColumnName.of(alterOption.getColumnName()), alterOption.getType().getSqlType(), Namespace.VALUE, 0)) .collect(Collectors.toList()); return new AlterSourceCommand( statement.getName(), dataSourceType, newColumns ); }
@Test public void shouldThrowInAlterOnSourceTable() { // Given: final AlterSource alterSource = new AlterSource(TABLE_NAME, DataSourceType.KTABLE, NEW_COLUMNS); when(ksqlTable.isSource()).thenReturn(true); // When: final Exception e = assertThrows( KsqlException.class, () -> alterSourceFactory.create(alterSource)); // Then: assertThat(e.getMessage(), containsString( "Cannot alter table 'tablename': ALTER operations are not supported on " + "source tables.")); }
@Override public void ack() { Function.ProcessingGuarantees processingGuarantees = functionConfig.getProcessingGuarantees(); if (processingGuarantees == Function.ProcessingGuarantees.MANUAL) { record.ack(); } else { log.warn("Ignoring this ack call; with processing guarantees [{}] and autoAck [{}], " + "the framework acks automatically", processingGuarantees, functionConfig.getAutoAck()); } }
@Test public void testAck() { Record record = mock(Record.class); Function.FunctionDetails functionDetails = Function.FunctionDetails.newBuilder().setAutoAck(true) .setProcessingGuarantees(Function.ProcessingGuarantees.ATMOST_ONCE).build(); PulsarFunctionRecord pulsarFunctionRecord = new PulsarFunctionRecord<>(record, functionDetails); pulsarFunctionRecord.ack(); verify(record, times(0)).ack(); clearInvocations(record); functionDetails = Function.FunctionDetails.newBuilder().setAutoAck(true) .setProcessingGuarantees(Function.ProcessingGuarantees.ATLEAST_ONCE).build(); pulsarFunctionRecord = new PulsarFunctionRecord<>(record, functionDetails); pulsarFunctionRecord.ack(); verify(record, times(0)).ack(); clearInvocations(record); functionDetails = Function.FunctionDetails.newBuilder().setAutoAck(true) .setProcessingGuarantees(Function.ProcessingGuarantees.EFFECTIVELY_ONCE).build(); pulsarFunctionRecord = new PulsarFunctionRecord<>(record, functionDetails); pulsarFunctionRecord.ack(); verify(record, times(0)).ack(); clearInvocations(record); functionDetails = Function.FunctionDetails.newBuilder().setAutoAck(true) .setProcessingGuarantees(Function.ProcessingGuarantees.MANUAL).build(); pulsarFunctionRecord = new PulsarFunctionRecord<>(record, functionDetails); pulsarFunctionRecord.ack(); verify(record, times(1)).ack(); }
private Function<KsqlConfig, Kudf> getUdfFactory( final Method method, final UdfDescription udfDescriptionAnnotation, final String functionName, final FunctionInvoker invoker, final String sensorName ) { return ksqlConfig -> { final Object actualUdf = FunctionLoaderUtils.instantiateFunctionInstance( method.getDeclaringClass(), udfDescriptionAnnotation.name()); if (actualUdf instanceof Configurable) { ExtensionSecurityManager.INSTANCE.pushInUdf(); try { ((Configurable) actualUdf) .configure(ksqlConfig.getKsqlFunctionsConfigProps(functionName)); } finally { ExtensionSecurityManager.INSTANCE.popOutUdf(); } } final PluggableUdf theUdf = new PluggableUdf(invoker, actualUdf); return metrics.<Kudf>map(m -> new UdfMetricProducer( m.getSensor(sensorName), theUdf, Time.SYSTEM )).orElse(theUdf); }; }
@Test public void shouldEnsureFunctionReturnTypeIsDeepOptional() { final List<SqlArgument> args = Collections.singletonList(SqlArgument.of(SqlTypes.STRING)); final KsqlScalarFunction complexFunction = FUNC_REG .getUdfFactory(FunctionName.of("ComplexFunction")) .getFunction(args); assertThat(complexFunction.getReturnType(args), is( SqlTypes.struct() .field("F0", SqlTypes.struct() .field("F1", SqlTypes.INTEGER) .build()) .build())); }
static boolean fieldMatchCaseInsensitive(Object repoObj, Object filterObj) { return fieldMatch(repoObj, filterObj) || compareIgnoreCaseOnlyIfStringType(repoObj, filterObj); }
@Test public void testFieldMatchWithEqualObjectsShouldReturnTrue() { assertTrue(Utilities.fieldMatchCaseInsensitive("repoObject", "repoObject")); }
public static Object[] realize(Object[] objs, Class<?>[] types) { if (objs.length != types.length) { throw new IllegalArgumentException("args.length != types.length"); } Object[] dests = new Object[objs.length]; for (int i = 0; i < objs.length; i++) { dests[i] = realize(objs[i], types[i]); } return dests; }
@Test public void testPojoGeneric4() throws NoSuchMethodException { String personName = "testName"; Dgeneric generic = createDGenericPersonInfo(personName); Object o = JSON.toJSON(generic); { Dgeneric personInfo = (Dgeneric) PojoUtils.realize(o, Dgeneric.class); assertEquals(Dgeneric.NAME, personInfo.getName()); assertTrue(personInfo.getT() instanceof Map); assertTrue(personInfo.getY() instanceof Map); assertTrue(personInfo.getZ() instanceof Map); } { Type[] createGenericPersonInfos = ReflectUtils.getReturnTypes( PojoUtilsTest.class.getDeclaredMethod("createDGenericPersonInfo", String.class)); Dgeneric personInfo = (Dgeneric) PojoUtils.realize(o, (Class) createGenericPersonInfos[0], createGenericPersonInfos[1]); assertEquals(Dgeneric.NAME, personInfo.getName()); assertEquals(personInfo.getT().getClass(), Ageneric.class); assertEquals(((Ageneric) personInfo.getT()).getData().getClass(), PersonInfo.class); assertEquals(personInfo.getY().getClass(), Bgeneric.class); assertEquals(((Bgeneric) personInfo.getY()).getData().getClass(), PersonInfo.class); assertEquals(personInfo.getZ().getClass(), Cgeneric.class); assertEquals(((Cgeneric) personInfo.getZ()).getData().getClass(), PersonInfo.class); assertEquals(personInfo.getZ().getClass(), Cgeneric.class); assertEquals(((Cgeneric) personInfo.getZ()).getA().getClass(), Ageneric.class); assertEquals(((Cgeneric) personInfo.getZ()).getA().getData().getClass(), PersonInfo.class); assertEquals(((Cgeneric) personInfo.getZ()).getB().getClass(), Bgeneric.class); assertEquals(((Cgeneric) personInfo.getZ()).getB().getData().getClass(), PersonInfo.class); } }
@Operation(summary = "Start collecting of Metadata for one or all connections ") @GetMapping(value = "collect_metadata/{id}", produces = "application/json") public Map<String, String> collectMetadata(@PathVariable("id") String id) throws CollectSamlMetadataException { return metadataProcessorService.collectSamlMetadata(id); }
@Test public void failedCollectingMetadata() throws CollectSamlMetadataException { when(metadataProcessorServiceMock.collectSamlMetadata(anyString())).thenThrow(CollectSamlMetadataException.class); assertThrows(CollectSamlMetadataException.class, () -> { controllerMock.collectMetadata(anyString()); }); }
public static <T> Object create(Class<T> iface, T implementation, RetryPolicy retryPolicy) { return RetryProxy.create(iface, new DefaultFailoverProxyProvider<T>(iface, implementation), retryPolicy); }
@Test public void testRetryOtherThanRemoteException() throws Throwable { Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = Collections.<Class<? extends Exception>, RetryPolicy>singletonMap( IOException.class, RETRY_FOREVER); UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(UnreliableInterface.class, unreliableImpl, retryOtherThanRemoteAndSaslException(TRY_ONCE_THEN_FAIL, exceptionToPolicyMap)); // should retry with local IOException. unreliable.failsOnceWithIOException(); try { // won't get retry on remote exception unreliable.failsOnceWithRemoteException(); fail("Should fail"); } catch (RemoteException e) { // expected } }
@Override public String toString() { return toString(null); }
@Test public void testMultiSigOutputToString() throws Exception { sendMoneyToWallet(AbstractBlockChain.NewBlockType.BEST_CHAIN, Coin.COIN); ECKey myKey = new ECKey(); this.wallet.importKey(myKey); // Simulate another signatory ECKey otherKey = new ECKey(); // Create multi-sig transaction Transaction multiSigTransaction = new Transaction(); List<ECKey> keys = Arrays.asList(myKey, otherKey); Script scriptPubKey = ScriptBuilder.createMultiSigOutputScript(2, keys); multiSigTransaction.addOutput(Coin.COIN, scriptPubKey); SendRequest req = SendRequest.forTx(multiSigTransaction); this.wallet.completeTx(req); TransactionOutput multiSigTransactionOutput = multiSigTransaction.getOutput(0); assertThat(multiSigTransactionOutput.toString(), CoreMatchers.containsString("CHECKMULTISIG")); }
@GET @Path("{path:.*}") @Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8}) public Response get(@PathParam("path") String path, @Context UriInfo uriInfo, @QueryParam(OperationParam.NAME) OperationParam op, @Context Parameters params, @Context HttpServletRequest request) throws IOException, FileSystemAccessException { // Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode if((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) && (op.value() != HttpFSFileSystem.Operation.LISTSTATUS) && accessMode == AccessMode.WRITEONLY) { return Response.status(Response.Status.FORBIDDEN).build(); } UserGroupInformation user = HttpUserGroupInformation.get(); Response response; path = makeAbsolute(path); MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name()); MDC.put("hostname", request.getRemoteAddr()); switch (op.value()) { case OPEN: { Boolean noRedirect = params.get( NoRedirectParam.NAME, NoRedirectParam.class); if (noRedirect) { URI redirectURL = createOpenRedirectionURL(uriInfo); final String js = JsonUtil.toJsonString("Location", redirectURL); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); } else { //Invoking the command directly using an unmanaged FileSystem that is // released by the FileSystemReleaseFilter final FSOperations.FSOpen command = new FSOperations.FSOpen(path); final FileSystem fs = createFileSystem(user); InputStream is = null; UserGroupInformation ugi = UserGroupInformation .createProxyUser(user.getShortUserName(), UserGroupInformation.getLoginUser()); try { is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() { @Override public InputStream run() throws Exception { return command.execute(fs); } }); } catch (InterruptedException ie) { LOG.warn("Open interrupted.", ie); Thread.currentThread().interrupt(); } Long offset = params.get(OffsetParam.NAME, OffsetParam.class); Long len = params.get(LenParam.NAME, LenParam.class); AUDIT_LOG.info("[{}] offset [{}] len [{}]", new Object[] { path, offset, len }); InputStreamEntity entity = new InputStreamEntity(is, offset, len); response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM) .build(); } break; } case GETFILESTATUS: { FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path); Map json = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case LISTSTATUS: { String filter = params.get(FilterParam.NAME, FilterParam.class); FSOperations.FSListStatus command = new FSOperations.FSListStatus(path, filter); Map json = fsExecute(user, command); AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? 
filter : "-"); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETHOMEDIRECTORY: { enforceRootPath(op.value(), path); FSOperations.FSHomeDir command = new FSOperations.FSHomeDir(); JSONObject json = fsExecute(user, command); AUDIT_LOG.info("Home Directory for [{}]", user); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case INSTRUMENTATION: { enforceRootPath(op.value(), path); Groups groups = HttpFSServerWebApp.get().get(Groups.class); Set<String> userGroups = groups.getGroupsSet(user.getShortUserName()); if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) { throw new AccessControlException( "User not in HttpFSServer admin group"); } Instrumentation instrumentation = HttpFSServerWebApp.get().get(Instrumentation.class); Map snapshot = instrumentation.getSnapshot(); response = Response.ok(snapshot).build(); break; } case GETCONTENTSUMMARY: { FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path); Map json = fsExecute(user, command); AUDIT_LOG.info("Content summary for [{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETQUOTAUSAGE: { FSOperations.FSQuotaUsage command = new FSOperations.FSQuotaUsage(path); Map json = fsExecute(user, command); AUDIT_LOG.info("Quota Usage for [{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETFILECHECKSUM: { FSOperations.FSFileChecksum command = new FSOperations.FSFileChecksum(path); Boolean noRedirect = params.get( NoRedirectParam.NAME, NoRedirectParam.class); AUDIT_LOG.info("[{}]", path); if (noRedirect) { URI redirectURL = createOpenRedirectionURL(uriInfo); final String js = JsonUtil.toJsonString("Location", redirectURL); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); } else { Map json = fsExecute(user, command); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); } break; } case GETFILEBLOCKLOCATIONS: { long offset = 0; long len = Long.MAX_VALUE; Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class); Long lenParam = params.get(LenParam.NAME, LenParam.class); AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam); if (offsetParam != null && offsetParam > 0) { offset = offsetParam; } if (lenParam != null && lenParam > 0) { len = lenParam; } FSOperations.FSFileBlockLocations command = new FSOperations.FSFileBlockLocations(path, offset, len); @SuppressWarnings("rawtypes") Map locations = fsExecute(user, command); final String json = JsonUtil.toJsonString("BlockLocations", locations); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETACLSTATUS: { FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path); Map json = fsExecute(user, command); AUDIT_LOG.info("ACL status for [{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETXATTRS: { List<String> xattrNames = params.getValues(XAttrNameParam.NAME, XAttrNameParam.class); XAttrCodec encoding = params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class); FSOperations.FSGetXAttrs command = new FSOperations.FSGetXAttrs(path, xattrNames, encoding); @SuppressWarnings("rawtypes") Map json = fsExecute(user, command); AUDIT_LOG.info("XAttrs for [{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case LISTXATTRS: { FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path); 
@SuppressWarnings("rawtypes") Map json = fsExecute(user, command); AUDIT_LOG.info("XAttr names for [{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case LISTSTATUS_BATCH: { String startAfter = params.get( HttpFSParametersProvider.StartAfterParam.NAME, HttpFSParametersProvider.StartAfterParam.class); byte[] token = HttpFSUtils.EMPTY_BYTES; if (startAfter != null) { token = startAfter.getBytes(StandardCharsets.UTF_8); } FSOperations.FSListStatusBatch command = new FSOperations .FSListStatusBatch(path, token); @SuppressWarnings("rawtypes") Map json = fsExecute(user, command); AUDIT_LOG.info("[{}] token [{}]", path, token); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETTRASHROOT: { FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path); JSONObject json = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETALLSTORAGEPOLICY: { FSOperations.FSGetAllStoragePolicies command = new FSOperations.FSGetAllStoragePolicies(); JSONObject json = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETSTORAGEPOLICY: { FSOperations.FSGetStoragePolicy command = new FSOperations.FSGetStoragePolicy(path); JSONObject json = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETSNAPSHOTDIFF: { String oldSnapshotName = params.get(OldSnapshotNameParam.NAME, OldSnapshotNameParam.class); String snapshotName = params.get(SnapshotNameParam.NAME, SnapshotNameParam.class); FSOperations.FSGetSnapshotDiff command = new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName, snapshotName); String js = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETSNAPSHOTDIFFLISTING: { String oldSnapshotName = params.get(OldSnapshotNameParam.NAME, OldSnapshotNameParam.class); String snapshotName = params.get(SnapshotNameParam.NAME, SnapshotNameParam.class); String snapshotDiffStartPath = params .get(HttpFSParametersProvider.SnapshotDiffStartPathParam.NAME, HttpFSParametersProvider.SnapshotDiffStartPathParam.class); Integer snapshotDiffIndex = params.get(HttpFSParametersProvider.SnapshotDiffIndexParam.NAME, HttpFSParametersProvider.SnapshotDiffIndexParam.class); FSOperations.FSGetSnapshotDiffListing command = new FSOperations.FSGetSnapshotDiffListing(path, oldSnapshotName, snapshotName, snapshotDiffStartPath, snapshotDiffIndex); String js = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETSNAPSHOTTABLEDIRECTORYLIST: { FSOperations.FSGetSnapshottableDirListing command = new FSOperations.FSGetSnapshottableDirListing(); String js = fsExecute(user, command); AUDIT_LOG.info("[{}]", "/"); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETSNAPSHOTLIST: { FSOperations.FSGetSnapshotListing command = new FSOperations.FSGetSnapshotListing(path); String js = fsExecute(user, command); AUDIT_LOG.info("[{}]", "/"); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETSERVERDEFAULTS: { FSOperations.FSGetServerDefaults command = new FSOperations.FSGetServerDefaults(); String js = fsExecute(user, command); AUDIT_LOG.info("[{}]", "/"); response 
= Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case CHECKACCESS: { String mode = params.get(FsActionParam.NAME, FsActionParam.class); FsActionParam fsparam = new FsActionParam(mode); FSOperations.FSAccess command = new FSOperations.FSAccess(path, FsAction.getFsAction(fsparam.value())); fsExecute(user, command); AUDIT_LOG.info("[{}]", "/"); response = Response.ok().build(); break; } case GETECPOLICY: { FSOperations.FSGetErasureCodingPolicy command = new FSOperations.FSGetErasureCodingPolicy(path); String js = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETECPOLICIES: { FSOperations.FSGetErasureCodingPolicies command = new FSOperations.FSGetErasureCodingPolicies(); String js = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETECCODECS: { FSOperations.FSGetErasureCodingCodecs command = new FSOperations.FSGetErasureCodingCodecs(); Map json = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GET_BLOCK_LOCATIONS: { long offset = 0; long len = Long.MAX_VALUE; Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class); Long lenParam = params.get(LenParam.NAME, LenParam.class); AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam); if (offsetParam != null && offsetParam > 0) { offset = offsetParam; } if (lenParam != null && lenParam > 0) { len = lenParam; } FSOperations.FSFileBlockLocationsLegacy command = new FSOperations.FSFileBlockLocationsLegacy(path, offset, len); @SuppressWarnings("rawtypes") Map locations = fsExecute(user, command); final String json = JsonUtil.toJsonString("LocatedBlocks", locations); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } case GETFILELINKSTATUS: { FSOperations.FSFileLinkStatus command = new FSOperations.FSFileLinkStatus(path); @SuppressWarnings("rawtypes") Map js = fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETSTATUS: { FSOperations.FSStatus command = new FSOperations.FSStatus(path); @SuppressWarnings("rawtypes") Map js = fsExecute(user, command); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } case GETTRASHROOTS: { Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class); FSOperations.FSGetTrashRoots command = new FSOperations.FSGetTrashRoots(allUsers); Map json = fsExecute(user, command); AUDIT_LOG.info("allUsers [{}]", allUsers); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } default: { throw new IOException( MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value())); } } return response; }
@Test @TestDir @TestJetty @TestHdfs public void testNoRedirect() throws Exception { createHttpFSServer(false, false); final String testContent = "Test content"; final String path = "/testfile.txt"; final String username = HadoopUsersConfTestHelper.getHadoopUsers()[0]; // Trigger the creation of the file which shouldn't redirect URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format( "/webhdfs/v1{0}?user.name={1}&op=CREATE&noredirect=true", path, username)); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); conn.setRequestMethod(HttpMethod.PUT); conn.connect(); // Verify that it returned the final write location Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); JSONObject json = (JSONObject)new JSONParser().parse( new InputStreamReader(conn.getInputStream())); String location = (String)json.get("Location"); Assert.assertTrue(location.contains(DataParam.NAME)); Assert.assertFalse(location.contains(NoRedirectParam.NAME)); Assert.assertTrue(location.contains("CREATE")); Assert.assertTrue("Wrong location: " + location, location.startsWith(TestJettyHelper.getJettyURL().toString())); // Use the location to actually write the file url = new URL(location); conn = (HttpURLConnection) url.openConnection(); conn.setRequestMethod(HttpMethod.PUT); conn.setRequestProperty( "Content-Type", MediaType.APPLICATION_OCTET_STREAM); conn.setDoOutput(true); conn.connect(); OutputStream os = conn.getOutputStream(); os.write(testContent.getBytes()); os.close(); // Verify that it created the file and returned the location Assert.assertEquals( HttpURLConnection.HTTP_CREATED, conn.getResponseCode()); json = (JSONObject)new JSONParser().parse( new InputStreamReader(conn.getInputStream())); location = (String)json.get("Location"); Assert.assertEquals( TestJettyHelper.getJettyURL() + "/webhdfs/v1" + path, location); // Read the file which shouldn't redirect url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format( "/webhdfs/v1{0}?user.name={1}&op=OPEN&noredirect=true", path, username)); conn = (HttpURLConnection) url.openConnection(); conn.setRequestMethod(HttpMethod.GET); conn.connect(); // Verify that we got the final location to read from Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); json = (JSONObject)new JSONParser().parse( new InputStreamReader(conn.getInputStream())); location = (String)json.get("Location"); Assert.assertTrue(!location.contains(NoRedirectParam.NAME)); Assert.assertTrue(location.contains("OPEN")); Assert.assertTrue("Wrong location: " + location, location.startsWith(TestJettyHelper.getJettyURL().toString())); // Use the location to actually read url = new URL(location); conn = (HttpURLConnection) url.openConnection(); conn.setRequestMethod(HttpMethod.GET); conn.connect(); // Verify that we read what we wrote Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); String content = IOUtils.toString(conn.getInputStream(), StandardCharsets.UTF_8); Assert.assertEquals(testContent, content); // Get the checksum of the file which shouldn't redirect url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format( "/webhdfs/v1{0}?user.name={1}&op=GETFILECHECKSUM&noredirect=true", path, username)); conn = (HttpURLConnection) url.openConnection(); conn.setRequestMethod(HttpMethod.GET); conn.connect(); // Verify that we got the final location to write to Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); json = (JSONObject)new JSONParser().parse( new 
InputStreamReader(conn.getInputStream())); location = (String)json.get("Location"); Assert.assertTrue(!location.contains(NoRedirectParam.NAME)); Assert.assertTrue(location.contains("GETFILECHECKSUM")); Assert.assertTrue("Wrong location: " + location, location.startsWith(TestJettyHelper.getJettyURL().toString())); // Use the location to actually get the checksum url = new URL(location); conn = (HttpURLConnection) url.openConnection(); conn.setRequestMethod(HttpMethod.GET); conn.connect(); // Verify that we read what we wrote Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); json = (JSONObject)new JSONParser().parse( new InputStreamReader(conn.getInputStream())); JSONObject checksum = (JSONObject)json.get("FileChecksum"); Assert.assertEquals( "0000020000000000000000001b9c0a445fed3c0bf1e1aa7438d96b1500000000", checksum.get("bytes")); Assert.assertEquals(28L, checksum.get("length")); Assert.assertEquals("MD5-of-0MD5-of-512CRC32C", checksum.get("algorithm")); }
public static FunctionTypeInfo getFunctionTypeInfo( final ExpressionTypeManager expressionTypeManager, final FunctionCall functionCall, final UdfFactory udfFactory, final Map<String, SqlType> lambdaMapping ) { // CHECKSTYLE_RULES.ON: CyclomaticComplexity final List<Expression> arguments = functionCall.getArguments(); final List<SqlArgument> functionArgumentTypes = firstPassOverFunctionArguments( arguments, expressionTypeManager, lambdaMapping ); final KsqlScalarFunction function = udfFactory.getFunction(functionArgumentTypes); final SqlType returnSchema; final List<ArgumentInfo> argumentInfoForFunction = new ArrayList<>(); if (!functionCall.hasLambdaFunctionCallArguments()) { returnSchema = function.getReturnType(functionArgumentTypes); return FunctionTypeInfo.of( functionArgumentTypes.stream() .map(argument -> ArgumentInfo.of(argument, new HashMap<>(lambdaMapping))) .collect(Collectors.toList()), returnSchema, function ); } else { final List<ParamType> paramTypes = function.parameters(); final Map<GenericType, SqlType> reservedGenerics = new HashMap<>(); final List<SqlArgument> functionArgumentTypesWithResolvedLambdaType = new ArrayList<>(); // second pass over the function arguments to properly do lambda type checking for (int i = 0; i < arguments.size(); i++) { final Expression expression = arguments.get(i); final ParamType parameter = paramTypes.get(i); if (expression instanceof LambdaFunctionCall) { // the function returned from the UDF factory should have lambda // at this index in the function arguments if there's a // lambda node at this index in the function node argument list if (!(parameter instanceof LambdaType)) { throw new RuntimeException(String.format("Error while processing lambda function." + "Expected lambda parameter but was %s" + "This is most likely an internal error and a " + "Github issue should be filed for debugging. " + "Include the function name, the parameters passed in, the expected " + "signature, and any other relevant information.", parameter.toString())); } final ArrayList<SqlType> lambdaSqlTypes = new ArrayList<>(); final Map<String, SqlType> variableTypeMapping = mapLambdaParametersToTypes( (LambdaFunctionCall) expression, (LambdaType) parameter, reservedGenerics, lambdaSqlTypes ); final Map<String,SqlType> updateLambdaMapping = LambdaMappingUtil.resolveOldAndNewLambdaMapping(variableTypeMapping, lambdaMapping); final SqlType resolvedLambdaReturnType = expressionTypeManager.getExpressionSqlType(expression, updateLambdaMapping); final SqlArgument lambdaArgument = SqlArgument.of( SqlLambdaResolved.of(lambdaSqlTypes, resolvedLambdaReturnType)); functionArgumentTypesWithResolvedLambdaType.add(lambdaArgument); argumentInfoForFunction.add( ArgumentInfo.of( lambdaArgument, new HashMap<>(updateLambdaMapping))); } else { functionArgumentTypesWithResolvedLambdaType.add(functionArgumentTypes.get(i)); argumentInfoForFunction.add( ArgumentInfo.of( functionArgumentTypes.get(i), new HashMap<>(lambdaMapping))); } if (GenericsUtil.hasGenerics(parameter)) { final Pair<Boolean, Optional<KsqlException>> success = GenericsUtil.reserveGenerics( parameter, functionArgumentTypesWithResolvedLambdaType.get(i), reservedGenerics ); if (!success.getLeft() && success.getRight().isPresent()) { throw success.getRight().get(); } } } returnSchema = function.getReturnType(functionArgumentTypesWithResolvedLambdaType); return new FunctionTypeInfo( argumentInfoForFunction, returnSchema, function ); } }
@Test public void shouldResolveLambdaWithoutGenerics() { // Given: givenUdfWithNameAndReturnType("SmallLambda", SqlTypes.DOUBLE); when(function.parameters()).thenReturn( ImmutableList.of( ArrayType.of(ParamTypes.DOUBLE), LambdaType.of(ImmutableList.of(ParamTypes.INTEGER), ParamTypes.INTEGER))); final FunctionCall expression = new FunctionCall(FunctionName.of("SmallLambda"), ImmutableList.of( ARRAYCOL, new LambdaFunctionCall(ImmutableList.of("2.3"), new ArithmeticBinaryExpression( Operator.ADD, new DoubleLiteral(2.3), new DoubleLiteral(2.3))))); // When: final FunctionTypeInfo argumentsAndContexts = FunctionArgumentsUtil.getFunctionTypeInfo(expressionTypeManager, expression, udfFactory, Collections.emptyMap()); // Then: assertThat(argumentsAndContexts.getReturnType(), is(SqlTypes.DOUBLE)); assertThat(argumentsAndContexts.getArgumentInfos().size(), is(2)); verify(udfFactory).getFunction( ImmutableList.of( SqlArgument.of(SqlTypes.array(SqlTypes.DOUBLE)), SqlArgument.of(SqlLambda.of(1)) ) ); verify(function).getReturnType( ImmutableList.of( SqlArgument.of(SqlTypes.array(SqlTypes.DOUBLE)), SqlArgument.of(SqlLambdaResolved.of(ImmutableList.of(SqlTypes.INTEGER), SqlTypes.DOUBLE)) ) ); }
protected boolean databaseForBothDbInterfacesIsTheSame( DatabaseInterface primary, DatabaseInterface secondary ) { if ( primary == null || secondary == null ) { throw new IllegalArgumentException( "DatabaseInterface shouldn't be null!" ); } if ( primary.getPluginId() == null || secondary.getPluginId() == null ) { return false; } if ( primary.getPluginId().equals( secondary.getPluginId() ) ) { return true; } return primary.getClass().isAssignableFrom( secondary.getClass() ); }
@Test public void databases_WithSameDbConnTypes_AreNotSame_IfPluginIdIsNull() { DatabaseInterface mssqlServerDatabaseMeta = new MSSQLServerDatabaseMeta(); mssqlServerDatabaseMeta.setPluginId( null ); assertFalse( databaseMeta.databaseForBothDbInterfacesIsTheSame( mssqlServerDatabaseMeta, mssqlServerDatabaseMeta ) ); }
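A minimal usage sketch of the comparison above, called from where the protected method is visible (as in the test): matching plugin ids short-circuit to true, while a null id on either side yields false before the class-assignability check is reached. The plugin id value here is hypothetical.

DatabaseInterface first = new MSSQLServerDatabaseMeta();
DatabaseInterface second = new MSSQLServerDatabaseMeta();
first.setPluginId( "MSSQL" );   // hypothetical plugin id
second.setPluginId( "MSSQL" );
boolean same = databaseMeta.databaseForBothDbInterfacesIsTheSame( first, second );   // true: same plugin id
second.setPluginId( null );
boolean nullId = databaseMeta.databaseForBothDbInterfacesIsTheSame( first, second ); // false: null id short-circuits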
@Override public void initializeState(StateInitializationContext context) throws Exception { if (isPartitionCommitTriggerEnabled()) { partitionCommitPredicate = PartitionCommitPredicate.create(conf, getUserCodeClassloader(), partitionKeys); } currentNewPartitions = new HashSet<>(); newPartitions = new TreeMap<>(); committablePartitions = new HashSet<>(); inProgressPartitions = new HashMap<>(); super.initializeState(context); }
@Test void testCommitFileWhenPartitionIsCommittableByProcessTime() throws Exception { // the rolling policy is not to roll file by filesize and roll file after one day, // it can ensure the file can be closed only when the partition is committable in this test. FileSystemTableSink.TableRollingPolicy tableRollingPolicy = new FileSystemTableSink.TableRollingPolicy( false, Long.MAX_VALUE, Duration.ofDays(1).toMillis(), Duration.ofDays(1).toMillis()); List<String> partitionKeys = Collections.singletonList("d"); // commit delay is 1 second with process-time trigger Configuration conf = getProcTimeCommitTriggerConf(Duration.ofSeconds(1).toMillis()); OperatorSubtaskState state; long currentTimeMillis = System.currentTimeMillis(); try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness = create(tableRollingPolicy, partitionKeys, conf)) { harness.setup(); harness.initializeEmptyState(); harness.open(); harness.setProcessingTime(currentTimeMillis); harness.processElement(row("1"), 0); harness.processElement(row("2"), 0); state = harness.snapshot(1, 1); harness.processElement(row("3"), 0); harness.notifyOfCompletedCheckpoint(1); // assert files aren't committed in {1, 2} partitions assertThat(isPartitionFileCommitted("1", 0, 0)).isFalse(); assertThat(isPartitionFileCommitted("2", 0, 1)).isFalse(); } // first retry try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness = create(tableRollingPolicy, partitionKeys, conf)) { harness.setup(); harness.initializeState(state); harness.open(); harness.processElement(row("3"), 0); // simulate waiting for 2 seconds, now partition {3} is committable currentTimeMillis += Duration.ofSeconds(2).toMillis(); harness.setProcessingTime(currentTimeMillis); harness.processElement(row("4"), 0); harness.snapshot(2, 2); harness.notifyOfCompletedCheckpoint(2); // only file in partition {3} should be committed // assert files are committed assertThat(isPartitionFileCommitted("3", 0, 2)).isTrue(); assertThat(isPartitionFileCommitted("4", 0, 3)).isFalse(); // simulate waiting for 2 seconds again, now partition {1} is committable currentTimeMillis += Duration.ofSeconds(2).toMillis(); harness.setProcessingTime(currentTimeMillis); state = harness.snapshot(3, 3); harness.notifyOfCompletedCheckpoint(3); assertThat(isPartitionFileCommitted("4", 0, 3)).isTrue(); } // second retry try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness = create(tableRollingPolicy, partitionKeys, conf)) { harness.setup(); harness.initializeState(state); harness.open(); harness.processElement(row("4"), 0); harness.processElement(row("4"), 0); harness.snapshot(4, 4); harness.processElement(row("5"), 5); harness.endInput(); // assert files in all partition have been committed assertThat(isPartitionFileCommitted("4", 0, 4)).isTrue(); assertThat(isPartitionFileCommitted("5", 0, 5)).isTrue(); } }
public Expression toPredicate(TupleDomain<String> tupleDomain) { if (tupleDomain.isNone()) { return FALSE_LITERAL; } Map<String, Domain> domains = tupleDomain.getDomains().get(); return domains.entrySet().stream() .sorted(comparing(entry -> entry.getKey())) .map(entry -> toPredicate(entry.getValue(), new SymbolReference(entry.getKey()))) .collect(collectingAndThen(toImmutableList(), ExpressionUtils::combineConjuncts)); }
@Test public void testInOptimization() { Domain testDomain = Domain.create( ValueSet.all(BIGINT) .subtract(ValueSet.ofRanges( Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L), Range.equal(BIGINT, 3L))), false); TupleDomain<String> tupleDomain = withColumnDomains(ImmutableMap.<String, Domain>builder().put(C_BIGINT, testDomain).build()); assertEquals(toPredicate(tupleDomain), not(in(C_BIGINT, ImmutableList.of(1L, 2L, 3L)))); testDomain = Domain.create( ValueSet.ofRanges( Range.lessThan(BIGINT, 4L)).intersect( ValueSet.all(BIGINT) .subtract(ValueSet.ofRanges(Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L), Range.equal(BIGINT, 3L)))), false); tupleDomain = withColumnDomains(ImmutableMap.<String, Domain>builder().put(C_BIGINT, testDomain).build()); assertEquals(toPredicate(tupleDomain), and(lessThan(C_BIGINT, bigintLiteral(4L)), not(in(C_BIGINT, ImmutableList.of(1L, 2L, 3L))))); testDomain = Domain.create(ValueSet.ofRanges( Range.range(BIGINT, 1L, true, 3L, true), Range.range(BIGINT, 5L, true, 7L, true), Range.range(BIGINT, 9L, true, 11L, true)), false); tupleDomain = withColumnDomains(ImmutableMap.<String, Domain>builder().put(C_BIGINT, testDomain).build()); assertEquals(toPredicate(tupleDomain), or(between(C_BIGINT, bigintLiteral(1L), bigintLiteral(3L)), (between(C_BIGINT, bigintLiteral(5L), bigintLiteral(7L))), (between(C_BIGINT, bigintLiteral(9L), bigintLiteral(11L))))); testDomain = Domain.create( ValueSet.ofRanges( Range.lessThan(BIGINT, 4L)) .intersect(ValueSet.all(BIGINT) .subtract(ValueSet.ofRanges(Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L), Range.equal(BIGINT, 3L)))) .union(ValueSet.ofRanges(Range.range(BIGINT, 7L, true, 9L, true))), false); tupleDomain = withColumnDomains(ImmutableMap.<String, Domain>builder().put(C_BIGINT, testDomain).build()); assertEquals(toPredicate(tupleDomain), or(and(lessThan(C_BIGINT, bigintLiteral(4L)), not(in(C_BIGINT, ImmutableList.of(1L, 2L, 3L)))), between(C_BIGINT, bigintLiteral(7L), bigintLiteral(9L)))); testDomain = Domain.create( ValueSet.ofRanges(Range.lessThan(BIGINT, 4L)) .intersect(ValueSet.all(BIGINT) .subtract(ValueSet.ofRanges(Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L), Range.equal(BIGINT, 3L)))) .union(ValueSet.ofRanges(Range.range(BIGINT, 7L, false, 9L, false), Range.range(BIGINT, 11L, false, 13L, false))), false); tupleDomain = withColumnDomains(ImmutableMap.<String, Domain>builder().put(C_BIGINT, testDomain).build()); assertEquals(toPredicate(tupleDomain), or( and(lessThan(C_BIGINT, bigintLiteral(4L)), not(in(C_BIGINT, ImmutableList.of(1L, 2L, 3L)))), and(greaterThan(C_BIGINT, bigintLiteral(7L)), lessThan(C_BIGINT, bigintLiteral(9L))), and(greaterThan(C_BIGINT, bigintLiteral(11L)), lessThan(C_BIGINT, bigintLiteral(13L))))); }
@Override public URL getResource(String name) { ClassLoadingStrategy loadingStrategy = getClassLoadingStrategy(name); log.trace("Received request to load resource '{}'", name); for (ClassLoadingStrategy.Source classLoadingSource : loadingStrategy.getSources()) { URL url = null; switch (classLoadingSource) { case APPLICATION: url = super.getResource(name); break; case PLUGIN: url = findResource(name); break; case DEPENDENCIES: url = findResourceFromDependencies(name); break; } if (url != null) { log.trace("Found resource '{}' in {} classpath", name, classLoadingSource); return url; } else { log.trace("Couldn't find resource '{}' in {}", name, classLoadingSource); } } return null; }
@Test void parentLastGetResourceExistsInParentAndDependencyAndPlugin() throws URISyntaxException, IOException { URL resource = parentLastPluginClassLoader.getResource("META-INF/file-in-both-parent-and-dependency-and-plugin"); assertFirstLine("plugin", resource); }
@Override public void handleRequest(RestRequest request, RequestContext requestContext, Callback<RestResponse> callback) { //This code path cannot accept content types or accept types that contain //multipart/related. This is because these types of requests will usually have very large payloads and therefore //would degrade server performance since RestRequest reads everything into memory. if (!isMultipart(request, requestContext, callback)) { _restRestLiServer.handleRequest(request, requestContext, callback); } }
@Test(dataProvider = "restOrStream") public void testSyncNullObject404(final RestOrStream restOrStream) throws Exception { final StatusCollectionResource statusResource = getMockResource(StatusCollectionResource.class); EasyMock.expect(statusResource.get(eq(1L))).andReturn(null).once(); replay(statusResource); Callback<RestResponse> restResponseCallback = new Callback<RestResponse>() { @Override public void onSuccess(RestResponse restResponse) { fail("We should not get a success here. The server should have returned a 404!"); } @Override public void onError(Throwable e) { RestException restException = (RestException) e; assertEquals(restException.getResponse().getStatus(), 404, "We should get a 404 back here!"); EasyMock.verify(statusResource); EasyMock.reset(statusResource); } }; if (restOrStream == RestOrStream.REST) { RestRequest request = new RestRequestBuilder(new URI("/statuses/1")) .setHeader(RestConstants.HEADER_RESTLI_PROTOCOL_VERSION, AllProtocolVersions.BASELINE_PROTOCOL_VERSION.toString()).build(); _server.handleRequest(request, new RequestContext(), restResponseCallback); } else { StreamRequest streamRequest = new StreamRequestBuilder(new URI("/statuses/1")) .setHeader(RestConstants.HEADER_RESTLI_PROTOCOL_VERSION, AllProtocolVersions.BASELINE_PROTOCOL_VERSION.toString()) .build(EntityStreams.emptyStream()); Callback<StreamResponse> callback = new Callback<StreamResponse>() { @Override public void onSuccess(StreamResponse streamResponse) { fail("We should not get a success here. The server should have returned a 404!"); } @Override public void onError(Throwable e) { Messages.toRestException((StreamException) e, new Callback<RestException>() { @Override public void onError(Throwable e) { Assert.fail(); } @Override public void onSuccess(RestException result) { restResponseCallback.onError(result); } }); } }; _server.handleRequest(streamRequest, new RequestContext(), callback); } }
public Integer doCall() throws Exception { if (all) { client(Integration.class).delete(); printer().println("Integrations deleted"); } else { if (names == null) { throw new RuntimeCamelException("Missing integration name as argument or --all option."); } for (String name : Arrays.stream(names).map(KubernetesHelper::sanitize).toList()) { List<StatusDetails> status = client(Integration.class).withName(name).delete(); if (status.isEmpty()) { printer().printf("Integration %s deletion skipped - not found%n", name); } else { printer().printf("Integration %s deleted%n", name); } } } return 0; }
@Test public void shouldDeleteAll() throws Exception { Integration integration1 = createIntegration("foo"); Integration integration2 = createIntegration("bar"); kubernetesClient.resources(Integration.class).resource(integration1).create(); kubernetesClient.resources(Integration.class).resource(integration2).create(); IntegrationDelete command = createCommand(); command.all = true; command.doCall(); Assertions.assertEquals("Integrations deleted", printer.getOutput()); Assertions.assertEquals(0, kubernetesClient.resources(Integration.class).list().getItems().size()); }
@Override public ExportResult<CalendarContainerResource> export( UUID jobId, TokensAndUrlAuthData authData, Optional<ExportInformation> exportInformation) { if (!exportInformation.isPresent()) { return exportCalendars(authData, Optional.empty()); } else { StringPaginationToken paginationToken = (StringPaginationToken) exportInformation.get().getPaginationData(); if (paginationToken != null && paginationToken.getToken().startsWith(CALENDAR_TOKEN_PREFIX)) { // Next thing to export is more calendars return exportCalendars(authData, Optional.of(paginationToken)); } else { // Next thing to export is events IdOnlyContainerResource idOnlyContainerResource = (IdOnlyContainerResource) exportInformation.get().getContainerResource(); Optional<PaginationData> pageData = Optional.ofNullable(paginationToken); return getCalendarEvents(authData, idOnlyContainerResource.getId(), pageData); } } }
@Test public void exportEventSubsequentSet() throws IOException { setUpSingleEventResponse(); // Looking at subsequent page, with no pages after it ContainerResource containerResource = new IdOnlyContainerResource(CALENDAR_ID); PaginationData paginationData = new StringPaginationToken(EVENT_TOKEN_PREFIX + NEXT_TOKEN); ExportInformation exportInformation = new ExportInformation(paginationData, containerResource); eventListResponse.setNextPageToken(null); // Run test ExportResult<CalendarContainerResource> result = googleCalendarExporter.export(UUID.randomUUID(), null, Optional.of(exportInformation)); // Check results // Verify correct methods were called in order InOrder inOrder = Mockito.inOrder(eventListRequest); inOrder.verify(eventListRequest).setPageToken(NEXT_TOKEN); inOrder.verify(eventListRequest).execute(); // Check pagination token ContinuationData continuationData = (ContinuationData) result.getContinuationData(); StringPaginationToken paginationToken = (StringPaginationToken) continuationData.getPaginationData(); assertThat(paginationToken).isNull(); }
List<Token> tokenize() throws ScanException {
        List<Token> tokenList = new ArrayList<Token>();
        StringBuilder buf = new StringBuilder();

        while (pointer < patternLength) {
            char c = pattern.charAt(pointer);
            pointer++;

            switch (state) {
            case LITERAL_STATE:
                handleLiteralState(c, tokenList, buf);
                break;
            case START_STATE:
                handleStartState(c, tokenList, buf);
                break;
            case DEFAULT_VAL_STATE:
                handleDefaultValueState(c, tokenList, buf);
                break;
            default:
            }
        }

        // EOS
        switch (state) {
        case LITERAL_STATE:
            addLiteralToken(tokenList, buf);
            break;
        case DEFAULT_VAL_STATE:
            // trailing colon. see also LOGBACK-1140
            buf.append(CoreConstants.COLON_CHAR);
            addLiteralToken(tokenList, buf);
            break;
        case START_STATE:
            // trailing $. see also LOGBACK-1149
            buf.append(CoreConstants.DOLLAR);
            addLiteralToken(tokenList, buf);
            break;
        }
        return tokenList;
    }
@Test public void literalContainingColon() throws ScanException { String input = "a:b"; Tokenizer tokenizer = new Tokenizer(input); List<Token> tokenList = tokenizer.tokenize(); witnessList.add(new Token(Token.Type.LITERAL, "a")); witnessList.add(new Token(Token.Type.LITERAL, ":b")); assertEquals(witnessList, tokenList); }
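A hedged sketch of the EOS branches above: a trailing ':' (LOGBACK-1140) or '$' (LOGBACK-1149) is re-appended as a literal rather than silently dropped. Following the token split shown in the literalContainingColon test, the expected outputs are noted as comments; the exact split is an inference from the handler methods, not asserted by the source.

List<Token> colon = new Tokenizer("a:").tokenize();  // likely LITERAL "a", then LITERAL ":"
List<Token> dollar = new Tokenizer("a$").tokenize(); // likely LITERAL "a", then LITERAL "$"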
@Override public String getRawSourceHash(Component file) { checkComponentArgument(file); if (rawSourceHashesByKey.containsKey(file.getKey())) { return checkSourceHash(file.getKey(), rawSourceHashesByKey.get(file.getKey())); } else { String newSourceHash = computeRawSourceHash(file); rawSourceHashesByKey.put(file.getKey(), newSourceHash); return checkSourceHash(file.getKey(), newSourceHash); } }
@Test void getRawSourceHash_returns_hash_of_lines_from_SourceLinesRepository() { sourceLinesRepository.addLines(FILE_REF, SOME_LINES); String rawSourceHash = underTest.getRawSourceHash(FILE_COMPONENT); SourceHashComputer sourceHashComputer = new SourceHashComputer(); for (int i = 0; i < SOME_LINES.length; i++) { sourceHashComputer.addLine(SOME_LINES[i], i < (SOME_LINES.length - 1)); } assertThat(rawSourceHash).isEqualTo(sourceHashComputer.getHash()); }
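A short sketch of the memoization in getRawSourceHash, reusing the fixtures above: the first call computes the hash and stores it under the component key, so a repeated call never re-reads the source lines.

sourceLinesRepository.addLines(FILE_REF, SOME_LINES);
String first = underTest.getRawSourceHash(FILE_COMPONENT);  // computed via computeRawSourceHash(...)
String second = underTest.getRawSourceHash(FILE_COMPONENT); // served from rawSourceHashesByKey
// first.equals(second) holds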
public static CoordinatorRecord newGroupMetadataRecord( ClassicGroup group, Map<String, byte[]> assignment, MetadataVersion metadataVersion ) { List<GroupMetadataValue.MemberMetadata> members = new ArrayList<>(group.allMembers().size()); group.allMembers().forEach(member -> { byte[] subscription = group.protocolName().map(member::metadata).orElse(null); if (subscription == null) { throw new IllegalStateException("Attempted to write non-empty group metadata with no defined protocol."); } byte[] memberAssignment = assignment.get(member.memberId()); if (memberAssignment == null) { throw new IllegalStateException("Attempted to write member " + member.memberId() + " of group " + group.groupId() + " with no assignment."); } members.add( new GroupMetadataValue.MemberMetadata() .setMemberId(member.memberId()) .setClientId(member.clientId()) .setClientHost(member.clientHost()) .setRebalanceTimeout(member.rebalanceTimeoutMs()) .setSessionTimeout(member.sessionTimeoutMs()) .setGroupInstanceId(member.groupInstanceId().orElse(null)) .setSubscription(subscription) .setAssignment(memberAssignment) ); }); return new CoordinatorRecord( new ApiMessageAndVersion( new GroupMetadataKey() .setGroup(group.groupId()), (short) 2 ), new ApiMessageAndVersion( new GroupMetadataValue() .setProtocol(group.protocolName().orElse(null)) .setProtocolType(group.protocolType().orElse("")) .setGeneration(group.generationId()) .setLeader(group.leaderOrNull()) .setCurrentStateTimestamp(group.currentStateTimestampOrDefault()) .setMembers(members), metadataVersion.groupMetadataValueVersion() ) ); }
@Test public void testNewGroupMetadataRecordThrowsWhenNullSubscription() { Time time = new MockTime(); List<GroupMetadataValue.MemberMetadata> expectedMembers = new ArrayList<>(); expectedMembers.add( new GroupMetadataValue.MemberMetadata() .setMemberId("member-1") .setClientId("client-1") .setClientHost("host-1") .setRebalanceTimeout(1000) .setSessionTimeout(1500) .setGroupInstanceId("group-instance-1") .setSubscription(new byte[]{0, 1}) .setAssignment(new byte[]{1, 2}) ); ClassicGroup group = new ClassicGroup( new LogContext(), "group-id", ClassicGroupState.PREPARING_REBALANCE, time, mock(GroupCoordinatorMetricsShard.class) ); expectedMembers.forEach(member -> { JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestProtocolCollection(); protocols.add(new JoinGroupRequestProtocol() .setName("range") .setMetadata(null)); group.add(new ClassicGroupMember( member.memberId(), Optional.of(member.groupInstanceId()), member.clientId(), member.clientHost(), member.rebalanceTimeout(), member.sessionTimeout(), "consumer", protocols, member.assignment() )); }); assertThrows(IllegalStateException.class, () -> GroupCoordinatorRecordHelpers.newGroupMetadataRecord( group, Collections.emptyMap(), MetadataVersion.IBP_3_5_IV2 )); }
public OpenAPI read(Class<?> cls) { return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>()); }
@Test public void test4412PathWildcards() { Reader reader = new Reader(new OpenAPI()); OpenAPI openAPI = reader.read(Ticket4412Resource.class); String yaml = "openapi: 3.0.1\n" + "paths:\n" + " /test/sws/{var}:\n" + " get:\n" + " operationId: getCart\n" + " parameters:\n" + " - name: var\n" + " in: path\n" + " required: true\n" + " schema:\n" + " pattern: .*\n" + " type: string\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " text/xml:\n" + " schema:\n" + " type: array\n" + " items:\n" + " type: string"; SerializationMatchers.assertEqualsToYaml(openAPI, yaml); }
protected static String[] getOAuthPrefix(final Host bookmark) { if(StringUtils.isNotBlank(bookmark.getCredentials().getUsername())) { return new String[]{ String.format("%s (%s)", bookmark.getProtocol().getOAuthClientId(), bookmark.getCredentials().getUsername()), String.format("%s (%s)", bookmark.getProtocol().getDescription(), bookmark.getCredentials().getUsername()) }; } return new String[]{ bookmark.getProtocol().getOAuthClientId(), bookmark.getProtocol().getDescription() }; }
@Test public void testGetOAuthPrefix() { final String[] prefix = DefaultHostPasswordStore.getOAuthPrefix(new Host(new TestProtocol(Scheme.https) { @Override public String getOAuthClientId() { return "clientid"; } @Override public String getOAuthClientSecret() { return "clientsecret"; } @Override public String getOAuthRedirectUrl() { return "x-cyberduck-action:oauth"; } })); assertEquals("clientid", prefix[0]); assertEquals("Test", prefix[1]); }
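For the username branch not exercised by this test, a hedged sketch (assuming Host exposes its Credentials via getCredentials(), and using a hypothetical username): both prefixes gain the username in parentheses.

final Host host = new Host(protocol); // the same TestProtocol as in the test above
host.getCredentials().setUsername("alice"); // hypothetical username
final String[] prefix = DefaultHostPasswordStore.getOAuthPrefix(host);
// expected: prefix[0] == "clientid (alice)", prefix[1] == "Test (alice)"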
public void restore(final List<Pair<byte[], byte[]>> backupCommands) { // Delete the command topic deleteCommandTopicIfExists(); // Create the command topic KsqlInternalTopicUtils.ensureTopic(commandTopicName, serverConfig, topicClient); // Restore the commands restoreCommandTopic(backupCommands); }
@SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT") @Test public void shouldThrowIfCannotCreateTopic() { // Given: when(topicClient.isTopicExists(COMMAND_TOPIC_NAME)).thenReturn(false); doThrow(new RuntimeException("denied")).when(topicClient) .createTopic(COMMAND_TOPIC_NAME, INTERNAL_TOPIC_PARTITION_COUNT, INTERNAL_TOPIC_REPLICAS_COUNT, INTERNAL_TOPIC_CONFIG); // When: final Exception e = assertThrows( RuntimeException.class, () -> restoreCommandTopic.restore(Collections.singletonList(BACKUP_COMMANDS.get(0)))); // Then: assertThat(e.getMessage(), containsString("denied")); verify(topicClient, times(2)).isTopicExists(COMMAND_TOPIC_NAME); verifyCreateCommandTopic(); verifyNoMoreInteractions(topicClient); verifyNoMoreInteractions(kafkaProducer); }
@Override public boolean createTopic( final String topic, final int numPartitions, final short replicationFactor, final Map<String, ?> configs, final CreateTopicsOptions createOptions ) { final Optional<Long> retentionMs = KafkaTopicClient.getRetentionMs(configs); if (isTopicExists(topic)) { validateTopicProperties(topic, numPartitions, replicationFactor, retentionMs); return false; } final short resolvedReplicationFactor = replicationFactor == TopicProperties.DEFAULT_REPLICAS ? getDefaultClusterReplication() : replicationFactor; final NewTopic newTopic = new NewTopic(topic, numPartitions, resolvedReplicationFactor); newTopic.configs(toStringConfigs(configs)); try { LOG.info("Creating topic '{}' {}", topic, (createOptions.shouldValidateOnly()) ? "(ONLY VALIDATE)" : "" ); ExecutorUtil.executeWithRetries( () -> adminClient.get().createTopics( Collections.singleton(newTopic), createOptions ).all().get(), ExecutorUtil.RetryBehaviour.ON_RETRYABLE); return true; } catch (final InterruptedException e) { Thread.currentThread().interrupt(); throw new KafkaResponseGetFailedException( "Failed to guarantee existence of topic " + topic, e); } catch (final TopicExistsException e) { // if the topic already exists, it is most likely because another node just created it. // ensure that it matches the partition count, replication factor, and retention // before returning success validateTopicProperties(topic, numPartitions, replicationFactor, retentionMs); return false; } catch (final TopicAuthorizationException e) { throw new KsqlTopicAuthorizationException( AclOperation.CREATE, Collections.singleton(topic)); } catch (final Exception e) { throw new KafkaResponseGetFailedException( "Failed to guarantee existence of topic " + topic, e); } }
@Test public void shouldNotCreateTopicIfItAlreadyExistsWithDefaultRf() { // Given: givenTopicExists("someTopic", 1, 2); givenTopicConfigs( "someTopic", overriddenConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "8640000000") ); // When: kafkaTopicClient.createTopic("someTopic", 1, (short) -1, configs); // Then: verify(adminClient, never()).createTopics(any(), any()); }
public ECPoint getQ() { return q; }
@Test
    public void shouldConvertPublicPoint() {
        final EcPrivateKey key = new EcPrivateKey(new ECPrivateKeySpec(D, SPEC));
        assertEquals(Q, key.getQ());
        assertTrue(key.getQ().isNormalized());
    }
@Override
	public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
		super.onDataReceived(device, data);

		if (data.size() < 2) {
			onInvalidDataReceived(device, data);
			return;
		}

		// Read flags
		int offset = 0;
		final int flags = data.getIntValue(Data.FORMAT_UINT8, offset);
		final int heartRateType = (flags & 0x01) == 0 ? Data.FORMAT_UINT8 : Data.FORMAT_UINT16_LE;
		final int sensorContactStatus = (flags & 0x06) >> 1;
		final boolean sensorContactSupported = sensorContactStatus == 2 || sensorContactStatus == 3;
		final boolean sensorContactDetected = sensorContactStatus == 3;
		final boolean energyExpandedPresent = (flags & 0x08) != 0;
		final boolean rrIntervalsPresent = (flags & 0x10) != 0;
		offset += 1;

		// Validate packet length
		if (data.size() < 1 + (heartRateType & 0x0F) + (energyExpandedPresent ? 2 : 0) + (rrIntervalsPresent ? 2 : 0)) {
			onInvalidDataReceived(device, data);
			return;
		}

		// Prepare data
		final Boolean sensorContact = sensorContactSupported ? sensorContactDetected : null;

		final int heartRate = data.getIntValue(heartRateType, offset);
		offset += heartRateType & 0xF;

		Integer energyExpanded = null;
		if (energyExpandedPresent) {
			energyExpanded = data.getIntValue(Data.FORMAT_UINT16_LE, offset);
			offset += 2;
		}

		List<Integer> rrIntervals = null;
		if (rrIntervalsPresent) {
			final int count = (data.size() - offset) / 2;
			final List<Integer> intervals = new ArrayList<>(count);
			for (int i = 0; i < count; ++i) {
				intervals.add(data.getIntValue(Data.FORMAT_UINT16_LE, offset));
				offset += 2;
			}
			rrIntervals = Collections.unmodifiableList(intervals);
		}

		onHeartRateMeasurementReceived(device, heartRate, sensorContact, energyExpanded, rrIntervals);
	}
@Test public void onHeartRateMeasurementReceived_simple() { success = false; final Data data = new Data(new byte[] { 0, 85 }); response.onDataReceived(null, data); assertTrue(response.isValid()); assertTrue(success); assertEquals(85, heartRate); assertNull(contactDetected); assertNull(energyExpanded); assertNull(rrIntervals); }
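A hedged companion sketch for the RR-interval branch, reusing the test fixture above: flags 0x10 sets only the RR-interval bit, so the next byte is a UINT8 heart rate and the remaining two bytes one little-endian 16-bit interval.

// flags 0x10: RR intervals present, heart rate stays UINT8
final Data data = new Data(new byte[] { 0x10, 85, 0x34, 0x02 });
response.onDataReceived(null, data);
// expected: heartRate == 85, rrIntervals == [564] (0x0234), energyExpanded == null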
@Override public final String toString() { StringBuilder out = new StringBuilder(); appendTo(out); return out.toString(); }
@Test void requireThatPredicatesCanBeConstructedUsingConstructors() { assertEquals("country in [no, se] and age in [20..30]", new Conjunction(new FeatureSet("country", "no", "se"), new FeatureRange("age", 20L, 30L)).toString()); assertEquals("country not in [no, se] or age in [20..] or height in [..160]", new Disjunction(new Negation(new FeatureSet("country", "no", "se")), new FeatureRange("age", 20L, null), new FeatureRange("height", null, 160L)).toString()); }
@InvokeOnHeader(Web3jConstants.ETH_COINBASE) void ethCoinbase(Message message) throws IOException { Request<?, EthCoinbase> request = web3j.ethCoinbase(); setRequestId(message, request); EthCoinbase response = request.send(); boolean hasError = checkForError(message, response); if (!hasError) { message.setBody(response.getAddress()); } }
@Test public void ethCoinbaseTest() throws Exception { EthCoinbase response = Mockito.mock(EthCoinbase.class); Mockito.when(mockWeb3j.ethCoinbase()).thenReturn(request); Mockito.when(request.send()).thenReturn(response); Mockito.when(response.getAddress()).thenReturn("123"); Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_COINBASE); template.send(exchange); String body = exchange.getIn().getBody(String.class); assertEquals("123", body); }
@Udf public <T> List<T> concat( @UdfParameter(description = "First array of values") final List<T> left, @UdfParameter(description = "Second array of values") final List<T> right) { if (left == null && right == null) { return null; } final int leftSize = left != null ? left.size() : 0; final int rightSize = right != null ? right.size() : 0; final List<T> result = new ArrayList<>(leftSize + rightSize); if (left != null) { result.addAll(left); } if (right != null) { result.addAll(right); } return result; }
@Test public void shouldReturnNullForAllNullInputs() { final List<Long> result = udf.concat((List<Long>) null, (List<Long>) null); assertThat(result, is(nullValue())); }
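A quick sketch of the null handling in the UDF above: only the all-null case collapses to null, while a single null operand behaves like an empty list.

udf.concat(ImmutableList.of(1L, 2L), null);                  // -> [1, 2]
udf.concat(null, ImmutableList.of(3L));                      // -> [3]
udf.concat(ImmutableList.of(1L), ImmutableList.of(2L, 3L));  // -> [1, 2, 3]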
public static SerializableFunction<byte[], Row> getProtoBytesToRowFunction( String fileDescriptorPath, String messageName) { ProtoSchemaInfo dynamicProtoDomain = getProtoDomain(fileDescriptorPath, messageName); ProtoDomain protoDomain = dynamicProtoDomain.getProtoDomain(); @SuppressWarnings("unchecked") ProtoDynamicMessageSchema<DynamicMessage> protoDynamicMessageSchema = ProtoDynamicMessageSchema.forDescriptor(protoDomain, messageName); return new SimpleFunction<byte[], Row>() { @Override public Row apply(byte[] input) { try { List<String> messageElements = Splitter.on('.').splitToList(messageName); String messageTypeByName = messageElements.get(messageElements.size() - 1); final Descriptors.Descriptor descriptor = protoDomain .getFileDescriptor(dynamicProtoDomain.getFileName()) .findMessageTypeByName(messageTypeByName); DynamicMessage dynamicMessage = DynamicMessage.parseFrom(descriptor, input); SerializableFunction<DynamicMessage, Row> res = protoDynamicMessageSchema.getToRowFunction(); return res.apply(dynamicMessage); } catch (InvalidProtocolBufferException e) { LOG.error("Error parsing to DynamicMessage", e); throw new RuntimeException(e); } } }; }
@Test public void testProtoBytesToRowFunctionGenerateSerializableFunction() { SerializableFunction<byte[], Row> protoBytesToRowFunction = ProtoByteUtils.getProtoBytesToRowFunction(DESCRIPTOR_PATH, MESSAGE_NAME); Assert.assertNotNull(protoBytesToRowFunction); }
public static <T> Iterator<T> iterator(Class<T> expectedType, String factoryId, ClassLoader classLoader) throws Exception { Iterator<Class<T>> classIterator = classIterator(expectedType, factoryId, classLoader); return new NewInstanceIterator<>(classIterator); }
@Test public void loadServicesTcclAndGivenClassLoader() throws Exception { Class<ServiceLoaderTestInterface> type = ServiceLoaderTestInterface.class; String factoryId = "com.hazelcast.ServiceLoaderTestInterface"; ClassLoader given = new URLClassLoader(new URL[0]); Thread current = Thread.currentThread(); ClassLoader tccl = current.getContextClassLoader(); current.setContextClassLoader(new URLClassLoader(new URL[0])); Set<ServiceLoaderTestInterface> implementations = new HashSet<>(); Iterator<ServiceLoaderTestInterface> iterator = ServiceLoader.iterator(type, factoryId, given); while (iterator.hasNext()) { implementations.add(iterator.next()); } current.setContextClassLoader(tccl); assertEquals(1, implementations.size()); }
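A minimal usage sketch of the iterator factory, reusing the type and factory id from the test: classIterator resolves candidate classes first, and the wrapping NewInstanceIterator instantiates each one lazily on next().

Iterator<ServiceLoaderTestInterface> it = ServiceLoader.iterator(
        ServiceLoaderTestInterface.class,
        "com.hazelcast.ServiceLoaderTestInterface",
        getClass().getClassLoader());
while (it.hasNext()) {
    ServiceLoaderTestInterface impl = it.next(); // instantiated on demand
}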
public static String buildGlueExpression(Map<Column, Domain> partitionPredicates)
    {
        List<String> perColumnExpressions = new ArrayList<>();
        int expressionLength = 0;

        for (Map.Entry<Column, Domain> partitionPredicate : partitionPredicates.entrySet()) {
            String columnName = partitionPredicate.getKey().getName();
            if (JSQL_PARSER_RESERVED_KEYWORDS.contains(columnName.toUpperCase(ENGLISH))) {
                // The column name is a reserved keyword in the grammar of the SQL parser used internally by Glue API
                continue;
            }
            Domain domain = partitionPredicate.getValue();
            if (domain != null && !domain.isAll()) {
                Optional<String> columnExpression = buildGlueExpressionForSingleDomain(columnName, domain);
                if (columnExpression.isPresent()) {
                    int newExpressionLength = expressionLength + columnExpression.get().length();
                    if (expressionLength > 0) {
                        newExpressionLength += CONJUNCT_SEPARATOR.length();
                    }
                    if (newExpressionLength > GLUE_EXPRESSION_CHAR_LIMIT) {
                        continue;
                    }
                    perColumnExpressions.add(columnExpression.get());
                    expressionLength = newExpressionLength;
                }
            }
        }

        return Joiner.on(CONJUNCT_SEPARATOR).join(perColumnExpressions);
    }
@Test public void testBuildGlueExpressionTupleDomainEqualsSingleValue() { Map<Column, Domain> predicates = new PartitionFilterBuilder(HIVE_TYPE_TRANSLATOR) .addStringValues("col1", "2020-01-01") .addStringValues("col2", "2020-02-20") .build(); String expression = buildGlueExpression(predicates); assertEquals(expression, "((col1 = '2020-01-01')) AND ((col2 = '2020-02-20'))"); }
@Override public double cdf(double k) { if (k < 0) { return 0.0; } else { return regularizedIncompleteBetaFunction(r, k + 1, p); } }
@Test public void testCdf() { System.out.println("cdf"); NegativeBinomialDistribution instance = new NegativeBinomialDistribution(3, 0.3); instance.rand(); assertEquals(0.027, instance.cdf(0), 1E-7); assertEquals(0.0837, instance.cdf(1), 1E-7); assertEquals(0.16308, instance.cdf(2), 1E-7); assertEquals(0.25569, instance.cdf(3), 1E-7); assertEquals(0.7975217, instance.cdf(10), 1E-7); }
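The asserted values can be cross-checked by hand: with r = 3 and p = 0.3 the distribution counts failures before the third success, with pmf P(X = k) = C(k + 2, k) * 0.3^3 * 0.7^k, and the cdf used above, the regularized incomplete beta I_p(r, k + 1), is its running sum. A short sketch:

// hand check of cdf(2) for r = 3, p = 0.3
double p = 0.3, q = 0.7;
double pmf0 = Math.pow(p, 3);             // C(2,0) * p^3       = 0.027
double pmf1 = 3 * Math.pow(p, 3) * q;     // C(3,1) * p^3 * q   = 0.0567
double pmf2 = 6 * Math.pow(p, 3) * q * q; // C(4,2) * p^3 * q^2 = 0.07938
double cdf2 = pmf0 + pmf1 + pmf2;         // 0.16308, matching the assertion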
public static DataflowRunner fromOptions(PipelineOptions options) {
    DataflowPipelineOptions dataflowOptions = PipelineOptionsValidator.validate(DataflowPipelineOptions.class, options);
    ArrayList<String> missing = new ArrayList<>();

    if (dataflowOptions.getAppName() == null) {
      missing.add("appName");
    }

    if (Strings.isNullOrEmpty(dataflowOptions.getRegion())
        && isServiceEndpoint(dataflowOptions.getDataflowEndpoint())) {
      missing.add("region");
    }
    if (missing.size() > 0) {
      throw new IllegalArgumentException(
          "Missing required pipeline options: " + Joiner.on(',').join(missing));
    }

    validateWorkerSettings(
        PipelineOptionsValidator.validate(DataflowPipelineWorkerPoolOptions.class, options));

    PathValidator validator = dataflowOptions.getPathValidator();
    String gcpTempLocation;
    try {
      gcpTempLocation = dataflowOptions.getGcpTempLocation();
    } catch (Exception e) {
      throw new IllegalArgumentException(
          "DataflowRunner requires gcpTempLocation, "
              + "but failed to retrieve a value from PipelineOptions", e);
    }
    validator.validateOutputFilePrefixSupported(gcpTempLocation);

    String stagingLocation;
    try {
      stagingLocation = dataflowOptions.getStagingLocation();
    } catch (Exception e) {
      throw new IllegalArgumentException(
          "DataflowRunner requires stagingLocation, "
              + "but failed to retrieve a value from PipelineOptions", e);
    }
    validator.validateOutputFilePrefixSupported(stagingLocation);

    if (!isNullOrEmpty(dataflowOptions.getSaveProfilesToGcs())) {
      validator.validateOutputFilePrefixSupported(dataflowOptions.getSaveProfilesToGcs());
    }

    if (dataflowOptions.getFilesToStage() != null) {
      // The user specifically requested these files, so fail now if they do not exist.
      // (automatically detected classpath elements are permitted to not exist, so later
      // staging will not fail on nonexistent files)
      dataflowOptions.getFilesToStage().stream()
          .forEach(
              stagedFileSpec -> {
                File localFile;
                if (stagedFileSpec.contains("=")) {
                  String[] components = stagedFileSpec.split("=", 2);
                  localFile = new File(components[1]);
                } else {
                  localFile = new File(stagedFileSpec);
                }
                if (!localFile.exists()) {
                  // should be FileNotFoundException, but for build-time backwards compatibility
                  // cannot add checked exception
                  throw new RuntimeException(
                      String.format("Non-existent files specified in filesToStage: %s", localFile));
                }
              });
    } else {
      dataflowOptions.setFilesToStage(
          detectClassPathResourcesToStage(DataflowRunner.class.getClassLoader(), options));
      if (dataflowOptions.getFilesToStage().isEmpty()) {
        throw new IllegalArgumentException("No files to stage has been found.");
      } else {
        LOG.info(
            "PipelineOptions.filesToStage was not specified. "
                + "Defaulting to files from the classpath: will stage {} files. "
                + "Enable logging at DEBUG level to see which files will be staged.",
            dataflowOptions.getFilesToStage().size());
        LOG.debug("Classpath elements: {}", dataflowOptions.getFilesToStage());
      }
    }

    // Verify jobName according to service requirements, truncating and converting to lowercase if
    // necessary.
    String jobName = dataflowOptions.getJobName().toLowerCase();
    checkArgument(
        jobName.matches("[a-z]([-a-z0-9]*[a-z0-9])?"),
        "JobName invalid; the name must consist of only the characters "
            + "[-a-z0-9], starting with a letter and ending with a letter "
            + "or number");
    if (!jobName.equals(dataflowOptions.getJobName())) {
      LOG.info(
          "PipelineOptions.jobName did not match the service requirements. "
              + "Using {} instead of {}.",
          jobName,
          dataflowOptions.getJobName());
    }
    dataflowOptions.setJobName(jobName);

    // Verify project
    String project = dataflowOptions.getProject();
    if (project.matches("[0-9]*")) {
      throw new IllegalArgumentException(
          "Project ID '"
              + project
              + "' invalid. Please make sure you specified the Project ID, not project number.");
    } else if (!project.matches(PROJECT_ID_REGEXP)) {
      throw new IllegalArgumentException(
          "Project ID '"
              + project
              + "' invalid. Please make sure you specified the Project ID, not project"
              + " description.");
    }

    DataflowPipelineDebugOptions debugOptions =
        dataflowOptions.as(DataflowPipelineDebugOptions.class);
    // Verify the number of worker threads is a valid value
    if (debugOptions.getNumberOfWorkerHarnessThreads() < 0) {
      throw new IllegalArgumentException(
          "Number of worker harness threads '"
              + debugOptions.getNumberOfWorkerHarnessThreads()
              + "' invalid. Please make sure the value is non-negative.");
    }

    // Verify that if recordJfrOnGcThrashing is set, the pipeline is at least on java 11
    if (dataflowOptions.getRecordJfrOnGcThrashing()
        && Environments.getJavaVersion() == Environments.JavaVersion.java8) {
      throw new IllegalArgumentException(
          "recordJfrOnGcThrashing is only supported on java 9 and up.");
    }

    if (dataflowOptions.isStreaming() && dataflowOptions.getGcsUploadBufferSizeBytes() == null) {
      dataflowOptions.setGcsUploadBufferSizeBytes(GCS_UPLOAD_BUFFER_SIZE_BYTES_DEFAULT);
    }

    // Adding the Java version to the SDK name for user's and support convenience.
    String agentJavaVer = "(JRE 8 environment)";
    if (Environments.getJavaVersion() != Environments.JavaVersion.java8) {
      agentJavaVer =
          String.format("(JRE %s environment)", Environments.getJavaVersion().specification());
    }

    DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
    String userAgentName = dataflowRunnerInfo.getName();
    Preconditions.checkArgument(
        !userAgentName.equals(""), "Dataflow runner's `name` property cannot be empty.");
    String userAgentVersion = dataflowRunnerInfo.getVersion();
    Preconditions.checkArgument(
        !userAgentVersion.equals(""), "Dataflow runner's `version` property cannot be empty.");
    String userAgent =
        String.format("%s/%s%s", userAgentName, userAgentVersion, agentJavaVer).replace(" ", "_");
    dataflowOptions.setUserAgent(userAgent);

    return new DataflowRunner(dataflowOptions);
  }
@Test public void testInvalidProfileLocation() throws IOException { DataflowPipelineOptions options = buildPipelineOptions(); options.setSaveProfilesToGcs("file://my/staging/location"); try { DataflowRunner.fromOptions(options); fail("fromOptions should have failed"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("Expected a valid 'gs://' path but was given")); } options.setSaveProfilesToGcs("my/staging/location"); try { DataflowRunner.fromOptions(options); fail("fromOptions should have failed"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("Expected a valid 'gs://' path but was given")); } }
@Override public DistroData getData(DistroKey key, String targetServer) { Member member = memberManager.find(targetServer); if (checkTargetServerStatusUnhealthy(member)) { throw new DistroException( String.format("[DISTRO] Cancel get snapshot caused by target server %s unhealthy", targetServer)); } DistroDataRequest request = new DistroDataRequest(); DistroData distroData = new DistroData(); distroData.setDistroKey(key); distroData.setType(DataOperation.QUERY); request.setDistroData(distroData); request.setDataOperation(DataOperation.QUERY); try { Response response = clusterRpcClientProxy.sendRequest(member, request); if (checkResponse(response)) { return ((DistroDataResponse) response).getDistroData(); } else { throw new DistroException( String.format("[DISTRO-FAILED] Get data request to %s failed, code: %d, message: %s", targetServer, response.getErrorCode(), response.getMessage())); } } catch (NacosException e) { throw new DistroException("[DISTRO-FAILED] Get distro data failed! ", e); } }
@Test void testGetDataForMemberNonExist() { assertThrows(DistroException.class, () -> { transportAgent.getData(new DistroKey(), member.getAddress()); }); }
public void importCounters(String[] counterNames, String[] counterKinds, long[] counterDeltas) { final int length = counterNames.length; if (counterKinds.length != length || counterDeltas.length != length) { throw new AssertionError("array lengths do not match"); } for (int i = 0; i < length; ++i) { final CounterName name = CounterName.named(counterPrefix + counterNames[i]); final String kind = counterKinds[i]; final long delta = counterDeltas[i]; switch (kind) { case "sum": counterFactory.longSum(name).addValue(delta); break; case "max": counterFactory.longMax(name).addValue(delta); break; case "min": counterFactory.longMin(name).addValue(delta); break; default: throw new IllegalArgumentException("unsupported counter kind: " + kind); } } }
@Test public void testSinglePreexistingCounter() throws Exception { Counter<Long, Long> sumCounter = counterSet.longSum(CounterName.named("stageName-systemName-dataset-sum_counter")); sumCounter.addValue(1000L); String[] names = {"sum_counter"}; String[] kinds = {"sum"}; long[] deltas = {122}; counters.importCounters(names, kinds, deltas); counterSet.extractUpdates(false, mockUpdateExtractor); verify(mockUpdateExtractor) .longSum(named("stageName-systemName-dataset-sum_counter"), false, 1122L); verifyNoMoreInteractions(mockUpdateExtractor); }
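A hedged sketch of the delta semantics above, reusing the test fixture: with the counter prefix the test implies ("stageName-systemName-dataset-"), repeated imports of the same name accumulate on one prefixed counter.

String[] names  = {"sum_counter"};
String[] kinds  = {"sum"};
counters.importCounters(names, kinds, new long[] {100});
counters.importCounters(names, kinds, new long[] {22});
// the longSum counter "stageName-systemName-dataset-sum_counter" has received 122 in total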
@Override public void warn(String msg) { logger.warn(msg); }
@Test public void testWarn() { Log mockLog = mock(Log.class); InternalLogger logger = new CommonsLogger(mockLog, "foo"); logger.warn("a"); verify(mockLog).warn("a"); }
@Override public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) { table.refresh(); if (lastPosition != null) { return discoverIncrementalSplits(lastPosition); } else { return discoverInitialSplits(); } }
@Test public void testIncrementalFromSnapshotTimestampWithInvalidIds() throws Exception { appendTwoSnapshots(); long invalidSnapshotTimestampMs = snapshot2.timestampMillis() + 1000L; ScanContext scanContextWithInvalidSnapshotId = ScanContext.builder() .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_SNAPSHOT_TIMESTAMP) .startSnapshotTimestamp(invalidSnapshotTimestampMs) .build(); ContinuousSplitPlannerImpl splitPlanner = new ContinuousSplitPlannerImpl( TABLE_RESOURCE.tableLoader().clone(), scanContextWithInvalidSnapshotId, null); assertThatThrownBy(() -> splitPlanner.planSplits(null)) .isInstanceOf(IllegalArgumentException.class) .hasMessageStartingWith("Cannot find a snapshot after:"); }
LinkedList<RewriteOp> makeRewriteOps( Iterable<String> srcFilenames, Iterable<String> destFilenames, boolean deleteSource, boolean ignoreMissingSource, boolean ignoreExistingDest) throws IOException { List<String> srcList = Lists.newArrayList(srcFilenames); List<String> destList = Lists.newArrayList(destFilenames); checkArgument( srcList.size() == destList.size(), "Number of source files %s must equal number of destination files %s", srcList.size(), destList.size()); LinkedList<RewriteOp> rewrites = Lists.newLinkedList(); for (int i = 0; i < srcList.size(); i++) { final GcsPath sourcePath = GcsPath.fromUri(srcList.get(i)); final GcsPath destPath = GcsPath.fromUri(destList.get(i)); if (ignoreExistingDest && !sourcePath.getBucket().equals(destPath.getBucket())) { throw new UnsupportedOperationException( "Skipping dest existence is only supported within a bucket."); } rewrites.addLast(new RewriteOp(sourcePath, destPath, deleteSource, ignoreMissingSource)); } return rewrites; }
@Test public void testMakeRewriteOpsInvalid() throws IOException { GcsUtil gcsUtil = gcsOptionsWithTestCredential().getGcsUtil(); thrown.expect(IllegalArgumentException.class); thrown.expectMessage("Number of source files 3"); gcsUtil.makeRewriteOps(makeStrings("s", 3), makeStrings("d", 1), false, false, false); }
public void checkForUpgradeAndExtraProperties() throws IOException { if (upgradesEnabled()) { checkForUpgradeAndExtraProperties(systemEnvironment.getAgentMd5(), systemEnvironment.getGivenAgentLauncherMd5(), systemEnvironment.getAgentPluginsMd5(), systemEnvironment.getTfsImplMd5()); } else { LOGGER.debug("[Agent Upgrade] Skipping check as there is no wrapping launcher to relaunch the agent JVM..."); } }
@Test void checkForUpgradeShouldKillAgentIfTfsMd5doesNotMatch() { when(systemEnvironment.getAgentMd5()).thenReturn("not-changing"); expectHeaderValue(SystemEnvironment.AGENT_CONTENT_MD5_HEADER, "not-changing"); when(systemEnvironment.getGivenAgentLauncherMd5()).thenReturn("not-changing"); expectHeaderValue(SystemEnvironment.AGENT_LAUNCHER_CONTENT_MD5_HEADER, "not-changing"); when(systemEnvironment.getAgentPluginsMd5()).thenReturn("not-changing"); expectHeaderValue(SystemEnvironment.AGENT_PLUGINS_ZIP_MD5_HEADER, "not-changing"); when(systemEnvironment.getTfsImplMd5()).thenReturn("old-tfs-md5"); expectHeaderValue(SystemEnvironment.AGENT_TFS_SDK_MD5_HEADER, "new-tfs-md5"); RuntimeException toBeThrown = new RuntimeException("Boo!"); doThrow(toBeThrown).when(jvmExitter).jvmExit(anyString(), anyString(), anyString()); try { agentUpgradeService.checkForUpgradeAndExtraProperties(); fail("should have done jvm exit"); } catch (Exception e) { assertThat(toBeThrown).isSameAs(e); } verify(jvmExitter).jvmExit("tfs-impl jar", "old-tfs-md5", "new-tfs-md5"); }
public static FaloTheBardClue forText(String text) { for (FaloTheBardClue clue : CLUES) { if (clue.text.equalsIgnoreCase(text)) { return clue; } } return null; }
@Test public void forTextEmptyString() { assertNull(FaloTheBardClue.forText("")); }
public static String join(List<String> src, String delimiter) {
    return src == null ? null : String.join(delimiter, src);
  }
@Test
  public void testJoin() {
    assertEquals("a|b", join(Arrays.asList("a", "b"), "|"));
    assertNull(join(null, "|"));
    assertEquals("a", join(Collections.singletonList("a"), "|"));
  }
@Override public boolean updateAccessConfig(PlainAccessConfig plainAccessConfig) { return aclPlugEngine.updateAccessConfig(plainAccessConfig); }
@Test(expected = AclException.class) public void createAndUpdateAccessAclNullSkExceptionTest() { String backupFileName = System.getProperty("rocketmq.home.dir") + File.separator + "conf/plain_acl_bak.yml".replace("/", File.separator); String targetFileName = System.getProperty("rocketmq.home.dir") + File.separator + "conf/plain_acl.yml".replace("/", File.separator); PlainAccessData backUpAclConfigMap = AclUtils.getYamlDataObject(backupFileName, PlainAccessData.class); AclUtils.writeDataObject(targetFileName, backUpAclConfigMap); PlainAccessConfig plainAccessConfig = new PlainAccessConfig(); plainAccessConfig.setAccessKey("RocketMQ33"); // secret key is null PlainAccessValidator plainAccessValidator = new PlainAccessValidator(); plainAccessValidator.updateAccessConfig(plainAccessConfig); AclUtils.writeDataObject(targetFileName, backUpAclConfigMap); }
@Override
  public synchronized void updateDockerRunCommand(
      DockerRunCommand dockerRunCommand, Container container)
      throws ContainerExecutionException {
    if (!requestsGpu(container)) {
      return;
    }

    Set<GpuDevice> assignedResources = getAssignedGpus(container);
    if (assignedResources == null || assignedResources.isEmpty()) {
      return;
    }

    Map<String, String> environment = new HashMap<>();
    StringJoiner gpuIndexList = new StringJoiner(",");
    for (GpuDevice gpuDevice : assignedResources) {
      gpuIndexList.add(String.valueOf(gpuDevice.getIndex()));
      LOG.info("nvidia docker2 assigned gpu index: " + gpuDevice.getIndex());
    }
    dockerRunCommand.addRuntime(nvidiaRuntime);
    environment.put(nvidiaVisibleDevices, gpuIndexList.toString());
    dockerRunCommand.addEnv(environment);
  }
@Test public void testPlugin() throws Exception { DockerRunCommand runCommand = new DockerRunCommand("container_1", "user", "fakeimage"); Map<String, List<String>> originalCommandline = copyCommandLine( runCommand.getDockerCommandWithArguments()); MyNvidiaDockerV2CommandPlugin commandPlugin = new MyNvidiaDockerV2CommandPlugin(); Container nmContainer = mock(Container.class); // getResourceMapping is null, so commandline won't be updated commandPlugin.updateDockerRunCommand(runCommand, nmContainer); Assert.assertTrue(commandlinesEquals(originalCommandline, runCommand.getDockerCommandWithArguments())); // no GPU resource assigned, so commandline won't be updated ResourceMappings resourceMappings = new ResourceMappings(); when(nmContainer.getResourceMappings()).thenReturn(resourceMappings); commandPlugin.updateDockerRunCommand(runCommand, nmContainer); Assert.assertTrue(commandlinesEquals(originalCommandline, runCommand.getDockerCommandWithArguments())); // Assign GPU resource ResourceMappings.AssignedResources assigned = new ResourceMappings.AssignedResources(); assigned.updateAssignedResources( ImmutableList.of(new GpuDevice(0, 0), new GpuDevice(1, 1))); resourceMappings.addAssignedResources(ResourceInformation.GPU_URI, assigned); commandPlugin.setRequestsGpu(true); commandPlugin.updateDockerRunCommand(runCommand, nmContainer); Map<String, List<String>> newCommandLine = runCommand.getDockerCommandWithArguments(); // Command line will be updated Assert.assertFalse(commandlinesEquals(originalCommandline, newCommandLine)); // NVIDIA_VISIBLE_DEVICES will be set Assert.assertTrue( runCommand.getEnv().get("NVIDIA_VISIBLE_DEVICES").equals("0,1")); // runtime should exist Assert.assertTrue(newCommandLine.containsKey("runtime")); }
public static Integer getInt(Map<?, ?> map, Object key) { return get(map, key, Integer.class); }
@Test public void getIntTest(){ assertThrows(NumberFormatException.class, () -> { final HashMap<String, String> map = MapUtil.of("age", "d"); final Integer age = MapUtil.getInt(map, "age"); assertNotNull(age); }); }
List<CSVResult> sniff(Reader reader) throws IOException { if (!reader.markSupported()) { reader = new BufferedReader(reader); } List<CSVResult> ret = new ArrayList<>(); for (char delimiter : delimiters) { reader.mark(markLimit); try { CSVResult result = new Snifflet(delimiter).sniff(reader); ret.add(result); } finally { reader.reset(); } } Collections.sort(ret); return ret; }
@Test public void testCSVMidCellQuoteException() throws Exception { List<CSVResult> results = sniff(DELIMITERS, CSV_MID_CELL_QUOTE_EXCEPTION, StandardCharsets.UTF_8); assertEquals(4, results.size()); }
public static StatementExecutorResponse execute( final ConfiguredStatement<Explain> statement, final SessionProperties sessionProperties, final KsqlExecutionContext executionContext, final ServiceContext serviceContext ) { return StatementExecutorResponse.handled(Optional .of(ExplainExecutor.explain( serviceContext, statement, executionContext, sessionProperties))); }
@Test public void shouldExplainStatementWithStructFieldDereference() { // Given: engine.givenSource(DataSourceType.KSTREAM, "Y"); final String statementText = "SELECT address->street FROM Y EMIT CHANGES;"; final ConfiguredStatement<?> explain = engine.configure("EXPLAIN " + statementText); // When: final QueryDescriptionEntity query = (QueryDescriptionEntity) CustomExecutors.EXPLAIN.execute( explain, sessionProperties, engine.getEngine(), engine.getServiceContext() ).getEntity().orElseThrow(IllegalStateException::new); // Then: assertThat(query.getQueryDescription().getStatementText(), equalTo(statementText)); assertThat(query.getQueryDescription().getSources(), containsInAnyOrder("Y")); }
public List<ShardingCondition> createShardingConditions(final SQLStatementContext sqlStatementContext, final List<Object> params) { if (!(sqlStatementContext instanceof WhereAvailable)) { return Collections.emptyList(); } Collection<ColumnSegment> columnSegments = ((WhereAvailable) sqlStatementContext).getColumnSegments(); ShardingSphereSchema schema = getSchema(sqlStatementContext, database); Map<String, String> columnExpressionTableNames = sqlStatementContext instanceof TableAvailable ? ((TableAvailable) sqlStatementContext).getTablesContext().findTableNames(columnSegments, schema) : Collections.emptyMap(); List<ShardingCondition> result = new ArrayList<>(); for (WhereSegment each : ((WhereAvailable) sqlStatementContext).getWhereSegments()) { result.addAll(createShardingConditions(each.getExpr(), params, columnExpressionTableNames)); } return result; }
@Test void assertCreateShardingConditionsForSelectRangeStatement() { int between = 1; int and = 100; ColumnSegment left = new ColumnSegment(0, 0, new IdentifierValue("foo_sharding_col")); ExpressionSegment betweenSegment = new LiteralExpressionSegment(0, 0, between); ExpressionSegment andSegment = new LiteralExpressionSegment(0, 0, and); BetweenExpression betweenExpression = new BetweenExpression(0, 0, left, betweenSegment, andSegment, false); when(whereSegment.getExpr()).thenReturn(betweenExpression); when(shardingRule.findShardingColumn(any(), any())).thenReturn(Optional.of("foo_sharding_col")); List<ShardingCondition> actual = shardingConditionEngine.createShardingConditions(sqlStatementContext, Collections.emptyList()); assertThat(actual.get(0).getStartIndex(), is(0)); assertTrue(actual.get(0).getValues().get(0) instanceof RangeShardingConditionValue); }
@Override public void getErrors(ErrorCollection errors, String parentLocation) { String location = getLocation(parentLocation); super.getErrors(errors, parentLocation); errors.checkMissing(location, "id", id); errors.checkMissing(location, "store_id", storeId); if (this.configuration != null) { for (CRConfigurationProperty property : configuration) { property.getErrors(errors, location); } } }
@Test public void shouldCheckForTypeWhileDeserializing() { String json = """ { "id" : "id", "store_id" : "s3" }"""; CRPluggableArtifact crPluggableArtifact = gson.fromJson(json, CRPluggableArtifact.class); assertThat(crPluggableArtifact.getId(), is("id")); assertThat(crPluggableArtifact.getStoreId(), is("s3")); assertNull(crPluggableArtifact.getType()); assertNull(crPluggableArtifact.getConfiguration()); assertFalse(crPluggableArtifact.getErrors().isEmpty()); }
@Override public void handle(final RoutingContext routingContext) { // We must set it to allow chunked encoding if we're using http1.1 if (routingContext.request().version() == HttpVersion.HTTP_1_1) { routingContext.response().putHeader(TRANSFER_ENCODING, CHUNKED_ENCODING); } else if (routingContext.request().version() == HttpVersion.HTTP_2) { // Nothing required } else { routingContext.fail(BAD_REQUEST.code(), new KsqlApiException("This endpoint is only available when using HTTP1.1 or HTTP2", ERROR_CODE_BAD_REQUEST)); } final CommonRequest request = getRequest(routingContext); if (request == null) { return; } final Optional<Boolean> internalRequest = ServerVerticle.isInternalRequest(routingContext); final MetricsCallbackHolder metricsCallbackHolder = new MetricsCallbackHolder(); final long startTimeNanos = Time.SYSTEM.nanoseconds(); endpoints.createQueryPublisher( request.sql, request.configOverrides, request.sessionProperties, request.requestProperties, context, server.getWorkerExecutor(), DefaultApiSecurityContext.create(routingContext, server), metricsCallbackHolder, internalRequest) .thenAccept(publisher -> { if (publisher instanceof BlockingPrintPublisher) { handlePrintPublisher( routingContext, (BlockingPrintPublisher) publisher); } else { handleQueryPublisher( routingContext, (QueryPublisher) publisher, metricsCallbackHolder, startTimeNanos); } }) .exceptionally(t -> ServerUtils.handleEndpointException(t, routingContext, "Failed to execute query")); }
@Test public void shouldSucceed_printQuery() { // Given: final QueryStreamArgs req = new QueryStreamArgs("print mytopic;", Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()); givenRequest(req); // When: handler.handle(routingContext); endHandler.getValue().handle(null); // Then: assertThat(subscriber.getValue(), notNullValue()); verify(publisher).close(); }
@Override public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) { if (executor.isShutdown()) { return; } BlockingQueue<Runnable> workQueue = executor.getQueue(); Runnable firstWork = workQueue.poll(); boolean newTaskAdd = workQueue.offer(r); if (firstWork != null) { firstWork.run(); } if (!newTaskAdd) { executor.execute(r); } }
@Test public void testRejectedExecutionWhenATaskIsInTheQueueAndThePollReturnANullValue() { when(threadPoolExecutor.isShutdown()).thenReturn(false); when(threadPoolExecutor.getQueue()).thenReturn(workQueue); when(workQueue.poll()).thenReturn(null); when(workQueue.offer(runnable)).thenReturn(false); runsOldestTaskPolicy.rejectedExecution(runnable, threadPoolExecutor); verify(runnableInTheQueue, never()).run(); verify(threadPoolExecutor).execute(runnable); verify(runnable, never()).run(); }
String path() { return path; }
@Test public void testDecodePath() { final String ESCAPED_PATH = "/test%25+1%26%3Dtest?op=OPEN&foo=bar"; final String EXPECTED_PATH = "/test%+1&=test"; Configuration conf = new Configuration(); QueryStringDecoder decoder = new QueryStringDecoder( WebHdfsHandler.WEBHDFS_PREFIX + ESCAPED_PATH); ParameterParser testParser = new ParameterParser(decoder, conf); Assert.assertEquals(EXPECTED_PATH, testParser.path()); }
public static String dataToAvroSchemaJson(DataSchema dataSchema) { return dataToAvroSchemaJson(dataSchema, new DataToAvroSchemaTranslationOptions()); }
@Test(dataProvider = "embeddingSchemaWithDataPropertyData") public void testEmbeddingSchemaWithDataProperty(String schemaText, String expected) throws IOException { DataToAvroSchemaTranslationOptions options = new DataToAvroSchemaTranslationOptions(JsonBuilder.Pretty.SPACES, EmbedSchemaMode.ROOT_ONLY); String avroSchemaJson = SchemaTranslator.dataToAvroSchemaJson(TestUtil.dataSchemaFromString(schemaText), options); DataMap avroSchemaDataMap = TestUtil.dataMapFromString(avroSchemaJson); DataMap expectedDataMap = TestUtil.dataMapFromString(expected); assertEquals(avroSchemaDataMap, expectedDataMap); }
@Override public Revision getCurrentRevision(String requestedMaterialName) { return UNKNOWN_REVISION; }
@Test public void shouldReturnUnknownModificationAsCurrent() { assertThat(instanceModel.getCurrentRevision("foo"), is(PipelineInstanceModel.UNKNOWN_REVISION)); }
@Override public PageData<WidgetTypeInfo> findTenantWidgetTypesByTenantId(WidgetTypeFilter widgetTypeFilter, PageLink pageLink) { boolean deprecatedFilterEnabled = !DeprecatedFilter.ALL.equals(widgetTypeFilter.getDeprecatedFilter()); boolean deprecatedFilterBool = DeprecatedFilter.DEPRECATED.equals(widgetTypeFilter.getDeprecatedFilter()); boolean widgetTypesEmpty = widgetTypeFilter.getWidgetTypes() == null || widgetTypeFilter.getWidgetTypes().isEmpty(); return DaoUtil.toPageData( widgetTypeInfoRepository .findTenantWidgetTypesByTenantId( widgetTypeFilter.getTenantId().getId(), pageLink.getTextSearch(), widgetTypeFilter.isFullSearch(), deprecatedFilterEnabled, deprecatedFilterBool, widgetTypesEmpty, widgetTypeFilter.getWidgetTypes() == null ? Collections.emptyList() : widgetTypeFilter.getWidgetTypes(), widgetTypeFilter.isScadaFirst(), DaoUtil.toPageable(pageLink, WidgetTypeInfoEntity.SEARCH_COLUMNS_MAP))); }
@Test public void testTagsSearchInFindTenantWidgetTypesByTenantId() { for (var entry : SHOULD_FIND_SEARCH_TO_TAGS_MAP.entrySet()) { String searchText = entry.getKey(); String[] tags = entry.getValue(); WidgetTypeDetails savedWidgetType = createAndSaveWidgetType(TenantId.SYS_TENANT_ID, WIDGET_TYPE_COUNT + 1, tags); PageData<WidgetTypeInfo> widgetTypes = widgetTypeDao.findTenantWidgetTypesByTenantId( WidgetTypeFilter.builder() .tenantId(TenantId.SYS_TENANT_ID) .fullSearch(true) .deprecatedFilter(DeprecatedFilter.ALL) .widgetTypes(null).build(), new PageLink(10, 0, searchText) ); assertThat(widgetTypes.getData()).hasSize(1); assertThat(widgetTypes.getData().get(0).getId()).isEqualTo(savedWidgetType.getId()); widgetTypeDao.removeById(TenantId.SYS_TENANT_ID, savedWidgetType.getUuidId()); } for (var entry : SHOULDNT_FIND_SEARCH_TO_TAGS_MAP.entrySet()) { String searchText = entry.getKey(); String[] tags = entry.getValue(); WidgetTypeDetails savedWidgetType = createAndSaveWidgetType(TenantId.SYS_TENANT_ID, WIDGET_TYPE_COUNT + 1, tags); PageData<WidgetTypeInfo> widgetTypes = widgetTypeDao.findTenantWidgetTypesByTenantId( WidgetTypeFilter.builder() .tenantId(TenantId.SYS_TENANT_ID) .fullSearch(true) .deprecatedFilter(DeprecatedFilter.ALL) .widgetTypes(null).build(), new PageLink(10, 0, searchText) ); assertThat(widgetTypes.getData()).hasSize(0); widgetTypeDao.removeById(TenantId.SYS_TENANT_ID, savedWidgetType.getUuidId()); } }
@Override public boolean canManageInput(EfestoInput toEvaluate, EfestoRuntimeContext context) { return canManageEfestoInput(toEvaluate, context); }
@Test void canManageInput() { modelLocalUriId = getModelLocalUriIdFromPmmlIdFactory(FILE_NAME, MODEL_NAME); PMMLRuntimeContext context = getPMMLContext(FILE_NAME, MODEL_NAME, memoryCompilerClassLoader); BaseEfestoInput inputPMML = new EfestoInputPMML(modelLocalUriId, context); assertThat(kieRuntimeServicePMML.canManageInput(inputPMML, context)).isTrue(); }
public PipelineConfigs findGroup(String groupName) { for (PipelineConfigs pipelines : this) { if (pipelines.isNamed(groupName)) { return pipelines; } } throw new RecordNotFoundException(EntityType.PipelineGroup, groupName); }
@Test public void shouldThrowGroupNotFoundExceptionWhenSearchingForANonExistingGroup() { PipelineConfig pipeline = createPipelineConfig("pipeline1", "stage1"); PipelineConfigs defaultGroup = createGroup("defaultGroup", pipeline); PipelineGroups pipelineGroups = new PipelineGroups(defaultGroup); assertThrows(RecordNotFoundException.class, () -> pipelineGroups.findGroup("NonExistantGroup")); }
@Override public Subscriber getSubscriber(Service service) { return subscribers.get(service); }
@Test void getSubscriber() { addServiceSubscriber(); Subscriber subscriber1 = abstractClient.getSubscriber(service); assertNotNull(subscriber1); }
public static Write write() { return Write.create(); }
@Test public void testWriteClientRateLimitingAlsoSetReportMsecs() { // client side flow control BigtableIO.Write write = BigtableIO.write().withTableId("table").withFlowControl(true); assertEquals( 60_000, (int) checkNotNull(write.getBigtableWriteOptions().getThrottlingReportTargetMs())); // client side latency based throttling int targetMs = 30_000; write = BigtableIO.write().withTableId("table").withThrottlingTargetMs(targetMs); assertEquals( targetMs, (int) checkNotNull(write.getBigtableWriteOptions().getThrottlingReportTargetMs())); }
@Override public ValidationResult validate(Object value) { ValidationResult result = super.validate(value); if (result instanceof ValidationResult.ValidationPassed) { final String sValue = (String)value; if (sValue.length() < minLength || sValue.length() > maxLength) { result = new ValidationResult.ValidationFailed("Value is not between " + minLength + " and " + maxLength + " in length!"); } } return result; }
@Test public void testValidateNoString() { assertThat(new LimitedStringValidator(1, 1).validate(123)) .isInstanceOf(ValidationResult.ValidationFailed.class); }
@Override public void write(String propertyKey, @Nullable String value) { checkPropertyKey(propertyKey); try (DbSession dbSession = dbClient.openSession(false)) { if (value == null || value.isEmpty()) { dbClient.internalPropertiesDao().saveAsEmpty(dbSession, propertyKey); } else { dbClient.internalPropertiesDao().save(dbSession, propertyKey, value); } dbSession.commit(); } }
@Test public void write_throws_IAE_if_key_is_null() { expectKeyNullOrEmptyIAE(() -> underTest.write(null, SOME_VALUE)); }
@Override public AppResponse process(Flow flow, ConfirmRequest request) throws FlowNotDefinedException, IOException, NoSuchAlgorithmException { var authAppSession = appSessionService.getSession(request.getAuthSessionId()); if (!isAppSessionAuthenticated(authAppSession) || !request.getUserAppId().equals(authAppSession.getUserAppId())){ return new NokResponse(); } appAuthenticator = appAuthenticatorService.findByUserAppId(authAppSession.getUserAppId()); if (!isAppAuthenticatorActivated(appAuthenticator) || !appAuthenticatorService.exists(appAuthenticator)) return new NokResponse(); if (appSession.getEidasUit()){ var response = validatePipSignature(request.getSignatureOfPip()); if (response != null) return response; } if (appSession.getAction() != null ) { var result = digidClient.getAccountStatus(appAuthenticator.getAccountId()); if (ERROR_DECEASED.equals(result.get("error"))) return deceasedResponse(); switch(appSession.getAction()){ case "activate_with_app" -> digidClient.remoteLog("1366", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(HIDDEN), true)); case "upgrade_rda_widchecker" -> digidClient.remoteLog("1318", getAppDetails()); default -> digidClient.remoteLog("1344", getAppDetails()); } } appSession.setAppAuthenticationLevel(appAuthenticator.getAuthenticationLevel()); appSession.setAccountId(authAppSession.getAccountId()); appSession.setSubstantialActivatedAt(appAuthenticator.getSubstantieelActivatedAt()); appSession.setSubstantialDocumentType(appAuthenticator.getSubstantieelDocumentType()); appSession.setUserAppId(authAppSession.getUserAppId()); if (appSession.getOidcSessionId() != null && authAppSession.getState().equals(State.AUTHENTICATED.name())) { oidcClient.confirmOidc(appSession.getAccountId(), appAuthenticator.getAuthenticationLevel(), appSession.getOidcSessionId()); } if (appSession.getAdSessionId() != null && authAppSession.getState().equals(State.AUTHENTICATED.name())) { var bsn = digidClient.getBsn(appSession.getAccountId()); samlClient.updateAdSession(appSession.getAdSessionId(), appAuthenticator.getAuthenticationLevel(), bsn.get(BSN)); } return new ConfirmationResponse(appAuthenticator.getId().equals(appSession.getAppToDestroy())); }
@Test public void processReturnsNokResponse() throws FlowNotDefinedException, IOException, NoSuchAlgorithmException { //given authAppSession.setState("NOTAUTHENTICATED"); when(appSessionService.getSession(confirmRequest.getAuthSessionId())).thenReturn(authAppSession); //when AppResponse appResponse = confirmed.process(mockedFlow, confirmRequest); //then assertTrue(appResponse instanceof NokResponse); }
@Override public void indexOnStartup(Set<IndexType> uninitializedIndexTypes) { // TODO do not load everything in memory. Db rows should be scrolled. List<IndexPermissions> authorizations = getAllAuthorizations(); Stream<AuthorizationScope> scopes = getScopes(uninitializedIndexTypes); index(authorizations, scopes, Size.LARGE); }
@Test public void indexOnStartup_grants_access_to_anybody_on_public_project() { ProjectDto project = createAndIndexPublicProject(); UserDto user = db.users().insertUser(); GroupDto group = db.users().insertGroup(); indexOnStartup(); verifyAnyoneAuthorized(project); verifyAuthorized(project, user); verifyAuthorized(project, user, group); }
public Object resolve(final Expression expression) { return new Visitor().process(expression, null); }
@Test public void shouldThrowIfCannotParseTime() { // Given: final SqlType type = SqlTypes.TIME; final Expression exp = new StringLiteral("abc"); // When: final KsqlException e = assertThrows( KsqlException.class, () -> new GenericExpressionResolver(type, FIELD_NAME, registry, config, "insert value", false).resolve(exp)); // Then: assertThat(e.getMessage(), containsString("Time format must be hh:mm:ss[.S]")); }
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter, MetricsRecorder metricsRecorder, BufferSupplier bufferSupplier) { if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) { // check the magic value if (!records.hasMatchingMagic(toMagic)) return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder); else // Do in-place validation, offset assignment and maybe set timestamp return assignOffsetsNonCompressed(offsetCounter, metricsRecorder); } else return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier); }
@Test public void testRelativeOffsetAssignmentNonCompressedV2() { long now = System.currentTimeMillis(); MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V2, now, Compression.NONE); long offset = 1234567; checkOffsets(records, 0); MemoryRecords messageWithOffset = new LogValidator( records, new TopicPartition("topic", 0), time, CompressionType.NONE, Compression.NONE, false, RecordBatch.MAGIC_VALUE_V2, TimestampType.CREATE_TIME, 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, AppendOrigin.CLIENT, MetadataVersion.latestTesting() ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ).validatedRecords; checkOffsets(messageWithOffset, offset); }
@Override public abstract boolean equals(Object other);
@Test
    public void testEnumDataSchema() throws Exception {
        final String schemaString = "{ \"type\" : \"enum\", \"name\" : \"numbers\", \"symbols\" : [ \"ONE\", \"TWO\", \"THREE\", \"FOUR\", \"FIVE\"], \"symbolDocs\" : { \"FIVE\" : \"DOC_FIVE\", \"ONE\" : \"DOC_ONE\" } }";

        PegasusSchemaParser parser = schemaParserFromString(schemaString);
        EnumDataSchema schema = (EnumDataSchema) parser.topLevelDataSchemas().get(0);

        String[] orderedSymbols = {"ONE", "TWO", "THREE", "FOUR", "FIVE"};
        for (int i = 0; i < orderedSymbols.length; ++i) {
            Assert.assertEquals(schema.index(orderedSymbols[i]), i);
            Assert.assertTrue(schema.contains(orderedSymbols[i]));
        }

        String[] missingSymbols = {"SIX", "SEVEN", "EIGHT"};
        for (String missingSymbol : missingSymbols) {
            Assert.assertFalse(schema.contains(missingSymbol));
            Assert.assertEquals(schema.index(missingSymbol), -1);
        }

        Assert.assertEquals(schema.getSymbols(), Arrays.asList(orderedSymbols));

        String[] symbolDocKeys = {"ONE", "FIVE"};
        for (String symbolDocKey : symbolDocKeys) {
            Assert.assertTrue(schema.getSymbolDocs().containsKey(symbolDocKey) && schema.getSymbolDocs().get(symbolDocKey).equals("DOC_" + symbolDocKey));
        }

        // symbols declared without docs must not have entries in symbolDocs
        String[] missingSymbolDocs = {"TWO", "THREE", "FOUR"};
        for (String missingSymbolDoc : missingSymbolDocs) {
            Assert.assertFalse(schema.getSymbolDocs().containsKey(missingSymbolDoc));
        }
    }
public synchronized ConnectionProfile createGCSDestinationConnectionProfile(
        String connectionProfileId, String gcsBucketName, String gcsRootPath) {
    checkArgument(
        !Strings.isNullOrEmpty(connectionProfileId),
        "connectionProfileId can not be null or empty");
    checkArgument(!Strings.isNullOrEmpty(gcsBucketName), "gcsBucketName can not be null or empty");
    checkArgument(gcsRootPath != null, "gcsRootPath can not be null");
    checkArgument(
        gcsRootPath.isEmpty() || gcsRootPath.charAt(0) == '/',
        "gcsRootPath must either be an empty string or start with a '/'");

    LOG.info(
        "Creating GCS Destination Connection Profile {} in project {}.",
        connectionProfileId,
        projectId);

    try {
        ConnectionProfile.Builder connectionProfileBuilder =
            ConnectionProfile.newBuilder()
                .setDisplayName(connectionProfileId)
                .setStaticServiceIpConnectivity(StaticServiceIpConnectivity.getDefaultInstance())
                .setGcsProfile(
                    GcsProfile.newBuilder().setBucket(gcsBucketName).setRootPath(gcsRootPath));

        CreateConnectionProfileRequest request =
            CreateConnectionProfileRequest.newBuilder()
                .setParent(LocationName.of(projectId, location).toString())
                .setConnectionProfile(connectionProfileBuilder)
                .setConnectionProfileId(connectionProfileId)
                .build();

        ConnectionProfile reference = datastreamClient.createConnectionProfileAsync(request).get();
        createdConnectionProfileIds.add(connectionProfileId);

        LOG.info(
            "Successfully created GCS Destination Connection Profile {} in project {}.",
            connectionProfileId,
            projectId);
        return reference;
    } catch (ExecutionException | InterruptedException e) {
        throw new DatastreamResourceManagerException(
            "Failed to create GCS destination connection profile. ", e);
    }
}
@Test public void testCreateGCSDestinationConnectionProfileWithInvalidGCSRootPathShouldFail() { IllegalArgumentException exception = assertThrows( IllegalArgumentException.class, () -> testManager.createGCSDestinationConnectionProfile( CONNECTION_PROFILE_ID, BUCKET, "invalid")); assertThat(exception) .hasMessageThat() .contains("gcsRootPath must either be an empty string or start with a '/'"); }
@VisibleForTesting public static UClassIdent create(String qualifiedName) { List<String> topLevelPath = new ArrayList<>(); for (String component : Splitter.on('.').split(qualifiedName)) { topLevelPath.add(component); if (Character.isUpperCase(component.charAt(0))) { break; } } return create(Joiner.on('.').join(topLevelPath), qualifiedName); }
@Test public void equality() throws CouldNotResolveImportException { new EqualsTester() .addEqualityGroup(UClassIdent.create("java.util.List")) .addEqualityGroup(UClassIdent.create("com.sun.tools.javac.util.List")) .addEqualityGroup( UClassIdent.create("java.lang.String"), UClassIdent.create(inliner.resolveClass("java.lang.String"))) .testEquals(); }
@Override public Collection<V> valueRange(int startIndex, int endIndex) { return get(valueRangeAsync(startIndex, endIndex)); }
@Test public void testValueRange() { RScoredSortedSet<Integer> set = redisson.getScoredSortedSet("simple"); set.add(0, 1); set.add(1, 2); set.add(2, 3); set.add(3, 4); set.add(4, 5); set.add(4, 5); Collection<Integer> vals = set.valueRange(0, -1); assertThat(vals).containsExactly(1, 2, 3, 4, 5); }
public Record update(MetricsRecord mr, boolean includingTags) { String name = mr.name(); RecordCache recordCache = map.get(name); if (recordCache == null) { recordCache = new RecordCache(); map.put(name, recordCache); } Collection<MetricsTag> tags = mr.tags(); Record record = recordCache.get(tags); if (record == null) { record = new Record(); recordCache.put(tags, record); } for (AbstractMetric m : mr.metrics()) { record.metrics.put(m.name(), m); } if (includingTags) { // mostly for some sinks that include tags as part of a dense schema for (MetricsTag t : mr.tags()) { record.tags.put(t.name(), t.value()); } } return record; }
@SuppressWarnings("deprecation") @Test public void testUpdate() { MetricsCache cache = new MetricsCache(); MetricsRecord mr = makeRecord("r", Arrays.asList(makeTag("t", "tv")), Arrays.asList(makeMetric("m", 0), makeMetric("m1", 1))); MetricsCache.Record cr = cache.update(mr); verify(mr).name(); verify(mr).tags(); verify(mr).metrics(); assertEquals("same record size", cr.metrics().size(), ((Collection<AbstractMetric>)mr.metrics()).size()); assertEquals("same metric value", 0, cr.getMetric("m")); MetricsRecord mr2 = makeRecord("r", Arrays.asList(makeTag("t", "tv")), Arrays.asList(makeMetric("m", 2), makeMetric("m2", 42))); cr = cache.update(mr2); assertEquals("contains 3 metric", 3, cr.metrics().size()); checkMetricValue("updated metric value", cr, "m", 2); checkMetricValue("old metric value", cr, "m1", 1); checkMetricValue("new metric value", cr, "m2", 42); MetricsRecord mr3 = makeRecord("r", Arrays.asList(makeTag("t", "tv3")), // different tag value Arrays.asList(makeMetric("m3", 3))); cr = cache.update(mr3); // should get a new record assertEquals("contains 1 metric", 1, cr.metrics().size()); checkMetricValue("updated metric value", cr, "m3", 3); // tags cache should be empty so far assertEquals("no tags", 0, cr.tags().size()); // until now cr = cache.update(mr3, true); assertEquals("Got 1 tag", 1, cr.tags().size()); assertEquals("Tag value", "tv3", cr.getTag("t")); checkMetricValue("Metric value", cr, "m3", 3); }
@Override public void run() { boolean isNeedFlush = false; boolean sqlShowEnabled = ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData().getProps().getValue(ConfigurationPropertyKey.SQL_SHOW); try { if (sqlShowEnabled) { fillLogMDC(); } isNeedFlush = executeCommand(context, databaseProtocolFrontendEngine.getCodecEngine().createPacketPayload((ByteBuf) message, context.channel().attr(CommonConstants.CHARSET_ATTRIBUTE_KEY).get())); // CHECKSTYLE:OFF } catch (final Exception ex) { // CHECKSTYLE:ON processException(ex); // CHECKSTYLE:OFF } catch (final Error error) { // CHECKSTYLE:ON processException(new RuntimeException(error)); } finally { connectionSession.clearQueryContext(); Collection<SQLException> exceptions = Collections.emptyList(); try { connectionSession.getDatabaseConnectionManager().closeExecutionResources(); } catch (final BackendConnectionException ex) { exceptions = ex.getExceptions().stream().filter(SQLException.class::isInstance).map(SQLException.class::cast).collect(Collectors.toList()); } if (isNeedFlush) { context.flush(); } processClosedExceptions(exceptions); context.pipeline().fireUserEventTriggered(new WriteCompleteEvent()); if (sqlShowEnabled) { clearLogMDC(); } if (message instanceof CompositeByteBuf) { releaseCompositeByteBuf((CompositeByteBuf) message); } ((ByteBuf) message).release(); } }
@Test void assertRunNeedFlushByFalse() throws SQLException, BackendConnectionException { when(queryCommandExecutor.execute()).thenReturn(Collections.emptyList()); when(engine.getCommandExecuteEngine().getCommandPacket(payload, commandPacketType, connectionSession)).thenReturn(commandPacket); when(engine.getCommandExecuteEngine().getCommandExecutor(commandPacketType, commandPacket, connectionSession)).thenReturn(queryCommandExecutor); when(engine.getCommandExecuteEngine().getCommandPacketType(payload)).thenReturn(commandPacketType); when(engine.getCodecEngine().createPacketPayload(message, StandardCharsets.UTF_8)).thenReturn(payload); CommandExecutorTask actual = new CommandExecutorTask(engine, connectionSession, handlerContext, message); actual.run(); verify(queryCommandExecutor).close(); verify(databaseConnectionManager).closeExecutionResources(); }
@SuppressWarnings("unchecked") public <V> V run(String callableName, RetryOperation<V> operation) { int attempt = 1; while (true) { try { return operation.run(); } catch (Exception e) { if (!exceptionClass.isInstance(e)) { throwIfUnchecked(e); throw new RuntimeException(e); } E qe = (E) e; exceptionCallback.accept(qe); if (attempt >= maxAttempts || !retryPredicate.test(qe)) { throw qe; } attempt++; int delayMillis = (int) min(minBackoffDelay.toMillis() * pow(scaleFactor, attempt - 1), maxBackoffDelay.toMillis()); int jitterMillis = ThreadLocalRandom.current().nextInt(max(1, (int) (delayMillis * 0.1))); log.info( "Failed on executing %s with attempt %d. Retry after %sms. Cause: %s", callableName, attempt - 1, delayMillis, qe.getMessage()); try { MILLISECONDS.sleep(delayMillis + jitterMillis); } catch (InterruptedException ie) { currentThread().interrupt(); throw new RuntimeException(ie); } } } }
@Test public void testSuccess() { assertEquals( retryDriver.run("test", new MockOperation(5, RETRYABLE_EXCEPTION)), Integer.valueOf(5)); }
public IterableSubject asList() { return checkNoNeedToDisplayBothValues("asList()").that(Longs.asList(checkNotNull(actual))); }
@Test public void asList() { assertThat(array(5, 2, 9)).asList().containsAtLeast(2L, 9L); }
public static String getKey(String dataId, String group) { return getKey(dataId, group, ""); }
@Test public void testGetKey1() { String[] strings = {"dataId", "group", "datumStr"}; String expected = "dataId+group+datumStr"; String key = GroupKey.getKey(strings); Assert.isTrue(key.equals(expected)); }