focal_method — string, lengths 13 to 60.9k
test_case — string, lengths 25 to 109k
public List<Ce.Task> formatQueue(DbSession dbSession, List<CeQueueDto> dtos) { DtoCache cache = DtoCache.forQueueDtos(dbClient, dbSession, dtos); return dtos.stream().map(input -> formatQueue(input, cache)).toList(); }
@Test public void formatQueue_compute_execute_time_if_in_progress() { long startedAt = 1_450_000_001_000L; long now = 1_450_000_003_000L; CeQueueDto dto = new CeQueueDto(); dto.setUuid("UUID"); dto.setTaskType("TYPE"); dto.setStatus(CeQueueDto.Status.PENDING); dto.setCreatedAt(1_450_000_000_000L); db.getDbClient().ceQueueDao().insert(db.getSession(), dto); makeInProgress(db.getSession(), "workerUuid", startedAt, dto); CeQueueDto inProgress = db.getDbClient().ceQueueDao().selectByUuid(db.getSession(), dto.getUuid()).get(); when(system2.now()).thenReturn(now); Ce.Task wsTask = underTest.formatQueue(db.getSession(), inProgress); assertThat(wsTask.getExecutionTimeMs()).isEqualTo(now - startedAt); }
public static FormBody buildFormBody(final Map<String, ?> form) { FormBody.Builder paramBuilder = new FormBody.Builder(StandardCharsets.UTF_8); for (Map.Entry<String, ?> entry : form.entrySet()) { paramBuilder.add(entry.getKey(), String.valueOf(entry.getValue())); } return paramBuilder.build(); }
@Test public void buildFormBodyTest() { FormBody formBody = HttpUtils.buildFormBody(formMap); Assert.assertNotNull(formBody); }
static void readFullyHeapBuffer(InputStream f, ByteBuffer buf) throws IOException { readFully(f, buf.array(), buf.arrayOffset() + buf.position(), buf.remaining()); buf.position(buf.limit()); }
@Test public void testHeapReadFullyLimit() throws Exception { final ByteBuffer readBuffer = ByteBuffer.allocate(10); readBuffer.limit(7); MockInputStream stream = new MockInputStream(2, 3, 3); DelegatingSeekableInputStream.readFullyHeapBuffer(stream, readBuffer); Assert.assertEquals(7, readBuffer.position()); Assert.assertEquals(7, readBuffer.limit()); DelegatingSeekableInputStream.readFullyHeapBuffer(stream, readBuffer); Assert.assertEquals(7, readBuffer.position()); Assert.assertEquals(7, readBuffer.limit()); readBuffer.flip(); Assert.assertEquals("Buffer contents should match", ByteBuffer.wrap(TEST_ARRAY, 0, 7), readBuffer); readBuffer.position(7); readBuffer.limit(10); DelegatingSeekableInputStream.readFullyHeapBuffer(stream, readBuffer); Assert.assertEquals(10, readBuffer.position()); Assert.assertEquals(10, readBuffer.limit()); readBuffer.flip(); Assert.assertEquals("Buffer contents should match", ByteBuffer.wrap(TEST_ARRAY), readBuffer); }
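The pair above hinges on the buffer's position/limit bookkeeping, which is easy to misread. Below is a minimal standalone sketch of the same idea; the readFully loop is a stand-in for the production helper and the class name is illustrative only.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;

public class ReadFullyHeapBufferSketch {

    // Stand-in for the production readFully: loop until len bytes have been read.
    static void readFully(InputStream in, byte[] dst, int off, int len) throws IOException {
        int read = 0;
        while (read < len) {
            int n = in.read(dst, off + read, len - read);
            if (n < 0) {
                throw new IOException("Stream ended before the buffer was filled");
            }
            read += n;
        }
    }

    // Bytes land in the backing array between position and limit, then position
    // is advanced to the limit.
    static void readFullyHeapBuffer(InputStream in, ByteBuffer buf) throws IOException {
        readFully(in, buf.array(), buf.arrayOffset() + buf.position(), buf.remaining());
        buf.position(buf.limit());
    }

    public static void main(String[] args) throws IOException {
        ByteBuffer buf = ByteBuffer.allocate(10);
        buf.limit(7); // only the first 7 slots may be filled
        InputStream in = new ByteArrayInputStream(new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
        readFullyHeapBuffer(in, buf);
        System.out.println(buf.position() + " / " + buf.limit()); // prints "7 / 7"
    }
}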
public static <K> KStreamHolder<K> build( final KStreamHolder<K> left, final KTableHolder<K> right, final StreamTableJoin<K> join, final RuntimeBuildContext buildContext, final JoinedFactory joinedFactory ) { final Formats leftFormats = join.getInternalFormats(); final QueryContext queryContext = join.getProperties().getQueryContext(); final QueryContext.Stacker stacker = QueryContext.Stacker.of(queryContext); final LogicalSchema leftSchema = left.getSchema(); final PhysicalSchema leftPhysicalSchema = PhysicalSchema.from( leftSchema, leftFormats.getKeyFeatures(), leftFormats.getValueFeatures() ); final Serde<GenericRow> leftSerde = buildContext.buildValueSerde( leftFormats.getValueFormat(), leftPhysicalSchema, stacker.push(SERDE_CTX).getQueryContext() ); final Serde<K> keySerde = left.getExecutionKeyFactory().buildKeySerde( leftFormats.getKeyFormat(), leftPhysicalSchema, queryContext ); final Joined<K, GenericRow, GenericRow> joined = joinedFactory.create( keySerde, leftSerde, null, StreamsUtil.buildOpName(queryContext) ); final LogicalSchema rightSchema = right.getSchema(); final JoinParams joinParams = JoinParamsFactory .create(join.getKeyColName(), leftSchema, rightSchema); final KStream<K, GenericRow> result; switch (join.getJoinType()) { case LEFT: result = left.getStream().leftJoin(right.getTable(), joinParams.getJoiner(), joined); break; case INNER: result = left.getStream().join(right.getTable(), joinParams.getJoiner(), joined); break; default: throw new IllegalStateException("invalid join type"); } return left.withStream(result, joinParams.getSchema()); }
@Test public void shouldReturnCorrectLegacySchema() { // Given: join = new StreamTableJoin<>( new ExecutionStepPropertiesV1(CTX), JoinType.INNER, ColumnName.of(LEGACY_KEY_COL), LEFT_FMT, left, right ); // When: final KStreamHolder<Struct> result = join.build(planBuilder, planInfo); // Then: assertThat( result.getSchema(), is(JoinParamsFactory.create(ROWKEY_NAME, LEFT_SCHEMA, RIGHT_SCHEMA).getSchema()) ); }
public static SqlArgument of(final SqlType type) { return new SqlArgument(type, null, null); }
@Test public void shouldThrowWhenLambdaPresentWhenGettingType() { final SqlArgument argument = SqlArgument.of(null, (SqlLambdaResolved .of(ImmutableList.of(SqlTypes.STRING), SqlTypes.INTEGER))); final Exception e = assertThrows( RuntimeException.class, argument::getSqlTypeOrThrow ); assertThat(e.getMessage(), containsString("Was expecting type as a function argument")); }
@Override public String toString() { return "ChildEip{" + "id='" + id + '\'' + ", eipAttributes=" + eipAttributeMap + '}'; }
@Test public void testToString() { String toString = getInstance().toString(); assertNotNull(toString); assertTrue(toString.contains("ChildEip")); }
@Override public List<PluginWrapper> getPlugins() { return Arrays.asList(getPlugin(currentPluginId)); }
@Test public void getPlugins() { pluginManager.loadPlugins(); assertEquals(2, pluginManager.getPlugins().size()); assertEquals(1, wrappedPluginManager.getPlugins().size()); }
@Override public CompletableFuture<Acknowledge> disposeSavepoint(String savepointPath) { final SavepointDisposalRequest savepointDisposalRequest = new SavepointDisposalRequest(savepointPath); final CompletableFuture<TriggerResponse> savepointDisposalTriggerFuture = sendRequest( SavepointDisposalTriggerHeaders.getInstance(), savepointDisposalRequest); final CompletableFuture<AsynchronousOperationInfo> savepointDisposalFuture = savepointDisposalTriggerFuture.thenCompose( (TriggerResponse triggerResponse) -> { final TriggerId triggerId = triggerResponse.getTriggerId(); final SavepointDisposalStatusHeaders savepointDisposalStatusHeaders = SavepointDisposalStatusHeaders.getInstance(); final SavepointDisposalStatusMessageParameters savepointDisposalStatusMessageParameters = savepointDisposalStatusHeaders .getUnresolvedMessageParameters(); savepointDisposalStatusMessageParameters.triggerIdPathParameter.resolve( triggerId); return pollResourceAsync( () -> sendRequest( savepointDisposalStatusHeaders, savepointDisposalStatusMessageParameters)); }); return savepointDisposalFuture.thenApply( (AsynchronousOperationInfo asynchronousOperationInfo) -> { if (asynchronousOperationInfo.getFailureCause() == null) { return Acknowledge.get(); } else { throw new CompletionException(asynchronousOperationInfo.getFailureCause()); } }); }
@Test void testDisposeSavepoint() throws Exception { final String savepointPath = "foobar"; final String exceptionMessage = "Test exception."; final FlinkException testException = new FlinkException(exceptionMessage); final TestSavepointDisposalHandlers testSavepointDisposalHandlers = new TestSavepointDisposalHandlers(savepointPath); final TestSavepointDisposalHandlers.TestSavepointDisposalTriggerHandler testSavepointDisposalTriggerHandler = testSavepointDisposalHandlers.new TestSavepointDisposalTriggerHandler(); final TestSavepointDisposalHandlers.TestSavepointDisposalStatusHandler testSavepointDisposalStatusHandler = testSavepointDisposalHandlers .new TestSavepointDisposalStatusHandler( OptionalFailure.of(AsynchronousOperationInfo.complete()), OptionalFailure.of( AsynchronousOperationInfo.completeExceptional( new SerializedThrowable(testException))), OptionalFailure.ofFailure(testException)); try (TestRestServerEndpoint restServerEndpoint = createRestServerEndpoint( testSavepointDisposalStatusHandler, testSavepointDisposalTriggerHandler)) { RestClusterClient<?> restClusterClient = createRestClusterClient(restServerEndpoint.getServerAddress().getPort()); try { { final CompletableFuture<Acknowledge> disposeSavepointFuture = restClusterClient.disposeSavepoint(savepointPath); assertThat(disposeSavepointFuture.get()).isEqualTo(Acknowledge.get()); } { final CompletableFuture<Acknowledge> disposeSavepointFuture = restClusterClient.disposeSavepoint(savepointPath); try { disposeSavepointFuture.get(); fail("Expected an exception"); } catch (ExecutionException ee) { assertThat(ExceptionUtils.findThrowableWithMessage(ee, exceptionMessage)) .isPresent(); } } { try { restClusterClient.disposeSavepoint(savepointPath).get(); fail("Expected an exception."); } catch (ExecutionException ee) { assertThat(ExceptionUtils.findThrowable(ee, RestClientException.class)) .isPresent(); } } } finally { restClusterClient.close(); } } }
@Override public boolean addIfGreater(double score, V object) { return get(addIfGreaterAsync(score, object)); }
@Test public void testAddIfGreater() { RScoredSortedSet<String> set = redisson.getScoredSortedSet("simple"); set.add(123, "1980"); assertThat(set.addIfGreater(120, "1980")).isFalse(); assertThat(set.getScore("1980")).isEqualTo(123); assertThat(set.addIfGreater(125, "1980")).isTrue(); assertThat(set.getScore("1980")).isEqualTo(125); }
@Udf public String concat(@UdfParameter( description = "The varchar fields to concatenate") final String... inputs) { if (inputs == null) { return null; } return Arrays.stream(inputs) .filter(Objects::nonNull) .collect(Collectors.joining()); }
@Test public void shouldReturnEmptyForSingleNullInput() { assertThat(udf.concat((String) null), is("")); assertThat(udf.concat((ByteBuffer) null), is(ByteBuffer.wrap(new byte[] {}))); }
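The subtlety the test above exercises is Java varargs dispatch: a single null String becomes a one-element array, while a null array means no input at all. A self-contained sketch of that behavior, using a plain copy of the string variant (class name is illustrative):

import java.util.Arrays;
import java.util.Objects;
import java.util.stream.Collectors;

public class ConcatSketch {

    // Copy of the string variant: a null array yields null, null elements are skipped.
    static String concat(String... inputs) {
        if (inputs == null) {
            return null;
        }
        return Arrays.stream(inputs)
                .filter(Objects::nonNull)
                .collect(Collectors.joining());
    }

    public static void main(String[] args) {
        System.out.println(concat("a", null, "b"));   // ab
        System.out.println(concat((String) null));    // "" — array with one null element
        System.out.println(concat((String[]) null));  // null — the whole array is null
    }
}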
public ModelMBeanInfo getMBeanInfo(Object defaultManagedBean, Object customManagedBean, String objectName) throws JMException { if ((defaultManagedBean == null && customManagedBean == null) || objectName == null) return null; // skip proxy classes if (defaultManagedBean != null && Proxy.isProxyClass(defaultManagedBean.getClass())) { LOGGER.trace("Skip creating ModelMBeanInfo due proxy class {}", defaultManagedBean.getClass()); return null; } // maps and lists to contain information about attributes and operations Map<String, ManagedAttributeInfo> attributes = new LinkedHashMap<>(); Set<ManagedOperationInfo> operations = new LinkedHashSet<>(); Set<ModelMBeanAttributeInfo> mBeanAttributes = new LinkedHashSet<>(); Set<ModelMBeanOperationInfo> mBeanOperations = new LinkedHashSet<>(); Set<ModelMBeanNotificationInfo> mBeanNotifications = new LinkedHashSet<>(); // extract details from default managed bean if (defaultManagedBean != null) { extractAttributesAndOperations(defaultManagedBean.getClass(), attributes, operations); extractMbeanAttributes(defaultManagedBean, attributes, mBeanAttributes, mBeanOperations); extractMbeanOperations(defaultManagedBean, operations, mBeanOperations); extractMbeanNotifications(defaultManagedBean, mBeanNotifications); } // extract details from custom managed bean if (customManagedBean != null) { extractAttributesAndOperations(customManagedBean.getClass(), attributes, operations); extractMbeanAttributes(customManagedBean, attributes, mBeanAttributes, mBeanOperations); extractMbeanOperations(customManagedBean, operations, mBeanOperations); extractMbeanNotifications(customManagedBean, mBeanNotifications); } // create the ModelMBeanInfo String name = getName(customManagedBean != null ? customManagedBean : defaultManagedBean, objectName); String description = getDescription(customManagedBean != null ? customManagedBean : defaultManagedBean, objectName); ModelMBeanAttributeInfo[] arrayAttributes = mBeanAttributes.toArray(new ModelMBeanAttributeInfo[mBeanAttributes.size()]); ModelMBeanOperationInfo[] arrayOperations = mBeanOperations.toArray(new ModelMBeanOperationInfo[mBeanOperations.size()]); ModelMBeanNotificationInfo[] arrayNotifications = mBeanNotifications.toArray(new ModelMBeanNotificationInfo[mBeanNotifications.size()]); ModelMBeanInfo info = new ModelMBeanInfoSupport(name, description, arrayAttributes, null, arrayOperations, arrayNotifications); LOGGER.trace("Created ModelMBeanInfo {}", info); return info; }
@Test(expected = IllegalArgumentException.class) public void testAttributePOJONamingNoGetter() throws JMException { mbeanInfoAssembler.getMBeanInfo(new BadAttributeNameNoGetterSetter(), null, "someName"); }
@Override public Result apply(ApplyNode applyNode, Captures captures, Context context) { if (applyNode.getMayParticipateInAntiJoin()) { return Result.empty(); } Assignments subqueryAssignments = applyNode.getSubqueryAssignments(); if (subqueryAssignments.size() != 1) { return Result.empty(); } RowExpression expression = getOnlyElement(subqueryAssignments.getExpressions()); if (!(expression instanceof InSubqueryExpression)) { return Result.empty(); } InSubqueryExpression inPredicate = (InSubqueryExpression) expression; VariableReferenceExpression inPredicateOutputVariable = getOnlyElement(subqueryAssignments.getVariables()); PlanNode leftInput = applyNode.getInput(); // Add unique id column if the set of columns do not form a unique key already if (!((GroupReference) leftInput).getLogicalProperties().isPresent() || !((GroupReference) leftInput).getLogicalProperties().get().isDistinct(ImmutableSet.copyOf(leftInput.getOutputVariables()))) { VariableReferenceExpression uniqueKeyVariable = context.getVariableAllocator().newVariable("unique", BIGINT); leftInput = new AssignUniqueId( applyNode.getSourceLocation(), context.getIdAllocator().getNextId(), leftInput, uniqueKeyVariable); } VariableReferenceExpression leftVariableReference = inPredicate.getValue(); VariableReferenceExpression rightVariableReference = inPredicate.getSubquery(); JoinNode innerJoin = new JoinNode( applyNode.getSourceLocation(), context.getIdAllocator().getNextId(), JoinType.INNER, leftInput, applyNode.getSubquery(), ImmutableList.of(new EquiJoinClause( leftVariableReference, rightVariableReference)), ImmutableList.<VariableReferenceExpression>builder() .addAll(leftInput.getOutputVariables()) .build(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), ImmutableMap.of()); AggregationNode distinctNode = new AggregationNode( innerJoin.getSourceLocation(), context.getIdAllocator().getNextId(), innerJoin, ImmutableMap.of(), singleGroupingSet(ImmutableList.<VariableReferenceExpression>builder() .addAll(innerJoin.getOutputVariables()) .build()), ImmutableList.of(), SINGLE, Optional.empty(), Optional.empty(), Optional.empty()); ImmutableList<VariableReferenceExpression> referencedOutputs = ImmutableList.<VariableReferenceExpression>builder() .addAll(applyNode.getInput().getOutputVariables()) .add(inPredicateOutputVariable) .build(); ProjectNode finalProjectNdde = new ProjectNode( context.getIdAllocator().getNextId(), distinctNode, Assignments.builder() .putAll(identityAssignments(distinctNode.getOutputVariables())) .put(inPredicateOutputVariable, TRUE_CONSTANT) .build() .filter(referencedOutputs)); return Result.ofPlanNode(finalProjectNdde); }
@Test public void testFeatureDisabled() { tester().assertThat(new TransformUncorrelatedInPredicateSubqueryToDistinctInnerJoin()) .setSystemProperty(IN_PREDICATES_AS_INNER_JOINS_ENABLED, "false") .on(p -> p.apply( assignment( p.variable("x"), inSubquery(p.variable("y"), p.variable("z"))), emptyList(), p.values(p.variable("y")), p.values(p.variable("z")))) .doesNotFire(); tester().assertThat(new TransformUncorrelatedInPredicateSubqueryToDistinctInnerJoin()) .setSystemProperty(EXPLOIT_CONSTRAINTS, "false") .on(p -> p.apply( assignment( p.variable("x"), inSubquery(p.variable("y"), p.variable("z"))), emptyList(), p.values(p.variable("y")), p.values(p.variable("z")))) .doesNotFire(); tester().assertThat(new TransformUncorrelatedInPredicateSubqueryToDistinctInnerJoin()) .setSystemProperty(JOIN_REORDERING_STRATEGY, "NONE") .on(p -> p.apply( assignment( p.variable("x"), inSubquery(p.variable("y"), p.variable("z"))), emptyList(), p.values(p.variable("y")), p.values(p.variable("z")))) .doesNotFire(); }
public Map<String, String> confirm(RdaConfirmRequest params) { AppSession appSession = appSessionService.getSession(params.getAppSessionId()); AppAuthenticator appAuthenticator = appAuthenticatorService.findByUserAppId(appSession.getUserAppId()); if(!checkSecret(params, appSession) || !checkAccount(params, appSession)){ appSession.setRdaSessionStatus("ABORTED"); appSessionService.save(appSession); return Map.of("arrivalStatus", "NOK"); } if(checkAndProcessError(params, appSession)){ appSessionService.save(appSession); return Map.of("arrivalStatus", "OK"); } if (!switchService.digidAppSwitchEnabled()) { digidClient.remoteLog("853", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(HIDDEN), true)); appSession.setRdaSessionStatus("REFUTED"); } else if (!switchService.digidRdaSwitchEnabled()){ digidClient.remoteLog("579", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(HIDDEN), true)); appSession.setRdaSessionStatus("REFUTED"); } else if (params.isVerified() && (SCANNING.equals(appSession.getRdaSessionStatus()) || SCANNING_FOREIGN.equals(appSession.getRdaSessionStatus()))) { appSession.setRdaSessionStatus("VERIFIED"); appAuthenticator.setSubstantieelActivatedAt(ZonedDateTime.now()); appAuthenticator.setSubstantieelDocumentType(params.getDocumentType().toLowerCase()); if (appAuthenticator.getWidActivatedAt() == null) { appAuthenticator.setIssuerType("rda"); } storeIdCheckDocument(params.getDocumentNumber(), params.getDocumentType(), appSession.getAccountId(), appAuthenticator.getUserAppId()); if (ID_CHECK_ACTION.equals(appSession.getRdaAction())) { digidClient.remoteLog("1321", Map.of("document_type", params.getDocumentType().toLowerCase(), lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId())); } else { digidClient.remoteLog("848", Map.of("document_type", params.getDocumentType().toLowerCase(), lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(), lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName())); } appAuthenticatorService.save(appAuthenticator); if(appSession.getFlow().equals(UpgradeLoginLevel.NAME)) { digidClient.sendNotificationMessage(appSession.getAccountId(), "ED024", "SMS20"); logger.debug("Sending notify email ED024 / SMS20 for device {}", appAuthenticator.getDeviceName()); } } appSession.setAppAuthenticationLevel(appAuthenticator.getAuthenticationLevel()); appSessionService.save(appSession); return Map.of("arrivalStatus", "OK"); }
@Test void checkVerifiedWidchecker(){ appSession.setRdaAction("upgrade_rda_widchecker"); when(appSessionService.getSession(any())).thenReturn(appSession); when(appAuthenticatorService.findByUserAppId(any())).thenReturn(appAuthenticator); when(switchService.digidAppSwitchEnabled()).thenReturn(Boolean.TRUE); when(switchService.digidRdaSwitchEnabled()).thenReturn(Boolean.TRUE); Map<String, String> result = rdaService.confirm(rdaConfirmRequest); verify(digidClient, times(1)).remoteLog("1321", Map.of("document_type", "driving_license", lowerUnderscore(ACCOUNT_ID), T_ACCOUNT_ID)); assertEquals("VERIFIED", appSession.getRdaSessionStatus()); assertEquals("rda", appAuthenticator.getIssuerType()); assertEquals("OK", result.get("arrivalStatus")); verify(idCheckDocumentRepository, times(1)).save(any(IdCheckDocument.class)); }
public static CustomWeighting.Parameters createWeightingParameters(CustomModel customModel, EncodedValueLookup lookup) { String key = customModel.toString(); Class<?> clazz = customModel.isInternal() ? INTERNAL_CACHE.get(key) : null; if (CACHE_SIZE > 0 && clazz == null) clazz = CACHE.get(key); if (clazz == null) { clazz = createClazz(customModel, lookup); if (customModel.isInternal()) { INTERNAL_CACHE.put(key, clazz); if (INTERNAL_CACHE.size() > 100) { CACHE.putAll(INTERNAL_CACHE); INTERNAL_CACHE.clear(); LoggerFactory.getLogger(CustomModelParser.class).warn("Internal cache must stay small but was " + INTERNAL_CACHE.size() + ". Cleared it. Misuse of CustomModel::internal?"); } } else if (CACHE_SIZE > 0) { CACHE.put(key, clazz); } } try { // The class does not need to be thread-safe as we create an instance per request CustomWeightingHelper prio = (CustomWeightingHelper) clazz.getDeclaredConstructor().newInstance(); prio.init(customModel, lookup, CustomModel.getAreasAsMap(customModel.getAreas())); return new CustomWeighting.Parameters( prio::getSpeed, prio::calcMaxSpeed, prio::getPriority, prio::calcMaxPriority, customModel.getDistanceInfluence() == null ? 0 : customModel.getDistanceInfluence(), customModel.getHeadingPenalty() == null ? Parameters.Routing.DEFAULT_HEADING_PENALTY : customModel.getHeadingPenalty()); } catch (ReflectiveOperationException ex) { throw new IllegalArgumentException("Cannot compile expression " + ex.getMessage(), ex); } }
@Test void testBackwardFunction() { CustomModel customModel = new CustomModel(); customModel.addToPriority(If("backward_car_access != car_access", MULTIPLY, "0.5")); customModel.addToSpeed(If("true", LIMIT, "100")); CustomWeighting.EdgeToDoubleMapping priorityMapping = CustomModelParser.createWeightingParameters(customModel, encodingManager). getEdgeToPriorityMapping(); BaseGraph graph = new BaseGraph.Builder(encodingManager).create(); EdgeIteratorState edge1 = graph.edge(0, 1).setDistance(100).set(accessEnc, true, false); EdgeIteratorState edge2 = graph.edge(1, 2).setDistance(100).set(accessEnc, true, true); assertEquals(0.5, priorityMapping.get(edge1, false), 1.e-6); assertEquals(1.0, priorityMapping.get(edge2, false), 1.e-6); }
public static SmtpCommand valueOf(CharSequence commandName) { ObjectUtil.checkNotNull(commandName, "commandName"); SmtpCommand command = COMMANDS.get(commandName.toString()); return command != null ? command : new SmtpCommand(AsciiString.of(commandName)); }
@Test public void equalsIgnoreCase() { assertEquals(SmtpCommand.MAIL, SmtpCommand.valueOf("mail")); assertEquals(SmtpCommand.valueOf("test"), SmtpCommand.valueOf("TEST")); }
public List<String> toMnemonic(byte[] entropy) { checkArgument(entropy.length % 4 == 0, () -> "entropy length not multiple of 32 bits"); checkArgument(entropy.length > 0, () -> "entropy is empty"); // We take initial entropy of ENT bits and compute its // checksum by taking first ENT / 32 bits of its SHA256 hash. byte[] hash = Sha256Hash.hash(entropy); boolean[] hashBits = bytesToBits(hash); boolean[] entropyBits = bytesToBits(entropy); int checksumLengthBits = entropyBits.length / 32; // We append these bits to the end of the initial entropy. boolean[] concatBits = new boolean[entropyBits.length + checksumLengthBits]; System.arraycopy(entropyBits, 0, concatBits, 0, entropyBits.length); System.arraycopy(hashBits, 0, concatBits, entropyBits.length, checksumLengthBits); // Next we take these concatenated bits and split them into // groups of 11 bits. Each group encodes number from 0-2047 // which is a position in a wordlist. We convert numbers into // words and use joined words as mnemonic sentence. ArrayList<String> words = new ArrayList<>(); int nwords = concatBits.length / 11; for (int i = 0; i < nwords; ++i) { int index = 0; for (int j = 0; j < 11; ++j) { index <<= 1; if (concatBits[(i * 11) + j]) index |= 0x1; } words.add(this.wordList.get(index)); } return words; }
@Test(expected = RuntimeException.class) public void testBadEntropyLength() throws Exception { byte[] entropy = ByteUtils.parseHex("7f7f7f7f7f7f7f7f7f7f7f7f7f7f"); mc.toMnemonic(entropy); }
public static <T> Map<Integer, List<T>> distributeObjects(int count, List<T> objects) { Map<Integer, List<T>> processorToObjects = IntStream.range(0, objects.size()) .mapToObj(i -> entry(i, objects.get(i))) .collect(groupingBy(e -> e.getKey() % count, mapping(Map.Entry::getValue, Collectors.toList()))); for (int i = 0; i < count; i++) { processorToObjects.putIfAbsent(i, emptyList()); } return processorToObjects; }
@Test public void test_distributeObjects() { // count == 1 assertArrayEquals( new int[][]{ new int[]{}}, distributeObjects(1, new int[]{})); assertArrayEquals( new int[][]{ new int[]{2}}, distributeObjects(1, new int[]{2})); assertArrayEquals( new int[][]{ new int[]{2, 4}}, distributeObjects(1, new int[]{2, 4})); // count == 3 assertArrayEquals( new int[][]{ new int[]{2}, new int[]{}, new int[]{}}, distributeObjects(3, new int[]{2})); assertArrayEquals( new int[][]{ new int[]{2}, new int[]{4}, new int[]{6}}, distributeObjects(3, new int[]{2, 4, 6})); assertArrayEquals( new int[][]{ new int[]{2, 8}, new int[]{4}, new int[]{6}}, distributeObjects(3, new int[]{2, 4, 6, 8})); }
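The test above goes through what appears to be an int[]-based helper; the focal method itself takes a List and returns a Map keyed by processor index. A standalone sketch of calling it directly (class name and example values are made up for illustration):

import static java.util.Collections.emptyList;
import static java.util.Map.entry;
import static java.util.stream.Collectors.groupingBy;
import static java.util.stream.Collectors.mapping;

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class DistributeObjectsSketch {

    // Copy of the focal method: each item goes to processor (index % count), and
    // every processor key 0..count-1 is present even if it receives no items.
    static <T> Map<Integer, List<T>> distributeObjects(int count, List<T> objects) {
        Map<Integer, List<T>> processorToObjects = IntStream.range(0, objects.size())
                .mapToObj(i -> entry(i, objects.get(i)))
                .collect(groupingBy(e -> e.getKey() % count,
                        mapping(Map.Entry::getValue, Collectors.toList())));
        for (int i = 0; i < count; i++) {
            processorToObjects.putIfAbsent(i, emptyList());
        }
        return processorToObjects;
    }

    public static void main(String[] args) {
        // Prints {0=[a, d], 1=[b], 2=[c]}
        System.out.println(distributeObjects(3, List.of("a", "b", "c", "d")));
    }
}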
@Override public void trackChannelEvent(String eventName) { }
@Test public void testTrackChannelEvent() { mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() { @Override public boolean onTrackEvent(String eventName, JSONObject eventProperties) { Assert.fail(); return false; } }); mSensorsAPI.trackChannelEvent("TestChannelEvent", new JSONObject()); }
static String headerLine(CSVFormat csvFormat) { return String.join(String.valueOf(csvFormat.getDelimiter()), csvFormat.getHeader()); }
@Test public void givenNoCommentMarker_doesntSkipLine() { CSVFormat csvFormat = csvFormat(); PCollection<String> input = pipeline.apply( Create.of(headerLine(csvFormat), "#comment", "a,1,1.1", "b,2,2.2", "c,3,3.3")); CsvIOStringToCsvRecord underTest = new CsvIOStringToCsvRecord(csvFormat); CsvIOParseResult<List<String>> result = input.apply(underTest); PAssert.that(result.getOutput()) .containsInAnyOrder( Arrays.asList( Collections.singletonList("#comment"), Arrays.asList("a", "1", "1.1"), Arrays.asList("b", "2", "2.2"), Arrays.asList("c", "3", "3.3"))); PAssert.that(result.getErrors()).empty(); pipeline.run(); }
public void close() { this.client.close(); }
@Test public void closeTest() { try (MockedStatic<Client> clientMockedStatic = mockStatic(Client.class)) { this.mockEtcd(clientMockedStatic); final EtcdClient etcdClient = new EtcdClient("url", 60L, 3000L); etcdClient.close(); } catch (Exception e) { throw new ShenyuException(e.getCause()); } }
@Override public PipelineDef parse(Path pipelineDefPath, Configuration globalPipelineConfig) throws Exception { return parse(mapper.readTree(pipelineDefPath.toFile()), globalPipelineConfig); }
@Test void testInvalidTimeZone() throws Exception { URL resource = Resources.getResource("definitions/pipeline-definition-minimized.yaml"); YamlPipelineDefinitionParser parser = new YamlPipelineDefinitionParser(); assertThatThrownBy( () -> parser.parse( Paths.get(resource.toURI()), Configuration.fromMap( ImmutableMap.<String, String>builder() .put( PIPELINE_LOCAL_TIME_ZONE.key(), "invalid time zone") .build()))) .isInstanceOf(IllegalArgumentException.class) .hasMessageContaining( "Invalid time zone. The valid value should be a Time Zone Database ID" + " such as 'America/Los_Angeles' to include daylight saving time. " + "Fixed offsets are supported using 'GMT-08:00' or 'GMT+08:00'. " + "Or use 'UTC' without time zone and daylight saving time."); }
public static boolean canDrop( FilterPredicate pred, List<ColumnChunkMetaData> columns, DictionaryPageReadStore dictionaries) { Objects.requireNonNull(pred, "pred cannot be null"); Objects.requireNonNull(columns, "columns cannot be null"); return pred.accept(new DictionaryFilter(columns, dictionaries)); }
@Test public void testInverseUdpMissingColumn() throws Exception { InInt32UDP nullRejecting = new InInt32UDP(ImmutableSet.of(42)); InInt32UDP nullAccepting = new InInt32UDP(Sets.newHashSet((Integer) null)); IntColumn fake = intColumn("missing_column"); assertTrue( "Should drop block for null accepting udp", canDrop(LogicalInverseRewriter.rewrite(not(userDefined(fake, nullAccepting))), ccmd, dictionaries)); assertFalse( "Should not drop block for null rejecting udp", canDrop(LogicalInverseRewriter.rewrite(not(userDefined(fake, nullRejecting))), ccmd, dictionaries)); }
static String generateIdFromName(String name) { return String.format("%s-%s", AUTO_GENERATED_ID_PREFIX, Objects.hash(name)); }
@Test void generateIdFromName() { String name = "name"; String wrongName = "wrong-name"; String retrieved = TupleIdentifier.generateIdFromName(name); assertThat(retrieved).isEqualTo(TupleIdentifier.generateIdFromName(name)) .isNotEqualTo(TupleIdentifier.generateIdFromName(wrongName)); }
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) { return api.send(request); }
@Test public void sendChatAction() { assertTrue(bot.execute(new SendChatAction(chatId, ChatAction.typing.name())).isOk()); assertTrue(bot.execute(new SendChatAction(chatId, ChatAction.typing)).isOk()); assertTrue(bot.execute(new SendChatAction(chatId, ChatAction.upload_photo)).isOk()); assertTrue(bot.execute(new SendChatAction(chatId, ChatAction.record_video)).isOk()); assertTrue(bot.execute(new SendChatAction(chatId, ChatAction.upload_video)).isOk()); assertTrue(bot.execute(new SendChatAction(chatId, ChatAction.record_voice)).isOk()); assertTrue(bot.execute(new SendChatAction(chatId, ChatAction.upload_voice)).isOk()); assertTrue(bot.execute(new SendChatAction(chatId, ChatAction.upload_document)).isOk()); assertTrue(bot.execute(new SendChatAction(chatId, ChatAction.find_location)).isOk()); assertTrue(bot.execute(new SendChatAction(chatId, ChatAction.record_video_note)).isOk()); assertTrue(bot.execute(new SendChatAction(chatId, ChatAction.upload_video_note)).isOk()); assertTrue(bot.execute(new SendChatAction(chatId, ChatAction.choose_sticker)).isOk()); assertTrue(bot.execute(new SendChatAction(forum, ChatAction.choose_sticker).messageThreadId(forumEditThread)).isOk()); }
public Mode.Bits toModeBits() { Mode.Bits bits = Mode.Bits.NONE; if (contains(AclAction.READ)) { bits = bits.or(Mode.Bits.READ); } if (contains(AclAction.WRITE)) { bits = bits.or(Mode.Bits.WRITE); } if (contains(AclAction.EXECUTE)) { bits = bits.or(Mode.Bits.EXECUTE); } return bits; }
@Test public void toModeBits() { AclActions actions = new AclActions(); assertEquals(Mode.Bits.NONE, actions.toModeBits()); actions = new AclActions(); actions.add(AclAction.READ); assertEquals(Mode.Bits.READ, actions.toModeBits()); actions = new AclActions(); actions.add(AclAction.WRITE); assertEquals(Mode.Bits.WRITE, actions.toModeBits()); actions = new AclActions(); actions.add(AclAction.EXECUTE); assertEquals(Mode.Bits.EXECUTE, actions.toModeBits()); actions = new AclActions(); actions.add(AclAction.READ); actions.add(AclAction.WRITE); assertEquals(Mode.Bits.READ_WRITE, actions.toModeBits()); actions = new AclActions(); actions.add(AclAction.READ); actions.add(AclAction.EXECUTE); assertEquals(Mode.Bits.READ_EXECUTE, actions.toModeBits()); actions = new AclActions(); actions.add(AclAction.WRITE); actions.add(AclAction.EXECUTE); assertEquals(Mode.Bits.WRITE_EXECUTE, actions.toModeBits()); actions = new AclActions(); actions.add(AclAction.READ); actions.add(AclAction.WRITE); actions.add(AclAction.EXECUTE); assertEquals(Mode.Bits.ALL, actions.toModeBits()); }
public static Optional<String> getTableName(final String tableMetaDataPath) { Pattern pattern = Pattern.compile(getShardingSphereDataNodePath() + "/([\\w\\-]+)/schemas/([\\w\\-]+)/tables" + "/([\\w\\-]+)$", Pattern.CASE_INSENSITIVE); Matcher matcher = pattern.matcher(tableMetaDataPath); return matcher.find() ? Optional.of(matcher.group(3)) : Optional.empty(); }
@Test void assertGetTableNameTableNameNotFoundScenario() { assertThat(ShardingSphereDataNode.getTableName("/statistics/databases/db_name/schemas/db_schema"), is(Optional.empty())); }
public static List<HollowSchema> parseCollectionOfSchemas(Reader reader) throws IOException { StreamTokenizer tokenizer = new StreamTokenizer(reader); configureTokenizer(tokenizer); List<HollowSchema> schemaList = new ArrayList<HollowSchema>(); HollowSchema schema = parseSchema(tokenizer); while (schema != null) { schemaList.add(schema); schema = parseSchema(tokenizer); } return schemaList; }
@Test public void testParseCollectionOfSchemas_reader() throws Exception { InputStream input = null; try { input = getClass().getResourceAsStream("/schema1.txt"); List<HollowSchema> schemas = HollowSchemaParser.parseCollectionOfSchemas(new BufferedReader(new InputStreamReader(input))); Assert.assertEquals("Should have two schemas", 2, schemas.size()); Assert.assertEquals("Should have Minion schema", "Minion", schemas.get(0).getName()); Assert.assertEquals("Should have String schema", "String", schemas.get(1).getName()); } finally { if (input != null) { input.close(); } } }
@Transactional public AccessKey create(String appId, AccessKey entity) { long count = accessKeyRepository.countByAppId(appId); if (count >= ACCESSKEY_COUNT_LIMIT) { throw new BadRequestException("AccessKeys count limit exceeded"); } entity.setId(0L); entity.setAppId(appId); entity.setDataChangeLastModifiedBy(entity.getDataChangeCreatedBy()); AccessKey accessKey = accessKeyRepository.save(entity); auditService.audit(AccessKey.class.getSimpleName(), accessKey.getId(), Audit.OP.INSERT, accessKey.getDataChangeCreatedBy()); return accessKey; }
@Test(expected = BadRequestException.class) public void testCreateWithException() { String appId = "someAppId"; String secret = "someSecret"; int maxCount = 5; for (int i = 0; i <= maxCount; i++) { AccessKey entity = assembleAccessKey(appId, secret); accessKeyService.create(appId, entity); } }
public void reset() { lock.lock(); try { cancellationException = null; clearElementsLocked(); } finally { lock.unlock(); } }
@Test(timeout = 10_000) public void runTestForMultipleConsumersAndProducers() throws Exception { CancellableQueue<String> queue = new CancellableQueue<>(100); runTestForMultipleConsumersAndProducers(queue); queue.reset(); runTestForMultipleConsumersAndProducers(queue); }
@Override public Optional<AuthenticatedDevice> authenticate(BasicCredentials basicCredentials) { boolean succeeded = false; String failureReason = null; try { final UUID accountUuid; final byte deviceId; { final Pair<String, Byte> identifierAndDeviceId = getIdentifierAndDeviceId(basicCredentials.getUsername()); accountUuid = UUID.fromString(identifierAndDeviceId.first()); deviceId = identifierAndDeviceId.second(); } Optional<Account> account = accountsManager.getByAccountIdentifier(accountUuid); if (account.isEmpty()) { failureReason = "noSuchAccount"; return Optional.empty(); } Optional<Device> device = account.get().getDevice(deviceId); if (device.isEmpty()) { failureReason = "noSuchDevice"; return Optional.empty(); } SaltedTokenHash deviceSaltedTokenHash = device.get().getAuthTokenHash(); if (deviceSaltedTokenHash.verify(basicCredentials.getPassword())) { succeeded = true; Account authenticatedAccount = updateLastSeen(account.get(), device.get()); if (deviceSaltedTokenHash.getVersion() != SaltedTokenHash.CURRENT_VERSION) { OLD_TOKEN_VERSION_COUNTER.increment(); authenticatedAccount = accountsManager.updateDeviceAuthentication( authenticatedAccount, device.get(), SaltedTokenHash.generateFor(basicCredentials.getPassword())); // new credentials have current version } return Optional.of(new AuthenticatedDevice(authenticatedAccount, device.get())); } else { failureReason = "incorrectPassword"; return Optional.empty(); } } catch (IllegalArgumentException | InvalidAuthorizationHeaderException iae) { failureReason = "invalidHeader"; return Optional.empty(); } finally { Tags tags = Tags.of( AUTHENTICATION_SUCCEEDED_TAG_NAME, String.valueOf(succeeded)); if (StringUtils.isNotBlank(failureReason)) { tags = tags.and(AUTHENTICATION_FAILURE_REASON_TAG_NAME, failureReason); } Metrics.counter(AUTHENTICATION_COUNTER_NAME, tags).increment(); } }
@Test void testAuthenticateIncorrectPassword() { final UUID uuid = UUID.randomUUID(); final byte deviceId = 1; final String password = "12345"; final Account account = mock(Account.class); final Device device = mock(Device.class); final SaltedTokenHash credentials = mock(SaltedTokenHash.class); clock.unpin(); when(accountsManager.getByAccountIdentifier(uuid)).thenReturn(Optional.of(account)); when(account.getUuid()).thenReturn(uuid); when(account.getDevice(deviceId)).thenReturn(Optional.of(device)); when(device.getId()).thenReturn(deviceId); when(device.getAuthTokenHash()).thenReturn(credentials); when(credentials.verify(password)).thenReturn(true); when(credentials.getVersion()).thenReturn(SaltedTokenHash.CURRENT_VERSION); final String incorrectPassword = password + "incorrect"; final Optional<AuthenticatedDevice> maybeAuthenticatedAccount = accountAuthenticator.authenticate(new BasicCredentials(uuid.toString(), incorrectPassword)); assertThat(maybeAuthenticatedAccount).isEmpty(); verify(credentials).verify(incorrectPassword); }
public static PathData[] expandAsGlob(String pattern, Configuration conf) throws IOException { Path globPath = new Path(pattern); FileSystem fs = globPath.getFileSystem(conf); FileStatus[] stats = fs.globStatus(globPath); PathData[] items = null; if (stats == null) { // remove any quoting in the glob pattern pattern = pattern.replaceAll("\\\\(.)", "$1"); // not a glob & file not found, so add the path with a null stat items = new PathData[]{ new PathData(fs, pattern, null) }; } else { // figure out what type of glob path was given, will convert globbed // paths to match the type to preserve relativity PathType globType; URI globUri = globPath.toUri(); if (globUri.getScheme() != null) { globType = PathType.HAS_SCHEME; } else if (!globUri.getPath().isEmpty() && new Path(globUri.getPath()).isAbsolute()) { globType = PathType.SCHEMELESS_ABSOLUTE; } else { globType = PathType.RELATIVE; } // convert stats to PathData items = new PathData[stats.length]; int i=0; for (FileStatus stat : stats) { URI matchUri = stat.getPath().toUri(); String globMatch = null; switch (globType) { case HAS_SCHEME: // use as-is, but remove authority if necessary if (globUri.getAuthority() == null) { matchUri = removeAuthority(matchUri); } globMatch = uriToString(matchUri, false); break; case SCHEMELESS_ABSOLUTE: // take just the uri's path globMatch = matchUri.getPath(); break; case RELATIVE: // make it relative to the current working dir URI cwdUri = fs.getWorkingDirectory().toUri(); globMatch = relativize(cwdUri, matchUri, stat.isDirectory()); break; } items[i++] = new PathData(fs, globMatch, stat); } } Arrays.sort(items); return items; }
@Test public void testGlobThrowsExceptionForUnreadableDir() throws Exception { Path obscuredDir = new Path("foo"); Path subDir = new Path(obscuredDir, "bar"); //so foo is non-empty fs.mkdirs(subDir); fs.setPermission(obscuredDir, new FsPermission((short)0)); //no access try { PathData.expandAsGlob("foo/*", conf); Assert.fail("Should throw IOException"); } catch (IOException ioe) { // expected } finally { // make sure the test directory can be deleted fs.setPermission(obscuredDir, new FsPermission((short)0755)); //default } }
public static String substVars(String val, PropertyContainer pc1) { return substVars(val, pc1, null); }
@Test(timeout = 1000) public void detectCircularReferences2() { context.putProperty("A", "${B}"); context.putProperty("B", "${C}"); context.putProperty("C", "${A}"); expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("Circular variable reference detected while parsing input [${A} --> ${B} --> ${C} --> ${A}]"); OptionHelper.substVars("${A}", context); }
public B proxy(String proxy) { this.proxy = proxy; return getThis(); }
@Test void proxy() { InterfaceBuilder builder = new InterfaceBuilder(); builder.proxy("mockproxyfactory"); Assertions.assertEquals("mockproxyfactory", builder.build().getProxy()); }
public static Long parseLedgerId(String name) { if (name == null || name.isEmpty()) { return null; } if (name.endsWith("-index")) { name = name.substring(0, name.length() - "-index".length()); } int pos = name.indexOf("-ledger-"); if (pos < 0) { return null; } try { return Long.parseLong(name.substring(pos + 8)); } catch (NumberFormatException err) { return null; } }
@Test public void parseLedgerIdTest() throws Exception { UUID id = UUID.randomUUID(); long ledgerId = 123124; String key = DataBlockUtils.dataBlockOffloadKey(ledgerId, id); String keyIndex = DataBlockUtils.indexBlockOffloadKey(ledgerId, id); assertEquals(ledgerId, DataBlockUtils.parseLedgerId(key).longValue()); assertEquals(ledgerId, DataBlockUtils.parseLedgerId(keyIndex).longValue()); assertNull(DataBlockUtils.parseLedgerId(null)); assertNull(DataBlockUtils.parseLedgerId("")); assertNull(DataBlockUtils.parseLedgerId("-ledger-")); assertNull(DataBlockUtils.parseLedgerId("something")); assertNull(DataBlockUtils.parseLedgerId("-ledger-index")); }
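A compact restatement of the parsing rules exercised above: an optional "-index" suffix is stripped first, then everything after the "-ledger-" marker is parsed as the ledger id. The example keys below are invented for illustration and do not claim to match the real offload key format.

public class ParseLedgerIdSketch {

    // Copy of the focal method's logic.
    static Long parseLedgerId(String name) {
        if (name == null || name.isEmpty()) {
            return null;
        }
        if (name.endsWith("-index")) {
            name = name.substring(0, name.length() - "-index".length());
        }
        int pos = name.indexOf("-ledger-");
        if (pos < 0) {
            return null;
        }
        try {
            return Long.parseLong(name.substring(pos + "-ledger-".length()));
        } catch (NumberFormatException err) {
            return null;
        }
    }

    public static void main(String[] args) {
        System.out.println(parseLedgerId("some-uuid-ledger-123124"));        // 123124
        System.out.println(parseLedgerId("some-uuid-ledger-123124-index"));  // 123124
        System.out.println(parseLedgerId("something"));                      // null — no "-ledger-" marker
        System.out.println(parseLedgerId("-ledger-"));                       // null — nothing after the marker
    }
}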
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) { return api.send(request); }
@Test public void kickChatMember() { BaseResponse response = bot.execute(new KickChatMember(channelName, chatId).untilDate(123).revokeMessages(true)); assertFalse(response.isOk()); assertEquals(400, response.errorCode()); assertEquals("Bad Request: can't remove chat owner", response.description()); }
@NonNull @Override public Object configure(CNode config, ConfigurationContext context) throws ConfiguratorException { return Stapler.lookupConverter(target) .convert( target, context.getSecretSourceResolver() .resolve(config.asScalar().toString())); }
@Test public void _int() throws Exception { Configurator c = registry.lookupOrFail(int.class); final Object value = c.configure(new Scalar("123"), context); assertEquals(123, (int) value); }
public static void createTopics( Logger log, String bootstrapServers, Map<String, String> commonClientConf, Map<String, String> adminClientConf, Map<String, NewTopic> topics, boolean failOnExisting) throws Throwable { // this method wraps the call to createTopics() that takes admin client, so that we can // unit test the functionality with MockAdminClient. The exception is caught and // re-thrown so that admin client is closed when the method returns. try (Admin adminClient = createAdminClient(bootstrapServers, commonClientConf, adminClientConf)) { createTopics(log, adminClient, topics, failOnExisting); } catch (Exception e) { log.warn("Failed to create or verify topics {}", topics, e); throw e; } }
@Test public void testCreateTopicsFailsIfAtLeastOneTopicExists() { adminClient.addTopic( false, TEST_TOPIC, Collections.singletonList(new TopicPartitionInfo(0, broker1, singleReplica, Collections.emptyList())), null); Map<String, NewTopic> newTopics = new HashMap<>(); newTopics.put(TEST_TOPIC, NEW_TEST_TOPIC); newTopics.put("another-topic", new NewTopic("another-topic", TEST_PARTITIONS, TEST_REPLICATION_FACTOR)); newTopics.put("one-more-topic", new NewTopic("one-more-topic", TEST_PARTITIONS, TEST_REPLICATION_FACTOR)); assertThrows(TopicExistsException.class, () -> WorkerUtils.createTopics(log, adminClient, newTopics, true)); }
public Optional<ScimGroupDto> findByGroupUuid(DbSession dbSession, String groupUuid) { return Optional.ofNullable(mapper(dbSession).findByGroupUuid(groupUuid)); }
@Test void findByGroupUuid_whenScimUuidNotFound_shouldReturnEmptyOptional() { assertThat(scimGroupDao.findByGroupUuid(db.getSession(), "unknownId")).isEmpty(); }
@Nullable public Iterable<String> searchDomains() { return searchDomains; }
@Test void searchDomains() { assertThat(builder.build().searchDomains()).isNull(); List<String> searchDomains = Collections.singletonList("searchDomains"); builder.searchDomains(searchDomains); assertThat(builder.build().searchDomains()).isEqualTo(searchDomains); }
public static URI parse(String gluePath) { requireNonNull(gluePath, "gluePath may not be null"); if (gluePath.isEmpty()) { return rootPackageUri(); } // Legacy from the Cucumber Eclipse plugin // Older versions of Cucumber allowed it. if (CLASSPATH_SCHEME_PREFIX.equals(gluePath)) { return rootPackageUri(); } if (nonStandardPathSeparatorInUse(gluePath)) { String standardized = replaceNonStandardPathSeparator(gluePath); return parseAssumeClasspathScheme(standardized); } if (isProbablyPackage(gluePath)) { String path = resourceNameOfPackageName(gluePath); return parseAssumeClasspathScheme(path); } return parseAssumeClasspathScheme(gluePath); }
@Test void glue_path_must_have_valid_identifier_parts() { Executable testMethod = () -> GluePath.parse("01-examples"); IllegalArgumentException actualThrown = assertThrows(IllegalArgumentException.class, testMethod); assertThat("Unexpected exception message", actualThrown.getMessage(), is(equalTo( "The glue path contained invalid identifiers 01-examples"))); }
@Override public boolean shouldHandle(Request request) { // we don't check the method here because we want to return 405 if it is anything but POST return MUX_URI_PATH.equals(request.getURI().getPath()); }
@Test(dataProvider = "multiplexerConfigurations") public void testIsNotMultiplexedRequest(MultiplexerRunMode multiplexerRunMode) throws Exception { MultiplexedRequestHandlerImpl multiplexer = createMultiplexer(null, multiplexerRunMode); RestRequest request = new RestRequestBuilder(new URI("/somethingElse")).setMethod(HttpMethod.POST.name()).build(); assertFalse(multiplexer.shouldHandle(request)); }
public void registerRunningQuery(final String appId) { queriesGuaranteedToBeRunningAtSomePoint.add(appId); }
@Test public void shouldReturnTrueForQueriesGuaranteedToBeRunningAtSomePoint() { // Given: ALL_APP_IDS.forEach(id -> service.registerRunningQuery(id)); // When: List<Boolean> results = ALL_APP_IDS.stream() .map(service::wasQueryGuaranteedToBeRunningAtSomePoint) .collect(Collectors.toList()); // Then: results.forEach(result -> assertEquals(result, true)); }
public int generate(Class<? extends CustomResource> crdClass, Writer out) throws IOException { ObjectNode node = nf.objectNode(); Crd crd = crdClass.getAnnotation(Crd.class); if (crd == null) { err(crdClass + " is not annotated with @Crd"); } else { node.put("apiVersion", "apiextensions.k8s.io/" + crdApiVersion) .put("kind", "CustomResourceDefinition") .putObject("metadata") .put("name", crd.spec().names().plural() + "." + crd.spec().group()); if (!labels.isEmpty()) { ((ObjectNode) node.get("metadata")) .putObject("labels") .setAll(labels.entrySet().stream() .collect(Collectors.<Map.Entry<String, String>, String, JsonNode, LinkedHashMap<String, JsonNode>>toMap( Map.Entry::getKey, e -> new TextNode( e.getValue() .replace("%group%", crd.spec().group()) .replace("%plural%", crd.spec().names().plural()) .replace("%singular%", crd.spec().names().singular())), (x, y) -> x, LinkedHashMap::new))); } node.set("spec", buildSpec(crdApiVersion, crd.spec(), crdClass)); } mapper.writeValue(out, node); return numErrors; }
@Test void simpleTest() throws IOException { StringWriter w = new StringWriter(); CrdGenerator crdGenerator = new CrdGenerator(KubeVersion.V1_16_PLUS, ApiVersion.V1, CrdGenerator.YAML_MAPPER, emptyMap(), crdGeneratorReporter, emptyList(), null, null, new CrdGenerator.NoneConversionStrategy(), null); crdGenerator.generate(ExampleCrd.class, w); String s = w.toString(); assertTrue(errors.isEmpty(), "CrdGenerator should not report any errors: " + errors); assertEquals(CrdTestUtils.readResource("simpleTest.yaml"), s); }
@Deprecated protected ExecutorService getReportCacheExecutor() { return reportCacheExecutor; }
@Test void testStoreProviderUsual() throws ClassNotFoundException { String interfaceName = "org.apache.dubbo.metadata.store.InterfaceNameTestService"; String version = "1.0.0"; String group = null; String application = "vic"; ThreadPoolExecutor reportCacheExecutor = (ThreadPoolExecutor) abstractMetadataReport.getReportCacheExecutor(); long completedTaskCount1 = reportCacheExecutor.getCompletedTaskCount(); MetadataIdentifier providerMetadataIdentifier = storeProvider(abstractMetadataReport, interfaceName, version, group, application); await().until(() -> reportCacheExecutor.getCompletedTaskCount() > completedTaskCount1); Assertions.assertNotNull( abstractMetadataReport.store.get(providerMetadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY))); }
@VisibleForTesting URI getDefaultHttpUri() { return getDefaultHttpUri("/"); }
@Test public void testHttpBindAddressIPv6Wildcard() throws RepositoryException, ValidationException { jadConfig.setRepository(new InMemoryRepository(ImmutableMap.of("http_bind_address", "[::]:9000"))).addConfigurationBean(configuration).process(); assertThat(configuration.getDefaultHttpUri()) .isNotNull() .isNotEqualTo(URI.create("http://[::]:9000")); }
public static String toString(Throwable e) { UnsafeStringWriter w = new UnsafeStringWriter(); PrintWriter p = new PrintWriter(w); p.print(e.getClass().getName()); if (e.getMessage() != null) { p.print(": " + e.getMessage()); } p.println(); try { e.printStackTrace(p); return w.toString(); } finally { p.close(); } }
@Test void testExceptionToString() throws Exception { assertThat( StringUtils.toString(new RuntimeException("abc")), containsString("java.lang.RuntimeException: abc")); }
@Override public List<ActionParameter> getParameters() { return List.of( ActionParameter.from("message", "The message to output to the user, special characters should be escaped.") ); }
@Test void testGetParameters() { List<ActionParameter> parameters = outputMessageAction.getParameters(); assertEquals(1, parameters.size()); ActionParameter parameter = parameters.get(0); assertEquals("message", parameter.getName()); assertTrue(parameter.getDescription().contains("The message to output to the user")); }
public static List<Path> pluginUrls(Path topPath) throws IOException { boolean containsClassFiles = false; Set<Path> archives = new TreeSet<>(); LinkedList<DirectoryEntry> dfs = new LinkedList<>(); Set<Path> visited = new HashSet<>(); if (isArchive(topPath)) { return Collections.singletonList(topPath); } DirectoryStream<Path> topListing = Files.newDirectoryStream( topPath, PLUGIN_PATH_FILTER ); dfs.push(new DirectoryEntry(topListing)); visited.add(topPath); try { while (!dfs.isEmpty()) { Iterator<Path> neighbors = dfs.peek().iterator; if (!neighbors.hasNext()) { dfs.pop().stream.close(); continue; } Path adjacent = neighbors.next(); if (Files.isSymbolicLink(adjacent)) { try { Path symlink = Files.readSymbolicLink(adjacent); // if symlink is absolute resolve() returns the absolute symlink itself Path parent = adjacent.getParent(); if (parent == null) { continue; } Path absolute = parent.resolve(symlink).toRealPath(); if (Files.exists(absolute)) { adjacent = absolute; } else { continue; } } catch (IOException e) { // See https://issues.apache.org/jira/browse/KAFKA-6288 for a reported // failure. Such a failure at this stage is not easily reproducible and // therefore an exception is caught and ignored after issuing a // warning. This allows class scanning to continue for non-broken plugins. log.warn( "Resolving symbolic link '{}' failed. Ignoring this path.", adjacent, e ); continue; } } if (!visited.contains(adjacent)) { visited.add(adjacent); if (isArchive(adjacent)) { archives.add(adjacent); } else if (isClassFile(adjacent)) { containsClassFiles = true; } else { DirectoryStream<Path> listing = Files.newDirectoryStream( adjacent, PLUGIN_PATH_FILTER ); dfs.push(new DirectoryEntry(listing)); } } } } finally { while (!dfs.isEmpty()) { dfs.pop().stream.close(); } } if (containsClassFiles) { if (archives.isEmpty()) { return Collections.singletonList(topPath); } log.warn("Plugin path contains both java archives and class files. Returning only the" + " archives"); } return Arrays.asList(archives.toArray(new Path[0])); }
@Test public void testPluginUrlsWithRelativeSymlinkBackwards() throws Exception { createBasicDirectoryLayout(); Path anotherPath = rootDir.resolve("moreplugins"); Files.createDirectories(anotherPath); anotherPath = anotherPath.toRealPath(); Files.createDirectories(anotherPath.resolve("connectorB-deps")); Files.createSymbolicLink( pluginPath.resolve("connectorB/deps/symlink"), Paths.get("../../../moreplugins/connectorB-deps") ); List<Path> expectedUrls = createBasicExpectedUrls(); expectedUrls.add(Files.createFile(anotherPath.resolve("connectorB-deps/converter.jar"))); assertUrls(expectedUrls, PluginUtils.pluginUrls(pluginPath)); }
public static String toHexString(byte[] input, int offset, int length, boolean withPrefix) { final String output = new String(toHexCharArray(input, offset, length)); return withPrefix ? new StringBuilder(HEX_PREFIX).append(output).toString() : output; }
@Test public void testToHexString() { assertEquals(Numeric.toHexString(new byte[] {}), ("0x")); assertEquals(Numeric.toHexString(new byte[] {0x1}), ("0x01")); assertEquals(Numeric.toHexString(HEX_RANGE_ARRAY), (HEX_RANGE_STRING)); byte[] input = {(byte) 0x12, (byte) 0x34, (byte) 0x56, (byte) 0x78}; assertEquals(Numeric.toHexString(input, 0, input.length, false), ("12345678")); assertEquals(Numeric.toHexString(input, 0, 2, false), ("1234")); assertEquals(Numeric.toHexString(input, 2, 2, false), ("5678")); }
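The offset/length/prefix behavior above is simple enough to restate in a few lines. The sketch below is a plain re-implementation for illustration, not the library's own hex helper:

public class ToHexStringSketch {

    private static final char[] HEX = "0123456789abcdef".toCharArray();

    // Converts length bytes starting at offset, optionally prefixed with "0x".
    static String toHexString(byte[] input, int offset, int length, boolean withPrefix) {
        StringBuilder sb = new StringBuilder(withPrefix ? "0x" : "");
        for (int i = offset; i < offset + length; i++) {
            sb.append(HEX[(input[i] >> 4) & 0xF]).append(HEX[input[i] & 0xF]);
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        byte[] input = {0x12, 0x34, 0x56, 0x78};
        System.out.println(toHexString(input, 0, input.length, true)); // 0x12345678
        System.out.println(toHexString(input, 2, 2, false));           // 5678
    }
}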
@LiteralParameters("x") @ScalarOperator(GREATER_THAN) @SqlType(StandardTypes.BOOLEAN) public static boolean greaterThan(@SqlType("char(x)") Slice left, @SqlType("char(x)") Slice right) { return compareChars(left, right) > 0; }
@Test public void testGreaterThan() { assertFunction("cast('bar' as char(5)) > cast('foo' as char(3))", BOOLEAN, false); assertFunction("cast('foo' as char(5)) > cast('bar' as char(3))", BOOLEAN, true); assertFunction("cast('bar' as char(3)) > cast('foo' as char(5))", BOOLEAN, false); assertFunction("cast('foo' as char(3)) > cast('bar' as char(5))", BOOLEAN, true); assertFunction("cast('foo' as char(3)) > cast('foo' as char(3))", BOOLEAN, false); assertFunction("cast('foo' as char(3)) > cast('foo' as char(5))", BOOLEAN, false); assertFunction("cast('foo' as char(5)) > cast('foo' as char(3))", BOOLEAN, false); assertFunction("cast('foo' as char(3)) > cast('bar' as char(3))", BOOLEAN, true); assertFunction("cast('bar' as char(3)) > cast('foo' as char(3))", BOOLEAN, false); assertFunction("cast('foobar' as char(6)) > cast('foobaz' as char(6))", BOOLEAN, false); assertFunction("cast('foob r' as char(6)) > cast('foobar' as char(6))", BOOLEAN, false); assertFunction("cast(' ' as char(1)) > cast('\0' as char(1))", BOOLEAN, true); assertFunction("cast('' as char(0)) > cast('\0' as char(1))", BOOLEAN, true); assertFunction("cast('abc' as char(4)) > cast('abc\0' as char(4))", BOOLEAN, true); // 'abc' is implicitly padded with spaces -> 'abc' is greater assertFunction("cast('\0 ' as char(2)) > cast('\0' as char(1))", BOOLEAN, false); assertFunction("cast('\0 ' as char(2)) > cast('\0' as char(2))", BOOLEAN, false); // '\0' is implicitly padded with spaces -> both are equal assertFunction("cast('\0 a' as char(3)) > cast('\0' as char(3))", BOOLEAN, true); }
public String getArgs() { return args; }
@Test @DirtiesContext public void testCreateEndpointWithArgs3() throws Exception { String args = "RAW(arg1+arg2 arg3)"; // Just avoid URI encoding by using the RAW() ExecEndpoint e = createExecEndpoint("exec:test?args=" + args); assertEquals("arg1+arg2 arg3", e.getArgs()); }
@GetMapping("/id/{id}") public ShenyuAdminResult queryById(@PathVariable("id") @Valid @Existed(provider = TagMapper.class, message = "tag is not existed") final String id) { TagVO tagVO = tagService.findById(id); return ShenyuAdminResult.success(ShenyuResultMessage.DETAIL_SUCCESS, tagVO); }
@Test public void testqueryById() throws Exception { given(tagService.findById("123")).willReturn(buildTagVO()); this.mockMvc.perform(MockMvcRequestBuilders.get("/tag/id/{id}", "123")) .andExpect(status().isOk()) .andExpect(jsonPath("$.message", is(ShenyuResultMessage.DETAIL_SUCCESS))) .andReturn(); }
@Override public void onCreate( final ServiceContext serviceContext, final MetaStore metaStore, final QueryMetadata queryMetadata) { if (perQuery.containsKey(queryMetadata.getQueryId())) { return; } perQuery.put( queryMetadata.getQueryId(), new PerQueryListener( metrics, metricsPrefix, queryMetadata.getQueryId().toString(), metricsTags ) ); }
@Test public void shouldInitiallyHaveInitialState() { // When: listener.onCreate(serviceContext, metaStore, query); // Then: assertThat(currentGaugeValue(METRIC_NAME_1), is("-")); assertThat(currentGaugeValue(METRIC_NAME_2), is("NO_ERROR")); assertThat(currentGaugeNumValue(NUM_METRIC_NAME_1), is(-1)); assertThat(currentGaugeNumValue(NUM_METRIC_NAME_2), is(-1)); assertThat(currentGaugeNumValue(NUM_METRIC_NAME_3), is(-1)); }
@Override public JibContainerBuilder createJibContainerBuilder( JavaContainerBuilder javaContainerBuilder, ContainerizingMode containerizingMode) { try { FileCollection projectDependencies = project.files( project.getConfigurations().getByName(configurationName).getResolvedConfiguration() .getResolvedArtifacts().stream() .filter( artifact -> artifact.getId().getComponentIdentifier() instanceof ProjectComponentIdentifier) .map(ResolvedArtifact::getFile) .collect(Collectors.toList())); if (isWarProject()) { String warFilePath = getWarFilePath(); log(LogEvent.info("WAR project identified, creating WAR image from: " + warFilePath)); Path explodedWarPath = tempDirectoryProvider.newDirectory(); ZipUtil.unzip(Paths.get(warFilePath), explodedWarPath); return JavaContainerBuilderHelper.fromExplodedWar( javaContainerBuilder, explodedWarPath, projectDependencies.getFiles().stream().map(File::getName).collect(Collectors.toSet())); } SourceSet mainSourceSet = getMainSourceSet(); FileCollection classesOutputDirectories = mainSourceSet.getOutput().getClassesDirs().filter(File::exists); Path resourcesOutputDirectory = mainSourceSet.getOutput().getResourcesDir().toPath(); FileCollection allFiles = project.getConfigurations().getByName(configurationName).filter(File::exists); FileCollection nonProjectDependencies = allFiles .minus(classesOutputDirectories) .minus(projectDependencies) .filter(file -> !file.toPath().equals(resourcesOutputDirectory)); FileCollection snapshotDependencies = nonProjectDependencies.filter(file -> file.getName().contains("SNAPSHOT")); FileCollection dependencies = nonProjectDependencies.minus(snapshotDependencies); // Adds dependency files javaContainerBuilder .addDependencies( dependencies.getFiles().stream().map(File::toPath).collect(Collectors.toList())) .addSnapshotDependencies( snapshotDependencies.getFiles().stream() .map(File::toPath) .collect(Collectors.toList())) .addProjectDependencies( projectDependencies.getFiles().stream() .map(File::toPath) .collect(Collectors.toList())); switch (containerizingMode) { case EXPLODED: // Adds resource files if (Files.exists(resourcesOutputDirectory)) { javaContainerBuilder.addResources(resourcesOutputDirectory); } // Adds class files for (File classesOutputDirectory : classesOutputDirectories) { javaContainerBuilder.addClasses(classesOutputDirectory.toPath()); } if (classesOutputDirectories.isEmpty()) { log(LogEvent.warn("No classes files were found - did you compile your project?")); } break; case PACKAGED: // Add a JAR Jar jarTask = (Jar) project.getTasks().findByName("jar"); Path jarPath = jarTask.getArchiveFile().get().getAsFile().toPath(); log(LogEvent.debug("Using JAR: " + jarPath)); javaContainerBuilder.addToClasspath(jarPath); break; default: throw new IllegalStateException("unknown containerizing mode: " + containerizingMode); } return javaContainerBuilder.toContainerBuilder(); } catch (IOException ex) { throw new GradleException("Obtaining project build output files failed", ex); } }
@Test public void testCreateContainerBuilder_noErrorIfWebInfDoesNotExist() throws IOException, InvalidImageReferenceException { setUpWarProject(temporaryFolder.getRoot().toPath()); assertThat( gradleProjectProperties.createJibContainerBuilder( JavaContainerBuilder.from("ignored"), ContainerizingMode.EXPLODED)) .isNotNull(); }
@Override public int size() { return size; }
@Test public void sizeIsInitiallyZero() { assertEquals(0, set.size()); }
@Nonnull public <T> T getInstance(@Nonnull Class<T> type) { return getInstance(new Key<>(type)); }
@Test public void whenNoImplOrServiceOrDefaultSpecified_shouldThrow() throws Exception { try { injector.getInstance(Umm.class); fail(); } catch (InjectionException e) { // ok } }
private static List<Long> getAllComputeNodeIds() throws DdlException { SystemInfoService infoService = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo(); List<Long> allComputeNodeIds = Lists.newArrayList(); if (RunMode.isSharedDataMode()) { // check warehouse long warehouseId = ConnectContext.get().getCurrentWarehouseId(); List<Long> computeNodeIds = GlobalStateMgr.getCurrentState().getWarehouseMgr().getAllComputeNodeIds(warehouseId); if (computeNodeIds.isEmpty()) { Warehouse warehouse = GlobalStateMgr.getCurrentState().getWarehouseMgr().getWarehouse(warehouseId); throw new DdlException("no available compute nodes in warehouse " + warehouse.getName()); } allComputeNodeIds.addAll(computeNodeIds); } else { allComputeNodeIds = infoService.getBackendIds(false); } return allComputeNodeIds; }
@Test public void testGetTabletDistributionForSharedDataMode() throws IllegalAccessException, IllegalArgumentException, InvocationTargetException { new MockUp<RunMode>() { @Mock public RunMode getCurrentRunMode() { return RunMode.SHARED_DATA; } }; new Expectations() { { ConnectContext.get(); minTimes = 0; result = connectContext; long warehouseId = 10000L; connectContext.getCurrentWarehouseId(); minTimes = 0; result = warehouseId; GlobalStateMgr.getCurrentState().getWarehouseMgr().getAllComputeNodeIds(warehouseId); minTimes = 0; result = Lists.newArrayList(10003L, 10004L, 10005L); } }; Object[] args = new Object[] {CatalogMocker.TEST_DB_NAME, CatalogMocker.TEST_TBL_NAME, null}; List<List<String>> result = (List<List<String>>) getTabletDistributionMethod.invoke(null, args); Assert.assertEquals(3, result.size()); }
public static BytesInput empty() { return EMPTY_BYTES_INPUT; }
@Test public void testEmpty() throws IOException { byte[] data = new byte[0]; Supplier<BytesInput> factory = () -> BytesInput.empty(); validate(data, factory); }
@Override public DataflowPipelineJob run(Pipeline pipeline) { // Multi-language pipelines and pipelines that include upgrades should automatically be upgraded // to Runner v2. if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_runner_v2")) { LOG.info( "Automatically enabling Dataflow Runner v2 since the pipeline used cross-language" + " transforms or pipeline needed a transform upgrade."); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build()); } } if (useUnifiedWorker(options)) { if (hasExperiment(options, "disable_runner_v2") || hasExperiment(options, "disable_runner_v2_until_2023") || hasExperiment(options, "disable_prime_runner_v2")) { throw new IllegalArgumentException( "Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set."); } List<String> experiments = new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("use_runner_v2")) { experiments.add("use_runner_v2"); } if (!experiments.contains("use_unified_worker")) { experiments.add("use_unified_worker"); } if (!experiments.contains("beam_fn_api")) { experiments.add("beam_fn_api"); } if (!experiments.contains("use_portable_job_submission")) { experiments.add("use_portable_job_submission"); } options.setExperiments(ImmutableList.copyOf(experiments)); } logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline); logWarningIfBigqueryDLQUnused(pipeline); if (shouldActAsStreaming(pipeline)) { options.setStreaming(true); if (useUnifiedWorker(options)) { options.setEnableStreamingEngine(true); List<String> experiments = new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("enable_streaming_engine")) { experiments.add("enable_streaming_engine"); } if (!experiments.contains("enable_windmill_service")) { experiments.add("enable_windmill_service"); } } } if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) { ProjectionPushdownOptimizer.optimize(pipeline); } LOG.info( "Executing pipeline on the Dataflow Service, which will have billing implications " + "related to Google Compute Engine usage and other Google Cloud Services."); DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class); String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions); // This incorrectly puns the worker harness container image (which implements v1beta3 API) // with the SDK harness image (which implements Fn API). // // The same Environment is used in different and contradictory ways, depending on whether // it is a v1 or v2 job submission. RunnerApi.Environment defaultEnvironmentForDataflow = Environments.createDockerEnvironment(workerHarnessContainerImageURL); // The SdkComponents for portable an non-portable job submission must be kept distinct. Both // need the default environment. 
SdkComponents portableComponents = SdkComponents.create(); portableComponents.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); RunnerApi.Pipeline portablePipelineProto = PipelineTranslation.toProto(pipeline, portableComponents, false); // Note that `stageArtifacts` has to be called before `resolveArtifact` because // `resolveArtifact` updates local paths to staged paths in pipeline proto. portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto); List<DataflowPackage> packages = stageArtifacts(portablePipelineProto); portablePipelineProto = resolveArtifacts(portablePipelineProto); portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options); if (LOG.isDebugEnabled()) { LOG.debug( "Portable pipeline proto:\n{}", TextFormat.printer().printToString(portablePipelineProto)); } // Stage the portable pipeline proto, retrieving the staged pipeline path, then update // the options on the new job // TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation()); byte[] serializedProtoPipeline = portablePipelineProto.toByteArray(); DataflowPackage stagedPipeline = options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME); dataflowOptions.setPipelineUrl(stagedPipeline.getLocation()); if (useUnifiedWorker(options)) { LOG.info("Skipping v1 transform replacements since job will run on v2."); } else { // Now rewrite things to be as needed for v1 (mutates the pipeline) // This way the job submitted is valid for v1 and v2, simultaneously replaceV1Transforms(pipeline); } // Capture the SdkComponents for look up during step translations SdkComponents dataflowV1Components = SdkComponents.create(); dataflowV1Components.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); // No need to perform transform upgrading for the Runner v1 proto. RunnerApi.Pipeline dataflowV1PipelineProto = PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false); if (LOG.isDebugEnabled()) { LOG.debug( "Dataflow v1 pipeline proto:\n{}", TextFormat.printer().printToString(dataflowV1PipelineProto)); } // Set a unique client_request_id in the CreateJob request. // This is used to ensure idempotence of job creation across retried // attempts to create a job. Specifically, if the service returns a job with // a different client_request_id, it means the returned one is a different // job previously created with the same job name, and that the job creation // has been effectively rejected. The SDK should return // Error::Already_Exists to user in that case. 
int randomNum = new Random().nextInt(9000) + 1000; String requestId = DateTimeFormat.forPattern("YYYYMMddHHmmssmmm") .withZone(DateTimeZone.UTC) .print(DateTimeUtils.currentTimeMillis()) + "_" + randomNum; JobSpecification jobSpecification = translator.translate( pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages); if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_staged_dataflow_worker_jar")) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("use_staged_dataflow_worker_jar") .build()); } } Job newJob = jobSpecification.getJob(); try { newJob .getEnvironment() .setSdkPipelineOptions( MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class)); } catch (IOException e) { throw new IllegalArgumentException( "PipelineOptions specified failed to serialize to JSON.", e); } newJob.setClientRequestId(requestId); DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo(); String version = dataflowRunnerInfo.getVersion(); checkState( !"${pom.version}".equals(version), "Unable to submit a job to the Dataflow service with unset version ${pom.version}"); LOG.info("Dataflow SDK version: {}", version); newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties()); // The Dataflow Service may write to the temporary directory directly, so // must be verified. if (!isNullOrEmpty(options.getGcpTempLocation())) { newJob .getEnvironment() .setTempStoragePrefix( dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation())); } newJob.getEnvironment().setDataset(options.getTempDatasetId()); if (options.getWorkerRegion() != null) { newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion()); } if (options.getWorkerZone() != null) { newJob.getEnvironment().setWorkerZone(options.getWorkerZone()); } if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED"); } else if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED"); } // Represent the minCpuPlatform pipeline option as an experiment, if not already present. if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); List<String> minCpuFlags = experiments.stream() .filter(p -> p.startsWith("min_cpu_platform")) .collect(Collectors.toList()); if (minCpuFlags.isEmpty()) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform()) .build()); } else { LOG.warn( "Flag min_cpu_platform is defined in both top level PipelineOption, " + "as well as under experiments. Proceed using {}.", minCpuFlags.get(0)); } } newJob .getEnvironment() .setExperiments( ImmutableList.copyOf( firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()))); // Set the Docker container image that executes Dataflow worker harness, residing in Google // Container Registry. Translator is guaranteed to create a worker pool prior to this point. // For runner_v1, only worker_harness_container is set. 
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same // value. String containerImage = getContainerImageForJob(options); for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) { workerPool.setWorkerHarnessContainerImage(containerImage); } configureSdkHarnessContainerImages(options, portablePipelineProto, newJob); newJob.getEnvironment().setVersion(getEnvironmentVersion(options)); if (hooks != null) { hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment()); } // enable upload_graph when the graph is too large byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8); int jobGraphByteSize = jobGraphBytes.length; if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES && !hasExperiment(options, "upload_graph") && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build()); LOG.info( "The job graph size ({} in bytes) is larger than {}. Automatically add " + "the upload_graph option to experiments.", jobGraphByteSize, CREATE_JOB_REQUEST_LIMIT_BYTES); } if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) { ArrayList<String> experiments = new ArrayList<>(options.getExperiments()); while (experiments.remove("upload_graph")) {} options.setExperiments(experiments); LOG.warn( "The upload_graph experiment was specified, but it does not apply " + "to runner v2 jobs. Option has been automatically removed."); } // Upload the job to GCS and remove the graph object from the API call. The graph // will be downloaded from GCS by the service. if (hasExperiment(options, "upload_graph")) { DataflowPackage stagedGraph = options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME); newJob.getSteps().clear(); newJob.setStepsLocation(stagedGraph.getLocation()); } if (!isNullOrEmpty(options.getDataflowJobFile()) || !isNullOrEmpty(options.getTemplateLocation())) { boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation()); if (isTemplate) { checkArgument( isNullOrEmpty(options.getDataflowJobFile()), "--dataflowJobFile and --templateLocation are mutually exclusive."); } String fileLocation = firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile()); checkArgument( fileLocation.startsWith("/") || fileLocation.startsWith("gs://"), "Location must be local or on Cloud Storage, got %s.", fileLocation); ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */); String workSpecJson = DataflowPipelineTranslator.jobToString(newJob); try (PrintWriter printWriter = new PrintWriter( new BufferedWriter( new OutputStreamWriter( Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)), UTF_8)))) { printWriter.print(workSpecJson); LOG.info("Printed job specification to {}", fileLocation); } catch (IOException ex) { String error = String.format("Cannot create output file at %s", fileLocation); if (isTemplate) { throw new RuntimeException(error, ex); } else { LOG.warn(error, ex); } } if (isTemplate) { LOG.info("Template successfully created."); return new DataflowTemplateJob(); } } String jobIdToUpdate = null; if (options.isUpdate()) { jobIdToUpdate = getJobIdFromName(options.getJobName()); newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setReplaceJobId(jobIdToUpdate); } if (options.getCreateFromSnapshot() != null && 
!options.getCreateFromSnapshot().isEmpty()) { newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot()); } Job jobResult; try { jobResult = dataflowClient.createJob(newJob); } catch (GoogleJsonResponseException e) { String errorMessages = "Unexpected errors"; if (e.getDetails() != null) { if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) { errorMessages = "The size of the serialized JSON representation of the pipeline " + "exceeds the allowable limit. " + "For more information, please see the documentation on job submission:\n" + "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs"; } else { errorMessages = e.getDetails().getMessage(); } } throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e); } catch (IOException e) { throw new RuntimeException("Failed to create a workflow job", e); } // Use a raw client for post-launch monitoring, as status calls may fail // regularly and need not be retried automatically. DataflowPipelineJob dataflowPipelineJob = new DataflowPipelineJob( DataflowClient.create(options), jobResult.getId(), options, jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(), portablePipelineProto); // If the service returned client request id, the SDK needs to compare it // with the original id generated in the request, if they are not the same // (i.e., the returned job is not created by this request), throw // DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException // depending on whether this is a reload or not. if (jobResult.getClientRequestId() != null && !jobResult.getClientRequestId().isEmpty() && !jobResult.getClientRequestId().equals(requestId)) { // If updating a job. if (options.isUpdate()) { throw new DataflowJobAlreadyUpdatedException( dataflowPipelineJob, String.format( "The job named %s with id: %s has already been updated into job id: %s " + "and cannot be updated again.", newJob.getName(), jobIdToUpdate, jobResult.getId())); } else { throw new DataflowJobAlreadyExistsException( dataflowPipelineJob, String.format( "There is already an active job named %s with id: %s. If you want to submit a" + " second job, try again by setting a different name using --jobName.", newJob.getName(), jobResult.getId())); } } LOG.info( "To access the Dataflow monitoring console, please navigate to {}", MonitoringUtil.getJobMonitoringPageURL( options.getProject(), options.getRegion(), jobResult.getId())); LOG.info("Submitted job: {}", jobResult.getId()); LOG.info( "To cancel the job using the 'gcloud' tool, run:\n> {}", MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId())); return dataflowPipelineJob; }
@Test public void testTemplateRunnerLoggedErrorForFile() throws Exception { DataflowPipelineOptions options = PipelineOptionsFactory.as(DataflowPipelineOptions.class); options.setJobName("TestJobName"); options.setRunner(DataflowRunner.class); options.setTemplateLocation("//bad/path"); options.setProject("test-project"); options.setRegion(REGION_ID); options.setTempLocation(tmpFolder.getRoot().getPath()); options.setGcpCredential(new TestCredential()); options.setPathValidatorClass(NoopPathValidator.class); Pipeline p = Pipeline.create(options); thrown.expectMessage("Cannot create output file at"); thrown.expect(RuntimeException.class); p.run(); }
@Override public boolean isEnvironmentEmpty() { for (EnvironmentConfig part : this) { if (!part.isEnvironmentEmpty()) return false; } return true; }
@Test void shouldReturnTrueIfChildConfigContainsNoPipelineAgentsAndVariables() { assertTrue(singleEnvironmentConfig.isEnvironmentEmpty()); }
public static CurlOption parse(String cmdLine) { List<String> args = ShellWords.parse(cmdLine); URI url = null; HttpMethod method = HttpMethod.PUT; List<Entry<String, String>> headers = new ArrayList<>(); Proxy proxy = NO_PROXY; while (!args.isEmpty()) { String arg = args.remove(0); if (arg.equals("-X")) { String methodArg = removeArgFor(arg, args); method = HttpMethod.parse(methodArg); } else if (arg.equals("-H")) { String headerArg = removeArgFor(arg, args); SimpleEntry<String, String> e = parseHeader(headerArg); headers.add(e); } else if (arg.equals("-x")) { String proxyArg = removeArgFor(arg, args); proxy = parseProxy(proxyArg); } else { if (url != null) { throw new IllegalArgumentException("'" + cmdLine + "' was not a valid curl command"); } url = parseUrl(arg); } } if (url == null) { throw new IllegalArgumentException("'" + cmdLine + "' was not a valid curl command"); } return new CurlOption(proxy, method, url, headers); }
@Test public void must_provide_valid_proxy_domain() { String uri = "https://example.com -x https://:3129"; IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> CurlOption.parse(uri)); assertThat(exception.getMessage(), is("'https://:3129' did not have a valid proxy host")); }
@Override public <T> T convert(DataTable dataTable, Type type) { return convert(dataTable, type, false); }
@Test void convert_to_single_object__single_cell__using_default_transformer() { DataTable table = parse("| BLACK_BISHOP |"); registry.setDefaultDataTableEntryTransformer(TABLE_ENTRY_BY_TYPE_CONVERTER_SHOULD_NOT_BE_USED); registry.setDefaultDataTableCellTransformer(JACKSON_TABLE_CELL_BY_TYPE_CONVERTER); assertEquals(Piece.BLACK_BISHOP, converter.convert(table, Piece.class)); }
@Override public int getOrder() { return PluginEnum.CRYPTOR_RESPONSE.getCode(); }
@Test public void getOrderTest() { final int result = cryptorResponsePlugin.getOrder(); assertEquals(PluginEnum.CRYPTOR_RESPONSE.getCode(), result); }
public static FindKV findKV(String regex, int keyGroup, int valueGroup) { return findKV(Pattern.compile(regex), keyGroup, valueGroup); }
@Test @Category(NeedsRunner.class) public void testKVMatchesName() { PCollection<KV<String, String>> output = p.apply(Create.of("a b c")) .apply(Regex.findKV("a (?<keyname>b) (?<valuename>c)", "keyname", "valuename")); PAssert.that(output).containsInAnyOrder(KV.of("b", "c")); p.run(); }
@Override public void init(TbContext ctx, TbNodeConfiguration configuration) throws TbNodeException { this.config = TbNodeUtils.convert(configuration, TbJsonPathNodeConfiguration.class); this.jsonPathValue = config.getJsonPath(); if (!TbJsonPathNodeConfiguration.DEFAULT_JSON_PATH.equals(this.jsonPathValue)) { this.configurationJsonPath = Configuration.builder() .jsonProvider(new JacksonJsonNodeJsonProvider()) .build(); this.jsonPath = JsonPath.compile(config.getJsonPath()); } }
@Test void givenDefaultConfig_whenInit_thenFail() { config.setJsonPath(""); nodeConfiguration = new TbNodeConfiguration(JacksonUtil.valueToTree(config)); assertThatThrownBy(() -> node.init(ctx, nodeConfiguration)).isInstanceOf(IllegalArgumentException.class); }
public String render(Object o) { StringBuilder result = new StringBuilder(template.length()); render(o, result); return result.toString(); }
@Test public void valuesSubstitutedIntoTemplate() { Template template = new Template("Hello {{value}} "); assertEquals("Hello World ", template.render(foo)); }
@Override public boolean needsTaskCommit(TaskAttemptContext context) { // We need to commit if this is the last phase of a MapReduce process return TaskType.REDUCE.equals(context.getTaskAttemptID().getTaskID().getTaskType()) || context.getJobConf().getNumReduceTasks() == 0; }
@Test public void testNeedsTaskCommit() { HiveIcebergOutputCommitter committer = new HiveIcebergOutputCommitter(); JobConf mapOnlyJobConf = new JobConf(); mapOnlyJobConf.setNumMapTasks(10); mapOnlyJobConf.setNumReduceTasks(0); // Map only job should commit map tasks Assert.assertTrue(committer.needsTaskCommit(new TaskAttemptContextImpl(mapOnlyJobConf, MAP_TASK_ID))); JobConf mapReduceJobConf = new JobConf(); mapReduceJobConf.setNumMapTasks(10); mapReduceJobConf.setNumReduceTasks(10); // MapReduce job should not commit map tasks, but should commit reduce tasks Assert.assertFalse(committer.needsTaskCommit(new TaskAttemptContextImpl(mapReduceJobConf, MAP_TASK_ID))); Assert.assertTrue(committer.needsTaskCommit(new TaskAttemptContextImpl(mapReduceJobConf, REDUCE_TASK_ID))); }
@Override @MethodNotAvailable public CompletionStage<V> putAsync(K key, V value) { throw new MethodNotAvailableException(); }
@Test(expected = MethodNotAvailableException.class) public void testPutAsyncWithExpiryPolicy() { ExpiryPolicy expiryPolicy = new HazelcastExpiryPolicy(1, 1, 1, TimeUnit.MILLISECONDS); adapter.putAsync(42, "value", expiryPolicy); }
@Override public ValueSet canonicalize(boolean removeSafeConstants) { if (removeSafeConstants) { // Since we cannot create a set with multiple entries but same "constant", we just use number of entries return new EquatableValueSet( type, whiteList, singleton(new ValueEntry(INTEGER, Utils.nativeValueToBlock(INTEGER, (long) entries.size())))); } return new EquatableValueSet( type, whiteList, // As comparing blocks is messy, we just sort by block hash hoping for the best entries.stream().sorted(comparing(ValueEntry::hashCode)).collect(toLinkedSet())); }
@Test public void testCanonicalize() throws Exception { assertSameSet(EquatableValueSet.all(type(BIGINT)), EquatableValueSet.all(type(BIGINT)), false); assertSameSet( EquatableValueSet.of(type(BIGINT), 0L, 1L, 3L), EquatableValueSet.of(type(BIGINT), 0L, 1L, 3L), false); assertDifferentSet( EquatableValueSet.of(type(BIGINT), 0L, 1L, 3L), EquatableValueSet.of(type(BIGINT), 0L, 1L, 2L), false); assertDifferentSet(EquatableValueSet.all(type(BIGINT)), EquatableValueSet.none(type(BIGINT)), false); assertDifferentSet(EquatableValueSet.all(type(BIGINT)), EquatableValueSet.all(type(VARCHAR)), false); assertSameSet( EquatableValueSet.of(type(BIGINT), 0L, 1L, 3L), EquatableValueSet.of(type(BIGINT), 0L, 1L, 2L), true); assertSameSet(EquatableValueSet.all(type(BIGINT)), EquatableValueSet.all(type(BIGINT)), true); assertDifferentSet( EquatableValueSet.of(type(BIGINT), 0L, 1L, 3L), EquatableValueSet.of(type(INTEGER), 0L, 1L, 3L), true); assertDifferentSet(EquatableValueSet.all(type(BIGINT)), EquatableValueSet.all(type(BOOLEAN)), true); }
public void encryptColumns( String inputFile, String outputFile, List<String> paths, FileEncryptionProperties fileEncryptionProperties) throws IOException { Path inPath = new Path(inputFile); Path outPath = new Path(outputFile); RewriteOptions options = new RewriteOptions.Builder(conf, inPath, outPath) .encrypt(paths) .encryptionProperties(fileEncryptionProperties) .build(); ParquetRewriter rewriter = new ParquetRewriter(options); rewriter.processBlocks(); rewriter.close(); }
@Test public void testEncryptAllColumns() throws IOException { String[] encryptColumns = {"DocId", "Name", "Gender", "Links.Forward", "Links.Backward"}; testSetup("GZIP"); columnEncryptor.encryptColumns( inputFile.getFileName(), outputFile, Arrays.asList(encryptColumns), EncDecProperties.getFileEncryptionProperties(encryptColumns, ParquetCipher.AES_GCM_CTR_V1, false)); verifyResultDecryptionWithValidKey(); }
@UdafFactory(description = "Compute sample standard deviation of column with type Long.", aggregateSchema = "STRUCT<SUM bigint, COUNT bigint, M2 double>") public static TableUdaf<Long, Struct, Double> stdDevLong() { return getStdDevImplementation( 0L, STRUCT_LONG, (agg, newValue) -> newValue + agg.getInt64(SUM), (agg, newValue) -> Double.valueOf(newValue * (agg.getInt64(COUNT) + 1) - (agg.getInt64(SUM) + newValue)), (agg1, agg2) -> agg1.getInt64(SUM).doubleValue() / agg1.getInt64(COUNT).doubleValue() - agg2.getInt64(SUM).doubleValue() / agg2.getInt64(COUNT).doubleValue(), (agg1, agg2) -> agg1.getInt64(SUM) + agg2.getInt64(SUM), (agg, valueToRemove) -> agg.getInt64(SUM) - valueToRemove); }
@Test public void shouldMergeLongs() { final TableUdaf<Long, Struct, Double> udaf = stdDevLong(); Struct left = udaf.initialize(); final Long[] leftValues = new Long[] {1L, 2L, 3L, 4L, 5L}; for (final Long thisValue : leftValues) { left = udaf.aggregate(thisValue, left); } Struct right = udaf.initialize(); final Long[] rightValues = new Long[] {2L, 2L, 1L}; for (final Long thisValue : rightValues) { right = udaf.aggregate(thisValue, right); } final Struct merged = udaf.merge(left, right); assertThat(merged.getInt64(COUNT), equalTo(8L)); assertThat(merged.getInt64(SUM), equalTo(20L)); assertThat(merged.getFloat64(M2), equalTo(14.0)); final double standardDev = udaf.map(merged); assertThat(standardDev, equalTo(1.4142135623730951)); }
public static Complex of(double real) { return new Complex(real, 0.0); }
@Test public void testArray() { System.out.println("Complex.Array"); Complex.Array array = Complex.Array.of(a, b); System.out.println("a = " + a); System.out.println("b = " + b); assertEquals(a.re, array.get(0).re, 1E-15); assertEquals(a.im, array.get(0).im, 1E-15); assertEquals(b.re, array.get(1).re, 1E-15); assertEquals(b.im, array.get(1).im, 1E-15); Complex c = Complex.of(3.0); array.set(1, c); assertEquals(a.re, array.get(0).re, 1E-15); assertEquals(a.im, array.get(0).im, 1E-15); assertEquals(c.re, array.get(1).re, 1E-15); assertEquals(c.im, array.get(1).im, 1E-15); }
public List<RowMetaInterface> getRecommendedIndexes() { List<RowMetaInterface> indexes = new ArrayList<RowMetaInterface>(); // First index : ID_BATCH if any is used. if ( isBatchIdUsed() ) { indexes.add( addFieldsToIndex( getKeyField() ) ); } // The next index includes : ERRORS, STATUS, TRANSNAME: indexes.add( addFieldsToIndex( findField( ID.ERRORS ), findField( ID.STATUS ), findField( ID.TRANSNAME ) ) ); // Index used for deleting rows during cleanup indexes.add( addFieldsToIndex( findField( ID.TRANSNAME ), findField( ID.LOGDATE ) ) ); return indexes; }
@Test public void getRecommendedIndexes() { List<RowMetaInterface> indexes = transLogTable.getRecommendedIndexes(); String[] expected = new String[]{ "TRANSNAME", "LOGDATE" }; assertTrue( "No indices present", indexes.size() > 0 ); boolean found = false; for ( RowMetaInterface rowMeta : indexes ) { if ( Arrays.equals( rowMeta.getFieldNames(), expected ) ) { found = true; break; } } if ( !found ) { fail( "Could not find index with " + Arrays.toString( expected ) ); } }
public boolean compatibleVersion(String acceptableVersionRange, String actualVersion) { V pluginVersion = parseVersion(actualVersion); // Treat a single version "1.4" as a left bound, equivalent to "[1.4,)" if (acceptableVersionRange.matches(VERSION_REGEX)) { return ge(pluginVersion, parseVersion(acceptableVersionRange)); } // Otherwise ensure it is a version range with bounds Matcher matcher = INTERVAL_PATTERN.matcher(acceptableVersionRange); Preconditions.checkArgument(matcher.matches(), "invalid version range"); String leftBound = matcher.group("left"); String rightBound = matcher.group("right"); Preconditions.checkArgument( leftBound != null || rightBound != null, "left and right bounds cannot both be empty"); BiPredicate<V, V> leftComparator = acceptableVersionRange.startsWith("[") ? VersionChecker::ge : VersionChecker::gt; BiPredicate<V, V> rightComparator = acceptableVersionRange.endsWith("]") ? VersionChecker::le : VersionChecker::lt; if (leftBound != null && !leftComparator.test(pluginVersion, parseVersion(leftBound))) { return false; } if (rightBound != null && !rightComparator.test(pluginVersion, parseVersion(rightBound))) { return false; } return true; }
@Test public void testRange_leftOpen_exact() { Assert.assertFalse(checker.compatibleVersion("(2.3,4.3]", "2.3")); Assert.assertFalse(checker.compatibleVersion("(2.3,4.3)", "2.3")); Assert.assertFalse(checker.compatibleVersion("(2.3,)", "2.3")); Assert.assertFalse(checker.compatibleVersion("(2.3,]", "2.3")); }
@Override public void executeUpdate(final UnregisterStorageUnitStatement sqlStatement, final ContextManager contextManager) { if (!sqlStatement.isIfExists()) { checkExisted(sqlStatement.getStorageUnitNames()); } checkInUsed(sqlStatement); try { contextManager.getPersistServiceFacade().getMetaDataManagerPersistService().unregisterStorageUnits(database.getName(), sqlStatement.getStorageUnitNames()); } catch (final SQLException | ShardingSphereServerException ex) { throw new StorageUnitsOperateException("unregister", sqlStatement.getStorageUnitNames(), ex); } }
@Test void assertExecuteUpdateWithStorageUnitInUsedWithoutIgnoredTables() { when(database.getRuleMetaData()).thenReturn(new RuleMetaData(Collections.singleton(new DistSQLHandlerFixtureRule()))); assertThrows(InUsedStorageUnitException.class, () -> executor.executeUpdate(new UnregisterStorageUnitStatement(Collections.singleton("foo_ds"), false, false), mock(ContextManager.class))); }
@Override public Option<HoodieBaseFile> getLatestBaseFile(String partitionPath, String fileId) { return execute(partitionPath, fileId, preferredView::getLatestBaseFile, (path, id) -> getSecondaryView().getLatestBaseFile(path, id)); }
@Test public void testGetLatestBaseFile() { Option<HoodieBaseFile> actual; Option<HoodieBaseFile> expected = Option.of(new HoodieBaseFile("test.file")); String partitionPath = "/table2"; String fileID = "file.123"; when(primary.getLatestBaseFile(partitionPath, fileID)).thenReturn(expected); actual = fsView.getLatestBaseFile(partitionPath, fileID); assertEquals(expected, actual); verify(secondaryViewSupplier, never()).get(); resetMocks(); when(secondaryViewSupplier.get()).thenReturn(secondary); when(primary.getLatestBaseFile(partitionPath, fileID)).thenThrow(new RuntimeException()); when(secondary.getLatestBaseFile(partitionPath, fileID)).thenReturn(expected); actual = fsView.getLatestBaseFile(partitionPath, fileID); assertEquals(expected, actual); resetMocks(); when(secondary.getLatestBaseFile(partitionPath, fileID)).thenReturn(expected); actual = fsView.getLatestBaseFile(partitionPath, fileID); assertEquals(expected, actual); resetMocks(); when(secondary.getLatestBaseFile(partitionPath, fileID)).thenThrow(new RuntimeException()); assertThrows(RuntimeException.class, () -> { fsView.getLatestBaseFile(partitionPath, fileID); }); }
@Override public boolean remove(Object o) { return map.remove(o) != null; }
@Test public void testRemoveFailure() { ExtendedSet<TestValue> set = new ExtendedSet<TestValue>(Maps.newConcurrentMap()); TestValue val = new TestValue("foo", 1); assertFalse(set.remove(val)); }
@Override public Object handle(String targetService, List<Object> invokers, Object invocation, Map<String, String> queryMap, String serviceInterface) { if (!shouldHandle(invokers)) { return invokers; } List<Object> result = getTargetInvokersByRules(invokers, targetService); return super.handle(targetService, result, invocation, queryMap, serviceInterface); }
@Test public void testGetTargetInvokerByTagRulesWithPolicySceneTwo() { // initialize the routing rule RuleInitializationUtils.initAZTagMatchTriggerThresholdPolicyRule(); // Scenario 1: The downstream provider has instances that meet the requirements List<Object> invokers = new ArrayList<>(); ApacheInvoker<Object> invoker1 = new ApacheInvoker<>("1.0.0", "az1"); invokers.add(invoker1); ApacheInvoker<Object> invoker2 = new ApacheInvoker<>("1.0.1", "az2"); invokers.add(invoker2); Invocation invocation = new ApacheInvocation(); Map<String, String> queryMap = new HashMap<>(); queryMap.put("zone", "az1"); queryMap.put("interface", "io.sermant.foo.FooTest"); Map<String, String> parameters = new HashMap<>(); parameters.putIfAbsent(RouterConstant.META_ZONE_KEY, "az1"); DubboCache.INSTANCE.setParameters(parameters); DubboCache.INSTANCE.putApplication("io.sermant.foo.FooTest", "foo"); List<Object> targetInvokers = (List<Object>) tagRouteHandler.handle( DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest") , invokers, invocation, queryMap, "io.sermant.foo.FooTest"); Assert.assertEquals(2, targetInvokers.size()); // Scenario 2: The downstream provider does not have instances that meet the requirements List<Object> invokers2 = new ArrayList<>(); ApacheInvoker<Object> invoker3 = new ApacheInvoker<>("1.0.0", "az2"); invokers2.add(invoker3); ApacheInvoker<Object> invoker4 = new ApacheInvoker<>("1.0.1", "az3"); invokers2.add(invoker4); targetInvokers = (List<Object>) tagRouteHandler.handle( DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest") , invokers2, invocation, queryMap, "io.sermant.foo.FooTest"); Assert.assertEquals(2, targetInvokers.size()); ConfigCache.getLabel(RouterConstant.DUBBO_CACHE_NAME).resetRouteRule(Collections.emptyMap()); }
@Override public void validateAction( RepositoryOperation... operations ) throws KettleException { for ( RepositoryOperation operation : operations ) { switch ( operation ) { case EXECUTE_TRANSFORMATION: case EXECUTE_JOB: checkOperationAllowed( EXECUTE_CONTENT_ACTION ); break; case MODIFY_TRANSFORMATION: case MODIFY_JOB: checkOperationAllowed( CREATE_CONTENT_ACTION ); break; case SCHEDULE_TRANSFORMATION: case SCHEDULE_JOB: checkOperationAllowed( SCHEDULE_CONTENT_ACTION ); break; case MODIFY_DATABASE: checkOperationAllowed( MODIFY_DATABASE_ACTION ); break; case SCHEDULER_EXECUTE: checkOperationAllowed( SCHEDULER_EXECUTE_ACTION ); break; } } }
@Test public void noExceptionThrown_WhenOperationIsAllowed_ScheduleOperation() throws Exception { setOperationPermissions( IAbsSecurityProvider.SCHEDULE_CONTENT_ACTION, true ); provider.validateAction( RepositoryOperation.SCHEDULE_TRANSFORMATION ); }
public static <T> Bounded<T> from(BoundedSource<T> source) { return new Bounded<>(null, source); }
@Test public void testDisplayData() { SerializableBoundedSource boundedSource = new SerializableBoundedSource() { @Override public void populateDisplayData(DisplayData.Builder builder) { builder.add(DisplayData.item("foo", "bar")); } }; SerializableUnboundedSource unboundedSource = new SerializableUnboundedSource() { @Override public void populateDisplayData(DisplayData.Builder builder) { builder.add(DisplayData.item("foo", "bar")); } }; Duration maxReadTime = Duration.standardMinutes(2345); Read.Bounded<String> bounded = Read.from(boundedSource); BoundedReadFromUnboundedSource<String> unbounded = Read.from(unboundedSource).withMaxNumRecords(1234).withMaxReadTime(maxReadTime); DisplayData boundedDisplayData = DisplayData.from(bounded); assertThat(boundedDisplayData, hasDisplayItem("source", boundedSource.getClass())); assertThat(boundedDisplayData, includesDisplayDataFor("source", boundedSource)); DisplayData unboundedDisplayData = DisplayData.from(unbounded); assertThat(unboundedDisplayData, hasDisplayItem("source", unboundedSource.getClass())); assertThat(unboundedDisplayData, includesDisplayDataFor("source", unboundedSource)); assertThat(unboundedDisplayData, hasDisplayItem("maxRecords", 1234)); assertThat(unboundedDisplayData, hasDisplayItem("maxReadTime", maxReadTime)); }
public OpenTelemetry getOpenTelemetry() { return openTelemetrySdkReference.get(); }
@Test public void testServiceIsDisabledByDefault() throws Exception { @Cleanup var metricReader = InMemoryMetricReader.create(); @Cleanup var ots = OpenTelemetryService.builder() .builderCustomizer(getBuilderCustomizer(metricReader, Map.of())) .clusterName("openTelemetryServiceTestCluster") .build(); var meter = ots.getOpenTelemetry().getMeter("openTelemetryServiceTestInstrument"); var builders = List.of( meter.counterBuilder("dummyCounterA"), meter.counterBuilder("dummyCounterB").setDescription("desc"), meter.counterBuilder("dummyCounterC").setDescription("desc").setUnit("unit"), meter.counterBuilder("dummyCounterD").setUnit("unit") ); var callback = new AtomicBoolean(); // Validate that no matter how the counters are being built, they are all backed by the same underlying object. // This ensures we conserve memory when the SDK is disabled. assertThat(builders.stream().map(LongCounterBuilder::build).distinct()).hasSize(1); assertThat(builders.stream().map(LongCounterBuilder::buildObserver).distinct()).hasSize(1); assertThat(builders.stream().map(b -> b.buildWithCallback(__ -> callback.set(true))).distinct()).hasSize(1); // Validate that no metrics are being emitted at all. assertThat(metricReader.collectAllMetrics()).isEmpty(); // Validate that the callback has not been called. assertThat(callback).isFalse(); }
@Override public UserCredentials findByUserId(TenantId tenantId, UUID userId) { return DaoUtil.getData(userCredentialsRepository.findByUserId(userId)); }
@Test public void testFindByUserId() { UserCredentials foundUserCredentials = userCredentialsDao.findByUserId(SYSTEM_TENANT_ID, neededUserCredentials.getUserId().getId()); assertNotNull(foundUserCredentials); assertEquals(neededUserCredentials, foundUserCredentials); }
@Override public Iterator<QueryableEntry> iterator() { return new It(); }
@Test public void contains_matchingPredicate_inOtherResult() { Set<QueryableEntry> entries = generateEntries(100000); Set<QueryableEntry> otherIndexResult = new HashSet<>(); otherIndexResult.add(entries.iterator().next()); List<Set<QueryableEntry>> otherIndexedResults = new ArrayList<>(); otherIndexedResults.add(otherIndexResult); AndResultSet resultSet = new AndResultSet(entries, otherIndexedResults, asList(Predicates.alwaysTrue())); Iterator<QueryableEntry> it = entries.iterator(); assertContains(resultSet, it.next()); while (it.hasNext()) { assertNotContains(resultSet, it.next()); } }
public List<DataRecord> merge(final List<DataRecord> dataRecords) { Map<DataRecord.Key, DataRecord> result = new HashMap<>(); dataRecords.forEach(each -> { if (PipelineSQLOperationType.INSERT == each.getType()) { mergeInsert(each, result); } else if (PipelineSQLOperationType.UPDATE == each.getType()) { mergeUpdate(each, result); } else if (PipelineSQLOperationType.DELETE == each.getType()) { mergeDelete(each, result); } }); return new ArrayList<>(result.values()); }
@Test void assertUpdatePrimaryKeyBeforeUpdatePrimaryKey() { DataRecord beforeDataRecord = mockUpdateDataRecord(1, 2, 10, 50); DataRecord afterDataRecord = mockUpdateDataRecord(2, 3, 10, 50); Collection<DataRecord> actual = groupEngine.merge(Arrays.asList(beforeDataRecord, afterDataRecord)); assertThat(actual.size(), is(1)); DataRecord dataRecord = actual.iterator().next(); assertThat(dataRecord.getType(), is(PipelineSQLOperationType.UPDATE)); assertThat(dataRecord.getTableName(), is("order")); assertThat(dataRecord.getActualTableName(), is("order_0")); assertThat(dataRecord.getCommitTime(), is(456L)); assertColumnsMatched(dataRecord.getColumn(0), new Column("id", 1, 3, true, true)); assertColumnsMatched(dataRecord.getColumn(1), new Column("user_id", 10, 10, false, false)); assertColumnsMatched(dataRecord.getColumn(2), new Column("total_price", 50, 50, false, false)); }
@Audit @Operation(summary = "login", description = "User Login") @PostMapping(value = "/login") public ResponseEntity<LoginVO> login(@RequestBody LoginReq loginReq) { if (!StringUtils.hasText(loginReq.getUsername()) || !StringUtils.hasText(loginReq.getPassword())) { throw new ApiException(ApiExceptionEnum.USERNAME_OR_PASSWORD_REQUIRED); } LoginDTO loginDTO = LoginConverter.INSTANCE.fromReq2DTO(loginReq); return ResponseEntity.success(loginService.login(loginDTO)); }
@Test void loginThrowsExceptionForMissingUsernameAndPassword() { LoginReq loginReq = new LoginReq(); loginReq.setUsername(""); loginReq.setPassword(""); ApiException exception = assertThrows(ApiException.class, () -> loginController.login(loginReq)); assertEquals(ApiExceptionEnum.USERNAME_OR_PASSWORD_REQUIRED, exception.getEx()); }
public static WhereSegment bind(final WhereSegment segment, final SQLStatementBinderContext binderContext, final Map<String, TableSegmentBinderContext> tableBinderContexts, final Map<String, TableSegmentBinderContext> outerTableBinderContexts) { return new WhereSegment(segment.getStartIndex(), segment.getStopIndex(), ExpressionSegmentBinder.bind(segment.getExpr(), SegmentType.PREDICATE, binderContext, tableBinderContexts, outerTableBinderContexts)); }
@Test void assertBind() { SQLStatementBinderContext sqlStatementBinderContext = mock(SQLStatementBinderContext.class); WhereSegment expectedWhereSegment = new WhereSegment(1, 2, mock(ExpressionSegment.class)); Map<String, TableSegmentBinderContext> tableBinderContexts = new HashMap<>(); Map<String, TableSegmentBinderContext> outerTableBinderContexts = new HashMap<>(); WhereSegment actualWhereSegment = WhereSegmentBinder.bind(expectedWhereSegment, sqlStatementBinderContext, tableBinderContexts, outerTableBinderContexts); assertThat(actualWhereSegment.getStopIndex(), is(expectedWhereSegment.getStopIndex())); assertThat(actualWhereSegment.getStartIndex(), is(expectedWhereSegment.getStartIndex())); assertThat(actualWhereSegment.getExpr(), is(expectedWhereSegment.getExpr())); }
@Override public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context, Map<String, Long> recentlyUnloadedBundles, Map<String, Long> recentlyUnloadedBrokers) { final var conf = context.brokerConfiguration(); decisionCache.clear(); stats.clear(); Map<String, BrokerLookupData> availableBrokers; try { availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync() .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS); } catch (ExecutionException | InterruptedException | TimeoutException e) { counter.update(Failure, Unknown); log.warn("Failed to fetch available brokers. Stop unloading.", e); return decisionCache; } try { final var loadStore = context.brokerLoadDataStore(); stats.setLoadDataStore(loadStore); boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log); var skipReason = stats.update( context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf); if (skipReason.isPresent()) { if (debugMode) { log.warn(CANNOT_CONTINUE_UNLOAD_MSG + " Skipped the load stat update. Reason:{}.", skipReason.get()); } counter.update(Skip, skipReason.get()); return decisionCache; } counter.updateLoadData(stats.avg, stats.std); if (debugMode) { log.info("brokers' load stats:{}", stats); } // skip metrics int numOfBrokersWithEmptyLoadData = 0; int numOfBrokersWithFewBundles = 0; final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd(); boolean transfer = conf.isLoadBalancerTransferEnabled(); if (stats.std() > targetStd || isUnderLoaded(context, stats.peekMinBroker(), stats) || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) { unloadConditionHitCount++; } else { unloadConditionHitCount = 0; } if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Shedding condition hit count:{} is less than or equal to the threshold:{}.", unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold()); } counter.update(Skip, HitCount); return decisionCache; } while (true) { if (!stats.hasTransferableBrokers()) { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Exhausted target transfer brokers."); } break; } UnloadDecision.Reason reason; if (stats.std() > targetStd) { reason = Overloaded; } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) { reason = Underloaded; if (debugMode) { log.info(String.format("broker:%s is underloaded:%s although " + "load std:%.2f <= targetStd:%.2f. " + "Continuing unload for this underloaded broker.", stats.peekMinBroker(), context.brokerLoadDataStore().get(stats.peekMinBroker()).get(), stats.std(), targetStd)); } } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) { reason = Overloaded; if (debugMode) { log.info(String.format("broker:%s is overloaded:%s although " + "load std:%.2f <= targetStd:%.2f. " + "Continuing unload for this overloaded broker.", stats.peekMaxBroker(), context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(), stats.std(), targetStd)); } } else { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + "The overall cluster load meets the target, std:{} <= targetStd:{}." + "minBroker:{} is not underloaded. 
maxBroker:{} is not overloaded.", stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker()); } break; } String maxBroker = stats.pollMaxBroker(); String minBroker = stats.peekMinBroker(); Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker); Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker); if (maxBrokerLoadData.isEmpty()) { log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " MaxBrokerLoadData is empty.", maxBroker)); numOfBrokersWithEmptyLoadData++; continue; } if (minBrokerLoadData.isEmpty()) { log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker); numOfBrokersWithEmptyLoadData++; continue; } double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA(); double minLoad = minBrokerLoadData.get().getWeightedMaxEMA(); double offload = (maxLoad - minLoad) / 2; BrokerLoadData brokerLoadData = maxBrokerLoadData.get(); double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn() + brokerLoadData.getMsgThroughputOut(); double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn() + minBrokerLoadData.get().getMsgThroughputOut(); double offloadThroughput = maxBrokerThroughput * offload / maxLoad; if (debugMode) { log.info(String.format( "Attempting to shed load from broker:%s%s, which has the max resource " + "usage:%.2f%%, targetStd:%.2f," + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.", maxBroker, transfer ? " to broker:" + minBroker : "", maxLoad * 100, targetStd, offload * 100, offloadThroughput / KB )); } double trafficMarkedToOffload = 0; double trafficMarkedToGain = 0; Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker); if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) { log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " TopBundlesLoadData is empty.", maxBroker)); numOfBrokersWithEmptyLoadData++; continue; } var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData(); if (maxBrokerTopBundlesLoadData.size() == 1) { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " Sole namespace bundle:%s is overloading the broker. ", maxBroker, maxBrokerTopBundlesLoadData.iterator().next())); continue; } Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker); var minBrokerTopBundlesLoadDataIter = minBundlesLoadData.isPresent() ? 
minBundlesLoadData.get().getTopBundlesLoadData().iterator() : null; if (maxBrokerTopBundlesLoadData.isEmpty()) { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " Broker overloaded despite having no bundles", maxBroker)); continue; } int remainingTopBundles = maxBrokerTopBundlesLoadData.size(); for (var e : maxBrokerTopBundlesLoadData) { String bundle = e.bundleName(); if (channel != null && !channel.isOwner(bundle, maxBroker)) { if (debugMode) { log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " MaxBroker:%s is not the owner.", bundle, maxBroker)); } continue; } if (recentlyUnloadedBundles.containsKey(bundle)) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " Bundle has been recently unloaded at ts:%d.", bundle, recentlyUnloadedBundles.get(bundle))); } continue; } if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " This unload can't meet " + "affinity(isolation) or anti-affinity group policies.", bundle)); } continue; } if (remainingTopBundles <= 1) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is" + " less than or equal to 1.", bundle, maxBroker)); } break; } var bundleData = e.stats(); double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut; boolean swap = false; List<Unload> minToMaxUnloads = new ArrayList<>(); double minBrokerBundleSwapThroughput = 0.0; if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) { // see if we can swap bundles from min to max broker to balance better. if (transfer && minBrokerTopBundlesLoadDataIter != null) { var maxBrokerNewThroughput = maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain - maxBrokerBundleThroughput; var minBrokerNewThroughput = minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput; while (minBrokerTopBundlesLoadDataIter.hasNext()) { var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next(); if (!isTransferable(context, availableBrokers, minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) { continue; } var minBrokerBundleThroughput = minBrokerBundleData.stats().msgThroughputIn + minBrokerBundleData.stats().msgThroughputOut; var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput; var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput; if (maxBrokerNewThroughputTmp < maxBrokerThroughput && minBrokerNewThroughputTmp < maxBrokerThroughput) { minToMaxUnloads.add(new Unload(minBroker, minBrokerBundleData.bundleName(), Optional.of(maxBroker))); maxBrokerNewThroughput = maxBrokerNewThroughputTmp; minBrokerNewThroughput = minBrokerNewThroughputTmp; minBrokerBundleSwapThroughput += minBrokerBundleThroughput; if (minBrokerNewThroughput <= maxBrokerNewThroughput && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) { swap = true; break; } } } } if (!swap) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is " + "greater than the target :%.2f KByte/s.", bundle, (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB, trafficMarkedToGain / KB, (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB, offloadThroughput / KB)); } break; } } Unload unload; if (transfer) { if (swap) { 
minToMaxUnloads.forEach(minToMaxUnload -> { if (debugMode) { log.info("Decided to gain bundle:{} from min broker:{}", minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker()); } var decision = new UnloadDecision(); decision.setUnload(minToMaxUnload); decision.succeed(reason); decisionCache.add(decision); }); if (debugMode) { log.info(String.format( "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.", minBrokerBundleSwapThroughput / KB, minBroker, maxBroker)); trafficMarkedToGain += minBrokerBundleSwapThroughput; } } unload = new Unload(maxBroker, bundle, Optional.of(minBroker)); } else { unload = new Unload(maxBroker, bundle); } var decision = new UnloadDecision(); decision.setUnload(unload); decision.succeed(reason); decisionCache.add(decision); trafficMarkedToOffload += maxBrokerBundleThroughput; remainingTopBundles--; if (debugMode) { log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s." + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s." + " Target:%.2f KByte/s.", bundle, maxBrokerBundleThroughput / KB, trafficMarkedToOffload / KB, trafficMarkedToGain / KB, (trafficMarkedToOffload - trafficMarkedToGain) / KB, offloadThroughput / KB)); } } if (trafficMarkedToOffload > 0) { var adjustedOffload = (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput; stats.offload(maxLoad, minLoad, adjustedOffload); if (debugMode) { log.info( String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}", stats, maxLoad, minLoad, adjustedOffload)); } } else { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " There is no bundle that can be unloaded in top bundles load data. " + "Consider splitting bundles owned by the broker " + "to make each bundle serve less traffic " + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport" + " to report more bundles in the top bundles load data.", maxBroker)); } } // while end if (debugMode) { log.info("decisionCache:{}", decisionCache); } if (decisionCache.isEmpty()) { UnloadDecision.Reason reason; if (numOfBrokersWithEmptyLoadData > 0) { reason = NoLoadData; } else if (numOfBrokersWithFewBundles > 0) { reason = NoBundles; } else { reason = HitCount; } counter.update(Skip, reason); } else { unloadConditionHitCount = 0; } } catch (Throwable e) { log.error("Failed to process unloading. ", e); this.counter.update(Failure, Unknown); } return decisionCache; }
@Test public void testLoadBalancerSheddingConditionHitCountThreshold() { UnloadCounter counter = new UnloadCounter(); TransferShedder transferShedder = new TransferShedder(counter); var ctx = setupContext(); int max = 3; ctx.brokerConfiguration() .setLoadBalancerSheddingConditionHitCountThreshold(max); for (int i = 0; i < max; i++) { var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); assertTrue(res.isEmpty()); assertEquals(counter.getBreakdownCounters().get(Skip).get(HitCount).get(), i+1); assertEquals(counter.getLoadAvg(), setupLoadAvg); assertEquals(counter.getLoadStd(), setupLoadStd); } var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet<UnloadDecision>(); expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); assertEquals(counter.getLoadStd(), setupLoadStd); }
@Override
public ServiceInfo subscribe(String serviceName, String groupName, String clusters) throws NacosException {
    throw new UnsupportedOperationException("Do not support subscribe service by UDP, please use gRPC replaced.");
}
@Test
void testSubscribe() throws Exception {
    assertThrows(UnsupportedOperationException.class, () -> {
        String groupName = "group1";
        String serviceName = "serviceName";
        String clusters = "clusters";
        // when
        clientProxy.subscribe(serviceName, groupName, clusters);
    });
}
@Override
public void process() {
    JMeterContext context = getThreadContext();
    Sampler sam = context.getCurrentSampler();
    SampleResult res = context.getPreviousResult();
    HTTPSamplerBase sampler;
    HTTPSampleResult result;
    if (!(sam instanceof HTTPSamplerBase) || !(res instanceof HTTPSampleResult)) {
        log.info("Can't apply HTML Link Parser when the previous"
                + " sampler run is not an HTTP Request.");
        return;
    } else {
        sampler = (HTTPSamplerBase) sam;
        result = (HTTPSampleResult) res;
    }
    List<HTTPSamplerBase> potentialLinks = new ArrayList<>();
    String responseText = result.getResponseDataAsString();
    int index = responseText.indexOf('<'); // $NON-NLS-1$
    if (index == -1) {
        index = 0;
    }
    if (log.isDebugEnabled()) {
        log.debug("Check for matches against: " + sampler.toString());
    }
    Document html = (Document) HtmlParsingUtils.getDOM(responseText.substring(index));
    addAnchorUrls(html, result, sampler, potentialLinks);
    addFormUrls(html, result, sampler, potentialLinks);
    addFramesetUrls(html, result, sampler, potentialLinks);
    if (!potentialLinks.isEmpty()) {
        HTTPSamplerBase url = potentialLinks.get(ThreadLocalRandom.current().nextInt(potentialLinks.size()));
        if (log.isDebugEnabled()) {
            log.debug("Selected: " + url.toString());
        }
        sampler.setDomain(url.getDomain());
        sampler.setPath(url.getPath());
        if (url.getMethod().equals(HTTPConstants.POST)) {
            for (JMeterProperty jMeterProperty : sampler.getArguments()) {
                Argument arg = (Argument) jMeterProperty.getObjectValue();
                modifyArgument(arg, url.getArguments());
            }
        } else {
            sampler.setArguments(url.getArguments());
        }
        sampler.setProtocol(url.getProtocol());
    } else {
        log.debug("No matches found");
    }
}
@Test
public void testNullResult() throws Exception {
    jmctx.setCurrentSampler(makeContext("http://www.apache.org/subdir/previous.html"));
    jmctx.setPreviousResult(null);
    parser.process(); // should do nothing
}
public boolean isValidatedPath(final String path) {
    return pathPattern.matcher(path).find();
}
@Test
void assertIsNotValidatedPath() {
    assertFalse(nodePath.isValidatedPath("/metadata/foo_db/rules/bar/tables/foo_table"));
}
public CustomToggle turnOff(String ability) {
    baseMapping.put(ability, OFF);
    return this;
}
@Test
public void canTurnOffAbilities() {
    toggle = new CustomToggle().turnOff(DefaultAbilities.CLAIM);
    customBot = new DefaultBot(null, EMPTY, db, toggle);
    customBot.onRegister();

    assertFalse(customBot.getAbilities().containsKey(DefaultAbilities.CLAIM));
}
public static String findAddress(List<NodeAddress> addresses, NodeAddressType preferredAddressType) {
    if (addresses == null) {
        return null;
    }

    Map<String, String> addressMap = addresses.stream()
            .collect(Collectors.toMap(NodeAddress::getType, NodeAddress::getAddress, (address1, address2) -> {
                LOGGER.warnOp("Found multiple addresses with the same type. Only the first address '{}' will be used.", address1);
                return address1;
            }));

    // If user set preferred address type, we should check it first
    if (preferredAddressType != null && addressMap.containsKey(preferredAddressType.toValue())) {
        return addressMap.get(preferredAddressType.toValue());
    }

    if (addressMap.containsKey("ExternalDNS")) {
        return addressMap.get("ExternalDNS");
    } else if (addressMap.containsKey("ExternalIP")) {
        return addressMap.get("ExternalIP");
    } else if (addressMap.containsKey("InternalDNS")) {
        return addressMap.get("InternalDNS");
    } else if (addressMap.containsKey("InternalIP")) {
        return addressMap.get("InternalIP");
    } else if (addressMap.containsKey("Hostname")) {
        return addressMap.get("Hostname");
    }

    return null;
}
@Test
public void testFindAddressesNull() {
    List<NodeAddress> addresses = null;

    String address = NodeUtils.findAddress(addresses, null);

    assertThat(address, is(CoreMatchers.nullValue()));
}
public void writeInt1(final int value) {
    byteBuf.writeByte(value);
}
@Test
void assertWriteInt1() {
    new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8).writeInt1(1);
    verify(byteBuf).writeByte(1);
}
P current() {
    return current;
}
@Test
void shouldResultNullWhenCurrentCalledWithoutNext() {
    assertNull(publicationGroup.current());
}
@Override
public Set<Path> getPaths(ElementId src, ElementId dst, LinkWeigher weigher) {
    checkNotNull(src, ELEMENT_ID_NULL);
    checkNotNull(dst, ELEMENT_ID_NULL);
    LinkWeigher internalWeigher = weigher != null ? weigher : DEFAULT_WEIGHER;

    // Get the source and destination edge locations
    EdgeLink srcEdge = getEdgeLink(src, true);
    EdgeLink dstEdge = getEdgeLink(dst, false);

    // If either edge is null, bail with no paths.
    if (srcEdge == null || dstEdge == null) {
        return ImmutableSet.of();
    }

    DeviceId srcDevice = srcEdge != NOT_HOST ? srcEdge.dst().deviceId() : (DeviceId) src;
    DeviceId dstDevice = dstEdge != NOT_HOST ? dstEdge.src().deviceId() : (DeviceId) dst;

    // If the source and destination are on the same edge device, there
    // is just one path, so build it and return it.
    if (srcDevice.equals(dstDevice)) {
        return edgeToEdgePaths(srcEdge, dstEdge, internalWeigher);
    }

    // Otherwise get all paths between the source and destination edge
    // devices.
    Topology topology = topologyService.currentTopology();
    Set<Path> paths = topologyService.getPaths(topology, srcDevice, dstDevice, internalWeigher);

    return edgeToEdgePaths(srcEdge, dstEdge, paths, internalWeigher);
}
@Test
public void testSelfPaths() {
    HostId host = hid("12:34:56:78:90:ab/1");
    Set<Path> paths = service.getPaths(host, host, new TestWeigher());
    assertThat(paths, hasSize(1));
    Path path = paths.iterator().next();
    assertThat(path, not(nullValue()));
    assertThat(path.links(), hasSize(2));
    Link link1 = path.links().get(0);
    Link link2 = path.links().get(1);
    assertThat(link1.src(), is(link2.dst()));
    assertThat(link2.src(), is(link1.dst()));
    assertThat(link1.src().hostId(), is(host));
    assertThat(link2.dst().hostId(), is(host));
}
@Override
public String getName() {
    return _name;
}
@Test
public void testShaTransformFunction() {
    ExpressionContext expression = RequestContextUtils.getExpression(String.format("sha(%s)", BYTES_SV_COLUMN));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
    assertEquals(transformFunction.getName(), "sha");
    String[] expectedValues = new String[NUM_ROWS];
    for (int i = 0; i < NUM_ROWS; i++) {
        expectedValues[i] = DigestUtils.shaHex(_bytesSVValues[i]);
    }
    testTransformFunction(transformFunction, expectedValues);
}
public V put(final int key, final V value) {
    final Entry<V>[] table = this.table;
    final int index = HashUtil.indexFor(key, table.length, mask);

    for (Entry<V> e = table[index]; e != null; e = e.hashNext) {
        if (e.key == key) {
            moveToTop(e);
            return e.setValue(value);
        }
    }

    final Entry<V> e = new Entry<>(key, value);
    e.hashNext = table[index];
    table[index] = e;
    final Entry<V> top = this.top;
    e.next = top;
    if (top != null) {
        top.previous = e;
    } else {
        back = e;
    }
    this.top = e;
    _size += 1;
    if (removeEldestEntry(back)) {
        remove(back.key);
    } else if (_size > capacity) {
        rehash(HashUtil.nextCapacity(capacity));
    }
    return null;
}
@Test
public void forEachProcedure() {
    final IntLinkedHashMap<String> tested = new IntLinkedHashMap<>();
    for (int i = 0; i < 100000; ++i) {
        tested.put(i, Integer.toString(i));
    }
    final int[] ii = {0};
    tested.forEachKey(object -> {
        ii[0]++;
        return true;
    });
    tested.forEachValue(object -> {
        ii[0]++;
        return true;
    });
    Assert.assertEquals(tested.size() * 2, ii[0]);

    ii[0] = 0;
    tested.forEachKey(object -> {
        ii[0]++;
        return object > 99500;
    });
    tested.forEachValue(object -> {
        ii[0]++;
        return true;
    });
    Assert.assertEquals(tested.size() + 500, ii[0]);
}
@Override
public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer) {
    return aggregate(initializer, Materialized.with(null, null));
}
@Test
public void shouldNotHaveNullNamedTwoOptionOnAggregate() {
    assertThrows(NullPointerException.class,
            () -> windowedCogroupedStream.aggregate(MockInitializer.STRING_INIT, (Named) null));
}
@Override
public void loadGlue(Glue glue, List<URI> gluePaths) {
    GlueAdaptor glueAdaptor = new GlueAdaptor(lookup, glue);

    gluePaths.stream()
            .filter(gluePath -> CLASSPATH_SCHEME.equals(gluePath.getScheme()))
            .map(ClasspathSupport::packageName)
            .map(classFinder::scanForClassesInPackage)
            .flatMap(Collection::stream)
            .distinct()
            .forEach(aGlueClass -> scan(aGlueClass, (method, annotation) -> {
                container.addClass(method.getDeclaringClass());
                glueAdaptor.addDefinition(method, annotation);
            }));
}
@Test
void detects_subclassed_glue_and_throws_exception() {
    Executable testMethod = () -> backend.loadGlue(glue,
            asList(URI.create("classpath:io/cucumber/java/steps"),
                    URI.create("classpath:io/cucumber/java/incorrectlysubclassedsteps")));
    InvalidMethodException expectedThrown = assertThrows(InvalidMethodException.class, testMethod);
    assertThat(expectedThrown.getMessage(), is(equalTo(
            "You're not allowed to extend classes that define Step Definitions or hooks. class io.cucumber.java.incorrectlysubclassedsteps.SubclassesSteps extends class io.cucumber.java.steps.Steps")));
}
@Override
public ExecuteContext doAfter(ExecuteContext context) {
    if (isHasMethodLoadSpringFactories()) {
        // Only inject via loadSpringFactories on newer versions; the cache in newer versions makes this more
        // efficient, and the injection only needs to happen once.
        if (IS_INJECTED.compareAndSet(false, true)) {
            injectConfigurations(context.getResult());
        }
    } else {
        final Object rawFactoryType = context.getArguments()[0];
        if (rawFactoryType instanceof Class) {
            final Class<?> factoryType = (Class<?>) rawFactoryType;
            injectConfigurationsWithLowVersion(context.getResult(), factoryType.getName());
        }
    }
    return context;
}
@Test
public void doAfterLowVersion() throws NoSuchMethodException, IllegalAccessException {
    // lowVersionTesting
    final SpringFactoriesInterceptor lowVersionInterceptor = new SpringFactoriesInterceptor();
    hasMethodLoadSpringFactoriesFiled.set(lowVersionInterceptor, Boolean.FALSE);
    ExecuteContext executeContext = ExecuteContext.forMemberMethod(this, this.getClass().getMethod("doAfterLowVersion"),
            new Object[]{org.springframework.boot.autoconfigure.EnableAutoConfiguration.class}, null, null);
    final List<String> lowResult = new ArrayList<>();
    executeContext.changeResult(lowResult);
    executeContext = lowVersionInterceptor.doAfter(executeContext);
    executeContext.changeArgs(new Object[]{org.springframework.cloud.bootstrap.BootstrapConfiguration.class});
    executeContext = lowVersionInterceptor.doAfter(executeContext);
    final List<String> injectResult = (List<String>) executeContext.getResult();
    Assert.assertTrue(injectResult.contains(PROPERTY_LOCATOR_CLASS) && injectResult.contains(EVENT_PUBLISHER_CLASS));
}