name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
morf_SchemaAdapter_tableNames | /**
* @see org.alfasoftware.morf.metadata.Schema#tableNames()
*/
@Override
public Collection<String> tableNames() {
return delegate.tableNames();
} | 3.68 |
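The snippet above simply forwards `tableNames()` to a wrapped `Schema`. A minimal standalone sketch of that delegation pattern, assuming a cut-down `Schema` interface rather than the real Morf API:

```java
import java.util.Collection;
import java.util.List;

// Simplified stand-ins for illustration; the real Morf Schema interface is much larger.
interface Schema {
    Collection<String> tableNames();
}

class SchemaAdapter implements Schema {
    private final Schema delegate;

    SchemaAdapter(Schema delegate) {
        this.delegate = delegate;
    }

    // Forward the call unchanged; subclasses override only what they need to adapt.
    @Override
    public Collection<String> tableNames() {
        return delegate.tableNames();
    }
}

class SchemaAdapterDemo {
    public static void main(String[] args) {
        Schema base = () -> List.of("Invoice", "Customer");
        System.out.println(new SchemaAdapter(base).tableNames()); // [Invoice, Customer]
    }
}
```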
hbase_AccessController_prePrepareBulkLoad | /**
* Authorization check for SecureBulkLoadProtocol.prepareBulkLoad()
* @param ctx the context
*/
@Override
public void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx)
throws IOException {
requireAccess(ctx, "prePrepareBulkLoad",
ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN,
Action.CREATE);
} | 3.68 |
flink_RoundRobinOperatorStateRepartitioner_groupByStateMode | /** Group by the different named states. */
@SuppressWarnings({"unchecked", "rawtypes"})
private GroupByStateNameResults groupByStateMode(
List<List<OperatorStateHandle>> previousParallelSubtaskStates) {
// Reorganize: group by (State Name -> StreamStateHandle + StateMetaInfo)
EnumMap<
OperatorStateHandle.Mode,
Map<
String,
List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>>>
nameToStateByMode = new EnumMap<>(OperatorStateHandle.Mode.class);
for (OperatorStateHandle.Mode mode : OperatorStateHandle.Mode.values()) {
nameToStateByMode.put(mode, new HashMap<>());
}
for (List<OperatorStateHandle> previousParallelSubtaskState :
previousParallelSubtaskStates) {
for (OperatorStateHandle operatorStateHandle : previousParallelSubtaskState) {
if (operatorStateHandle == null) {
continue;
}
final Set<Map.Entry<String, OperatorStateHandle.StateMetaInfo>>
partitionOffsetEntries =
operatorStateHandle.getStateNameToPartitionOffsets().entrySet();
for (Map.Entry<String, OperatorStateHandle.StateMetaInfo> e :
partitionOffsetEntries) {
OperatorStateHandle.StateMetaInfo metaInfo = e.getValue();
Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>>
nameToState = nameToStateByMode.get(metaInfo.getDistributionMode());
List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>
stateLocations =
nameToState.computeIfAbsent(
e.getKey(),
k ->
new ArrayList<>(
previousParallelSubtaskStates.size()
* partitionOffsetEntries
.size()));
stateLocations.add(
Tuple2.of(operatorStateHandle.getDelegateStateHandle(), e.getValue()));
}
}
}
return new GroupByStateNameResults(nameToStateByMode);
} | 3.68 |
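The method above groups state handles first by distribution mode and then by state name, pre-populating an `EnumMap` and growing the inner lists with `computeIfAbsent`. A standalone sketch of that grouping idiom, using illustrative types rather than Flink's classes:

```java
import java.util.ArrayList;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class GroupingSketch {
    enum Mode { SPLIT_DISTRIBUTE, UNION, BROADCAST } // illustrative modes

    record Entry(String stateName, Mode mode, long offset) {}

    static EnumMap<Mode, Map<String, List<Entry>>> groupByMode(List<Entry> entries) {
        // Pre-populate one inner map per mode so lookups never return null.
        EnumMap<Mode, Map<String, List<Entry>>> byMode = new EnumMap<>(Mode.class);
        for (Mode mode : Mode.values()) {
            byMode.put(mode, new HashMap<>());
        }
        for (Entry e : entries) {
            byMode.get(e.mode())
                  .computeIfAbsent(e.stateName(), k -> new ArrayList<>())
                  .add(e);
        }
        return byMode;
    }

    public static void main(String[] args) {
        List<Entry> entries = List.of(
                new Entry("offsets", Mode.UNION, 0L),
                new Entry("offsets", Mode.UNION, 42L),
                new Entry("counters", Mode.SPLIT_DISTRIBUTE, 7L));
        System.out.println(groupByMode(entries));
    }
}
```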
dubbo_DubboAutoConfiguration_serviceAnnotationBeanProcessor | /**
* Creates {@link ServiceAnnotationPostProcessor} Bean
*
* @param packagesToScan the packages to scan
* @return {@link ServiceAnnotationPostProcessor}
*/
@ConditionalOnProperty(prefix = DUBBO_SCAN_PREFIX, name = BASE_PACKAGES_PROPERTY_NAME)
@ConditionalOnBean(name = BASE_PACKAGES_BEAN_NAME)
@Bean
public ServiceAnnotationPostProcessor serviceAnnotationBeanProcessor(
@Qualifier(BASE_PACKAGES_BEAN_NAME) Set<String> packagesToScan) {
return new ServiceAnnotationPostProcessor(packagesToScan);
} | 3.68 |
flink_AbstractStreamOperatorV2_open | /**
 * This method is called immediately before any elements are processed; it should contain the
* operator's initialization logic, e.g. state initialization.
*
* <p>The default implementation does nothing.
*
* @throws Exception An exception in this method causes the operator to fail.
*/
@Override
public void open() throws Exception {} | 3.68 |
flink_HiveParserUtils_isNative | // TODO: we need a way to tell whether a function is built-in, for now just return false so that
// the unparser will quote them
public static boolean isNative(SqlOperator sqlOperator) {
return false;
} | 3.68 |
dubbo_DubboMergingDigest_recordAllData | /**
* Turns on internal data recording.
*/
@Override
public TDigest recordAllData() {
super.recordAllData();
data = new ArrayList<>();
tempData = new ArrayList<>();
return this;
} | 3.68 |
hbase_SnapshotReferenceUtil_visitRegionStoreFiles | /**
* Iterate over the snapshot store files in the specified region
* @param manifest snapshot manifest to inspect
* @param visitor callback object to get the store files
* @throws IOException if an error occurred while scanning the directory
*/
public static void visitRegionStoreFiles(final SnapshotRegionManifest manifest,
final StoreFileVisitor visitor) throws IOException {
RegionInfo regionInfo = ProtobufUtil.toRegionInfo(manifest.getRegionInfo());
for (SnapshotRegionManifest.FamilyFiles familyFiles : manifest.getFamilyFilesList()) {
String familyName = familyFiles.getFamilyName().toStringUtf8();
for (SnapshotRegionManifest.StoreFile storeFile : familyFiles.getStoreFilesList()) {
visitor.storeFile(regionInfo, familyName, storeFile);
}
}
} | 3.68 |
dubbo_MetricsSupport_increment | /**
 * Increments the method metrics count.
*/
public static void increment(
MetricsKey metricsKey,
MetricsPlaceValue placeType,
MethodMetricsCollector<TimeCounterEvent> collector,
MetricsEvent event) {
collector.increment(
event.getAttachmentValue(METHOD_METRICS),
new MetricsKeyWrapper(metricsKey, placeType),
SELF_INCREMENT_SIZE);
} | 3.68 |
morf_ResultSetComparer_valueCheck | /**
 * Produces a mismatch at the specified column index if the values differ or one side is missing.
*/
@SuppressWarnings("rawtypes")
private Optional<ResultSetMismatch> valueCheck(ResultSet left, ResultSet right, String[] keys, int i, int columnType, MismatchType checkForMismatchType) throws SQLException {
Comparable leftValue;
Comparable rightValue;
switch(checkForMismatchType) {
case MISMATCH:
leftValue = columnToValue(left, i, columnType);
rightValue = columnToValue(right, i, columnType);
return compareColumnValue(leftValue, rightValue, keys, i, columnType, checkForMismatchType);
case MISSING_LEFT:
rightValue = columnToValue(right, i, columnType);
return Optional.of(new ResultSetMismatch(
MISSING_LEFT, i,
RECORD_NOT_PRESENT, valueToString(rightValue, columnType),
keys
));
case MISSING_RIGHT:
leftValue = columnToValue(left, i, columnType);
return Optional.of(new ResultSetMismatch(
MISSING_RIGHT, i,
valueToString(leftValue, columnType), RECORD_NOT_PRESENT,
keys
));
default:
throw new IllegalStateException("Unknown mismatch type");
}
} | 3.68 |
Activiti_DelegateInvocation_proceed | /**
 * Makes the invocation proceed, performing the actual invocation of the user code.
*
* @throws Exception
* the exception thrown by the user code
*/
public void proceed() {
invoke();
} | 3.68 |
hadoop_ReadBufferManager_getBlock | /**
 * {@link AbfsInputStream} calls this method to read any bytes already available in a buffer (thereby saving a
 * remote read). This returns the bytes if the data already exists in the buffer. If there is a buffer that is reading
 * the requested offset, then this method blocks until that read completes. If the data is queued in a read-ahead
 * but not picked up by a worker thread yet, then it cancels that read-ahead and reports a cache miss. This is because,
 * depending on worker thread availability, the read-ahead may take a while - the calling thread can do its own
 * read to get the data faster (compared to the read waiting in the queue for an indeterminate amount of time).
*
* @param stream the file to read bytes for
* @param position the offset in the file to do a read for
* @param length the length to read
* @param buffer the buffer to read data into. Note that the buffer will be written into from offset 0.
* @return the number of bytes read
*/
int getBlock(final AbfsInputStream stream, final long position, final int length, final byte[] buffer)
throws IOException {
// not synchronized, so have to be careful with locking
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("getBlock for file {} position {} thread {}",
stream.getPath(), position, Thread.currentThread().getName());
}
waitForProcess(stream, position);
int bytesRead = 0;
synchronized (this) {
bytesRead = getBlockFromCompletedQueue(stream, position, length, buffer);
}
if (bytesRead > 0) {
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("Done read from Cache for {} position {} length {}",
stream.getPath(), position, bytesRead);
}
return bytesRead;
}
// otherwise, just say we got nothing - calling thread can do its own read
return 0;
} | 3.68 |
flink_FactoryUtil_discoverOptionalDecodingFormat | /**
* Discovers a {@link DecodingFormat} of the given type using the given option (if present)
* as factory identifier.
*/
public <I, F extends DecodingFormatFactory<I>>
Optional<DecodingFormat<I>> discoverOptionalDecodingFormat(
Class<F> formatFactoryClass, ConfigOption<String> formatOption) {
return discoverOptionalFormatFactory(formatFactoryClass, formatOption)
.map(
formatFactory -> {
String formatPrefix = formatPrefix(formatFactory, formatOption);
try {
return formatFactory.createDecodingFormat(
context,
createFormatOptions(formatPrefix, formatFactory));
} catch (Throwable t) {
throw new ValidationException(
String.format(
"Error creating scan format '%s' in option space '%s'.",
formatFactory.factoryIdentifier(),
formatPrefix),
t);
}
});
} | 3.68 |
morf_AbstractSelectStatementBuilder_getOrderBys | /**
* Gets the fields which the select is ordered by
*
* @return the order by fields
*/
List<AliasedField> getOrderBys() {
return orderBys;
} | 3.68 |
pulsar_SecurityUtil_loginKerberos | /**
* Initializes UserGroupInformation with the given Configuration and performs the login for the
* given principal and keytab. All logins should happen through this class to ensure other threads
* are not concurrently modifying UserGroupInformation.
* <p/>
* @param config the configuration instance
* @param principal the principal to authenticate as
* @param keyTab the keytab to authenticate with
*
* @return the UGI for the given principal
*
* @throws IOException if login failed
*/
public static synchronized UserGroupInformation loginKerberos(final Configuration config,
final String principal, final String keyTab) throws IOException {
Validate.notNull(config);
Validate.notNull(principal);
Validate.notNull(keyTab);
UserGroupInformation.setConfiguration(config);
UserGroupInformation.loginUserFromKeytab(principal.trim(), keyTab.trim());
return UserGroupInformation.getCurrentUser();
} | 3.68 |
morf_NamedParameterPreparedStatement_setQueryTimeout | /**
* Sets the timeout in <b>seconds</b> after which the query will time out on
* database side. In such case JDBC driver will throw an exception which is
* specific to database implementation but is likely to extend
* {@link SQLTimeoutException}.
*
* @param queryTimeout timeout in <b>seconds</b>
* @exception SQLException if an error occurs when setting the timeout
*/
public void setQueryTimeout(Integer queryTimeout) throws SQLException {
statement.setQueryTimeout(queryTimeout);
} | 3.68 |
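A hedged usage sketch of a statement-level query timeout with plain JDBC; the in-memory H2 URL and the SQL are placeholders, not part of the Morf wrapper above:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLTimeoutException;

class QueryTimeoutDemo {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details; substitute a real JDBC URL and credentials.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
             PreparedStatement stmt = conn.prepareStatement("SELECT 1")) {
            stmt.setQueryTimeout(5); // seconds, enforced by the driver on the database side
            try (ResultSet rs = stmt.executeQuery()) {
                while (rs.next()) {
                    System.out.println(rs.getInt(1));
                }
            } catch (SQLTimeoutException e) {
                // Most drivers throw a vendor-specific subclass of SQLTimeoutException.
                System.err.println("Query exceeded the configured timeout: " + e.getMessage());
            }
        }
    }
}
```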
flink_HiveParserCalcitePlanner_genDistSortBy | // Generate plan for sort by, cluster by and distribute by. This is basically the same as generating
// an order by plan.
// Should refactor to combine them.
private Pair<RelNode, RelNode> genDistSortBy(
HiveParserQB qb, RelNode srcRel, boolean outermostOB) throws SemanticException {
RelNode res = null;
RelNode originalInput = null;
HiveParserQBParseInfo qbp = qb.getParseInfo();
String destClause = qbp.getClauseNames().iterator().next();
HiveParserASTNode sortAST = qbp.getSortByForClause(destClause);
HiveParserASTNode distAST = qbp.getDistributeByForClause(destClause);
HiveParserASTNode clusterAST = qbp.getClusterByForClause(destClause);
if (sortAST != null || distAST != null || clusterAST != null) {
List<RexNode> virtualCols = new ArrayList<>();
List<Pair<HiveParserASTNode, TypeInfo>> vcASTAndType = new ArrayList<>();
List<RelFieldCollation> fieldCollations = new ArrayList<>();
List<Integer> distKeys = new ArrayList<>();
HiveParserRowResolver inputRR = relToRowResolver.get(srcRel);
HiveParserRexNodeConverter converter =
new HiveParserRexNodeConverter(
cluster,
srcRel.getRowType(),
relToHiveColNameCalcitePosMap.get(srcRel),
0,
false,
funcConverter);
int numSrcFields = srcRel.getRowType().getFieldCount();
// handle cluster by
if (clusterAST != null) {
if (sortAST != null) {
throw new SemanticException("Cannot have both CLUSTER BY and SORT BY");
}
if (distAST != null) {
throw new SemanticException("Cannot have both CLUSTER BY and DISTRIBUTE BY");
}
for (Node node : clusterAST.getChildren()) {
HiveParserASTNode childAST = (HiveParserASTNode) node;
Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc =
semanticAnalyzer.genAllExprNodeDesc(childAST, inputRR);
ExprNodeDesc childNodeDesc = astToExprNodeDesc.get(childAST);
if (childNodeDesc == null) {
throw new SemanticException(
"Invalid CLUSTER BY expression: " + childAST.toString());
}
RexNode childRexNode = converter.convert(childNodeDesc).accept(funcConverter);
int fieldIndex;
if (childRexNode instanceof RexInputRef) {
fieldIndex = ((RexInputRef) childRexNode).getIndex();
} else {
fieldIndex = numSrcFields + virtualCols.size();
virtualCols.add(childRexNode);
vcASTAndType.add(new Pair<>(childAST, childNodeDesc.getTypeInfo()));
}
// cluster by doesn't support specifying ASC/DESC or NULLS FIRST/LAST, so use
// default values
fieldCollations.add(
new RelFieldCollation(
fieldIndex,
RelFieldCollation.Direction.ASCENDING,
RelFieldCollation.NullDirection.FIRST));
distKeys.add(fieldIndex);
}
} else {
// handle sort by
if (sortAST != null) {
for (Node node : sortAST.getChildren()) {
HiveParserASTNode childAST = (HiveParserASTNode) node;
HiveParserASTNode nullOrderAST = (HiveParserASTNode) childAST.getChild(0);
HiveParserASTNode fieldAST = (HiveParserASTNode) nullOrderAST.getChild(0);
Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc =
semanticAnalyzer.genAllExprNodeDesc(fieldAST, inputRR);
ExprNodeDesc fieldNodeDesc = astToExprNodeDesc.get(fieldAST);
if (fieldNodeDesc == null) {
throw new SemanticException(
"Invalid sort by expression: " + fieldAST.toString());
}
RexNode childRexNode =
converter.convert(fieldNodeDesc).accept(funcConverter);
int fieldIndex;
if (childRexNode instanceof RexInputRef) {
fieldIndex = ((RexInputRef) childRexNode).getIndex();
} else {
fieldIndex = numSrcFields + virtualCols.size();
virtualCols.add(childRexNode);
vcASTAndType.add(new Pair<>(childAST, fieldNodeDesc.getTypeInfo()));
}
RelFieldCollation.Direction direction =
RelFieldCollation.Direction.DESCENDING;
if (childAST.getType() == HiveASTParser.TOK_TABSORTCOLNAMEASC) {
direction = RelFieldCollation.Direction.ASCENDING;
}
RelFieldCollation.NullDirection nullOrder;
if (nullOrderAST.getType() == HiveASTParser.TOK_NULLS_FIRST) {
nullOrder = RelFieldCollation.NullDirection.FIRST;
} else if (nullOrderAST.getType() == HiveASTParser.TOK_NULLS_LAST) {
nullOrder = RelFieldCollation.NullDirection.LAST;
} else {
throw new SemanticException(
"Unexpected null ordering option: " + nullOrderAST.getType());
}
fieldCollations.add(
new RelFieldCollation(fieldIndex, direction, nullOrder));
}
}
// handle distribute by
if (distAST != null) {
for (Node node : distAST.getChildren()) {
HiveParserASTNode childAST = (HiveParserASTNode) node;
Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc =
semanticAnalyzer.genAllExprNodeDesc(childAST, inputRR);
ExprNodeDesc childNodeDesc = astToExprNodeDesc.get(childAST);
if (childNodeDesc == null) {
throw new SemanticException(
"Invalid DISTRIBUTE BY expression: " + childAST.toString());
}
RexNode childRexNode =
converter.convert(childNodeDesc).accept(funcConverter);
int fieldIndex;
if (childRexNode instanceof RexInputRef) {
fieldIndex = ((RexInputRef) childRexNode).getIndex();
} else {
fieldIndex = numSrcFields + virtualCols.size();
virtualCols.add(childRexNode);
vcASTAndType.add(new Pair<>(childAST, childNodeDesc.getTypeInfo()));
}
distKeys.add(fieldIndex);
}
}
}
Preconditions.checkState(
!fieldCollations.isEmpty() || !distKeys.isEmpty(),
"Both field collations and dist keys are empty");
// add child SEL if needed
RelNode realInput = srcRel;
HiveParserRowResolver outputRR = new HiveParserRowResolver();
if (!virtualCols.isEmpty()) {
List<RexNode> originalInputRefs =
srcRel.getRowType().getFieldList().stream()
.map(input -> new RexInputRef(input.getIndex(), input.getType()))
.collect(Collectors.toList());
HiveParserRowResolver addedProjectRR = new HiveParserRowResolver();
if (!HiveParserRowResolver.add(addedProjectRR, inputRR)) {
throw new SemanticException(
"Duplicates detected when adding columns to RR: see previous message");
}
int vColPos = inputRR.getRowSchema().getSignature().size();
for (Pair<HiveParserASTNode, TypeInfo> astTypePair : vcASTAndType) {
addedProjectRR.putExpression(
astTypePair.getKey(),
new ColumnInfo(
getColumnInternalName(vColPos),
astTypePair.getValue(),
null,
false));
vColPos++;
}
realInput =
genSelectRelNode(
CompositeList.of(originalInputRefs, virtualCols),
addedProjectRR,
srcRel);
if (outermostOB) {
if (!HiveParserRowResolver.add(outputRR, inputRR)) {
throw new SemanticException(
"Duplicates detected when adding columns to RR: see previous message");
}
} else {
if (!HiveParserRowResolver.add(outputRR, addedProjectRR)) {
throw new SemanticException(
"Duplicates detected when adding columns to RR: see previous message");
}
}
originalInput = srcRel;
} else {
if (!HiveParserRowResolver.add(outputRR, inputRR)) {
throw new SemanticException(
"Duplicates detected when adding columns to RR: see previous message");
}
}
// create rel node
RelTraitSet traitSet = cluster.traitSet();
RelCollation canonizedCollation = traitSet.canonize(RelCollations.of(fieldCollations));
res = LogicalDistribution.create(realInput, canonizedCollation, distKeys);
Map<String, Integer> hiveColNameCalcitePosMap = buildHiveToCalciteColumnMap(outputRR);
relToRowResolver.put(res, outputRR);
relToHiveColNameCalcitePosMap.put(res, hiveColNameCalcitePosMap);
}
return (new Pair<>(res, originalInput));
} | 3.68 |
hadoop_AzureBlobFileSystem_trailingPeriodCheck | /**
 * Checks each path component up to the root for a trailing period (.) and throws an exception if one is found.
 * The purpose is to differentiate between dir/dir1 and dir/dir1.
 * Without the exception, dir/dir1. would appear to be present without its
 * actual creation, because dir/dir1 and dir/dir1. are treated as identical.
* @param path the path to be checked for trailing period (.)
* @throws IllegalArgumentException if the path has a trailing period (.)
*/
private void trailingPeriodCheck(Path path) throws IllegalArgumentException {
while (!path.isRoot()) {
String pathToString = path.toString();
if (pathToString.length() != 0) {
if (pathToString.charAt(pathToString.length() - 1) == '.') {
throw new IllegalArgumentException(
"ABFS does not allow files or directories to end with a dot.");
}
path = path.getParent();
} else {
break;
}
}
} | 3.68 |
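A standalone sketch of the same walk-up-to-root check using `java.nio.file.Path` instead of Hadoop's `Path`; it illustrates the idea rather than the ABFS implementation:

```java
import java.nio.file.Path;
import java.nio.file.Paths;

class TrailingPeriodCheckSketch {
    static void trailingPeriodCheck(Path path) {
        // Walk from the leaf towards the root, rejecting any component that ends with '.'
        for (Path p = path; p != null; p = p.getParent()) {
            String name = p.getFileName() == null ? "" : p.getFileName().toString();
            if (!name.isEmpty() && name.charAt(name.length() - 1) == '.') {
                throw new IllegalArgumentException(
                        "Files or directories must not end with a dot: " + p);
            }
        }
    }

    public static void main(String[] args) {
        trailingPeriodCheck(Paths.get("/data/dir/dir1"));   // passes
        trailingPeriodCheck(Paths.get("/data/dir/dir1."));  // throws IllegalArgumentException
    }
}
```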
pulsar_LegacyHierarchicalLedgerRangeIterator_getLedgerRangeByLevel | /**
* Get a single node level1/level2.
*
* @param level1
* 1st level node name
* @param level2
* 2nd level node name
* @throws IOException
*/
LedgerManager.LedgerRange getLedgerRangeByLevel(final String level1, final String level2)
throws IOException {
StringBuilder nodeBuilder = threadLocalNodeBuilder.get();
nodeBuilder.setLength(0);
nodeBuilder.append(ledgersRoot).append("/")
.append(level1).append("/").append(level2);
String nodePath = nodeBuilder.toString();
List<String> ledgerNodes = null;
try {
ledgerNodes = store.getChildren(nodePath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS);
} catch (ExecutionException | TimeoutException e) {
throw new IOException("Error when get child nodes from zk", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IOException("Error when get child nodes from zk", e);
}
NavigableSet<Long> zkActiveLedgers =
HierarchicalLedgerUtils.ledgerListToSet(ledgerNodes, ledgersRoot, nodePath);
if (log.isDebugEnabled()) {
log.debug("All active ledgers from ZK for hash node "
+ level1 + "/" + level2 + " : " + zkActiveLedgers);
}
return new LedgerManager.LedgerRange(zkActiveLedgers.subSet(getStartLedgerIdByLevel(level1, level2), true,
getEndLedgerIdByLevel(level1, level2), true));
} | 3.68 |
hbase_EnableTableProcedure_getMaxReplicaId | /** Returns Maximum region replica id found in passed list of regions. */
private static int getMaxReplicaId(List<RegionInfo> regions) {
int max = 0;
for (RegionInfo regionInfo : regions) {
if (regionInfo.getReplicaId() > max) {
// Iterating through all the list to identify the highest replicaID region.
// We can stop after checking with the first set of regions??
max = regionInfo.getReplicaId();
}
}
return max;
} | 3.68 |
hadoop_IOStatisticsContext_setThreadIOStatisticsContext | /**
* Set the IOStatisticsContext for the current thread.
* @param statisticsContext IOStatistics context instance for the
* current thread. If null, the context is reset.
*/
static void setThreadIOStatisticsContext(
IOStatisticsContext statisticsContext) {
IOStatisticsContextIntegration.setThreadIOStatisticsContext(
statisticsContext);
} | 3.68 |
hibernate-validator_AnnotationMetaDataProvider_findConstraintAnnotations | /**
* Examines the given annotation to see whether it is a single- or multi-valued constraint annotation.
*
* @param constrainable The constrainable to check for constraints annotations
* @param annotation The annotation to examine
* @param type the element type on which the annotation/constraint is placed on
* @param <A> the annotation type
*
* @return A list of constraint descriptors or the empty list in case {@code annotation} is neither a
* single nor multi-valued annotation.
*/
protected <A extends Annotation> List<ConstraintDescriptorImpl<?>> findConstraintAnnotations(
Constrainable constrainable,
A annotation,
ConstraintLocationKind type) {
// HV-1049 and HV-1311 - Ignore annotations from the JDK (jdk.internal.* and java.*); They cannot be constraint
// annotations so skip them right here, as for the proper check we'd need package access permission for
// "jdk.internal" and "java".
if ( constraintCreationContext.getConstraintHelper().isJdkAnnotation( annotation.annotationType() ) ) {
return Collections.emptyList();
}
List<Annotation> constraints = newArrayList();
Class<? extends Annotation> annotationType = annotation.annotationType();
if ( constraintCreationContext.getConstraintHelper().isConstraintAnnotation( annotationType ) ) {
constraints.add( annotation );
}
else if ( constraintCreationContext.getConstraintHelper().isMultiValueConstraint( annotationType ) ) {
constraints.addAll( constraintCreationContext.getConstraintHelper().getConstraintsFromMultiValueConstraint( annotation ) );
}
return constraints.stream()
.map( c -> buildConstraintDescriptor( constrainable, c, type ) )
.collect( Collectors.toList() );
} | 3.68 |
framework_VAccordion_clearPaintables | /**
* {@inheritDoc}
*
* @deprecated This method is not called by the framework code anymore.
*/
@Deprecated
@Override
protected void clearPaintables() {
clear();
} | 3.68 |
hudi_HoodieTableConfig_getTableChecksum | /**
* Read the table checksum.
*/
private Long getTableChecksum() {
return getLong(TABLE_CHECKSUM);
} | 3.68 |
hbase_Procedure_isRunnable | /** Returns true if the procedure is in a RUNNABLE state. */
public synchronized boolean isRunnable() {
return state == ProcedureState.RUNNABLE;
} | 3.68 |
flink_ShuffleMaster_close | /**
* Closes this shuffle master service which should release all resources. A shuffle master will
* only be closed when the cluster is shut down.
*/
@Override
default void close() throws Exception {} | 3.68 |
hbase_HMaster_main | /**
* @see org.apache.hadoop.hbase.master.HMasterCommandLine
*/
public static void main(String[] args) {
LOG.info("STARTING service " + HMaster.class.getSimpleName());
VersionInfo.logVersion();
new HMasterCommandLine(HMaster.class).doMain(args);
} | 3.68 |
querydsl_CollectionUtils_unmodifiableSet | /**
 * Return an unmodifiable copy of a set, or the same set if it's already an unmodifiable type.
*
* @param set the set
* @param <T> element type
 * @return unmodifiable copy of a set, or the same set if it's already an unmodifiable type
*/
@SuppressWarnings("unchecked")
public static <T> Set<T> unmodifiableSet(Set<T> set) {
if (isUnmodifiableType(set.getClass())) {
return set;
}
switch (set.size()) {
case 0:
return Collections.emptySet();
case 1:
return Collections.singleton(set.iterator().next());
default:
return Collections.unmodifiableSet((Set<T>) (
set instanceof LinkedHashSet ? ((LinkedHashSet<T>) set).clone() :
set instanceof TreeSet ? ((TreeSet<T>) set).clone() :
set instanceof HashSet ? ((HashSet<T>) set).clone() :
new LinkedHashSet<>(set)));
}
} | 3.68 |
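A short usage sketch of the contract described above: a mutable input is defensively copied, so later mutation of the original does not leak into the returned set. It assumes the querydsl `CollectionUtils` class shown above is on the classpath:

```java
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

class UnmodifiableSetDemo {
    public static void main(String[] args) {
        Set<String> mutable = new LinkedHashSet<>(List.of("a", "b"));
        Set<String> frozen = CollectionUtils.unmodifiableSet(mutable);

        mutable.add("c");            // mutating the original does not affect the copy
        System.out.println(frozen);  // [a, b]

        // Per the Javadoc above, an already-unmodifiable set is returned as-is rather than copied again.
        Set<String> again = CollectionUtils.unmodifiableSet(frozen);
        System.out.println(again);   // [a, b]
    }
}
```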
flink_CommittableMessageTypeInfo_noOutput | /**
* Returns the type information for a {@link CommittableMessage} with no committable.
*
* @return {@link TypeInformation} with {@link CommittableMessage}
*/
public static TypeInformation<CommittableMessage<Void>> noOutput() {
return new CommittableMessageTypeInfo<>(NoOutputSerializer::new);
} | 3.68 |
hibernate-validator_ExecutableHelper_isResolvedToSameMethodInHierarchy | /**
* Checks if a pair of given methods ({@code left} and {@code right}) are resolved to the same
* method based on the {@code mainSubType} type.
*
* @param mainSubType a type at the bottom of class hierarchy to be used to lookup the methods.
* @param left one of the methods to check
* @param right another of the methods to check
*
* @return {@code true} if a pair of methods are equal {@code left == right}, or one of the methods
* override another one in the class hierarchy with {@code mainSubType} at the bottom,
* {@code false} otherwise.
*/
public boolean isResolvedToSameMethodInHierarchy(Class<?> mainSubType, Method left, Method right) {
Contracts.assertValueNotNull( mainSubType, "mainSubType" );
Contracts.assertValueNotNull( left, "left" );
Contracts.assertValueNotNull( right, "right" );
if ( left.equals( right ) ) {
return true;
}
if ( !left.getName().equals( right.getName() ) ) {
return false;
}
// methods with same name in the same class should be different
if ( left.getDeclaringClass().equals( right.getDeclaringClass() ) ) {
return false;
}
if ( left.getParameterCount() != right.getParameterCount() ) {
return false;
}
// if at least one method from a pair is static - they are different methods
if ( Modifier.isStatic( right.getModifiers() ) || Modifier.isStatic( left.getModifiers() ) ) {
return false;
}
// HV-861 Bridge method should be ignored. Classmates type/member resolution will take care of proper
// override detection without considering bridge methods
if ( left.isBridge() || right.isBridge() ) {
return false;
}
// if one of the methods is private - methods are different
if ( Modifier.isPrivate( left.getModifiers() ) || Modifier.isPrivate( right.getModifiers() ) ) {
return false;
}
if ( !isMethodVisibleTo( right, left ) || !isMethodVisibleTo( left, right ) ) {
return false;
}
// We need to check if the passed mainSubType is not a Weld proxy. In case of proxy we need to get
// a class that was proxied otherwise we can use the class itself. This is due to the issue that
// call to Class#getGenericInterfaces() on a Weld proxy returns raw types instead of parametrized
// generics and methods will not be resolved correctly.
return instanceMethodParametersResolveToSameTypes(
Filters.excludeProxies().accepts( mainSubType ) ? mainSubType : mainSubType.getSuperclass(),
left,
right
);
} | 3.68 |
framework_Calendar_setStartDate | /**
* Sets start date for the calendar. This and {@link #setEndDate(Date)}
* control the range of dates visible on the component. The default range is
* one week.
*
* @param date
* First visible date to show.
*/
public void setStartDate(Date date) {
if (!date.equals(startDate)) {
startDate = date;
markAsDirty();
}
} | 3.68 |
hmily_OriginTrackedPropertiesLoader_isEndOfLine | /**
 * Returns whether the end of the current line (or of the input) has been reached.
 *
 * @return {@code true} if the character is an unescaped newline or the end of input
*/
public boolean isEndOfLine() {
return this.character == -1 || (!this.escaped && this.character == '\n');
} | 3.68 |
hadoop_AbstractClientRequestInterceptor_getConf | /**
* Gets the {@link Configuration}.
*/
@Override
public Configuration getConf() {
return this.conf;
} | 3.68 |
framework_Window_addFocusListener | /*
* (non-Javadoc)
*
* @see
* com.vaadin.event.FieldEvents.FocusNotifier#addFocusListener(com.vaadin
* .event.FieldEvents.FocusListener)
*/
@Override
public Registration addFocusListener(FocusListener listener) {
return addListener(FocusEvent.EVENT_ID, FocusEvent.class, listener,
FocusListener.focusMethod);
} | 3.68 |
hudi_RDDConsistentBucketBulkInsertPartitioner_initializeBucketIdentifier | /**
* Initialize hashing metadata of input records. The metadata of all related partitions will be loaded, and
* the mapping from partition to its bucket identifier is constructed.
*/
private Map<String, ConsistentBucketIdentifier> initializeBucketIdentifier(JavaRDD<HoodieRecord<T>> records) {
return records.map(HoodieRecord::getPartitionPath).distinct().collect().stream()
.collect(Collectors.toMap(p -> p, this::getBucketIdentifier));
} | 3.68 |
framework_AbstractComponentTest_createActions | /**
* Create actions for the component. Remember to call super.createActions()
* when overriding.
*/
protected void createActions() {
createBooleanAction("Enabled", CATEGORY_STATE, true, enabledCommand);
createBooleanAction("Readonly", CATEGORY_STATE, false, readonlyCommand);
createBooleanAction("Visible", CATEGORY_STATE, true, visibleCommand);
createBooleanAction("Error indicator", CATEGORY_STATE, false,
errorIndicatorCommand);
createLocaleSelect(CATEGORY_STATE);
createErrorMessageSelect(CATEGORY_DECORATIONS);
createDescriptionSelect(CATEGORY_DECORATIONS);
createCaptionSelect(CATEGORY_DECORATIONS);
createIconSelect(CATEGORY_DECORATIONS);
createWidthAndHeightActions(CATEGORY_SIZE);
createStyleNameSelect(CATEGORY_DECORATIONS);
createFocusActions();
} | 3.68 |
shardingsphere-elasticjob_ZookeeperRegistryCenter_waitForCacheClose | /*
 * // TODO Sleep 500ms so the cache client closes before the client itself, otherwise an exception is thrown. Reference: https://issues.apache.org/jira/browse/CURATOR-157
*/
private void waitForCacheClose() {
try {
Thread.sleep(500L);
} catch (final InterruptedException ex) {
Thread.currentThread().interrupt();
}
} | 3.68 |
hudi_Hive3Shim_getDateWriteable | /**
* Get date writeable object from int value.
* Hive3 use DateWritableV2 to build date objects and Hive2 use DateWritable.
* So that we need to initialize date according to the version of Hive.
*/
public Writable getDateWriteable(int value) {
try {
return (Writable) DATE_WRITEABLE_V2_CONSTRUCTOR.newInstance(value);
} catch (IllegalAccessException | InstantiationException | InvocationTargetException e) {
throw new HoodieException("can not create writable v2 class!", e);
}
} | 3.68 |
hadoop_MutableGaugeLong_decr | /**
* decrement by delta
* @param delta of the decrement
*/
public void decr(long delta) {
value.addAndGet(-delta);
setChanged();
} | 3.68 |
graphhopper_GraphHopper_setProfiles | /**
* Sets the routing profiles that shall be supported by this GraphHopper instance. The (and only the) given profiles
* can be used for routing without preparation and for CH/LM preparation.
* <p>
* Here is an example how to setup two CH profiles and one LM profile (via the Java API)
*
* <pre>
* {@code
* hopper.setProfiles(
* new Profile("my_car").setVehicle("car"),
* new Profile("your_bike").setVehicle("bike")
* );
* hopper.getCHPreparationHandler().setCHProfiles(
* new CHProfile("my_car"),
* new CHProfile("your_bike")
* );
* hopper.getLMPreparationHandler().setLMProfiles(
* new LMProfile("your_bike")
* );
* }
* </pre>
* <p>
* See also https://github.com/graphhopper/graphhopper/pull/1922.
*
* @see CHPreparationHandler#setCHProfiles
* @see LMPreparationHandler#setLMProfiles
*/
public GraphHopper setProfiles(Profile... profiles) {
return setProfiles(Arrays.asList(profiles));
} | 3.68 |
flink_SavepointReader_readListState | /**
* Read operator {@code ListState} from a {@code Savepoint} when a custom serializer was used;
* e.g., a different serializer than the one returned by {@code
* TypeInformation#createSerializer}.
*
* @param identifier The identifier of the operator.
* @param name The (unique) name for the state.
* @param typeInfo The type of the elements in the state.
* @param serializer The serializer used to write the elements into state.
* @param <T> The type of the values that are in the list state.
* @return A {@code DataStream} representing the elements in state.
* @throws IOException If the savepoint path is invalid or the uid does not exist.
*/
public <T> DataStream<T> readListState(
OperatorIdentifier identifier,
String name,
TypeInformation<T> typeInfo,
TypeSerializer<T> serializer)
throws IOException {
return readListState(identifier, typeInfo, new ListStateDescriptor<>(name, serializer));
} | 3.68 |
flink_HiveParserASTNodeOrigin_getUsageAlias | /**
 * @return the alias of the object from which a HiveParserASTNode originated, e.g. "v1" (this
* can help with debugging context-dependent expansions)
*/
public String getUsageAlias() {
return usageAlias;
} | 3.68 |
hadoop_AbfsConfiguration_accountConf | /**
* Appends an account name to a configuration key yielding the
* account-specific form.
* @param key Account-agnostic configuration key
* @return Account-specific configuration key
*/
public String accountConf(String key) {
return key + "." + accountName;
} | 3.68 |
hadoop_LpSolver_generateUnderAllocationConstraints | /**
* Generate under-allocation constraints.
*
* @param lpModel the LP model.
* @param cJobITimeK actual container allocation for job i in time
* interval k.
* @param uaPredict absolute container under-allocation.
* @param ua recursive container under-allocation.
* @param x predicted container allocation.
* @param indexJobITimeK index for job i at time interval k.
* @param timeK index for time interval k.
*/
private void generateUnderAllocationConstraints(
final ExpressionsBasedModel lpModel, final double cJobITimeK,
final Variable[] uaPredict, final Variable[] ua, final Variable[] x,
final int indexJobITimeK, final int timeK) {
// uaPredict_job_i_timeK + x_timeK >= cJobITimeK
Expression underAllocPredictExpression =
lpModel.addExpression("under_alloc_predict_" + indexJobITimeK);
underAllocPredictExpression.set(uaPredict[indexJobITimeK], 1);
underAllocPredictExpression.set(x[timeK], 1);
underAllocPredictExpression.lower(cJobITimeK); // >=
if (timeK >= 1) {
/** Recursively calculate container under-allocation. */
// ua_job_i_timeK >= ua_job_i_time_(k-1) + cJobITimeK - x_timeK
Expression underAllocExpression =
lpModel.addExpression("under_alloc_" + indexJobITimeK);
underAllocExpression.set(ua[indexJobITimeK], 1);
underAllocExpression.set(ua[indexJobITimeK - 1], -1);
underAllocExpression.set(x[timeK], 1);
underAllocExpression.lower(cJobITimeK); // >=
} else {
/** Initial value for container under-allocation. */
// ua_job_i_time_0 >= cJobI_time_0 - x_time_0
Expression underAllocExpression =
lpModel.addExpression("under_alloc_" + indexJobITimeK);
underAllocExpression.set(ua[indexJobITimeK], 1);
underAllocExpression.set(x[timeK], 1);
underAllocExpression.lower(cJobITimeK); // >=
}
} | 3.68 |
AreaShop_RentRegion_setDuration | /**
* Set the duration of the rent.
* @param duration The duration of the rent (as specified on the documentation pages)
*/
public void setDuration(String duration) {
setSetting("rent.duration", duration);
} | 3.68 |
querydsl_PathMetadataFactory_forArrayAccess | /**
* Create a new PathMetadata instance for indexed array access
*
* @param parent parent path
* @param index index of element
* @return array access path
*/
public static PathMetadata forArrayAccess(Path<?> parent, @Range(from = 0, to = Integer.MAX_VALUE) int index) {
return new PathMetadata(parent, index, PathType.ARRAYVALUE_CONSTANT);
} | 3.68 |
graphhopper_InstructionsOutgoingEdges_isLeavingCurrentStreet | /**
 * If the name and prevName change, this method checks whether either the current street continues on a
 * different edge or the edge we are turning onto continues on a different edge.
* If either of these properties is true, we can be quite certain that a turn instruction should be provided.
*/
public boolean isLeavingCurrentStreet(String prevName, String name) {
if (InstructionsHelper.isNameSimilar(name, prevName)) {
return false;
}
boolean roadClassOrLinkChange = !isTheSameRoadClassAndLink(prevEdge, currentEdge);
for (EdgeIteratorState edge : allowedAlternativeTurns) {
String edgeName = edge.getName();
// leave the current street
if (InstructionsHelper.isNameSimilar(prevName, edgeName) || (roadClassOrLinkChange && isTheSameRoadClassAndLink(prevEdge, edge))) {
return true;
}
// enter a different street
if (InstructionsHelper.isNameSimilar(name, edgeName) || (roadClassOrLinkChange && isTheSameRoadClassAndLink(currentEdge, edge))) {
return true;
}
}
return false;
} | 3.68 |
hadoop_SysInfoWindows_getNetworkBytesWritten | /** {@inheritDoc} */
@Override
public long getNetworkBytesWritten() {
refreshIfNeeded();
return netBytesWritten;
} | 3.68 |
hbase_ReplicationSourceLogQueue_getOldestWalTimestamp | /*
* Get the oldest wal timestamp from all the queues.
*/
private long getOldestWalTimestamp() {
long oldestWalTimestamp = Long.MAX_VALUE;
for (Map.Entry<String, PriorityBlockingQueue<Path>> entry : queues.entrySet()) {
PriorityBlockingQueue<Path> queue = entry.getValue();
Path path = queue.peek();
// Can path ever be null ?
if (path != null) {
oldestWalTimestamp =
Math.min(oldestWalTimestamp, AbstractFSWALProvider.WALStartTimeComparator.getTS(path));
}
}
return oldestWalTimestamp;
} | 3.68 |
hbase_ReplicationSourceManager_logPositionAndCleanOldLogs | /**
* This method will log the current position to storage. And also clean old logs from the
* replication queue.
* @param source the replication source
* @param entryBatch the wal entry batch we just shipped
*/
public void logPositionAndCleanOldLogs(ReplicationSourceInterface source,
WALEntryBatch entryBatch) {
String walName = entryBatch.getLastWalPath().getName();
String walPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(walName);
// if end of file, we just set the offset to -1 so we know that this file has already been fully
// replicated, otherwise we need to compare the file length
ReplicationGroupOffset offset = new ReplicationGroupOffset(walName,
entryBatch.isEndOfFile() ? -1 : entryBatch.getLastWalPosition());
interruptOrAbortWhenFail(() -> this.queueStorage.setOffset(source.getQueueId(), walPrefix,
offset, entryBatch.getLastSeqIds()));
cleanOldLogs(walName, entryBatch.isEndOfFile(), source);
} | 3.68 |
hbase_HFileWriterImpl_getPath | /** Returns Path or null if we were passed a stream rather than a Path. */
@Override
public Path getPath() {
return path;
} | 3.68 |
hadoop_AzureBlobFileSystemStore_isAppendBlobKey | /**
 * Checks if the given key in Azure Storage should be stored as an append
 * blob instead of a block blob.
*/
public boolean isAppendBlobKey(String key) {
return isKeyForDirectorySet(key, appendBlobDirSet);
} | 3.68 |
graphhopper_ResponsePath_getPoints | /**
* This method returns all points on the path. Keep in mind that calculating the distance from
* these points might yield different results compared to getDistance as points could have been
* simplified on import or after querying.
*/
public PointList getPoints() {
check("getPoints");
return pointList;
} | 3.68 |
graphhopper_StringEncodedValue_indexOf | /**
* @param value the String to retrieve the index
* @return the non-zero index of the String or <i>0</i> if it couldn't be found
*/
public int indexOf(String value) {
return indexMap.getOrDefault(value, 0);
} | 3.68 |
pulsar_MessageRouter_choosePartition | /**
* Choose a partition based on msg and the topic metadata.
*
* @param msg message to route
* @param metadata topic metadata
* @return the partition to route the message.
* @since 1.22.0
*/
default int choosePartition(Message<?> msg, TopicMetadata metadata) {
return choosePartition(msg);
} | 3.68 |
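A hedged sketch of implementing this interface with a key-hash router. Only `choosePartition` appears in the snippet, so the accessors `Message#getKey()` and `TopicMetadata#numPartitions()` are assumptions about the surrounding Pulsar client API:

```java
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.MessageRouter;
import org.apache.pulsar.client.api.TopicMetadata;

// A sketch, not a built-in Pulsar router.
class KeyHashRouter implements MessageRouter {
    @Override
    public int choosePartition(Message<?> msg, TopicMetadata metadata) {
        String key = msg.getKey();                 // assumed accessor
        int partitions = metadata.numPartitions(); // assumed accessor
        // Route keyless messages to partition 0; otherwise hash the key into the partition range.
        return key == null ? 0 : (key.hashCode() & Integer.MAX_VALUE) % partitions;
    }
}
```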
hbase_ParseFilter_registerFilter | /**
* Register a new filter with the parser. If the filter is already registered, an
* IllegalArgumentException will be thrown.
* @param name a name for the filter
* @param filterClass fully qualified class name
*/
public static void registerFilter(String name, String filterClass) {
if (LOG.isInfoEnabled()) LOG.info("Registering new filter " + name);
filterHashMap.put(name, filterClass);
} | 3.68 |
hbase_HbckChore_runChore | /**
* Request execution of this chore's action.
* @return {@code true} if the chore was executed, {@code false} if the chore is disabled or
* already running.
*/
public boolean runChore() {
// This function does the sanity checks of making sure the chore is not run when it is
// disabled or when it's already running. It returns whether the chore was actually run or not.
if (isDisabled() || isRunning()) {
if (isDisabled()) {
LOG.warn("hbck chore is disabled! Set " + HBCK_CHORE_INTERVAL + " > 0 to enable it.");
} else {
LOG.warn("hbck chore already running. Can't run till it finishes.");
}
return false;
}
chore();
return true;
} | 3.68 |
hbase_KeyValue_matchingRowColumn | /**
* Compares the row and column of two keyvalues for equality
* @param left left cell to compare row and column
* @param right right cell to compare row and column
* @return True if same row and column.
*/
public boolean matchingRowColumn(final Cell left, final Cell right) {
short lrowlength = left.getRowLength();
short rrowlength = right.getRowLength();
// TsOffset = end of column data. just comparing Row+CF length of each
if (
(left.getRowLength() + left.getFamilyLength() + left.getQualifierLength())
!= (right.getRowLength() + right.getFamilyLength() + right.getQualifierLength())
) {
return false;
}
if (!matchingRows(left, lrowlength, right, rrowlength)) {
return false;
}
int lfoffset = left.getFamilyOffset();
int rfoffset = right.getFamilyOffset();
int lclength = left.getQualifierLength();
int rclength = right.getQualifierLength();
int lfamilylength = left.getFamilyLength();
int rfamilylength = right.getFamilyLength();
int diff = compareFamilies(left.getFamilyArray(), lfoffset, lfamilylength,
right.getFamilyArray(), rfoffset, rfamilylength);
if (diff != 0) {
return false;
} else {
diff = compareColumns(left.getQualifierArray(), left.getQualifierOffset(), lclength,
right.getQualifierArray(), right.getQualifierOffset(), rclength);
return diff == 0;
}
} | 3.68 |
hadoop_TaskInfo_getOutputBytes | /**
* @return Raw bytes written to the destination FileSystem. Note that this may
* not match output bytes.
*/
public long getOutputBytes() {
return bytesOut;
} | 3.68 |
hmily_MetricsTrackerFacade_start | /**
 * Initializes the metrics tracker manager.
*
* @param metricsConfig metrics config
*/
public void start(final HmilyMetricsConfig metricsConfig) {
if (this.isStarted.compareAndSet(false, true)) {
metricsBootService = ExtensionLoaderFactory.load(MetricsBootService.class, metricsConfig.getMetricsName());
Preconditions.checkNotNull(metricsBootService,
"Can not find metrics tracker manager with metrics name : %s in metrics configuration.", metricsConfig.getMetricsName());
metricsBootService.start(metricsConfig, ExtensionLoaderFactory.load(MetricsRegister.class, metricsConfig.getMetricsName()));
} else {
log.info("metrics tracker has started !");
}
} | 3.68 |
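The `compareAndSet(false, true)` guard above makes `start` safe to call concurrently and idempotent. A minimal JDK-only sketch of that start-once idiom:

```java
import java.util.concurrent.atomic.AtomicBoolean;

class StartOnce {
    private final AtomicBoolean started = new AtomicBoolean(false);

    void start() {
        // Only the first caller wins the CAS and runs the initialization.
        if (started.compareAndSet(false, true)) {
            System.out.println("initializing...");
        } else {
            System.out.println("already started, ignoring");
        }
    }

    public static void main(String[] args) throws InterruptedException {
        StartOnce service = new StartOnce();
        Thread t1 = new Thread(service::start);
        Thread t2 = new Thread(service::start);
        t1.start();
        t2.start();
        t1.join();
        t2.join(); // exactly one thread prints "initializing..."
    }
}
```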
hbase_ModifyRegionUtils_editRegions | /**
* Execute the task on the specified set of regions.
* @param exec Thread Pool Executor
* @param regions {@link RegionInfo} that describes the regions to edit
* @param task {@link RegionFillTask} custom code to edit the region
*/
public static void editRegions(final ThreadPoolExecutor exec,
final Collection<RegionInfo> regions, final RegionEditTask task) throws IOException {
final ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<>(exec);
for (final RegionInfo hri : regions) {
completionService.submit(new Callable<Void>() {
@Override
public Void call() throws IOException {
task.editRegion(hri);
return null;
}
});
}
try {
for (RegionInfo hri : regions) {
completionService.take().get();
}
} catch (InterruptedException e) {
throw new InterruptedIOException(e.getMessage());
} catch (ExecutionException e) {
throw new IOException(e.getCause());
}
} | 3.68 |
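The method above fans the edits out over a thread pool and then drains an `ExecutorCompletionService` to wait for (and rethrow failures from) every task. A self-contained sketch of that submit-then-drain pattern with a placeholder task body:

```java
import java.util.List;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class EditAllDemo {
    public static void main(String[] args) throws Exception {
        List<String> regions = List.of("region-1", "region-2", "region-3");
        ExecutorService exec = Executors.newFixedThreadPool(2);
        ExecutorCompletionService<Void> completion = new ExecutorCompletionService<>(exec);

        // Submit one task per item.
        for (String region : regions) {
            completion.submit(() -> {
                System.out.println("editing " + region); // placeholder for the real edit
                return null;
            });
        }
        // Wait for as many completions as submissions; get() rethrows task failures.
        for (int i = 0; i < regions.size(); i++) {
            completion.take().get();
        }
        exec.shutdown();
    }
}
```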
hbase_SnapshotInfo_getLogsSize | /** Returns the total log size */
public long getLogsSize() {
return logSize.get();
} | 3.68 |
flink_WrappingCollector_outputResult | /** Outputs the final result to the wrapped collector. */
public void outputResult(T result) {
this.collector.collect(result);
} | 3.68 |
framework_MethodPropertyDescriptor_writeObject | /* Special serialization to handle method references */
private void writeObject(ObjectOutputStream out)
throws IOException {
out.defaultWriteObject();
SerializerHelper.writeClass(out, propertyType);
if (writeMethod != null) {
out.writeObject(writeMethod.getName());
SerializerHelper.writeClass(out, writeMethod.getDeclaringClass());
SerializerHelper.writeClassArray(out,
writeMethod.getParameterTypes());
} else {
out.writeObject(null);
out.writeObject(null);
out.writeObject(null);
}
if (readMethod != null) {
out.writeObject(readMethod.getName());
SerializerHelper.writeClass(out, readMethod.getDeclaringClass());
SerializerHelper.writeClassArray(out,
readMethod.getParameterTypes());
} else {
out.writeObject(null);
out.writeObject(null);
out.writeObject(null);
}
} | 3.68 |
flink_Grouping_getCustomPartitioner | /**
* Gets the custom partitioner to be used for this grouping, or {@code null}, if none was
* defined.
*
* @return The custom partitioner to be used for this grouping.
*/
@Internal
public Partitioner<?> getCustomPartitioner() {
return this.customPartitioner;
} | 3.68 |
hbase_ScheduledChore_updateTimeTrackingBeforeRun | /**
* Update our time tracking members. Called at the start of an execution of this chore's run()
* method so that a correct decision can be made as to whether or not we missed the start time
*/
private synchronized void updateTimeTrackingBeforeRun() {
timeOfLastRun = timeOfThisRun;
timeOfThisRun = EnvironmentEdgeManager.currentTime();
} | 3.68 |
hbase_KeyValueUtil_nextShallowCopy | /**
* Creates a new KeyValue object positioned in the supplied ByteBuffer and sets the ByteBuffer's
* position to the start of the next KeyValue. Does not allocate a new array or copy data.
*/
public static KeyValue nextShallowCopy(final ByteBuffer bb, final boolean includesMvccVersion,
boolean includesTags) {
if (bb.isDirect()) {
throw new IllegalArgumentException("only supports heap buffers");
}
if (bb.remaining() < 1) {
return null;
}
int underlyingArrayOffset = bb.arrayOffset() + bb.position();
int keyLength = bb.getInt();
int valueLength = bb.getInt();
ByteBufferUtils.skip(bb, keyLength + valueLength);
int tagsLength = 0;
if (includesTags) {
// Read short as unsigned, high byte first
tagsLength = ((bb.get() & 0xff) << 8) ^ (bb.get() & 0xff);
ByteBufferUtils.skip(bb, tagsLength);
}
int kvLength = (int) KeyValue.getKeyValueDataStructureSize(keyLength, valueLength, tagsLength);
KeyValue keyValue = new KeyValue(bb.array(), underlyingArrayOffset, kvLength);
if (includesMvccVersion) {
long mvccVersion = ByteBufferUtils.readVLong(bb);
keyValue.setSequenceId(mvccVersion);
}
return keyValue;
} | 3.68 |
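The parser above reads length prefixes, skips the payload, and leaves the buffer positioned at the next record. A standalone sketch of that length-prefixed traversal over a heap `ByteBuffer`; the record layout is simplified and is not the HBase KeyValue format:

```java
import java.nio.ByteBuffer;

class LengthPrefixedScan {
    public static void main(String[] args) {
        // Encode two records as: int keyLen, int valueLen, key bytes, value bytes.
        ByteBuffer bb = ByteBuffer.allocate(64);
        putRecord(bb, "row1", "v1");
        putRecord(bb, "row2", "value2");
        bb.flip();

        while (bb.remaining() > 0) {
            int start = bb.position();
            int keyLen = bb.getInt();
            int valueLen = bb.getInt();
            // A "shallow copy" would wrap bb.array() at (bb.arrayOffset() + start) without copying;
            // here we just skip over the payload and report the record boundaries.
            bb.position(bb.position() + keyLen + valueLen);
            System.out.println("record at " + start + " length " + (bb.position() - start));
        }
    }

    private static void putRecord(ByteBuffer bb, String key, String value) {
        bb.putInt(key.length()).putInt(value.length());
        bb.put(key.getBytes()).put(value.getBytes());
    }
}
```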
flink_SourceCoordinatorContext_getAndRemoveUncheckpointedAssignment | /**
* Get the split to put back. This only happens when a source reader subtask has failed.
*
* @param subtaskId the failed subtask id.
* @param restoredCheckpointId the checkpoint that the task is recovered to.
* @return A list of splits that needs to be added back to the {@link SplitEnumerator}.
*/
List<SplitT> getAndRemoveUncheckpointedAssignment(int subtaskId, long restoredCheckpointId) {
return assignmentTracker.getAndRemoveUncheckpointedAssignment(
subtaskId, restoredCheckpointId);
} | 3.68 |
flink_TypeInfoLogicalTypeConverter_fromLogicalTypeToTypeInfo | /** Use {@link BigDecimalTypeInfo} to retain precision and scale of decimal. */
public static TypeInformation fromLogicalTypeToTypeInfo(LogicalType type) {
DataType dataType =
fromLogicalTypeToDataType(type)
.nullable()
.bridgedTo(ClassLogicalTypeConverter.getDefaultExternalClassForType(type));
return TypeInfoDataTypeConverter.fromDataTypeToTypeInfo(dataType);
} | 3.68 |
flink_CheckpointStatsCounts_createSnapshot | /**
* Creates a snapshot of the current state.
*
* @return Snapshot of the current state.
*/
CheckpointStatsCounts createSnapshot() {
return new CheckpointStatsCounts(
numRestoredCheckpoints,
numTotalCheckpoints,
numInProgressCheckpoints,
numCompletedCheckpoints,
numFailedCheckpoints);
} | 3.68 |
hbase_GroupingTableMapper_extractKeyValues | /**
* Extract columns values from the current record. This method returns null if any of the columns
* are not found.
* <p>
* Override this method if you want to deal with nulls differently.
* @param r The current values.
* @return Array of byte values.
*/
protected byte[][] extractKeyValues(Result r) {
byte[][] keyVals = null;
ArrayList<byte[]> foundList = new ArrayList<>();
int numCols = columns.length;
if (numCols > 0) {
for (Cell value : r.listCells()) {
byte[] column =
CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value));
for (int i = 0; i < numCols; i++) {
if (Bytes.equals(column, columns[i])) {
foundList.add(CellUtil.cloneValue(value));
break;
}
}
}
if (foundList.size() == numCols) {
keyVals = foundList.toArray(new byte[numCols][]);
}
}
return keyVals;
} | 3.68 |
hbase_AccessChecker_getUserGroups | /**
* Retrieve the groups of the given user.
* @param user User name
*/
public static List<String> getUserGroups(String user) {
try {
return groupService.getGroups(user);
} catch (IOException e) {
LOG.error("Error occurred while retrieving group for " + user, e);
return new ArrayList<>();
}
} | 3.68 |
framework_JsonPaintTarget_addData | /**
*
* @param s
* json string, object or array
*/
public void addData(String s) {
children.add(s);
} | 3.68 |
hibernate-validator_ConstraintAnnotationVisitor_checkConstraints | /**
* Retrieves the checks required for the given element and annotations,
* executes them and reports all occurred errors.
*
* @param annotatedElement The element to check.
* @param mirrors The annotations to check.
*/
private void checkConstraints(Element annotatedElement, List<AnnotationMirror> mirrors) {
for ( AnnotationMirror oneAnnotationMirror : mirrors ) {
try {
ConstraintChecks constraintChecks = constraintCheckFactory.getConstraintChecks(
annotatedElement, oneAnnotationMirror
);
reportIssues( constraintChecks.execute( annotatedElement, oneAnnotationMirror ) );
}
//HV-293: if single constraints can't be properly checked, report this and
//proceed with next constraints
catch (Exception e) {
if ( verbose ) {
messager.getDelegate()
.printMessage( Kind.NOTE, e.getMessage() != null ? e.getMessage() : e.toString(), annotatedElement, oneAnnotationMirror );
}
}
}
} | 3.68 |
querydsl_GroupByBuilder_collection | /**
* Get the results as a list
*
* @param expression projection
* @return new result transformer
*/
public <V, RES extends Collection<V>> ResultTransformer<RES> collection(Supplier<RES> resultFactory, FactoryExpression<V> expression) {
final FactoryExpression<V> transformation = FactoryExpressionUtils.wrap(expression);
List<Expression<?>> args = transformation.getArgs();
return new GroupByGenericCollection<K, V, RES>(resultFactory, key, args.toArray(new Expression<?>[args.size()])) {
@Override
protected V transform(Group group) {
// XXX Isn't group.toArray() suitable here?
List<Object> args = new ArrayList<Object>(groupExpressions.size() - 1);
for (int i = 1; i < groupExpressions.size(); i++) {
args.add(group.getGroup(groupExpressions.get(i)));
}
return transformation.newInstance(args.toArray());
}
};
} | 3.68 |
flink_EmbeddedRocksDBStateBackend_getWriteBatchSize | /** Gets the max batch size will be used in {@link RocksDBWriteBatchWrapper}. */
public long getWriteBatchSize() {
return writeBatchSize == UNDEFINED_WRITE_BATCH_SIZE
? WRITE_BATCH_SIZE.defaultValue().getBytes()
: writeBatchSize;
} | 3.68 |
flink_StandardDeCompressors_getDecompressorForFileName | /**
* Gets the decompressor for a file name. This checks the file against all known and supported
* file extensions. Returns null if there is no decompressor for this file name.
*/
@Nullable
public static InflaterInputStreamFactory<?> getDecompressorForFileName(String fileName) {
for (final Map.Entry<String, InflaterInputStreamFactory<?>> entry :
DECOMPRESSORS.entrySet()) {
if (fileName.endsWith(entry.getKey())) {
return entry.getValue();
}
}
return null;
} | 3.68 |
hudi_BootstrapExecutorUtils_syncHive | /**
* Sync to Hive.
*/
private void syncHive() {
if (cfg.enableHiveSync) {
TypedProperties metaProps = new TypedProperties();
metaProps.putAll(props);
metaProps.put(META_SYNC_DATABASE_NAME.key(), cfg.database);
metaProps.put(META_SYNC_TABLE_NAME.key(), cfg.tableName);
metaProps.put(META_SYNC_BASE_PATH.key(), cfg.basePath);
metaProps.put(META_SYNC_BASE_FILE_FORMAT.key(), cfg.baseFileFormat);
if (props.getBoolean(HIVE_SYNC_BUCKET_SYNC.key(), HIVE_SYNC_BUCKET_SYNC.defaultValue())) {
metaProps.put(HIVE_SYNC_BUCKET_SYNC_SPEC.key(), HiveSyncConfig.getBucketSpec(props.getString(BUCKET_INDEX_HASH_FIELD.key()),
props.getInteger(BUCKET_INDEX_NUM_BUCKETS.key())));
}
try (HiveSyncTool hiveSyncTool = new HiveSyncTool(metaProps, configuration)) {
hiveSyncTool.syncHoodieTable();
}
}
} | 3.68 |
hbase_MasterObserver_postRegionOffline | /**
* Called after the region has been marked offline.
* @param ctx the environment to interact with the framework and master
*/
default void postRegionOffline(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final RegionInfo regionInfo) throws IOException {
} | 3.68 |
rocketmq-connect_RocketMQKafkaSinkTaskContext_convertToTopicPartition | /**
* convert to kafka topic partition
*
* @param partitionMap
* @return
*/
public TopicPartition convertToTopicPartition(Map<String, ?> partitionMap) {
if (partitionMap.containsKey(TOPIC) && partitionMap.containsKey(QUEUE_ID)) {
return new TopicPartition(partitionMap.get(TOPIC).toString(), Integer.valueOf(partitionMap.get(QUEUE_ID).toString()));
}
return null;
} | 3.68 |
pulsar_MessageImpl_getPayload | /**
* used only for unit-test to validate payload's state and ref-cnt.
*
* @return
*/
@VisibleForTesting
ByteBuf getPayload() {
return payload;
} | 3.68 |
flink_TernaryBoolean_getOrDefault | /**
* Gets the boolean value corresponding to this value. If this is the 'undefined' value, the
* method returns the given default.
*
* @param defaultValue The value to be returned in case this ternary value is 'undefined'.
*/
public boolean getOrDefault(boolean defaultValue) {
return this == UNDEFINED ? defaultValue : (this == TRUE);
} | 3.68 |
morf_DirectoryDataSet_openOutputStreamForTable | /**
* @see org.alfasoftware.morf.xml.XmlStreamProvider.XmlOutputStreamProvider#openOutputStreamForTable(java.lang.String)
*/
@Override
public OutputStream openOutputStreamForTable(String tableName) {
try {
return new FileOutputStream(new File(directory, tableName + ".xml"));
} catch (FileNotFoundException e) {
throw new RuntimeException("Error opening output stream", e);
}
} | 3.68 |
flink_MemorySegment_getShort | /**
* Reads a short integer value (16 bit, 2 bytes) from the given position, composing them into a
* short value according to the current byte order.
*
* @param index The position from which the memory will be read.
* @return The short value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public short getShort(int index) {
final long pos = address + index;
if (index >= 0 && pos <= addressLimit - 2) {
return UNSAFE.getShort(heapMemory, pos);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
} | 3.68 |
framework_DDEventHandleStrategy_handleMouseUp | /**
* Called to handle {@link Event#ONMOUSEUP} event.
*
* @param target
* target element over which DnD event has happened
* @param event
* ONMOUSEUP GWT event for active DnD operation
* @param mediator
* VDragAndDropManager data accessor
*/
protected void handleMouseUp(Element target, NativePreviewEvent event,
DDManagerMediator mediator) {
mediator.getManager().endDrag();
} | 3.68 |
zxing_UPCEANReader_findGuardPattern | /**
* @param row row of black/white values to search
* @param rowOffset position to start search
* @param whiteFirst if true, indicates that the pattern specifies white/black/white/...
* pixel counts, otherwise, it is interpreted as black/white/black/...
* @param pattern pattern of counts of number of black and white pixels that are being
* searched for as a pattern
* @param counters array of counters, as long as pattern, to re-use
* @return start/end horizontal offset of guard pattern, as an array of two ints
* @throws NotFoundException if pattern is not found
*/
private static int[] findGuardPattern(BitArray row,
int rowOffset,
boolean whiteFirst,
int[] pattern,
int[] counters) throws NotFoundException {
int width = row.getSize();
rowOffset = whiteFirst ? row.getNextUnset(rowOffset) : row.getNextSet(rowOffset);
int counterPosition = 0;
int patternStart = rowOffset;
int patternLength = pattern.length;
boolean isWhite = whiteFirst;
for (int x = rowOffset; x < width; x++) {
if (row.get(x) != isWhite) {
counters[counterPosition]++;
} else {
if (counterPosition == patternLength - 1) {
if (patternMatchVariance(counters, pattern, MAX_INDIVIDUAL_VARIANCE) < MAX_AVG_VARIANCE) {
return new int[]{patternStart, x};
}
patternStart += counters[0] + counters[1];
System.arraycopy(counters, 2, counters, 0, counterPosition - 1);
counters[counterPosition - 1] = 0;
counters[counterPosition] = 0;
counterPosition--;
} else {
counterPosition++;
}
counters[counterPosition] = 1;
isWhite = !isWhite;
}
}
throw NotFoundException.getNotFoundInstance();
} | 3.68 |
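A simplified, self-contained sketch of the sliding-window search above. It matches run lengths exactly on a boolean row instead of calling patternMatchVariance, so it illustrates the window-shifting idea rather than reproducing the zxing implementation; the demo row and pattern are made up.

import java.util.Arrays;

public class GuardPatternSketch {
    // row[x] == true means a black pixel; pattern holds the expected run lengths,
    // starting with black. The row is assumed to start on a black pixel (the real
    // reader first advances to the next set/unset bit). Returns {start, end} or null.
    static int[] findExactPattern(boolean[] row, int[] pattern) {
        int[] counters = new int[pattern.length];
        int counterPosition = 0;
        int patternStart = 0;
        boolean isWhite = false;
        for (int x = 0; x < row.length; x++) {
            if (row[x] != isWhite) {
                counters[counterPosition]++;
            } else {
                if (counterPosition == pattern.length - 1) {
                    if (Arrays.equals(counters, pattern)) {
                        return new int[]{patternStart, x};
                    }
                    // Shift the window: drop the first black/white run pair.
                    patternStart += counters[0] + counters[1];
                    System.arraycopy(counters, 2, counters, 0, counterPosition - 1);
                    counters[counterPosition - 1] = 0;
                    counters[counterPosition] = 0;
                    counterPosition--;
                } else {
                    counterPosition++;
                }
                counters[counterPosition] = 1;
                isWhite = !isWhite;
            }
        }
        return null;
    }

    public static void main(String[] args) {
        // Noise (2 black, 1 white) followed by the 1-1-1 guard pattern at offset 3.
        boolean[] row = {true, true, false, true, false, true, false};
        System.out.println(Arrays.toString(findExactPattern(row, new int[]{1, 1, 1}))); // [3, 6]
    }
}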
pulsar_ChannelFutures_toCompletableFuture | /**
* Convert a {@link ChannelFuture} into a {@link CompletableFuture}.
*
* @param channelFuture the {@link ChannelFuture}
* @return a {@link CompletableFuture} that completes successfully when the channelFuture completes successfully,
* and completes exceptionally if the channelFuture completes with a {@link Throwable}
*/
public static CompletableFuture<Channel> toCompletableFuture(ChannelFuture channelFuture) {
Objects.requireNonNull(channelFuture, "channelFuture cannot be null");
CompletableFuture<Channel> adapter = new CompletableFuture<>();
if (channelFuture.isDone()) {
if (channelFuture.isSuccess()) {
adapter.complete(channelFuture.channel());
} else {
adapter.completeExceptionally(channelFuture.cause());
}
} else {
channelFuture.addListener((ChannelFuture cf) -> {
if (cf.isSuccess()) {
adapter.complete(cf.channel());
} else {
adapter.completeExceptionally(cf.cause());
}
});
}
return adapter;
} | 3.68 |
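A usage sketch adapting a Netty connect future and composing on the result. The ChannelFutures import path and the port are assumptions for the example; the bootstrap handler is intentionally empty.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import java.util.concurrent.CompletableFuture;
import org.apache.pulsar.common.util.netty.ChannelFutures; // package assumed

public class ChannelFuturesSketch {
    public static void main(String[] args) {
        NioEventLoopGroup group = new NioEventLoopGroup();
        Bootstrap bootstrap = new Bootstrap()
                .group(group)
                .channel(NioSocketChannel.class)
                .handler(new ChannelInitializer<Channel>() {
                    @Override
                    protected void initChannel(Channel ch) {
                        // no pipeline handlers needed for this sketch
                    }
                });
        // Adapt Netty's ChannelFuture so the result composes with CompletableFuture.
        CompletableFuture<Channel> connected =
                ChannelFutures.toCompletableFuture(bootstrap.connect("localhost", 6650));
        connected.thenAccept(ch -> System.out.println("Connected: " + ch))
                 .exceptionally(t -> { System.err.println("Connect failed: " + t); return null; })
                 .whenComplete((v, t) -> group.shutdownGracefully());
    }
}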
dubbo_MetricsApplicationListener_onFinishEventBuild | /**
     * Ends the monitoring normally: in addition to incrementing the corresponding indicator counters,
     * it uses introspection to calculate the related RT (response time) indicators.
     *
     * @param metricsKey the monitored metrics key
     * @param placeType  the place value describing where the metric is recorded
     * @param collector  the corresponding collector
*/
public static AbstractMetricsKeyListener onFinishEventBuild(
MetricsKey metricsKey, MetricsPlaceValue placeType, CombMetricsCollector<?> collector) {
return AbstractMetricsKeyListener.onFinish(metricsKey, event -> {
collector.increment(metricsKey);
collector.addApplicationRt(placeType.getType(), event.getTimePair().calc());
});
} | 3.68 |
morf_TestingDatabaseEquivalentStringComparator_configure | /**
* @see com.google.inject.AbstractModule#configure()
*/
@Override
protected void configure() {
binder().bind(DatabaseEquivalentStringComparator.class).to(TestingDatabaseEquivalentStringComparator.class);
} | 3.68 |
hadoop_EvaluatingStatisticsMap_values | /**
* Evaluate all the entries and provide a list of the results.
*
* This is not a snapshot, so if the evaluators actually return
* references to mutable objects (e.g. a MeanStatistic instance)
* then that value may still change.
* @return the current list of evaluated results.
*/
@Override
public Collection<E> values() {
Set<Entry<String, Function<String, E>>> evalEntries =
evaluators.entrySet();
return evalEntries.parallelStream().map((e) ->
e.getValue().apply(e.getKey()))
.collect(Collectors.toList());
} | 3.68 |
flink_StreamExecutionEnvironment_enableChangelogStateBackend | /**
* Enable the change log for current state backend. This change log allows operators to persist
* state changes in a very fine-grained manner. Currently, the change log only applies to keyed
* state, so non-keyed operator state and channel state are persisted as usual. The 'state' here
* refers to 'keyed state'. Details are as follows:
*
* <p>Stateful operators write the state changes to that log (logging the state), in addition to
* applying them to the state tables in RocksDB or the in-mem Hashtable.
*
* <p>An operator can acknowledge a checkpoint as soon as the changes in the log have reached
* the durable checkpoint storage.
*
* <p>The state tables are persisted periodically, independent of the checkpoints. We call this
* the materialization of the state on the checkpoint storage.
*
* <p>Once the state is materialized on checkpoint storage, the state changelog can be truncated
* to the corresponding point.
*
     * <p>It establishes a way to drastically reduce the checkpoint interval for streaming
     * applications across state backends. For more details, please check FLIP-158.
*
     * <p>If this method is not called explicitly, no preference for enabling the changelog is
     * expressed, and whether it is enabled is resolved from the configuration at the different
     * config levels (job/local/cluster).
*
* @param enabled true if enable the change log for state backend explicitly, otherwise disable
* the change log.
* @return This StreamExecutionEnvironment itself, to allow chaining of function calls.
* @see #isChangelogStateBackendEnabled()
*/
@PublicEvolving
public StreamExecutionEnvironment enableChangelogStateBackend(boolean enabled) {
this.changelogStateBackendEnabled = TernaryBoolean.fromBoolean(enabled);
return this;
} | 3.68 |
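A minimal pipeline that opts in explicitly. The job itself is arbitrary and just gives the setting something to apply to; actually running with the changelog enabled also needs the changelog (dstl) dependency on the classpath.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ChangelogSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Explicitly enable the changelog for the configured state backend (FLIP-158).
        env.enableChangelogStateBackend(true);
        env.fromElements(1, 2, 3, 4)
           .keyBy(i -> i % 2)
           .reduce((a, b) -> a + b)
           .print();
        env.execute("changelog-sketch");
    }
}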
framework_GridLayout_getCursorY | /**
* Gets the current y-position (row) of the cursor.
*
* <p>
     * The cursor position points to the position for the next component that is
* added without specifying its coordinates (grid cell). When the cursor
* position is occupied, the next component will be added to the first free
* position after the cursor.
* </p>
*
* @return the grid row the Cursor is on.
*/
public int getCursorY() {
return cursorY;
} | 3.68 |
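A small sketch of how the cursor advances as components are added without explicit coordinates, assuming the Vaadin 8 server-side API.

import com.vaadin.ui.GridLayout;
import com.vaadin.ui.Label;

public class CursorSketch {
    public static void main(String[] args) {
        GridLayout grid = new GridLayout(2, 2);   // 2 columns, 2 rows
        System.out.println(grid.getCursorY());    // 0 - cursor starts on the first row
        grid.addComponent(new Label("A"));        // fills (0,0)
        grid.addComponent(new Label("B"));        // fills (1,0); cursor wraps to the next row
        System.out.println(grid.getCursorY());    // 1 - cursor is now on the second row
    }
}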
open-banking-gateway_BaseDatasafeDbStorageService_read | /**
     * Opens a Datasafe object for reading.
* @param absoluteLocation Absolute path of the object to read. I.e. {@code db://storage/deadbeef}
* @return Stream to read data from.
*/
@Override
@SneakyThrows
@Transactional(noRollbackFor = BaseDatasafeDbStorageService.DbStorageEntityNotFoundException.class)
public InputStream read(AbsoluteLocation absoluteLocation) {
return new ByteArrayInputStream(requireBytes(absoluteLocation));
} | 3.68 |
hadoop_FedBalanceContext_setBandwidthLimit | /**
     * The bandwidth limit of the distcp job (MB).
     * @param value the bandwidth limit in MB.
* @return the builder.
*/
public Builder setBandwidthLimit(int value) {
this.bandwidthLimit = value;
return this;
} | 3.68 |
cron-utils_SecondsDescriptor_visit | /**
     * Builds a human-readable description for an Every instance.
     *
     * @param every - the Every instance to describe
     * @return the visited Every instance (the built description is currently not stored; see the TODO below)
*/
@Override
public Every visit(final Every every) {
String description;
if (every.getPeriod().getValue() > 1) {
description = String.format("%s %s ", bundle.getString(EVERY), nominalValue(every.getPeriod())) + " replace_plural ";
} else {
description = bundle.getString(EVERY) + " %s ";
}
//TODO save the description?
return every;
} | 3.68 |
hmily_HmilyXaResource_getXaResource | /**
* Gets xa resource.
*
* @return the xa resource
*/
public XAResource getXaResource() {
return xaResource;
} | 3.68 |
framework_VDateField_getDefaultDate | /**
     * Gets the default date used when no date is selected.
*
* @return the default date
* @since 8.1.2
*/
public Date getDefaultDate() {
return defaultDate;
} | 3.68 |
flink_WindowedStream_process | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Note that this function requires that all data in the windows is buffered until the window
* is evaluated, as the function provides no means of incremental aggregation.
*
* @param function The window function.
* @param resultType Type information for the result type of the window function
* @return The data stream that is the result of applying the window function to the window.
*/
@Internal
public <R> SingleOutputStreamOperator<R> process(
ProcessWindowFunction<T, R, K, W> function, TypeInformation<R> resultType) {
function = input.getExecutionEnvironment().clean(function);
final String opName = builder.generateOperatorName();
final String opDesc = builder.generateOperatorDescription(function, null);
OneInputStreamOperator<T, R> operator = builder.process(function);
return input.transform(opName, resultType, operator).setDescription(opDesc);
} | 3.68 |
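A usage sketch of the public process(...) entry point that wraps this one. The input data and the five-second tumbling window are arbitrary choices, and with such a bounded demo input a processing-time window may finish the job before firing, so the sketch is about the API shape rather than the printed output.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;

public class WindowProcessSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.fromElements(Tuple2.of("a", 1), Tuple2.of("a", 2), Tuple2.of("b", 3))
           .keyBy(t -> t.f0)
           .window(TumblingProcessingTimeWindows.of(Time.seconds(5)))
           .process(new ProcessWindowFunction<Tuple2<String, Integer>, String, String, TimeWindow>() {
               @Override
               public void process(String key, Context ctx,
                                   Iterable<Tuple2<String, Integer>> elements, Collector<String> out) {
                   // The whole window content is buffered and handed over at once.
                   int count = 0;
                   for (Tuple2<String, Integer> ignored : elements) {
                       count++;
                   }
                   out.collect(key + " -> " + count + " element(s) in " + ctx.window());
               }
           })
           .print();
        env.execute("window-process-sketch");
    }
}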
hbase_RegionServerObserver_preReplicateLogEntries | // TODO remove below 2 hooks when we implement AC as a core impl rather than a CP impl.
/**
* This will be called before executing replication request to shipping log entries.
* @param ctx the environment to interact with the framework and region server.
   * @deprecated As of release 2.0.0 without any replacement. This is maintained for internal usage
* by AccessController. Do not use these hooks in custom co-processors.
*/
@Deprecated
default void preReplicateLogEntries(final ObserverContext<RegionServerCoprocessorEnvironment> ctx)
throws IOException {
} | 3.68 |
hadoop_MountInterface_getValue | /** @return the int value representing the procedure. */
public int getValue() {
return ordinal();
} | 3.68 |
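A self-contained illustration of the ordinal-as-wire-value idea. The enum below is hypothetical (not the real MountInterface constants) and only shows why the declaration order has to match the protocol's procedure numbering.

public class ProcedureValueSketch {
    // Hypothetical procedure enum: ordinal() doubles as the protocol's procedure number,
    // so the declaration order must match the numbering exactly.
    enum MntProc {
        NULL,   // 0
        MNT,    // 1
        DUMP,   // 2
        UMNT;   // 3

        public int getValue() {
            return ordinal();
        }
    }

    public static void main(String[] args) {
        System.out.println(MntProc.MNT.getValue());  // 1
        System.out.println(MntProc.UMNT.getValue()); // 3
    }
}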