Columns: name (string, length 12–178), code_snippet (string, length 8–36.5k), score (float64, range 3.26–3.68).
framework_VAbstractCalendarPanel_updateAssistiveLabels
/** * Updates assistive labels of the navigation elements. * * @since 8.4 */ public void updateAssistiveLabels() { if (prevMonth != null) { Roles.getButtonRole().setAriaLabelProperty(prevMonth.getElement(), prevMonthAssistiveLabel); } if (nextMonth != null) { Roles.getButtonRole().setAriaLabelProperty(nextMonth.getElement(), nextMonthAssistiveLabel); } if (prevYear != null) { Roles.getButtonRole().setAriaLabelProperty(prevYear.getElement(), prevYearAssistiveLabel); } if (nextYear != null) { Roles.getButtonRole().setAriaLabelProperty(nextYear.getElement(), nextYearAssistiveLabel); } }
3.68
hbase_CompoundConfiguration_addBytesMap
/** * Add Bytes map to config list. This map is generally created by HTableDescriptor or * HColumnDescriptor, but can be abstractly used. The added configuration overrides the previous * ones if there are name collisions. * @param map Bytes map * @return this, for builder pattern */ public CompoundConfiguration addBytesMap(final Map<Bytes, Bytes> map) { freezeMutableConf(); // put new map at the front of the list (top priority) this.configs.add(0, new ImmutableConfigMap() { private final Map<Bytes, Bytes> m = map; @Override public Iterator<Map.Entry<String, String>> iterator() { Map<String, String> ret = new HashMap<>(); for (Map.Entry<Bytes, Bytes> entry : map.entrySet()) { String key = Bytes.toString(entry.getKey().get()); String val = entry.getValue() == null ? null : Bytes.toString(entry.getValue().get()); ret.put(key, val); } return ret.entrySet().iterator(); } @Override public String get(String key) { Bytes ibw = new Bytes(Bytes.toBytes(key)); if (!m.containsKey(ibw)) return null; Bytes value = m.get(ibw); if (value == null || value.get() == null) return null; return Bytes.toString(value.get()); } @Override public String getRaw(String key) { return get(key); } @Override public Class<?> getClassByName(String name) throws ClassNotFoundException { return null; } @Override public int size() { return m.size(); } @Override public String toString() { return m.toString(); } }); return this; }
3.68
hadoop_OBSDataBlocks_dataSize
/** * Get the amount of data; if there is no buffer then the size is 0. * * @return the amount of data available to upload. */ @Override int dataSize() { return dataSize != null ? dataSize : bufferCapacityUsed(); }
3.68
hadoop_PlacementConstraint_name
/** * An optional name associated with this constraint. **/ public PlacementConstraint name(String name) { this.name = name; return this; }
3.68
pulsar_ConcurrentOpenHashSet_forEach
/** * Iterate over all the elements in the set and apply the provided function. * <p> * <b>Warning: this iteration is not guaranteed to be thread-safe.</b> * @param processor the function to apply to each element */ public void forEach(Consumer<? super V> processor) { for (int i = 0; i < sections.length; i++) { sections[i].forEach(processor); } }
3.68
flink_MemorySegment_equalTo
/** * Compares two memory segment regions for equality. * * @param seg2 Segment to compare this segment with * @param offset1 Offset of this segment to start comparing * @param offset2 Offset of seg2 to start comparing * @param length Length of the compared memory region * @return true if equal, false otherwise */ public boolean equalTo(MemorySegment seg2, int offset1, int offset2, int length) { int i = 0; // we assume unaligned accesses are supported. // Compare 8 bytes at a time. while (i <= length - 8) { if (getLong(offset1 + i) != seg2.getLong(offset2 + i)) { return false; } i += 8; } // cover the last (length % 8) elements. while (i < length) { if (get(offset1 + i) != seg2.get(offset2 + i)) { return false; } i += 1; } return true; }
3.68
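A minimal usage sketch for equalTo, assuming Flink's MemorySegmentFactory.wrap(byte[]) as the segment source; all names are illustrative:

    // Wrap two heap arrays as segments and compare their first 16 bytes.
    MemorySegment a = MemorySegmentFactory.wrap(new byte[32]);
    MemorySegment b = MemorySegmentFactory.wrap(new byte[32]);
    boolean same = a.equalTo(b, 0, 0, 16); // true: both regions are zero-filled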
hbase_DataBlockEncoding_writeIdInBytes
/** * Writes id bytes to the given array starting from offset. * @param dest output array * @param offset starting offset of the output array */ // System.arraycopy is static native. Nothing we can do about this until we have minimum JDK 9. @SuppressWarnings("UnsafeFinalization") public void writeIdInBytes(byte[] dest, int offset) throws IOException { System.arraycopy(idInBytes, 0, dest, offset, ID_SIZE); }
3.68
framework_WebBrowser_getTimeZoneId
/** * Returns the TimeZone Id (like "Europe/Helsinki") provided by the browser * (if the browser supports this feature). * * @return the TimeZone Id if provided by the browser, null otherwise. * @see <a href= * "https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/DateTimeFormat/resolvedOptions">Intl.DateTimeFormat.prototype.resolvedOptions()</a> * @since 8.2 */ public String getTimeZoneId() { return timeZoneId; }
3.68
flink_FactoryUtil_validateExcept
/** * Validates the options of the factory. It checks for unconsumed option keys while ignoring * the options with given prefixes. * * <p>Option keys that start with one of the given {@code prefixesToSkip} prefixes are simply * skipped during validation. * * @param prefixesToSkip Set of option key prefixes to skip validation */ public void validateExcept(String... prefixesToSkip) { Preconditions.checkArgument( prefixesToSkip.length > 0, "Prefixes to skip can not be empty."); final List<String> prefixesList = Arrays.asList(prefixesToSkip); consumedOptionKeys.addAll( allOptions.keySet().stream() .filter(key -> prefixesList.stream().anyMatch(key::startsWith)) .collect(Collectors.toSet())); validate(); }
3.68
hbase_CompactionConfiguration_getMajorCompactionJitter
/** * @return the major compaction jitter fraction, the fraction within which the major compaction period is * randomly chosen from the majorCompactionPeriod in each store. */ public float getMajorCompactionJitter() { return majorCompactionJitter; }
3.68
hadoop_SendRequestIntercept_bind
/** * Binds a new listener to the operation context so the WASB file system can * appropriately intercept sends and allow concurrent OOB I/Os. This * bypasses the blob immutability check when reading streams. * * @param opContext the operation context associated with this request. */ public static void bind(OperationContext opContext) { opContext.getSendingRequestEventHandler().addListener(new SendRequestIntercept()); }
3.68
dubbo_NacosDynamicConfiguration_createTargetListener
/** * Ignores the group parameter. * * @param key property key the native listener will listen on * @param group group to distinguish different sets of properties * @return the created {@code NacosConfigListener} */ private NacosConfigListener createTargetListener(String key, String group) { NacosConfigListener configListener = new NacosConfigListener(); configListener.fillContext(key, group); return configListener; }
3.68
hbase_MemStoreSnapshot_getCellsCount
/** Returns Number of Cells in this snapshot. */ public int getCellsCount() { return cellsCount; }
3.68
flink_FlinkCompletableFutureAssert_eventuallyFailsWith
/** * An equivalent of {@link #failsWithin(Duration)}, that doesn't rely on timeouts. * * @param exceptionClass type of the exception we expect the future to complete with * @return a new assertion instance on the future's exception. * @param <E> type of the exception we expect the future to complete with */ public <E extends Throwable> ThrowableAssertAlternative<E> eventuallyFailsWith( Class<E> exceptionClass) { return eventuallyFails().withThrowableOfType(exceptionClass); }
3.68
hbase_TableName_getNameWithNamespaceInclAsString
/** * Ideally, getNameAsString should contain namespace within it, but if the namespace is default, * it just returns the name. This method takes care of this corner case. */ public String getNameWithNamespaceInclAsString() { if (getNamespaceAsString().equals(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR)) { return NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR + TableName.NAMESPACE_DELIM + getNameAsString(); } return getNameAsString(); }
3.68
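A short sketch of the corner case getNameWithNamespaceInclAsString handles, assuming TableName.valueOf("foo") resolves to the default namespace:

    TableName t = TableName.valueOf("foo");
    t.getNameAsString();                    // "foo" (namespace omitted)
    t.getNameWithNamespaceInclAsString();   // "default:foo"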
hbase_RingBufferTruck_load
/** * Load the truck with a {@link SyncFuture}. */ void load(final SyncFuture syncFuture) { this.sync = syncFuture; this.type = Type.SYNC; }
3.68
flink_SplitEnumerator_notifyCheckpointComplete
/** * We have an empty default implementation here because most split enumerators do not have to * implement the method. * * @see CheckpointListener#notifyCheckpointComplete(long) */ @Override default void notifyCheckpointComplete(long checkpointId) throws Exception {}
3.68
hibernate-validator_ConstraintDescriptorImpl_validateComposingConstraintTypes
/** * Asserts that this constraint and all its composing constraints share the * same constraint type (generic or cross-parameter). */ private void validateComposingConstraintTypes() { for ( ConstraintDescriptorImpl<?> composingConstraint : getComposingConstraintImpls() ) { if ( composingConstraint.constraintType != constraintType ) { throw LOG.getComposedAndComposingConstraintsHaveDifferentTypesException( annotationDescriptor.getType(), composingConstraint.annotationDescriptor.getType(), constraintType, composingConstraint.constraintType ); } } }
3.68
framework_AbstractConnector_doInit
/** * Called once by the framework to initialize the connector. * <p> * Note that the shared state is not yet available when this method is * called. * <p> * Connector classes should override {@link #init()} instead of this method. */ @Override public final void doInit(String connectorId, ApplicationConnection connection) { Profiler.enter("AbstractConnector.doInit"); this.connection = connection; id = connectorId; // Doing this here because we want to run it after connection and id have // been set but before init() is called to enable e.g. // JavaScriptConnector to use connection when determining the tag name if (this instanceof ComponentConnector) { setConnectorId(((ComponentConnector) this).getWidget().getElement(), connectorId); } addStateChangeHandler(this); if (Profiler.isEnabled()) { Profiler.enter( "AbstractConnector.init " + getClass().getSimpleName()); } init(); if (Profiler.isEnabled()) { Profiler.leave( "AbstractConnector.init " + getClass().getSimpleName()); } Profiler.leave("AbstractConnector.doInit"); }
3.68
hmily_InsertStatementAssembler_assembleHmilyInsertStatement
/** * Assemble Hmily insert statement. * * @param insertStatement insert statement * @param hmilyInsertStatement hmily insert statement * @return hmily insert statement */ public static HmilyInsertStatement assembleHmilyInsertStatement(final InsertStatement insertStatement, final HmilyInsertStatement hmilyInsertStatement) { HmilySimpleTableSegment hmilySimpleTableSegment = CommonAssembler.assembleHmilySimpleTableSegment(insertStatement.getTable()); HmilyInsertColumnsSegment hmilyInsertColumnsSegment = null; if (insertStatement.getInsertColumns().isPresent()) { hmilyInsertColumnsSegment = assembleHmilyInsertColumnsSegment(insertStatement.getInsertColumns().get()); } Collection<HmilyInsertValuesSegment> hmilyInsertValuesSegments = assembleHmilyInsertValuesSegments(insertStatement.getValues()); hmilyInsertStatement.setTable(hmilySimpleTableSegment); hmilyInsertStatement.setInsertColumns(hmilyInsertColumnsSegment); for (HmilyInsertValuesSegment each : hmilyInsertValuesSegments) { hmilyInsertStatement.getValues().add(each); } return hmilyInsertStatement; }
3.68
flink_Plan_setDefaultParallelism
/** * Sets the default parallelism for this plan. This parallelism is always used when an operator is * not explicitly given a parallelism. * * @param defaultParallelism The default parallelism for the plan. */ public void setDefaultParallelism(int defaultParallelism) { checkArgument( defaultParallelism >= 1 || defaultParallelism == ExecutionConfig.PARALLELISM_DEFAULT, "The default parallelism must be positive, or ExecutionConfig.PARALLELISM_DEFAULT if the system should use the globally configured default."); this.defaultParallelism = defaultParallelism; }
3.68
hudi_BaseHoodieWriteClient_releaseResources
/** * Called after each write, to release any resources used. */ protected void releaseResources(String instantTime) { // do nothing here }
3.68
hbase_OrderedBytes_encodeBlobCopy
/** * Encode a Blob value as a byte-for-byte copy. BlobCopy encoding in DESCENDING order is NULL * terminated so as to preserve proper sorting of {@code []} and so it does not support * {@code 0x00} in the value. * @return the number of bytes written. * @throws IllegalArgumentException when {@code ord} is DESCENDING and {@code val} contains a * {@code 0x00} byte. * @see #encodeBlobCopy(PositionedByteRange, byte[], int, int, Order) */ public static int encodeBlobCopy(PositionedByteRange dst, byte[] val, Order ord) { return encodeBlobCopy(dst, val, 0, null != val ? val.length : 0, ord); }
3.68
flink_TernaryBoolean_resolveUndefined
/** * Resolves this value to a {@code TernaryBoolean} that is never 'UNDEFINED'. If this is the 'UNDEFINED' value, the * method returns the value corresponding to the given valueForUndefined; otherwise, this value is returned unchanged. * * @param valueForUndefined The value to resolve to in case this ternary value is 'undefined'. */ public TernaryBoolean resolveUndefined(boolean valueForUndefined) { return this != UNDEFINED ? this : fromBoolean(valueForUndefined); }
3.68
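A small sketch of how resolveUndefined behaves, assuming the usual TRUE/FALSE/UNDEFINED constants:

    TernaryBoolean resolved = TernaryBoolean.UNDEFINED.resolveUndefined(true); // TernaryBoolean.TRUE
    TernaryBoolean kept = TernaryBoolean.FALSE.resolveUndefined(true);         // stays FALSE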
flink_PojoSerializerSnapshot_newPojoSerializerIsCompatibleAfterMigration
/** Checks if the new {@link PojoSerializer} is compatible after migration. */ private static <T> boolean newPojoSerializerIsCompatibleAfterMigration( PojoSerializer<T> newPojoSerializer, IntermediateCompatibilityResult<T> fieldSerializerCompatibility, IntermediateCompatibilityResult<T> preExistingRegistrationsCompatibility, LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots) { return newPojoHasNewOrRemovedFields(fieldSerializerSnapshots, newPojoSerializer) || fieldSerializerCompatibility.isCompatibleAfterMigration() || preExistingRegistrationsCompatibility.isCompatibleAfterMigration(); }
3.68
flink_FileInputSplit_getPath
/** * Returns the path of the file containing this split's data. * * @return the path of the file containing this split's data. */ public Path getPath() { return file; }
3.68
hadoop_StringInterner_weakIntern
/** * Interns and returns a reference to the representative instance * for any of a collection of string instances that are equal to each other. * Retains weak reference to the instance, * and so does not prevent it from being garbage-collected. * * @param sample string instance to be interned * @return weak reference to interned string instance */ public static String weakIntern(String sample) { if (sample == null) { return null; } return sample.intern(); }
3.68
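A usage sketch of the canonicalization contract of weakIntern; the key string is illustrative:

    String a = StringInterner.weakIntern(new String("mapreduce.job.name"));
    String b = StringInterner.weakIntern(new String("mapreduce.job.name"));
    boolean canonical = (a == b); // true: both calls return the same representative instance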
flink_ThriftObjectConversions_toTRowSet
/** * Similar to {@link SerDeUtils#toThriftPayload(Object, ObjectInspector, int)} that converts the * returned Rows to a JSON string. The only difference is that the current implementation also keeps the * type for primitive types. */ public static TRowSet toTRowSet( TProtocolVersion version, ResolvedSchema schema, List<RowData> data) { for (RowData row : data) { if (row.getRowKind() != RowKind.INSERT) { throw new UnsupportedOperationException( "HiveServer2 Endpoint only supports to serialize the INSERT-ONLY RowData."); } } List<RowData.FieldGetter> fieldGetters = new ArrayList<>(); for (int i = 0; i < schema.getColumnCount(); i++) { fieldGetters.add( RowData.createFieldGetter( schema.getColumnDataTypes().get(i).getLogicalType(), i)); } List<LogicalType> fieldTypes = schema.getColumnDataTypes().stream() .map(DataType::getLogicalType) .collect(Collectors.toList()); if (version.getValue() < HIVE_CLI_SERVICE_PROTOCOL_V6.getVersion().getValue()) { return toRowBasedSet(fieldTypes, fieldGetters, data); } else { return toColumnBasedSet(fieldTypes, fieldGetters, data); } }
3.68
flink_OrInputTypeStrategy_commonMin
/** Returns the common minimum argument count or null if undefined. */ private static @Nullable Integer commonMin(List<ArgumentCount> counts) { // min=5, min=3, min=0 -> min=0 // min=5, min=3, min=0, min=null -> min=null int commonMin = Integer.MAX_VALUE; for (ArgumentCount count : counts) { final Optional<Integer> min = count.getMinCount(); if (!min.isPresent()) { return null; } commonMin = Math.min(commonMin, min.get()); } if (commonMin == Integer.MAX_VALUE) { return null; } return commonMin; }
3.68
hadoop_OBSInputStream_incrementBytesRead
/** * Increment the bytes read counter if there is a stats instance and the * number of bytes read is more than zero. * * @param bytesRead number of bytes read */ private void incrementBytesRead(final long bytesRead) { if (statistics != null && bytesRead > 0) { statistics.incrementBytesRead(bytesRead); } }
3.68
flink_CombinedWatermarkStatus_getWatermark
/** * Returns the current watermark timestamp. This will throw {@link IllegalStateException} if * the output is currently idle. */ private long getWatermark() { checkState(!idle, "Output is idle."); return watermark; }
3.68
hibernate-validator_MessagerAdapter_getDelegate
/** * Returns the messager used by this adapter. * * @return The underlying messager. */ public Messager getDelegate() { return messager; }
3.68
flink_RocksDBFullRestoreOperation_restore
/** Restores all key-groups data that is referenced by the passed state handles. */ @Override public RocksDBRestoreResult restore() throws IOException, StateMigrationException, RocksDBException { rocksHandle.openDB(); try (ThrowingIterator<SavepointRestoreResult> restore = savepointRestoreOperation.restore()) { while (restore.hasNext()) { applyRestoreResult(restore.next()); } } return new RocksDBRestoreResult( this.rocksHandle.getDb(), this.rocksHandle.getDefaultColumnFamilyHandle(), this.rocksHandle.getNativeMetricMonitor(), -1, null, null); }
3.68
hadoop_AppPlacementAllocator_getPreferredNodeIterator
/** * Get an iterator of preferred nodes, depending on requirement and/or availability. * @param candidateNodeSet input CandidateNodeSet * @return iterator of preferred nodes */ public Iterator<N> getPreferredNodeIterator( CandidateNodeSet<N> candidateNodeSet) { // For now, only handle the case of a single node in the candidateNodeSet // TODO, Add support to multi-hosts inside candidateNodeSet which is passed // in. N singleNode = CandidateNodeSetUtils.getSingleNode(candidateNodeSet); if (singleNode != null) { return IteratorUtils.singletonIterator(singleNode); } // singleNode will be null if Multi-node placement lookup is enabled, and // hence could consider sorting policies. return multiNodeSortingManager.getMultiNodeSortIterator( candidateNodeSet.getAllNodes().values(), candidateNodeSet.getPartition(), multiNodeSortPolicyName); }
3.68
framework_BasicEventProvider_addEvent
/* * (non-Javadoc) * * @see * com.vaadin.addon.calendar.event.CalendarEditableEventProvider#addEvent * (com.vaadin.addon.calendar.event.CalendarEvent) */ @Override public void addEvent(CalendarEvent event) { eventList.add(event); if (event instanceof BasicEvent) { ((BasicEvent) event).addEventChangeListener(this); } fireEventSetChange(); }
3.68
hudi_HoodieIndex_requiresTagging
/** * To indicate if an operation type requires location tagging before writing */ @PublicAPIMethod(maturity = ApiMaturityLevel.EVOLVING) public boolean requiresTagging(WriteOperationType operationType) { switch (operationType) { case DELETE: case DELETE_PREPPED: case UPSERT: return true; default: return false; } }
3.68
pulsar_LoadSimulationController_run
/** * Create a shell for the user to send commands to clients. */ public void run() throws Exception { BufferedReader inReader = new BufferedReader(new InputStreamReader(System.in)); while (true) { // Print the very simple prompt. System.out.println(); System.out.print("> "); read(inReader.readLine().split("\\s+")); } }
3.68
flink_FutureUtils_completeAll
/** * Creates a {@link ConjunctFuture} which is only completed after all given futures have * completed. Unlike {@link FutureUtils#waitForAll(Collection)}, the resulting future won't be * completed directly if one of the given futures is completed exceptionally. Instead, all * occurring exceptions will be collected and combined into a single exception. If at least one * exception occurs, then the resulting future will be completed exceptionally. * * @param futuresToComplete futures to complete * @return Future which is completed after all given futures have been completed. */ public static ConjunctFuture<Void> completeAll( Collection<? extends CompletableFuture<?>> futuresToComplete) { return new CompletionConjunctFuture(futuresToComplete); }
3.68
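A hedged usage sketch for completeAll; the futures are illustrative:

    CompletableFuture<Void> f1 = CompletableFuture.completedFuture(null);
    CompletableFuture<Void> f2 = CompletableFuture.runAsync(() -> {});
    ConjunctFuture<Void> all = FutureUtils.completeAll(Arrays.asList(f1, f2));
    all.get(); // returns once both futures finish; collected exceptions would surface here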
flink_KvStateLocationRegistry_getKvStateLocation
/** * Returns the {@link KvStateLocation} for the registered KvState instance or <code>null</code> * if no location information is available. * * @param registrationName Name under which the KvState instance is registered. * @return Location information or <code>null</code>. */ public KvStateLocation getKvStateLocation(String registrationName) { return lookupTable.get(registrationName); }
3.68
hadoop_ProbeStatus_getOriginator
/** * Get the probe that generated this result. May be null * @return a possibly null reference to a probe */ public Probe getOriginator() { return originator; }
3.68
flink_AllocatedSlot_releasePayload
/** * Triggers the release of the assigned payload. If the payload could be released, then it is * removed from the slot. * * @param cause of the release operation */ public void releasePayload(Throwable cause) { final Payload payload = payloadReference.get(); if (payload != null) { payload.release(cause); payloadReference.set(null); } }
3.68
pulsar_ManagedLedgerImpl_invalidateEntriesUpToSlowestReaderPosition
// slowest reader position is earliest mark delete position when cacheEvictionByMarkDeletedPosition=true // it is the earliest read position when cacheEvictionByMarkDeletedPosition=false private void invalidateEntriesUpToSlowestReaderPosition() { if (entryCache.getSize() <= 0) { return; } if (!activeCursors.isEmpty()) { PositionImpl evictionPos = activeCursors.getSlowestReaderPosition(); if (evictionPos != null) { entryCache.invalidateEntries(evictionPos); } } else { entryCache.clear(); } }
3.68
flink_PartitionedFileReader_readCurrentRegion
/** * Reads a buffer from the current region of the target {@link PartitionedFile} and moves the * read position forward. * * <p>Note: The caller is responsible for recycling the target buffer if any exception occurs. * * @param freeSegments The free {@link MemorySegment}s to read data to. * @param recycler The {@link BufferRecycler} which is responsible to recycle the target buffer. * @param consumer The target {@link Buffer} stores the data read from file channel. * @return Whether the file reader has remaining data to read. */ boolean readCurrentRegion( Queue<MemorySegment> freeSegments, BufferRecycler recycler, Consumer<Buffer> consumer) throws IOException { if (currentRegionRemainingBytes == 0) { return false; } checkArgument(!freeSegments.isEmpty(), "No buffer available for data reading."); dataFileChannel.position(nextOffsetToRead); BufferAndHeader partialBuffer = new BufferAndHeader(null, null); try { while (!freeSegments.isEmpty() && currentRegionRemainingBytes > 0) { MemorySegment segment = freeSegments.poll(); int numBytes = (int) Math.min(segment.size(), currentRegionRemainingBytes); ByteBuffer byteBuffer = segment.wrap(0, numBytes); try { BufferReaderWriterUtil.readByteBufferFully(dataFileChannel, byteBuffer); byteBuffer.flip(); currentRegionRemainingBytes -= byteBuffer.remaining(); nextOffsetToRead += byteBuffer.remaining(); } catch (Throwable throwable) { freeSegments.add(segment); throw throwable; } NetworkBuffer buffer = new NetworkBuffer(segment, recycler); buffer.setSize(byteBuffer.remaining()); try { partialBuffer = processBuffer(byteBuffer, buffer, partialBuffer, consumer); } catch (Throwable throwable) { partialBuffer = new BufferAndHeader(null, null); throw throwable; } finally { buffer.recycleBuffer(); } } } finally { if (headerBuf.position() > 0) { nextOffsetToRead -= headerBuf.position(); currentRegionRemainingBytes += headerBuf.position(); headerBuf.clear(); } if (partialBuffer.header != null) { nextOffsetToRead -= HEADER_LENGTH; currentRegionRemainingBytes += HEADER_LENGTH; } if (partialBuffer.buffer != null) { nextOffsetToRead -= partialBuffer.buffer.readableBytes(); currentRegionRemainingBytes += partialBuffer.buffer.readableBytes(); partialBuffer.buffer.recycleBuffer(); } } return hasRemaining(); }
3.68
dubbo_CollectionUtils_isNotEmptyMap
/** * Return {@code true} if the supplied Map is not {@code null} and not empty. * Otherwise, return {@code false}. * * @param map the Map to check * @return whether the given Map is not empty */ public static boolean isNotEmptyMap(Map map) { return !isEmptyMap(map); }
3.68
hbase_DisableTableProcedure_postDisable
/** * Action after disabling table. * @param env MasterProcedureEnv * @param state the procedure state */ protected void postDisable(final MasterProcedureEnv env, final DisableTableState state) throws IOException, InterruptedException { runCoprocessorAction(env, state); }
3.68
morf_SchemaUtils_primaryKeysForTable
/** * List the primary key columns for a given table. * * @param table The table * @return The primary key columns */ public static List<Column> primaryKeysForTable(Table table) { return table.columns().stream().filter(Column::isPrimaryKey).collect(Collectors.toList()); }
3.68
flink_BaseTwoInputStreamOperatorWithStateRetention_registerProcessingCleanupTimer
/** * If the user has specified a {@code minRetentionTime} and {@code maxRetentionTime}, this * method registers a cleanup timer for {@code currentProcessingTime + minRetentionTime}. * * <p>When this timer fires, the {@link #cleanupState(long)} method is called. */ protected void registerProcessingCleanupTimer() throws IOException { if (stateCleaningEnabled) { long currentProcessingTime = timerService.currentProcessingTime(); Optional<Long> currentCleanupTime = Optional.ofNullable(latestRegisteredCleanupTimer.value()); if (!currentCleanupTime.isPresent() || (currentProcessingTime + minRetentionTime) > currentCleanupTime.get()) { updateCleanupTimer(currentProcessingTime, currentCleanupTime); } } }
3.68
hudi_WriteProfile_getSmallFiles
/** * Returns a list of small files in the given partition path. * * <p>Note: This method should be thread safe. */ public synchronized List<SmallFile> getSmallFiles(String partitionPath) { // lookup the cache first if (smallFilesMap.containsKey(partitionPath)) { return smallFilesMap.get(partitionPath); } List<SmallFile> smallFiles = new ArrayList<>(); if (config.getParquetSmallFileLimit() <= 0) { this.smallFilesMap.put(partitionPath, smallFiles); return smallFiles; } smallFiles = smallFilesProfile(partitionPath); this.smallFilesMap.put(partitionPath, smallFiles); return smallFiles; }
3.68
hbase_RegionServerTracker_processAsActiveMaster
// execute the operations which are only needed for active masters, such as expire old servers, // add new servers, etc. private void processAsActiveMaster(Set<ServerName> newServers) { Set<ServerName> oldServers = regionServers; ServerManager serverManager = server.getServerManager(); // expire dead servers for (ServerName crashedServer : Sets.difference(oldServers, newServers)) { LOG.info("RegionServer ephemeral node deleted, processing expiration [{}]", crashedServer); serverManager.expireServer(crashedServer); } // check whether there are new servers, log them boolean newServerAdded = false; for (ServerName sn : newServers) { if (!oldServers.contains(sn)) { newServerAdded = true; LOG.info("RegionServer ephemeral node created, adding [" + sn + "]"); } } if (newServerAdded && server.isInitialized()) { // Only call the check to move servers if a RegionServer was added to the cluster; in this // case it could be a server with a new version so it makes sense to run the check. server.checkIfShouldMoveSystemRegionAsync(); } }
3.68
dubbo_AccessLogData_newLogData
/** * Get new instance of log data. * * @return instance of AccessLogData */ public static AccessLogData newLogData() { return new AccessLogData(); }
3.68
morf_DatabaseMetaDataProvider_readTableName
/** * Retrieves table name from a result set. * * @param tableResultSet Result set to be read. * @return Name of the table. * @throws SQLException Upon errors. */ protected RealName readTableName(ResultSet tableResultSet) throws SQLException { String tableName = tableResultSet.getString(TABLE_NAME); return createRealName(tableName, tableName); }
3.68
flink_OperatingSystemRestriction_forbid
/** * Forbids the execution on the given set of operating systems. * * @param reason reason for the restriction * @param forbiddenSystems forbidden operating systems * @throws AssumptionViolatedException if this method is called on a forbidden operating system */ public static void forbid(final String reason, final OperatingSystem... forbiddenSystems) throws AssumptionViolatedException { final OperatingSystem os = OperatingSystem.getCurrentOperatingSystem(); for (final OperatingSystem forbiddenSystem : forbiddenSystems) { Assume.assumeTrue(reason, os != forbiddenSystem); } }
3.68
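A sketch of how a test might use forbid, assuming JUnit 4 and an OperatingSystem.WINDOWS constant:

    @Before
    public void checkOperatingSystem() {
        // Skips (rather than fails) the test on Windows via JUnit's Assume mechanism.
        OperatingSystemRestriction.forbid(
                "Symbolic links are not supported on Windows", OperatingSystem.WINDOWS);
    }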
framework_TreeTable_removeListener
/** * @deprecated As of 7.0, replaced by * {@link #removeCollapseListener(CollapseListener)} */ @Deprecated public void removeListener(CollapseListener listener) { removeCollapseListener(listener); }
3.68
framework_ActionsOnInvisibleComponents_getTicketNumber
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber() */ @Override protected Integer getTicketNumber() { return 12743; }
3.68
hadoop_RBFMetrics_setStateStoreVersions
/** * Populate the map with the State Store versions. * * @param map Map with the information. * @param version State Store versions. */ private static void setStateStoreVersions( Map<String, Object> map, StateStoreVersion version) { long membershipVersion = version.getMembershipVersion(); String lastMembershipUpdate = getDateString(membershipVersion); map.put("lastMembershipUpdate", lastMembershipUpdate); long mountTableVersion = version.getMountTableVersion(); String lastMountTableDate = getDateString(mountTableVersion); map.put("lastMountTableUpdate", lastMountTableDate); }
3.68
hudi_HoodieCombineHiveInputFormat_toString
/** * Prints this object as a string. */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append(inputSplitShim.toString()); sb.append("InputFormatClass: " + inputFormatClassName); sb.append("\n"); return sb.toString(); }
3.68
framework_LayoutManager_reportHeightAssignedToRelative
/** * Registers the height reserved for a relatively sized component. This can * be used as an optimization by ManagedLayouts; by informing the * LayoutManager about what size a component will have, the layout * propagation can continue directly without first measuring the potentially * resized elements. * * @param component * the relatively sized component for which the size is reported * @param assignedHeight * the inner height of the relatively sized component's parent * element in pixels */ public void reportHeightAssignedToRelative(ComponentConnector component, int assignedHeight) { assert component.isRelativeHeight(); float percentSize = parsePercent( component.getState().height == null ? "" : component.getState().height); int effectiveHeight = Math.round(assignedHeight * (percentSize / 100)); reportOuterHeight(component, effectiveHeight); }
3.68
hbase_NettyFutureUtils_safeWrite
/** * Call write on the channel and eat the returned future by logging the error when the future is * completed with error. */ public static void safeWrite(ChannelOutboundInvoker channel, Object msg) { consume(channel.write(msg)); }
3.68
hbase_HRegion_checkRow
/** Make sure this is a valid row for the HRegion */ void checkRow(byte[] row, String op) throws IOException { if (!rowIsInRange(getRegionInfo(), row)) { throw new WrongRegionException("Requested row out of range for " + op + " on HRegion " + this + ", startKey='" + Bytes.toStringBinary(getRegionInfo().getStartKey()) + "', getEndKey()='" + Bytes.toStringBinary(getRegionInfo().getEndKey()) + "', row='" + Bytes.toStringBinary(row) + "'"); } }
3.68
morf_XmlDataSetProducer_getAutoNumberStart
/** * @see org.alfasoftware.morf.metadata.Column#getAutoNumberStart() */ @Override public int getAutoNumberStart() { return autonumberStart == null ? 0 : autonumberStart; }
3.68
morf_AbstractSqlDialectTest_testSelectWhereIsNull
/** * Tests a select with a null check clause. */ @Test public void testSelectWhereIsNull() { SelectStatement stmt = new SelectStatement() .from(new TableReference(TEST_TABLE)) .where(isNull(new FieldReference(INT_FIELD))); String expectedSql = "SELECT * FROM " + tableName(TEST_TABLE) + " WHERE (intField IS NULL)"; assertEquals("Select with null check clause", expectedSql, testDialect.convertStatementToSQL(stmt)); }
3.68
flink_BufferDecompressor_decompressToOriginalBuffer
/** * The difference between this method and {@link #decompressToIntermediateBuffer(Buffer)} is * that this method copies the decompressed data to the input {@link Buffer} starting from * offset 0. * * <p>The caller must guarantee that the input {@link Buffer} is writable and there's enough * space left. */ @VisibleForTesting public Buffer decompressToOriginalBuffer(Buffer buffer) { int decompressedLen = decompress(buffer); // copy the decompressed data back int memorySegmentOffset = buffer.getMemorySegmentOffset(); MemorySegment segment = buffer.getMemorySegment(); segment.put(memorySegmentOffset, internalBufferArray, 0, decompressedLen); return new ReadOnlySlicedNetworkBuffer( buffer.asByteBuf(), 0, decompressedLen, memorySegmentOffset, false); }
3.68
hbase_HBackupFileSystem_getBackupTmpDirPathForBackupId
/** * Get backup tmp directory for backupId * @param backupRoot backup root * @param backupId backup id * @return backup tmp directory path */ public static Path getBackupTmpDirPathForBackupId(String backupRoot, String backupId) { return new Path(getBackupTmpDirPath(backupRoot), backupId); }
3.68
hadoop_HdfsFileStatus_storagePolicy
/** * Set the storage policy for this entity * (default = {@link HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED}). * @param storagePolicy Storage policy * @return This Builder instance */ public Builder storagePolicy(byte storagePolicy) { this.storagePolicy = storagePolicy; return this; }
3.68
zxing_DetectionResultRowIndicatorColumn_adjustIncompleteIndicatorColumnRowNumbers
// TODO maybe we should add missing codewords to store the correct row number to make // finding row numbers for other columns easier // use row height count to make detection of invalid row numbers more reliable private void adjustIncompleteIndicatorColumnRowNumbers(BarcodeMetadata barcodeMetadata) { BoundingBox boundingBox = getBoundingBox(); ResultPoint top = isLeft ? boundingBox.getTopLeft() : boundingBox.getTopRight(); ResultPoint bottom = isLeft ? boundingBox.getBottomLeft() : boundingBox.getBottomRight(); int firstRow = imageRowToCodewordIndex((int) top.getY()); int lastRow = imageRowToCodewordIndex((int) bottom.getY()); //float averageRowHeight = (lastRow - firstRow) / (float) barcodeMetadata.getRowCount(); Codeword[] codewords = getCodewords(); int barcodeRow = -1; int maxRowHeight = 1; int currentRowHeight = 0; for (int codewordsRow = firstRow; codewordsRow < lastRow; codewordsRow++) { if (codewords[codewordsRow] == null) { continue; } Codeword codeword = codewords[codewordsRow]; codeword.setRowNumberAsRowIndicatorColumn(); int rowDifference = codeword.getRowNumber() - barcodeRow; // TODO improve handling with case where first row indicator doesn't start with 0 if (rowDifference == 0) { currentRowHeight++; } else if (rowDifference == 1) { maxRowHeight = Math.max(maxRowHeight, currentRowHeight); currentRowHeight = 1; barcodeRow = codeword.getRowNumber(); } else if (codeword.getRowNumber() >= barcodeMetadata.getRowCount()) { codewords[codewordsRow] = null; } else { barcodeRow = codeword.getRowNumber(); currentRowHeight = 1; } } //return (int) (averageRowHeight + 0.5); }
3.68
hbase_HDFSBlocksDistribution_getWeight
/** * Return the weight for a specific host; this is the total bytes of all blocks on the host. * @param host the host name * @return the weight of the given host */ public long getWeight(String host) { long weight = 0; if (host != null) { HostAndWeight hostAndWeight = this.hostAndWeights.get(host); if (hostAndWeight != null) { weight = hostAndWeight.getWeight(); } } return weight; }
3.68
framework_HasItems_setItems
/** * Sets the data items of this listing provided as a stream. * <p> * This is just a shorthand for {@link #setItems(Collection)}, that * <b>collects objects in the stream to a list</b>. Thus, using this method, * instead of its array and Collection variations, doesn't save any memory. * If you have a large data set to bind, using a lazy data provider is * recommended. See {@link BackEndDataProvider} for more info. * <p> * The provided items are wrapped into a {@link ListDataProvider} and this * instance is used as a data provider for the * {@link #setDataProvider(DataProvider)} method. It means that the items * collection can be accessed later on via * {@link ListDataProvider#getItems()}: * * <pre> * <code> * HasDataProvider&lt;String&gt; listing = new CheckBoxGroup<&gt;(); * listing.setItems(Stream.of("a","b")); * ... * * Collection<String> collection = ((ListDataProvider&lt;String&gt;)listing.getDataProvider()).getItems(); * </code> * </pre> * <p> * * @see #setItems(Collection) * * @param streamOfItems * the stream of data items to display, not {@code null} */ public default void setItems(Stream<T> streamOfItems) { setItems(streamOfItems.collect(Collectors.toList())); }
3.68
morf_AutoNumberRemovalHelper_removeAutonumber
/** * Removes the row, referring to a {@linkplain Table}, from the AutoNumber table. * * @param dataEditor Executor of statements * @param table The table for which autonumbering will be removed */ public static void removeAutonumber(DataEditor dataEditor, Table table) { dataEditor.executeStatement(delete(tableRef("AutoNumber")).where(eq(field("name"), literal(table.getName())))); }
3.68
flink_RpcEndpoint_validateScheduledExecutorClosed
/** * Validate whether the scheduled executor is closed. * * @return true if the scheduled executor is shutdown, otherwise false */ final boolean validateScheduledExecutorClosed() { return mainScheduledExecutor.isShutdown(); }
3.68
morf_ConnectionResourcesBean_setStatementPoolingMaxStatements
/** * @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#setStatementPoolingMaxStatements(int) */ @Override public void setStatementPoolingMaxStatements(int statementPoolingMaxStatements) { this.statementPoolingMaxStatements = statementPoolingMaxStatements; }
3.68
framework_TouchScrollDelegate_removeElement
/** * Unregisters the given element as scrollable. Should be called when a * previously-registered element is removed from the DOM to prevent * memory leaks. */ public void removeElement(Element scrollable) { scrollable.removeClassName(SCROLLABLE_CLASSNAME); if (requiresDelegate()) { delegate.scrollableElements.remove(scrollable); } }
3.68
querydsl_AbstractMongodbQuery_fetch
/** * Fetch with the specific fields * * @param paths fields to return * @return results */ public List<K> fetch(Path<?>... paths) { queryMixin.setProjection(paths); return fetch(); }
3.68
flink_ExceptionUtils_stringifyException
/** * Makes a string representation of the exception's stack trace, or "(null)", if the exception * is null. * * <p>This method makes a best effort and never fails. * * @param e The exception to stringify. * @return A string with exception name and call stack. */ public static String stringifyException(final Throwable e) { if (e == null) { return STRINGIFIED_NULL_EXCEPTION; } try { StringWriter stm = new StringWriter(); PrintWriter wrt = new PrintWriter(stm); e.printStackTrace(wrt); wrt.close(); return stm.toString(); } catch (Throwable t) { return e.getClass().getName() + " (error while printing stack trace)"; } }
3.68
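A minimal sketch of the stringifyException contract:

    String trace = ExceptionUtils.stringifyException(new RuntimeException("boom"));
    // trace contains "java.lang.RuntimeException: boom" followed by the call stack
    String forNull = ExceptionUtils.stringifyException(null); // "(null)"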
AreaShop_GeneralRegion_getVolume
/** * Get the volume of the region (number of blocks inside it). * @return Number of blocks in the region */ public long getVolume() { // Cache volume, important for polygon regions if(volume < 0) { volume = calculateVolume(); } return volume; }
3.68
framework_DeclarativeTestUI_getComponent
/** * Get access to the declaratively created component. This method typecasts * the component to the receiving type; if there's a mismatch between what * you expect and what's written in the design, this will fail with a * ClassCastException. * * @return a Vaadin component */ @SuppressWarnings("unchecked") public <T extends Component> T getComponent() { try { return (T) component; } catch (ClassCastException ex) { getLogger().log(Level.SEVERE, "Component code/design type mismatch", ex); } return null; }
3.68
hbase_ParseFilter_convertByteArrayToInt
/** * Converts an int expressed in a byte array to an actual int * <p> * This doesn't use Bytes.toInt because that assumes that there will be {@link Bytes#SIZEOF_INT} * bytes available. * <p> * @param numberAsByteArray the int value expressed as a byte array * @return the int value */ public static int convertByteArrayToInt(byte[] numberAsByteArray) { long tempResult = ParseFilter.convertByteArrayToLong(numberAsByteArray); if (tempResult > Integer.MAX_VALUE) { throw new IllegalArgumentException("Integer Argument too large"); } else if (tempResult < Integer.MIN_VALUE) { throw new IllegalArgumentException("Integer Argument too small"); } int result = (int) tempResult; return result; }
3.68
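A sketch of the overflow guard in convertByteArrayToInt, assuming convertByteArrayToLong parses ASCII-encoded decimal digits:

    int v = ParseFilter.convertByteArrayToInt(Bytes.toBytes("42"));  // 42
    // A value beyond Integer.MAX_VALUE triggers the guard:
    ParseFilter.convertByteArrayToInt(Bytes.toBytes("3000000000")); // throws IllegalArgumentException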
framework_DragSourceExtensionConnector_getDraggableElement
/** * Finds the draggable element within the widget. By default, returns the * topmost element. * <p> * Override this method to make an element other than the root element draggable * instead. * <p> * In case you need to make more than one element draggable, override * {@link #extend(ServerConnector)} instead. * * @return the draggable element in the parent widget. */ protected Element getDraggableElement() { return dragSourceWidget.getElement(); }
3.68
framework_Color_setRed
/** * Sets the red value of the color. Value must be within the range [0, 255]. * * @param red * new red value */ public void setRed(int red) { if (withinRange(red)) { this.red = red; } else { throw new IllegalArgumentException(OUTOFRANGE + red); } }
3.68
flink_StreamProjection_projectTuple5
/** * Projects a {@link Tuple} {@link DataStream} to the previously selected fields. * * @return The projected DataStream. * @see Tuple * @see DataStream */ public <T0, T1, T2, T3, T4> SingleOutputStreamOperator<Tuple5<T0, T1, T2, T3, T4>> projectTuple5() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType()); TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>> tType = new TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>>(fTypes); return dataStream.transform( "Projection", tType, new StreamProject<IN, Tuple5<T0, T1, T2, T3, T4>>( fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig()))); }
3.68
flink_EnvironmentInformation_getHadoopUser
/** * Gets the name of the user that is running the JVM. * * @return The name of the user that is running the JVM. */ public static String getHadoopUser() { try { Class<?> ugiClass = Class.forName( "org.apache.hadoop.security.UserGroupInformation", false, EnvironmentInformation.class.getClassLoader()); Method currentUserMethod = ugiClass.getMethod("getCurrentUser"); Method shortUserNameMethod = ugiClass.getMethod("getShortUserName"); Object ugi = currentUserMethod.invoke(null); return (String) shortUserNameMethod.invoke(ugi); } catch (ClassNotFoundException e) { return "<no hadoop dependency found>"; } catch (LinkageError e) { // hadoop classes are not in the classpath LOG.debug( "Cannot determine user/group information using Hadoop utils. " + "Hadoop classes not loaded or compatible", e); } catch (Throwable t) { // some other error occurred that we should log and make known LOG.warn("Error while accessing user/group information via Hadoop utils.", t); } return UNKNOWN; }
3.68
hadoop_AbstractSchedulerPlanFollower_getReservationQueueName
// Schedulers have different ways of naming queues. See YARN-2773 protected String getReservationQueueName(String planQueueName, String reservationId) { return reservationId; }
3.68
hadoop_JsonObjectMapperParser_getNext
/** * Get the next object from the trace. * * @return The next instance of the object, or null if we reach the end of * the stream. * @throws IOException if an I/O error occurs while reading from the underlying stream. */ public T getNext() throws IOException { try { return mapper.readValue(jsonParser, clazz); } catch (JsonMappingException e) { return null; } }
3.68
zxing_ModulusPoly_isZero
/** * @return true iff this polynomial is the monomial "0" */ boolean isZero() { return coefficients[0] == 0; }
3.68
flink_StatsSummarySnapshot_getMinimum
/** * Returns the minimum seen value. * * @return The current minimum value. */ public long getMinimum() { return min; }
3.68
rocketmq-connect_ColumnDefinition_mutability
/** * Indicates whether the designated column is mutable. * * @return the mutability; never null */ public Mutability mutability() { return mutability; }
3.68
flink_RichSqlInsertKeyword_symbol
/** * Creates a parse-tree node representing an occurrence of this keyword at a particular position * in the parsed text. */ public SqlLiteral symbol(SqlParserPos pos) { return SqlLiteral.createSymbol(this, pos); }
3.68
hudi_HiveSchemaUtils_createHiveColumns
/** * Create Hive columns from Flink table schema. */ private static List<FieldSchema> createHiveColumns(TableSchema schema) { final DataType dataType = schema.toPersistedRowDataType(); final RowType rowType = (RowType) dataType.getLogicalType(); final String[] fieldNames = rowType.getFieldNames().toArray(new String[0]); final DataType[] fieldTypes = dataType.getChildren().toArray(new DataType[0]); List<FieldSchema> columns = new ArrayList<>(fieldNames.length); for (int i = 0; i < fieldNames.length; i++) { columns.add( new FieldSchema( fieldNames[i], toHiveTypeInfo(fieldTypes[i]).getTypeName(), null)); } return columns; }
3.68
hbase_PermissionStorage_getUserPermissions
/** * Returns the currently granted permissions for a given table/namespace with associated * permissions based on the specified column family, column qualifier and user name. * @param conf the configuration * @param entryName Table name or the namespace * @param cf Column family * @param cq Column qualifier * @param user User name to be filtered from permission as requested * @param hasFilterUser true if filter user is provided, otherwise false. * @return List of UserPermissions * @throws IOException on failure */ public static List<UserPermission> getUserPermissions(Configuration conf, byte[] entryName, byte[] cf, byte[] cq, String user, boolean hasFilterUser) throws IOException { ListMultimap<String, UserPermission> allPerms = getPermissions(conf, entryName, null, cf, cq, user, hasFilterUser); List<UserPermission> perms = new ArrayList<>(); for (Map.Entry<String, UserPermission> entry : allPerms.entries()) { perms.add(entry.getValue()); } return perms; }
3.68
framework_VaadinFinderLocatorStrategy_getBestSelector
/** * Search different queries for the best one. Use the fact that the lowest * possible index is with the last selector. Last selector is the full * search path containing the complete Component hierarchy. * * @param selectors * List of selectors * @param target * Target element * @param subPart * sub part selector string for actual target * @return Best selector string formatted with a post filter */ private String getBestSelector(List<String> selectors, Element target, String subPart) { // The last selector gives us smallest list index for target element. String bestSelector = selectors.get(selectors.size() - 1); int min = getElementsByPath(bestSelector).indexOf(target); if (selectors.size() > 1 && min == getElementsByPath(selectors.get(0)).indexOf(target)) { // The first selector has same index as last. It's much shorter. bestSelector = selectors.get(0); } else if (selectors.size() > 2) { // See if we get minimum from second last. If not then we already // have the best one.. Second last one contains almost full // component hierarchy. if (getElementsByPath(selectors.get(selectors.size() - 2)) .indexOf(target) == min) { for (int i = 1; i < selectors.size() - 2; ++i) { // Loop through the remaining selectors and look for one // with the same index if (getElementsByPath(selectors.get(i)) .indexOf(target) == min) { bestSelector = selectors.get(i); break; } } } } return "(" + bestSelector + (subPart != null ? "#" + subPart : "") + ")[" + min + "]"; }
3.68
hbase_HRegion_execService
/** * Executes a single protocol buffer coprocessor endpoint {@link Service} method using the * registered protocol handlers. {@link Service} implementations must be registered via the * {@link #registerService(Service)} method before they are available. * @param controller an {@code RpcController} implementation to pass to the invoked service * @param call a {@code CoprocessorServiceCall} instance identifying the service, method, * and parameters for the method invocation * @return a protocol buffer {@code Message} instance containing the method's result * @throws IOException if no registered service handler is found or an error occurs during the * invocation * @see #registerService(Service) */ public Message execService(RpcController controller, CoprocessorServiceCall call) throws IOException { String serviceName = call.getServiceName(); Service service = coprocessorServiceHandlers.get(serviceName); if (service == null) { throw new UnknownProtocolException(null, "No registered coprocessor service found for " + serviceName + " in region " + Bytes.toStringBinary(getRegionInfo().getRegionName())); } ServiceDescriptor serviceDesc = service.getDescriptorForType(); cpRequestsCount.increment(); String methodName = call.getMethodName(); MethodDescriptor methodDesc = CoprocessorRpcUtils.getMethodDescriptor(methodName, serviceDesc); Message.Builder builder = service.getRequestPrototype(methodDesc).newBuilderForType(); ProtobufUtil.mergeFrom(builder, call.getRequest().toByteArray()); Message request = CoprocessorRpcUtils.getRequest(service, methodDesc, call.getRequest()); if (coprocessorHost != null) { request = coprocessorHost.preEndpointInvocation(service, methodName, request); } final Message.Builder responseBuilder = service.getResponsePrototype(methodDesc).newBuilderForType(); service.callMethod(methodDesc, controller, request, new RpcCallback<Message>() { @Override public void run(Message message) { if (message != null) { responseBuilder.mergeFrom(message); } } }); if (coprocessorHost != null) { coprocessorHost.postEndpointInvocation(service, methodName, request, responseBuilder); } IOException exception = org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils.getControllerException(controller); if (exception != null) { throw exception; } return responseBuilder.build(); }
3.68
hudi_ConsistentBucketIdentifier_initialize
/** * Initialize necessary data structure to facilitate bucket identifying. * Specifically, we construct: * - An in-memory tree (ring) to speed up range mapping searching. * - A hash table (fileIdToBucket) to allow lookup of bucket using fileId. * <p> * Children nodes are also considered, and will override the original nodes, * which is used during bucket resizing (i.e., children nodes take the place * of the original nodes) */ private void initialize() { for (ConsistentHashingNode p : metadata.getNodes()) { ring.put(p.getValue(), p); // One bucket has only one file group, so append 0 directly fileIdToBucket.put(FSUtils.createNewFileId(p.getFileIdPrefix(), 0), p); } // Handle children nodes, i.e., replace or delete the original nodes ConsistentHashingNode tmp; for (ConsistentHashingNode p : metadata.getChildrenNodes()) { switch (p.getTag()) { case REPLACE: tmp = ring.put(p.getValue(), p); if (tmp != null) { fileIdToBucket.remove(FSUtils.createNewFileId(tmp.getFileIdPrefix(), 0)); } fileIdToBucket.put(FSUtils.createNewFileId(p.getFileIdPrefix(), 0), p); break; case DELETE: tmp = ring.remove(p.getValue()); fileIdToBucket.remove(FSUtils.createNewFileId(tmp.getFileIdPrefix(), 0)); break; default: throw new HoodieClusteringException("Children node is tagged as NORMAL or unknown tag: " + p); } } }
3.68
flink_IOManager_createChannel
/** * Creates a new {@link ID} in one of the temp directories. Multiple invocations of this method * spread the channels evenly across the different directories. * * @return A channel to a temporary directory. */ public ID createChannel() { return fileChannelManager.createChannel(); }
3.68
flink_DataSet_fillInType
/** * Tries to fill in the type information. Type information can be filled in later when the * program uses a type hint. This method checks whether the type information has ever been * accessed before and does not allow modifications if the type was accessed already. This * ensures consistency by making sure different parts of the operation do not assume different * type information. * * @param typeInfo The type information to fill in. * @throws IllegalStateException Thrown, if the type information has been accessed before. */ protected void fillInType(TypeInformation<T> typeInfo) { if (typeUsed) { throw new IllegalStateException( "TypeInformation cannot be filled in for the type after it has been used. " + "Please make sure that the type info hints are the first call after the transformation function, " + "before any access to types or semantic properties, etc."); } this.type = typeInfo; }
3.68
hadoop_FSDataOutputStreamBuilder_checksumOpt
/** * Set the checksum option. * * @param chksumOpt checksum option. * @return Generics Type B. */ public B checksumOpt(@Nonnull final ChecksumOpt chksumOpt) { checkNotNull(chksumOpt); checksumOpt = chksumOpt; return getThisBuilder(); }
3.68
flink_CliFrontend_handleError
/** * Displays an exception message. * * @param t The exception to display. * @return The return code for the process. */ private static int handleError(Throwable t) { LOG.error("Error while running the command.", t); System.err.println(); System.err.println("------------------------------------------------------------"); System.err.println(" The program finished with the following exception:"); System.err.println(); if (t.getCause() instanceof InvalidProgramException) { System.err.println(t.getCause().getMessage()); StackTraceElement[] trace = t.getCause().getStackTrace(); for (StackTraceElement ele : trace) { System.err.println("\t" + ele); if (ele.getMethodName().equals("main")) { break; } } } else { t.printStackTrace(); } return 1; }
3.68
hbase_ScannerContext_checkBatchLimit
/** * @param checkerScope The scope that the limit is being checked from * @return true when the limit is enforceable from the checker's scope and it has been reached */ boolean checkBatchLimit(LimitScope checkerScope) { return !skippingRow && hasBatchLimit(checkerScope) && progress.getBatch() >= limits.getBatch(); }
3.68
flink_SortUtil_putTimestampNormalizedKey
/** Support the compact precision TimestampData. */ public static void putTimestampNormalizedKey( TimestampData value, MemorySegment target, int offset, int numBytes) { assert value.getNanoOfMillisecond() == 0; putLongNormalizedKey(value.getMillisecond(), target, offset, numBytes); }
3.68
streampipes_SpServiceDefinitionBuilder_registerMigrators
/** * Include migrations in the service definition. * <br> * Please refrain from providing {@link IModelMigrator}s with overlapping version definitions for one application id. * @param migrations List of migrations to be registered * @return {@link SpServiceDefinitionBuilder} */ public SpServiceDefinitionBuilder registerMigrators(IModelMigrator<?, ?>... migrations) { this.serviceDefinition.addMigrators(List.of(migrations)); return this; }
3.68
hadoop_ResourceSkyline_setJobId
/** * Set jobId. * * @param jobIdConfig jobId. */ public final void setJobId(final String jobIdConfig) { this.jobId = jobIdConfig; }
3.68
hudi_ErrorTableUtils_validate
/** * Validates constraints on the error record column when error table enabled configs are set. * @param dataset the dataset to validate */ public static void validate(Dataset<Row> dataset) { if (!isErrorTableCorruptRecordColumnPresent(dataset)) { throw new HoodieValidationException(String.format("Invalid condition, columnName=%s " + "is not present in transformer " + "output schema", ERROR_TABLE_CURRUPT_RECORD_COL_NAME)); } }
3.68