iri | iri-master/src/main/java/com/iota/iri/service/milestone/impl/MilestoneServiceImpl.java
package com.iota.iri.service.milestone.impl;
import com.iota.iri.BundleValidator;
import com.iota.iri.conf.MilestoneConfig;
import com.iota.iri.controllers.MilestoneViewModel;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.crypto.Curl;
import com.iota.iri.crypto.ISS;
import com.iota.iri.crypto.ISSInPlace;
import com.iota.iri.crypto.SpongeFactory;
import com.iota.iri.model.Hash;
import com.iota.iri.model.HashFactory;
import com.iota.iri.model.StateDiff;
import com.iota.iri.service.milestone.MilestoneException;
import com.iota.iri.service.milestone.MilestoneService;
import com.iota.iri.service.milestone.MilestoneValidity;
import com.iota.iri.service.snapshot.Snapshot;
import com.iota.iri.service.snapshot.SnapshotProvider;
import com.iota.iri.service.snapshot.SnapshotService;
import com.iota.iri.storage.Tangle;
import com.iota.iri.utils.Converter;
import com.iota.iri.utils.dag.DAGHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import static com.iota.iri.controllers.TransactionViewModel.OBSOLETE_TAG_TRINARY_OFFSET;
import static com.iota.iri.service.milestone.MilestoneValidity.*;
/**
* <p>
* Creates a service instance that allows us to perform milestone specific operations.
* </p>
* <p>
* This class is stateless and does not hold any domain specific models.
* </p>
*/
public class MilestoneServiceImpl implements MilestoneService {
/**
* Holds the logger of this class.
*/
private final static Logger log = LoggerFactory.getLogger(MilestoneServiceImpl.class);
/**
* Holds the tangle object which acts as a database interface.
*/
private final Tangle tangle;
/**
* Holds the snapshot provider which gives us access to the relevant snapshots.
*/
private final SnapshotProvider snapshotProvider;
/**
* Holds a reference to the service instance of the snapshot package that allows us to roll back ledger states.
*/
private final SnapshotService snapshotService;
/**
* Configuration with milestone specific settings
*/
private final MilestoneConfig config;
private final BundleValidator bundleValidator;
/**
* @param tangle Tangle object which acts as a database interface
* @param snapshotProvider snapshot provider which gives us access to the relevant snapshots
* @param snapshotService service for modifying and generating snapshots
* @param bundleValidator Validator to use when checking milestones
* @param config config with important milestone specific settings
*/
public MilestoneServiceImpl(Tangle tangle, SnapshotProvider snapshotProvider, SnapshotService snapshotService,
BundleValidator bundleValidator, MilestoneConfig config) {
this.tangle = tangle;
this.snapshotProvider = snapshotProvider;
this.snapshotService = snapshotService;
this.bundleValidator = bundleValidator;
this.config = config;
}
//region [PUBLIC METHODS] //////////////////////////////////////////////////////////////////////////////////////////
/**
* {@inheritDoc}
*
* <p>
* We first check the trivial case where the node was fully synced. If no processed solid milestone could be found
* within the last two milestones of the node, we perform a binary search from present to past, which reduces the
* number of database requests to a minimum (even with a huge number of milestones in the database).
* </p>
*/
@Override
public Optional<MilestoneViewModel> findLatestProcessedSolidMilestoneInDatabase() throws MilestoneException {
try {
// if we have no milestone in our database -> abort
MilestoneViewModel latestMilestone = MilestoneViewModel.latest(tangle);
if (latestMilestone == null) {
return Optional.empty();
}
// trivial case #1: the node was fully synced
if (wasMilestoneAppliedToLedger(latestMilestone)) {
return Optional.of(latestMilestone);
}
// trivial case #2: the node was fully synced but the last milestone was not processed yet
MilestoneViewModel latestMilestonePredecessor = MilestoneViewModel.findClosestPrevMilestone(tangle,
latestMilestone.index(), snapshotProvider.getInitialSnapshot().getIndex());
if (latestMilestonePredecessor != null && wasMilestoneAppliedToLedger(latestMilestonePredecessor)) {
return Optional.of(latestMilestonePredecessor);
}
// non-trivial case: do a binary search in the database
return binarySearchLatestProcessedSolidMilestoneInDatabase(latestMilestone);
} catch (Exception e) {
throw new MilestoneException(
"unexpected error while trying to find the latest processed solid milestone in the database", e);
}
}
@Override
public void updateMilestoneIndexOfMilestoneTransactions(Hash milestoneHash, int index) throws MilestoneException {
if (index <= 0) {
throw new MilestoneException("the new index needs to be bigger than 0 " +
"(use resetCorruptedMilestone to reset the milestone index)");
}
updateMilestoneIndexOfMilestoneTransactions(milestoneHash, index, index, new HashSet<>());
}
/**
* {@inheritDoc}
*
* <p>
* We redirect the call to {@link #resetCorruptedMilestone(int, Set)} while initiating the set of {@code
* processedTransactions} with an empty {@link HashSet} which will ensure that we reset all found
* transactions.
* </p>
*/
@Override
public void resetCorruptedMilestone(int index) throws MilestoneException {
resetCorruptedMilestone(index, new HashSet<>());
}
@Override
public MilestoneValidity validateMilestone(TransactionViewModel transactionViewModel, int milestoneIndex)
throws MilestoneException {
if (milestoneIndex < 0 || milestoneIndex >= config.getMaxMilestoneIndex()) {
return INVALID;
}
try {
MilestoneViewModel existingMilestone = MilestoneViewModel.get(tangle, milestoneIndex);
if(existingMilestone != null){
return existingMilestone.getHash().equals(transactionViewModel.getHash()) ? VALID : INVALID;
}
final List<TransactionViewModel> bundleTransactions = bundleValidator.validate(tangle, true,
snapshotProvider.getInitialSnapshot(), transactionViewModel.getHash());
if (bundleTransactions.isEmpty()) {
return INCOMPLETE;
} else {
final TransactionViewModel tail = bundleTransactions.get(0);
if (tail.getHash().equals(transactionViewModel.getHash())) {
//the signed transaction - which references the confirmed transactions and contains
// the Merkle tree siblings.
int coordinatorSecurityLevel = config.getCoordinatorSecurityLevel();
if (isMilestoneBundleStructureValid(bundleTransactions, coordinatorSecurityLevel)) {
final TransactionViewModel siblingsTx = bundleTransactions
.get(coordinatorSecurityLevel);
//milestones sign the normalized hash of the sibling transaction.
byte[] signedHash = ISS.normalizedBundle(siblingsTx.getHash().trits());
//validate leaf signature
ByteBuffer bb = ByteBuffer.allocate(Curl.HASH_LENGTH * coordinatorSecurityLevel);
byte[] digest = new byte[Curl.HASH_LENGTH];
SpongeFactory.Mode coordinatorSignatureMode = config.getCoordinatorSignatureMode();
for (int i = 0; i < coordinatorSecurityLevel; i++) {
ISSInPlace.digest(coordinatorSignatureMode, signedHash,
ISS.NUMBER_OF_FRAGMENT_CHUNKS * i,
bundleTransactions.get(i).getSignature(), 0, digest);
bb.put(digest);
}
byte[] digests = bb.array();
byte[] address = ISS.address(coordinatorSignatureMode, digests);
//validate Merkle path
byte[] merkleRoot = ISS.getMerkleRoot(coordinatorSignatureMode, address,
siblingsTx.trits(), 0, milestoneIndex, config.getNumberOfKeysInMilestone());
boolean skipValidation = config.isTestnet() && config.isDontValidateTestnetMilestoneSig();
if (skipValidation || config.getCoordinator().equals(HashFactory.ADDRESS.create(merkleRoot))) {
MilestoneViewModel newMilestoneViewModel = new MilestoneViewModel(milestoneIndex,
transactionViewModel.getHash());
newMilestoneViewModel.store(tangle);
// if we find a NEW milestone that should have been processed before our latest solid
// milestone -> reset the ledger state and check the milestones again
//
// NOTE: this can happen if a new subtangle becomes solid before a previous one while
// syncing
if (milestoneIndex < snapshotProvider.getLatestSnapshot().getIndex() &&
milestoneIndex > snapshotProvider.getInitialSnapshot().getIndex()) {
resetCorruptedMilestone(milestoneIndex);
}
return VALID;
} else {
return INVALID;
}
}
}
}
} catch (Exception e) {
throw new MilestoneException("error while checking milestone status of " + transactionViewModel, e);
}
return INVALID;
}
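// Verification sketch (an illustrative recap of the checks above, with hypothetical values):
//   securityLevel = 2
//   signedHash = ISS.normalizedBundle(siblingsTx.getHash().trits())    // what the coordinator signed
//   digest_i   = ISSInPlace.digest(mode, signedHash, fragment i, signature of bundle tx i)
//   address    = ISS.address(mode, concat(digest_0, digest_1))         // reconstructed leaf address
//   merkleRoot = ISS.getMerkleRoot(mode, address, siblings, 0, milestoneIndex, numberOfKeys)
//   VALID iff merkleRoot equals the configured coordinator address (unless testnet validation is skipped)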
@Override
public int getMilestoneIndex(TransactionViewModel milestoneTransaction) {
return (int) Converter.longValue(milestoneTransaction.trits(), OBSOLETE_TAG_TRINARY_OFFSET, 15);
}
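// Encoding sketch (hedged): the milestone index is read as the first 15 trits of the
// obsolete tag, interpreted as a balanced-ternary number, so the largest representable
// index is (3^15 - 1) / 2 = 7,174,453. Hypothetical example:
//   Converter.longValue(trits, OBSOLETE_TAG_TRINARY_OFFSET, 15)   // e.g. -> 1234567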
@Override
public boolean isTransactionConfirmed(TransactionViewModel transaction, int milestoneIndex) {
return transaction.snapshotIndex() != 0 && transaction.snapshotIndex() <= milestoneIndex;
}
@Override
public boolean isTransactionConfirmed(TransactionViewModel transaction) {
return isTransactionConfirmed(transaction, snapshotProvider.getLatestSnapshot().getIndex());
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
//region [PRIVATE UTILITY METHODS] /////////////////////////////////////////////////////////////////////////////////
/**
* <p>
* Performs a binary search for the latest solid milestone which was already processed by the node and applied to
* the ledger state at some point in the past (i.e. before IRI got restarted).
* </p>
* <p>
* It searches from present to past using a binary search algorithm which quickly narrows down the number of
* candidates even for big databases.
* </p>
*
* @param latestMilestone the latest milestone in the database (used to define the search range)
* @return the latest solid milestone that was previously processed by IRI or an empty value if no previously
* processed solid milestone can be found
* @throws Exception if anything unexpected happens while performing the search
*/
private Optional<MilestoneViewModel> binarySearchLatestProcessedSolidMilestoneInDatabase(
MilestoneViewModel latestMilestone) throws Exception {
Optional<MilestoneViewModel> lastAppliedCandidate = Optional.empty();
int rangeEnd = latestMilestone.index();
int rangeStart = snapshotProvider.getInitialSnapshot().getIndex() + 1;
while (rangeEnd - rangeStart >= 0) {
// if no candidate found in range -> return last candidate
MilestoneViewModel currentCandidate = getMilestoneInMiddleOfRange(rangeStart, rangeEnd);
if (currentCandidate == null) {
return lastAppliedCandidate;
}
// if the milestone was applied -> continue to search for "later" ones that might have also been applied
if (wasMilestoneAppliedToLedger(currentCandidate)) {
rangeStart = currentCandidate.index() + 1;
lastAppliedCandidate = Optional.of(currentCandidate);
}
// if the milestone was not applied -> continue to search for "earlier" ones
else {
rangeEnd = currentCandidate.index() - 1;
}
}
return lastAppliedCandidate;
}
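// Example trace (hypothetical indexes): initial snapshot #100, latest milestone #200:
//   range [101..200] -> probe ~#150: applied     -> continue in [151..200]
//   range [151..200] -> probe ~#175: not applied -> continue in [151..174]
//   ... until the range is empty; the last probe that was applied is returned.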
/**
* <p>
* Determines the milestone in the middle of the range defined by {@code rangeStart} and {@code rangeEnd}.
* </p>
* <p>
* It is used by the binary search algorithm of {@link #findLatestProcessedSolidMilestoneInDatabase()}. It first
* calculates the index that represents the middle of the range and then tries to find the milestone that is closest
* to this index.
* </p>
* Note: We start looking for younger milestones first, because most of the time the latest processed solid
* milestone is close to the end.
*
* @param rangeStart the milestone index representing the start of our search range
* @param rangeEnd the milestone index representing the end of our search range
* @return the milestone that is closest to the middle of the given range or {@code null} if no milestone can be
* found
* @throws Exception if anything unexpected happens while trying to get the milestone
*/
private MilestoneViewModel getMilestoneInMiddleOfRange(int rangeStart, int rangeEnd) throws Exception {
int range = rangeEnd - rangeStart;
int middleOfRange = rangeEnd - range / 2;
MilestoneViewModel milestone = MilestoneViewModel.findClosestNextMilestone(tangle, middleOfRange - 1, rangeEnd);
if (milestone == null) {
milestone = MilestoneViewModel.findClosestPrevMilestone(tangle, middleOfRange, rangeStart);
}
return milestone;
}
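// Example (hypothetical): rangeStart = 10, rangeEnd = 20 -> middleOfRange = 15. We first
// look for the closest milestone with index >= 15 (up to 20) and only fall back to the
// closest milestone below it (down to 10) if none exists.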
/**
* <p>
* Checks if the milestone was applied to the ledger at some point in the past (before a restart of IRI).
* </p>
* <p>
* Since the {@code snapshotIndex} value is used as a flag to determine if the milestone was already applied to the
* ledger, we can use it to determine if it was processed by IRI in the past. If this value is set we should also
* have a corresponding {@link StateDiff} entry in the database.
* </p>
*
* @param milestone the milestone that shall be checked
* @return {@code true} if the milestone has been processed by IRI before and {@code false} otherwise
* @throws Exception if anything unexpected happens while checking the milestone
*/
private boolean wasMilestoneAppliedToLedger(MilestoneViewModel milestone) throws Exception {
TransactionViewModel milestoneTransaction = TransactionViewModel.fromHash(tangle, milestone.getHash());
return milestoneTransaction.getType() != TransactionViewModel.PREFILLED_SLOT &&
milestoneTransaction.snapshotIndex() != 0;
}
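// i.e. the transaction must actually be present in the db (not just a PREFILLED_SLOT
// placeholder for a requested transaction) and must carry a non-zero snapshotIndex.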
/**
* This method implements the logic described by {@link #updateMilestoneIndexOfMilestoneTransactions(Hash, int)} but
* accepts some additional parameters that allow it to be reused by different parts of this service.
*
* @param milestoneHash the hash of the transaction
* @param correctIndex the milestone index of the milestone that would be set if all transactions are marked
* correctly
* @param newIndex the milestone index that shall be set
* @param processedTransactions a set of transactions that have been processed already (for the recursive calls)
* @throws MilestoneException if anything unexpected happens while updating the milestone index
*/
private void updateMilestoneIndexOfMilestoneTransactions(Hash milestoneHash, int correctIndex, int newIndex,
Set<Hash> processedTransactions) throws MilestoneException {
processedTransactions.add(milestoneHash);
Set<Integer> inconsistentMilestones = new HashSet<>();
Set<TransactionViewModel> transactionsToUpdate = new HashSet<>();
try {
prepareMilestoneIndexUpdate(TransactionViewModel.fromHash(tangle, milestoneHash), correctIndex, newIndex,
inconsistentMilestones, transactionsToUpdate);
DAGHelper.get(tangle).traverseApprovees(
milestoneHash,
currentTransaction -> {
// if the transaction was confirmed by a PREVIOUS milestone -> check if we have a back-referencing
// transaction and abort the traversal
if (isTransactionConfirmed(currentTransaction, correctIndex - 1)) {
patchSolidEntryPointsIfNecessary(snapshotProvider.getInitialSnapshot(), currentTransaction);
return false;
}
return true;
},
currentTransaction -> prepareMilestoneIndexUpdate(currentTransaction, correctIndex, newIndex,
inconsistentMilestones, transactionsToUpdate),
processedTransactions
);
} catch (Exception e) {
throw new MilestoneException("error while updating the milestone index", e);
}
for(int inconsistentMilestoneIndex : inconsistentMilestones) {
resetCorruptedMilestone(inconsistentMilestoneIndex, processedTransactions);
}
for (TransactionViewModel transactionToUpdate : transactionsToUpdate) {
updateMilestoneIndexOfSingleTransaction(transactionToUpdate, newIndex);
}
}
/**
* <p>
* This method resets the {@code milestoneIndex} of a single transaction.
* </p>
* <p>
* In addition to setting the corresponding value, we also publish a message to the ZeroMQ message provider, which
* allows external recipients to get informed about this change.
* </p>
*
* @param transaction the transaction that shall have its {@code milestoneIndex} reset
* @param index the milestone index that is set for the given transaction
* @throws MilestoneException if anything unexpected happens while updating the transaction
*/
private void updateMilestoneIndexOfSingleTransaction(TransactionViewModel transaction, int index) throws
MilestoneException {
try {
transaction.setSnapshot(tangle, snapshotProvider.getInitialSnapshot(), index);
} catch (Exception e) {
throw new MilestoneException("error while updating the snapshotIndex of " + transaction, e);
}
tangle.publish("%s %s %d sn", transaction.getAddressHash(), transaction.getHash(), index);
tangle.publish("sn %d %s %s %s %s %s", index, transaction.getHash(), transaction.getAddressHash(),
transaction.getTrunkTransactionHash(), transaction.getBranchTransactionHash(),
transaction.getBundleHash());
tangle.publish("sn_trytes %s %s %d", Converter.trytes(transaction.trits()), transaction.getHash(),
transaction.getTransaction().snapshot.get());
}
/**
* <p>
* This method prepares the update of the milestone index by checking the current {@code snapshotIndex} of the given
* transaction.
* </p>
* <p>
* If the {@code snapshotIndex} is higher than the "correct one", we know that we applied the milestones in the
* wrong order and need to reset the corresponding milestone that wrongly approved this transaction. We therefore
* add its index to the {@code corruptMilestones} set.
* </p>
* <p>
* If the milestone does not have the new value set already we add it to the set of {@code transactionsToUpdate} so
* it can be updated by the caller accordingly.
* </p>
*
* @param transaction the transaction that shall get its milestoneIndex updated
* @param correctMilestoneIndex the milestone index that this transaction should be associated to (the index of the
* milestone that should "confirm" this transaction)
* @param newMilestoneIndex the new milestone index that we want to assign (either 0 or the correctMilestoneIndex)
* @param corruptMilestones a set that is used to collect all corrupt milestones indexes [output parameter]
* @param transactionsToUpdate a set that is used to collect all transactions that need to be updated [output
* parameter]
*/
private void prepareMilestoneIndexUpdate(TransactionViewModel transaction, int correctMilestoneIndex,
int newMilestoneIndex, Set<Integer> corruptMilestones, Set<TransactionViewModel> transactionsToUpdate) {
if(transaction.snapshotIndex() > correctMilestoneIndex) {
corruptMilestones.add(transaction.snapshotIndex());
}
if (transaction.snapshotIndex() != newMilestoneIndex) {
transactionsToUpdate.add(transaction);
}
}
/**
* <p>
* This method patches the solid entry points if a back-referencing transaction is detected.
* </p>
* <p>
* While we iterate through the approvees of a milestone we stop as soon as we arrive at a transaction that has a
* smaller {@code snapshotIndex} than the milestone. If this {@code snapshotIndex} is also smaller than the index of
* the milestone of our local snapshot, we have detected a back-referencing transaction.
* </p>
*
* @param initialSnapshot the initial snapshot holding the solid entry points
* @param transaction the transactions that was referenced by the processed milestone
*/
private void patchSolidEntryPointsIfNecessary(Snapshot initialSnapshot, TransactionViewModel transaction) {
if (transaction.snapshotIndex() <= initialSnapshot.getIndex() && !initialSnapshot.hasSolidEntryPoint(
transaction.getHash())) {
initialSnapshot.getSolidEntryPoints().put(transaction.getHash(), initialSnapshot.getIndex());
}
}
/**
* <p>
* This method is a utility method that checks if the transactions belonging to the potential milestone bundle have
* a valid structure (used during the validation of milestones).
* </p>
* <p>
* It first checks if the bundle has enough transactions to conform to the given {@code securityLevel} and then
* verifies that the {@code branchTransactionHash}es of the signature transactions point to the {@code
* trunkTransactionHash} of the head transaction.
* </p>
*
* @param bundleTransactions all transactions belonging to the milestone
* @param securityLevel the security level used for the signature
* @return {@code true} if the basic structure is valid and {@code false} otherwise
*/
private boolean isMilestoneBundleStructureValid(List<TransactionViewModel> bundleTransactions, int securityLevel) {
if (bundleTransactions.size() <= securityLevel) {
return false;
}
Hash headTransactionHash = bundleTransactions.get(securityLevel).getTrunkTransactionHash();
return bundleTransactions.stream()
.limit(securityLevel)
.map(TransactionViewModel::getBranchTransactionHash)
.allMatch(branchTransactionHash -> branchTransactionHash.equals(headTransactionHash));
}
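// Expected shape for securityLevel = 2 (sketch; H is the trunk of the siblings transaction):
//   bundle[0] signature fragment, branch -> H
//   bundle[1] signature fragment, branch -> H
//   bundle[2] siblings transaction, trunk = H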
/**
* <p>
* This method does the same as {@link #resetCorruptedMilestone(int)} but additionally receives a set of {@code
* processedTransactions} that will allow us to not process the same transactions over and over again while
* resetting additional milestones in recursive calls.
* </p>
* <p>
* It first checks if the desired {@code milestoneIndex} is reachable by this node and then triggers the reset
* by:
* </p>
* <ol>
* <li>resetting the ledger state if it addresses a milestone before the current latest solid milestone</li>
* <li>resetting the {@code milestoneIndex} of all transactions that were confirmed by the current milestone</li>
* <li>deleting the corresponding {@link StateDiff} entry from the database</li>
* </ol>
*
* @param index milestone index that shall be reverted
* @param processedTransactions a set of transactions that have been processed already
* @throws MilestoneException if anything goes wrong while resetting the corrupted milestone
*/
private void resetCorruptedMilestone(int index, Set<Hash> processedTransactions) throws MilestoneException {
if(index <= snapshotProvider.getInitialSnapshot().getIndex()) {
return;
}
log.info("resetting corrupted milestone #" + index);
try {
MilestoneViewModel milestoneToRepair = MilestoneViewModel.get(tangle, index);
if(milestoneToRepair != null) {
if(milestoneToRepair.index() <= snapshotProvider.getLatestSnapshot().getIndex()) {
snapshotService.rollBackMilestones(snapshotProvider.getLatestSnapshot(), milestoneToRepair.index());
}
updateMilestoneIndexOfMilestoneTransactions(milestoneToRepair.getHash(), milestoneToRepair.index(), 0,
processedTransactions);
tangle.delete(StateDiff.class, milestoneToRepair.getHash());
}
} catch (Exception e) {
throw new MilestoneException("failed to repair corrupted milestone with index #" + index, e);
}
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
}
iri | iri-master/src/main/java/com/iota/iri/service/milestone/impl/MilestoneSolidifierImpl.java
package com.iota.iri.service.milestone.impl;
import com.iota.iri.conf.SolidificationConfig;
import com.iota.iri.controllers.AddressViewModel;
import com.iota.iri.controllers.MilestoneViewModel;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.model.persistables.Transaction;
import com.iota.iri.network.TransactionRequester;
import com.iota.iri.service.ledger.LedgerService;
import com.iota.iri.service.milestone.MilestoneRepairer;
import com.iota.iri.service.milestone.MilestoneService;
import com.iota.iri.service.milestone.MilestoneSolidifier;
import com.iota.iri.service.milestone.MilestoneValidity;
import com.iota.iri.service.snapshot.Snapshot;
import com.iota.iri.service.snapshot.SnapshotProvider;
import com.iota.iri.service.validation.TransactionSolidifier;
import com.iota.iri.storage.Tangle;
import com.iota.iri.utils.ASCIIProgressBar;
import com.iota.iri.utils.log.interval.IntervalLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
public class MilestoneSolidifierImpl implements MilestoneSolidifier {
private final static Logger log = LoggerFactory.getLogger(MilestoneSolidifierImpl.class);
private static final IntervalLogger latestMilestoneLogger = new IntervalLogger(MilestoneSolidifierImpl.class);
private static final IntervalLogger latestSolidMilestoneLogger = new IntervalLogger(MilestoneSolidifierImpl.class);
private static final IntervalLogger solidifyLogger = new IntervalLogger(MilestoneSolidifierImpl.class, 10000);
private static final IntervalLogger progressBarLogger = new IntervalLogger(MilestoneSolidifierImpl.class);
// Max size of the solidification queue
private static final int MAX_SIZE = 10;
private Map<Hash, Integer> unsolidMilestones = new ConcurrentHashMap<>();
private Map<Hash, Integer> solidificationQueue = new ConcurrentHashMap<>();
private Map<Integer, Hash> seenMilestones = new ConcurrentHashMap<>();
private Map.Entry<Hash, Integer> oldestMilestoneInQueue = null;
private AtomicInteger latestMilestoneIndex = new AtomicInteger(0);
private AtomicReference<Hash> latestMilestoneHash = new AtomicReference<>(Hash.NULL_HASH);
private AtomicInteger latestSolidMilestone = new AtomicInteger(0);
private TransactionSolidifier transactionSolidifier;
private LedgerService ledgerService;
private SnapshotProvider snapshotProvider;
private Tangle tangle;
private TransactionRequester transactionRequester;
private MilestoneService milestoneService;
private MilestoneRepairer milestoneRepairer;
private SolidificationConfig config;
private SyncProgressInfo syncProgressInfo = new SyncProgressInfo();
/**
* An indicator for whether the solidifier has processed the first batch of seen milestones
*/
private AtomicBoolean initialized = new AtomicBoolean(false);
/**
* A thread to run the milestone solidification thread in
*/
private Thread milestoneSolidifier = new Thread(this::milestoneSolidificationThread, "Milestone Solidifier");
/**
* Constructor for the {@link MilestoneSolidifierImpl}. This class holds milestone objects to be processed for
* solidification. It also tracks the latest solid milestone object.
*
* @param transactionSolidifier Solidification service for transaction objects
* @param tangle DB reference
* @param snapshotProvider SnapshotProvider for fetching local snapshots
* @param ledgerService LedgerService for bootstrapping ledger state
* @param txRequester TransactionRequester to submit missing transactions to
* @param milestoneService MilestoneService for resetting corrupted milestones
* @param config Configuration to determine if additional logging will be used
*/
public MilestoneSolidifierImpl(TransactionSolidifier transactionSolidifier, Tangle tangle,
SnapshotProvider snapshotProvider, LedgerService ledgerService,
TransactionRequester txRequester, MilestoneService milestoneService,
SolidificationConfig config) {
this.transactionSolidifier = transactionSolidifier;
this.snapshotProvider = snapshotProvider;
this.ledgerService = ledgerService;
this.tangle = tangle;
this.transactionRequester = txRequester;
this.milestoneService = milestoneService;
this.config = config;
this.milestoneRepairer = new MilestoneRepairerImpl(milestoneService);
}
@Override
public void start() {
try {
bootStrapSolidMilestones();
ledgerService.restoreLedgerState();
Snapshot latestSnapshot = snapshotProvider.getLatestSnapshot();
setLatestMilestone(latestSnapshot.getHash(), latestSnapshot.getIndex());
logChange(snapshotProvider.getInitialSnapshot().getIndex());
syncProgressInfo.setSyncMilestoneStartIndex(snapshotProvider.getInitialSnapshot().getIndex());
milestoneSolidifier.start();
} catch (Exception e) {
log.error("Error starting milestone solidification thread", e);
}
}
private void milestoneSolidificationThread() {
while(!Thread.currentThread().isInterrupted()) {
try {
processSolidifyQueue();
checkLatestSolidMilestone();
if (getLatestMilestoneIndex() > getLatestSolidMilestoneIndex()) {
solidifyLog();
}
Thread.sleep(100);
} catch (InterruptedException e) {
log.info("Milestone Thread interrupted. Shutting Down.");
} catch (Exception e) {
log.error("Error running milestone solidification thread", e);
}
}
}
@Override
public void shutdown(){
milestoneSolidifier.interrupt();
}
/**
* Scans through milestones in the {@link #unsolidMilestones} queue and refills the {@link #solidificationQueue}.
* It then iterates through this queue to determine whether each milestone has already been seen and validated. If a
* milestone is found in the {@link #seenMilestones} mapping, it is removed from the solidification pool. Otherwise,
* the {@link #oldestMilestoneInQueue} is updated by iterating through whatever milestones are still present in
* the {@link #solidificationQueue}.
*/
private void scanMilestonesInQueue() {
// refill the solidification queue with solidification candidates sorted by index
if (unsolidMilestones.size() > 0) {
solidificationQueue.clear();
unsolidMilestones.entrySet().stream()
.sorted(Map.Entry.comparingByValue())
.forEach(milestone -> {
//If a milestone candidate has a lower index than the latest solid milestone, remove it from the queues.
if (milestone.getValue() < getLatestSolidMilestoneIndex()){
removeFromQueues(milestone.getKey());
} else {
if (solidificationQueue.size() < MAX_SIZE) {
solidificationQueue.put(milestone.getKey(), milestone.getValue());
}
}
});
}
// update the oldest milestone in queue. First reset oldestMilestoneInQueue before scanning queue.
oldestMilestoneInQueue = null;
for (Map.Entry<Hash, Integer> currentEntry : solidificationQueue.entrySet()) {
if (!seenMilestones.containsKey(currentEntry.getValue())) {
updateOldestMilestone(currentEntry.getKey(), currentEntry.getValue());
}
}
}
private void updateOldestMilestone(Hash milestoneHash, int milestoneIndex) {
if (oldestMilestoneInQueue == null || oldestMilestoneInQueue.getValue() > milestoneIndex) {
oldestMilestoneInQueue = new AbstractMap.SimpleEntry<>(milestoneHash, milestoneIndex);
}
}
/**
* Iterates through the {@link #solidificationQueue} to check for validity. Valid transactions are then submitted to
* the {@link #seenMilestones} mapping if solid, and the {@link TransactionSolidifier} if not. Invalid transactions
* are removed from the solidification queues.
*/
private void processSolidifyQueue() throws Exception {
Iterator<Map.Entry<Hash, Integer>> iterator = solidificationQueue.entrySet().iterator();
Map.Entry<Hash, Integer> milestone;
while(!Thread.currentThread().isInterrupted() && iterator.hasNext()) {
milestone = iterator.next();
Hash milestoneHash = milestone.getKey();
int milestoneIndex = milestone.getValue();
TransactionViewModel milestoneCandidate = TransactionViewModel.fromHash(tangle, milestoneHash);
MilestoneValidity validity = milestoneService.validateMilestone(milestoneCandidate, milestoneIndex);
switch(validity) {
case VALID:
milestoneCandidate.isMilestone(tangle, snapshotProvider.getInitialSnapshot(), true);
registerNewMilestone(getLatestMilestoneIndex(), milestoneIndex, milestoneHash);
if (milestoneCandidate.isSolid()) {
removeFromQueues(milestoneHash);
addSeenMilestone(milestoneHash, milestoneIndex);
} else {
transactionSolidifier.addToSolidificationQueue(milestoneHash);
}
break;
case INCOMPLETE:
transactionSolidifier.addToSolidificationQueue(milestoneHash);
break;
case INVALID:
removeFromQueues(milestone.getKey());
}
}
scanMilestonesInQueue();
}
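// Dispatch summary per candidate (mirrors the switch above):
//   VALID + solid     -> move to seenMilestones
//   VALID + not solid -> hand to the TransactionSolidifier
//   INCOMPLETE        -> hand to the TransactionSolidifier
//   INVALID           -> drop from all queues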
/**
* Tries to solidify the next available milestone index. If successful, the milestone will be removed from the
* {@link #seenMilestones} queue, and any milestone objects below that index in the {@link #unsolidMilestones} queue
* will be removed as well.
*/
private void checkLatestSolidMilestone() {
try {
if (getLatestMilestoneIndex() > getLatestSolidMilestoneIndex()) {
int nextMilestone = getLatestSolidMilestoneIndex() + 1;
boolean isSyncing = false;
if (seenMilestones.containsKey(nextMilestone)) {
TransactionViewModel milestone = TransactionViewModel.fromHash(tangle,
seenMilestones.get(nextMilestone));
if (milestone.isSolid()) {
isSyncing = true;
updateSolidMilestone(getLatestSolidMilestoneIndex());
transactionSolidifier.addToPropagationQueue(milestone.getHash());
} else {
transactionSolidifier.addToSolidificationQueue(milestone.getHash());
}
}
if (!seenMilestones.isEmpty() && !isSyncing) {
checkOldestSeenMilestoneSolidity();
}
}
} catch (Exception e) {
log.error(e.getMessage(), e);
}
}
private void solidifyLog() {
if (!unsolidMilestones.isEmpty() && oldestMilestoneInQueue != null) {
int milestoneIndex = oldestMilestoneInQueue.getValue();
solidifyLogger.info("Solidifying milestone # " + milestoneIndex + " - [ LSM: " + latestSolidMilestone +
" LM: " + latestMilestoneIndex + " ] - [ Remaining: " +
(getLatestMilestoneIndex() - getLatestSolidMilestoneIndex()) + " Queued: " +
(seenMilestones.size() + unsolidMilestones.size()) + " ]");
}
}
/**
* Sets the {@link #latestSolidMilestone} index to the latest snapshot index, then iterates through all milestone
* objects present in the db above that index, registering each one and adding it to the pool of solidification
* candidates. Once completed, the {@link #initialized} flag is set to true.
*
* @throws Exception if anything unexpected happens while bootstrapping the milestones
*/
private void bootStrapSolidMilestones() throws Exception {
setLatestSolidMilestone(snapshotProvider.getLatestSnapshot().getIndex());
int index = getLatestSolidMilestoneIndex();
MilestoneViewModel nextMilestone;
log.info("Starting milestone bootstrapping...");
while ((nextMilestone = MilestoneViewModel.get(tangle, ++index)) != null) {
registerNewMilestone(getLatestMilestoneIndex(), index, nextMilestone.getHash());
addMilestoneCandidate(nextMilestone.getHash(), nextMilestone.index());
}
log.info("Done bootstrapping milestones.");
initialized.set(true);
}
/**
* Checks the seen milestones queue for the lowest index available, and tries to solidify it. If it is already
* solid but not syncing, there are milestone objects in the db that have not been processed through the solidifier
* (likely due to shutting down during synchronisation). If this is the case, an address scan is performed to
* find and process all present milestone transactions through the milestone solidifier.
*/
private void checkOldestSeenMilestoneSolidity() {
Optional<Integer> lowestIndex = seenMilestones.keySet().stream().min(Integer::compareTo);
if (lowestIndex.isPresent()) {
if (lowestIndex.get() <= getLatestSolidMilestoneIndex()) {
removeCurrentAndLowerSeenMilestone(lowestIndex.get());
} else {
if (!transactionSolidifier.addMilestoneToSolidificationQueue(seenMilestones.get(lowestIndex.get())) &&
(seenMilestones.size() + unsolidMilestones.size()) <
(getLatestMilestoneIndex() - getLatestSolidMilestoneIndex())) {
scanAddressHashes();
}
}
}
}
private void scanAddressHashes() {
try{
log.info("Scanning existing milestone candidates...");
TransactionViewModel milestoneCandidate;
int index;
int processed = 0;
for (Hash hash: AddressViewModel.load(tangle, config.getCoordinator()).getHashes()) {
processed++;
milestoneCandidate = TransactionViewModel.fromHash(tangle, hash);
index = milestoneService.getMilestoneIndex(milestoneCandidate);
if (!seenMilestones.containsValue(hash) &&
milestoneCandidate.getCurrentIndex() == 0 &&
index > getLatestSolidMilestoneIndex() &&
milestoneService.validateMilestone(milestoneCandidate, index) == MilestoneValidity.VALID) {
addMilestoneCandidate(hash, index);
}
if (processed % 5000 == 0) {
log.info(processed + " Milestones Processed...");
}
}
log.info("Done scanning existing milestone candidates.");
} catch (Exception e) {
log.warn("Error scanning coo address hashes.", e);
}
}
private void updateSolidMilestone(int currentSolidMilestoneIndex) throws Exception {
int nextMilestoneIndex = currentSolidMilestoneIndex + 1;
MilestoneViewModel nextSolidMilestone = MilestoneViewModel.get(tangle, nextMilestoneIndex);
if (nextSolidMilestone != null) {
if (applySolidMilestoneToLedger(nextSolidMilestone)) {
logChange(currentSolidMilestoneIndex);
if (nextMilestoneIndex == getLatestMilestoneIndex()) {
transactionRequester.clearRecentlyRequestedTransactions();
}
removeCurrentAndLowerSeenMilestone(nextMilestoneIndex);
}
}
}
/**
* {@inheritDoc}
*/
@Override
public void addMilestoneCandidate(Hash milestoneHash, int milestoneIndex) {
if (!unsolidMilestones.containsKey(milestoneHash) && !seenMilestones.containsKey(milestoneIndex) &&
milestoneIndex > getLatestSolidMilestoneIndex()) {
unsolidMilestones.put(milestoneHash, milestoneIndex);
}
}
/**
* {@inheritDoc}
*/
@Override
public void addSeenMilestone(Hash milestoneHash, int milestoneIndex) {
if (!seenMilestones.containsKey(milestoneIndex)) {
seenMilestones.put(milestoneIndex, milestoneHash);
}
removeFromQueues(milestoneHash);
}
/**
* {@inheritDoc}
*/
@Override
public List<Map.Entry<Hash, Integer>> getOldestMilestonesInQueue() {
return new ArrayList<>(solidificationQueue.entrySet());
}
/**
* {@inheritDoc}
*/
@Override
public void removeFromQueues(Hash milestoneHash) {
unsolidMilestones.remove(milestoneHash);
solidificationQueue.remove(milestoneHash);
}
/**
* {@inheritDoc}
*/
@Override
public int getLatestMilestoneIndex() {
return latestMilestoneIndex.get();
}
/**
* {@inheritDoc}
*/
@Override
public Hash getLatestMilestoneHash() {
return latestMilestoneHash.get();
}
private int getLatestSolidMilestoneIndex() {
return latestSolidMilestone.get();
}
private void setLatestSolidMilestone(int milestoneIndex) {
if (milestoneIndex > latestSolidMilestone.get()) {
this.latestSolidMilestone.set(milestoneIndex);
}
}
/**
* {@inheritDoc}
*/
@Override
public void setLatestMilestone(Hash milestoneHash, int milestoneIndex) {
if (milestoneIndex > latestMilestoneIndex.get()) {
this.latestMilestoneHash.set(milestoneHash);
this.latestMilestoneIndex.set(milestoneIndex);
}
}
/**
* {@inheritDoc}
*/
@Override
public boolean isInitialScanComplete() {
return initialized.get();
}
/**
* {@inheritDoc}
*/
@Override
public void registerNewMilestone(int oldMilestoneIndex, int newMilestoneIndex, Hash newMilestoneHash) {
if (newMilestoneIndex > oldMilestoneIndex) {
setLatestMilestone(newMilestoneHash, newMilestoneIndex);
tangle.publish("lmi %d %d", oldMilestoneIndex, newMilestoneIndex);
latestMilestoneLogger.info("Latest milestone has changed from #" + oldMilestoneIndex + " to #" + newMilestoneIndex);
}
}
/**
* Remove milestone candidate from the {@link #seenMilestones} queue, then iterate through the
* {@link #unsolidMilestones} queue and remove any milestones with an index below the provided index.
*
* @param solidMilestoneIndex The milestone index to remove
*/
private void removeCurrentAndLowerSeenMilestone(int solidMilestoneIndex) {
seenMilestones.remove(solidMilestoneIndex);
for (Iterator<Map.Entry<Hash, Integer>> iterator = unsolidMilestones.entrySet().iterator();
!Thread.currentThread().isInterrupted() && iterator.hasNext();) {
Map.Entry<Hash, Integer> nextMilestone = iterator.next();
if (nextMilestone.getValue() <= solidMilestoneIndex ||
nextMilestone.getValue() < snapshotProvider.getLatestSnapshot().getInitialIndex()) {
iterator.remove();
}
}
scanMilestonesInQueue();
}
private boolean applySolidMilestoneToLedger(MilestoneViewModel milestone) throws Exception {
if (ledgerService.applyMilestoneToLedger(milestone)) {
if (milestoneRepairer.isRepairRunning() && milestoneRepairer.isRepairSuccessful(milestone)) {
milestoneRepairer.stopRepair();
}
syncProgressInfo.addMilestoneApplicationTime();
return true;
} else {
milestoneRepairer.repairCorruptedMilestone(milestone);
return false;
}
}
private void logChange(int prevSolidMilestoneIndex) {
Snapshot latestSnapshot = snapshotProvider.getLatestSnapshot();
int nextLatestSolidMilestone = latestSnapshot.getIndex();
setLatestSolidMilestone(nextLatestSolidMilestone);
if (prevSolidMilestoneIndex == nextLatestSolidMilestone) {
return;
}
latestSolidMilestoneLogger.info("Latest SOLID milestone index changed from #" + prevSolidMilestoneIndex + " to #" + nextLatestSolidMilestone);
tangle.publish("lmsi %d %d", prevSolidMilestoneIndex, nextLatestSolidMilestone);
tangle.publish("lmhs %s", latestMilestoneHash);
if (!config.isPrintSyncProgressEnabled()) {
return;
}
// only print more sophisticated progress if we are coming from a more unsynced state
if (getLatestMilestoneIndex() - nextLatestSolidMilestone < 1) {
syncProgressInfo.setSyncMilestoneStartIndex(nextLatestSolidMilestone);
syncProgressInfo.resetMilestoneApplicationTimes();
return;
}
int estSecondsToBeSynced = syncProgressInfo.computeEstimatedTimeToSyncUpSeconds(getLatestMilestoneIndex(),
nextLatestSolidMilestone);
StringBuilder progressSB = new StringBuilder();
// add progress bar
progressSB.append(ASCIIProgressBar.getProgressBarString(syncProgressInfo.getSyncMilestoneStartIndex(),
getLatestMilestoneIndex(), nextLatestSolidMilestone));
// add lsm to lm
progressSB.append(String.format(" [LSM %d / LM %d - remaining: %d]", nextLatestSolidMilestone,
getLatestMilestoneIndex(), getLatestMilestoneIndex() - nextLatestSolidMilestone));
// add estimated time to get fully synced
if (estSecondsToBeSynced != -1) {
progressSB.append(String.format(" - est. seconds to get synced: %d", estSecondsToBeSynced));
}
progressBarLogger.info(progressSB.toString());
}
/**
* Holds variables containing information needed for sync progress calculation.
*/
private static class SyncProgressInfo {
/**
* The actual start milestone index from which the node started from when syncing up.
*/
private int syncMilestoneStartIndex;
/**
* Used to calculate the average time needed to apply a milestone.
*/
private BlockingQueue<Long> lastMilestoneApplyTimes = new ArrayBlockingQueue<>(100);
/**
* Gets the milestone sync start index.
*
* @return the milestone sync start index
*/
int getSyncMilestoneStartIndex() {
return syncMilestoneStartIndex;
}
/**
* Sets the milestone sync start index.
*
* @param syncMilestoneStartIndex the value to set
*/
void setSyncMilestoneStartIndex(int syncMilestoneStartIndex) {
this.syncMilestoneStartIndex = syncMilestoneStartIndex;
}
/**
* Adds the current time as a time where a milestone got applied to the ledger state.
*/
void addMilestoneApplicationTime() {
if (lastMilestoneApplyTimes.remainingCapacity() == 0) {
lastMilestoneApplyTimes.remove();
}
lastMilestoneApplyTimes.add(System.currentTimeMillis());
}
/**
* Clears the records of times when milestones got applied.
*/
void resetMilestoneApplicationTimes() {
lastMilestoneApplyTimes.clear();
}
/**
* Computes the estimated time (seconds) needed to get synced up by looking at the last N milestone application
* times and the current solid and latest milestone indices.
*
* @param latestMilestoneIndex the current latest known milestone index
* @param latestSolidMilestoneIndex the milestone index currently applied to the ledger
* @return the number of seconds needed to get synced up or -1 if not enough data is available
*/
int computeEstimatedTimeToSyncUpSeconds(int latestMilestoneIndex, int latestSolidMilestoneIndex) {
// compute average time needed to apply a milestone
Object[] times = lastMilestoneApplyTimes.toArray();
long sumDelta = 0;
double avgMilestoneApplyMillisec;
if (times.length > 1) {
// compute delta sum (there are times.length - 1 deltas between consecutive timestamps)
for (int i = times.length - 1; i > 0; i--) {
sumDelta += ((Long)times[i]) - ((Long)times[i - 1]);
}
avgMilestoneApplyMillisec = (double) sumDelta / (double) (times.length - 1);
} else {
return -1;
}
return (int) ((avgMilestoneApplyMillisec / 1000) * (latestMilestoneIndex - latestSolidMilestoneIndex));
}
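// Worked example (hypothetical values): 5 recorded timestamps, each 2000 ms apart
// -> sumDelta = 8000 ms over 4 deltas -> avg = 2000 ms per milestone; with
// latestMilestoneIndex = 1100 and latestSolidMilestoneIndex = 1000 the estimate is
// 2 s * 100 = 200 seconds.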
}
}
iri | iri-master/src/main/java/com/iota/iri/service/milestone/impl/SeenMilestonesRetrieverImpl.java
package com.iota.iri.service.milestone.impl;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.network.TransactionRequester;
import com.iota.iri.service.milestone.SeenMilestonesRetriever;
import com.iota.iri.service.snapshot.SnapshotProvider;
import com.iota.iri.storage.Tangle;
import com.iota.iri.utils.log.interval.IntervalLogger;
import com.iota.iri.utils.thread.DedicatedScheduledExecutorService;
import com.iota.iri.utils.thread.SilentScheduledExecutorService;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
/**
* <p>
* Creates a manager that proactively requests the missing "seen milestones" (defined in the local snapshot file).
* </p>
* <p>
* It stores the passed in dependencies in their corresponding properties and then makes a copy of the {@code
* seenMilestones} of the initial snapshot which will consequently be requested.
* </p>
* <p>
* Once the manager has finished requesting all "seen milestones" it will automatically {@link #shutdown()} (if it
* was {@link #start()}ed before).
* </p>
*/
public class SeenMilestonesRetrieverImpl implements SeenMilestonesRetriever {
/**
* Defines how far ahead of the latest solid milestone we are requesting the missing milestones.
*/
private static final int RETRIEVE_RANGE = 50;
/**
* Defines the interval (in milliseconds) in which the background worker will check for new milestones to
* request.
*/
private static final int RESCAN_INTERVAL = 1000;
/**
* Holds the logger of this class (a rate limited logger that doesn't spam the CLI output).
*/
private static final IntervalLogger log = new IntervalLogger(SeenMilestonesRetrieverImpl.class);
/**
* Tangle object which acts as a database interface.
*/
private final Tangle tangle;
/**
* The snapshot provider which gives us access to the relevant snapshots to calculate our range.
*/
private final SnapshotProvider snapshotProvider;
/**
* Holds a reference to the {@link TransactionRequester} that allows us to issue requests for the missing
* milestones.
*/
private final TransactionRequester transactionRequester;
/**
* Holds a reference to the manager of the background worker.
*/
private final SilentScheduledExecutorService executorService = new DedicatedScheduledExecutorService(
"Seen Milestones Retriever", log.delegate());
/**
* The list of seen milestones that need to be requested.
*/
private Map<Hash, Integer> seenMilestones;
/**
* @param tangle Tangle object which acts as a database interface
* @param snapshotProvider snapshot provider which gives us access to the relevant snapshots to calculate our range
* @param transactionRequester allows us to issue requests for the missing milestones
*/
public SeenMilestonesRetrieverImpl(Tangle tangle, SnapshotProvider snapshotProvider,
TransactionRequester transactionRequester) {
this.tangle = tangle;
this.snapshotProvider = snapshotProvider;
this.transactionRequester = transactionRequester;
}
@Override
public void init() {
seenMilestones = new ConcurrentHashMap<>(snapshotProvider.getInitialSnapshot().getSeenMilestones());
}
/**
* {@inheritDoc}
*
* <p>
* It simply iterates over the set of seenMilestones and requests them if they are in the range of
* [genesisMilestone ... latestSolidMilestone + RETRIEVE_RANGE]. Milestones that are older than this range get
* deleted because they are irrelevant for the ledger state, and milestones that are younger than this range are
* ignored, to be processed later.
* </p>
* <p>
* This gives the node enough resources to solidify the next milestones without getting its requests queue filled
* with milestone requests that will become relevant only much later (this achieves a linear sync speed).
* </p>
* <p>
* Note: If no more seen milestones have to be requested, this manager shuts down automatically.
* </p>
*/
@Override
public void retrieveSeenMilestones() {
seenMilestones.forEach((milestoneHash, milestoneIndex) -> {
try {
if (milestoneIndex <= snapshotProvider.getInitialSnapshot().getIndex()) {
seenMilestones.remove(milestoneHash);
} else if (milestoneIndex < snapshotProvider.getLatestSnapshot().getIndex() + RETRIEVE_RANGE) {
TransactionViewModel milestoneTransaction = TransactionViewModel.fromHash(tangle, milestoneHash);
if (milestoneTransaction.getType() == TransactionViewModel.PREFILLED_SLOT &&
!transactionRequester.isTransactionRequested(milestoneHash)) {
transactionRequester.requestTransaction(milestoneHash);
}
// the transactionRequester will never drop milestone requests - we can therefore remove it from the
// list of milestones to request
seenMilestones.remove(milestoneHash);
}
log.info("Requesting seen milestones (" + seenMilestones.size() + " left) ...");
} catch (Exception e) {
log.error("unexpected error while processing the seen milestones", e);
}
});
if (seenMilestones.isEmpty()) {
log.info("Requesting seen milestones ... [DONE]");
shutdown();
}
}
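// Range sketch (hypothetical indexes): initial snapshot #100, latest snapshot #150,
// RETRIEVE_RANGE = 50:
//   index <= 100      -> dropped (already covered by the snapshot)
//   100 < index < 200 -> requested if still missing, then removed from the list
//   index >= 200      -> kept to be requested in a later pass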
@Override
public void start() {
executorService.silentScheduleWithFixedDelay(this::retrieveSeenMilestones, 0, RESCAN_INTERVAL,
TimeUnit.MILLISECONDS);
}
@Override
public void shutdown() {
executorService.shutdownNow();
}
}
iri | iri-master/src/main/java/com/iota/iri/service/restserver/ApiProcessor.java
package com.iota.iri.service.restserver;
import java.net.InetAddress;
import com.iota.iri.service.dto.AbstractResponse;
/**
*
* Interface that defines the API call handling
*
*/
@FunctionalInterface
public interface ApiProcessor {
/**
* Processes the given request and produces a response.
*
* @param request the request body, unprocessed
* @param inetAddress the address from the API caller
* @return The response for this request
*/
AbstractResponse processFunction(String request, InetAddress inetAddress);
}
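// Usage sketch (hypothetical wiring; 'api' stands for whatever service produces the
// responses): being a functional interface, ApiProcessor can be supplied as a lambda:
//   connector.init((body, sourceAddress) -> api.process(body, sourceAddress));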
iri | iri-master/src/main/java/com/iota/iri/service/restserver/RestConnector.java
package com.iota.iri.service.restserver;
/**
*
* Connector interface which contains logic for starting and stopping a REST server
*
*/
public interface RestConnector {
/**
* Initializes the REST server.
*
* @param processFunction the function/class we call after dependency specific handling
*/
void init(ApiProcessor processFunction);
/**
* Starts the server.
* If {@link #init(ApiProcessor)} has not been called, nothing happens
*/
void start();
/**
* Stops the REST server, so no more API calls can be made
*/
void stop();
}
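// Lifecycle sketch (hedged; 'apiConfig' and 'processor' are assumed to exist):
//   RestConnector connector = new RestEasy(apiConfig);
//   connector.init(processor);   // prepare the deployment
//   connector.start();           // open the HTTP listener
//   connector.stop();            // shut the server down again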
iri | iri-master/src/main/java/com/iota/iri/service/restserver/resteasy/RestEasy.java
package com.iota.iri.service.restserver.resteasy;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.ws.rs.ApplicationPath;
import javax.ws.rs.core.Application;
import org.jboss.resteasy.plugins.server.undertow.UndertowJaxrsServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xnio.channels.StreamSinkChannel;
import org.xnio.streams.ChannelInputStream;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.iota.iri.Iota;
import com.iota.iri.conf.APIConfig;
import com.iota.iri.service.dto.AbstractResponse;
import com.iota.iri.service.dto.AccessLimitedResponse;
import com.iota.iri.service.dto.ErrorResponse;
import com.iota.iri.service.dto.ExceptionResponse;
import com.iota.iri.service.restserver.ApiProcessor;
import com.iota.iri.service.restserver.RestConnector;
import com.iota.iri.utils.IotaIOUtils;
import com.iota.iri.utils.MapIdentityManager;
import io.undertow.Handlers;
import io.undertow.Undertow;
import io.undertow.security.api.AuthenticationMechanism;
import io.undertow.security.api.AuthenticationMode;
import io.undertow.security.handlers.AuthenticationCallHandler;
import io.undertow.security.handlers.AuthenticationConstraintHandler;
import io.undertow.security.handlers.AuthenticationMechanismsHandler;
import io.undertow.security.handlers.SecurityInitialHandler;
import io.undertow.security.idm.IdentityManager;
import io.undertow.security.impl.BasicAuthenticationMechanism;
import io.undertow.server.HandlerWrapper;
import io.undertow.server.HttpHandler;
import io.undertow.server.HttpServerExchange;
import io.undertow.servlet.api.DeploymentInfo;
import io.undertow.util.HeaderMap;
import io.undertow.util.Headers;
import io.undertow.util.HttpString;
import io.undertow.util.Methods;
import io.undertow.util.MimeMappings;
import io.undertow.util.StatusCodes;
/**
*
* REST connector based on RestEasy, which uses a JAX-RS Undertow server under the hood.
* This does not actually expose any REST endpoints, but rather handles all incoming connections from one point.
*
*/
@ApplicationPath("")
public class RestEasy extends Application implements RestConnector {
private static final Logger log = LoggerFactory.getLogger(RestEasy.class);
private final Gson gson = new GsonBuilder().create();
private UndertowJaxrsServer server;
private DeploymentInfo info;
private ApiProcessor processFunction;
private int maxBodyLength;
private String remoteAuth;
private String apiHost;
private int port;
/**
* Required for every {@link Application}.
* Will be instantiated once and is supposed to provide the REST API classes.
* <b>Do not call manually.</b>
*
* We handle all calls manually, without ever using the {@link Application} functionality, via a
* {@link DeploymentInfo#addInnerHandlerChainWrapper}.
*/
public RestEasy() {
}
/**
* Creates a RestEasy connector from the given configuration.
*
* @param configuration the {@link APIConfig} providing the API host, port, max body length and remote auth settings
*/
public RestEasy(APIConfig configuration) {
maxBodyLength = configuration.getMaxBodyLength();
port = configuration.getPort();
apiHost = configuration.getApiHost();
remoteAuth = configuration.getRemoteAuth();
}
/**
* Prepares the REST server for usage. Until this method is called, no HTTP requests can be made.
* The deployment is prepared as follows:
* <ol>
* <li>
* Store the passed in {@link ApiProcessor} which will handle the actual API calls.
* </li>
* <li>
* Build the {@link UndertowJaxrsServer} deployment for this connector.
* If {@link APIConfig#getRemoteAuth()} is defined, remote access is blocked for anyone except
* those with the credentials defined in {@link APIConfig#getRemoteAuth()}.
* This is done with {@link BasicAuthenticationMechanism} in a {@link AuthenticationMode#PRO_ACTIVE} mode.
* By default, this authentication is disabled.
* </li>
* <li>
* Register the handler chain that answers OPTIONS requests directly and dispatches all other
* requests to a worker thread for processing.
* </li>
* </ol>
* The server only starts accepting HTTP API requests once {@link #start()} is called.
*/
@Override
public void init(ApiProcessor processFunction) {
log.debug("Binding JSON-REST API Undertow server on {}:{}", apiHost, port);
this.processFunction = processFunction;
server = new UndertowJaxrsServer();
info = server.undertowDeployment(RestEasy.class);
info.setDisplayName("Iota Realm");
info.setDeploymentName("Iota Realm");
info.setContextPath("/");
info.addSecurityWrapper(new HandlerWrapper() {
@Override
public HttpHandler wrap(HttpHandler toWrap) {
String credentials = remoteAuth;
if (credentials == null || credentials.isEmpty()) {
return toWrap;
}
final Map<String, char[]> users = new HashMap<>(2);
users.put(credentials.split(":")[0], credentials.split(":")[1].toCharArray());
IdentityManager identityManager = new MapIdentityManager(users);
HttpHandler handler = toWrap;
handler = new AuthenticationCallHandler(handler);
handler = new AuthenticationConstraintHandler(handler);
final List<AuthenticationMechanism> mechanisms =
Collections.singletonList(new BasicAuthenticationMechanism("Iota Realm"));
handler = new AuthenticationMechanismsHandler(handler, mechanisms);
handler = new SecurityInitialHandler(AuthenticationMode.PRO_ACTIVE, identityManager, handler);
return handler;
}
});
info.addInnerHandlerChainWrapper(handler -> {
return Handlers.path().addPrefixPath("/", new HttpHandler() {
@Override
public void handleRequest(final HttpServerExchange exchange) throws Exception {
HttpString requestMethod = exchange.getRequestMethod();
if (Methods.OPTIONS.equals(requestMethod)) {
String allowedMethods = "GET,HEAD,POST,PUT,DELETE,TRACE,OPTIONS,CONNECT,PATCH";
//return list of allowed methods in response headers
exchange.setStatusCode(StatusCodes.OK);
exchange.getResponseHeaders().put(Headers.CONTENT_TYPE, MimeMappings.DEFAULT_MIME_MAPPINGS.get("txt"));
exchange.getResponseHeaders().put(Headers.CONTENT_LENGTH, 0);
exchange.getResponseHeaders().put(Headers.ALLOW, allowedMethods);
exchange.getResponseHeaders().put(new HttpString("Access-Control-Allow-Origin"), "*");
exchange.getResponseHeaders().put(new HttpString("Access-Control-Allow-Headers"), "User-Agent, Origin, X-Requested-With, Content-Type, Accept, X-IOTA-API-Version");
exchange.getResponseSender().close();
return;
}
if (exchange.isInIoThread()) {
exchange.dispatch(this);
return;
}
processRequest(exchange);
}
});
});
}
/**
* {@inheritDoc}
*/
@Override
public void start() {
if (info != null) {
Undertow.Builder builder = Undertow.builder()
.addHttpListener(port, apiHost);
server.start(builder);
server.deploy(info);
}
}
/**
* {@inheritDoc}
*/
@Override
public void stop() {
server.stop();
}
/**
* Sends the API response back as JSON to the requester.
* Status code of the HTTP request is also set according to the type of response.
* <ul>
* <li>{@link ErrorResponse}: 400</li>
* <li>{@link AccessLimitedResponse}: 401</li>
* <li>{@link ExceptionResponse}: 500</li>
* <li>Default: 200</li>
* </ul>
*
* @param exchange Contains information about what the client sent to us
* @param res The response of the API.
* See {@link #processRequest(HttpServerExchange)}
* and {@link #process(String, InetSocketAddress)} for the different responses in each case.
* @param beginningTime The time when we received the request, in milliseconds.
* This will be used to set the response duration in {@link AbstractResponse#setDuration(Integer)}
* @throws IOException When the connection to the client has been lost. Currently caught and logged.
*/
private void sendResponse(HttpServerExchange exchange, AbstractResponse res, long beginningTime) throws IOException {
res.setDuration((int) (System.currentTimeMillis() - beginningTime));
final String response = gson.toJson(res);
if (res instanceof ErrorResponse) {
// bad request or invalid parameters
exchange.setStatusCode(400);
} else if (res instanceof AccessLimitedResponse) {
// API method not allowed
exchange.setStatusCode(401);
} else if (res instanceof ExceptionResponse) {
// internal error
exchange.setStatusCode(500);
}
setupResponseHeaders(exchange);
ByteBuffer responseBuf = ByteBuffer.wrap(response.getBytes(StandardCharsets.UTF_8));
exchange.setResponseContentLength(responseBuf.array().length);
StreamSinkChannel sinkChannel = exchange.getResponseChannel();
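// Write asynchronously: the listener below is invoked whenever the channel becomes writable and
// keeps writing until the response buffer is drained, then ends the exchange.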
sinkChannel.getWriteSetter().set( channel -> {
if (responseBuf.remaining() > 0) {
try {
sinkChannel.write(responseBuf);
if (responseBuf.remaining() == 0) {
exchange.endExchange();
}
} catch (IOException e) {
log.error("Lost connection to client - cannot send response");
exchange.endExchange();
sinkChannel.getWriteSetter().set(null);
}
}
else {
exchange.endExchange();
}
});
sinkChannel.resumeWrites();
}
/**
* <p>
* Processes an API HTTP request.
* No checks have been done until now, except that it is not an OPTIONS request.
* We can be sure that we are in a thread that allows blocking.
* </p>
* <p>
* The request process duration is recorded.
* During this the request gets verified. If it is incorrect, an {@link ErrorResponse} is made.
* Otherwise it is processed in {@link #process(String, InetSocketAddress)}.
* The result is sent back to the requester.
* </p>
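* <p>
* A minimal sketch of a request this handler accepts (the header name is taken from the check below; the
* {@code getNodeInfo} command is an assumed example):
* </p>
* <pre>{@code
* POST / HTTP/1.1
* Content-Type: application/json
* X-IOTA-API-Version: 1
*
* {"command": "getNodeInfo"}
* }</pre>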
*
* @param exchange Contains the data the client sent to us
* @throws IOException If the body of this HTTP request cannot be read
*/
private void processRequest(final HttpServerExchange exchange) throws IOException {
final ChannelInputStream cis = new ChannelInputStream(exchange.getRequestChannel());
exchange.getResponseHeaders().put(Headers.CONTENT_TYPE, "application/json");
final long beginningTime = System.currentTimeMillis();
final String body = IotaIOUtils.toString(cis, StandardCharsets.UTF_8);
AbstractResponse response;
if (!exchange.getRequestHeaders().contains("X-IOTA-API-Version")) {
response = ErrorResponse.create("Invalid API Version");
} else if (body.length() > maxBodyLength) {
response = ErrorResponse.create("Request too long");
} else {
response = this.processFunction.processFunction(body, exchange.getSourceAddress().getAddress());
}
sendResponse(exchange, response, beginningTime);
}
/**
* Updates the {@link HttpServerExchange} {@link HeaderMap} with the proper response settings.
* @param exchange Contains information about what the client has sent to us
*/
private static void setupResponseHeaders(HttpServerExchange exchange) {
final HeaderMap headerMap = exchange.getResponseHeaders();
headerMap.add(new HttpString("Access-Control-Allow-Origin"),"*");
headerMap.add(new HttpString("Keep-Alive"), "timeout=500, max=100");
}
}
| 13,025 | 40.221519 | 188 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/LocalSnapshotManager.java
|
package com.iota.iri.service.snapshot;
import com.iota.iri.service.milestone.MilestoneSolidifier;
import com.iota.iri.service.transactionpruning.PruningCondition;
/**
* Represents the manager for local {@link Snapshot}s that takes care of periodically creating a new {@link Snapshot}
* when the configured interval has passed.
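*
* A minimal wiring sketch (constructor arguments assumed from the node setup; see
* {@link com.iota.iri.service.snapshot.conditions.SnapshotDepthCondition}):
* <pre>{@code
* localSnapshotManager.addSnapshotCondition(new SnapshotDepthCondition(config, snapshotProvider));
* localSnapshotManager.start(milestoneSolidifier); // spawns the background monitor thread
* }</pre>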
*/
public interface LocalSnapshotManager {
/**
* Starts the automatic creation of local {@link Snapshot}s by spawning a background {@link Thread}, that
* periodically checks if the last snapshot is older than
* {@link com.iota.iri.conf.SnapshotConfig#getLocalSnapshotsIntervalSynced()}.
*
* When we detect that it is time for a local snapshot we internally trigger its creation.
*
* Note: If the node is not fully synced we use
* {@link com.iota.iri.conf.SnapshotConfig#getLocalSnapshotsIntervalUnsynced()} instead.
*
* @param milestoneSolidifier tracker for the milestones to determine when a new local snapshot is due
*/
void start(MilestoneSolidifier milestoneSolidifier);
/**
* Stops the {@link Thread} that takes care of creating the local {@link Snapshot}s and that was spawned by the
* {@link #start(MilestoneSolidifier)} method.
*/
void shutdown();
/**
* Add a conditional check which will be queried every cycle of the manager.
*
* @param conditions conditions on which we check to make a snapshot
*/
void addSnapshotCondition(SnapshotCondition... conditions);
/**
* Add a conditional check for pruning which will be queried every cycle of the manager.
*
* @param conditions conditions on which we check to make a snapshot
*/
void addPruningConditions(PruningCondition... conditions);
}
| 1,778 | 38.533333 | 117 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/Snapshot.java
|
package com.iota.iri.service.snapshot;
/**
* <p>
* Represents a complete Snapshot of the ledger state.
* </p>
* <p>
* The Snapshot is a container for the {@link SnapshotMetaData} and the {@link SnapshotState} and therefore fulfills
* both of these contracts while offering some additional utility methods to manipulate them.
* </p>
* <p>
* Important: Since we are only dealing with Snapshots outside of the snapshot package (the underlying meta data and
* state objects are not exposed via getters) this class takes care of making the exposed methods thread
* safe. The logic of the underlying objects is not thread-safe (for performance and simplicity reasons) but
* we don't need to worry about this since we do not have access to them.
* </p>
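* <p>
* A minimal usage sketch (names assumed) of the read-lock pattern this interface expects:
* </p>
* <pre>{@code
* snapshot.lockRead();
* try {
*     Long balance = snapshot.getBalance(someAddress); // read consistently under the lock
* } finally {
*     snapshot.unlockRead();
* }
* }</pre>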
*/
public interface Snapshot extends SnapshotMetaData, SnapshotState {
/**
* Locks the complete Snapshot object for read access.
*
* This is used to synchronize the access from different Threads.
*/
void lockRead();
/**
* Unlocks the complete Snapshot object from read blocks.
*
* This is used to synchronize the access from different Threads.
*/
void unlockRead();
/**
* Locks the complete Snapshot object for write access.
*
* This is used to synchronize the access from different Threads.
*/
void lockWrite();
/**
* Unlocks the complete Snapshot object from write blocks.
*
* This is used to synchronize the access from different Threads.
*/
void unlockWrite();
/**
* This method allows us to keep track of milestones we skip while applying changes.
*
* <p>
* Since we can only roll back changes if we know which milestones have led to the current state, we need to keep
* track of the milestones that were previously skipped.
* </p>
*
* @param skippedMilestoneIndex index of the milestone that was skipped while applying the ledger state
* @return true if the index was added and false if it was already part of the set
*/
boolean addSkippedMilestone(int skippedMilestoneIndex);
/**
* This method allows us to remove a milestone index from the internal list of skipped milestone indexes.
*
* <p>
* Since we can only roll back changes if we know which milestones have led to the current state, we need to keep
* track of the milestones that were previously skipped.
* </p>
*
* @param skippedMilestoneIndex index of the milestone that was skipped while applying the ledger state
* @return true if the skipped milestone was removed from the internal list and false if it was not present
*/
boolean removeSkippedMilestone(int skippedMilestoneIndex);
/**
* Replaces the values of this instance with the values of another snapshot object.
*
* <p>
* This can for example be used to "reset" the snapshot after a failed modification attempt (while being able to
* keep the same instance).
* </p>
*
* @param snapshot the new snapshot details that shall overwrite the current ones
*/
void update(Snapshot snapshot);
/**
* <p>
* Creates a deep clone of the Snapshot which can be modified without affecting the values of the original
* one.
* </p>
* <p>
* Since the data structures inside the Snapshot are extremely big, this method is relatively expensive and should
* only be used when it is really necessary.
* </p>
*
* @return a deep clone of the snapshot
*/
Snapshot clone();
}
| 3,623 | 35.606061 | 119 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/SnapshotCondition.java
|
package com.iota.iri.service.snapshot;
/**
*
* A snapshot condition defines a limit that our database imposes on storing data.
* Once the condition is met, a {@link Snapshot} will be taken.
*
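* A minimal caller sketch (wiring assumed, not part of this interface; exception handling omitted):
* <pre>{@code
* if (condition.shouldTakeSnapshot(isInSync)) {
*     int snapshotIndex = condition.getSnapshotStartingMilestone();
*     // trigger the snapshot at snapshotIndex
* }
* }</pre>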
*/
public interface SnapshotCondition {
/**
* Executes a check on the node to determine if we should take a snapshot.
*
* @param isInSync <code>true</code> if this node is considered in sync, in all other cases <code>false</code>
* @return <code>true</code> if we should take a snapshot, otherwise <code>false</code>
*/
boolean shouldTakeSnapshot(boolean isInSync);
/**
* Determines the Milestone index from which we should snapshot.
* Should only be called when {@link #shouldTakeSnapshot(boolean)} returns <code>true</code>
*
* @return The index of the milestone we will create a new Snapshot at
* @throws SnapshotException if we could not obtain the requirements for determining the milestone index
*/
int getSnapshotStartingMilestone() throws SnapshotException;
/**
* Determines the milestone at which we should start pruning.
* This does not take the minimum amount of milestones in the database into account.
*
* @return The index of the last milestone we will keep in the database.
* Everything before this number will be pruned away if allowed.
* @throws SnapshotException if we could not obtain the requirements for determining the milestone index
*/
int getSnapshotPruningMilestone() throws SnapshotException;
}
| 1,556 | 39.973684 | 114 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/SnapshotException.java
|
package com.iota.iri.service.snapshot;
/**
* This class is used to wrap exceptions that are specific to the snapshot logic.
*
* It allows us to distinguish between the different kinds of errors that can happen during the execution of the code.
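*
* A minimal wrapping sketch (the caught exception type is assumed):
* <pre>{@code
* try {
*     // ... read a snapshot file ...
* } catch (IOException e) {
*     throw new SnapshotException("could not read the snapshot file", e);
* }
* }</pre>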
*/
public class SnapshotException extends Exception {
/**
* Constructor of the exception which allows us to provide a specific error message and the cause of the error.
*
* @param message reason why this error occurred
* @param cause wrapped exception that caused this error
*/
public SnapshotException(String message, Throwable cause) {
super(message, cause);
}
/**
* Constructor of the exception which allows us to provide a specific error message without having an underlying
* cause.
*
* @param message reason why this error occurred
*/
public SnapshotException(String message) {
super(message);
}
/**
* Constructor of the exception which allows us to wrap the underlying cause of the error without providing a
* specific reason.
*
* @param cause wrapped exception that caused this error
*/
public SnapshotException(Throwable cause) {
super(cause);
}
}
| 1,240 | 30.820513 | 116 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/SnapshotMetaData.java
|
package com.iota.iri.service.snapshot;
import com.iota.iri.model.Hash;
import java.util.Map;
/**
* Represents the meta data of a snapshot.
*
* Since a snapshot represents the state of the ledger at a given point and this point is defined by a chosen milestone
* in the tangle, we store milestone specific values like a hash, an index and the timestamp but also derived values
* that are only relevant for the local snapshots logic.
*/
public interface SnapshotMetaData {
/**
* Getter of the hash of the transaction that the snapshot was "derived" from.
*
* In case of the "latest" {@link Snapshot} this value will be the hash of the "initial" {@link Snapshot}.
*
* Note: a snapshot can be modified over time, as we apply the balance changes caused by consecutive transactions,
* so this value differs from the value returned by {@link #getHash()}.
*
* @return hash of the transaction that the snapshot was "derived" from
*/
Hash getInitialHash();
/**
* Setter of the hash of the transaction that the snapshot was "derived" from.
*
* Note: After creating a new local {@link Snapshot} we update this value in the latest {@link Snapshot} to
* correctly reflect the new "origin" of the latest {@link Snapshot}.
*
* @param initialHash hash of the transaction that the snapshot was "derived" from
*/
void setInitialHash(Hash initialHash);
/**
* Getter of the index of the milestone that the snapshot was "derived" from.
*
* In case of the "latest" {@link Snapshot} this value will be the index of the "initial" {@link Snapshot}.
*
* Note: a snapshot can be modified over time, as we apply the balance changes caused by consecutive transactions,
* so this value differs from the value returned by {@link #getIndex()}.
*
* @return index of the milestone that the snapshot was "derived" from
*/
int getInitialIndex();
/**
* Setter of the index of the milestone that the snapshot was "derived" from.
*
* Note: After creating a new local {@link Snapshot} we update this value in the latest {@link Snapshot} to
* correctly reflect the new "origin" of the latest {@link Snapshot}.
*
* @param initialIndex index of the milestone that the snapshot was "derived" from
*/
void setInitialIndex(int initialIndex);
/**
* Getter of the timestamp of the transaction that the snapshot was "derived" from.
*
* In case of the "latest" {@link Snapshot} this value will be the timestamp of the "initial" {@link Snapshot}.
*
* Note: a snapshot can be modified over time, as we apply the balance changes caused by consecutive transactions,
* so this value differs from the value returned by {@link #getTimestamp()}.
*
* @return timestamp of the transaction that the snapshot was "derived" from
*/
long getInitialTimestamp();
/**
* Setter of the timestamp of the transaction that the snapshot was "derived" from.
*
* Note: After creating a new local {@link Snapshot} we update this value in the latest {@link Snapshot} to
* correctly reflect the new "origin" of the latest {@link Snapshot}.
*
* @param initialTimestamp timestamp of the transaction that the snapshot was "derived" from
*/
void setInitialTimestamp(long initialTimestamp);
/**
* Getter of the hash of the transaction that the snapshot currently belongs to.
*
* @return hash of the transaction that the snapshot currently belongs to
*/
Hash getHash();
/**
* Setter of the hash of the transaction that the snapshot currently belongs to.
*
* @param hash hash of the transaction that the snapshot currently belongs to
*/
void setHash(Hash hash);
/**
* Getter of the milestone index that the snapshot currently belongs to.
*
* Note: At the moment we use milestones as a reference for snapshots.
*
* @return milestone index that the snapshot currently belongs to
*/
int getIndex();
/**
* Setter of the milestone index that the snapshot currently belongs to.
*
* Note: At the moment we use milestones as a reference for snapshots.
*
* @param index milestone index that the snapshot currently belongs to
*/
void setIndex(int index);
/**
* Getter of the timestamp of the transaction that the snapshot currently belongs to.
*
* @return timestamp of the transaction that the snapshot currently belongs to
*/
long getTimestamp();
/**
* Setter of the timestamp of the transaction that the snapshot currently belongs to.
*
* @param timestamp timestamp of the transaction that the snapshot currently belongs to
*/
void setTimestamp(long timestamp);
/**
* Getter for the solid entry points of the snapshot.
*
* A solid entry point is a confirmed transaction that had non-orphaned approvers during the time of the snapshot
* creation and therefore a connection to the most recent part of the tangle. The solid entry points allow us to
* stop the solidification process without having to go all the way back to the genesis.
*
* Note: Nodes that do not use local snapshots will only have one solid entry point (the NULL_HASH).
*
* @return map with the transaction hashes associated to their milestone index
*/
Map<Hash, Integer> getSolidEntryPoints();
/**
* Setter for the solid entry points of this snapshot.
*
* A solid entry point is a confirmed transaction that had non-orphaned approvers during the time of the snapshot
* creation and therefore a connection to the most recent part of the tangle. The solid entry points allow us to
* stop the solidification process without having to go all the way back to the genesis.
*
* Note: Nodes that do not use local snapshots should only have one solid entry point (the NULL_HASH).
*
* @param solidEntryPoints map with the transaction hashes associated to their milestone index
*/
void setSolidEntryPoints(Map<Hash, Integer> solidEntryPoints);
/**
* This method checks if a given transaction is a solid entry point.
*
* It simply performs a member check on the value returned by {@link #getSolidEntryPoints()} and therefore is a
* utility method that makes it easier to check a single transaction.
*
* A solid entry point is a confirmed transaction that had non-orphaned approvers during the time of the snapshot
* creation and therefore a connection to the most recent part of the tangle. The solid entry points allow us to
* stop the solidification process without having to go all the way back to the genesis.
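*
* A minimal sketch (names assumed) combining it with {@link #getSolidEntryPointIndex(Hash)}:
* <pre>{@code
* if (metaData.hasSolidEntryPoint(txHash)) {
*     int confirmingMilestone = metaData.getSolidEntryPointIndex(txHash);
*     // stop solidifying here instead of walking back to the genesis
* }
* }</pre>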
*
* @param solidEntrypoint transaction hash that shall be checked
* @return true if the hash is a solid entry point and false otherwise
*/
boolean hasSolidEntryPoint(Hash solidEntrypoint);
/**
* Returns the index of the milestone that was responsible for confirming the transaction that is now a solid
* entry point.
*
* A solid entry point is a confirmed transaction that had non-orphaned approvers during the time of the snapshot
* creation and therefore a connection to the most recent part of the tangle. The solid entry points allow us to
* stop the solidification process without having to go all the way back to the genesis.
*
* @param solidEntrypoint transaction hash of the solid entry point
* @return index of the milestone that once confirmed the solid entry point or null if the given transaction is no
* solid entry point
*/
int getSolidEntryPointIndex(Hash solidEntrypoint);
/**
* Getter for the seen milestones.
*
* Note: Since new nodes use local snapshot files as a way to bootstrap their database, we store the hashes of all
* milestones that have been following the snapshot in its meta data. This allows these nodes to immediately
* start requesting the missing milestones without having to discover them one by one, which massively
* increases the sync speed of new nodes.
*
* @return map of milestone transaction hashes associated to their milestone index
*/
Map<Hash, Integer> getSeenMilestones();
/**
* Setter for the seen milestones.
*
* Note: Since new nodes use local snapshot files as a way to bootstrap their database, we store the hashes of all
* milestones that have been following the snapshot in its meta data. This allows these nodes to immediately
* start requesting the missing milestones without having to discover them one by one, which massively
* increases the sync speed of new nodes.
*
* @param seenMilestones map of milestone transaction hashes associated to their milestone index
*/
void setSeenMilestones(Map<Hash, Integer> seenMilestones);
/**
* Replaces the meta data values of this instance with the values of another meta data object.
*
* This can for example be used to "reset" the meta data after a failed modification attempt (while being able to
* keep the same instance).
*
* @param newMetaData the new meta data that shall overwrite the current one
*/
void update(SnapshotMetaData newMetaData);
}
| 9,508 | 43.023148 | 119 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/SnapshotProvider.java
|
package com.iota.iri.service.snapshot;
import com.iota.iri.service.spentaddresses.SpentAddressesException;
/**
* The data provider that allows to retrieve the {@link Snapshot} instances that are relevant for the node.
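*
* A minimal usage sketch (initialization order assumed; exception handling omitted):
* <pre>{@code
* snapshotProvider.init();                                  // load the snapshots from disk
* Snapshot initial = snapshotProvider.getInitialSnapshot(); // start of the ledger
* Snapshot latest  = snapshotProvider.getLatestSnapshot();  // most recent confirmed state
* }</pre>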
*/
public interface SnapshotProvider {
/**
* Returns the Snapshot that reflects the start of the ledger (either the last global or a local {@link Snapshot}).
*
* This Snapshot will be modified whenever a new local {@link Snapshot} is taken.
*
* @return the Snapshot that reflects the start of the ledger (either the last global or a local {@link Snapshot})
*/
Snapshot getInitialSnapshot();
/**
* Returns the Snapshot that reflects the most recent "confirmed" state of the ledger.
*
* This Snapshot will be modified whenever a new milestone arrives and new transactions are confirmed.
*
* @return the Snapshot that represents the most recent "confirmed" state of the ledger
*/
Snapshot getLatestSnapshot();
/**
* Removes an existing old local snapshot and persists the newly given one.
*
* @param snapshot the snapshot to persist
* @throws SnapshotException if anything goes wrong while deleting or persisting data
*/
void persistSnapshot(Snapshot snapshot) throws SnapshotException;
/**
* Frees the resources of the {@link SnapshotProvider}.
*
* Snapshots require quite a bit of memory and should be cleaned up when they are not required anymore. This is
* particularly important for unit tests that create a separate instance of the {@link SnapshotProvider}.
*/
void shutdown();
/**
* This method initializes the instance by loading the snapshots.
*
* @throws SnapshotException if anything goes wrong while trying to read the snapshots
*/
void init() throws SnapshotException, SpentAddressesException;
}
| 1,903 | 37.08 | 119 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/SnapshotService.java
|
package com.iota.iri.service.snapshot;
import java.util.Map;
import com.iota.iri.controllers.MilestoneViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.service.milestone.MilestoneSolidifier;
import com.iota.iri.service.transactionpruning.TransactionPruner;
/**
* <p>
* Represents the service for snapshots that contains the relevant business logic for modifying {@link Snapshot}s and
* generating new local {@link Snapshot}s.
* </p>
* This class is stateless and does not hold any domain specific models.
*/
public interface SnapshotService {
/**
* <p>
* This method applies the balance changes that are introduced by future milestones to the current Snapshot.
* </p>
* <p>
* It iterates over the milestone indexes starting from the current index to the target index and applies all found
* milestone balances. If it cannot find a milestone for a certain index, it keeps track of these skipped
* milestones, which allows us to revert the changes even if the missing milestone was received and processed in the
* meantime. If the application of changes fails, we restore the state of the snapshot to the one it had before the
* application attempt so this method only modifies the Snapshot if it succeeds.
* </p>
* <p>
* Note: the changes done by this method can be reverted by using {@link #rollBackMilestones(Snapshot, int)}.
* </p>
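* <p>
* A minimal usage sketch (surrounding wiring assumed; exception handling omitted):
* </p>
* <pre>{@code
* Snapshot workingCopy = snapshotProvider.getLatestSnapshot().clone();
* snapshotService.replayMilestones(workingCopy, targetIndex);  // apply milestones up to targetIndex
* snapshotService.rollBackMilestones(workingCopy, startIndex); // revert them again if needed
* }</pre>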
*
* @param snapshot the Snapshot that shall get modified
* @param targetMilestoneIndex the index of the milestone that should be applied
* @throws SnapshotException if something goes wrong while applying the changes
*/
void replayMilestones(Snapshot snapshot, int targetMilestoneIndex) throws SnapshotException;
/**
* <p>
* This method rolls back the latest milestones until it reaches the state that the snapshot had before applying
* the milestone indicated by the given parameter.
* </p>
* <p>
* When rolling back the milestones we take previously skipped milestones into account, so this method should give
* the correct result, even if the missing milestones were received and processed in the meantime. If the rollback
* fails, we restore the state of the snapshot to the one it had before the rollback attempt so this method only
* modifies the Snapshot if it succeeds.
* </p>
* <p>
* Note: this method is used to reverse the changes introduced by {@link #replayMilestones(Snapshot, int)}
* </p>
* @param snapshot the Snapshot that shall get modified
* @param targetMilestoneIndex the index of the milestone that should be rolled back (including all following
* milestones that were applied)
* @throws SnapshotException if something goes wrong while reverting the changes
*/
void rollBackMilestones(Snapshot snapshot, int targetMilestoneIndex) throws SnapshotException;
/**
* <p>
* This method takes a "full" local snapshot according to the configuration of the node.
* </p>
* <p>
* It first determines the necessary configuration parameters and which milestone to use as a reference. It then
* generates the local {@link Snapshot}, issues the required {@link TransactionPruner} jobs and writes the
* resulting {@link Snapshot} to the disk.
* </p>
* <p>
* After persisting the local snapshot on the hard disk of the node, it updates the {@link Snapshot} instances used
* by the {@code snapshotProvider} to reflect the newly created {@link Snapshot}.
* </p>
*
* @param milestoneSolidifier milestone tracker that allows us to retrieve information about the known milestones
* @param transactionPruner manager for the pruning jobs that takes care of cleaning up the old data
* @param snapshotUntillIndex The last milestone we keep, everything before gets snapshotted.
* If we can't find the milestone of this index, we attempt to look back further until we do
* @return The Snapshot we ended up making
* @throws SnapshotException if anything goes wrong while creating the local snapshot
*/
Snapshot takeLocalSnapshot(MilestoneSolidifier milestoneSolidifier, TransactionPruner transactionPruner,
int snapshotUntillIndex) throws
SnapshotException;
/**
* <p>
* This method is called when the node is cleaning up data.
* The pruning milestone index is typically obtained after taking a successful local snapshot.
* No data should be pruned if pruning is disabled on this node.
* </p>
*
* @param transactionPruner manager for the pruning jobs that takes care of cleaning up the old data
* @param pruningMilestoneIndex The index of the milestone we will prune until (excluding)
* @throws SnapshotException if anything goes wrong while pruning
*/
void pruneSnapshotData(TransactionPruner transactionPruner, int pruningMilestoneIndex)
throws SnapshotException;
/**
* <p>
* This method generates a local snapshot of the full ledger state at the given milestone.
* </p>
* <p>
* The generated {@link Snapshot} contains the balances and meta data plus the derived values like the solid entry
* points and all seen milestones, that were issued after the snapshot and can therefore be used to generate the
* local snapshot files.
* </p>
*
* @param milestoneSolidifier milestone tracker that allows us to retrieve information about the known milestones
* @param targetMilestone milestone that is used as a reference point for the snapshot
* @return a local snapshot of the full ledger state at the given milestone
* @throws SnapshotException if anything goes wrong while generating the local snapshot
*/
Snapshot generateSnapshot(MilestoneSolidifier milestoneSolidifier, MilestoneViewModel targetMilestone) throws
SnapshotException;
/**
* <p>
* This method generates the solid entry points for a snapshot that belong to the given milestone.
* </p>
* <p>
* A solid entry point is a confirmed transaction that had non-orphaned approvers during the time of the snapshot
* creation and therefore a connection to the most recent part of the tangle. The solid entry points allow us to
* stop the solidification process without having to go all the way back to the genesis.
* </p>
*
* @param targetMilestone milestone that is used as a reference point for the snapshot
* @return a map of solid entry points associating their hash to the milestone index that confirmed them
* @throws SnapshotException if anything goes wrong while generating the solid entry points
*/
Map<Hash, Integer> generateSolidEntryPoints(MilestoneViewModel targetMilestone) throws SnapshotException;
/**
* <p>
* This method generates the map of seen milestones that happened after the given target milestone.
* </p>
* <p>
* The map contains the hashes of the milestones associated to their milestone index and is used to allow nodes
* that use local snapshot files to bootstrap their nodes, to faster request the missing milestones when syncing the
* very first time.
* </p>
*
* @param milestoneSolidifier milestone tracker that allows us to retrieve information about the known milestones
* @param targetMilestone milestone that is used as a reference point for the snapshot
* @return a map of seen milestones associating their hash to their milestone index
* @throws SnapshotException if anything goes wrong while generating the seen milestones
*/
Map<Hash, Integer> generateSeenMilestones(MilestoneSolidifier milestoneSolidifier,
MilestoneViewModel targetMilestone) throws SnapshotException;
}
| 7,968 | 51.774834 | 123 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/SnapshotState.java
|
package com.iota.iri.service.snapshot;
import com.iota.iri.model.Hash;
import java.util.Map;
/**
* Represents the "state" of the ledger at a given time, which means how many IOTA are available on a certain address.
*
* It can either be a full ledger state which is used by the Snapshots or a differential State which carries only the
* resulting balances of the changed addresses (see {@link #patchedState(SnapshotStateDiff)}).
*/
public interface SnapshotState {
/**
* Returns the balance of a single address.
*
* @param address address that shall be retrieved
* @return balance of the address or {@code null} if the address is unknown.
*/
Long getBalance(Hash address);
/**
* The {@link Map} returned by this method is a copy of the current state which guarantees that the object is only
* modified through its class methods.
*
* @return map with the addresses associated to their balance
*/
Map<Hash, Long> getBalances();
/**
* Checks if the state is consistent, which means that there are no addresses with a negative balance.
*
* @return true if the state is consistent and false otherwise
*/
boolean isConsistent();
/**
* Checks if the state of the ledger has the correct supply by adding the balances of all addresses and comparing it
* against the expected value.
*
* It doesn't make sense to call this function on differential states (returned by {@link
* #patchedState(SnapshotStateDiff)}) that are used to check the consistency of patches.
*
* @return true if the supply is correct and false otherwise
*/
boolean hasCorrectSupply();
/**
* Replaces the state of this instance with the values of another state.
*
* This can for example be used to "reset" the state after a failed modification attempt while being able to keep
* the same instance.
*
* @param newState the new state that shall overwrite the current one
*/
void update(SnapshotState newState);
/**
* This method applies the given {@link SnapshotStateDiff} to the current balances.
*
* Before applying the {@link SnapshotStateDiff} we check if it is consistent.
*
* @param diff the balance changes that should be applied to this state.
* @throws SnapshotException if the {@link SnapshotStateDiff} is inconsistent (see {@link
* SnapshotStateDiff#isConsistent()})
*/
void applyStateDiff(SnapshotStateDiff diff) throws SnapshotException;
/**
* This method creates a differential SnapshotState that contains the resulting balances of only the addresses
* that are modified by the given {@link SnapshotStateDiff}.
*
* It can be used to check if the modifications by a {@link SnapshotStateDiff} will result in a consistent State
* where all modified addresses are still positive. Even though this state can be consistent, calling
* {@link #hasCorrectSupply()} on it will most likely return false, since the unmodified addresses are missing.
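*
* A minimal consistency-check sketch of the usage described above (exception handling omitted):
* <pre>{@code
* if (state.patchedState(diff).isConsistent()) {
*     state.applyStateDiff(diff); // safe: no modified address goes negative
* }
* }</pre>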
*
* @param snapshotStateDiff the balance patches that we want to apply
* @return a differential SnapshotState that contains the resulting balances of all modified addresses
*/
SnapshotState patchedState(SnapshotStateDiff snapshotStateDiff);
}
| 3,366 | 40.060976 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/SnapshotStateDiff.java
|
package com.iota.iri.service.snapshot;
import com.iota.iri.model.Hash;
import java.util.Map;
/**
* An immutable collection of balance changes that can be used to modify the ledger state.
*/
public interface SnapshotStateDiff {
/**
* The {@link Map} returned by this method is a copy which guarantees the immutability of this object.
*
* @return the balance changes associated to their address hash
*/
Map<Hash, Long> getBalanceChanges();
/**
* Checks if the balance changes are consistent, which means that the sum of all changes is exactly zero.
*
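* For example (values assumed), a transfer of 10 tokens from address A to address B produces the changes
* {A: -10, B: +10}; their sum is zero, so the diff is consistent.
*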
* @return true if the sum of all balances is zero
*/
boolean isConsistent();
}
| 690 | 26.64 | 109 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/conditions/SnapshotDepthCondition.java
|
package com.iota.iri.service.snapshot.conditions;
import com.iota.iri.conf.SnapshotConfig;
import com.iota.iri.service.snapshot.SnapshotCondition;
import com.iota.iri.service.snapshot.SnapshotProvider;
import com.google.common.annotations.VisibleForTesting;
/**
*
* Conditions for snapshotting based on the distance between the current milestone and the amount we want to keep
*
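* For example (numbers assumed): with {@code localSnapshotsDepth = 100} and a synced interval of 10, a new
* snapshot is due once the latest snapshot index is more than 110 ahead of the initial snapshot index.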
*/
public class SnapshotDepthCondition implements SnapshotCondition {
private final SnapshotConfig config;
private final SnapshotProvider snapshotProvider;
/**
* Implements a {@link SnapshotCondition} based on the number of milestones we have
*
* @param config configuration with snapshot specific settings.
* @param snapshotProvider gives us access to the relevant snapshots.
*/
public SnapshotDepthCondition(SnapshotConfig config, SnapshotProvider snapshotProvider) {
this.config = config;
this.snapshotProvider = snapshotProvider;
}
@Override
public boolean shouldTakeSnapshot(boolean isInSync) {
int localSnapshotInterval = getSnapshotInterval(isInSync);
int latestSnapshotIndex = snapshotProvider.getLatestSnapshot().getIndex();
int initialSnapshotIndex = snapshotProvider.getInitialSnapshot().getIndex();
return latestSnapshotIndex - initialSnapshotIndex > config.getLocalSnapshotsDepth() + localSnapshotInterval;
}
/**
* {@inheritDoc}
*
* <p>
* It determines the milestone index by subtracting {@link SnapshotConfig#getLocalSnapshotsDepth()} from the
* latest snapshot index.
* </p>
*/
@Override
public int getSnapshotStartingMilestone() {
return snapshotProvider.getLatestSnapshot().getIndex() - config.getLocalSnapshotsDepth();
}
/**
* A snapshot is taken at a regular interval.
* The interval changes based on the sync state of the node.
*
* @param inSync if this node is in sync
* @return the current interval in which we take local snapshots
*/
@VisibleForTesting
int getSnapshotInterval(boolean inSync) {
return inSync
? config.getLocalSnapshotsIntervalSynced()
: config.getLocalSnapshotsIntervalUnsynced();
}
@Override
public int getSnapshotPruningMilestone() {
return snapshotProvider.getLatestSnapshot().getIndex() - config.getLocalSnapshotsPruningDelay();
}
}
| 2,491 | 33.611111 | 117 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/impl/LocalSnapshotManagerImpl.java
|
package com.iota.iri.service.snapshot.impl;
import com.iota.iri.conf.BaseIotaConfig;
import com.iota.iri.conf.SnapshotConfig;
import com.iota.iri.service.milestone.InSyncService;
import com.iota.iri.service.milestone.MilestoneSolidifier;
import com.iota.iri.service.snapshot.*;
import com.iota.iri.service.transactionpruning.PruningCondition;
import com.iota.iri.service.transactionpruning.TransactionPruner;
import com.iota.iri.service.transactionpruning.TransactionPruningException;
import com.iota.iri.service.transactionpruning.jobs.MilestonePrunerJob;
import com.iota.iri.utils.thread.ThreadIdentifier;
import com.iota.iri.utils.thread.ThreadUtils;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang3.ArrayUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p>
* Creates a manager for the local snapshots that takes care of automatically creating local snapshots when the defined
* intervals have passed.
* </p>
* <p>
* It incorporates a background worker that periodically checks if a new snapshot is due (see {@link
* #start(MilestoneSolidifier)} and {@link #shutdown()}).
* </p>
*/
public class LocalSnapshotManagerImpl implements LocalSnapshotManager {
/**
* The interval (in milliseconds) in which we check if a new local {@link com.iota.iri.service.snapshot.Snapshot} is
* due.
*/
private static final int LOCAL_SNAPSHOT_RESCAN_INTERVAL = 10000;
/**
* To prevent jumping back and forth in and out of sync, there is a buffer in between.
* Only when the latest milestone and the latest snapshot differ by more than this number do we fall out of sync.
*/
@VisibleForTesting
static final int LOCAL_SNAPSHOT_SYNC_BUFFER = 5;
/**
* Logger for this class allowing us to dump debug and status messages.
*/
private static final Logger log = LoggerFactory.getLogger(LocalSnapshotManagerImpl.class);
/**
* Data provider for the relevant {@link com.iota.iri.service.snapshot.Snapshot} instances.
*/
private final SnapshotProvider snapshotProvider;
/**
* Service that contains the logic for generating local {@link com.iota.iri.service.snapshot.Snapshot}s.
*/
private final SnapshotService snapshotService;
/**
* Manager for the pruning jobs that allows us to clean up old transactions.
*/
private final TransactionPruner transactionPruner;
/**
* Configuration with important snapshot related parameters.
*/
private final SnapshotConfig config;
/**
* Holds a reference to the {@link ThreadIdentifier} for the monitor thread.
*
* Using a {@link ThreadIdentifier} for spawning the thread allows the {@link ThreadUtils} to spawn exactly one
* thread for this instance even when we call the {@link #start(MilestoneSolidifier)} method multiple times.
*/
private ThreadIdentifier monitorThreadIdentifier = new ThreadIdentifier("Local Snapshots Monitor");
private SnapshotCondition[] snapshotConditions;
private PruningCondition[] pruningConditions;
private InSyncService inSyncService;
/**
* @param snapshotProvider data provider for the snapshots that are relevant for the node
* @param snapshotService service instance of the snapshot package that gives us access to the package's business logic
* @param transactionPruner manager for the pruning jobs that allows us to clean up old transactions
* @param config important snapshot related configuration parameters
*/
public LocalSnapshotManagerImpl(SnapshotProvider snapshotProvider, SnapshotService snapshotService,
TransactionPruner transactionPruner, SnapshotConfig config, InSyncService inSyncService) {
this.snapshotProvider = snapshotProvider;
this.snapshotService = snapshotService;
this.transactionPruner = transactionPruner;
this.config = config;
this.inSyncService = inSyncService;
}
/**
* {@inheritDoc}
*/
@Override
public void start(MilestoneSolidifier milestoneSolidifier) {
ThreadUtils.spawnThread(() -> monitorThread(milestoneSolidifier), monitorThreadIdentifier);
}
/**
* {@inheritDoc}
*/
@Override
public void shutdown() {
ThreadUtils.stopThread(monitorThreadIdentifier);
}
/**
* This method contains the logic for the monitoring Thread.
*
* It periodically checks if a new {@link com.iota.iri.service.snapshot.Snapshot} has to be taken until the
* {@link Thread} is terminated. If it detects that a {@link com.iota.iri.service.snapshot.Snapshot} is due it
* triggers the creation of the {@link com.iota.iri.service.snapshot.Snapshot} by calling
* {@link SnapshotService#takeLocalSnapshot}.
*
* @param milestoneSolidifier tracker for the milestones to determine when a new local snapshot is due
*/
@VisibleForTesting
void monitorThread(MilestoneSolidifier milestoneSolidifier) {
while (!Thread.currentThread().isInterrupted()) {
// Possibly takes a snapshot if we can
handleSnapshot(milestoneSolidifier);
// Prunes data separately from a snapshot, if we made a snapshot of the pruned data now or previously
if (config.getLocalSnapshotsPruningEnabled()) {
handlePruning();
}
ThreadUtils.sleep(LOCAL_SNAPSHOT_RESCAN_INTERVAL);
}
}
private Snapshot handleSnapshot(MilestoneSolidifier milestoneSolidifier) {
boolean isInSync = inSyncService.isInSync();
int lowestSnapshotIndex = calculateLowestSnapshotIndex(isInSync);
if (canTakeSnapshot(lowestSnapshotIndex, milestoneSolidifier)) {
try {
log.debug("Taking snapshot at index {}", lowestSnapshotIndex);
return snapshotService.takeLocalSnapshot(
milestoneSolidifier, transactionPruner, lowestSnapshotIndex);
} catch (SnapshotException e) {
log.error("error while taking local snapshot", e);
} catch (Exception e) {
log.error("could not load the target milestone", e);
}
}
return null;
}
/**
* Calculates the oldest milestone index allowed by all snapshotConditions.
*
* @param isInSync If this node is considered in sync, to prevent recalculation.
* @return The lowest allowed milestone we can snapshot according to the node
*/
private int calculateLowestSnapshotIndex(boolean isInSync) {
int lowestSnapshotIndex = -1;
for (SnapshotCondition condition : snapshotConditions) {
try {
if (condition.shouldTakeSnapshot(isInSync) && (lowestSnapshotIndex == -1
|| condition.getSnapshotStartingMilestone() < lowestSnapshotIndex)) {
lowestSnapshotIndex = condition.getSnapshotStartingMilestone();
}
} catch (SnapshotException e) {
log.error("error while checking local snapshot availabilty", e);
}
}
return lowestSnapshotIndex;
}
private boolean canTakeSnapshot(int lowestSnapshotIndex, MilestoneSolidifier milestoneSolidifier) {
return lowestSnapshotIndex != -1
&& milestoneSolidifier.isInitialScanComplete()
&& lowestSnapshotIndex > snapshotProvider.getInitialSnapshot().getIndex()
&& lowestSnapshotIndex <= snapshotProvider.getLatestSnapshot().getIndex() - config.getLocalSnapshotsDepth();
}
private void handlePruning() {
// Recalculate inSync, as a snapshot can take place which takes a while
try {
int pruningMilestoneIndex = calculateLowestPruningIndex();
if (canPrune(pruningMilestoneIndex)) {
log.info("Pruning at index {}", pruningMilestoneIndex);
// Pruning will not happen when pruning is turned off, but we don't want to know about that here
snapshotService.pruneSnapshotData(transactionPruner, pruningMilestoneIndex);
} else {
if (pruningMilestoneIndex > 0) {
log.debug("Can't prune at index {}", pruningMilestoneIndex);
}
}
} catch (SnapshotException e) {
log.error("error while pruning", e);
} catch (Exception e) {
log.error("could not prune data", e);
}
}
/**
* Calculates the oldest pruning milestone index allowed by all conditions.
* If the lowest index violates our set minimum pruning depth, the minimum will be returned instead.
*
* @return The lowest allowed milestone we can prune according to the node, or -1 if we cannot
* @throws TransactionPruningException if we could not obtain the requirements for determining the pruning milestone
*/
private int calculateLowestPruningIndex() throws TransactionPruningException {
int lowestPruningIndex = -1;
for (PruningCondition condition : pruningConditions) {
int snapshotPruningMilestone = condition.getSnapshotPruningMilestone();
if (condition.shouldPrune()
&& (lowestPruningIndex == -1 || snapshotPruningMilestone < lowestPruningIndex)) {
lowestPruningIndex = snapshotPruningMilestone;
}
}
int localSnapshotIndex = this.snapshotProvider.getInitialSnapshot().getIndex();
// since depth condition may be skipped we must make sure that we don't prune too shallow
int maxPruningIndex = localSnapshotIndex - BaseIotaConfig.Defaults.LOCAL_SNAPSHOTS_PRUNING_DELAY_MIN;
if (lowestPruningIndex != -1 && lowestPruningIndex > maxPruningIndex) {
log.warn("Attempting to prune at milestone index {}. But it is not at least {} milestones below "
+ "last local snapshot {}. Pruning at {} instead",
lowestPruningIndex, BaseIotaConfig.Defaults.LOCAL_SNAPSHOTS_PRUNING_DELAY_MIN,
localSnapshotIndex, maxPruningIndex);
return maxPruningIndex;
}
return lowestPruningIndex;
}
private boolean canPrune(int pruningMilestoneIndex) {
int snapshotIndex = snapshotProvider.getInitialSnapshot().getIndex();
// -1 means we can't prune, smaller than snapshotIndex because we prune until index + 1
return pruningMilestoneIndex > 0
&& pruningMilestoneIndex < snapshotIndex
&& !transactionPruner.hasActiveJobFor(MilestonePrunerJob.class);
}
@Override
public void addSnapshotCondition(SnapshotCondition... conditions) {
if (this.snapshotConditions == null) {
this.snapshotConditions = conditions.clone();
} else {
this.snapshotConditions = ArrayUtils.addAll(this.snapshotConditions, conditions);
}
}
@Override
public void addPruningConditions(PruningCondition... conditions) {
if (this.pruningConditions == null) {
this.pruningConditions = conditions.clone();
} else {
this.pruningConditions = ArrayUtils.addAll(this.pruningConditions, conditions);
}
}
}
| 11,360 | 42.528736 | 124 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotImpl.java
|
package com.iota.iri.service.snapshot.impl;
import com.iota.iri.model.Hash;
import com.iota.iri.service.snapshot.Snapshot;
import com.iota.iri.service.snapshot.SnapshotException;
import com.iota.iri.service.snapshot.SnapshotMetaData;
import com.iota.iri.service.snapshot.SnapshotState;
import com.iota.iri.service.snapshot.SnapshotStateDiff;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* Implements the basic contract of the {@link Snapshot} interface.
*/
public class SnapshotImpl implements Snapshot {
/**
* Holds a reference to the state of this snapshot.
*/
private final SnapshotState state;
/**
* Holds a reference to the metadata of this snapshot.
*/
private final SnapshotMetaData metaData;
/**
* Lock object allowing to block access to this object from different threads.
*/
private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
/**
* Holds a set of milestones indexes that were skipped while advancing the Snapshot state.
*
* It is used to be able to identify which milestones have to be rolled back, even when additional milestones have
* become known in the meantime.
*/
private Set<Integer> skippedMilestones = new HashSet<>();
/**
* Creates a snapshot object with the given information.
*
* It simply stores the passed in parameters in the internal properties.
*
* @param state state of the snapshot with the balances of all addresses
* @param metaData meta data of the snapshot with the additional information of this snapshot
*/
public SnapshotImpl(SnapshotState state, SnapshotMetaData metaData) {
this.state = state;
this.metaData = metaData;
}
/**
* Creates a deep clone of the passed in snapshot.
*
* @param snapshot object that shall be cloned
*/
private SnapshotImpl(SnapshotImpl snapshot) {
this(
new SnapshotStateImpl(snapshot.state),
new SnapshotMetaDataImpl(snapshot.metaData)
);
skippedMilestones.addAll(snapshot.skippedMilestones);
}
/**
* {@inheritDoc}
*/
@Override
public void lockRead() {
readWriteLock.readLock().lock();
}
/**
* {@inheritDoc}
*/
@Override
public void unlockRead() {
readWriteLock.readLock().unlock();
}
/**
* {@inheritDoc}
*/
@Override
public void lockWrite() {
readWriteLock.writeLock().lock();
}
/**
* {@inheritDoc}
*/
@Override
public void unlockWrite() {
readWriteLock.writeLock().unlock();
}
/**
* {@inheritDoc}
*/
@Override
public boolean addSkippedMilestone(int skippedMilestoneIndex) {
return skippedMilestones.add(skippedMilestoneIndex);
}
/**
* {@inheritDoc}
*/
@Override
public boolean removeSkippedMilestone(int skippedMilestoneIndex) {
return skippedMilestones.remove(skippedMilestoneIndex);
}
/**
* {@inheritDoc}
*/
@Override
public void update(Snapshot snapshot) {
lockWrite();
try {
state.update(((SnapshotImpl) snapshot).state);
metaData.update(((SnapshotImpl) snapshot).metaData);
} finally {
unlockWrite();
}
}
@Override
public int hashCode() {
return Objects.hash(getClass(), state.hashCode(), metaData.hashCode(), skippedMilestones);
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj == null || !getClass().equals(obj.getClass())) {
return false;
}
return Objects.equals(state, ((SnapshotImpl) obj).state) &&
Objects.equals(metaData, ((SnapshotImpl) obj).metaData) &&
Objects.equals(skippedMilestones, ((SnapshotImpl) obj).skippedMilestones);
}
@Override
public SnapshotImpl clone() {
return new SnapshotImpl(this);
}
//region [THREAD-SAFE METADATA METHODS] ////////////////////////////////////////////////////////////////////////////
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public Hash getInitialHash() {
lockRead();
try {
return metaData.getInitialHash();
} finally {
unlockRead();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public void setInitialHash(Hash initialHash) {
lockWrite();
try {
metaData.setInitialHash(initialHash);
} finally {
unlockWrite();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public int getInitialIndex() {
lockRead();
try {
return metaData.getInitialIndex();
} finally {
unlockRead();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public void setInitialIndex(int initialIndex) {
lockWrite();
try {
metaData.setInitialIndex(initialIndex);
} finally {
unlockWrite();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public long getInitialTimestamp() {
lockRead();
try {
return metaData.getInitialTimestamp();
} finally {
unlockRead();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public void setInitialTimestamp(long initialTimestamp) {
lockWrite();
try {
metaData.setInitialTimestamp(initialTimestamp);
} finally {
unlockWrite();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public Hash getHash() {
lockRead();
try {
return this.metaData.getHash();
} finally {
unlockRead();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public void setHash(Hash hash) {
lockWrite();
try {
metaData.setHash(hash);
} finally {
unlockWrite();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public int getIndex() {
lockRead();
try {
return metaData.getIndex();
} finally {
unlockRead();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public void setIndex(int index) {
lockWrite();
try {
metaData.setIndex(index);
} finally {
unlockWrite();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public long getTimestamp() {
lockRead();
try {
return metaData.getTimestamp();
} finally {
unlockRead();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public void setTimestamp(long timestamp) {
lockWrite();
try {
metaData.setTimestamp(timestamp);
} finally {
unlockWrite();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public Map<Hash, Integer> getSolidEntryPoints() {
lockRead();
try {
return metaData.getSolidEntryPoints();
} finally {
unlockRead();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public void setSolidEntryPoints(Map<Hash, Integer> solidEntryPoints) {
lockWrite();
try {
metaData.setSolidEntryPoints(solidEntryPoints);
} finally {
unlockWrite();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public boolean hasSolidEntryPoint(Hash solidEntrypoint) {
lockRead();
try {
return metaData.hasSolidEntryPoint(solidEntrypoint);
} finally {
unlockRead();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public int getSolidEntryPointIndex(Hash solidEntrypoint) {
lockRead();
try {
return metaData.getSolidEntryPointIndex(solidEntrypoint);
} finally {
unlockRead();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public Map<Hash, Integer> getSeenMilestones() {
lockRead();
try {
return metaData.getSeenMilestones();
} finally {
unlockRead();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public void setSeenMilestones(Map<Hash, Integer> seenMilestones) {
lockWrite();
try {
metaData.setSeenMilestones(seenMilestones);
} finally {
unlockWrite();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotMetaData} method.
*/
@Override
public void update(SnapshotMetaData newMetaData) {
lockWrite();
try {
metaData.update(newMetaData);
} finally {
unlockWrite();
}
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
//region [THREAD-SAFE STATE METHODS] ///////////////////////////////////////////////////////////////////////////////
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotState} method.
*/
@Override
public Long getBalance(Hash address) {
lockRead();
try {
return state.getBalance(address);
} finally {
unlockRead();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotState} method.
*/
@Override
public Map<Hash, Long> getBalances() {
lockRead();
try {
return state.getBalances();
} finally {
unlockRead();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotState} method.
*/
@Override
public boolean isConsistent() {
lockRead();
try {
return state.isConsistent();
} finally {
unlockRead();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotState} method.
*/
@Override
public boolean hasCorrectSupply() {
lockRead();
try {
return state.hasCorrectSupply();
} finally {
unlockRead();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotState} method.
*/
@Override
public void update(SnapshotState newState) {
lockWrite();
try {
state.update(newState);
} finally {
unlockWrite();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotState} method.
*/
@Override
public void applyStateDiff(SnapshotStateDiff diff) throws SnapshotException {
lockWrite();
try {
state.applyStateDiff(diff);
} finally {
unlockWrite();
}
}
/**
* {@inheritDoc}
*
* This is a thread-safe wrapper for the underlying {@link SnapshotState} method.
*/
@Override
public SnapshotState patchedState(SnapshotStateDiff snapshotStateDiff) {
lockRead();
try {
return state.patchedState(snapshotStateDiff);
} finally {
unlockRead();
}
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
}
| 13,619 | 22.321918 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotMetaDataImpl.java
|
package com.iota.iri.service.snapshot.impl;
import com.iota.iri.model.Hash;
import com.iota.iri.service.snapshot.SnapshotMetaData;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
/**
* Implements the basic contract of the {@link SnapshotMetaData} interface.
*/
public class SnapshotMetaDataImpl implements SnapshotMetaData {
/**
* Internal property for the value returned by {@link SnapshotMetaData#getInitialHash()}.
*/
private Hash initialHash;
/**
* Internal property for the value returned by {@link SnapshotMetaData#getInitialIndex()}.
*/
private int initialIndex;
/**
* Internal property for the value returned by {@link SnapshotMetaData#getInitialTimestamp()}.
*/
private long initialTimestamp;
/**
* Internal property for the value returned by {@link SnapshotMetaData#getHash()}.
*/
private Hash hash;
/**
* Internal property for the value returned by {@link SnapshotMetaData#getIndex()}.
*/
private int index;
/**
* Internal property for the value returned by {@link SnapshotMetaData#getTimestamp()}.
*/
private long timestamp;
/**
* Internal property for the value returned by {@link SnapshotMetaData#getSolidEntryPoints()}.
*/
private Map<Hash, Integer> solidEntryPoints;
/**
* Internal property for the value returned by {@link SnapshotMetaData#getSeenMilestones()}.
*/
private Map<Hash, Integer> seenMilestones;
/**
* Creates a meta data object with the given information.
*
* It simply stores the passed in parameters in the internal properties.
*
* @param hash hash of the transaction that the snapshot belongs to
* @param index milestone index that the snapshot belongs to
* @param timestamp timestamp of the transaction that the snapshot belongs to
* @param solidEntryPoints map with the transaction hashes of the solid entry points associated to their milestone
* index
* @param seenMilestones map of milestone transaction hashes associated to their milestone index
*/
public SnapshotMetaDataImpl(Hash hash, int index, Long timestamp, Map<Hash, Integer> solidEntryPoints,
Map<Hash, Integer> seenMilestones) {
this.initialHash = hash;
this.initialIndex = index;
this.initialTimestamp = timestamp;
setHash(hash);
setIndex(index);
setTimestamp(timestamp);
setSolidEntryPoints(new HashMap<>(solidEntryPoints));
setSeenMilestones(new HashMap<>(seenMilestones));
}
/**
* Creates a deep clone of the passed in {@link SnapshotMetaData}.
*
* @param snapshotMetaData object that shall be cloned
*/
public SnapshotMetaDataImpl(SnapshotMetaData snapshotMetaData) {
this(snapshotMetaData.getInitialHash(), snapshotMetaData.getInitialIndex(),
snapshotMetaData.getInitialTimestamp(), snapshotMetaData.getSolidEntryPoints(),
snapshotMetaData.getSeenMilestones());
this.setIndex(snapshotMetaData.getIndex());
this.setHash(snapshotMetaData.getHash());
this.setTimestamp(snapshotMetaData.getTimestamp());
}
/**
* {@inheritDoc}
*/
@Override
public Hash getInitialHash() {
return initialHash;
}
/**
* {@inheritDoc}
*/
@Override
public void setInitialHash(Hash initialHash) {
this.initialHash = initialHash;
}
/**
* {@inheritDoc}
*/
@Override
public int getInitialIndex() {
return initialIndex;
}
/**
* {@inheritDoc}
*/
@Override
public void setInitialIndex(int initialIndex) {
this.initialIndex = initialIndex;
}
/**
* {@inheritDoc}
*/
@Override
public long getInitialTimestamp() {
return initialTimestamp;
}
/**
* {@inheritDoc}
*/
@Override
public void setInitialTimestamp(long initialTimestamp) {
this.initialTimestamp = initialTimestamp;
}
/**
* {@inheritDoc}
*/
@Override
public Hash getHash() {
return hash;
}
/**
* {@inheritDoc}
*/
@Override
public void setHash(Hash hash) {
this.hash = hash;
}
/**
* {@inheritDoc}
*/
@Override
public int getIndex() {
return this.index;
}
/**
* {@inheritDoc}
*/
@Override
public void setIndex(int index) {
this.index = index;
}
/**
* {@inheritDoc}
*/
@Override
public long getTimestamp() {
return this.timestamp;
}
/**
* {@inheritDoc}
*/
@Override
public void setTimestamp(long timestamp) {
this.timestamp = timestamp;
}
/**
* {@inheritDoc}
*/
@Override
public Map<Hash, Integer> getSolidEntryPoints() {
return solidEntryPoints;
}
/**
* {@inheritDoc}
*/
@Override
public void setSolidEntryPoints(Map<Hash, Integer> solidEntryPoints) {
this.solidEntryPoints = solidEntryPoints;
}
/**
* {@inheritDoc}
*/
@Override
public boolean hasSolidEntryPoint(Hash solidEntrypoint) {
return solidEntryPoints.containsKey(solidEntrypoint);
}
/**
* {@inheritDoc}
*/
@Override
public int getSolidEntryPointIndex(Hash solidEntrypoint) {
return solidEntryPoints.get(solidEntrypoint);
}
/**
* {@inheritDoc}
*/
@Override
public Map<Hash, Integer> getSeenMilestones() {
return seenMilestones;
}
/**
* {@inheritDoc}
*/
@Override
public void setSeenMilestones(Map<Hash, Integer> seenMilestones) {
this.seenMilestones = seenMilestones;
}
/**
* {@inheritDoc}
*/
@Override
public void update(SnapshotMetaData newMetaData) {
initialIndex = newMetaData.getInitialIndex();
initialHash = newMetaData.getInitialHash();
initialTimestamp = newMetaData.getInitialTimestamp();
setIndex(newMetaData.getIndex());
setHash(newMetaData.getHash());
setTimestamp(newMetaData.getTimestamp());
setSolidEntryPoints(new HashMap<>(newMetaData.getSolidEntryPoints()));
setSeenMilestones(new HashMap<>(newMetaData.getSeenMilestones()));
}
@Override
public int hashCode() {
return Objects.hash(getClass(), initialHash, initialIndex, initialTimestamp, hash, index, timestamp,
solidEntryPoints, seenMilestones);
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj == null || !getClass().equals(obj.getClass())) {
return false;
}
return Objects.equals(initialHash, ((SnapshotMetaDataImpl) obj).initialHash) &&
Objects.equals(initialIndex, ((SnapshotMetaDataImpl) obj).initialIndex) &&
Objects.equals(initialTimestamp, ((SnapshotMetaDataImpl) obj).initialTimestamp) &&
Objects.equals(hash, ((SnapshotMetaDataImpl) obj).hash) &&
Objects.equals(index, ((SnapshotMetaDataImpl) obj).index) &&
Objects.equals(timestamp, ((SnapshotMetaDataImpl) obj).timestamp) &&
Objects.equals(solidEntryPoints, ((SnapshotMetaDataImpl) obj).solidEntryPoints) &&
Objects.equals(seenMilestones, ((SnapshotMetaDataImpl) obj).seenMilestones);
}
}
| 7,555 | 25.794326 | 118 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotProviderImpl.java
|
package com.iota.iri.service.snapshot.impl;
import com.iota.iri.SignedFiles;
import com.iota.iri.conf.SnapshotConfig;
import com.iota.iri.controllers.LocalSnapshotViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.model.HashFactory;
import com.iota.iri.model.IntegerIndex;
import com.iota.iri.model.LocalSnapshot;
import com.iota.iri.service.snapshot.Snapshot;
import com.iota.iri.service.snapshot.SnapshotException;
import com.iota.iri.service.snapshot.SnapshotMetaData;
import com.iota.iri.service.snapshot.SnapshotProvider;
import com.iota.iri.service.snapshot.SnapshotState;
import com.iota.iri.service.spentaddresses.SpentAddressesException;
import com.iota.iri.storage.Indexable;
import com.iota.iri.storage.LocalSnapshotsPersistenceProvider;
import com.iota.iri.storage.Persistable;
import com.iota.iri.utils.Pair;
import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Stream;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p>
* Creates a data provider for the two {@link Snapshot} instances that are relevant for the node.
* </p>
* <p>
* It provides access to the two relevant {@link Snapshot} instances:
* <ul>
* <li>
* the {@link #initialSnapshot} (the starting point of the ledger based on the last global or local Snapshot)
* </li>
* <li>
* the {@link #latestSnapshot} (the state of the ledger after applying all changes up till the latest confirmed
* milestone)
* </li>
* </ul>
* </p>
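 * <p>
 * A minimal usage sketch (assuming {@code config} and a started {@code localSnapshotsDb} are available):
 * <pre>{@code
 * SnapshotProvider snapshotProvider = new SnapshotProviderImpl(config, localSnapshotsDb);
 * snapshotProvider.init();
 * Snapshot latestSnapshot = snapshotProvider.getLatestSnapshot();
 * }</pre>
 * </p>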
*/
public class SnapshotProviderImpl implements SnapshotProvider {
/**
* Public key that is used to verify the builtin snapshot signature.
*/
private static final String SNAPSHOT_PUBKEY =
"TTXJUGKTNPOOEXSTQVVACENJOQUROXYKDRCVK9LHUXILCLABLGJTIPNF9REWHOIMEUKWQLUOKD9CZUYAC";
/**
* Public key depth that is used to verify the builtin snapshot signature.
*/
private static final int SNAPSHOT_PUBKEY_DEPTH = 6;
/**
* Snapshot index that is used to verify the builtin snapshot signature.
*/
private static final int SNAPSHOT_INDEX = 12;
/**
* Logger for this class allowing us to dump debug and status messages.
*/
private static final Logger log = LoggerFactory.getLogger(SnapshotProviderImpl.class);
/**
* <p>
* Holds a cached version of the builtin snapshot.
* </p>
* Note: The builtin snapshot is embedded in the iri.jar and will not change. To speed up tests that need the
* snapshot multiple times while creating their own version of the LocalSnapshotManager, we cache the instance
 * here so they don't have to rebuild it from scratch every time (massively speeds up the unit tests).
*/
@VisibleForTesting
static SnapshotImpl builtinSnapshot = null;
/**
* Holds Snapshot related configuration parameters.
*/
private final SnapshotConfig config;
/**
* Internal property for the value returned by {@link SnapshotProvider#getInitialSnapshot()}.
*/
private Snapshot initialSnapshot;
/**
* Internal property for the value returned by {@link SnapshotProvider#getLatestSnapshot()}.
*/
private Snapshot latestSnapshot;
    private final LocalSnapshotsPersistenceProvider localSnapshotsDb;
/**
     * Implements the snapshot provider interface.
     *
     * @param configuration Snapshot configuration properties.
     * @param localSnapshotsDb persistence provider used to load and store local snapshots.
*/
public SnapshotProviderImpl(SnapshotConfig configuration, LocalSnapshotsPersistenceProvider localSnapshotsDb) {
this.config = configuration;
this.localSnapshotsDb = localSnapshotsDb;
}
/**
* {@inheritDoc}
*/
@Override
public void init() throws SnapshotException, SpentAddressesException {
loadSnapshots();
}
/**
* {@inheritDoc}
*/
@Override
public Snapshot getInitialSnapshot() {
return initialSnapshot;
}
/**
* {@inheritDoc}
*/
@Override
public Snapshot getLatestSnapshot() {
return latestSnapshot;
}
/**
* {@inheritDoc}
*/
@Override
public void persistSnapshot(Snapshot snapshot) throws SnapshotException {
snapshot.lockRead();
try {
log.info("persisting local snapshot; ms hash/index: {}/{}, solid entry points: {}, seen milestones: {}, " +
"ledger entries: {}", snapshot.getHash().toString(), snapshot.getIndex(),
snapshot.getSolidEntryPoints().size(), snapshot.getSeenMilestones().size(),
snapshot.getBalances().size());
// persist new one
new LocalSnapshotViewModel(snapshot.getHash(), snapshot.getIndex(), snapshot.getTimestamp(),
snapshot.getSolidEntryPoints(), snapshot.getSeenMilestones(), snapshot.getBalances())
.store(localSnapshotsDb);
log.info("persisted local snapshot; ms hash/index: {}/{}", snapshot.getHash().toString(),
snapshot.getIndex());
} catch (Exception e) {
throw new SnapshotException("failed to persist local snapshot", e);
} finally {
snapshot.unlockRead();
}
}
/**
* {@inheritDoc}
*/
@Override
public void shutdown() {
log.info("Shutting down local snapshots Persistence Providers... ");
initialSnapshot = null;
latestSnapshot = null;
localSnapshotsDb.shutdown();
}
//region SNAPSHOT RELATED UTILITY METHODS //////////////////////////////////////////////////////////////////////////
/**
* <p>
* Loads the snapshots that are provided by this data provider.
* </p>
* <p>
* We first check if a valid local {@link Snapshot} exists by trying to load it. If we fail to load the local
* {@link Snapshot}, we fall back to the builtin one.
* </p>
* <p>
* After the {@link #initialSnapshot} was successfully loaded we create a copy of it that will act as the "working
* copy" that will keep track of the latest changes that get applied while the node operates and processes the new
* confirmed transactions.
* </p>
* @throws SnapshotException if anything goes wrong while loading the snapshots
*/
private void loadSnapshots() throws SnapshotException, SpentAddressesException {
initialSnapshot = loadLocalSnapshot();
if (initialSnapshot == null) {
initialSnapshot = loadBuiltInSnapshot();
}
latestSnapshot = initialSnapshot.clone();
}
/**
* <p>
* Loads the last local snapshot from the database.
* </p>
* <p>
* This method returns null if no previous local snapshot was persisted.
* </p>
*
* @return local snapshot of the node
* @throws SnapshotException if local snapshot files exist but are malformed
*/
private Snapshot loadLocalSnapshot() throws SnapshotException {
if (!config.getLocalSnapshotsEnabled()) {
return null;
}
try {
Pair<Indexable, Persistable> pair = localSnapshotsDb.first(LocalSnapshot.class, IntegerIndex.class);
if (pair.hi == null) {
log.info("No Local snapshot was found. Starting to sync from Global Snapshot. "
+ "In case you have a local snapshot present it may be in outdated format. "
+ "See https://dbfiles.iota.org/?prefix=mainnet/iri/");
return null;
}
LocalSnapshot ls = (LocalSnapshot) pair.hi;
log.info("loading local snapshot; ms hash/index: {}/{}, solid entry points: {}, seen milestones: {}, " +
"ledger entries: {}", ls.milestoneHash, ls.milestoneIndex,
ls.solidEntryPoints.size(), ls.seenMilestones.size(), ls.ledgerState.size());
SnapshotState snapshotState = new SnapshotStateImpl(ls.ledgerState);
if (!snapshotState.hasCorrectSupply()) {
throw new SnapshotException("the snapshot state file has an invalid supply");
}
if (!snapshotState.isConsistent()) {
throw new SnapshotException("the snapshot state file is not consistent");
}
SnapshotMetaData snapshotMetaData = new SnapshotMetaDataImpl(ls.milestoneHash, ls.milestoneIndex,
ls.milestoneTimestamp, ls.solidEntryPoints, ls.seenMilestones);
log.info("resumed from local snapshot #" + snapshotMetaData.getIndex() + " ...");
return new SnapshotImpl(snapshotState, snapshotMetaData);
} catch (Exception e) {
throw new SnapshotException("failed to load existing local snapshot data", e);
}
}
/**
* <p>
* Loads the builtin snapshot (last global snapshot) that is embedded in the jar (if a different path is provided it
* can also load from the disk).
* </p>
* <p>
* We first verify the integrity of the snapshot files by checking the signature of the files and then construct
* a {@link Snapshot} from the retrieved information.
* </p>
* <p>
* We add the NULL_HASH as the only solid entry point and an empty list of seen milestones.
* </p>
* @return the builtin snapshot (last global snapshot) that is embedded in the jar
* @throws SnapshotException if anything goes wrong while loading the builtin {@link Snapshot}
*/
private Snapshot loadBuiltInSnapshot() throws SnapshotException {
if (builtinSnapshot != null) {
return builtinSnapshot.clone();
}
try {
if (!config.isTestnet() && !SignedFiles.isFileSignatureValid(
config.getSnapshotFile(),
config.getSnapshotSignatureFile(),
SNAPSHOT_PUBKEY,
SNAPSHOT_PUBKEY_DEPTH,
SNAPSHOT_INDEX
)) {
throw new SnapshotException("the snapshot signature is invalid");
}
} catch (IOException e) {
throw new SnapshotException("failed to validate the signature of the builtin snapshot file", e);
}
SnapshotState snapshotState;
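        // prefer the snapshot state bundled in the JAR; fall back to reading it from disk if the resource is missing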
try {
snapshotState = readSnapshotStateFromJAR(config.getSnapshotFile());
} catch (SnapshotException e) {
snapshotState = readSnapshotStatefromFile(config.getSnapshotFile());
}
if (!snapshotState.hasCorrectSupply()) {
throw new SnapshotException("the snapshot state file has an invalid supply");
}
if (!snapshotState.isConsistent()) {
throw new SnapshotException("the snapshot state file is not consistent");
}
HashMap<Hash, Integer> solidEntryPoints = new HashMap<>();
solidEntryPoints.put(Hash.NULL_HASH, config.getMilestoneStartIndex());
builtinSnapshot = new SnapshotImpl(
snapshotState,
new SnapshotMetaDataImpl(
Hash.NULL_HASH,
config.getMilestoneStartIndex(),
config.getSnapshotTime(),
solidEntryPoints,
new HashMap<>()
)
);
return builtinSnapshot.clone();
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
//region SNAPSHOT STATE RELATED UTILITY METHODS ////////////////////////////////////////////////////////////////////
/**
* <p>
* This method reads the balances from the given file on the disk and creates the corresponding SnapshotState.
* </p>
* <p>
* It creates the corresponding reader and for the file on the given location and passes it on to
* {@link #readSnapshotState(BufferedReader)}.
* </p>
*
* @param snapshotStateFilePath location of the snapshot state file
     * @return the deserialized version of the state file
* @throws SnapshotException if anything goes wrong while reading the state file
*/
private SnapshotState readSnapshotStatefromFile(String snapshotStateFilePath) throws SnapshotException {
try (BufferedReader reader = new BufferedReader(new InputStreamReader(new BufferedInputStream(new FileInputStream(snapshotStateFilePath))))) {
return readSnapshotState(reader);
} catch (IOException e) {
throw new SnapshotException("failed to read the snapshot file at " + snapshotStateFilePath, e);
}
}
/**
* <p>
* This method reads the balances from the given file in the JAR and creates the corresponding SnapshotState.
* </p>
* <p>
* It creates the corresponding reader and for the file on the given location in the JAR and passes it on to
* {@link #readSnapshotState(BufferedReader)}.
* </p>
*
* @param snapshotStateFilePath location of the snapshot state file
     * @return the deserialized version of the state file
* @throws SnapshotException if anything goes wrong while reading the state file
*/
private SnapshotState readSnapshotStateFromJAR(String snapshotStateFilePath) throws SnapshotException {
try (BufferedReader reader = new BufferedReader(new InputStreamReader(new BufferedInputStream(SnapshotProviderImpl.class.getResourceAsStream(snapshotStateFilePath))))) {
return readSnapshotState(reader);
} catch (NullPointerException | IOException e) {
throw new SnapshotException("failed to read the snapshot file from JAR at " + snapshotStateFilePath, e);
}
}
/**
* <p>
* This method reads the balances from the given reader.
* </p>
* <p>
* The format of the input is pairs of "address;balance" separated by newlines. It simply reads the input line by
* line, adding the corresponding values to the map.
* </p>
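     * <p>
     * For illustration, a (shortened, hypothetical) state file could look like:
     * <pre>
     * ADDRESS9AAA...;1000
     * ADDRESS9BBB...;2000
     * </pre>
     * </p>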
*
* @param reader reader allowing us to retrieve the lines of the {@link SnapshotState} file
     * @return the deserialized version of the snapshot state file
* @throws IOException if something went wrong while trying to access the file
* @throws SnapshotException if anything goes wrong while reading the state file
*/
private SnapshotState readSnapshotState(BufferedReader reader) throws IOException, SnapshotException {
Map<Hash, Long> state = new HashMap<>();
String line;
while ((line = reader.readLine()) != null) {
String[] parts = line.split(";", 2);
if (parts.length == 2) {
state.put(HashFactory.ADDRESS.create(parts[0]), Long.valueOf(parts[1]));
} else {
throw new SnapshotException("malformed snapshot state file");
}
}
return new SnapshotStateImpl(state);
}
/**
* <p>
* This method dumps the current state to a file.
* </p>
* <p>
* It is used by local snapshots to persist the in memory states and allow IRI to resume from the local snapshot.
* </p>
*
* @param snapshotState state object that shall be written
* @param snapshotPath location of the file that shall be written
* @throws SnapshotException if anything goes wrong while writing the file
*/
private void writeSnapshotStateToDisk(SnapshotState snapshotState, String snapshotPath) throws SnapshotException {
try {
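            // write one "address;balance" line per non-zero entry, sorted to keep the output deterministic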
Files.write(
Paths.get(snapshotPath),
() -> snapshotState.getBalances().entrySet()
.stream()
.filter(entry -> entry.getValue() != 0)
.<CharSequence>map(entry -> entry.getKey() + ";" + entry.getValue())
.sorted()
.iterator()
);
} catch (IOException e) {
throw new SnapshotException("failed to write the snapshot state file at " + snapshotPath, e);
}
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
//region SNAPSHOT METADATA RELATED UTILITY METHODS /////////////////////////////////////////////////////////////////
/**
* <p>
* This method retrieves the metadata of a snapshot from a file.
* </p>
* <p>
* It is used by local snapshots to determine the relevant information about the saved snapshot.
* </p>
*
* @param snapshotMetaDataFile File object with the path to the snapshot metadata file
* @return SnapshotMetaData instance holding all the relevant details about the snapshot
* @throws SnapshotException if anything goes wrong while reading and parsing the file
*/
private SnapshotMetaData readSnapshotMetaDatafromFile(File snapshotMetaDataFile) throws SnapshotException {
try (BufferedReader reader = new BufferedReader(new InputStreamReader(new BufferedInputStream(
new FileInputStream(snapshotMetaDataFile))))) {
Hash hash = readMilestoneHashFromMetaDataFile(reader);
int index = readMilestoneIndexFromMetaDataFile(reader);
long timestamp = readMilestoneTimestampFromMetaDataFile(reader);
int amountOfSolidEntryPoints = readAmountOfSolidEntryPointsFromMetaDataFile(reader);
int amountOfSeenMilestones = readAmountOfSeenMilestonesFromMetaDataFile(reader);
Map<Hash, Integer> solidEntryPoints = readSolidEntryPointsFromMetaDataFile(reader,
amountOfSolidEntryPoints);
Map<Hash, Integer> seenMilestones = readSeenMilestonesFromMetaDataFile(reader, amountOfSeenMilestones);
return new SnapshotMetaDataImpl(hash, index, timestamp, solidEntryPoints, seenMilestones);
} catch (IOException e) {
throw new SnapshotException("failed to read from the snapshot metadata file at " +
snapshotMetaDataFile.getAbsolutePath(), e);
}
}
/**
* This method reads the transaction hash of the milestone that references the {@link Snapshot} from the metadata
* file.
*
* @param reader reader that is used to read the file
* @return Hash of the milestone transaction that references the {@link Snapshot}
* @throws SnapshotException if anything goes wrong while reading the hash from the file
* @throws IOException if we could not read from the file
*/
private Hash readMilestoneHashFromMetaDataFile(BufferedReader reader) throws SnapshotException, IOException {
String line;
if((line = reader.readLine()) == null) {
throw new SnapshotException("could not read the transaction hash from the metadata file");
}
return HashFactory.TRANSACTION.create(line);
}
/**
* This method reads the milestone index of the milestone that references the {@link Snapshot} from the metadata
* file.
*
* @param reader reader that is used to read the file
* @return milestone index of the milestone that references the {@link Snapshot}
* @throws SnapshotException if anything goes wrong while reading the milestone index from the file
* @throws IOException if we could not read from the file
*/
private int readMilestoneIndexFromMetaDataFile(BufferedReader reader) throws SnapshotException, IOException {
try {
String line;
if ((line = reader.readLine()) == null) {
throw new SnapshotException("could not read the milestone index from the metadata file");
}
return Integer.parseInt(line);
} catch (NumberFormatException e) {
throw new SnapshotException("could not parse the milestone index from the metadata file", e);
}
}
/**
* This method reads the timestamp of the milestone that references the {@link Snapshot} from the metadata file.
*
* @param reader reader that is used to read the file
* @return timestamp of the milestone that references the {@link Snapshot}
* @throws SnapshotException if anything goes wrong while reading the milestone timestamp from the file
* @throws IOException if we could not read from the file
*/
private long readMilestoneTimestampFromMetaDataFile(BufferedReader reader) throws SnapshotException, IOException {
try {
String line;
if ((line = reader.readLine()) == null) {
throw new SnapshotException("could not read the milestone timestamp from the metadata file");
}
return Long.parseLong(line);
} catch (NumberFormatException e) {
throw new SnapshotException("could not parse the milestone timestamp from the metadata file", e);
}
}
/**
* This method reads the amount of solid entry points of the {@link Snapshot} from the metadata file.
*
* @param reader reader that is used to read the file
* @return amount of solid entry points of the {@link Snapshot}
* @throws SnapshotException if anything goes wrong while reading the amount of solid entry points from the file
* @throws IOException if we could not read from the file
*/
private int readAmountOfSolidEntryPointsFromMetaDataFile(BufferedReader reader) throws SnapshotException,
IOException {
try {
String line;
if ((line = reader.readLine()) == null) {
throw new SnapshotException("could not read the amount of solid entry points from the metadata file");
}
return Integer.parseInt(line);
} catch (NumberFormatException e) {
throw new SnapshotException("could not parse the amount of solid entry points from the metadata file", e);
}
}
/**
* This method reads the amount of seen milestones of the {@link Snapshot} from the metadata file.
*
* @param reader reader that is used to read the file
* @return amount of seen milestones of the {@link Snapshot}
* @throws SnapshotException if anything goes wrong while reading the amount of seen milestones from the file
* @throws IOException if we could not read from the file
*/
private int readAmountOfSeenMilestonesFromMetaDataFile(BufferedReader reader) throws SnapshotException,
IOException {
try {
String line;
if ((line = reader.readLine()) == null) {
throw new SnapshotException("could not read the amount of seen milestones from the metadata file");
}
return Integer.parseInt(line);
} catch (NumberFormatException e) {
throw new SnapshotException("could not parse the amount of seen milestones from the metadata file", e);
}
}
/**
* This method reads the solid entry points of the {@link Snapshot} from the metadata file.
*
* @param reader reader that is used to read the file
* @param amountOfSolidEntryPoints the amount of solid entry points we expect
* @return the solid entry points of the {@link Snapshot}
* @throws SnapshotException if anything goes wrong while reading the solid entry points from the file
* @throws IOException if we could not read from the file
*/
private Map<Hash, Integer> readSolidEntryPointsFromMetaDataFile(BufferedReader reader, int amountOfSolidEntryPoints)
throws SnapshotException, IOException {
Map<Hash, Integer> solidEntryPoints = new HashMap<>();
for(int i = 0; i < amountOfSolidEntryPoints; i++) {
String line;
if ((line = reader.readLine()) == null) {
throw new SnapshotException("could not read a solid entry point from the metadata file");
}
String[] parts = line.split(";", 2);
if(parts.length == 2) {
try {
solidEntryPoints.put(HashFactory.TRANSACTION.create(parts[0]), Integer.parseInt(parts[1]));
} catch (NumberFormatException e) {
throw new SnapshotException("could not parse a solid entry point from the metadata file", e);
}
} else {
throw new SnapshotException("could not parse a solid entry point from the metadata file");
}
}
return solidEntryPoints;
}
/**
* This method reads the seen milestones of the {@link Snapshot} from the metadata file.
*
* @param reader reader that is used to read the file
* @param amountOfSeenMilestones the amount of seen milestones we expect
* @return the seen milestones of the {@link Snapshot}
* @throws SnapshotException if anything goes wrong while reading the seen milestones from the file
* @throws IOException if we could not read from the file
*/
private Map<Hash, Integer> readSeenMilestonesFromMetaDataFile(BufferedReader reader, int amountOfSeenMilestones)
throws SnapshotException, IOException {
Map<Hash, Integer> seenMilestones = new HashMap<>();
for(int i = 0; i < amountOfSeenMilestones; i++) {
String line;
if ((line = reader.readLine()) == null) {
throw new SnapshotException("could not read a seen milestone from the metadata file");
}
String[] parts = line.split(";", 2);
if(parts.length == 2) {
try {
seenMilestones.put(HashFactory.TRANSACTION.create(parts[0]), Integer.parseInt(parts[1]));
} catch (NumberFormatException e) {
throw new SnapshotException("could not parse a seen milestone from the metadata file", e);
}
} else {
throw new SnapshotException("could not parse a seen milestone from the metadata file");
}
}
return seenMilestones;
}
/**
* <p>
* This method writes a file containing a serialized version of the metadata object.
* </p>
* <p>
* It can be used to store the current values and read them on a later point in time. It is used by the local
* snapshot manager to generate and maintain the snapshot files.
* </p>
* @param snapshotMetaData metadata object that shall be written
* @param filePath location of the file that shall be written
* @throws SnapshotException if anything goes wrong while writing the file
*/
private void writeSnapshotMetaDataToDisk(SnapshotMetaData snapshotMetaData, String filePath)
throws SnapshotException {
try {
Map<Hash, Integer> solidEntryPoints = snapshotMetaData.getSolidEntryPoints();
Map<Hash, Integer> seenMilestones = snapshotMetaData.getSeenMilestones();
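            // header: hash, index, timestamp and the two entry counts; followed by the actual entries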
Files.write(
Paths.get(filePath),
() -> Stream.concat(
Stream.of(
snapshotMetaData.getHash().toString(),
String.valueOf(snapshotMetaData.getIndex()),
String.valueOf(snapshotMetaData.getTimestamp()),
String.valueOf(solidEntryPoints.size()),
String.valueOf(seenMilestones.size())
),
Stream.concat(
solidEntryPoints.entrySet()
.stream()
.sorted(Map.Entry.comparingByValue())
.<CharSequence>map(entry -> entry.getKey().toString() + ";" + entry.getValue()),
seenMilestones.entrySet()
.stream()
.sorted(Map.Entry.comparingByValue())
.<CharSequence>map(entry -> entry.getKey().toString() + ";" + entry.getValue())
)
).iterator()
);
} catch (IOException e) {
throw new SnapshotException("failed to write snapshot metadata file at " + filePath, e);
}
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
}
| 28,821 | 41.889881 | 177 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotServiceImpl.java
|
package com.iota.iri.service.snapshot.impl;
import com.iota.iri.conf.IotaConfig;
import com.iota.iri.conf.SnapshotConfig;
import com.iota.iri.controllers.ApproveeViewModel;
import com.iota.iri.controllers.MilestoneViewModel;
import com.iota.iri.controllers.StateDiffViewModel;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.service.milestone.MilestoneSolidifier;
import com.iota.iri.service.snapshot.Snapshot;
import com.iota.iri.service.snapshot.SnapshotException;
import com.iota.iri.service.snapshot.SnapshotMetaData;
import com.iota.iri.service.snapshot.SnapshotProvider;
import com.iota.iri.service.snapshot.SnapshotService;
import com.iota.iri.service.transactionpruning.TransactionPruner;
import com.iota.iri.service.transactionpruning.TransactionPruningException;
import com.iota.iri.service.transactionpruning.jobs.MilestonePrunerJob;
import com.iota.iri.service.transactionpruning.jobs.UnconfirmedSubtanglePrunerJob;
import com.iota.iri.storage.Tangle;
import com.iota.iri.utils.dag.DAGHelper;
import com.iota.iri.utils.dag.TraversalException;
import com.iota.iri.utils.log.ProgressLogger;
import com.iota.iri.utils.log.interval.IntervalProgressLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.stream.Collectors;
/**
* <p>
* Creates a service instance that allows us to access the business logic for {@link Snapshot}s.
* </p>
* <p>
* The service instance is stateless and can be shared by multiple other consumers.
* </p>
*/
public class SnapshotServiceImpl implements SnapshotService {
/**
* Logger for this class allowing us to dump debug and status messages.
*/
private static final Logger log = LoggerFactory.getLogger(SnapshotServiceImpl.class);
/**
* Holds the tangle object which acts as a database interface.
*/
private final Tangle tangle;
/**
* Holds the snapshot provider which gives us access to the relevant snapshots.
*/
private final SnapshotProvider snapshotProvider;
/**
* Holds the config with important snapshot specific settings.
*/
private final IotaConfig config;
/**
     * Minimum depth for generating solid entry points, since the coordinator allows attaching to milestones up to
     * 15 milestones in the past. We use 50 just to be on the safe side.
*/
private static final int MIN_LS_DEPTH_MAINNET = 50;
/**
* Implements the snapshot service. See interface for more information.
* @param tangle acts as a database interface.
* @param snapshotProvider gives us access to the relevant snapshots.
* @param config configuration with snapshot specific settings.
*/
public SnapshotServiceImpl(Tangle tangle, SnapshotProvider snapshotProvider, IotaConfig config) {
this.tangle = tangle;
this.snapshotProvider = snapshotProvider;
this.config = config;
}
/**
* {@inheritDoc}
*
* <p>
* To increase the performance of this operation, we do not apply every single milestone separately but first
* accumulate all the necessary changes and then apply it to the snapshot in a single run. This allows us to
* modify its values without having to create a "copy" of the initial state to possibly roll back the changes if
* anything unexpected happens (creating a backup of the state requires a lot of memory).
* </p>
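     * <p>
     * A minimal usage sketch (assuming {@code snapshotProvider} and {@code snapshotService} instances are available):
     * <pre>{@code
     * Snapshot workingCopy = snapshotProvider.getInitialSnapshot().clone();
     * snapshotService.replayMilestones(workingCopy, targetMilestoneIndex);
     * }</pre>
     * </p>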
*/
@Override
public void replayMilestones(Snapshot snapshot, int targetMilestoneIndex) throws SnapshotException {
Map<Hash, Long> balanceChanges = new HashMap<>();
MilestoneViewModel lastAppliedMilestone = null;
try {
for (int currentMilestoneIndex = snapshot.getIndex() + 1; currentMilestoneIndex <= targetMilestoneIndex;
currentMilestoneIndex++) {
MilestoneViewModel currentMilestone = MilestoneViewModel.get(tangle, currentMilestoneIndex);
if (currentMilestone != null) {
StateDiffViewModel stateDiffViewModel = StateDiffViewModel.load(tangle, currentMilestone.getHash());
if(!stateDiffViewModel.isEmpty()) {
stateDiffViewModel.getDiff().forEach((address, change) -> {
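                        // accumulate the per-address deltas; addresses that were not seen yet start from 0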
balanceChanges.compute(address, (k, balance) -> (balance == null ? 0 : balance) + change);
});
}
lastAppliedMilestone = currentMilestone;
} else {
throw new SnapshotException("milestone # " + currentMilestoneIndex + " is missing");
}
}
if (lastAppliedMilestone != null) {
try {
snapshot.lockWrite();
snapshot.applyStateDiff(new SnapshotStateDiffImpl(balanceChanges));
snapshot.setIndex(lastAppliedMilestone.index());
snapshot.setHash(lastAppliedMilestone.getHash());
TransactionViewModel milestoneTransaction = TransactionViewModel.fromHash(tangle,
lastAppliedMilestone.getHash());
if(milestoneTransaction.getType() != TransactionViewModel.PREFILLED_SLOT) {
snapshot.setTimestamp(milestoneTransaction.getTimestamp());
}
} finally {
snapshot.unlockWrite();
}
}
} catch (Exception e) {
throw new SnapshotException("failed to replay the state of the ledger", e);
}
}
/**
* {@inheritDoc}
*/
@Override
public void rollBackMilestones(Snapshot snapshot, int targetMilestoneIndex) throws SnapshotException {
if(targetMilestoneIndex <= snapshot.getInitialIndex() || targetMilestoneIndex > snapshot.getIndex()) {
throw new SnapshotException("invalid milestone index");
}
snapshot.lockWrite();
Snapshot snapshotBeforeChanges = snapshot.clone();
try {
boolean rollbackSuccessful = true;
while (targetMilestoneIndex <= snapshot.getIndex() && rollbackSuccessful) {
rollbackSuccessful = rollbackLastMilestone(tangle, snapshot);
}
if(targetMilestoneIndex < snapshot.getIndex()) {
throw new SnapshotException("failed to reach the target milestone index when rolling back milestones");
}
} catch(SnapshotException e) {
snapshot.update(snapshotBeforeChanges);
throw e;
} finally {
snapshot.unlockWrite();
}
}
/**
* {@inheritDoc}
*/
@Override
public Snapshot takeLocalSnapshot(MilestoneSolidifier milestoneSolidifier, TransactionPruner transactionPruner, int targetMilestoneIndex)
throws SnapshotException {
MilestoneViewModel targetMilestone = determineMilestoneForLocalSnapshot(tangle, snapshotProvider, targetMilestoneIndex);
Snapshot newSnapshot = generateSnapshot(milestoneSolidifier, targetMilestone);
if (transactionPruner != null) {
cleanupExpiredSolidEntryPoints(tangle, snapshotProvider.getInitialSnapshot().getSolidEntryPoints(),
newSnapshot.getSolidEntryPoints(), transactionPruner);
}
persistLocalSnapshot(snapshotProvider, newSnapshot);
return newSnapshot;
}
/**
* {@inheritDoc}
*/
@Override
public void pruneSnapshotData(TransactionPruner transactionPruner, int pruningMilestoneIndex) throws SnapshotException {
if (transactionPruner != null) {
cleanupOldData(config, transactionPruner, pruningMilestoneIndex);
}
}
/**
* {@inheritDoc}
*/
@Override
public Snapshot generateSnapshot(MilestoneSolidifier milestoneSolidifier, MilestoneViewModel targetMilestone)
throws SnapshotException {
if (targetMilestone == null) {
throw new SnapshotException("the target milestone must not be null");
} else if (targetMilestone.index() > snapshotProvider.getLatestSnapshot().getIndex()) {
throw new SnapshotException("the snapshot target " + targetMilestone + " was not solidified yet");
} else if (targetMilestone.index() < snapshotProvider.getInitialSnapshot().getIndex()) {
throw new SnapshotException("the snapshot target " + targetMilestone + " is too old");
}
snapshotProvider.getInitialSnapshot().lockRead();
snapshotProvider.getLatestSnapshot().lockRead();
Snapshot snapshot;
try {
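            // start from whichever snapshot is closer to the target: replay forward from the initial snapshot or
            // roll back from the latest one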
int distanceFromInitialSnapshot = Math.abs(snapshotProvider.getInitialSnapshot().getIndex() -
targetMilestone.index());
int distanceFromLatestSnapshot = Math.abs(snapshotProvider.getLatestSnapshot().getIndex() -
targetMilestone.index());
if (distanceFromInitialSnapshot <= distanceFromLatestSnapshot) {
snapshot = snapshotProvider.getInitialSnapshot().clone();
replayMilestones(snapshot, targetMilestone.index());
} else {
snapshot = snapshotProvider.getLatestSnapshot().clone();
rollBackMilestones(snapshot, targetMilestone.index() + 1);
}
} finally {
snapshotProvider.getInitialSnapshot().unlockRead();
snapshotProvider.getLatestSnapshot().unlockRead();
}
snapshot.setSolidEntryPoints(generateSolidEntryPoints(targetMilestone));
snapshot.setSeenMilestones(generateSeenMilestones(milestoneSolidifier, targetMilestone));
return snapshot;
}
/**
* {@inheritDoc}
*/
@Override
public Map<Hash, Integer> generateSolidEntryPoints(MilestoneViewModel targetMilestone) throws SnapshotException {
Map<Hash, Integer> solidEntryPoints = new HashMap<>();
solidEntryPoints.put(Hash.NULL_HASH, targetMilestone.index());
solidEntryPoints.put(targetMilestone.getHash(), targetMilestone.index());
processOldSolidEntryPoints(tangle, snapshotProvider, targetMilestone, solidEntryPoints);
processNewSolidEntryPoints(tangle, snapshotProvider, targetMilestone, solidEntryPoints);
return solidEntryPoints;
}
/**
* {@inheritDoc}
*/
@Override
public Map<Hash, Integer> generateSeenMilestones(MilestoneSolidifier milestoneSolidifier,
MilestoneViewModel targetMilestone) throws SnapshotException {
ProgressLogger progressLogger = new IntervalProgressLogger(
"Taking local snapshot [processing seen milestones]", log)
.start(config.getLocalSnapshotsDepth());
Map<Hash, Integer> seenMilestones = new HashMap<>();
try {
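            // walk forward milestone by milestone, from the snapshot target up to the latest known milestone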
MilestoneViewModel seenMilestone = targetMilestone;
while ((seenMilestone = MilestoneViewModel.findClosestNextMilestone(tangle, seenMilestone.index(),
milestoneSolidifier.getLatestMilestoneIndex())) != null) {
seenMilestones.put(seenMilestone.getHash(), seenMilestone.index());
progressLogger.progress();
}
} catch (Exception e) {
progressLogger.abort(e);
throw new SnapshotException("could not generate the set of seen milestones", e);
}
progressLogger.finish();
return seenMilestones;
}
/**
* <p>
* This method reverts the changes caused by the last milestone that was applied to this snapshot.
* </p>
* <p>
* It first checks if we didn't arrive at the initial index yet and then reverts the balance changes that were
* caused by the last milestone. Then it checks if any milestones were skipped while applying the last milestone and
* determines the {@link SnapshotMetaData} that this Snapshot had before and restores it.
* </p>
* @param tangle Tangle object which acts as a database interface
* @return true if the snapshot was rolled back or false otherwise
* @throws SnapshotException if anything goes wrong while accessing the database
*/
private boolean rollbackLastMilestone(Tangle tangle, Snapshot snapshot) throws SnapshotException {
if (snapshot.getIndex() == snapshot.getInitialIndex()) {
return false;
}
snapshot.lockWrite();
try {
// revert the last balance changes
StateDiffViewModel stateDiffViewModel = StateDiffViewModel.load(tangle, snapshot.getHash());
if (!stateDiffViewModel.isEmpty()) {
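                // negate every balance change of the applied diff so that applying it undoes the milestone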
SnapshotStateDiffImpl snapshotStateDiff = new SnapshotStateDiffImpl(
stateDiffViewModel.getDiff().entrySet().stream().map(
hashLongEntry -> new HashMap.SimpleEntry<>(
hashLongEntry.getKey(), -1 * hashLongEntry.getValue()
)
).collect(
Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)
)
);
if (!snapshotStateDiff.isConsistent()) {
throw new SnapshotException("the StateDiff belonging to milestone #" + snapshot.getIndex() +
" (" + snapshot.getHash() + ") is inconsistent");
} else if (!snapshot.patchedState(snapshotStateDiff).isConsistent()) {
throw new SnapshotException("failed to apply patch belonging to milestone #" + snapshot.getIndex() +
" (" + snapshot.getHash() + ")");
}
snapshot.applyStateDiff(snapshotStateDiff);
}
// jump skipped milestones
int currentIndex = snapshot.getIndex() - 1;
while (snapshot.removeSkippedMilestone(currentIndex)) {
currentIndex--;
}
// check if we arrived at the start
if (currentIndex <= snapshot.getInitialIndex()) {
snapshot.setIndex(snapshot.getInitialIndex());
snapshot.setHash(snapshot.getInitialHash());
snapshot.setTimestamp(snapshot.getInitialTimestamp());
return true;
}
// otherwise set metadata of the previous milestone
MilestoneViewModel currentMilestone = MilestoneViewModel.get(tangle, currentIndex);
snapshot.setIndex(currentMilestone.index());
snapshot.setHash(currentMilestone.getHash());
snapshot.setTimestamp(TransactionViewModel.fromHash(tangle, currentMilestone.getHash()).getTimestamp());
return true;
} catch (Exception e) {
throw new SnapshotException("failed to rollback last milestone", e);
} finally {
snapshot.unlockWrite();
}
}
/**
* <p>
* This method determines the milestone that shall be used for the local snapshot.
* </p>
* <p>
     * It determines the milestone by finding the closest previous milestone in the database.
* </p>
*
* @param tangle Tangle object which acts as a database interface
* @param snapshotProvider data provider for the {@link Snapshot}s that are relevant for the node
* @param lowestIndex the determined index of the lowest milestone we can snapshot
* @return the target milestone for the local snapshot
* @throws SnapshotException if anything goes wrong while determining the target milestone for the local snapshot
*/
private MilestoneViewModel determineMilestoneForLocalSnapshot(Tangle tangle, SnapshotProvider snapshotProvider,
int lowestIndex) throws SnapshotException {
MilestoneViewModel targetMilestone;
try {
targetMilestone = MilestoneViewModel.findClosestPrevMilestone(tangle, lowestIndex,
snapshotProvider.getInitialSnapshot().getIndex());
} catch (Exception e) {
throw new SnapshotException("could not load the target milestone", e);
}
if (targetMilestone == null) {
throw new SnapshotException("missing milestone with an index of " + lowestIndex + " or lower");
}
return targetMilestone;
}
/**
* <p>
* This method creates {@link com.iota.iri.service.transactionpruning.TransactionPrunerJob}s for the expired solid
* entry points, which removes the unconfirmed subtangles branching off of these transactions.
* </p>
* <p>
* We only clean up these subtangles if the transaction that they are branching off has been cleaned up already by a
* {@link MilestonePrunerJob}. If the corresponding milestone has not been processed we leave them in the database
* so we give the node a little bit more time to "use" these transaction for references from future milestones. This
* is used to correctly reflect the {@link SnapshotConfig#getLocalSnapshotsPruningDelay()}, where we keep old data
* prior to a snapshot.
* </p>
*
* @param tangle Tangle object which acts as a database interface
* @param oldSolidEntryPoints solid entry points of the current initial {@link Snapshot}
* @param newSolidEntryPoints solid entry points of the new initial {@link Snapshot}
     * @param transactionPruner manager for the pruning jobs that takes care of cleaning up the old data
*/
private void cleanupExpiredSolidEntryPoints(Tangle tangle, Map<Hash, Integer> oldSolidEntryPoints,
Map<Hash, Integer> newSolidEntryPoints, TransactionPruner transactionPruner) {
oldSolidEntryPoints.forEach((transactionHash, milestoneIndex) -> {
if (!newSolidEntryPoints.containsKey(transactionHash)) {
try {
// only clean up if the corresponding milestone transaction was cleaned up already -> otherwise
// let the MilestonePrunerJob do this
if (TransactionViewModel.fromHash(tangle, transactionHash).getType() ==
TransactionViewModel.PREFILLED_SLOT) {
transactionPruner.addJob(new UnconfirmedSubtanglePrunerJob(transactionHash));
}
} catch (Exception e) {
log.error("failed to add cleanup job to the transaction pruner", e);
}
}
});
}
/**
* <p>
* This method creates the {@link com.iota.iri.service.transactionpruning.TransactionPrunerJob}s that are
* responsible for removing the old data.
* </p>
* <p>
* It first calculates the range of milestones that shall be deleted and then issues a {@link MilestonePrunerJob}
* for this range (if it is not empty).
* </p>
*
* @param config important snapshot related configuration parameters
     * @param transactionPruner manager for the pruning jobs that takes care of cleaning up the old data
* @param targetIndex target milestone we use to prune anything older
* @throws SnapshotException if anything goes wrong while issuing the cleanup jobs
*/
private void cleanupOldData(SnapshotConfig config, TransactionPruner transactionPruner,
int targetIndex) throws SnapshotException {
int startingIndex = getStartingIndex(config);
try {
if (targetIndex >= startingIndex) {
transactionPruner.addJob(new MilestonePrunerJob(startingIndex, targetIndex));
}
} catch (TransactionPruningException e) {
throw new SnapshotException("could not add the cleanup job to the transaction pruner", e);
}
}
private int getStartingIndex(SnapshotConfig config) throws SnapshotException {
MilestoneViewModel milestonevm = null;
try {
milestonevm = MilestoneViewModel.first(tangle);
} catch (Exception e) {
throw new SnapshotException("Can't load the first milestone from the db", e);
}
return milestonevm != null ? milestonevm.index() : config.getMilestoneStartIndex() + 1;
}
/**
* <p>
* This method persists the local snapshot on the disk and updates the instances used by the
* {@link SnapshotProvider}.
* </p>
* <p>
* It first writes the files to the disk and then updates the two {@link Snapshot}s accordingly.
* </p>
*
* @param snapshotProvider data provider for the {@link Snapshot}s that are relevant for the node
* @param newSnapshot Snapshot that shall be persisted
* @throws SnapshotException if anything goes wrong while persisting the snapshot
*/
private void persistLocalSnapshot(SnapshotProvider snapshotProvider, Snapshot newSnapshot)
throws SnapshotException {
snapshotProvider.persistSnapshot(newSnapshot);
snapshotProvider.getLatestSnapshot().lockWrite();
snapshotProvider.getLatestSnapshot().setInitialHash(newSnapshot.getHash());
snapshotProvider.getLatestSnapshot().setInitialIndex(newSnapshot.getIndex());
snapshotProvider.getLatestSnapshot().setInitialTimestamp(newSnapshot.getTimestamp());
snapshotProvider.getLatestSnapshot().unlockWrite();
snapshotProvider.getInitialSnapshot().update(newSnapshot);
}
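    /**
     * Checks whether the given old solid entry point is still within the solid entry point depth window of the
     * target milestone (i.e. recent enough to be kept in the new snapshot).
     */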
private boolean isAboveMinMilestone(int fromIndex, int toIndex) {
return toIndex - fromIndex <= getSepDepth();
}
/**
* <p>
* This method analyzes the old solid entry points and determines if they are still not orphaned.
* </p>
* <p>
* It simply iterates through the old solid entry points and checks them one by one. If an old solid entry point is
* found to still be relevant it is added to the passed in map.
* </p>
*
* @see #processNewSolidEntryPoints to understand the definition for solid entry points
* @param tangle Tangle object which acts as a database interface
* @param snapshotProvider data provider for the {@link Snapshot}s that are relevant for the node
* @param targetMilestone milestone that is used to generate the solid entry points
* @param solidEntryPoints map that is used to collect the solid entry points
*/
private void processOldSolidEntryPoints(Tangle tangle, SnapshotProvider snapshotProvider,
MilestoneViewModel targetMilestone, Map<Hash, Integer> solidEntryPoints) throws SnapshotException {
ProgressLogger progressLogger = new IntervalProgressLogger(
"Taking local snapshot [analyzing old solid entry points]", log)
.start(snapshotProvider.getInitialSnapshot().getSolidEntryPoints().size());
try {
Snapshot initialSnapshot = snapshotProvider.getInitialSnapshot();
Map<Hash, Integer> orgSolidEntryPoints = initialSnapshot.getSolidEntryPoints();
for (Map.Entry<Hash, Integer> solidPoint : orgSolidEntryPoints.entrySet()) {
Hash hash = solidPoint.getKey();
int milestoneIndex = solidPoint.getValue();
if (!Hash.NULL_HASH.equals(hash) && isAboveMinMilestone(milestoneIndex, targetMilestone.index())) {
TransactionViewModel tvm = TransactionViewModel.fromHash(tangle, hash);
addTailsToSolidEntryPoints(milestoneIndex, solidEntryPoints, tvm);
solidEntryPoints.put(hash, milestoneIndex);
}
progressLogger.progress();
}
} catch (Exception e) {
throw new SnapshotException(
"Couldn't process old solid entry point for target milestone " + targetMilestone.index(), e);
} finally {
progressLogger.finish();
}
}
/**
* <p>
* This method retrieves the new solid entry points of the snapshot reference given by the target milestone.
* </p>
* <p>
     * A transaction is considered a solid entry point if it is a bundle tail of a non-orphaned transaction that was
     * approved by a milestone that is above the target milestone.
     *
     * It iterates over all relevant unprocessed milestones and analyzes their directly and indirectly approved
     * transactions. Every transaction is checked for being a solid entry point and, when required, added to the
     * passed in {@code solidEntryPoints} map.
* </p>
*
* @param tangle Tangle object which acts as a database interface
* @param snapshotProvider data provider for the {@link Snapshot}s that are relevant for the node
* @param targetMilestone milestone that is used to generate the solid entry points
* @param solidEntryPoints map that is used to collect the solid entry points
* @throws SnapshotException if anything goes wrong while determining the solid entry points
* @see #getSolidEntryPoints(int, ProgressLogger)
*/
private void processNewSolidEntryPoints(Tangle tangle, SnapshotProvider snapshotProvider,
MilestoneViewModel targetMilestone, Map<Hash, Integer> solidEntryPoints) throws SnapshotException {
ProgressLogger progressLogger = new IntervalProgressLogger(
"Taking local snapshot [generating solid entry points]", log);
try {
solidEntryPoints.putAll(getSolidEntryPoints(targetMilestone.index(), progressLogger));
progressLogger.finish();
} catch (Exception e) {
progressLogger.abort(e);
throw new SnapshotException("could not generate the solid entry points for " + targetMilestone, e);
}
}
/**
     * Generates entry points based on the target index, going back down to the maximum depth of the node
     *
     * @param targetIndex The milestone index up to which we generate entry points.
     * @param progressLogger The logger we use to write the progress of the entry point generation
     * @return a map of entry points, or <code>null</code> when a milestone in the scanned range could not be loaded
     * @throws Exception When we fail to get entry points due to errors generally caused by db interaction, or when
     *                   the thread gets interrupted
*/
private Map<Hash, Integer> getSolidEntryPoints(int targetIndex, ProgressLogger progressLogger) throws Exception {
Map<Hash, Integer> solidEntryPoints = new HashMap<>();
solidEntryPoints.put(Hash.NULL_HASH, targetIndex);
log.info("Generating entrypoints for {}", targetIndex);
int sepDepth = getSepDepth();
        // Go back a bit below the milestone. Limited to maxDepth or genesis.
        int startIndex = Math.max(snapshotProvider.getInitialSnapshot().getIndex(), targetIndex - sepDepth) + 1;
        // the + 1 is needed because we cannot start at the last snapshot index itself (which could even be 0)
        progressLogger.start(targetIndex - startIndex + 1);
// Iterate from a reasonable old milestone to the target index to check for solid entry points
for (int milestoneIndex = startIndex; milestoneIndex <= targetIndex; milestoneIndex++) {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException();
}
MilestoneViewModel milestone = MilestoneViewModel.get(tangle, milestoneIndex);
if (milestone == null) {
log.warn("Failed to find milestone {} during entry point analyzation", milestoneIndex);
return null;
}
List<Hash> approvees = getMilestoneApprovees(milestoneIndex, milestone);
for (Hash approvee : approvees) {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException();
}
if (isSolidEntryPoint(approvee, targetIndex)) {
// A solid entry point should only be a tail transaction, otherwise the whole bundle can't be reproduced with a snapshot file
TransactionViewModel tvm = TransactionViewModel.fromHash(tangle, approvee);
addTailsToSolidEntryPoints(milestoneIndex, solidEntryPoints, tvm);
}
}
progressLogger.progress();
}
return solidEntryPoints;
}
/**
* Calculates minimum solid entrypoint depth based on network and maxDepth
*
* @return The amount of ms we go back under target snapshot for generation of solid entrypoints
*/
private int getSepDepth() {
return config.isTestnet() ? config.getMaxDepth() : Math.min(MIN_LS_DEPTH_MAINNET, config.getMaxDepth());
}
/**
* isSolidEntryPoint checks whether any direct approver of the given transaction was confirmed
* by a milestone which is above the target milestone.
*
* @param txHash The hash we check as an entrypoint
     * @param targetIndex the milestone index of the local snapshot target
* @return if the transaction is considered a solid entrypoint
* @throws Exception on db error
*/
private boolean isSolidEntryPoint(Hash txHash, int targetIndex) throws Exception {
ApproveeViewModel approvers = ApproveeViewModel.load(tangle, txHash);
if (approvers.getHashes().isEmpty()) {
return false;
}
for (Hash approver : approvers.getHashes()) {
TransactionViewModel tvm = TransactionViewModel.fromHash(tangle, approver);
if (tvm != null && tvm.snapshotIndex() > targetIndex) {
// confirmed by a later milestone than targetIndex => solidEntryPoint
return true;
}
}
return false;
}
    /**
     * getMilestoneApprovees traverses a milestone and collects all transactions that were
     * confirmed by exactly that milestone
     *
     * @param milestoneIndex index of the milestone whose confirmed transactions are collected
     * @param milestone the milestone whose cone is traversed
     * @return the hashes of all transactions confirmed by the given milestone
     * @throws TraversalException if anything goes wrong while traversing the approvees
     */
private List<Hash> getMilestoneApprovees(int milestoneIndex, MilestoneViewModel milestone) throws TraversalException {
List<Hash> approvees = new LinkedList<>();
DAGHelper.get(tangle).traverseApprovees(milestone.getHash(),
currentTransaction -> currentTransaction.snapshotIndex() == milestoneIndex,
currentTransaction -> {
approvees.add(currentTransaction.getHash());
});
return approvees;
}
private void addTailsToSolidEntryPoints(int milestoneIndex, Map<Hash, Integer> solidEntryPoints,
TransactionViewModel currentTransaction) throws TraversalException {
// if tail
if (currentTransaction.getCurrentIndex() == 0) {
solidEntryPoints.put(currentTransaction.getHash(), milestoneIndex);
} else {
Set<? extends Hash> tails = DAGHelper.get(tangle).findTails(currentTransaction);
tails.forEach(tail -> solidEntryPoints.put(tail, milestoneIndex));
}
}
}
| 30,935 | 43.705202 | 145 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotStateDiffImpl.java
|
package com.iota.iri.service.snapshot.impl;
import com.iota.iri.model.Hash;
import com.iota.iri.service.snapshot.SnapshotState;
import com.iota.iri.service.snapshot.SnapshotStateDiff;
import java.util.HashMap;
import java.util.Map;
/**
* Implements the basic contract of the {@link SnapshotStateDiff} interface.
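 *
 * <p>
 * A consistent diff sums to zero, e.g. (with hypothetical {@code addressA} / {@code addressB} hashes):
 * <pre>{@code
 * Map<Hash, Long> balanceChanges = new HashMap<>();
 * balanceChanges.put(addressA, -100L);
 * balanceChanges.put(addressB, 100L);
 * new SnapshotStateDiffImpl(balanceChanges).isConsistent(); // true
 * }</pre>
 * </p>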
*/
public class SnapshotStateDiffImpl implements SnapshotStateDiff {
/**
* Holds the addresses associated to their corresponding balance change.
*/
private final Map<Hash, Long> balanceChanges;
/**
* Creates an object that can be used to patch the {@link SnapshotState} and examine the consistency of the changes.
*
* @param balanceChanges map with the addresses and their balance changes
*/
public SnapshotStateDiffImpl(Map<Hash, Long> balanceChanges) {
this.balanceChanges = balanceChanges;
}
/**
* Creates a deep clone of the passed in {@link SnapshotStateDiff}.
*
* @param snapshotStateDiff object that shall be cloned
*/
public SnapshotStateDiffImpl(SnapshotStateDiff snapshotStateDiff) {
this(snapshotStateDiff.getBalanceChanges());
}
/**
* {@inheritDoc}
*/
@Override
public Map<Hash, Long> getBalanceChanges() {
return new HashMap<>(balanceChanges);
}
/**
* {@inheritDoc}
*/
@Override
public boolean isConsistent() {
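        // a diff is consistent when all balance changes sum to exactly zero (value only moves between addresses)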
return balanceChanges.entrySet()
.stream()
.map(Map.Entry::getValue)
.reduce(Math::addExact)
.orElse(0L)
.equals(0L);
}
}
| 1,613 | 26.827586 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotStateImpl.java
|
package com.iota.iri.service.snapshot.impl;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.service.snapshot.SnapshotException;
import com.iota.iri.service.snapshot.SnapshotState;
import com.iota.iri.service.snapshot.SnapshotStateDiff;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
/**
* Implements the basic contract of the {@link SnapshotState} interface.
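 * <p>
 * A minimal sketch of how a differential state is typically examined before being applied (assumes existing
 * {@code state} and {@code diff} instances):
 * </p>
 * <pre>{@code
 * SnapshotState patched = state.patchedState(diff);
 * if (patched.isConsistent()) {
 *     state.applyStateDiff(diff); // safe to apply: no address balance turns negative
 * }
 * }</pre>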
*/
public class SnapshotStateImpl implements SnapshotState {
/**
* Logger for this class (used to emit debug messages).
*/
private static final Logger log = LoggerFactory.getLogger(SnapshotStateImpl.class);
/**
* Holds the balances of the addresses.
*/
private final Map<Hash, Long> balances;
/**
* Creates a deep clone of the passed in {@link SnapshotState}.
*
* @param snapshotState the object that shall be cloned
*/
public SnapshotStateImpl(SnapshotState snapshotState) {
this(snapshotState.getBalances());
}
/**
* Creates a {@link SnapshotState} from the passed in {@link Map} by storing the mapping in its private property.
*
* While most of the other methods are public, this constructor is protected since we do not want to allow the
     * manual creation of {@link SnapshotState} instances outside of the snapshot logic.
*
* @param balances map with the addresses associated to their balance
*/
protected SnapshotStateImpl(Map<Hash, Long> balances) {
this.balances = balances;
}
/**
* {@inheritDoc}
*/
@Override
public Long getBalance(Hash address) {
return this.balances.get(address);
}
/**
* {@inheritDoc}
*/
@Override
public Map<Hash, Long> getBalances() {
return new HashMap<>(balances);
}
/**
* {@inheritDoc}
*/
@Override
public boolean isConsistent() {
return getInconsistentAddresses().isEmpty();
}
/**
* {@inheritDoc}
*/
@Override
public boolean hasCorrectSupply() {
long supply = balances.values()
.stream()
.reduce(Math::addExact)
.orElse(Long.MAX_VALUE);
return supply == TransactionViewModel.SUPPLY;
}
/**
* {@inheritDoc}
*/
@Override
public void update(SnapshotState newState) {
balances.clear();
balances.putAll(newState.getBalances());
}
/**
* {@inheritDoc}
*/
@Override
public void applyStateDiff(SnapshotStateDiff diff) throws SnapshotException {
if (!diff.isConsistent()) {
throw new SnapshotException("cannot apply an inconsistent SnapshotStateDiff");
}
        diff.getBalanceChanges().forEach((addressHash, balance) -> {
            // add the change to an existing balance, or create a new entry if the address is unknown
            if (balances.computeIfPresent(addressHash, (hash, oldBalance) -> balance + oldBalance) == null) {
                balances.putIfAbsent(addressHash, balance);
            }
            // drop addresses whose balance reaches zero to keep the state compact
            if (balances.get(addressHash) == 0) {
                balances.remove(addressHash);
            }
        });
}
/**
* {@inheritDoc}
*/
@Override
public SnapshotState patchedState(SnapshotStateDiff snapshotStateDiff) {
Map<Hash, Long> patchedBalances = snapshotStateDiff.getBalanceChanges()
.entrySet()
.stream()
                .map(hashLongEntry -> new AbstractMap.SimpleEntry<>(hashLongEntry.getKey(),
balances.getOrDefault(hashLongEntry.getKey(), 0L) + hashLongEntry.getValue()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
return new SnapshotStateImpl(patchedBalances);
}
@Override
public int hashCode() {
return Objects.hash(getClass(), balances);
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj == null || !getClass().equals(obj.getClass())) {
return false;
}
return Objects.equals(balances, ((SnapshotStateImpl) obj).balances);
}
/**
* Returns all addresses that have a negative balance.
*
* While this should never happen with the state belonging to the snapshot itself, it can still happen for the
* differential states that are getting created by {@link #patchedState(SnapshotStateDiff)} for the exact reason of
* checking their consistency.
*
* @return a map of the inconsistent addresses (negative balance) and their actual balance
*/
private Map<Hash, Long> getInconsistentAddresses() {
HashMap<Hash, Long> result = new HashMap<>();
balances.forEach((key, value) -> {
if (value < 0) {
log.debug("negative value for address {}: {}", key, value);
result.put(key, value);
}
});
return result;
}
}
| 5,025 | 28.391813 | 119 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/spentaddresses/SpentAddressesException.java
|
package com.iota.iri.service.spentaddresses;
/**
* This class is used to wrap exceptions that are specific to the spent addresses logic.
*
 * It allows us to distinguish between the different kinds of errors that can happen during the execution of the code.
*/
public class SpentAddressesException extends Exception {
/**
* Constructor of the exception which allows us to provide a specific error message and the cause of the error.
*
* @param message reason why this error occurred
* @param cause wrapped exception that caused this error
*/
public SpentAddressesException(String message, Throwable cause) {
super(message, cause);
}
/**
* Constructor of the exception which allows us to provide a specific error message without having an underlying
* cause.
*
* @param message reason why this error occurred
*/
public SpentAddressesException(String message) {
super(message);
}
/**
* Constructor of the exception which allows us to wrap the underlying cause of the error without providing a
* specific reason.
*
* @param cause wrapped exception that caused this error
*/
public SpentAddressesException(Throwable cause) {
super(cause);
}
}
| 1,277 | 31.769231 | 116 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/spentaddresses/SpentAddressesProvider.java
|
package com.iota.iri.service.spentaddresses;
import com.iota.iri.model.Hash;
import java.util.Collection;
import java.util.List;
/**
* Find, mark and store spent addresses
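 * <p>
 * A minimal usage sketch (assumes an initialized {@code spentAddressesProvider} instance):
 * </p>
 * <pre>{@code
 * if (!spentAddressesProvider.containsAddress(address)) {
 *     spentAddressesProvider.saveAddress(address); // mark the address as spent
 * }
 * }</pre>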
*/
public interface SpentAddressesProvider {
/**
* Checks if this address hash has been spent from
*
* @param addressHash The address to check for
* @return <code>true</code> if it is, else <code>false</code>
* @throws SpentAddressesException If the provider fails to check the address
*/
boolean containsAddress(Hash addressHash) throws SpentAddressesException;
/**
* Mark an address as spent.
*
* @param addressHash the address which we want to mark.
* @throws SpentAddressesException If the provider fails to add the address
*/
void saveAddress(Hash addressHash) throws SpentAddressesException;
/**
* Mark all addresses as spent.
*
* @param addressHashes The addresses we want to mark
* @throws SpentAddressesException If the provider fails to add an address
*/
void saveAddressesBatch(Collection<Hash> addressHashes) throws SpentAddressesException;
/**
* Loads all spent addresses we know of in a collection
*
* @return The spent addresses
     * @throws SpentAddressesException If the provider fails to read
*/
//used by IXI
List<Hash> getAllAddresses();
/**
* Starts the SpentAddressesProvider by reading the previous spent addresses from files.
* @param assertSpentAddressesExistence a flag that forces an assertion that
* we have spent addresses data at startup
*
* @throws SpentAddressesException if we failed to create a file at the designated location
*/
void init(boolean assertSpentAddressesExistence) throws SpentAddressesException;
}
| 1,821 | 31.535714 | 95 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/spentaddresses/SpentAddressesService.java
|
package com.iota.iri.service.spentaddresses;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
import java.util.Collection;
/**
*
* Check and calculate spent addresses
*
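 * A minimal usage sketch (assumes a wired-up {@code spentAddressesService} instance):
 * <pre>{@code
 * if (spentAddressesService.wasAddressSpentFrom(address)) {
 *     // the address should not be used as a deposit address again
 * }
 * }</pre>
 *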
*/
public interface SpentAddressesService {
/**
* Checks whether the address is associated with a valid signed output
*
* @param addressHash the address in question
* @return <code>true</code> if the address was spent from, else <code>false</code>
     * @throws SpentAddressesException if the check fails
*/
boolean wasAddressSpentFrom(Hash addressHash) throws SpentAddressesException;
/**
     * Persists all verifiably spent addresses from a given list of transactions
     *
     * @param transactions transactions to obtain spends from
     * @throws SpentAddressesException if persisting the spent addresses fails
*/
void persistSpentAddresses(Collection<TransactionViewModel> transactions) throws SpentAddressesException;
/**
* Persists the spent addresses of all pre-verified valid transactions in an asynchronous manner
*
* @param transactions <b>Transactions that have their signatures verified beforehand</b>.
* Non spent transactions will be filtered out when persisting
*/
void persistValidatedSpentAddressesAsync(Collection<TransactionViewModel> transactions);
}
| 1,325 | 32.15 | 109 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/spentaddresses/impl/SpentAddressesProviderImpl.java
|
package com.iota.iri.service.spentaddresses.impl;
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;
import com.iota.iri.conf.IotaConfig;
import com.iota.iri.conf.SnapshotConfig;
import com.iota.iri.model.AddressHash;
import com.iota.iri.model.Hash;
import com.iota.iri.model.HashFactory;
import com.iota.iri.model.persistables.SpentAddress;
import com.iota.iri.service.spentaddresses.SpentAddressesException;
import com.iota.iri.service.spentaddresses.SpentAddressesProvider;
import com.iota.iri.storage.Tangle;
import com.iota.iri.storage.Indexable;
import com.iota.iri.storage.Persistable;
import com.iota.iri.storage.LocalSnapshotsPersistenceProvider;
import com.iota.iri.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
*
* Implementation of <tt>SpentAddressesProvider</tt>.
* Addresses are saved/found on the {@link Tangle}.
* The folder location is provided by {@link IotaConfig#getLocalSnapshotsDbPath()}
*
*/
public class SpentAddressesProviderImpl implements SpentAddressesProvider {
private static final Logger log = LoggerFactory.getLogger(SpentAddressesProvider.class);
private final SnapshotConfig config;
private LocalSnapshotsPersistenceProvider localSnapshotsPersistenceProvider;
/**
* Implements the spent addresses provider interface.
     * @param configuration The snapshot configuration used for file location
     * @param localSnapshotsPersistenceProvider persistence provider used to store and load the spent addresses
*/
public SpentAddressesProviderImpl(SnapshotConfig configuration, LocalSnapshotsPersistenceProvider localSnapshotsPersistenceProvider) {
this.config = configuration;
this.localSnapshotsPersistenceProvider = localSnapshotsPersistenceProvider;
}
/**
* Starts the SpentAddressesProvider by reading the previous spent addresses from files.
     * @param assertSpentAddressesExistence a flag that forces an assertion that we have spent addresses data at startup
     * @throws SpentAddressesException if we failed to create a file at the designated location
*/
public void init(boolean assertSpentAddressesExistence) throws SpentAddressesException {
try {
if (assertSpentAddressesExistence && !doSpentAddressesExist(localSnapshotsPersistenceProvider)) {
log.error("Expecting to start with a localsnapshots-db containing spent addresses when initializing " +
"from a local snapshot. Shutting down now");
//explicitly exiting rather than throwing an exception
System.exit(1);
}
readPreviousEpochsSpentAddresses();
} catch (Exception e) {
throw new SpentAddressesException("There is a problem with accessing stored spent addresses", e);
}
}
private boolean doSpentAddressesExist(LocalSnapshotsPersistenceProvider provider) throws Exception {
Pair<Indexable, Persistable> first = provider.first(SpentAddress.class, AddressHash.class);
return first.hi != null && ((SpentAddress) first.hi).exists();
}
private void readPreviousEpochsSpentAddresses() throws SpentAddressesException {
if (config.isTestnet()) {
return;
}
for (String previousEpochsSpentAddressesFile : config.getPreviousEpochSpentAddressesFiles().split(" ")) {
readSpentAddressesFromStream(
SpentAddressesProviderImpl.class.getResourceAsStream(previousEpochsSpentAddressesFile));
}
}
private void readSpentAddressesFromStream(InputStream in) throws SpentAddressesException {
try (BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
String line;
while ((line = reader.readLine()) != null) {
saveAddress(HashFactory.ADDRESS.create(line));
}
} catch (Exception e) {
throw new SpentAddressesException("Failed to read or save spent address", e);
}
}
@Override
public boolean containsAddress(Hash addressHash) throws SpentAddressesException {
try {
return localSnapshotsPersistenceProvider.exists(SpentAddress.class, addressHash);
} catch (Exception e) {
throw new SpentAddressesException(e);
}
}
@Override
public void saveAddress(Hash addressHash) throws SpentAddressesException {
try {
localSnapshotsPersistenceProvider.save(new SpentAddress(), addressHash);
} catch (Exception e) {
throw new SpentAddressesException(e);
}
}
@Override
    public void saveAddressesBatch(Collection<Hash> addressHashes) throws SpentAddressesException {
        try {
            // Its bytes are always new byte[0], therefore identical in storage
            SpentAddress spentAddressModel = new SpentAddress();
            localSnapshotsPersistenceProvider.saveBatch(addressHashes
.stream()
.map(address -> new Pair<Indexable, Persistable>(address, spentAddressModel))
.collect(Collectors.toList())
);
} catch (Exception e) {
throw new SpentAddressesException(e);
}
}
@Override
public List<Hash> getAllAddresses() {
List<Hash> addresses = new ArrayList<>();
for (byte[] bytes : localSnapshotsPersistenceProvider.loadAllKeysFromTable(SpentAddress.class)) {
addresses.add(HashFactory.ADDRESS.create(bytes));
}
return addresses;
}
}
| 5,495 | 38.826087 | 138 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/spentaddresses/impl/SpentAddressesServiceImpl.java
|
package com.iota.iri.service.spentaddresses.impl;
import com.iota.iri.BundleValidator;
import com.iota.iri.conf.MilestoneConfig;
import com.iota.iri.controllers.AddressViewModel;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.service.snapshot.SnapshotProvider;
import com.iota.iri.service.spentaddresses.SpentAddressesException;
import com.iota.iri.service.spentaddresses.SpentAddressesProvider;
import com.iota.iri.service.spentaddresses.SpentAddressesService;
import com.iota.iri.service.tipselection.TailFinder;
import com.iota.iri.service.tipselection.impl.TailFinderImpl;
import com.iota.iri.storage.Tangle;
import com.iota.iri.utils.IotaUtils;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.collections4.CollectionUtils;
import pl.touk.throwing.ThrowingPredicate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
*
* Implementation of <tt>SpentAddressesService</tt> that calculates and checks spent addresses using the {@link Tangle}
*
*/
public class SpentAddressesServiceImpl implements SpentAddressesService {
private static final Logger log = LoggerFactory.getLogger(SpentAddressesServiceImpl.class);
private final Tangle tangle;
private final SnapshotProvider snapshotProvider;
private final SpentAddressesProvider spentAddressesProvider;
private final TailFinder tailFinder;
private final MilestoneConfig config;
private final BundleValidator bundleValidator;
private final ExecutorService asyncSpentAddressesPersistor =
IotaUtils.createNamedSingleThreadExecutor("Persist Spent Addresses Async");
/**
     * Creates a spent address service using the Tangle
     *
     * @param tangle Tangle object which is used to load models of addresses
     * @param snapshotProvider {@link SnapshotProvider} to find the genesis, used to verify tails
     * @param spentAddressesProvider Provider for loading/saving addresses to a database.
     * @param bundleValidator validator used to check the bundles of pending spending transactions
     * @param config milestone configuration, used to obtain the coordinator address
*/
public SpentAddressesServiceImpl(Tangle tangle, SnapshotProvider snapshotProvider,
SpentAddressesProvider spentAddressesProvider, BundleValidator bundleValidator,
MilestoneConfig config) {
this.tangle = tangle;
this.snapshotProvider = snapshotProvider;
this.spentAddressesProvider = spentAddressesProvider;
this.bundleValidator = bundleValidator;
this.tailFinder = new TailFinderImpl(tangle);
this.config = config;
}
@Override
public boolean wasAddressSpentFrom(Hash addressHash) throws SpentAddressesException {
return wasAddressSpentFrom(addressHash, getInitialUnspentAddresses());
}
@Override
public void persistSpentAddresses(Collection<TransactionViewModel> transactions) throws SpentAddressesException {
try {
Collection<Hash> spentAddresses = transactions.stream()
.filter(ThrowingPredicate.unchecked(this::wasTransactionSpentFrom))
.map(TransactionViewModel::getAddressHash)
.collect(Collectors.toSet());
spentAddressesProvider.saveAddressesBatch(spentAddresses);
} catch (RuntimeException e) {
throw new SpentAddressesException("Exception while persisting spent addresses", e);
}
}
    @Override
    public void persistValidatedSpentAddressesAsync(Collection<TransactionViewModel> transactions) {
        if (transactions.stream().noneMatch(tx -> tx.value() < 0)) {
return;
}
asyncSpentAddressesPersistor.submit(() -> {
try {
List<Hash> spentAddresses = transactions.stream()
.filter(tx -> tx.value() < 0)
.map(TransactionViewModel::getAddressHash)
.collect(Collectors.toList());
spentAddressesProvider.saveAddressesBatch(spentAddresses);
} catch (Exception e) {
log.error("Failed to persist spent-addresses... Counting on the Milestone Pruner to finish the job", e);
}
});
}
private boolean wasTransactionSpentFrom(TransactionViewModel tx) throws Exception {
Optional<Hash> tailFromTx = tailFinder.findTailFromTx(tx);
if (tailFromTx.isPresent() && tx.value() < 0) {
// Transaction is confirmed
if (tx.snapshotIndex() != 0) {
return true;
}
// transaction is pending
Hash tailHash = tailFromTx.get();
return isBundleValid(tailHash);
}
return false;
}
private boolean isBundleValid(Hash tailHash) throws Exception {
List<TransactionViewModel> validation =
bundleValidator.validate(tangle, false, snapshotProvider.getInitialSnapshot(), tailHash);
return (CollectionUtils.isNotEmpty(validation) && validation.get(0).getValidity() == 1);
}
/**
*
* @param addressHash the address in question
* @param checkedAddresses known unspent addresses, used to skip calculations.
* Must contain at least {@link Hash#NULL_HASH} and the coordinator address.
* @return {@code true} if address was spent from, else {@code false}
     * @throws SpentAddressesException if anything goes wrong while checking the address
* @see #wasAddressSpentFrom(Hash)
*/
private boolean wasAddressSpentFrom(Hash addressHash, Collection<Hash> checkedAddresses)
throws SpentAddressesException {
if (addressHash == null) {
return false;
}
if (spentAddressesProvider.containsAddress(addressHash)) {
return true;
}
//If address has already been checked this session, return false
if (checkedAddresses.contains(addressHash)){
return false;
}
try {
Set<Hash> hashes = AddressViewModel.load(tangle, addressHash).getHashes();
int setSizeLimit = 100_000;
//If the hash set returned contains more than 100 000 entries, it likely will not be a spent address.
//To avoid unnecessary overhead while processing, the loop will return false
if (hashes.size() > setSizeLimit){
checkedAddresses.add(addressHash);
return false;
}
for (Hash hash: hashes) {
TransactionViewModel tx = TransactionViewModel.fromHash(tangle, hash);
// Check for spending transactions
if (wasTransactionSpentFrom(tx)) {
return true;
}
}
} catch (Exception e) {
throw new SpentAddressesException(e);
}
checkedAddresses.add(addressHash);
return false;
}
private Set<Hash> getInitialUnspentAddresses() {
return Stream.of(Hash.NULL_HASH, config.getCoordinator())
.collect(Collectors.toSet());
}
}
| 7,116 | 36.856383 | 121 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/tipselection/EntryPointSelector.java
|
package com.iota.iri.service.tipselection;
import com.iota.iri.model.Hash;
import com.iota.iri.utils.collections.interfaces.UnIterableMap;
/**
* Selects an {@code entryPoint} for tip selection.
* <p>
* this point is used as the starting point for {@link Walker#walk(Hash, UnIterableMap, WalkValidator)}
* </p>
*/
public interface EntryPointSelector {
/**
     * Gets an {@code entryPoint} for tip selection.
     *
     * Uses depth to determine the entry point for the random walk.
     *
     * @param depth Depth, in milestones; a notion of how deep to search for a good starting point.
     * @return Entry point for walk method
     * @throws Exception If DB fails to retrieve transactions
     */
    Hash getEntryPoint(int depth) throws Exception;
}
| 766 | 27.407407 | 103 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/tipselection/RatingCalculator.java
|
package com.iota.iri.service.tipselection;
import java.util.Map;
import com.iota.iri.model.Hash;
/**
* Calculates the rating for a sub graph
*/
@FunctionalInterface
public interface RatingCalculator {
/**
* Rating calculator
* <p>
* Calculates the rating of all the transactions that reference
* a given {@code entryPoint}.
* </p>
*
* @param entryPoint Transaction hash of a selected entry point.
* @return Map of ratings for each transaction that references entryPoint.
* @throws Exception If DB fails to retrieve transactions
*/
Map<Hash, Integer> calculate(Hash entryPoint) throws Exception;
}
| 663 | 23.592593 | 78 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/tipselection/TailFinder.java
|
package com.iota.iri.service.tipselection;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
import java.util.Optional;
/**
* Finds the tail of a bundle
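 * <p>
 * A minimal usage sketch (assumes a {@code tailFinder} instance and any transaction hash of a bundle):
 * </p>
 * <pre>{@code
 * Optional<Hash> tail = tailFinder.findTail(anyBundleTransactionHash);
 * boolean found = tail.isPresent(); // true only if the bundle's current_index=0 transaction is reachable
 * }</pre>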
*/
public interface TailFinder {
/**
* Method for finding a tail (current_index=0) of a bundle given any transaction in the associated bundle.
*
* @param hash The transaction hash of any transaction in the bundle.
* @return Hash of the tail transaction, or {@code Empty} if the tail is not found.
* @throws Exception If DB fails to retrieve transactions
*/
Optional<Hash> findTail(Hash hash) throws Exception;
/**
* Method for finding a tail (current_index=0) of a bundle given any transaction in the associated bundle.
*
* @param tx any transaction in the bundle.
* @return Hash of the tail transaction, or {@code Empty} if the tail is not found.
* @throws Exception If DB fails to retrieve transactions
*/
Optional<Hash> findTailFromTx(TransactionViewModel tx) throws Exception;
}
| 1,055 | 33.064516 | 110 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/tipselection/TipSelector.java
|
package com.iota.iri.service.tipselection;
import java.util.List;
import java.util.Optional;
import com.iota.iri.model.Hash;
/**
* Selects tips to be approved by a new transaction.
*/
public interface TipSelector {
/**
* Method for finding tips.
*
* <p>
* This method is used to find tips for approval given a {@code depth},
* if {@code reference} is present then tips will also reference this transaction.
* </p>
*
* @param depth The depth that tip selection will start from.
* @param reference An optional transaction hash to be referenced by tips.
* @return Tips to approve
* @throws Exception If DB fails to retrieve transactions
*/
List<Hash> getTransactionsToApprove(int depth, Optional<Hash> reference) throws Exception;
}
| 811 | 26.066667 | 94 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/tipselection/WalkValidator.java
|
package com.iota.iri.service.tipselection;
import com.iota.iri.model.Hash;
/**
* Validates consistency of tails.
*/
@FunctionalInterface
public interface WalkValidator {
/**
* Validation
* <p>
* Checks if a given transaction is a valid tail.
* </p>
*
* @param transactionHash Transaction hash to validate consistency of.
* @return <tt>true</tt> if tail is valid, <tt>false</tt> otherwise.
* @throws Exception If Validation fails to execute
*/
boolean isValid(Hash transactionHash) throws Exception;
}
| 562 | 22.458333 | 75 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/tipselection/Walker.java
|
package com.iota.iri.service.tipselection;
import java.util.Map;
import com.iota.iri.model.Hash;
/**
* Walks the tangle from an entry point towards tips
*
*/
public interface Walker {
/**
* Walk algorithm
* <p>
* Starts from a given {@code entryPoint} to select valid transactions to be used
* as tips. Returns a valid transaction as a tip.
* </p>
*
* @param entryPoint Transaction hash to start walk from.
* @param ratings Map of ratings for each transaction that references entryPoint.
* @param walkValidator Used to validate consistency of tails.
* @return Transaction hash of tip.
* @throws Exception If DB fails to retrieve transactions
*/
Hash walk(Hash entryPoint, Map<Hash, Integer> ratings, WalkValidator walkValidator) throws Exception;
}
| 829 | 26.666667 | 105 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/tipselection/impl/CumulativeWeightCalculator.java
|
package com.iota.iri.service.tipselection.impl;
import java.util.ArrayDeque;
import java.util.Collection;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import com.iota.iri.controllers.ApproveeViewModel;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.service.snapshot.SnapshotProvider;
import com.iota.iri.service.tipselection.RatingCalculator;
import com.iota.iri.storage.Tangle;
/**
 * Implementation of {@link RatingCalculator} that calculates the cumulative weight of transactions.
 * Calculates the weight recursively/on the fly for each transaction referencing {@code entryPoint}. <br>
 * Works using a DFS to discover new hashes and a BFS-style calculation of the ratings.
 * Uses cached values to prevent double database lookups for approvers.
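 * <p>
 * Illustration on a tiny hypothetical tangle where tips {@code B} and {@code C} both directly approve {@code A}:
 * </p>
 * <pre>{@code
 * Map<Hash, Integer> rating = calculator.calculate(a);
 * // rating.get(b) == 1 and rating.get(c) == 1 (tips weigh 1)
 * // rating.get(a) == 3  (A counts itself plus B and C)
 * }</pre>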
*/
public class CumulativeWeightCalculator implements RatingCalculator {
private final Tangle tangle;
private final SnapshotProvider snapshotProvider;
/**
* Constructor for Cumulative Weight Calculator
*
* @param tangle Tangle object which acts as a database interface
* @param snapshotProvider accesses ledger's snapshots
*/
public CumulativeWeightCalculator(Tangle tangle, SnapshotProvider snapshotProvider) {
this.tangle = tangle;
this.snapshotProvider = snapshotProvider;
}
@Override
public Map<Hash, Integer> calculate(Hash entryPoint) throws Exception {
Map<Hash, Integer> hashWeightMap = calculateRatingDfs(entryPoint);
return hashWeightMap;
}
private Map<Hash, Integer> calculateRatingDfs(Hash entryPoint) throws Exception {
TransactionViewModel tvm = TransactionViewModel.fromHash(tangle, entryPoint);
int depth = tvm.snapshotIndex() > 0
? snapshotProvider.getLatestSnapshot().getIndex() - tvm.snapshotIndex() + 1
: 1;
        // Estimated capacity per depth, assuming a 5 minute gap between milestones at 3 TPS
        Map<Hash, Integer> hashWeightMap = createTxHashToCumulativeWeightMap(5 * 60 * 3 * depth);
Map<Hash, Set<Hash>> txToDirectApprovers = new HashMap<>();
Deque<Hash> stack = new ArrayDeque<>();
stack.addAll(getTxDirectApproversHashes(entryPoint, txToDirectApprovers));
while (!stack.isEmpty()) {
Hash txHash = stack.pollLast();
Set<Hash> approvers = getTxDirectApproversHashes(txHash, txToDirectApprovers);
            // If it's empty, it's a tip!
if (approvers.isEmpty()) {
hashWeightMap.put(txHash, 1);
// Else we go deeper
} else {
                // Add all approvers we have not visited yet
for (Hash h : approvers) {
if (!hashWeightMap.containsKey(h)) {
stack.add(h);
}
}
// Add the tx to the approvers list to count itself as +1 weight, preventing self-referencing
approvers.add(txHash);
                // calculate and add the rating; the first time, all approvers need to be looked up, after that they are cached
hashWeightMap.put(txHash, getRating(approvers, txToDirectApprovers));
}
}
        // If we have a self-reference, it's already added; otherwise we save a big calculation
if (!hashWeightMap.containsKey(entryPoint)) {
hashWeightMap.put(entryPoint, hashWeightMap.size() + 1);
}
return hashWeightMap;
}
/**
* Gets the rating of a set, calculated by checking its approvers
*
* @param startingSet All approvers of a certain hash, including the hash itself.
* Should always start with at least 1 hash.
* @param txToDirectApproversCache The cache of approvers, used to prevent double db lookups
* @return The weight, or rating, of the starting hash
* @throws Exception If we can't get the approvers
*/
private int getRating(Set<Hash> startingSet, Map<Hash, Set<Hash>> txToDirectApproversCache) throws Exception {
Deque<Hash> stack = new ArrayDeque<>(startingSet);
while (!stack.isEmpty()) {
Set<Hash> approvers = getTxDirectApproversHashes(stack.pollLast(), txToDirectApproversCache);
for (Hash hash : approvers) {
if (startingSet.add(hash)) {
stack.add(hash);
}
}
}
return startingSet.size();
}
/**
* Finds the approvers of a transaction, and adds it to the txToDirectApprovers map if they weren't there yet.
*
* @param txHash The tx we find the approvers of
* @param txToDirectApprovers The map we look in, and add to
     * @return A set with the direct approvers of the given hash
     * @throws Exception if the database lookup fails
*/
private Set<Hash> getTxDirectApproversHashes(Hash txHash, Map<Hash, Set<Hash>> txToDirectApprovers)
throws Exception {
Set<Hash> txApprovers = txToDirectApprovers.get(txHash);
if (txApprovers == null) {
ApproveeViewModel approvers = ApproveeViewModel.load(tangle, txHash);
Collection<Hash> appHashes;
if (approvers == null || approvers.getHashes() == null) {
appHashes = Collections.emptySet();
} else {
appHashes = approvers.getHashes();
}
txApprovers = new HashSet<>(appHashes.size());
for (Hash appHash : appHashes) {
// if not genesis (the tx that confirms itself)
if (!snapshotProvider.getInitialSnapshot().hasSolidEntryPoint(appHash)) {
txApprovers.add(appHash);
}
}
txToDirectApprovers.put(txHash, txApprovers);
}
        return new HashSet<>(txApprovers);
}
private static Map<Hash, Integer> createTxHashToCumulativeWeightMap(int size) {
return new HashMap<Hash, Integer>(size); //new TransformingMap<>(size, HashPrefix::createPrefix, null);
}
}
| 6,343 | 39.407643 | 122 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/tipselection/impl/EntryPointSelectorImpl.java
|
package com.iota.iri.service.tipselection.impl;
import com.iota.iri.controllers.MilestoneViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.service.milestone.MilestoneSolidifier;
import com.iota.iri.service.snapshot.SnapshotProvider;
import com.iota.iri.service.tipselection.EntryPointSelector;
import com.iota.iri.storage.Tangle;
/**
 * Implementation of {@link EntryPointSelector} that, given a depth {@code N}, returns an N-deep milestone.
* Meaning <code>milestone(latestSolid - depth)</code>
* Used as a starting point for the random walk.
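 * <p>
 * Illustration (hypothetical indexes): with a latest solid milestone at index 500 and {@code depth = 3}, the walk
 * starts from (roughly) milestone 497, falling back to the latest snapshot hash if no such milestone exists.
 * </p>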
*/
public class EntryPointSelectorImpl implements EntryPointSelector {
private final Tangle tangle;
private final SnapshotProvider snapshotProvider;
private final MilestoneSolidifier milestoneSolidifier;
/**
* Constructor for Entry Point Selector
* @param tangle Tangle object which acts as a database interface.
* @param snapshotProvider accesses snapshots of the ledger state
* @param milestoneSolidifier used to get latest milestone.
*/
public EntryPointSelectorImpl(Tangle tangle, SnapshotProvider snapshotProvider,
MilestoneSolidifier milestoneSolidifier) {
this.tangle = tangle;
this.snapshotProvider = snapshotProvider;
this.milestoneSolidifier = milestoneSolidifier;
}
@Override
public Hash getEntryPoint(int depth) throws Exception {
int milestoneIndex = Math.max(snapshotProvider.getLatestSnapshot().getIndex() - depth - 1,
snapshotProvider.getInitialSnapshot().getIndex());
MilestoneViewModel milestoneViewModel = MilestoneViewModel.findClosestNextMilestone(tangle, milestoneIndex,
milestoneSolidifier.getLatestMilestoneIndex());
if (milestoneViewModel != null && milestoneViewModel.getHash() != null) {
return milestoneViewModel.getHash();
}
return snapshotProvider.getLatestSnapshot().getHash();
}
}
| 1,959 | 40.702128 | 115 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/tipselection/impl/RatingOne.java
|
package com.iota.iri.service.tipselection.impl;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import com.iota.iri.controllers.ApproveeViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.service.tipselection.RatingCalculator;
import com.iota.iri.storage.Tangle;
/**
* Implementation of <tt>RatingCalculator</tt> that gives a uniform rating of 1 to each transaction.
* Used to create uniform random walks.
*/
public class RatingOne implements RatingCalculator {
private final Tangle tangle;
    /**
     * Constructor for the uniform rating calculator.
     *
     * @param tangle Tangle object which acts as a database interface
     */
    public RatingOne(Tangle tangle) {
this.tangle = tangle;
}
@Override
public Map<Hash, Integer> calculate(Hash entryPoint) throws Exception {
Map<Hash, Integer> rating = new HashMap<>();
Queue<Hash> queue = new LinkedList<>();
queue.add(entryPoint);
rating.put(entryPoint, 1);
Hash hash;
//traverse all transactions that reference entryPoint
while ((hash = queue.poll()) != null) {
Set<Hash> approvers = ApproveeViewModel.load(tangle, hash).getHashes();
for (Hash tx : approvers) {
if (!rating.containsKey(tx)) {
//add to rating w/ value "1"
rating.put(tx, 1);
queue.add(tx);
}
}
}
return rating;
}
}
| 1,422 | 26.365385 | 100 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/tipselection/impl/TailFinderImpl.java
|
package com.iota.iri.service.tipselection.impl;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.service.tipselection.TailFinder;
import com.iota.iri.storage.Tangle;
import java.util.Optional;
import java.util.Set;
/**
* Implementation of {@link TailFinder} that given a transaction hash finds the tail of the associated bundle.
*
*/
public class TailFinderImpl implements TailFinder {
private final Tangle tangle;
/**
* Constructor for Tail Finder
* @param tangle Tangle object which acts as a database interface
*/
public TailFinderImpl(Tangle tangle) {
this.tangle = tangle;
}
@Override
public Optional<Hash> findTail(Hash hash) throws Exception {
TransactionViewModel tx = TransactionViewModel.fromHash(tangle, hash);
return findTailFromTx(tx);
}
@Override
public Optional<Hash> findTailFromTx(TransactionViewModel tx) throws Exception {
final Hash bundleHash = tx.getBundleHash();
long index = tx.getCurrentIndex();
        // within a bundle, the transaction at index i is approved (via its trunk) by the transaction at index i-1,
        // so we walk the approvers downwards until we reach the tail at current_index=0
        while (index-- > 0 && bundleHash.equals(tx.getBundleHash())) {
            Set<Hash> approvers = tx.getApprovers(tangle).getHashes();
            boolean foundApprover = false;
            for (Hash approver : approvers) {
                TransactionViewModel nextTx = TransactionViewModel.fromHash(tangle, approver);
                if (nextTx.getCurrentIndex() == index && bundleHash.equals(nextTx.getBundleHash())) {
                    tx = nextTx;
                    foundApprover = true;
                    break;
                }
            }
            if (!foundApprover) {
                break;
            }
        }
if (tx.getCurrentIndex() == 0) {
return Optional.of(tx.getHash());
}
return Optional.empty();
}
}
| 1,860 | 31.086207 | 110 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/tipselection/impl/TipSelectionCancelledException.java
|
package com.iota.iri.service.tipselection.impl;
/**
* Thrown when an ongoing tip-selection is cancelled.
*/
public class TipSelectionCancelledException extends Exception {
/**
* Creates a new {@link TipSelectionCancelledException}
*
* @param msg the specific message.
*/
public TipSelectionCancelledException(String msg) {
super(msg);
}
}
| 384 | 21.647059 | 63 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/tipselection/impl/TipSelectorImpl.java
|
package com.iota.iri.service.tipselection.impl;
import java.security.InvalidAlgorithmParameterException;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import com.google.common.annotations.VisibleForTesting;
import com.iota.iri.conf.TipSelConfig;
import com.iota.iri.model.Hash;
import com.iota.iri.service.ledger.LedgerService;
import com.iota.iri.service.snapshot.SnapshotProvider;
import com.iota.iri.service.tipselection.EntryPointSelector;
import com.iota.iri.service.tipselection.RatingCalculator;
import com.iota.iri.service.tipselection.TipSelector;
import com.iota.iri.service.tipselection.WalkValidator;
import com.iota.iri.service.tipselection.Walker;
import com.iota.iri.storage.Tangle;
/**
* Implementation of <tt>TipSelector</tt> that selects 2 tips,
* based on cumulative weights and transition function alpha.
*
*/
public class TipSelectorImpl implements TipSelector {
private static final String REFERENCE_TRANSACTION_TOO_OLD = "reference transaction is too old";
private static final String TIPS_NOT_CONSISTENT = "inconsistent tips pair selected";
private static final String REFERENCE_TRANSACTION_IS_INVALID = "reference transaction is invalid";
private final EntryPointSelector entryPointSelector;
private final RatingCalculator ratingCalculator;
private final Walker walker;
private final LedgerService ledgerService;
private final Tangle tangle;
private final SnapshotProvider snapshotProvider;
private final TipSelConfig config;
/**
* Constructor for Tip Selector.
*
* @param tangle Tangle object which acts as a database interface.
* @param snapshotProvider allows access to snapshots of the ledger state
* @param ledgerService used by walk validator to check ledger consistency.
* @param entryPointSelector instance of the entry point selector to get tip selection starting points.
* @param ratingCalculator instance of rating calculator, to calculate weighted walks.
* @param walkerAlpha instance of walker (alpha), to perform weighted random walks as per the IOTA white paper.
* @param config configurations to set internal parameters.
*/
public TipSelectorImpl(Tangle tangle,
SnapshotProvider snapshotProvider,
LedgerService ledgerService,
EntryPointSelector entryPointSelector,
RatingCalculator ratingCalculator,
Walker walkerAlpha,
TipSelConfig config) {
this.entryPointSelector = entryPointSelector;
this.ratingCalculator = ratingCalculator;
this.walker = walkerAlpha;
//used by walkValidator
this.ledgerService = ledgerService;
this.tangle = tangle;
this.snapshotProvider = snapshotProvider;
this.config = config;
}
/**
* {@inheritDoc}
*
* Implementation of getTransactionsToApprove
*
* General process:
* <ol>
* <li><b>Preparation:</b> select <CODE>entryPoint</CODE> and calculate rating for all referencing transactions
* <li><b>1st Random Walk:</b> starting from <CODE>entryPoint</CODE>.
     * <li><b>2nd Random Walk:</b> if <CODE>reference</CODE> exists and is in the rating calculation, start from <CODE>reference</CODE>,
     * otherwise start again from <CODE>entryPoint</CODE>.
     * <li><b>Validate:</b> check that both tips do not contradict each other.
* </ol>
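     * <p>
     * A minimal invocation sketch (assumes a wired-up {@code tipSelector} instance):
     * <pre>{@code
     * List<Hash> tips = tipSelector.getTransactionsToApprove(3, Optional.empty());
     * Hash trunk = tips.get(0);
     * Hash branch = tips.get(1);
     * }</pre>
     *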
* @param depth The depth that the transactions will be found from.
* @param reference An optional transaction hash to be referenced by tips.
* @return Transactions to approve
* @throws Exception If DB fails to retrieve transactions
*/
@Override
public List<Hash> getTransactionsToApprove(int depth, Optional<Hash> reference) throws Exception {
try {
snapshotProvider.getLatestSnapshot().lockRead();
//preparation
Hash entryPoint = entryPointSelector.getEntryPoint(depth);
Map<Hash, Integer> rating;
if(config.getAlpha() == 0) {
rating = new RatingOne(tangle).calculate(entryPoint);
} else {
rating = ratingCalculator.calculate(entryPoint);
}
//random walk
List<Hash> tips = new LinkedList<>();
//ISSUE #786: walkValidator should become a stateless dependency
WalkValidator walkValidator = new WalkValidatorImpl(tangle, snapshotProvider, ledgerService, config);
Hash tip = walker.walk(entryPoint, rating, walkValidator);
tips.add(tip);
if (reference.isPresent()) {
checkReference(reference.get(), rating, walkValidator);
entryPoint = reference.get();
}
//passing the same walkValidator means that the walks will be consistent with each other
tip = walker.walk(entryPoint, rating, walkValidator);
tips.add(tip);
//validate
if (!ledgerService.tipsConsistent(tips)) {
throw new IllegalStateException(TIPS_NOT_CONSISTENT);
}
return tips;
} finally {
snapshotProvider.getLatestSnapshot().unlockRead();
}
}
//Because walkValidator currently can't be mocked, it is easier to test this private method directly
@VisibleForTesting
void checkReference(Hash reference, Map<Hash, Integer> rating, WalkValidator walkValidator)
throws Exception {
if (config.getAlpha() != 0 && !rating.containsKey(reference)) {
throw new InvalidAlgorithmParameterException(REFERENCE_TRANSACTION_TOO_OLD);
}
else if (config.getAlpha() == 0 && !walkValidator.isValid(reference)) {
throw new InvalidAlgorithmParameterException(REFERENCE_TRANSACTION_IS_INVALID);
}
}
}
| 5,992 | 40.909091 | 136 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/tipselection/impl/WalkValidatorImpl.java
|
package com.iota.iri.service.tipselection.impl;
import com.iota.iri.conf.TipSelConfig;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.service.ledger.LedgerService;
import com.iota.iri.service.snapshot.SnapshotProvider;
import com.iota.iri.service.tipselection.WalkValidator;
import com.iota.iri.storage.Tangle;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
/**
* Implementation of {@link WalkValidator} that checks consistency of the ledger as part of validity checks.
*
* A transaction is only valid if:
* <ol>
* <li>it is a tail
* <li>all the history of the transaction is present (is solid)
* <li>it does not reference an old unconfirmed transaction (not belowMaxDepth)
* <li>the ledger is still consistent if the transaction is added
* (balances of all addresses are correct and all signatures are valid)
* </ol>
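 * <p>
 * A minimal usage sketch (assumes wired-up {@code tangle}, {@code snapshotProvider}, {@code ledgerService} and
 * {@code config} instances):
 * <pre>{@code
 * WalkValidator validator = new WalkValidatorImpl(tangle, snapshotProvider, ledgerService, config);
 * if (validator.isValid(candidateTailHash)) {
 *     // the tail may be used as a step of the walk
 * }
 * }</pre>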
*/
public class WalkValidatorImpl implements WalkValidator {
private final Tangle tangle;
private final Logger log = LoggerFactory.getLogger(WalkValidator.class);
private final SnapshotProvider snapshotProvider;
private final LedgerService ledgerService;
private final TipSelConfig config;
private Set<Hash> maxDepthOkMemoization;
private Map<Hash, Long> myDiff;
private Set<Hash> myApprovedHashes;
/**
* Constructor of Walk Validator
* @param tangle Tangle object which acts as a database interface.
     * @param snapshotProvider grants access to snapshots of the ledger state.
* @param ledgerService allows to perform ledger related logic.
* @param config configurations to set internal parameters.
*/
public WalkValidatorImpl(Tangle tangle, SnapshotProvider snapshotProvider, LedgerService ledgerService,
TipSelConfig config) {
this.tangle = tangle;
this.snapshotProvider = snapshotProvider;
this.ledgerService = ledgerService;
this.config = config;
maxDepthOkMemoization = new HashSet<>();
myDiff = new HashMap<>();
myApprovedHashes = new HashSet<>();
}
@Override
public boolean isValid(Hash transactionHash) throws Exception {
TransactionViewModel transactionViewModel = TransactionViewModel.fromHash(tangle, transactionHash);
if (transactionViewModel.getType() == TransactionViewModel.PREFILLED_SLOT) {
log.debug("Validation failed: {} is missing in db", transactionHash);
return false;
} else if (transactionViewModel.getCurrentIndex() != 0) {
log.debug("Validation failed: {} not a tail", transactionHash);
return false;
} else if (!transactionViewModel.isSolid()) {
log.debug("Validation failed: {} is not solid", transactionHash);
return false;
} else if (belowMaxDepth(transactionViewModel.getHash(),
snapshotProvider.getLatestSnapshot().getIndex() - config.getMaxDepth())) {
log.debug("Validation failed: {} is below max depth", transactionHash);
return false;
} else if (!ledgerService.isBalanceDiffConsistent(myApprovedHashes, myDiff, transactionViewModel.getHash())) {
log.debug("Validation failed: {} is not consistent", transactionHash);
return false;
}
return true;
}
private boolean belowMaxDepth(Hash tip, int lowerAllowedSnapshotIndex) throws Exception {
//if tip is confirmed stop
if (TransactionViewModel.fromHash(tangle, tip).snapshotIndex() >= lowerAllowedSnapshotIndex) {
return false;
}
//if tip unconfirmed, check if any referenced tx is confirmed below maxDepth
Queue<Hash> nonAnalyzedTransactions = new LinkedList<>(Collections.singleton(tip));
Set<Hash> analyzedTransactions = new HashSet<>();
Hash hash;
final int maxAnalyzedTransactions = config.getBelowMaxDepthTransactionLimit();
while ((hash = nonAnalyzedTransactions.poll()) != null) {
if (analyzedTransactions.size() == maxAnalyzedTransactions) {
log.debug("failed below max depth because of exceeding max threshold of {} analyzed transactions",
maxAnalyzedTransactions);
return true;
}
if (analyzedTransactions.add(hash)) {
TransactionViewModel transaction = TransactionViewModel.fromHash(tangle, hash);
if ((transaction.snapshotIndex() != 0 || snapshotProvider.getInitialSnapshot().hasSolidEntryPoint(transaction.getHash()))
&& transaction.snapshotIndex() < lowerAllowedSnapshotIndex) {
log.debug("failed below max depth because of reaching a tx below the allowed snapshot index {}",
lowerAllowedSnapshotIndex);
return true;
}
if (transaction.snapshotIndex() == 0) {
if (!maxDepthOkMemoization.contains(hash)) {
nonAnalyzedTransactions.offer(transaction.getTrunkTransactionHash());
nonAnalyzedTransactions.offer(transaction.getBranchTransactionHash());
}
}
}
}
maxDepthOkMemoization.add(tip);
return false;
}
}
| 5,420 | 44.554622 | 137 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/tipselection/impl/WalkerAlpha.java
|
package com.iota.iri.service.tipselection.impl;
import java.util.Collections;
import java.util.Deque;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.iota.iri.conf.TipSelConfig;
import com.iota.iri.controllers.ApproveeViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.service.tipselection.TailFinder;
import com.iota.iri.service.tipselection.WalkValidator;
import com.iota.iri.service.tipselection.Walker;
import com.iota.iri.storage.Tangle;
/**
* Implementation of <tt>Walker</tt> that performs a weighted random walk
* with <CODE>e^(alpha*Hy)</CODE> as the transition function.
*
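 * A worked sketch of the transition probability: with two approvers of cumulative weight 10 and 7 and
 * {@code alpha = 0.5}, the ratings are first normalized against the maximum (10 - 10 = 0 and 7 - 10 = -3),
 * yielding weights {@code e^0 = 1.0} and {@code e^(-1.5) ~ 0.223}; the heavier approver is therefore selected
 * with probability {@code 1.0 / 1.223 ~ 0.82}.
 *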
*/
public class WalkerAlpha implements Walker {
/**
* {@code alpha}: a positive number that controls the randomness of the walk.
     * The closer it is to 0, the less biased the random walk will be.
*/
private double alpha;
private final Random random;
private final Tangle tangle;
private final Logger log = LoggerFactory.getLogger(Walker.class);
private final TailFinder tailFinder;
/**
* Constructor for Walker Alpha.
*
* @param tailFinder instance of tailFinder, used to step from tail to tail in random walk.
* @param tangle Tangle object which acts as a database interface
* @param random a source of randomness.
* @param config configurations to set internal parameters.
*/
public WalkerAlpha(TailFinder tailFinder, Tangle tangle, Random random, TipSelConfig config) {
this.tangle = tangle;
this.tailFinder = tailFinder;
this.random = random;
this.alpha = config.getAlpha();
}
/**
* @return {@link WalkerAlpha#alpha}
*/
public double getAlpha() {
return alpha;
}
/**
* @param alpha {@link WalkerAlpha#alpha}
*/
public void setAlpha(double alpha) {
this.alpha = alpha;
}
@Override
public Hash walk(Hash entryPoint, Map<Hash, Integer> ratings, WalkValidator walkValidator) throws Exception {
if (!walkValidator.isValid(entryPoint)) {
throw new IllegalStateException("entry point failed consistency check: " + entryPoint.toString());
}
Optional<Hash> nextStep;
Deque<Hash> traversedTails = new LinkedList<>();
traversedTails.add(entryPoint);
//Walk
do {
if(Thread.interrupted()){
throw new InterruptedException();
}
nextStep = selectApprover(traversedTails.getLast(), ratings, walkValidator);
nextStep.ifPresent(traversedTails::add);
} while (nextStep.isPresent());
log.debug("{} tails traversed to find tip", traversedTails.size());
tangle.publish("mctn %d", traversedTails.size());
return traversedTails.getLast();
}
private Optional<Hash> selectApprover(Hash tailHash, Map<Hash, Integer> ratings, WalkValidator walkValidator) throws Exception {
Set<Hash> approvers = getApprovers(tailHash);
return findNextValidTail(ratings, approvers, walkValidator);
}
private Set<Hash> getApprovers(Hash tailHash) throws Exception {
ApproveeViewModel approveeViewModel = ApproveeViewModel.load(tangle, tailHash);
return approveeViewModel.getHashes();
}
private Optional<Hash> findNextValidTail(Map<Hash, Integer> ratings, Set<Hash> approvers, WalkValidator walkValidator) throws Exception {
Optional<Hash> nextTailHash = Optional.empty();
//select next tail to step to
while (!nextTailHash.isPresent()) {
Optional<Hash> nextTxHash = select(ratings, approvers);
if (!nextTxHash.isPresent()) {
//no existing approver = tip
return Optional.empty();
}
nextTailHash = findTailIfValid(nextTxHash.get(), walkValidator);
approvers.remove(nextTxHash.get());
//if next tail is not valid, re-select while removing it from approvers set
}
return nextTailHash;
}
private Optional<Hash> select(Map<Hash, Integer> ratings, Set<Hash> approversSet) {
List<Hash> approvers;
int approverIndex;
//filter based on tangle state when starting the walk
approvers = approversSet.stream().filter(ratings::containsKey).collect(Collectors.toList());
//After filtering, if no approvers are available, it's a tip.
if (approvers.size() == 0) {
return Optional.empty();
}
//Check if alpha was set to 0. If so, weight calculations are skipped and a random approver will be selected.
if(alpha != 0) {
//calculate the probabilities
List<Integer> walkRatings = approvers.stream().map(ratings::get).collect(Collectors.toList());
Integer maxRating = walkRatings.stream().max(Integer::compareTo).orElse(0);
//walkRatings.stream().reduce(0, Integer::max);
//transition probability function (normalize ratings based on Hmax)
List<Integer> normalizedWalkRatings = walkRatings.stream().map(w -> w - maxRating).collect(Collectors.toList());
List<Double> weights = normalizedWalkRatings.stream().map(w -> Math.exp(alpha * w)).collect(Collectors.toList());
//select the next transaction
Double weightsSum = weights.stream().reduce(0.0, Double::sum);
double target = random.nextDouble() * weightsSum;
for (approverIndex = 0; approverIndex < weights.size() - 1; approverIndex++) {
target -= weights.get(approverIndex);
if (target <= 0) {
break;
}
}
} else {
approverIndex = random.nextInt(approvers.size());
}
return Optional.of(approvers.get(approverIndex));
}
private Optional<Hash> findTailIfValid(Hash transactionHash, WalkValidator validator) throws Exception {
Optional<Hash> tailHash = tailFinder.findTail(transactionHash);
if (tailHash.isPresent() && validator.isValid(tailHash.get())) {
return tailHash;
}
return Optional.empty();
}
}
| 6,372 | 35.83815 | 141 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/transactionpruning/DepthPruningCondition.java
|
package com.iota.iri.service.transactionpruning;
import com.iota.iri.conf.SnapshotConfig;
import com.iota.iri.controllers.MilestoneViewModel;
import com.iota.iri.service.snapshot.Snapshot;
import com.iota.iri.service.snapshot.SnapshotProvider;
import com.iota.iri.storage.Tangle;
/**
* Initiates pruning based on the number of milestones in the DB
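 * <p>
 * Illustration (hypothetical numbers): with an initial snapshot at milestone 1_000_000 and a pruning delay of
 * 40_000, pruning is triggered as soon as the oldest milestone stored in the database is older than
 * milestone 960_000.
 * </p>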
*/
public class DepthPruningCondition implements PruningCondition {
private final SnapshotConfig config;
private final SnapshotProvider snapshotProvider;
private final Tangle tangle;
/**
* Initialize a condition to prune based on the number of milestones
*
* @param config Snapshot configuration
* @param snapshotProvider persistence connector to snapshot data
* @param tangle persistence connector to tangle data
*/
public DepthPruningCondition(SnapshotConfig config, SnapshotProvider snapshotProvider, Tangle tangle) {
this.config = config;
this.snapshotProvider = snapshotProvider;
this.tangle = tangle;
}
@Override
public boolean shouldPrune() throws TransactionPruningException {
try {
MilestoneViewModel milestonevm = MilestoneViewModel.first(tangle);
return milestonevm != null && getSnapshotPruningMilestone() > milestonevm.index();
} catch (Exception e) {
throw new TransactionPruningException("Unable to determine start milestone", e);
}
}
@Override
public int getSnapshotPruningMilestone() {
Snapshot initialSnapshot = snapshotProvider.getInitialSnapshot();
int snapshotIndex = initialSnapshot.getIndex();
return snapshotIndex - config.getLocalSnapshotsPruningDelay();
}
}
| 1,727 | 34.265306 | 107 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/transactionpruning/PruningCondition.java
|
package com.iota.iri.service.transactionpruning;
/**
* Condition that specifies if and on what milestone pruning happens.
*/
public interface PruningCondition {
/**
* Executes a check on the node to determine if we should prune
*
* @return <code>true</code> if we should start pruning, otherwise <code>false</code>
*/
boolean shouldPrune() throws TransactionPruningException;
/**
* Determines the milestone at which we should start pruning.
* This does not take the minimum amount of milestones in the database into account.
*
* @return The index of the last milestone we will keep in the database.
* Everything before this number will be pruned away if allowed.
* @throws TransactionPruningException if we could not obtain the requirements for determining the milestone index
*/
int getSnapshotPruningMilestone() throws TransactionPruningException;
}
| 936 | 36.48 | 118 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/transactionpruning/SizePruningCondition.java
|
package com.iota.iri.service.transactionpruning;
import com.iota.iri.conf.BaseIotaConfig;
import com.iota.iri.conf.SnapshotConfig;
import com.iota.iri.model.IntegerIndex;
import com.iota.iri.model.persistables.Milestone;
import com.iota.iri.storage.Indexable;
import com.iota.iri.storage.Persistable;
import com.iota.iri.storage.Tangle;
import com.iota.iri.utils.IotaUtils;
import com.iota.iri.utils.Pair;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Initiates pruning based on how much space the DB takes on the file system
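 * <p>
 * Sketch of the behaviour (hypothetical numbers): once the database exceeds the configured maximum size, the
 * condition requests pruning of the oldest {@value #MILESTONES} milestones, e.g. with an oldest stored milestone
 * of 950_000 the pruning target becomes milestone 950_100.
 * </p>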
*/
public class SizePruningCondition implements PruningCondition {
private static final Logger log = LoggerFactory.getLogger(SizePruningCondition.class);
/**
* Percentage margin for the database max size due to inaccurate size calculation
     * The size can easily fluctuate by 1 GB in 10 seconds
*/
private static final double MARGIN = 1;
/**
     * Number of milestones we prune when the DB is too large, at roughly 4.6 MB per milestone on average
*/
public static final int MILESTONES = 100;
/**
* We give the db a chance to update the size before retrying to prune
*/
private static final long TIME_DELAY = TimeUnit.SECONDS.toMillis(30);
/**
* The maximum size we want the DB to have, with margin due to DB delays for writing to disk
*/
private final long maxSize;
/**
* Database object we use to find the lowest milestone
*/
private final Tangle tangle;
/**
* The cached size of the db, so we don't keep pruning when the size doesn't change before a DB compaction
*/
private long lastSize = -1;
/**
* last time we forced a check despite size not having changed
*/
private long lastTime;
/**
* A condition to prune based on DB size
*
* @param tangle the database interface.
* @param config configuration with snapshot specific settings.
*/
public SizePruningCondition(Tangle tangle, SnapshotConfig config) {
this.tangle = tangle;
if (config.getLocalSnapshotsPruningEnabled()) {
maxSize = (long) Math.floor(IotaUtils.parseFileSize(config.getLocalSnapshotsDbMaxSize()) / 100 * (100-MARGIN));
} else {
if (config.getLocalSnapshotsDbMaxSize() != BaseIotaConfig.Defaults.LOCAL_SNAPSHOTS_DB_MAX_SIZE) {
log.warn("Local snapshots with size condition does not work with pruning disabled");
}
maxSize = -1;
}
}
@Override
public boolean shouldPrune() {
if (maxSize <= 0) {
return false;
}
long size = tangle.getPersistanceSize();
if (size == lastSize && System.currentTimeMillis() < lastTime + TIME_DELAY) {
return false;
}
lastSize = size;
lastTime = System.currentTimeMillis();
return size > maxSize;
}
@Override
public int getSnapshotPruningMilestone() throws TransactionPruningException {
int initialIndex;
try {
Pair<Indexable, Persistable> ms = tangle.getFirst(Milestone.class, IntegerIndex.class);
if (ms == null || ms.low == null) {
return -1;
}
initialIndex = ((IntegerIndex)ms.low).getValue();
} catch (Exception e) {
throw new TransactionPruningException("failed to find oldest milestone", e);
}
return initialIndex + MILESTONES;
}
}
| 3,529 | 31.385321 | 123 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/transactionpruning/TransactionPruner.java
|
package com.iota.iri.service.transactionpruning;
/**
* Represents the manager for the cleanup jobs that are issued by the
* {@link com.iota.iri.service.snapshot.LocalSnapshotManager} in connection with local snapshots and eventually other
* parts of the code.
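 * <p>
 * A minimal usage sketch (assumes an initialized {@code transactionPruner} and a concrete {@code job} instance):
 * <pre>{@code
 * transactionPruner.init();
 * transactionPruner.addJob(job);   // queue the cleanup work
 * transactionPruner.processJobs(); // every queued job is executed exactly once
 * }</pre>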
*/
public interface TransactionPruner {
/**
* This method adds a job to the TransactionPruner, that consequently can be executed by the {@link #processJobs()}
* method.
*
* In addition to adding the jobs to the internal list of jobs that have to be executed, it informs the job about
* the {@link TransactionPruner}, the {@link com.iota.iri.storage.Tangle}, the
* {@link com.iota.iri.controllers.TipsViewModel} and the {@link com.iota.iri.service.snapshot.Snapshot} instances
* that this job is working on.
*
* @param job the job that shall be executed
* @throws TransactionPruningException if anything goes wrong while adding the job
*/
void addJob(TransactionPrunerJob job) throws TransactionPruningException;
/**
     * This method executes all jobs that were added to the {@link TransactionPruner} through
* {@link #addJob(TransactionPrunerJob)}.
*
* The jobs will only be executed exactly once before being removed from the {@link TransactionPruner}.
*
* @throws TransactionPruningException if anything goes wrong while processing the jobs
*/
void processJobs() throws TransactionPruningException;
/**
* This method initializes the instance by preparing the pruning jobs.
*/
void init();
/**
* Starts the pruning jobs.
*/
void start();
/**
* Shuts down the pruning jobs.
*/
void shutdown();
/**
     * Checks if we have an active job running for the given type
     *
     * @param jobClass The type of job we check for.
* @return <code>true</code> if there is a job running, otherwise <code>false</code>
*/
<T extends TransactionPrunerJob> boolean hasActiveJobFor(Class<T> jobClass);
}
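/*
 * Usage sketch (illustrative only; the variable names and the concrete job are assumptions,
 * not part of this interface):
 *
 *     TransactionPruner pruner = ...; // e.g. an AsyncTransactionPruner instance
 *     pruner.init();                  // prepare the pruning jobs
 *     pruner.start();                 // process queued jobs in the background
 *     pruner.addJob(someJob);         // enqueue a job; it gets executed exactly once
 *     pruner.shutdown();              // stop the background processing
 */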
| 2,042 | 34.224138 | 119 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/transactionpruning/TransactionPrunerJob.java
|
package com.iota.iri.service.transactionpruning;
import com.iota.iri.controllers.TipsViewModel;
import com.iota.iri.service.snapshot.Snapshot;
import com.iota.iri.service.spentaddresses.SpentAddressesProvider;
import com.iota.iri.service.spentaddresses.SpentAddressesService;
import com.iota.iri.storage.Tangle;
/**
* Represents the basic contract for a job that get processed by the {@link TransactionPruner}.
*/
public interface TransactionPrunerJob {
/**
* Allows to set the {@link TransactionPruner} that this job belongs to (optional).
*
     * It simply stores the passed-in instance, so the job can for example persist its state once it has finished
     * processing a sub-task. We do not pass this parameter in via the constructor of the job, so we can also test
     * jobs standalone outside the scope of a {@link TransactionPruner}.
*
* It gets automatically called and set when we add a job to a {@link TransactionPruner}.
*
     * @param transactionPruner manager of the job that schedules its execution
*/
void setTransactionPruner(TransactionPruner transactionPruner);
/**
* This method returns the stored {@link TransactionPruner} instance.
*
     * @return manager of the job that schedules its execution
*/
TransactionPruner getTransactionPruner();
/**
     * Allows to set the {@link SpentAddressesService} that will ensure pruned valid transactions will have their
     * spent addresses persisted by the node
*
* @param spentAddressesService service to be injected
*/
void setSpentAddressesService(SpentAddressesService spentAddressesService);
/**
* Allows to set the {@link SpentAddressesProvider} that will ensure that spent addresses are written
* to the persistence layer when their corresponding transactions are pruned.
*
* @param spentAddressesProvider service to be injected
*/
void setSpentAddressesProvider(SpentAddressesProvider spentAddressesProvider);
/**
* Allows to set the {@link Tangle} object that this job should work on.
*
     * We do not pass this parameter in via the constructor of the job because the container that the job gets added
     * to (the {@link TransactionPruner}) already has knowledge about the {@link Tangle} instance we are working on and
     * can automatically set the reference upon addition of the job.
*
* This way we reduce the amount of code that has to be written when creating a job and at the same time ensure that
* all jobs are automatically working on the same correct {@link Tangle} instance, while still keeping full control
* over the property in tests (where we might want to test a job outside the scope of a {@link TransactionPruner}).
*
* @param tangle Tangle object which acts as a database interface
*/
void setTangle(Tangle tangle);
/**
* This method returns the stored {@link Tangle} instance.
*
* @return Tangle object which acts as a database interface
*/
Tangle getTangle();
/**
* Allows to set the {@link TipsViewModel} object that this job uses to remove pruned transactions from this cached
* instance.
*
     * We do not pass this parameter in via the constructor of the job because the container that the job gets added
     * to (the {@link TransactionPruner}) already has knowledge about the {@link TipsViewModel} instance we are working
     * on and can automatically set the reference upon addition of the job.
*
* This way we reduce the amount of code that has to be written when creating a job and at the same time ensure that
* all jobs are automatically working on the same correct {@link TipsViewModel} instance, while still keeping full
* control over the property in tests (where we might want to test a job outside the scope of a
* {@link TransactionPruner}).
*
* @param tipsViewModel manager for the tips (required for removing pruned transactions from this manager)
*/
void setTipsViewModel(TipsViewModel tipsViewModel);
/**
* This method returns the previously set {@link TipsViewModel} instance.
*
* @return manager for the tips (required for removing pruned transactions from this manager)
*/
TipsViewModel getTipsViewModel();
/**
* Allows to set the {@link Snapshot} that the node is using as a starting point for the state of the ledger.
*
* It can be used to retrieve important information about the range of transactions that are relevant for our node.
*
* @param snapshot last local or global snapshot that acts as a starting point for the state of ledger
*/
void setSnapshot(Snapshot snapshot);
/**
* This method returns the previously set {@link Snapshot} instance.
*
* @return last local or global snapshot that acts as a starting point for the state of ledger
*/
Snapshot getSnapshot();
/**
* Getter for the execution status of the job.
*
* @return execution status of the job.
*/
TransactionPrunerJobStatus getStatus();
/**
* Setter for the execution status of the job.
*
* @param transactionPrunerJobStatus new execution status of the job
*/
void setStatus(TransactionPrunerJobStatus transactionPrunerJobStatus);
/**
* This method processes the cleanup job and performs the actual pruning.
*
* @throws TransactionPruningException if something goes wrong while processing the job
*/
void process() throws TransactionPruningException;
/**
* This method is used to serialize the job before it gets persisted by the {@link TransactionPruner}.
*
* @return string representing a serialized version of this job
*/
String serialize();
}
| 5,837 | 41 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/transactionpruning/TransactionPrunerJobStatus.java
|
package com.iota.iri.service.transactionpruning;
/**
* Represents the different states a {@link TransactionPrunerJob} can be in.
*/
public enum TransactionPrunerJobStatus {
PENDING,
RUNNING,
DONE,
FAILED
}
| 225 | 17.833333 | 76 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/transactionpruning/TransactionPruningException.java
|
package com.iota.iri.service.transactionpruning;
/**
* This class is used to wrap exceptions that are specific to the transaction pruning logic.
*
 * It allows us to distinguish between the different kinds of errors that can happen during the execution of the pruning
* logic.
*/
public class TransactionPruningException extends Exception {
/**
* Constructor of the exception which allows us to provide a specific error message and the cause of the error.
*
* @param message reason why this error occurred
* @param cause wrapped exception that caused this error
*/
public TransactionPruningException(String message, Throwable cause) {
super(message, cause);
}
/**
* Constructor of the exception which allows us to provide a specific error message without having an underlying
* cause.
*
* @param message reason why this error occurred
*/
public TransactionPruningException(String message) {
super(message);
}
/**
* Constructor of the exception which allows us to wrap the underlying cause of the error without providing a
* specific reason.
*
* @param cause wrapped exception that caused this error
*/
public TransactionPruningException(Throwable cause) {
super(cause);
}
}
| 1,313 | 31.85 | 117 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/transactionpruning/async/AsyncTransactionPruner.java
|
package com.iota.iri.service.transactionpruning.async;
import com.iota.iri.conf.SnapshotConfig;
import com.iota.iri.controllers.TipsViewModel;
import com.iota.iri.service.snapshot.SnapshotProvider;
import com.iota.iri.service.spentaddresses.SpentAddressesProvider;
import com.iota.iri.service.spentaddresses.SpentAddressesService;
import com.iota.iri.service.transactionpruning.TransactionPruner;
import com.iota.iri.service.transactionpruning.TransactionPrunerJob;
import com.iota.iri.service.transactionpruning.TransactionPrunerJobStatus;
import com.iota.iri.service.transactionpruning.TransactionPruningException;
import com.iota.iri.service.transactionpruning.jobs.MilestonePrunerJob;
import com.iota.iri.service.transactionpruning.jobs.UnconfirmedSubtanglePrunerJob;
import com.iota.iri.storage.Tangle;
import com.iota.iri.utils.thread.ThreadIdentifier;
import com.iota.iri.utils.thread.ThreadUtils;
import java.util.HashMap;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p>
 * Creates a {@link TransactionPruner} that is able to process its jobs asynchronously in the background and persists
* its state in a file on the hard disk of the node.
* </p>
* <p>
* The asynchronous processing of the jobs is done through {@link Thread}s that are started and stopped by invoking the
* corresponding {@link #start()} and {@link #shutdown()} methods. Since some of the builtin jobs require a special
* logic for the way they are executed, we register the builtin job types here.
* </p>
*/
public class AsyncTransactionPruner implements TransactionPruner {
/**
     * The interval in milliseconds at which the {@link AsyncTransactionPruner} checks if new cleanup tasks are
     * available and need to be processed.
*/
private static final int GARBAGE_COLLECTOR_RESCAN_INTERVAL = 10000;
/**
* <p>
* The interval (in milliseconds) in which the {@link AsyncTransactionPruner} will persist its state.
* </p>
* Note: Since the worst thing that could happen when not having a 100% synced state file is to have a few floating
* "zombie" transactions in the database, we do not persist the state immediately but in intervals in a
* separate {@link Thread} (to save performance - until a db-based version gets introduced).
*/
private static final int GARBAGE_COLLECTOR_PERSIST_INTERVAL = 1000;
/**
* Logger for this class allowing us to dump debug and status messages.
*/
private static final Logger log = LoggerFactory.getLogger(AsyncTransactionPruner.class);
/**
* Tangle object which acts as a database interface.
*/
private final Tangle tangle;
/**
* Data provider for the snapshots that are relevant for the node.
*/
private final SnapshotProvider snapshotProvider;
/**
* Used to check whether an address was spent from before it is pruned.
*/
private final SpentAddressesService spentAddressesService;
/**
* Used to check whether an address is already persisted in the persistence layer.
*/
private final SpentAddressesProvider spentAddressesProvider;
/**
* Manager for the tips (required for removing pruned transactions from this manager).
*/
private final TipsViewModel tipsViewModel;
/**
* Configuration with important snapshot related parameters.
*/
private final SnapshotConfig config;
/**
* Holds a reference to the {@link ThreadIdentifier} for the cleanup thread.
* <p>
* Using a {@link ThreadIdentifier} for spawning the thread allows the {@link ThreadUtils} to spawn exactly one
* thread for this instance even when we call the {@link #start()} method multiple times.
* </p>
*/
private final ThreadIdentifier cleanupThreadIdentifier = new ThreadIdentifier("Transaction Pruner");
/**
* Holds a reference to the {@link ThreadIdentifier} for the state persistence thread.
* <p>
* Using a {@link ThreadIdentifier} for spawning the thread allows the {@link ThreadUtils} to spawn exactly one
* thread for this instance even when we call the {@link #start()} method multiple times.
* </p>
*/
private final ThreadIdentifier persisterThreadIdentifier = new ThreadIdentifier("Transaction Pruner Persister");
/**
* A map of {@link JobParser}s allowing us to determine how to parse the jobs from the state file, based on their
* type.
*/
private final Map<String, JobParser> jobParsers = new HashMap<>();
/**
* List of cleanup jobs that shall get processed by the {@link AsyncTransactionPruner} (grouped by their class).
*/
private final Map<Class<? extends TransactionPrunerJob>, JobQueue> jobQueues = new HashMap<>();
/**
* @param tangle Tangle object which acts as a database interface
* @param snapshotProvider data provider for the snapshots that are relevant for the node
* @param tipsViewModel manager for the tips (required for removing pruned transactions from this manager)
* @param config Configuration with important snapshot related configuration parameters
*/
public AsyncTransactionPruner(Tangle tangle, SnapshotProvider snapshotProvider,
SpentAddressesService spentAddressesService,
SpentAddressesProvider spentAddressesProvider,
TipsViewModel tipsViewModel,
SnapshotConfig config) {
this.tangle = tangle;
this.snapshotProvider = snapshotProvider;
this.spentAddressesService = spentAddressesService;
this.spentAddressesProvider = spentAddressesProvider;
this.tipsViewModel = tipsViewModel;
this.config = config;
}
@Override
public void init() {
addJobQueue(UnconfirmedSubtanglePrunerJob.class, new SimpleJobQueue(this));
addJobQueue(MilestonePrunerJob.class, new MilestonePrunerJobQueue(this, config));
registerParser(MilestonePrunerJob.class, MilestonePrunerJob::parse);
registerParser(UnconfirmedSubtanglePrunerJob.class, UnconfirmedSubtanglePrunerJob::parse);
}
/**
* {@inheritDoc}
*
* It adds the job to its corresponding queue.
*/
@Override
public void addJob(TransactionPrunerJob job) throws TransactionPruningException {
job.setTransactionPruner(this);
job.setSpentAddressesService(spentAddressesService);
job.setSpentAddressesProvider(spentAddressesProvider);
job.setTangle(tangle);
job.setTipsViewModel(tipsViewModel);
job.setSnapshot(snapshotProvider.getInitialSnapshot());
// this call is "unchecked" to a "raw" JobQueue and it is intended since the matching JobQueue is defined by the
// registered job types
getJobQueue(job.getClass()).addJob(job);
}
/**
* {@inheritDoc}
*
* <p>
* It iterates through all available queues and triggers the processing of their jobs.
* </p>
*
* @throws TransactionPruningException if anything goes wrong while processing the cleanup jobs
*/
@Override
public void processJobs() throws TransactionPruningException {
for(JobQueue jobQueue : jobQueues.values()) {
if(Thread.currentThread().isInterrupted()) {
return;
}
jobQueue.processJobs();
}
}
/**
* <p>
* This method removes all queued jobs and resets the state of the {@link TransactionPruner}. It can for example be
     * used to clean up after tests.
* </p>
     * It cycles through all registered {@link JobQueue}s and clears them.
*/
void clear() {
for (JobQueue jobQueue : jobQueues.values()) {
jobQueue.clear();
}
}
/**
* <p>
     * This method starts the cleanup {@link Thread} that asynchronously processes the queued jobs in the
     * background.
* </p>
* <p>
* Note: This method is thread safe since we use a {@link ThreadIdentifier} to address the {@link Thread}. The
* {@link ThreadUtils} take care of only launching exactly one {@link Thread} that is not terminated.
* </p>
*/
public void start() {
ThreadUtils.spawnThread(this::processJobsThread, cleanupThreadIdentifier);
}
/**
     * Shuts down the background threads that process the queued jobs.
*/
public void shutdown() {
ThreadUtils.stopThread(cleanupThreadIdentifier);
ThreadUtils.stopThread(persisterThreadIdentifier);
}
/**
* This method contains the logic for the processing of the cleanup jobs, that gets executed in a separate
* {@link Thread}.
*
* It repeatedly calls {@link #processJobs()} until the TransactionPruner is shutting down.
*/
private void processJobsThread() {
while(!Thread.currentThread().isInterrupted()) {
try {
processJobs();
} catch(TransactionPruningException e) {
log.error("error while processing the transaction pruner jobs", e);
}
ThreadUtils.sleep(GARBAGE_COLLECTOR_RESCAN_INTERVAL);
}
}
/**
* Registers the job queue that is responsible for processing the jobs of the given type.
*
* The {@link JobQueue} implements the specific logic how the jobs are being executed and scheduled.
*
* @param jobClass type of the job that we want to be able to process
     * @param jobQueue the queue that is responsible for processing the jobs of that type
*/
private void addJobQueue(Class<? extends TransactionPrunerJob> jobClass, JobQueue jobQueue) {
jobQueues.put(jobClass, jobQueue);
}
/**
* This method allows to register a {@link JobParser} for a given job type.
*
* <p>
* When we serialize the pending jobs to save the current state, we also dump their class names, which allows us to
* generically parse their serialized representation using the registered parser function back into the
* corresponding job.
* </p>
*
* @param jobClass class of the job that the TransactionPruner shall be able to handle
* @param jobParser parser function for the serialized version of jobs of the given type
*/
private void registerParser(Class<?> jobClass, JobParser jobParser) {
this.jobParsers.put(jobClass.getCanonicalName(), jobParser);
}
/**
* This method retrieves the job queue belonging to a given job type.
*
* Before returning the {@link JobQueue} we check if one exists.
*
* @param jobClass type of the job that we want to retrieve the queue for
     * @return the job queue for the provided job type
* @throws TransactionPruningException if there is no {@link JobQueue} for the given job type
*/
private JobQueue getJobQueue(Class<? extends TransactionPrunerJob> jobClass) throws TransactionPruningException {
JobQueue jobQueue = jobQueues.get(jobClass);
if(jobQueue == null) {
throw new TransactionPruningException("jobs of type \"" + jobClass.getCanonicalName() + "\" are not supported");
}
return jobQueue;
}
@Override
public <T extends TransactionPrunerJob> boolean hasActiveJobFor(Class<T> jobClass) {
JobQueue<T> jobQueue = jobQueues.get(jobClass);
if (jobQueue == null) {
return false;
}
return jobQueue.stream().anyMatch(job -> {
return job.getStatus().equals(TransactionPrunerJobStatus.RUNNING)
|| job.getStatus().equals(TransactionPrunerJobStatus.PENDING);
});
}
/**
* Functional interface for the lambda function that takes care of parsing a specific job from its serialized String
* representation into the corresponding object in memory.
*
* @see AsyncTransactionPruner#registerParser(Class, JobParser) to register the parser
*/
@FunctionalInterface
private interface JobParser {
/**
* Parses the serialized version of a job back into its unserialized object.
*
* @param input serialized job
* @return unserialized job
* @throws TransactionPruningException if anything goes wrong while parsing the serialized job
*/
TransactionPrunerJob parse(String input) throws TransactionPruningException;
}
}
| 12,598 | 39.124204 | 124 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/transactionpruning/async/JobQueue.java
|
package com.iota.iri.service.transactionpruning.async;
import com.iota.iri.service.transactionpruning.TransactionPrunerJob;
import com.iota.iri.service.transactionpruning.TransactionPruningException;
import java.util.stream.Stream;
/**
* Represents a queue of jobs which is used by the {@link AsyncTransactionPruner} to internally organize the different
* kind of jobs.
*/
public interface JobQueue<T extends TransactionPrunerJob> {
/**
* Allows to add a job to the queue.
*
* @param job job that shall be added to the queue
*/
void addJob(T job) throws TransactionPruningException;
/**
* Clears the stored jobs and resets the queue.
*/
void clear();
/**
* This method processes the entire queue of this job type.
*
* @throws TransactionPruningException if anything goes wrong while processing the jobs
*/
void processJobs() throws TransactionPruningException;
/**
* Returns a stream of jobs that can for example be used to serialize the jobs of this queue.
*
* @return Stream of jobs that are currently part of this queue
*/
Stream<T> stream();
}
| 1,159 | 28.74359 | 118 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/transactionpruning/async/MilestonePrunerJobQueue.java
|
package com.iota.iri.service.transactionpruning.async;
import com.iota.iri.conf.SnapshotConfig;
import com.iota.iri.service.transactionpruning.TransactionPruner;
import com.iota.iri.service.transactionpruning.TransactionPrunerJobStatus;
import com.iota.iri.service.transactionpruning.TransactionPruningException;
import com.iota.iri.service.transactionpruning.jobs.MilestonePrunerJob;
import java.util.Deque;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.stream.Stream;
/**
* Represents a queue of {@link MilestonePrunerJob}s that are being executed by the {@link AsyncTransactionPruner}.
*
* The {@link AsyncTransactionPruner} uses a separate queue for every job type, to be able to adjust the processing
* logic based on the type of the job.
*/
public class MilestonePrunerJobQueue implements JobQueue<MilestonePrunerJob> {
/**
* Holds the youngest (highest) milestone index that was successfully cleaned (gets updated when a job finishes).
*/
private int youngestFullyCleanedMilestoneIndex;
/**
* Holds a reference to the container of this queue.
*/
private final TransactionPruner transactionPruner;
/**
* Used to internally store the queued jobs.
*/
private final Deque<MilestonePrunerJob> jobs = new ConcurrentLinkedDeque<>();
/**
* Creates a new queue that is tailored to handle {@link MilestonePrunerJob}s.
*
* Since the {@link MilestonePrunerJob}s can take a long time until they are fully processed, we handle them in a
* different way than other jobs and consolidate the queue whenever we add a new job.
*
* @param transactionPruner reference to the container of this queue
* @param snapshotConfig reference to the config with snapshot related parameters
*/
public MilestonePrunerJobQueue(TransactionPruner transactionPruner, SnapshotConfig snapshotConfig) {
this.transactionPruner = transactionPruner;
youngestFullyCleanedMilestoneIndex = snapshotConfig.getMilestoneStartIndex();
}
/**
* {@inheritDoc}
*
     * Since the {@link MilestonePrunerJob}s can take a long time to finish, we try to consolidate the queue when we
     * add new jobs, so the state file doesn't get too big while the node is busy cleaning up the milestones. To do
     * so, we first check whether the job that shall be added is already covered by existing cleanup jobs.
*
     * If it targets a milestone outside of the already covered range, we first check if the last existing job can be
     * extended to cover the target milestone index of our new job. If the job cannot be appended to an existing job,
     * we add it to the end of our queue.
*
* @param job the {@link MilestonePrunerJob} that shall be added to the queue
     * @throws TransactionPruningException if the given job is not a {@link MilestonePrunerJob}
*/
@Override
public void addJob(MilestonePrunerJob job) {
synchronized (jobs) {
MilestonePrunerJob lastMilestonePrunerJob = jobs.peekLast();
if (adjustedRangeCoversNewMilestone(job, lastMilestonePrunerJob)) {
if (lastMilestonePrunerJob != null) {
                    // synchronize the status check so we can be 100% sure that the job will see our modification
// BEFORE it finishes and leaves the processing loop
synchronized (lastMilestonePrunerJob) {
if (lastMilestonePrunerJob.getStatus() != TransactionPrunerJobStatus.DONE) {
lastMilestonePrunerJob.setTargetIndex(job.getTargetIndex());
return;
}
}
}
jobs.add(job);
}
}
}
/**
* {@inheritDoc}
*/
@Override
public void clear() {
synchronized (jobs) {
jobs.clear();
}
}
/**
* {@inheritDoc}
*
* It retrieves the first job while leaving it in the queue, so the job can persist its changes when it finishes a
* sub-task. After the job was successfully processed, we update the internal
* {@link #youngestFullyCleanedMilestoneIndex} marker (so jobs that might be added later can continue where this job
* stopped, without having to reexamine all the previous milestones) and remove it from the queue.
*
     * The last job is intentionally kept in the queue, so already processed milestones can be skipped after IRI restarts.
*
* @throws TransactionPruningException if anything goes wrong while processing the jobs
*/
@Override
public void processJobs() throws TransactionPruningException {
MilestonePrunerJob currentJob;
while (!Thread.currentThread().isInterrupted() && (currentJob = jobs.peek()) != null) {
currentJob.process();
youngestFullyCleanedMilestoneIndex = currentJob.getTargetIndex();
// we always leave the last job in the queue to be able to "serialize" the queue status and allow
// to skip already processed milestones even when IRI restarts
if (jobs.size() == 1) {
break;
}
jobs.poll();
}
}
/**
* {@inheritDoc}
*/
@Override
public Stream<MilestonePrunerJob> stream() {
return jobs.stream();
}
/**
* This method checks the range of a new cleanup job and adjusts it to reflect the progress made by previous jobs.
*
* We first check if the cleanup target was covered by previous jobs already. If the target index addresses a new
* milestone we adjust the starting and current index to account for the progress made by previous jobs.
*
* @param job the new job that shall be checked and adjusted to the previous progress
* @param lastMilestonePrunerJob the last cleanup job in the queue
* @return true if the job still has a range that has to be cleaned up or false otherwise
*/
private boolean adjustedRangeCoversNewMilestone(MilestonePrunerJob job, MilestonePrunerJob lastMilestonePrunerJob) {
int lastTargetIndex = lastMilestonePrunerJob != null
? lastMilestonePrunerJob.getTargetIndex()
: youngestFullyCleanedMilestoneIndex;
if (job.getTargetIndex() <= lastTargetIndex) {
return false;
}
if (lastTargetIndex >= job.getStartingIndex()) {
job.setStartingIndex(lastTargetIndex + 1);
}
if (lastTargetIndex >= job.getCurrentIndex()) {
job.setCurrentIndex(lastTargetIndex + 1);
}
return true;
}
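    // Worked example (illustrative): if the last queued job's targetIndex is 900 and a new job
    // arrives with startingIndex 850 and targetIndex 950, the new job is adjusted to start (and
    // continue) at 901, so milestones 850..900 are not examined a second time. A new job with a
    // targetIndex of 900 or less is dropped entirely (the method returns false).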
}
| 6,709 | 39.666667 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/transactionpruning/async/SimpleJobQueue.java
|
package com.iota.iri.service.transactionpruning.async;
import com.iota.iri.service.transactionpruning.TransactionPruner;
import com.iota.iri.service.transactionpruning.TransactionPrunerJob;
import com.iota.iri.service.transactionpruning.TransactionPruningException;
import java.util.*;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.stream.Stream;
/**
 * Represents a queue of {@link TransactionPrunerJob}s that are being executed by the {@link AsyncTransactionPruner}.
*
* The {@link AsyncTransactionPruner} uses a separate queue for every job type, to be able to adjust the processing
* logic based on the type of the job.
*/
public class SimpleJobQueue implements JobQueue<TransactionPrunerJob> {
/**
* Holds a reference to the container of this queue.
*/
private final TransactionPruner transactionPruner;
/**
* Used to internally store the queued jobs.
*/
private final Deque<TransactionPrunerJob> jobs = new ConcurrentLinkedDeque<>();
/**
     * Creates a queue of jobs that will process them in their insertion order, removing each job once it has been
     * processed.
*
* This is used by all job types that do not require special routines for getting processed.
*
* @param transactionPruner reference to the container of this queue
*/
public SimpleJobQueue(TransactionPruner transactionPruner) {
this.transactionPruner = transactionPruner;
}
/**
* {@inheritDoc}
*
* It simply adds the job to the underlying {@link #jobs}.
*
* Note: Since we always add to the end and remove from the start, we do not need to synchronize this.
*/
@Override
public void addJob(TransactionPrunerJob job) {
jobs.addLast(job);
}
/**
* {@inheritDoc}
*/
@Override
public void clear() {
synchronized (jobs) {
jobs.clear();
}
}
/**
* {@inheritDoc}
*
* It retrieves the first job while leaving it in the queue, so the job can still persist its changes if it is a
* long running one that has to save its progress while executing sub-tasks. Only after the job was successfully
* processed, we remove it from the queue and advance to the next one.
*
* If an error occurs while executing the job, we add it back to the end of the queue so we can try to execute it
* another time.
*
     * If the queue gets cleared while a job is failing, the failed job is not re-added.
*/
@Override
public void processJobs() throws TransactionPruningException {
TransactionPrunerJob currentJob;
while (!Thread.currentThread().isInterrupted() && (currentJob = jobs.peek()) != null) {
try {
currentJob.process();
jobs.poll();
} catch (TransactionPruningException e) {
                // only add the job back to the end of the queue if the queue wasn't cleared in the meantime
synchronized (jobs) {
if (jobs.poll() == currentJob) {
jobs.addLast(currentJob);
}
}
throw e;
}
}
}
/**
* {@inheritDoc}
*/
@Override
public Stream<TransactionPrunerJob> stream() {
return jobs.stream();
}
}
| 3,406 | 32.07767 | 117 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/transactionpruning/jobs/AbstractTransactionPrunerJob.java
|
package com.iota.iri.service.transactionpruning.jobs;
import com.iota.iri.controllers.TipsViewModel;
import com.iota.iri.service.snapshot.Snapshot;
import com.iota.iri.service.spentaddresses.SpentAddressesProvider;
import com.iota.iri.service.spentaddresses.SpentAddressesService;
import com.iota.iri.service.transactionpruning.TransactionPruner;
import com.iota.iri.service.transactionpruning.TransactionPrunerJob;
import com.iota.iri.service.transactionpruning.TransactionPrunerJobStatus;
import com.iota.iri.service.transactionpruning.TransactionPruningException;
import com.iota.iri.storage.Tangle;
/**
* Implements the most basic functionality that is shared by the different kinds of jobs.
*/
public abstract class AbstractTransactionPrunerJob implements TransactionPrunerJob {
/**
* Holds the execution status of the job.
*/
private TransactionPrunerJobStatus status = TransactionPrunerJobStatus.PENDING;
/**
     * Holds a reference to the manager of the job that schedules its execution.
*/
private TransactionPruner transactionPruner;
/**
* Ascertains that pruned transactions are recorded as spent addresses where necessary.
*/
protected SpentAddressesService spentAddressesService;
/**
* Ascertains whether transactions are already added to the underlying persistence layer.
*/
protected SpentAddressesProvider spentAddressesProvider;
/**
* Holds a reference to the tangle object which acts as a database interface.
*/
private Tangle tangle;
/**
* Holds a reference to the manager for the tips (required for removing pruned transactions from this manager).
*/
private TipsViewModel tipsViewModel;
/**
* Holds a reference to the last local or global snapshot that acts as a starting point for the state of ledger.
*/
private Snapshot snapshot;
/**
* {@inheritDoc}
*/
@Override
public void setTransactionPruner(TransactionPruner transactionPruner) {
this.transactionPruner = transactionPruner;
}
/**
* {@inheritDoc}
*/
@Override
public TransactionPruner getTransactionPruner() {
return transactionPruner;
}
/**
* {@inheritDoc}
*/
@Override
public void setTangle(Tangle tangle) {
this.tangle = tangle;
}
/**
* {@inheritDoc}
*/
@Override
public Tangle getTangle() {
return tangle;
}
/**
* {@inheritDoc}
*/
@Override
public void setTipsViewModel(TipsViewModel tipsViewModel) {
this.tipsViewModel = tipsViewModel;
}
/**
* {@inheritDoc}
*/
@Override
public TipsViewModel getTipsViewModel() {
return tipsViewModel;
}
@Override
public void setSpentAddressesService(SpentAddressesService spentAddressesService) {
this.spentAddressesService = spentAddressesService;
}
@Override
public void setSpentAddressesProvider(SpentAddressesProvider spentAddressesProvider) {
this.spentAddressesProvider = spentAddressesProvider;
}
/**
* {@inheritDoc}
*/
@Override
public void setSnapshot(Snapshot snapshot) {
this.snapshot = snapshot;
}
/**
* {@inheritDoc}
*/
@Override
public Snapshot getSnapshot() {
return snapshot;
}
/**
* {@inheritDoc}
*/
@Override
public TransactionPrunerJobStatus getStatus() {
return status;
}
/**
* {@inheritDoc}
*/
@Override
public void setStatus(TransactionPrunerJobStatus status) {
this.status = status;
}
/**
* {@inheritDoc}
*/
@Override
public abstract void process() throws TransactionPruningException;
/**
* {@inheritDoc}
*/
@Override
public abstract String serialize();
}
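// Minimal sketch (illustrative only, not part of IRI) of what a concrete job has to provide on
// top of this base class: the actual pruning logic in process() and a string representation in
// serialize(). The class name is made up for the example.
class ExampleNoOpPrunerJob extends AbstractTransactionPrunerJob {
    @Override
    public void process() throws TransactionPruningException {
        // a real job would delete transactions via getTangle() here
        setStatus(TransactionPrunerJobStatus.DONE);
    }
    @Override
    public String serialize() {
        // a real job would dump the state that is needed to resume after a restart
        return "";
    }
}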
| 3,865 | 24.103896 | 116 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/transactionpruning/jobs/MilestonePrunerJob.java
|
package com.iota.iri.service.transactionpruning.jobs;
import com.iota.iri.controllers.MilestoneViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.model.IntegerIndex;
import com.iota.iri.model.persistables.Milestone;
import com.iota.iri.model.persistables.Transaction;
import com.iota.iri.service.transactionpruning.TransactionPrunerJobStatus;
import com.iota.iri.service.transactionpruning.TransactionPruningException;
import com.iota.iri.storage.Indexable;
import com.iota.iri.storage.Persistable;
import com.iota.iri.utils.ASCIIProgressBar;
import com.iota.iri.utils.Pair;
import com.iota.iri.utils.dag.DAGHelper;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Represents a cleanup job for {@link com.iota.iri.service.transactionpruning.TransactionPruner}s that removes
* milestones and all of their directly and indirectly referenced transactions (and the orphaned subtangles branching
* off of the deleted transactions).
*
* It is used by the {@link com.iota.iri.service.snapshot.LocalSnapshotManager} to clean up milestones prior to a
* snapshot. Even though it defines a range of milestones that shall be deleted, it gets processed one milestone at a
* time, persisting the progress after each step.
*/
public class MilestonePrunerJob extends AbstractTransactionPrunerJob {
private static final Logger log = LoggerFactory.getLogger(MilestonePrunerJob.class);
/**
* We start logging progress of deletion when the job contains at least this amount of milestones to prune
*/
private static final int MIN_LOG_NUMBER = 500;
/**
* Delay between logging attempts, if that is enabled for this job
*/
private static final int LOG_DELAY = 60000;
/**
* Holds the milestone index where this job starts cleaning up.
*/
private int startingIndex;
/**
* Holds the milestone index where this job stops cleaning up.
*/
private int targetIndex;
/**
* Holds the milestone index of the oldest milestone that was cleaned up already (the current progress).
*/
private int currentIndex;
/**
     * Cached flag that indicates whether we log the progress of this job
*/
private boolean shouldLog;
/**
     * The last time we logged; logging happens at most once a minute
*/
private long lastLogTime;
/**
* This method parses the string representation of a {@link MilestonePrunerJob} and creates the corresponding
* object.
*
     * It splits the String input into three parts (delimited by a ";") and passes them into the constructor of a new
* {@link MilestonePrunerJob} by interpreting them as {@link #startingIndex}, {@link #currentIndex} and
* {@link #targetIndex} of the job.
*
* @param input serialized String representation of a {@link MilestonePrunerJob}
* @return a new {@link MilestonePrunerJob} with the provided details
* @throws TransactionPruningException if the string can not be parsed
*/
public static MilestonePrunerJob parse(String input) throws TransactionPruningException {
String[] parts = input.split(";");
if(parts.length == 3) {
return new MilestonePrunerJob(Integer.valueOf(parts[0]), Integer.valueOf(parts[1]),
Integer.valueOf(parts[2]));
}
throw new TransactionPruningException("failed to parse TransactionPruner file - invalid input: " + input);
}
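    // Example (illustrative): parse("100;150;200") yields a job that prunes milestones 100..200
    // and resumes at milestone 150, i.e. the exact inverse of serialize() further below.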
/**
     * Does the same as {@link #MilestonePrunerJob(int, int, int)} but defaults to the {@link #currentIndex} being the
     * same as the {@link #startingIndex}. This is usually the case when we create a NEW job programmatically that does
     * not get restored from a state file, because we start cleaning up at the {@link #startingIndex}.
*
* @param startingIndex milestone index that defines where to start cleaning up
* @param targetIndex milestone index which defines the end of this pruning job
*/
public MilestonePrunerJob(int startingIndex, int targetIndex) {
this(startingIndex, startingIndex, targetIndex);
}
/**
     * Creates a job that cleans up all milestones from the {@code startingIndex} up to (and including) the
     * {@code targetIndex}, and that is set to have progressed already to the given {@code currentIndex}.
*
* Since cleanup jobs can be consolidated (to reduce the size of the
* {@link com.iota.iri.service.transactionpruning.TransactionPruner} state file) and restored (after IRI restarts),
     * we need to be able to provide both parameters even though the job usually always "starts" with its
* being equal to the {@code startingIndex}.
*
* If the {@link #currentIndex} is bigger than the {@link #targetIndex} we set the state to DONE (necessary when
* restoring the job from the state file).
*
* @param startingIndex milestone index that defines where to start cleaning up
* @param currentIndex milestone index that defines the next milestone that should be cleaned up by this job
* @param targetIndex milestone index which defines the end of this pruning job
*/
private MilestonePrunerJob(int startingIndex, int currentIndex, int targetIndex) {
setStartingIndex(startingIndex);
setCurrentIndex(currentIndex);
setTargetIndex(targetIndex);
if (currentIndex > targetIndex) {
setStatus(TransactionPrunerJobStatus.DONE);
}
shouldLog = targetIndex - startingIndex > MIN_LOG_NUMBER;
}
/**
* {@inheritDoc}
*
* It iterates from the {@link #currentIndex} to the provided {@link #targetIndex} and processes every milestone
* one by one. After each step is finished we persist the progress to be able to continue with the current progress
* upon IRI restarts.
*/
@Override
public void process() throws TransactionPruningException {
if (getStatus() != TransactionPrunerJobStatus.DONE) {
setStatus(TransactionPrunerJobStatus.RUNNING);
try {
while (!Thread.currentThread().isInterrupted() && getStatus() != TransactionPrunerJobStatus.DONE) {
cleanupMilestoneTransactions();
setCurrentIndex(getCurrentIndex() + 1);
logProgress();
// synchronize this call because the MilestonePrunerJobQueue needs it to check if we can be extended
synchronized (this) {
if (getCurrentIndex() > getTargetIndex()) {
setStatus(TransactionPrunerJobStatus.DONE);
}
}
}
} catch (TransactionPruningException e) {
setStatus(TransactionPrunerJobStatus.FAILED);
throw e;
}
}
}
private void logProgress() {
if (!shouldLog || (lastLogTime + LOG_DELAY) > System.currentTimeMillis()) {
return;
}
StringBuilder progressSB = new StringBuilder();
// add progress bar
progressSB.append(ASCIIProgressBar.getProgressBarString(startingIndex, targetIndex, currentIndex));
        // add the pruned / remaining milestone counters
progressSB.append(String.format(" [Pruned %d / %d MS - remaining: %d]", currentIndex - startingIndex,
targetIndex - startingIndex, targetIndex - currentIndex));
log.info(progressSB.toString());
lastLogTime = System.currentTimeMillis();
}
/**
* {@inheritDoc}
*
     * It simply concatenates the {@link #startingIndex}, the {@link #currentIndex} and the {@link #targetIndex}, as
     * they are necessary to fully describe the job.
*/
@Override
public String serialize() {
return getStartingIndex() + ";" + getCurrentIndex() + ";" + getTargetIndex();
}
/**
* Getter of the {@link #startingIndex}.
*
* @return milestone index where this job starts cleaning up
*/
public int getStartingIndex() {
return startingIndex;
}
/**
* Setter of the {@link #startingIndex}.
*
* @param startingIndex milestone index where this job starts cleaning up
*/
public void setStartingIndex(int startingIndex) {
this.startingIndex = startingIndex;
}
/**
* Getter of the {@link #currentIndex}.
*
* @return milestone index of the oldest milestone that was cleaned up already (the current progress)
*/
public int getCurrentIndex() {
return currentIndex;
}
/**
* Setter of the {@link #currentIndex}.
*
* @param currentIndex milestone index of the oldest milestone that was cleaned up already (the current progress)
*/
public void setCurrentIndex(int currentIndex) {
this.currentIndex = currentIndex;
}
/**
* Getter of the {@link #targetIndex}.
*
* @return milestone index where this job stops cleaning up
*/
public int getTargetIndex() {
return targetIndex;
}
/**
* Setter of the {@link #targetIndex}.
*
* @param targetIndex milestone index where this job stops cleaning up
*/
public void setTargetIndex(int targetIndex) {
this.targetIndex = targetIndex;
}
/**
* This method takes care of cleaning up a single milestone and all of its transactions and performs the actual
* database operations.
*
* It performs the deletions in an atomic way, which means that either the full processing succeeds or fails.
*
* We first retrieve the elements that shall be deleted and then analyze them before removing them from the
* database. While processing them, we issue additional {@link UnconfirmedSubtanglePrunerJob}s that remove the
     * orphaned parts of the tangle that branch off the deleted transactions, because they would otherwise lose their
* connection to the rest of the tangle unless they are branching off a solid entry point (in which case we wait
* with the deletion until the solid entry point expires).
*
* @throws TransactionPruningException if something goes wrong while cleaning up the milestone
*/
private void cleanupMilestoneTransactions() throws TransactionPruningException {
try {
List<Pair<Indexable, ? extends Class<? extends Persistable>>> elementsToDelete = getElementsToDelete();
elementsToDelete.forEach(element -> {
if(Transaction.class.equals(element.hi)) {
getTipsViewModel().removeTipHash((Hash) element.low);
if (!getSnapshot().hasSolidEntryPoint((Hash) element.low)) {
try {
getTransactionPruner().addJob(new UnconfirmedSubtanglePrunerJob((Hash) element.low));
} catch (TransactionPruningException e) {
throw new RuntimeException(e);
}
}
} else if(Milestone.class.equals(element.hi)) {
MilestoneViewModel.clear(((IntegerIndex) element.low).getValue());
}
});
getTangle().deleteBatch(elementsToDelete);
} catch(Exception e) {
throw new TransactionPruningException("failed to cleanup milestone #" + getCurrentIndex(), e);
}
}
/**
* Collects all database items that belong to the current milestone and that shall be deleted.
*
* It does that by iterating through all the transactions that belong to the current milestone (that are directly or
* indirectly referenced by the chosen milestone) and collecting them in a List of items to delete.
*
* @return list of elements that shall be deleted from the database
* @throws TransactionPruningException if anything goes wrong while collecting the elements
*/
private List<Pair<Indexable, ? extends Class<? extends Persistable>>> getElementsToDelete() throws
TransactionPruningException {
try {
List<Pair<Indexable, ? extends Class<? extends Persistable>>> elementsToDelete = new ArrayList<>();
MilestoneViewModel milestoneViewModel = MilestoneViewModel.get(getTangle(), getCurrentIndex());
if (milestoneViewModel != null) {
elementsToDelete.add(new Pair<>(milestoneViewModel.getHash(), Transaction.class));
elementsToDelete.add(new Pair<>(new IntegerIndex(milestoneViewModel.index()), Milestone.class));
DAGHelper.get(getTangle()).traverseApprovees(milestoneViewModel.getHash(),
approvedTransaction -> approvedTransaction.snapshotIndex() >= milestoneViewModel.index(),
approvedTransaction -> {
if (approvedTransaction.value() < 0 &&
!spentAddressesProvider.containsAddress(approvedTransaction.getAddressHash())) {
log.warn("Pruned spend transaction " + approvedTransaction.getHash() +
" did not have its spent address recorded. Persisting it now");
spentAddressesService
.persistSpentAddresses(Collections.singletonList(approvedTransaction));
}
elementsToDelete.add(new Pair<>(approvedTransaction.getHash(), Transaction.class));
});
}
return elementsToDelete;
} catch (Exception e) {
throw new TransactionPruningException("failed to determine which elements to delete", e);
}
}
}
| 13,823 | 41.018237 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/transactionpruning/jobs/UnconfirmedSubtanglePrunerJob.java
|
package com.iota.iri.service.transactionpruning.jobs;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.model.HashFactory;
import com.iota.iri.model.persistables.Transaction;
import com.iota.iri.service.spentaddresses.SpentAddressesService;
import com.iota.iri.service.transactionpruning.TransactionPrunerJobStatus;
import com.iota.iri.service.transactionpruning.TransactionPruningException;
import com.iota.iri.storage.Indexable;
import com.iota.iri.storage.Persistable;
import com.iota.iri.utils.Pair;
import com.iota.iri.utils.dag.DAGHelper;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.stream.Collectors;
/**
* Represents a job for the {@link com.iota.iri.service.transactionpruning.TransactionPruner} that cleans up all
* unconfirmed transactions approving a certain transaction.
*
* It is used to clean up orphaned subtangles when they become irrelevant for the ledger.
*/
public class UnconfirmedSubtanglePrunerJob extends AbstractTransactionPrunerJob {
/**
* Holds the hash of the transaction that shall have its unconfirmed approvers cleaned.
*/
private Hash transactionHash;
/**
* This method parses the serialized representation of a {@link UnconfirmedSubtanglePrunerJob} and creates the
* corresponding object.
*
* It simply creates a hash from the input and passes it on to the {@link #UnconfirmedSubtanglePrunerJob(Hash)}.
*
     * @param input serialized String representation of a {@link UnconfirmedSubtanglePrunerJob}
* @return a new {@link UnconfirmedSubtanglePrunerJob} with the provided hash
*/
public static UnconfirmedSubtanglePrunerJob parse(String input) {
return new UnconfirmedSubtanglePrunerJob(HashFactory.TRANSACTION.create(input));
}
/**
     * Creates a job that cleans up all transactions that are approving the transaction with the given hash and that
     * have not been confirmed yet.
*
* @param transactionHash hash of the transaction that shall have its unconfirmed approvers pruned
*/
public UnconfirmedSubtanglePrunerJob(Hash transactionHash) {
this.transactionHash = transactionHash;
}
/**
* {@inheritDoc}
*
     * It iterates through all approvers and collects their hashes if they have not been confirmed yet. Then it
     * deletes them from the database (atomically) and also removes them from the runtime caches before marking the
     * job as done.
*/
@Override
public void process() throws TransactionPruningException {
if (getStatus() != TransactionPrunerJobStatus.DONE) {
setStatus(TransactionPrunerJobStatus.RUNNING);
Collection<TransactionViewModel> unconfirmedTxs = new HashSet<>();
try {
DAGHelper.get(getTangle()).traverseApprovers(
transactionHash,
approverTransaction -> approverTransaction.snapshotIndex() == 0,
unconfirmedTxs::add
);
                // persist the spent addresses of these soon-to-be-pruned transactions to the db
spentAddressesService.persistSpentAddresses(unconfirmedTxs);
List<Pair<Indexable, ? extends Class<? extends Persistable>>> elementsToDelete = unconfirmedTxs.stream()
.map(tx -> new Pair<>((Indexable) tx.getHash(), Transaction.class))
.collect(Collectors.toList());
// clean database entries
getTangle().deleteBatch(elementsToDelete);
// clean runtime caches
elementsToDelete.forEach(element -> getTipsViewModel().removeTipHash((Hash) element.low));
setStatus(TransactionPrunerJobStatus.DONE);
} catch (Exception e) {
setStatus(TransactionPrunerJobStatus.FAILED);
throw new TransactionPruningException(
"failed to cleanup orphaned approvers of transaction " + transactionHash, e
);
}
}
}
/**
* {@inheritDoc}
*
     * It simply dumps the string representation of the {@link #transactionHash}.
*/
@Override
public String serialize() {
return transactionHash.toString();
}
}
| 4,273 | 38.943925 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/validation/TransactionSolidifier.java
|
package com.iota.iri.service.validation;
import com.iota.iri.controllers.TipsViewModel;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.service.validation.impl.TransactionSolidifierImpl;
import com.iota.iri.network.TransactionRequester;
/**
* Solidification tool. Transactions placed into the solidification queue will be checked for solidity. Any missing
* reference transactions will be placed into the {@link TransactionRequester}. If a transaction is found to be solid
* it is updated as such and placed into the BroadcastQueue to be sent off to the node's neighbours.
*/
public interface TransactionSolidifier {
/**
* Initialize the executor service. Start processing transactions to solidify.
*/
void start();
/**
* Interrupt thread processes and shut down the executor service.
*/
void shutdown();
/**
* Add a hash to the solidification queue, and runs an initial {@link #checkSolidity} call.
*
* @param hash Hash of the transaction to solidify
*/
void addToSolidificationQueue(Hash hash);
/**
     * Checks if a milestone transaction is solid. Returns true if it is, and if it is not, it adds the hash to the
* solidification queue and returns false.
*
* @param hash Hash of the transaction to solidify
* @return True if solid, false if not
*/
boolean addMilestoneToSolidificationQueue(Hash hash);
/**
* Fetch the next transaction in the transactionsToBroadcast set.
* @return A {@link TransactionViewModel} object to be broadcast.
*/
TransactionViewModel getNextTxInBroadcastQueue();
/**
* Remove a broadcasted transaction from the transactionsToBroadcast set
* @param transactionsBroadcasted A {@link TransactionViewModel} object to remove from the set.
*/
void clearFromBroadcastQueue(TransactionViewModel transactionsBroadcasted);
/**
* This method does the same as {@link #checkSolidity(Hash, int)} but defaults to an unlimited amount
* of transactions that are allowed to be traversed.
*
* @param hash hash of the transactions that shall get checked
* @return true if the transaction is solid and false otherwise
* @throws Exception if anything goes wrong while trying to solidify the transaction
*/
boolean checkSolidity(Hash hash) throws Exception;
/**
* This method checks transactions for solidity and marks them accordingly if they are found to be solid.
*
* It iterates through all approved transactions until it finds one that is missing in the database or until it
* reached solid transactions on all traversed subtangles. In case of a missing transactions it issues a transaction
* request and returns false. If no missing transaction is found, it marks the processed transactions as solid in
* the database and returns true.
*
* Since this operation can potentially take a long time to terminate if it would have to traverse big parts of the
* tangle, it is possible to limit the amount of transactions that are allowed to be processed, while looking for
* unsolid / missing approvees. This can be useful when trying to "interrupt" the solidification of one transaction
* (if it takes too many steps) to give another one the chance to be solidified instead (i.e. prevent blocks in the
* solidification threads).
*
* @param hash hash of the transactions that shall get checked
* @param maxProcessedTransactions the maximum amount of transactions that are allowed to be traversed
* @return true if the transaction is solid and false otherwise
* @throws Exception if anything goes wrong while trying to solidify the transaction
*/
boolean checkSolidity(Hash hash, int maxProcessedTransactions) throws Exception;
/**
* Updates a transaction after it was stored in the tangle. Tells the node to not request the transaction anymore,
* to update the live tips accordingly, and attempts to quickly solidify the transaction.
*
* <p/>
* Performs the following operations:
*
* <ol>
     * <li>Removes {@code transactionViewModel}'s hash from the request queue since we already found it.</li>
* <li>If {@code transactionViewModel} has no children (approvers), we add it to the node's active tip list.</li>
* <li>Removes {@code transactionViewModel}'s parents (branch & trunk) from the node's tip list
* (if they're present there).</li>
* <li>Attempts to quickly solidify {@code transactionViewModel} by checking whether its direct parents
     * are solid. If solid, we add it to the queue of the transaction solidification thread to help it propagate the
* solidification to the approving child transactions.</li>
* <li>Requests missing direct parent (trunk & branch) transactions that are needed to solidify
* {@code transactionViewModel}.</li>
* </ol>
* @param transactionViewModel received transaction that is being updated
* @throws Exception if an error occurred while trying to solidify
* @see TipsViewModel
*/
void updateStatus(TransactionViewModel transactionViewModel) throws Exception;
/**
     * Tries to solidify the transaction quickly by performing {@link TransactionSolidifierImpl#checkApproovee} on
     * both parents (trunk and branch). If the parents are solid, marks the transaction as solid.
* @param transactionViewModel transaction to solidify
* @return <tt>true</tt> if we made the transaction solid, else <tt>false</tt>.
     * @throws Exception if an error occurs while checking the parents' solidity
*/
boolean quickSetSolid(TransactionViewModel transactionViewModel) throws Exception;
/**
* Adds the given transaction to the internal propagation thread.
* @param hash the transaction to be placed into propagation
*/
void addToPropagationQueue(Hash hash);
}
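/*
 * Usage sketch (illustrative only; the variable names are assumptions, and it is assumed here
 * that getNextTxInBroadcastQueue() returns null when there is nothing left to broadcast):
 *
 *     solidifier.start();                        // spin up the solidification threads
 *     solidifier.addToSolidificationQueue(hash); // schedule a transaction for solidification
 *     TransactionViewModel tvm;
 *     while ((tvm = solidifier.getNextTxInBroadcastQueue()) != null) {
 *         // ... broadcast tvm to the neighbours ...
 *         solidifier.clearFromBroadcastQueue(tvm);
 *     }
 *     solidifier.shutdown();
 */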
| 6,019 | 47.943089 | 121 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/validation/TransactionValidator.java
|
package com.iota.iri.service.validation;
import com.google.common.annotations.VisibleForTesting;
import com.iota.iri.conf.ProtocolConfig;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.crypto.Curl;
import com.iota.iri.crypto.Sponge;
import com.iota.iri.crypto.SpongeFactory;
import com.iota.iri.model.TransactionHash;
import com.iota.iri.network.TransactionRequester;
import com.iota.iri.service.snapshot.SnapshotProvider;
/**
* Tool for determining validity of a transaction via a {@link TransactionViewModel}, tryte array or byte array.
*/
public class TransactionValidator {
private static final int TESTNET_MWM_CAP = 13;
private final SnapshotProvider snapshotProvider;
private final TransactionRequester transactionRequester;
private int minWeightMagnitude = 81;
private static final long MAX_TIMESTAMP_FUTURE = 2L * 60L * 60L;
private static final long MAX_TIMESTAMP_FUTURE_MS = MAX_TIMESTAMP_FUTURE * 1_000L;
/**
* Constructor for Tangle Validator
*
* @param snapshotProvider data provider for the snapshots that are relevant for the node
* @param transactionRequester used to request missing transactions from neighbors
     * @param protocolConfig used for checking if we are in testnet mode and for the mwm. If we are not in testnet
     *                       mode, {@code mwm} is raised to at least {@value #TESTNET_MWM_CAP} regardless of parameter input.
* minimum weight magnitude: the minimal number of 9s that ought to appear at the end of the
* transaction hash
*/
public TransactionValidator(SnapshotProvider snapshotProvider, TransactionRequester transactionRequester, ProtocolConfig protocolConfig) {
this.snapshotProvider = snapshotProvider;
this.transactionRequester = transactionRequester;
setMwm(protocolConfig.isTestnet(), protocolConfig.getMwm());
}
/**
* Set the Minimum Weight Magnitude for validation checks.
*/
@VisibleForTesting
void setMwm(boolean testnet, int mwm) {
minWeightMagnitude = mwm;
//lowest allowed MWM encoded in 46 bytes.
if (!testnet){
minWeightMagnitude = Math.max(minWeightMagnitude, TESTNET_MWM_CAP);
}
}
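    // Worked example (illustrative): setMwm(false, 9) raises minWeightMagnitude to 13 on mainnet,
    // while setMwm(true, 9) keeps the requested value of 9 on testnet.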
/**
* @return the minimal number of trailing 9s that have to be present at the end of the transaction hash
* in order to validate that sufficient proof of work has been done
*/
public int getMinWeightMagnitude() {
return minWeightMagnitude;
}
/**
     * Checks whether the timestamp of the transaction is below the last global snapshot time
* or more than {@value #MAX_TIMESTAMP_FUTURE} seconds in the future, and thus invalid.
*
* <p>
* First the attachment timestamp (set after performing POW) is checked, and if not available
     * the regular timestamp is checked. The genesis transaction will always be valid.
* </p>
* @param transactionViewModel transaction under test
* @return <tt>true</tt> if timestamp is not in valid bounds and {@code transactionViewModel} is not genesis.
* Else returns <tt>false</tt>.
*/
private boolean hasInvalidTimestamp(TransactionViewModel transactionViewModel) {
// ignore invalid timestamps for transactions that were requested by our node while solidifying a milestone
if(transactionRequester.wasTransactionRecentlyRequested(transactionViewModel.getHash())) {
return false;
}
if (transactionViewModel.getAttachmentTimestamp() == 0) {
return transactionViewModel.getTimestamp() < snapshotProvider.getInitialSnapshot().getTimestamp() && !snapshotProvider.getInitialSnapshot().hasSolidEntryPoint(transactionViewModel.getHash())
|| transactionViewModel.getTimestamp() > (System.currentTimeMillis() / 1000) + MAX_TIMESTAMP_FUTURE;
}
return transactionViewModel.getAttachmentTimestamp() < (snapshotProvider.getInitialSnapshot().getTimestamp() * 1000L)
|| transactionViewModel.getAttachmentTimestamp() > System.currentTimeMillis() + MAX_TIMESTAMP_FUTURE_MS;
}
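    // Bounds sketch (illustrative arithmetic): with a snapshot timestamp of
    // 1_600_000_000 seconds, an attachment timestamp is invalid when it is below
    // 1_600_000_000_000 ms, or above System.currentTimeMillis() plus
    // MAX_TIMESTAMP_FUTURE_MS (2h * 60m * 60s * 1000 = 7_200_000 ms).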
/**
* Runs the following validation checks on a transaction:
* <ol>
* <li>{@link #hasInvalidTimestamp} check.</li>
* <li>Check that no value trits are set beyond the usable index, otherwise we will have values larger
* than max supply.</li>
* <li>Check that sufficient POW was performed.</li>
 * <li>In value transactions, we check that the address has 0 as its last trit. This must hold
 * because of the conversion between bytes and trits.</li>
 * </ol>
 * An exception is thrown upon failure.
*
* @param transactionViewModel transaction that should be validated
* @param minWeightMagnitude the minimal number of trailing 9s at the end of the transaction hash
* @throws StaleTimestampException if timestamp check fails
* @throws IllegalStateException if any of the other checks fail
*/
public void runValidation(TransactionViewModel transactionViewModel, final int minWeightMagnitude) {
transactionViewModel.setMetadata();
transactionViewModel.setAttachmentData();
if (hasInvalidTimestamp(transactionViewModel)) {
throw new StaleTimestampException("Invalid transaction timestamp.");
}
confirmValidTransactionValues(transactionViewModel);
int weightMagnitude = transactionViewModel.weightMagnitude;
if (weightMagnitude < minWeightMagnitude) {
throw new IllegalStateException("Invalid transaction hash");
}
if (transactionViewModel.value() != 0 && transactionViewModel.getAddressHash().trits()[Curl.HASH_LENGTH - 1] != 0) {
throw new IllegalStateException("Invalid transaction address");
}
}
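    // Usage sketch ('validator' and 'tvm' are hypothetical caller-side names):
    //
    //   try {
    //       validator.runValidation(tvm, validator.getMinWeightMagnitude());
    //   } catch (TransactionValidator.StaleTimestampException e) {
    //       // timestamp outside the valid window
    //   } catch (IllegalStateException e) {
    //       // value, POW or address check failed
    //   }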
private void confirmValidTransactionValues(TransactionViewModel transactionViewModel) throws IllegalStateException {
for (int i = TransactionViewModel.VALUE_TRINARY_OFFSET + TransactionViewModel.VALUE_USABLE_TRINARY_SIZE;
i < TransactionViewModel.VALUE_TRINARY_OFFSET + TransactionViewModel.VALUE_TRINARY_SIZE; i++) {
if (transactionViewModel.trits()[i] != 0) {
throw new IllegalStateException("Invalid transaction value");
}
}
}
/**
* Creates a new transaction from {@code trits} and validates it with {@link #runValidation}.
*
* @param trits raw transaction trits
* @param minWeightMagnitude minimal number of trailing 9s in transaction for POW validation
* @return the transaction resulting from the raw trits if valid.
* @throws RuntimeException if validation fails
*/
public TransactionViewModel validateTrits(final byte[] trits, int minWeightMagnitude) {
TransactionViewModel transactionViewModel = new TransactionViewModel(trits, TransactionHash.calculate(trits, 0, trits.length, SpongeFactory.create(SpongeFactory.Mode.CURLP81)));
runValidation(transactionViewModel, minWeightMagnitude);
return transactionViewModel;
}
/**
* Creates a new transaction from {@code bytes} and validates it with {@link #runValidation}.
*
* @param bytes raw transaction bytes
 * @param minWeightMagnitude minimal number of trailing 9s in transaction for POW validation
 * @param curl the sponge used to calculate the transaction hash
* @return the transaction resulting from the raw bytes if valid
* @throws RuntimeException if validation fails
*/
public TransactionViewModel validateBytes(final byte[] bytes, int minWeightMagnitude, Sponge curl) {
TransactionViewModel transactionViewModel = new TransactionViewModel(bytes, TransactionHash.calculate(bytes,
TransactionViewModel.TRINARY_SIZE, curl));
runValidation(transactionViewModel, minWeightMagnitude);
return transactionViewModel;
}
/**
* Thrown if transaction fails {@link #hasInvalidTimestamp} check.
*/
public static class StaleTimestampException extends RuntimeException {
StaleTimestampException (String message) {
super(message);
}
}
}
| 8,223 | 47.093567 | 202 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/service/validation/impl/TransactionSolidifierImpl.java
|
package com.iota.iri.service.validation.impl;
import com.google.common.annotations.VisibleForTesting;
import com.iota.iri.controllers.TipsViewModel;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.network.pipeline.TransactionProcessingPipeline;
import com.iota.iri.network.TransactionRequester;
import com.iota.iri.service.snapshot.SnapshotProvider;
import com.iota.iri.service.validation.TransactionSolidifier;
import com.iota.iri.storage.Tangle;
import com.iota.iri.utils.log.interval.IntervalLogger;
import com.iota.iri.utils.thread.DedicatedScheduledExecutorService;
import com.iota.iri.utils.thread.SilentScheduledExecutorService;
import java.util.*;
import java.util.concurrent.*;
import static com.iota.iri.controllers.TransactionViewModel.PREFILLED_SLOT;
import static com.iota.iri.controllers.TransactionViewModel.fromHash;
/**
* A solidifier class for processing transactions. Transactions are checked for solidity, and missing transactions are
* subsequently requested. Once a transaction is solidified correctly it is placed into a broadcasting set to be sent to
* neighboring nodes.
*/
public class TransactionSolidifierImpl implements TransactionSolidifier {
private Tangle tangle;
private SnapshotProvider snapshotProvider;
private TransactionRequester transactionRequester;
/**
* Max size for all queues.
*/
private static final int MAX_SIZE = 100;
private static final int SOLIDIFICATION_INTERVAL = 100;
private static final IntervalLogger log = new IntervalLogger(TransactionSolidifier.class);
/**
* Defines the maximum amount of transactions that are allowed to get processed while trying to solidify a
* milestone.
*
 * Note: we want to find the previous milestone and not get stuck somewhere at the end of the tangle with a
* long running {@link #checkSolidity(Hash)} call.
*/
private static final int SOLIDIFICATION_TRANSACTIONS_LIMIT = 300_000;
/**
* Executor service for running the {@link #processTransactionsToSolidify()}.
*/
private SilentScheduledExecutorService executorService = new DedicatedScheduledExecutorService(
"Transaction Solidifier", log.delegate());
/**
* A queue for processing transactions with the {@link #checkSolidity(Hash)} call. Once a transaction has been
* marked solid it will be placed into the {@link #transactionsToBroadcast} queue.
*/
private BlockingQueue<Hash> transactionsToSolidify = new LinkedBlockingQueue<>(MAX_SIZE);
    /**
     * A queue of transactions that is polled by the {@link TransactionProcessingPipeline} to broadcast
     * them to neighboring nodes.
     */
private BlockingQueue<TransactionViewModel> transactionsToBroadcast = new ArrayBlockingQueue<>(MAX_SIZE);
private TipsViewModel tipsViewModel;
private TransactionPropagator transactionPropagator;
private Hash cooAddress;
    /**
     * Constructor for the solidifier.
     * @param tangle The DB reference
     * @param snapshotProvider For fetching entry points for solidity checks
     * @param transactionRequester A requester for missing transactions
     * @param tipsViewModel Used to track and update the solidity of tip transactions
     * @param cooAddress The coordinator address; transactions from this address are requested eagerly
     */
public TransactionSolidifierImpl(Tangle tangle, SnapshotProvider snapshotProvider, TransactionRequester transactionRequester,
TipsViewModel tipsViewModel, Hash cooAddress){
this.tangle = tangle;
this.snapshotProvider = snapshotProvider;
this.transactionRequester = transactionRequester;
this.tipsViewModel = tipsViewModel;
this.transactionPropagator = new TransactionPropagator();
this.cooAddress = cooAddress;
}
/**
*{@inheritDoc}
*/
@Override
public void start(){
executorService.silentScheduleWithFixedDelay(this::processTransactionsToSolidify, 0,
SOLIDIFICATION_INTERVAL, TimeUnit.MILLISECONDS);
}
/**
*{@inheritDoc}
*/
@Override
public void shutdown() {
executorService.shutdownNow();
}
/**
*{@inheritDoc}
*/
@Override
public void addToSolidificationQueue(Hash hash){
try{
if(!transactionsToSolidify.contains(hash)) {
if (transactionsToSolidify.size() >= MAX_SIZE - 1) {
transactionsToSolidify.remove();
}
transactionsToSolidify.put(hash);
}
} catch(Exception e){
log.error("Error placing transaction into solidification queue",e);
}
}
/**
* {@inheritDoc}
*/
@Override
public boolean addMilestoneToSolidificationQueue(Hash hash){
try{
TransactionViewModel tx = fromHash(tangle, hash);
if (tx.isSolid()) {
transactionPropagator.addToPropagationQueue(hash);
return false;
}
addToSolidificationQueue(hash);
return true;
} catch (Exception e) {
log.error("Error adding milestone to solidification queue", e);
return false;
}
}
/**
* {@inheritDoc}
*/
@Override
public void addToPropagationQueue(Hash hash){
try {
this.transactionPropagator.addToPropagationQueue(hash);
} catch(Exception e){
log.debug("Error adding transaction to propagation queue: " + e.getMessage());
}
}
/**
* {@inheritDoc}
*/
@Override
public TransactionViewModel getNextTxInBroadcastQueue(){
return transactionsToBroadcast.poll();
}
/**
*{@inheritDoc}
*/
@Override
public void clearFromBroadcastQueue(TransactionViewModel transaction){
transactionsToBroadcast.remove(transaction);
}
/**
* Iterate through the {@link #transactionsToSolidify} queue and call {@link #checkSolidity(Hash)} on each hash.
* Solid transactions are then processed into the {@link #transactionsToBroadcast} queue. After that, process any
* solid transactions present in the {@link TransactionPropagator}.
*/
private void processTransactionsToSolidify(){
Hash hash;
if((hash = transactionsToSolidify.poll()) != null) {
try {
checkSolidity(hash);
} catch (Exception e) {
log.info(e.getMessage());
}
}
transactionPropagator.propagateSolidTransactions();
}
/**
*{@inheritDoc}
*/
@Override
public boolean checkSolidity(Hash hash) throws Exception {
return checkSolidity(hash, SOLIDIFICATION_TRANSACTIONS_LIMIT);
}
/**
*{@inheritDoc}
*/
@Override
public boolean checkSolidity(Hash hash, int maxProcessedTransactions) throws Exception {
if(fromHash(tangle, hash).isSolid()) {
return true;
}
LinkedHashSet<Hash> analyzedHashes = new LinkedHashSet<>(snapshotProvider.getInitialSnapshot().getSolidEntryPoints().keySet());
maxProcessedTransactions = Math.addExact(maxProcessedTransactions, analyzedHashes.size());
boolean solid = true;
final Deque<Hash> nonAnalyzedTransactions = new ArrayDeque<>(Collections.singleton(hash));
Hash hashPointer;
while ((hashPointer = nonAnalyzedTransactions.poll()) != null) {
if (!analyzedHashes.add(hashPointer)) {
continue;
}
if (analyzedHashes.size() >= maxProcessedTransactions) {
return false;
}
TransactionViewModel transaction = fromHash(tangle, hashPointer);
if (isUnsolidWithoutEntryPoint(transaction, hashPointer)) {
if (transaction.getType() == PREFILLED_SLOT) {
solid = false;
checkRequester(hashPointer);
} else {
nonAnalyzedTransactions.offer(transaction.getTrunkTransactionHash());
nonAnalyzedTransactions.offer(transaction.getBranchTransactionHash());
if (transaction.getAddressHash().equals(cooAddress)) {
checkRequester(hashPointer);
}
}
}
}
if (solid) {
updateTransactions(analyzedHashes);
}
analyzedHashes.clear();
return solid;
}
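    // Usage sketch (hypothetical hash and limit): a bounded solidity walk that requests
    // anything missing along the way and broadcasts the subtangle on success.
    //
    //   boolean solid = solidifier.checkSolidity(milestoneHash, 50_000);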
/**
* Check if a transaction is present in the {@link #transactionRequester}, if not, it is added.
* @param hashPointer The hash of the transaction to request
*/
private void checkRequester(Hash hashPointer){
if (!transactionRequester.isTransactionRequested(hashPointer)) {
transactionRequester.requestTransaction(hashPointer);
}
}
/**
* Iterate through analyzed hashes and place them in the {@link #transactionsToBroadcast} queue
* @param hashes Analyzed hashes from the {@link #checkSolidity(Hash)} call
*/
private void updateTransactions(Set<Hash> hashes) {
hashes.forEach(hash -> {
try {
TransactionViewModel tvm = fromHash(tangle, hash);
tvm.updateHeights(tangle, snapshotProvider.getInitialSnapshot());
if(!tvm.isSolid()){
tvm.updateSolid(true);
tvm.update(tangle, snapshotProvider.getInitialSnapshot(), "solid|height");
}
if(!snapshotProvider.getInitialSnapshot().hasSolidEntryPoint(hash)) {
addToBroadcastQueue(tvm);
}
transactionPropagator.addToPropagationQueue(tvm.getHash());
} catch (Exception e) {
log.info(e.getMessage());
}
});
}
/**
     * Returns true if the transaction is not solid and is not a solid entry point of the initial snapshot.
     * Otherwise the hash is placed into the propagation queue and false is returned.
*/
private boolean isUnsolidWithoutEntryPoint(TransactionViewModel transaction, Hash hashPointer) throws Exception{
if(!transaction.isSolid() && !snapshotProvider.getInitialSnapshot().hasSolidEntryPoint(hashPointer)){
return true;
}
transactionPropagator.addToPropagationQueue(hashPointer);
return false;
}
private void addToBroadcastQueue(TransactionViewModel tvm) {
try {
if (transactionsToBroadcast.size() >= MAX_SIZE) {
transactionsToBroadcast.remove();
}
if(!transactionsToBroadcast.contains(tvm)) {
transactionsToBroadcast.put(tvm);
}
} catch(Exception e){
log.info("Error placing transaction into broadcast queue: " + e.getMessage());
}
}
@VisibleForTesting
Set<Hash> getSolidificationSet(){
return new LinkedHashSet<>(transactionsToSolidify);
}
@Override
public void updateStatus(TransactionViewModel transactionViewModel) throws Exception {
transactionRequester.clearTransactionRequest(transactionViewModel.getHash());
if(transactionViewModel.getApprovers(tangle).size() == 0) {
tipsViewModel.addTipHash(transactionViewModel.getHash());
}
tipsViewModel.removeTipHash(transactionViewModel.getTrunkTransactionHash());
tipsViewModel.removeTipHash(transactionViewModel.getBranchTransactionHash());
if(quickSetSolid(transactionViewModel)) {
transactionViewModel.update(tangle, snapshotProvider.getInitialSnapshot(), "solid|height");
tipsViewModel.setSolid(transactionViewModel.getHash());
transactionPropagator.addToPropagationQueue(transactionViewModel.getHash());
}
}
@Override
public boolean quickSetSolid(final TransactionViewModel transactionViewModel) throws Exception {
if(!transactionViewModel.isSolid()) {
boolean solid = true;
if (!checkApproovee(transactionViewModel.getTrunkTransaction(tangle))) {
solid = false;
}
if (!checkApproovee(transactionViewModel.getBranchTransaction(tangle))) {
solid = false;
}
if(solid) {
transactionViewModel.updateSolid(true);
transactionViewModel.updateHeights(tangle, snapshotProvider.getInitialSnapshot());
transactionPropagator.addToPropagationQueue(transactionViewModel.getHash());
addToBroadcastQueue(transactionViewModel);
return true;
}
}
return false;
}
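    // Fast-path sketch: quickSetSolid(...) only inspects the direct trunk and branch, so a
    // hypothetical caller can use it as a cheap pre-check before the full walk, e.g.
    //
    //   if (!quickSetSolid(tvm)) {
    //       addToSolidificationQueue(tvm.getHash()); // fall back to checkSolidity(...)
    //   }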
/**
     * If the {@code approovee} is missing, request it from a neighbor.
     * @param approovee transaction we check.
     * @return true if {@code approovee} is solid.
* @throws Exception if we encounter an error while requesting a transaction
*/
private boolean checkApproovee(TransactionViewModel approovee) throws Exception {
if(snapshotProvider.getInitialSnapshot().hasSolidEntryPoint(approovee.getHash())) {
return true;
}
if(approovee.getType() == PREFILLED_SLOT) {
// don't solidify from the bottom until cuckoo filters can identify where we deleted -> otherwise we will
// continue requesting old transactions forever
//transactionRequester.requestTransaction(approovee.getHash(), false);
return false;
}
return approovee.isSolid();
}
/**
* A transaction solidification service that propagates upwards through transactions approving a solid transaction
* and performs {@link #quickSetSolid} checks to determine their solidity as well
*/
private class TransactionPropagator {
/**
* A queue for processing transactions with the {@link #propagateSolidTransactions()} call. This will check
* approving transactions with {@link #quickSetSolid(TransactionViewModel)}.
*/
private BlockingQueue<Hash> solidTransactions = new ArrayBlockingQueue<>(MAX_SIZE);
/**
* Defines the maximum number of transactions that will be processed in a single run of the
* {@link TransactionPropagator#propagateSolidTransactions()} call. This is to stop the propagator from potentially
* stalling out solidification with an endless addition of new transactions to propagate.
*/
private static final int PROPAGATION_QUEUE_MAX_PROCESS = 10;
/**
* Add to the propagation queue where it will be processed to help solidify approving transactions faster
         * @param hash The transaction hash to be added
         * @throws Exception if the hash cannot be placed into the queue
*/
public void addToPropagationQueue(Hash hash) throws Exception{
if(!solidTransactions.contains(hash)) {
if (solidTransactions.size() >= MAX_SIZE) {
solidTransactions.remove();
}
solidTransactions.offer(hash);
}
}
@VisibleForTesting
void propagateSolidTransactions() {
int processed = 0;
while(!Thread.currentThread().isInterrupted() && solidTransactions.peek() != null
&& processed < PROPAGATION_QUEUE_MAX_PROCESS) {
try {
++processed;
Hash hash = solidTransactions.poll();
TransactionViewModel transaction = fromHash(tangle, hash);
Set<Hash> approvers = transaction.getApprovers(tangle).getHashes();
for(Hash h: approvers) {
TransactionViewModel tx = fromHash(tangle, h);
if (quietQuickSetSolid(tx)) {
tx.update(tangle, snapshotProvider.getInitialSnapshot(), "solid|height");
tipsViewModel.setSolid(h);
}
}
} catch (Exception e) {
log.error("Error while propagating solidity upwards", e);
}
}
}
/**
* Perform a {@link #quickSetSolid} while capturing and logging errors
* @param transactionViewModel transaction we try to solidify.
* @return <tt>true</tt> if we managed to solidify, else <tt>false</tt>.
*/
private boolean quietQuickSetSolid(TransactionViewModel transactionViewModel) {
try {
return quickSetSolid(transactionViewModel);
} catch (Exception e) {
log.error(e.getMessage(), e);
return false;
}
}
}
}
| 16,696 | 36.947727 | 135 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/storage/Indexable.java
|
package com.iota.iri.storage;
import java.io.Serializable;
/**
* An object that is used as a key/index in a database to give an ID to a {@link Persistable}.
*/
public interface Indexable extends Comparable<Indexable>, Serializable {
/**
* Serializes the object to bytes in order to be stored in the db.
*
* @return the serialized bytes of the key
*/
byte[] bytes();
/**
* Deserializes the index from bytes and recreates the object.
*
* @param bytes the serialized bytes of the key
*/
void read(byte[] bytes);
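    // Minimal implementation sketch (hypothetical key type, not part of IRI):
    //
    //   class IntIndex implements Indexable {
    //       int value;
    //       public byte[] bytes() { return java.nio.ByteBuffer.allocate(4).putInt(value).array(); }
    //       public void read(byte[] bytes) { value = java.nio.ByteBuffer.wrap(bytes).getInt(); }
    //       public Indexable incremented() { IntIndex next = new IntIndex(); next.value = value + 1; return next; }
    //       public Indexable decremented() { IntIndex prev = new IntIndex(); prev.value = value - 1; return prev; }
    //       public int compareTo(Indexable other) { return Integer.compare(value, ((IntIndex) other).value); }
    //   }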
/////////////////////////////////Not In Use////////////////////////////////////////////
Indexable incremented();
Indexable decremented();
}
| 725 | 24.034483 | 94 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/storage/LocalSnapshotsPersistenceProvider.java
|
package com.iota.iri.storage;
import com.iota.iri.model.LocalSnapshot;
import com.iota.iri.model.persistables.SpentAddress;
import com.iota.iri.utils.Pair;
import java.util.*;
/**
* Abstraction for localsnapshots-db persistence provider.
*/
public class LocalSnapshotsPersistenceProvider implements PersistenceProvider {
private PersistenceProvider provider;
public static final Map<String, Class<? extends Persistable>> COLUMN_FAMILIES =
new LinkedHashMap<String, Class<? extends Persistable>>() {{
put("spent-addresses", SpentAddress.class);
put("localsnapshots", LocalSnapshot.class);
}};
/**
* Constructor for {@link LocalSnapshotsPersistenceProvider}. Stores the {@link PersistenceProvider} to act as an
* abstraction of the localsnapshots-db. Restricts access to some db functionality.
*
* @param provider The {@link PersistenceProvider} that will be stored
*/
public LocalSnapshotsPersistenceProvider(PersistenceProvider provider){
this.provider = provider;
}
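    // Wiring sketch (paths and cache size are illustrative): the provider is typically
    // backed by a RocksDB instance opened over the two column families declared above.
    //
    //   new LocalSnapshotsPersistenceProvider(new RocksDBPersistenceProvider(
    //           "localsnapshots-db", "localsnapshots-log", 1000, COLUMN_FAMILIES, null));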
/**
* {@inheritDoc}
*/
public void init() throws Exception{
provider.init();
}
/**
* {@inheritDoc}
*/
@Override
public void shutdown(){
this.provider.shutdown();
}
/**
* {@inheritDoc}
*/
@Override
public boolean isAvailable(){
return this.provider.isAvailable();
}
/**
* {@inheritDoc}
*/
@Override
public boolean saveBatch(List<Pair<Indexable, Persistable>> models) throws Exception {
return provider.saveBatch(models);
}
/**
* {@inheritDoc}
*/
@Override
public boolean save(Persistable model, Indexable index) throws Exception {
return provider.save(model, index);
}
/**
* {@inheritDoc}
*/
@Override
public boolean exists(Class<?> modelClass, Indexable hash) throws Exception {
return provider.exists(modelClass, hash);
}
/**
* {@inheritDoc}
*/
@Override
public void delete(Class<?> modelClass, Indexable hash) throws Exception {
provider.delete(modelClass, hash);
}
/**
* {@inheritDoc}
*/
@Override
public boolean update(Persistable model, Indexable index, String item) throws Exception{
return provider.update(model, index, item);
}
/**
* {@inheritDoc}
*/
@Override
public Pair<Indexable, Persistable > first(Class<?> model, Class<?> index) throws Exception {
return provider.first(model, index);
}
/**
* {@inheritDoc}
*/
@Override
public Pair<Indexable, Persistable> latest(Class<?> model, Class<?> indexModel) throws Exception {
return provider.latest(model, indexModel);
}
/**
* {@inheritDoc}
*/
@Override
public long count(Class<?> model) throws Exception {
return provider.count(model);
}
/**
* {@inheritDoc}
*/
@Override
public Persistable get(Class<?> model, Indexable index) throws Exception {
return provider.get(model, index);
}
/**
* {@inheritDoc}
*/
@Override
public void deleteBatch(Collection<Pair<Indexable, ? extends Class<? extends Persistable>>> models) throws Exception {
provider.deleteBatch(models);
}
/**
* {@inheritDoc}
*/
@Override
public List<byte[]> loadAllKeysFromTable(Class<? extends Persistable> model) {
throw new UnsupportedOperationException();
}
@Override
public long getPersistenceSize() {
        throw new UnsupportedOperationException(
                "Local Snapshots Persistence Provider can't know the size of SST files");
}
/**
* {@inheritDoc}
*/
@Override
public Pair<Indexable, Persistable> next(Class<?> model, Indexable index) throws Exception {
return provider.next(model, index);
}
/**
* {@inheritDoc}
*/
@Override
public Pair<Indexable, Persistable> previous(Class<?> model, Indexable index) throws Exception {
return provider.previous(model, index);
}
/**
* {@inheritDoc}
*/
@Override
public void clear(Class<?> column) throws Exception {
provider.clear(column);
}
////////////////////////////////////////////////////////////////////////////////////
////////// Unsupported methods /////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
@Override
public Persistable seek(Class<?> model, byte[] key) {
throw new UnsupportedOperationException();
}
@Override
public Set<Indexable> keysWithMissingReferences(Class<?> modelClass, Class<?> otherClass){
throw new UnsupportedOperationException();
}
@Override
public boolean mayExist(Class<?> model, Indexable index) {
throw new UnsupportedOperationException();
}
@Override
public Set<Indexable> keysStartingWith(Class<?> modelClass, byte[] value) {
throw new UnsupportedOperationException();
}
@Override
public void clearMetadata(Class<?> column) {
throw new UnsupportedOperationException();
}
}
| 5,284 | 25.034483 | 122 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/storage/Persistable.java
|
package com.iota.iri.storage;
import javax.naming.OperationNotSupportedException;
import java.io.Serializable;
/**
* Any object that can be serialized and stored in the database.
*
* @see Indexable
*/
public interface Persistable extends Serializable {
/**
* Serializes the core fields of the object to bytes that can be stored in a database. Core fields are all the
* fields that represent the core data required to construct this object.
*
* @return the serialized bytes that represents the core original object
*/
byte[] bytes();
/**
* Recreates the core state (populates the fields) of the object by deserializing {@code bytes}.
*
* @param bytes the serialized version of the object
*/
void read(byte[] bytes);
/**
* Serializes fields that usually contain data about the object that the node itself calculates
 * and is not passed to different nodes. This data is stored in a different column family/table than
* the data serialized by {@link #bytes()}.
*
* @return the serialized bytes that represents the metadata of the object
*/
byte[] metadata();
/**
* Recreates the metadata (populates the relevant fields) of the object by deserializing {@code bytes}.
*
* @param bytes the serialized bytes that represents the metadata of the object
*/
void readMetadata(byte[] bytes);
/**
* Specifies whether two objects of the same type can be merged and their merged result used as a single object.
* For storing in a persistence provider this means we can append items to an index and
* for retrieving this means we can merge the results of multiple storage providers
*
 * @return <tt>true</tt> if mergeable, <tt>false</tt> if not mergeable
*/
boolean canMerge();
/**
* Merges source object into this object.
* @param source the persistable that will be merged into the called persistable
* @return the updated persistable, which contains both. Or {@code null} if the persistable cannot be merged
* @throws OperationNotSupportedException when the persistable called does not support merging. Call canMerge first.
*/
Persistable mergeInto(Persistable source) throws OperationNotSupportedException;
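    // Merge-contract sketch ('a' and 'b' are hypothetical persistables of the same type):
    //
    //   if (a.canMerge()) {
    //       Persistable merged = a.mergeInto(b); // null if the merge failed
    //   }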
/**
 * Determines whether the data exists. This method is needed because a non-{@code null} object doesn't
 * necessarily mean that the underlying data exists.
 *
 * @return true if the data exists, else returns false
*/
boolean exists();
}
| 2,546 | 36.455882 | 136 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/storage/PersistenceProvider.java
|
package com.iota.iri.storage;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import com.iota.iri.utils.Pair;
/**
 * Abstracts access to the data persistence layer.
* Handles the initialization and connections to the storage.
* Will enable efficient batch writes, deletes, and merges.
*/
public interface PersistenceProvider {
/**
* Will setup and configure the DB. Including initializing of heavyweight
* connections, rules on how to communicate with the DB schema, how to sync with the DB,
* how to backup the data and so on.
* @throws Exception if there was an error communicating with the DB
*/
void init() throws Exception;
//To be deleted
boolean isAvailable();
/**
* Free all the resources that are required to communicate with the DB
*/
void shutdown();
/**
* Persists the {@code model} to storage.
*
* @param model what we persist
* @param index the primary key that we use to store
* @return <tt>true</tt> if successful, else <tt>false</tt>
* @throws Exception if we encounter a problem with the DB
*/
boolean save(Persistable model, Indexable index) throws Exception;
/**
* Deletes the value that is stored at a specific {@code index} from the DB.
* @param model defines the type of object we want to delete
* @param index the key we delete at
* @throws Exception if we encounter a problem with the DB
*/
void delete(Class<?> model, Indexable index) throws Exception;
/**
* Updates metadata on a certain index
*
* @param model container of the metadata we need
* @param index the key where we store the metadata
* @param item extra piece of metadata
* @return <tt>true</tt> if successful, else <tt>false</tt>
* @throws Exception if we encounter a problem with the DB
*/
boolean update(Persistable model, Indexable index, String item) throws Exception;
/**
 * Checks that the object of type {@code model} at {@code key} is stored in the DB.
*
* @param model specifies in what table/column family to look for the object
* @param key the key we expect to find in the db
* @return <tt>true</tt> if the object is in the DB, else <tt>false</tt>
* @throws Exception if we encounter a problem with the DB
*/
boolean exists(Class<?> model, Indexable key) throws Exception;
/**
* Returns the latest object of type {@code model} that was persisted with a key of type {@code indexModel}
* @param model specifies in what table/column family to look
* @param indexModel specifies the type of the key
* @return the (key,value) pair of the found object
*
* @throws Exception if we encounter a problem with the DB
*/
Pair<Indexable, Persistable> latest(Class<?> model, Class<?> indexModel) throws Exception;
/**
* Given 2 {@link Persistable} types that should be indexed with the same keys,
* find all the keys in the database that are in the {@code modelClass} table/column family
* but are not in the {@code otherClass} table/column family.
*
* @param modelClass a persistable type
* @param otherClass a persistable type that should have the same keys as {@code modelClass}
* @return The keys of {@code modelClass} that don't index {@code otherClass} models
* @throws Exception
*/
Set<Indexable> keysWithMissingReferences(Class<?> modelClass, Class<?> otherClass) throws Exception;
/**
* Retrieves a {@code model} type indexed at with {@code index} key
* @param model the table/column family to look at
* @param index the key
* @return The stored value
* @throws Exception if we encounter a problem with the DB
*/
Persistable get(Class<?> model, Indexable index) throws Exception;
/**
 * Checks with a degree of certainty whether we have a value indexed at a
* given key. Unlike {@link #exists} it can return false positives, but it should be much more efficient
*
* @param model the table/column family to look at
* @param index the key
* @return <tt>true</tt> if might exist, else <tt>false</tt> if definitely not in the DB
* @throws Exception if we encounter a problem with the DB
*/
boolean mayExist(Class<?> model, Indexable index) throws Exception;
/**
* Counts how many instances of type {@code model} are stored in the DB.
* The count may be an estimated figure.
*
* @param model the table/column family to look at
* @return an estimated count of the members at the above table/column family
* @throws Exception if we encounter a problem with the DB
*/
long count(Class<?> model) throws Exception;
/**
* Gets all the keys starting with {@code value} prefix that index type {@code modelClass}.
* @param modelClass the table/column family to look at
* @param value the key prefix in bytes
* @return keys that have {@code value} as their prefix
*/
Set<Indexable> keysStartingWith(Class<?> modelClass, byte[] value);
/**
* Gets the first object persisted with a key that has {@code key} prefix.
* @param model the table/column family to look at
* @param key the key prefix in bytes
* @return the first Persistable that was found
* @throws Exception if we encounter a problem with the DB
*/
Persistable seek(Class<?> model, byte[] key) throws Exception;
/**
 * Finds the next object persisted after the key {@code index}
* @param model the table/column family to look at
* @param index the key from which we start scanning forward
* @return a pair (key, value)
* @throws Exception if we encounter a problem with the DB
*/
Pair<Indexable, Persistable> next(Class<?> model, Indexable index) throws Exception;
/**
 * Finds the previous object persisted before the key {@code index}
* @param model the table/column family to look at
* @param index the key from which we start scanning backwards
* @return a pair (key, value)
* @throws Exception if we encounter a problem with the DB
*/
Pair<Indexable, Persistable> previous(Class<?> model, Indexable index) throws Exception;
/**
* Finds the first persisted model of type {@code model} that has a key of type {@code indexModel}
* @param model the table/column family to look at
* @param indexModel the index type
* @return a pair (key, value)
* @throws Exception if we encounter a problem with the DB
*/
Pair<Indexable, Persistable> first(Class<?> model, Class<?> indexModel) throws Exception;
/**
* Atomically saves all {@code models}
*
 * @param models key-value pairs to be stored in the db
 * @return <tt>true</tt> if successful, else <tt>false</tt>
* @throws Exception if we encounter a problem with the DB
*/
boolean saveBatch(List<Pair<Indexable, Persistable>> models) throws Exception;
/**
* Atomically delete all {@code models}.
 * @param models key-value pairs to be expunged from the db.
* @throws Exception if data could not be expunged from the db.
*/
void deleteBatch(Collection<Pair<Indexable, ? extends Class<? extends Persistable>>> models) throws Exception;
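    // Batch usage sketch (hypothetical key/value pairs): callers collect pairs and persist
    // them in one atomic write.
    //
    //   List<Pair<Indexable, Persistable>> batch = new ArrayList<>();
    //   batch.add(new Pair<>(hash, transaction));
    //   provider.saveBatch(batch);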
/**
* Clear all the data (but not metadata) in a column family or a table
* @param column the table/column family we clear
* @throws Exception if we encounter a problem with the DB
*/
void clear(Class<?> column) throws Exception;
/**
* Clear all the metadata in a column family or a table
* @param column the table/column family we clear the metadata from
* @throws Exception if we encounter a problem with the DB
*/
void clearMetadata(Class<?> column) throws Exception;
List<byte[]> loadAllKeysFromTable(Class<? extends Persistable> model);
/**
* Calculates the estimated size of this persistence provider on disk
*
* @return the size in bytes
*/
long getPersistenceSize();
}
| 8,156 | 38.405797 | 114 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/storage/Tangle.java
|
package com.iota.iri.storage;
import com.iota.iri.model.Hash;
import com.iota.iri.model.StateDiff;
import com.iota.iri.model.persistables.Address;
import com.iota.iri.model.persistables.Approvee;
import com.iota.iri.model.persistables.Bundle;
import com.iota.iri.model.persistables.Milestone;
import com.iota.iri.model.persistables.ObsoleteTag;
import com.iota.iri.model.persistables.Tag;
import com.iota.iri.model.persistables.Transaction;
import com.iota.iri.utils.Pair;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
import com.iota.iri.zmq.MessageQueueProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.naming.OperationNotSupportedException;
/**
* Delegates methods from {@link PersistenceProvider}
*/
public class Tangle {
private static final Logger log = LoggerFactory.getLogger(Tangle.class);
public static final Map<String, Class<? extends Persistable>> COLUMN_FAMILIES =
new LinkedHashMap<String, Class<? extends Persistable>>() {{
put("transaction", Transaction.class);
put("milestone", Milestone.class);
put("stateDiff", StateDiff.class);
put("address", Address.class);
put("approvee", Approvee.class);
put("bundle", Bundle.class);
put("obsoleteTag", ObsoleteTag.class);
put("tag", Tag.class);
}};
public static final Map.Entry<String, Class<? extends Persistable>> METADATA_COLUMN_FAMILY =
new AbstractMap.SimpleImmutableEntry<>("transaction-metadata", Transaction.class);
private final List<PersistenceProvider> persistenceProviders = new ArrayList<>();
private final List<MessageQueueProvider> messageQueueProviders = new ArrayList<>();
public void addPersistenceProvider(PersistenceProvider provider) {
this.persistenceProviders.add(provider);
}
/**
*
* @see PersistenceProvider#init()
*/
public void init() throws Exception {
for(PersistenceProvider provider: this.persistenceProviders) {
provider.init();
}
}
/**
* Adds {@link com.iota.iri.zmq.MessageQueueProvider} that should be notified.
*
* @param provider that should be notified.
*/
public void addMessageQueueProvider(MessageQueueProvider provider) {
this.messageQueueProviders.add(provider);
}
/**
* @see PersistenceProvider#shutdown()
*/
public void shutdown() throws Exception {
log.info("Shutting down Tangle Persistence Providers... ");
this.persistenceProviders.forEach(PersistenceProvider::shutdown);
this.persistenceProviders.clear();
log.info("Shutting down Tangle MessageQueue Providers... ");
this.messageQueueProviders.forEach(MessageQueueProvider::shutdown);
this.messageQueueProviders.clear();
}
/**
* @see PersistenceProvider#get(Class, Indexable)
*/
public Persistable load(Class<?> model, Indexable index) throws Exception {
LinkedList<Persistable> outlist = new LinkedList<>();
for (PersistenceProvider provider : this.persistenceProviders) {
Persistable result = provider.get(model, index);
if (result != null && result.exists()) {
if (result.canMerge()) {
outlist.add(result);
} else {
// If it is a non-mergeable result then there is no need to ask another provider again.
// return immediately
return result;
}
}
}
Persistable p = outlist.stream().reduce(null, (a, b) -> {
if (a == null) {
return b;
}
try {
return a.mergeInto(b);
} catch (OperationNotSupportedException e) {
                log.error("Error merging data; call canMerge first to check whether an object is mergeable: ", e);
return null;
}
});
        //For backwards compatibility. Should be solved with issue #1591
if (p == null) {
p = (Persistable) model.newInstance();
}
return p;
}
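    // Usage sketch (hypothetical hash): mergeable results are combined across providers,
    // while a non-mergeable result short-circuits on the first provider that has it.
    //
    //   Persistable tx = tangle.load(Transaction.class, transactionHash);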
/**
* @see PersistenceProvider#saveBatch(List)
*/
public Boolean saveBatch(List<Pair<Indexable, Persistable>> models) throws Exception {
boolean exists = false;
for(PersistenceProvider provider: persistenceProviders) {
if(exists) {
provider.saveBatch(models);
} else {
exists = provider.saveBatch(models);
}
}
return exists;
}
/**
* @see PersistenceProvider#save(Persistable, Indexable)
*/
public Boolean save(Persistable model, Indexable index) throws Exception {
boolean exists = false;
for(PersistenceProvider provider: persistenceProviders) {
if(exists) {
provider.save(model, index);
} else {
exists = provider.save(model, index);
}
}
return exists;
}
/**
* @see PersistenceProvider#deleteBatch(Collection)
*/
public void deleteBatch(Collection<Pair<Indexable, ? extends Class<? extends Persistable>>> models) throws Exception {
for(PersistenceProvider provider: persistenceProviders) {
provider.deleteBatch(models);
}
}
/**
* @see PersistenceProvider#delete(Class, Indexable)
*/
public void delete(Class<?> model, Indexable index) throws Exception {
for(PersistenceProvider provider: persistenceProviders) {
provider.delete(model, index);
}
}
/**
* @see PersistenceProvider#latest(Class, Class)
*/
public Pair<Indexable, Persistable> getLatest(Class<?> model, Class<?> index) throws Exception {
Pair<Indexable, Persistable> latest = null;
for(PersistenceProvider provider: persistenceProviders) {
if (latest == null) {
latest = provider.latest(model, index);
}
}
return latest;
}
/**
* Updates all {@link PersistenceProvider} and publishes message to all
* {@link com.iota.iri.zmq.MessageQueueProvider}.
*
* @param model with transaction data
* @param index {@link Hash} identifier of the {@link Transaction} set
* @param item identifying the purpose of the update
* @throws Exception when updating the {@link PersistenceProvider} fails
*/
public void update(Persistable model, Indexable index, String item) throws Exception {
updatePersistenceProvider(model, index, item);
updateMessageQueueProvider(model, index, item);
}
private void updatePersistenceProvider(Persistable model, Indexable index, String item) throws Exception {
for(PersistenceProvider provider: this.persistenceProviders) {
provider.update(model, index, item);
}
}
private void updateMessageQueueProvider(Persistable model, Indexable index, String item) {
for(MessageQueueProvider provider: this.messageQueueProviders) {
provider.publishTransaction(model, index, item);
}
}
/**
* Notifies all registered {@link com.iota.iri.zmq.MessageQueueProvider} and publishes message to MessageQueue.
*
* @param message that can be formatted by {@link String#format(String, Object...)}
* @param objects that should replace the placeholder in message.
* @see com.iota.iri.zmq.ZmqMessageQueueProvider#publish(String, Object...)
* @see String#format(String, Object...)
*/
public void publish(String message, Object... objects) {
for(MessageQueueProvider provider: this.messageQueueProviders) {
provider.publish(message, objects);
}
}
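    // Publishing sketch (topic and arguments are illustrative):
    //
    //   tangle.publish("lmi %d %d", previousIndex, latestIndex);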
/**
* @see PersistenceProvider#keysWithMissingReferences(Class, Class)
*/
public Set<Indexable> keysWithMissingReferences(Class<?> modelClass, Class<?> referencedClass) throws Exception {
Set<Indexable> output = null;
for(PersistenceProvider provider: this.persistenceProviders) {
output = provider.keysWithMissingReferences(modelClass, referencedClass);
if(output != null && output.size() > 0) {
break;
}
}
return output;
}
/**
* @see PersistenceProvider#keysStartingWith(Class, byte[])
*/
public Set<Indexable> keysStartingWith(Class<?> modelClass, byte[] value) {
Set<Indexable> output = null;
for(PersistenceProvider provider: this.persistenceProviders) {
output = provider.keysStartingWith(modelClass, value);
if(output.size() != 0) {
break;
}
}
return output;
}
public <T extends Indexable> List<T> loadAllKeysFromTable(Class<? extends Persistable> modelClass,
Function<byte[], T> transformer) {
List<byte[]> keys = null;
for(PersistenceProvider provider: this.persistenceProviders) {
if ((keys = provider.loadAllKeysFromTable(modelClass)) != null) {
break;
}
}
if (keys != null) {
return keys.stream()
.map(transformer)
.collect(Collectors.toList());
}
return null;
}
/**
* @see PersistenceProvider#exists(Class, Indexable)
*/
public Boolean exists(Class<?> modelClass, Indexable hash) throws Exception {
for(PersistenceProvider provider: this.persistenceProviders) {
if (provider.exists(modelClass, hash)) {
return true;
}
}
return false;
}
/**
* @see PersistenceProvider#mayExist(Class, Indexable)
*/
public Boolean maybeHas(Class<?> model, Indexable index) throws Exception {
for(PersistenceProvider provider: this.persistenceProviders) {
if (provider.mayExist(model, index)) {
return true;
}
}
return false;
}
/**
* @see PersistenceProvider#count(Class)
*/
public Long getCount(Class<?> modelClass) throws Exception {
long value = 0;
for(PersistenceProvider provider: this.persistenceProviders) {
if((value = provider.count(modelClass)) != 0) {
break;
}
}
return value;
}
/**
* @see PersistenceProvider#seek(Class, byte[])
*/
public Persistable find(Class<?> model, byte[] key) throws Exception {
Persistable out = null;
for (PersistenceProvider provider : this.persistenceProviders) {
if ((out = provider.seek(model, key)) != null) {
break;
}
}
return out;
}
/**
* @see PersistenceProvider#next(Class, Indexable)
*/
public Pair<Indexable, Persistable> next(Class<?> model, Indexable index) throws Exception {
Pair<Indexable, Persistable> latest = null;
for(PersistenceProvider provider: persistenceProviders) {
if(latest == null) {
latest = provider.next(model, index);
}
}
return latest;
}
/**
* @see PersistenceProvider#previous(Class, Indexable)
*/
public Pair<Indexable, Persistable> previous(Class<?> model, Indexable index) throws Exception {
Pair<Indexable, Persistable> latest = null;
for(PersistenceProvider provider: persistenceProviders) {
if(latest == null) {
latest = provider.previous(model, index);
}
}
return latest;
}
/**
* @see PersistenceProvider#first(Class, Class)
*/
public Pair<Indexable, Persistable > getFirst(Class<?> model, Class<?> index) throws Exception {
Pair<Indexable, Persistable> latest = null;
for(PersistenceProvider provider: persistenceProviders) {
if(latest == null) {
latest = provider.first(model, index);
}
}
return latest;
}
/**
* @see PersistenceProvider#clear(Class)
*/
public void clearColumn(Class<?> column) throws Exception {
for(PersistenceProvider provider: persistenceProviders) {
provider.clear(column);
}
}
/**
* @see PersistenceProvider#clearMetadata(Class)
*/
public void clearMetadata(Class<?> column) throws Exception {
for(PersistenceProvider provider: persistenceProviders) {
provider.clearMetadata(column);
}
}
/**
* @see PersistenceProvider#getPersistenceSize()
*/
public long getPersistanceSize() {
long size = 0;
for (PersistenceProvider provider : persistenceProviders) {
size += provider.getPersistenceSize();
}
return size;
}
}
| 13,415 | 33.9375 | 122 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/storage/rocksDB/RocksDBPersistenceProvider.java
|
package com.iota.iri.storage.rocksDB;
import com.iota.iri.conf.BaseIotaConfig;
import com.iota.iri.conf.TestnetConfig;
import com.iota.iri.model.HashFactory;
import com.iota.iri.storage.Indexable;
import com.iota.iri.storage.Persistable;
import com.iota.iri.storage.PersistenceProvider;
import com.iota.iri.utils.IotaIOUtils;
import com.iota.iri.utils.Pair;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Paths;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.collections4.MapUtils;
import org.apache.commons.lang3.SystemUtils;
import org.rocksdb.BackupEngine;
import org.rocksdb.BackupableDBOptions;
import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.BloomFilter;
import org.rocksdb.Cache;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.Env;
import org.rocksdb.LRUCache;
import org.rocksdb.MergeOperator;
import org.rocksdb.OptionsUtil;
import org.rocksdb.Priority;
import org.rocksdb.RestoreOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksEnv;
import org.rocksdb.RocksIterator;
import org.rocksdb.SstFileManager;
import org.rocksdb.StringAppendOperator;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;
import org.rocksdb.util.SizeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class RocksDBPersistenceProvider implements PersistenceProvider {
private static final Logger log = LoggerFactory.getLogger(RocksDBPersistenceProvider.class);
private static final int BLOOM_FILTER_BITS_PER_KEY = 10;
private static final Pair<Indexable, Persistable> PAIR_OF_NULLS = new Pair<>(null, null);
private final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
private final SecureRandom seed = new SecureRandom();
private final String dbPath;
private final String logPath;
private String configPath;
private final int cacheSize;
private final Map<String, Class<? extends Persistable>> columnFamilies;
private final Map.Entry<String, Class<? extends Persistable>> metadataColumnFamily;
private Map<Class<?>, ColumnFamilyHandle> classTreeMap;
private Map<Class<?>, ColumnFamilyHandle> metadataReference = Collections.emptyMap();
private RocksDB db;
// DBOptions is only used in initDB(). However, it is closeable - so we keep a reference for shutdown.
private DBOptions options;
private BloomFilter bloomFilter;
private boolean available;
private SstFileManager sstFileManager;
private Cache cache, compressedCache;
private ColumnFamilyOptions columnFamilyOptions;
/**
* Creates a new RocksDB provider without reading from a configuration file
*
* @param dbPath The location where the database will be stored
* @param logPath The location where the log files will be stored
* @param cacheSize the size of the cache used by the database implementation
* @param columnFamilies A map of the names related to their Persistable class
* @param metadataColumnFamily Map of metadata used by the Persistable class, can be <code>null</code>
*/
public RocksDBPersistenceProvider(String dbPath, String logPath, int cacheSize,
Map<String, Class<? extends Persistable>> columnFamilies,
Map.Entry<String, Class<? extends Persistable>> metadataColumnFamily) {
this(dbPath, logPath, null, cacheSize, columnFamilies, metadataColumnFamily);
}
/**
* Creates a new RocksDB provider by reading the configuration to be used in this instance from a file
*
* @param dbPath The location where the database will be stored
* @param logPath The location where the log files will be stored
* @param configPath The location where the RocksDB config is read from
* @param cacheSize the size of the cache used by the database implementation
* @param columnFamilies A map of the names related to their Persistable class
* @param metadataColumnFamily Map of metadata used by the Persistable class, can be <code>null</code>
*/
public RocksDBPersistenceProvider(String dbPath, String logPath, String configPath, int cacheSize,
Map<String, Class<? extends Persistable>> columnFamilies,
Map.Entry<String, Class<? extends Persistable>> metadataColumnFamily) {
this.dbPath = dbPath;
this.logPath = logPath;
this.cacheSize = cacheSize;
this.columnFamilies = columnFamilies;
this.metadataColumnFamily = metadataColumnFamily;
this.configPath = configPath;
}
@Override
public void init() throws Exception {
log.info("Initializing Database on " + dbPath);
initDB(dbPath, logPath, configPath, columnFamilies);
available = true;
log.info("RocksDB persistence provider initialized.");
}
@Override
public boolean isAvailable() {
return this.available;
}
@Override
public void shutdown() {
for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
IotaIOUtils.closeQuietly(columnFamilyHandle);
}
IotaIOUtils.closeQuietly(db, options, bloomFilter, cache, compressedCache, columnFamilyOptions);
}
@Override
public boolean save(Persistable thing, Indexable index) throws Exception {
ColumnFamilyHandle handle = classTreeMap.get(thing.getClass());
db.put(handle, index.bytes(), thing.bytes());
ColumnFamilyHandle referenceHandle = metadataReference.get(thing.getClass());
if (referenceHandle != null) {
db.put(referenceHandle, index.bytes(), thing.metadata());
}
return true;
}
@Override
public void delete(Class<?> model, Indexable index) throws Exception {
db.delete(classTreeMap.get(model), index.bytes());
}
@Override
public boolean exists(Class<?> model, Indexable key) throws Exception {
if (mayExist(model, key)) {
//ensure existence
ColumnFamilyHandle handle = classTreeMap.get(model);
return handle != null && db.get(handle, key.bytes()) != null;
}
return false;
}
@Override
public Set<Indexable> keysWithMissingReferences(Class<?> model, Class<?> other) throws Exception {
ColumnFamilyHandle handle = classTreeMap.get(model);
ColumnFamilyHandle otherHandle = classTreeMap.get(other);
try (RocksIterator iterator = db.newIterator(handle)) {
Set<Indexable> indexables = null;
for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) {
if (db.get(otherHandle, iterator.key()) == null) {
indexables = indexables == null ? new HashSet<>() : indexables;
indexables.add(HashFactory.GENERIC.create(model, iterator.key()));
}
}
return indexables == null ? Collections.emptySet() : Collections.unmodifiableSet(indexables);
}
}
@Override
public Persistable get(Class<?> model, Indexable index) throws Exception {
Persistable object = (Persistable) model.newInstance();
object.read(db.get(classTreeMap.get(model), index == null ? new byte[0] : index.bytes()));
ColumnFamilyHandle referenceHandle = metadataReference.get(model);
if (referenceHandle != null) {
object.readMetadata(db.get(referenceHandle, index == null ? new byte[0] : index.bytes()));
}
return object;
}
@Override
public boolean mayExist(Class<?> model, Indexable index) {
ColumnFamilyHandle handle = classTreeMap.get(model);
return db.keyMayExist(handle, index.bytes(), new StringBuilder());
}
@Override
public long count(Class<?> model) throws Exception {
return getCountEstimate(model);
}
private long getCountEstimate(Class<?> model) throws RocksDBException {
ColumnFamilyHandle handle = classTreeMap.get(model);
return db.getLongProperty(handle, "rocksdb.estimate-num-keys");
}
@Override
public Set<Indexable> keysStartingWith(Class<?> modelClass, byte[] value) {
Objects.requireNonNull(value, "value byte[] cannot be null");
ColumnFamilyHandle handle = classTreeMap.get(modelClass);
Set<Indexable> keys = null;
if (handle != null) {
try (RocksIterator iterator = db.newIterator(handle)) {
iterator.seek(HashFactory.GENERIC.create(modelClass, value, 0, value.length).bytes());
byte[] found;
while (iterator.isValid() && keyStartsWithValue(value, found = iterator.key())) {
keys = keys == null ? new HashSet<>() : keys;
keys.add(HashFactory.GENERIC.create(modelClass, found));
iterator.next();
}
}
}
return keys == null ? Collections.emptySet() : Collections.unmodifiableSet(keys);
}
/**
* @param value What we are looking for.
* @param key The bytes we are searching in.
* @return true If the {@code key} starts with the {@code value}.
*/
private static boolean keyStartsWithValue(byte[] value, byte[] key) {
if (key == null || key.length < value.length) {
return false;
}
for (int n = 0; n < value.length; n++) {
if (value[n] != key[n]) {
return false;
}
}
return true;
}
@Override
public Persistable seek(Class<?> model, byte[] key) throws Exception {
Set<Indexable> hashes = keysStartingWith(model, key);
if (hashes.isEmpty()) {
return get(model, null);
}
if (hashes.size() == 1) {
return get(model, (Indexable) hashes.toArray()[0]);
}
return get(model, (Indexable) hashes.toArray()[seed.nextInt(hashes.size())]);
}
private Pair<Indexable, Persistable> modelAndIndex(Class<?> model, Class<? extends Indexable> index, RocksIterator iterator)
throws InstantiationException, IllegalAccessException, RocksDBException {
if (!iterator.isValid()) {
return PAIR_OF_NULLS;
}
Indexable indexable = index.newInstance();
indexable.read(iterator.key());
Persistable object = (Persistable) model.newInstance();
object.read(iterator.value());
ColumnFamilyHandle referenceHandle = metadataReference.get(model);
if (referenceHandle != null) {
object.readMetadata(db.get(referenceHandle, iterator.key()));
}
return new Pair<>(indexable, object);
}
@Override
public Pair<Indexable, Persistable> next(Class<?> model, Indexable index) throws Exception {
try (RocksIterator iterator = db.newIterator(classTreeMap.get(model))) {
iterator.seek(index.bytes());
iterator.next();
return modelAndIndex(model, index.getClass(), iterator);
}
}
@Override
public Pair<Indexable, Persistable> previous(Class<?> model, Indexable index) throws Exception {
try (RocksIterator iterator = db.newIterator(classTreeMap.get(model))) {
iterator.seek(index.bytes());
iterator.prev();
return modelAndIndex(model, index.getClass(), iterator);
}
}
@SuppressWarnings("unchecked")
@Override
public Pair<Indexable, Persistable> latest(Class<?> model, Class<?> indexModel) throws Exception {
try (RocksIterator iterator = db.newIterator(classTreeMap.get(model))) {
iterator.seekToLast();
return modelAndIndex(model, (Class<Indexable>) indexModel, iterator);
}
}
@SuppressWarnings("unchecked")
@Override
public Pair<Indexable, Persistable> first(Class<?> model, Class<?> index) throws Exception {
try (RocksIterator iterator = db.newIterator(classTreeMap.get(model))) {
iterator.seekToFirst();
return modelAndIndex(model, (Class<Indexable>) index, iterator);
}
}
// 2018 March 28 - Unused code
public boolean merge(Persistable model, Indexable index) throws Exception {
boolean exists = mayExist(model.getClass(), index);
db.merge(classTreeMap.get(model.getClass()), index.bytes(), model.bytes());
return exists;
}
@Override
public boolean saveBatch(List<Pair<Indexable, Persistable>> models) throws Exception {
try (WriteBatch writeBatch = new WriteBatch();
WriteOptions writeOptions = new WriteOptions()) {
for (Pair<Indexable, Persistable> entry : models) {
Indexable key = entry.low;
Persistable value = entry.hi;
ColumnFamilyHandle handle = classTreeMap.get(value.getClass());
ColumnFamilyHandle referenceHandle = metadataReference.get(value.getClass());
if (value.canMerge()) {
writeBatch.merge(handle, key.bytes(), value.bytes());
} else {
writeBatch.put(handle, key.bytes(), value.bytes());
}
if (referenceHandle != null) {
writeBatch.put(referenceHandle, key.bytes(), value.metadata());
}
}
db.write(writeOptions, writeBatch);
return true;
}
}
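    // Atomicity note: the WriteBatch above is applied in a single write, so a crash
    // mid-batch leaves either all or none of the models persisted (subject to the
    // configured WAL/sync settings of the WriteOptions).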
public void deleteBatch(Collection<Pair<Indexable, ? extends Class<? extends Persistable>>> models)
throws Exception {
if (CollectionUtils.isNotEmpty(models)) {
try (WriteBatch writeBatch = new WriteBatch();
WriteOptions writeOptions = new WriteOptions()
//We are explicit about what happens if the node reboots before a flush to the db
.setDisableWAL(false)
//We want to make sure deleted data was indeed deleted
.setSync(true)) {
for (Pair<Indexable, ? extends Class<? extends Persistable>> entry : models) {
Indexable indexable = entry.low;
byte[] keyBytes = indexable.bytes();
ColumnFamilyHandle handle = classTreeMap.get(entry.hi);
writeBatch.delete(handle, keyBytes);
ColumnFamilyHandle metadataHandle = metadataReference.get(entry.hi);
if (metadataHandle != null) {
writeBatch.delete(metadataHandle, keyBytes);
}
}
db.write(writeOptions, writeBatch);
}
}
}
@Override
public void clear(Class<?> column) throws Exception {
log.info("Deleting: {} entries", column.getSimpleName());
flushHandle(classTreeMap.get(column));
}
@Override
public void clearMetadata(Class<?> column) throws Exception {
log.info("Deleting: {} metadata", column.getSimpleName());
flushHandle(metadataReference.get(column));
}
@Override
public List<byte[]> loadAllKeysFromTable(Class<? extends Persistable> column) {
List<byte[]> keyBytes = new ArrayList<>();
ColumnFamilyHandle columnFamilyHandle = classTreeMap.get(column);
try (RocksIterator iterator = db.newIterator(columnFamilyHandle)) {
for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) {
keyBytes.add(iterator.key());
}
}
return keyBytes;
}
private void flushHandle(ColumnFamilyHandle handle) throws RocksDBException {
List<byte[]> itemsToDelete = new ArrayList<>();
try (RocksIterator iterator = db.newIterator(handle)) {
for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) {
itemsToDelete.add(iterator.key());
}
}
if (!itemsToDelete.isEmpty()) {
log.info("Amount to delete: " + itemsToDelete.size());
}
int counter = 0;
for (byte[] itemToDelete : itemsToDelete) {
if (++counter % 10000 == 0) {
log.info("Deleted: {}", counter);
}
db.delete(handle, itemToDelete);
}
}
@Override
public boolean update(Persistable thing, Indexable index, String item) throws Exception {
ColumnFamilyHandle referenceHandle = metadataReference.get(thing.getClass());
if (referenceHandle != null) {
db.put(referenceHandle, index.bytes(), thing.metadata());
}
return false;
}
// 2018 March 28 - Unused Code
public void createBackup(String path) throws RocksDBException {
try (Env env = Env.getDefault();
BackupableDBOptions backupableDBOptions = new BackupableDBOptions(path);
BackupEngine backupEngine = BackupEngine.open(env, backupableDBOptions)) {
backupEngine.createNewBackup(db, true);
}
}
// 2018 March 28 - Unused Code
public void restoreBackup(String path, String logPath) throws Exception {
try (Env env = Env.getDefault();
BackupableDBOptions backupableDBOptions = new BackupableDBOptions(path)) {
// shutdown after successful creation of env and backupable ...
shutdown();
try (BackupEngine backupEngine = BackupEngine.open(env, backupableDBOptions);
RestoreOptions restoreOptions = new RestoreOptions(false)) {
backupEngine.restoreDbFromLatestBackup(path, logPath, restoreOptions);
}
}
initDB(path, logPath, configPath, columnFamilies);
}
// options is closed in shutdown
@SuppressWarnings("resource")
private void initDB(String path, String logPath, String configFile, Map<String, Class<? extends Persistable>> columnFamilies) throws Exception {
try {
try {
RocksDB.loadLibrary();
} catch (Exception e) {
if (SystemUtils.IS_OS_WINDOWS) {
log.error("Error loading RocksDB library. Please ensure that " +
"Microsoft Visual C++ 2015 Redistributable Update 3 " +
"is installed and updated");
}
throw e;
}
sstFileManager = new SstFileManager(Env.getDefault());
List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
// Pass columnFamilyDescriptors so that they are loaded from the options file; we check for modifications later
options = createOptions(logPath, configFile, columnFamilyDescriptors);
bloomFilter = new BloomFilter(BLOOM_FILTER_BITS_PER_KEY);
cache = new LRUCache(cacheSize * SizeUnit.KB, 2);
compressedCache = new LRUCache(32 * SizeUnit.KB, 10);
BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig()
.setFilterPolicy(bloomFilter)
.setBlockSizeDeviation(10)
.setBlockRestartInterval(16)
.setBlockCache(cache)
.setBlockCacheCompressed(compressedCache);
MergeOperator mergeOperator = new StringAppendOperator();
columnFamilyOptions = new ColumnFamilyOptions()
.setMergeOperator(mergeOperator)
.setTableFormatConfig(blockBasedTableConfig)
.setMaxWriteBufferNumber(2)
.setWriteBufferSize(2 * SizeUnit.MB);
// Column families could get loaded from the config
loadColumnFamilyDescriptors(columnFamilyDescriptors);
db = RocksDB.open(options, path, columnFamilyDescriptors, columnFamilyHandles);
db.enableFileDeletions(true);
initClassTreeMap(columnFamilyDescriptors);
} catch (Exception e) {
IotaIOUtils.closeQuietly(db, options, bloomFilter, columnFamilyOptions, cache, compressedCache);
throw e;
}
}
/**
* Checks that we have the correct column families.
* Currently does not use the columnFamilyDescriptors parameter, and just cleans the list.
* @param columnFamilyDescriptors the descriptors described in the config
*/
private void loadColumnFamilyDescriptors(List<ColumnFamilyDescriptor> columnFamilyDescriptors) {
columnFamilyDescriptors.clear();
if (columnFamilyDescriptors.isEmpty()) {
//Add default column family. Main motivation is to not change legacy code
columnFamilyDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions));
for (String name : columnFamilies.keySet()) {
columnFamilyDescriptors.add(new ColumnFamilyDescriptor(name.getBytes(), columnFamilyOptions));
}
// metadata descriptor is always last
if (metadataColumnFamily != null) {
columnFamilyDescriptors.add(
new ColumnFamilyDescriptor(metadataColumnFamily.getKey().getBytes(), columnFamilyOptions));
metadataReference = new HashMap<>();
}
}
}
private void initClassTreeMap(List<ColumnFamilyDescriptor> columnFamilyDescriptors) throws Exception {
Map<Class<?>, ColumnFamilyHandle> classMap = new LinkedHashMap<>();
String mcfName = metadataColumnFamily == null ? "" : metadataColumnFamily.getKey();
//skip default column
int i = 1;
for (; i < columnFamilyDescriptors.size(); i++) {
String name = new String(columnFamilyDescriptors.get(i).getName());
if (name.equals(mcfName)) {
Map<Class<?>, ColumnFamilyHandle> metadataRef = new HashMap<>();
metadataRef.put(metadataColumnFamily.getValue(), columnFamilyHandles.get(i));
metadataReference = MapUtils.unmodifiableMap(metadataRef);
}
else {
classMap.put(columnFamilies.get(name), columnFamilyHandles.get(i));
}
}
for (; ++i < columnFamilyHandles.size(); ) {
db.dropColumnFamily(columnFamilyHandles.get(i));
}
classTreeMap = MapUtils.unmodifiableMap(classMap);
}
private DBOptions createOptions(String logPath, String configFile, List<ColumnFamilyDescriptor> columnFamilyDescriptors) throws IOException {
DBOptions options = null;
File pathToLogDir = Paths.get(logPath).toFile();
if (!pathToLogDir.exists() || !pathToLogDir.isDirectory()) {
boolean success = pathToLogDir.mkdir();
if (!success) {
log.warn("Unable to make directory: {}", pathToLogDir);
}
}
int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2);
RocksEnv.getDefault()
.setBackgroundThreads(numThreads, Priority.HIGH)
.setBackgroundThreads(numThreads, Priority.LOW);
if (configFile != null) {
File config = Paths.get(configFile).toFile();
if (config.exists() && config.isFile() && config.canRead()) {
try (InputStream stream = new FileInputStream(config)){
// Map will contain DBOptions, TableOptions/BlockBasedTable, CFOptions
// Currently we only use DBOptions
Map<String, Properties> map = IotaIOUtils.parseINI(stream);
if (map.containsKey("DBOptions")) {
options = DBOptions.getDBOptionsFromProps(map.get("DBOptions"));
} else if (map.containsKey("default")) {
options = DBOptions.getDBOptionsFromProps(map.get("default"));
}
if (map.containsKey("CFOptions")) {
ColumnFamilyOptions.getColumnFamilyOptionsFromProps((map.get("CFOptions")));
}
if (options == null) {
log.warn("Options failed to parse, check the OPTIONS-00X in the db folder");
}
} catch (IllegalArgumentException e) {
log.warn("Options failed to parse, check the OPTIONS-00X in the db folder", e);
}
}
}
if (options == null) {
options = new DBOptions();
try {
//Load previous settings
OptionsUtil.loadLatestOptions(dbPath, RocksEnv.getDefault(), options, columnFamilyDescriptors , true);
} catch (RocksDBException e) {
// We never started before
options.setCreateIfMissing(true)
.setCreateMissingColumnFamilies(true)
.setMaxLogFileSize(SizeUnit.MB)
.setMaxManifestFileSize(SizeUnit.MB)
.setMaxOpenFiles(10000)
.setMaxBackgroundCompactions(1)
.setAllowConcurrentMemtableWrite(true)
.setMaxSubcompactions(Runtime.getRuntime().availableProcessors());
}
}
//Defaults we always need to set
options.setSstFileManager(sstFileManager);
if (!(BaseIotaConfig.Defaults.DB_LOG_PATH.equals(logPath) || TestnetConfig.Defaults.DB_LOG_PATH.equals(logPath))
&& logPath != null) {
if (!options.dbLogDir().equals("")) {
log.warn("Defined a db log path in config and commandline; Using the command line setting.");
}
options.setDbLogDir(logPath);
}
return options;
}
@Override
public long getPersistenceSize() {
return sstFileManager.getTotalSize();
}
}
| 26,600 | 39.7366 | 148 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/ASCIIProgressBar.java
|
package com.iota.iri.utils;
/**
* Utility class used to create ASCII progress bars.
*/
public class ASCIIProgressBar {
private final static String filledBlock = "▓";
private final static String emptyBlock = "░";
/**
* Computes an ASCII progress bar given the start, end and current value.
* <p>
* Example:
* </p>
* <p>
* [▓▓▓▓░(40%)░░░░░]
* </p>
*
* @param start the start value of the range
* @param end the end value of the range
* @param current the current value
* @return an ASCII progress bar representing the current value
*/
public static String getProgressBarString(int start, int end, int current) {
int range = end - start;
// compute actual percentage of current value
// absolute value in case we are rolling back milestones
double percentage = Math.abs(Math.floor(((double) (current - start) / (double) range) * 1000) / 1000);
// how many progress blocks to print in the progress bar
int progressBlocks = (int) (10 * percentage);
StringBuilder progressBarSB = new StringBuilder(progressBlocks);
progressBarSB.append("[");
for (int i = 0; i < 10; i++) {
boolean blockPrinted = false;
if (progressBlocks > 0) {
progressBarSB.append(filledBlock);
blockPrinted = true;
}
progressBlocks--;
if (i < 5) {
if (!blockPrinted) {
progressBarSB.append(emptyBlock);
}
// print progress percentage in the middle of the progress bar
if (i == 4) {
progressBarSB.append("%s");
}
continue;
}
if (!blockPrinted) {
progressBarSB.append(emptyBlock);
}
}
progressBarSB.append("]");
return String.format(progressBarSB.toString(), "(" + ((int) (percentage * 100)) + "%)");
}
}
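// Usage sketch (illustrative, not part of the original file): rendering sync
// progress between two hypothetical milestone indexes. With the 10 block slots
// above, 40% renders as [▓▓▓▓░(40%)░░░░░].
//
// String bar = ASCIIProgressBar.getProgressBarString(1000000, 1000100, 1000040);
// System.out.println(bar);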
| 2,063 | 31.761905 | 110 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/BitSetUtils.java
|
package com.iota.iri.utils;
import java.util.BitSet;
/**
* This class offers utility methods to transform BitSets into different data types.
*/
public class BitSetUtils {
/**
* This method converts a byte array to a {@link BitSet} of the given size ({@code sizeOfBitSet}) by copying the
* bits of every byte into the {@link BitSet} in reverse order, starting at the given {@code startOffset}.
*
* It first checks if the byte array is big enough to provide enough bits for the provided parameters and then
* starts the copying process.
*
* @param byteArray byte array that shall be converted
* @param startOffset the amount of bytes to skip at the start
* @param sizeOfBitSet the desired amount of bits in the resulting {@link BitSet}
* @return the {@link BitSet} containing the extracted bytes
*/
public static BitSet convertByteArrayToBitSet(byte[] byteArray, int startOffset, int sizeOfBitSet) {
if((byteArray.length - startOffset) * 8 < sizeOfBitSet) {
throw new IllegalArgumentException("the byte[] is too small to create a BitSet of length " + sizeOfBitSet);
}
BitSet result = new BitSet(sizeOfBitSet);
int bitMask = 128;
for(int i = 0; i < sizeOfBitSet; i++) {
// insert the bits in reverse order
result.set(i, (byteArray[i / 8 + startOffset] & bitMask) != 0);
bitMask = bitMask / 2;
if(bitMask == 0) {
bitMask = 128;
}
}
return result;
}
/**
* Does the same as {@link #convertByteArrayToBitSet(byte[], int, int)} but defaults to copy all remaining bytes
* following the {@code startOffset}.
*
* @param byteArray byte array that shall be converted
* @param startOffset the amount of bytes to skip at the start
* @return the {@link BitSet} containing the extracted bytes
*/
public static BitSet convertByteArrayToBitSet(byte[] byteArray, int startOffset) {
return convertByteArrayToBitSet(byteArray, startOffset, (byteArray.length - startOffset) * 8);
}
/**
* Does the same as {@link #convertByteArrayToBitSet(byte[], int, int)} but defaults to a {@code startOffset} of 0
* and the full length for {@code sizeOfBitSet} resulting in converting the full byte array.
*
* @param byteArray byte array that shall be converted
* @return the {@link BitSet} containing the bytes of the byte array
*/
public static BitSet convertByteArrayToBitSet(byte[] byteArray) {
return convertByteArrayToBitSet(byteArray, 0);
}
/**
* Converts a {@link BitSet} into a byte array by copying the bits in groups of 8 into the resulting bytes of the
* array.
*
* It first calculates the size of the resulting array and then iterates over the bits of the {@link BitSet} to
* write them into the correct index of the byte array. We write the bits in reverse order, shifting them to the
* left before every step.
*
* If the {@link BitSet} is not big enough to fill up the last byte, we fill the remaining bits with zeros by
* shifting the previously written bits to the left accordingly.
*
* @param bitSet the {@link BitSet} that shall be converted.
* @return the byte array containing the bits of the {@link BitSet} in groups of 8
*/
public static byte[] convertBitSetToByteArray(BitSet bitSet) {
int lengthOfBitSet = bitSet.length();
int lengthOfArray = (int) Math.ceil(lengthOfBitSet / 8.0);
byte[] result = new byte[lengthOfArray];
for(int i = 0; i < lengthOfBitSet; i++) {
// for every new index -> start with a 1 so the shifting keeps track of the position we are on (gets shifted
// out when we arrive at the last bit of the current byte)
if(i % 8 == 0) {
result[i / 8] = 1;
}
// shift the existing bits to the left to make space for the bit that gets written now
result[i / 8] <<= 1;
// write the current bit
result[i / 8] ^= bitSet.get(i) ? 1 : 0;
// if we are at the last bit of the BitSet -> shift the missing bytes to "fill up" the remaining space (in
// case the BitSet was not long enough to fill up a full byte)
if(i == (lengthOfBitSet - 1)) {
result[i / 8] <<= (8 - (i % 8) - 1);
}
}
return result;
}
}
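// Worked example (illustrative, not part of the original file): round-tripping a
// single byte. Bits are copied most-significant first, so bit 0 of the BitSet is
// the byte's highest bit.
//
// byte[] input = new byte[] { (byte) 0b10100000 };
// BitSet bits = BitSetUtils.convertByteArrayToBitSet(input);
// // bits.get(0) == true, bits.get(1) == false, bits.get(2) == true
// byte[] back = BitSetUtils.convertBitSetToByteArray(bits);
// // back[0] == (byte) 0b10100000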
| 4,531 | 40.577982 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/Converter.java
|
package com.iota.iri.utils;
import java.util.Arrays;
/**
* Converts between different representations of trytes, bytes, trits and numbers.
*
* <p>
* The IOTA protocol is represented in Trits and Trytes, while data is stored and broadcast in Bytes.
* This class is used to convert between the representations.
* </p>
* <p>
* This class also provides additional conversion tools:
* <ul>
* <li>Converting Trits to numerical values based on <i>Balanced ternary</i> arithmetic</li>
* <li>Converting ascii to a Tryte string</li>
* </ul>
* </p>
*/
public class Converter {
public static final int RADIX = 3;
public static final int BYTE_RADIX = 256;
public static final int MAX_TRIT_VALUE = (RADIX - 1) / 2, MIN_TRIT_VALUE = -MAX_TRIT_VALUE;
public static final int NUMBER_OF_TRITS_IN_A_BYTE = 5;
public static final int NUMBER_OF_TRITS_IN_A_TRYTE = 3;
static final byte[][] BYTE_TO_TRITS_MAPPINGS = new byte[243][];
static final byte[][] TRYTE_TO_TRITS_MAPPINGS = new byte[27][];
public static final int HIGH_INTEGER_BITS = 0xFFFFFFFF;
public static final long HIGH_LONG_BITS = 0xFFFFFFFFFFFFFFFFL;
public static final String TRYTE_ALPHABET = "9ABCDEFGHIJKLMNOPQRSTUVWXYZ";
public static final int MIN_TRYTE_VALUE = -13, MAX_TRYTE_VALUE = 13;
//lookup tables for bytes->trits and trytes->trits conversion
static {
final byte[] trits = new byte[NUMBER_OF_TRITS_IN_A_BYTE];
for (int i = 0; i < 243; i++) {
BYTE_TO_TRITS_MAPPINGS[i] = Arrays.copyOf(trits, NUMBER_OF_TRITS_IN_A_BYTE);
increment(trits, NUMBER_OF_TRITS_IN_A_BYTE);
}
for (int i = 0; i < 27; i++) {
TRYTE_TO_TRITS_MAPPINGS[i] = Arrays.copyOf(trits, NUMBER_OF_TRITS_IN_A_TRYTE);
increment(trits, NUMBER_OF_TRITS_IN_A_TRYTE);
}
}
private static void increment(final byte[] trits, final int size) {
for (int i = 0; i < size; i++) {
if (++trits[i] > Converter.MAX_TRIT_VALUE) {
trits[i] = Converter.MIN_TRIT_VALUE;
} else {
break;
}
}
}
// Bytes <-> Trits
/**
* Allocates a byte array large enough to fit trits, used for converting trits to bytes.
*
* @param tritCount length of trit array (to be converted)
* @return a new byte array large enough to fit trits
*/
public static byte[] allocateBytesForTrits(int tritCount) {
final int expectedLength = (tritCount + NUMBER_OF_TRITS_IN_A_BYTE - 1) / NUMBER_OF_TRITS_IN_A_BYTE;
return new byte[expectedLength];
}
/**
* Converts a trits array to a bytes array based on {@link #NUMBER_OF_TRITS_IN_A_BYTE}.<br>
* This method will overwrite the content of {@code dest}.
*
* @param trits source trits array
* @param srcPos starting position for trits
* @param dest destination bytes array
* @param destPos starting position for destination bytes array
* @param tritsLength amount of trits to convert
*/
public static void bytes(final byte[] trits, final int srcPos, byte[] dest, int destPos, final int tritsLength) {
final int expectedLength = (tritsLength + NUMBER_OF_TRITS_IN_A_BYTE - 1) / NUMBER_OF_TRITS_IN_A_BYTE;
if((dest.length - destPos) < expectedLength) {
throw new IllegalArgumentException("Input array not large enough.");
}
for (int i = 0; i < expectedLength; i++) {
int value = 0;
for (int j = (tritsLength - i * NUMBER_OF_TRITS_IN_A_BYTE) < 5 ? (tritsLength - i * NUMBER_OF_TRITS_IN_A_BYTE) : NUMBER_OF_TRITS_IN_A_BYTE; j-- > 0; ) {
value = value * RADIX + trits[srcPos + i * NUMBER_OF_TRITS_IN_A_BYTE + j];
}
dest[destPos + i] = (byte)value;
}
}
/**
* Converts a trits array to a bytes array. <br>
* This method will overwrite the content of {@code dest}.
* @see #bytes(byte[], int, byte[], int, int)
*
* @param trits source trits array
* @param dest destination bytes array
*/
public static void bytes(final byte[] trits, byte[] dest) {
bytes(trits, 0, dest, 0, trits.length);
}
/**
* Converts a bytes array to a trits array based on {@link #NUMBER_OF_TRITS_IN_A_BYTE}.<br>
* The inverse of {@link #bytes(byte[], int, byte[], int, int)}. <br>
* This method will overwrite the content of {@code trits}.
*
* @param bytes source bytes array
* @param trits destination trits array
*/
public static void getTrits(final byte[] bytes, final byte[] trits) {
int offset = 0;
for (int i = 0; i < bytes.length && offset < trits.length; i++) {
System.arraycopy(BYTE_TO_TRITS_MAPPINGS[bytes[i] < 0 ? (bytes[i] + BYTE_TO_TRITS_MAPPINGS.length) : bytes[i]], 0, trits, offset, trits.length - offset < NUMBER_OF_TRITS_IN_A_BYTE ? (trits.length - offset) : NUMBER_OF_TRITS_IN_A_BYTE);
offset += NUMBER_OF_TRITS_IN_A_BYTE;
}
while (offset < trits.length) {
trits[offset++] = 0;
}
}
// Trytes <-> Trits
/**
* Allocates trits (a byte array) large enough to fit trytes, used for converting trytes to trits.
*
* @param tryteCount length of tryte array (to be converted)
* @return a new byte array large enough to fit trytes
*/
public static byte[] allocateTritsForTrytes(int tryteCount) {
return new byte[tryteCount * NUMBER_OF_TRITS_IN_A_TRYTE];
}
/**
* Converts a tryte string to trits (a bytes array) based on {@link #NUMBER_OF_TRITS_IN_A_TRYTE}.<br>
* This method will overwrite the content of {@code dest}.
*
* @param trytes source trytes string
* @param dest destination trits array
* @param destOffset starting position for destination bytes array
*/
public static void trits(final String trytes, byte[] dest, int destOffset) {
if((dest.length - destOffset) < trytes.length() * NUMBER_OF_TRITS_IN_A_TRYTE) {
throw new IllegalArgumentException("Destination array is not large enough.");
}
for (int i = 0; i < trytes.length(); i++) {
System.arraycopy(TRYTE_TO_TRITS_MAPPINGS[TRYTE_ALPHABET.indexOf(trytes.charAt(i))], 0, dest,
destOffset + i * NUMBER_OF_TRITS_IN_A_TRYTE, NUMBER_OF_TRITS_IN_A_TRYTE);
}
}
/**
* Converts trits (a bytes array) to a tryte string based on {@link #NUMBER_OF_TRITS_IN_A_TRYTE}.<br>
* The inverse of {@link #trits(String, byte[], int)}.
*
* @param trits source trits array
* @param offset starting position for trits
* @param size amount of trits to convert
* @return tryte string
*/
public static String trytes(final byte[] trits, final int offset, final int size) {
final StringBuilder trytes = new StringBuilder();
for (int i = 0; i < (size + NUMBER_OF_TRITS_IN_A_TRYTE - 1) / NUMBER_OF_TRITS_IN_A_TRYTE; i++) {
int j = trits[offset + i * 3] + trits[offset + i * 3 + 1] * 3 + trits[offset + i * 3 + 2] * 9;
if (j < 0) {
j += TRYTE_ALPHABET.length();
}
trytes.append(TRYTE_ALPHABET.charAt(j));
}
return trytes.toString();
}
/**
* Converts trits (a bytes array) to a tryte string.<br>
* @see #trytes(byte[], int, int)
*
* @param trits source trits array
* @return tryte string
*/
public static String trytes(final byte[] trits) {
return trytes(trits, 0, trits.length);
}
/**
* Creates a new trits array with the converted tryte string.<br>
* should be used only for testing as it allocates memory.
* @see #allocatingTritsFromTrytes(String)
* @see #trits(String, byte[], int)
*
* @param trytes source trytes string
* @return a new trit array
*/
public static byte[] allocatingTritsFromTrytes(String trytes) {
byte[] trits = allocateTritsForTrytes(trytes.length());
trits(trytes, trits, 0);
return trits;
}
// Long <-> Trits
/**
* Parses the trits argument as a long value, based on <i>Balanced ternary</i> arithmetic
* @param trits Array of trits to be parsed
* @param srcPos Starting position in trits array
* @param size Amount of trits to parse
* @return the long value represented by the trits array
*/
public static long longValue(final byte[] trits, final int srcPos, final int size) {
long value = 0;
for (int i = size; i-- > 0; ) {
value = value * RADIX + trits[srcPos + i];
}
return value;
}
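// Worked example (illustrative, not part of the original file): trits are stored
// least-significant first, so {1, 1, 0, -1} decodes as
// 1*1 + 1*3 + 0*9 + (-1)*27 = -23.
//
// byte[] exampleTrits = { 1, 1, 0, -1 };
// long value = Converter.longValue(exampleTrits, 0, exampleTrits.length); // -23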
/**
* fills a trit array with a representation of the {@code value} argument in <i>Balanced ternary</i> arithmetic
* @param value a long value
* @param destination destination trit array
* @param offset starting position of destination array
* @param size max amount of trits to fill
*/
public static void copyTrits(final long value, final byte[] destination, final int offset, final int size) {
long absoluteValue = value < 0 ? -value : value;
for (int i = 0; i < size; i++) {
int remainder = (int)(absoluteValue % RADIX);
absoluteValue /= RADIX;
if (remainder > MAX_TRIT_VALUE) {
remainder = MIN_TRIT_VALUE;
absoluteValue++;
}
destination[offset + i] = (byte) remainder;
}
if (value < 0) {
for (int i = 0; i < size; i++) {
destination[offset + i] = (byte) -destination[offset + i];
}
}
}
// Additional conversions
/**
* Converts an {@code ASCII string} into a {@code TryteString} representation.
* <p>
* each ASCII character is converted to 2 Trytes.
* </p>
*
* @param input ASCII string.
* @return Trytes string
*/
public static String asciiToTrytes(String input) {
StringBuilder sb = new StringBuilder(80);
for (int i = 0; i < input.length(); i++) {
int asciiValue = input.charAt(i);
// If not recognizable ASCII character, return null
if (asciiValue > 255) {
return null;
}
int firstValue = asciiValue % 27;
int secondValue = (asciiValue - firstValue) / 27;
sb.append(TRYTE_ALPHABET.charAt(firstValue));
sb.append(TRYTE_ALPHABET.charAt(secondValue));
}
return sb.toString();
}
/**
* Converts a {@code TryteString} into an {@code ASCII string} representation.
* <p>
* every 2 Trytes are converted to an ASCII character.
* </p>
*
* @param input Trytes string
* @return ASCII string.
*/
public static String trytesToAscii(String input) {
String trytes;
if (input.length() % 2 != 0) {
trytes = input + '9';
} else {
trytes = input;
}
StringBuilder sb = new StringBuilder();
for (int i = 0; i < trytes.length() - 1; i += 2) {
int firstValue = TRYTE_ALPHABET.indexOf(trytes.charAt(i));
int secondValue = TRYTE_ALPHABET.indexOf(trytes.charAt(i + 1));
if (firstValue == -1 || secondValue == -1) {
throw new IllegalArgumentException("Input contains illegal character.");
}
int asciiValue = secondValue * 27 + firstValue;
if (asciiValue > 255) {
throw new IllegalArgumentException("Calculated result exceed the range of ASCII.");
}
sb.append((char)asciiValue);
}
return sb.toString();
}
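// Round-trip sketch (illustrative, not part of the original file): 'Z' has the
// ASCII value 90; 90 % 27 = 9 maps to 'I' and (90 - 9) / 27 = 3 maps to 'C'.
//
// String trytes = Converter.asciiToTrytes("Z"); // "IC"
// String ascii = Converter.trytesToAscii(trytes); // "Z"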
public static Pair<long[], long[]> longPair(byte[] trits) {
long[] low = new long[trits.length];
long[] hi = new long[trits.length];
for(int i = 0; i< trits.length; i++) {
low[i] = trits[i] != 1 ? HIGH_LONG_BITS: 0;
hi[i] = trits[i] != -1 ? HIGH_LONG_BITS: 0;
}
return new Pair<>(low, hi);
}
public static byte[] trits(final Pair<long[], long[]> pair, final int bitIndex) {
final int length;
if(pair.low.length == pair.hi.length || pair.low.length < pair.hi.length) {
length = pair.low.length;
} else {
length = pair.hi.length;
}
final byte[] trits = new byte[length];
long low;
long hi;
int mask = 1 << bitIndex;
for(int i = 0; i < length; i++) {
low = pair.low[i] & mask;
hi = pair.hi[i] & mask;
if( hi == low ) {
trits[i] = 0;
} else if ( low == 0 ) {
trits[i] = 1;
} else if ( hi == 0 ) {
trits[i] = -1;
}
}
return trits;
}
public static byte[] trits(long[] low, long[] hi) {
byte[] trits = new byte[low.length];
for(int i = 0; i < trits.length; i++) {
trits[i] = low[i] == 0 ? 1 : hi[i] == 0 ? (byte) -1 : (byte) 0;
}
return trits;
}
public static byte[] trits(byte[] low, byte[] hi) {
byte[] trits = new byte[low.length];
for(int i = 0; i < trits.length; i++) {
trits[i] = low[i] == 0 ? 1 : hi[i] == 0 ? (byte) -1 : (byte) 0;
}
return trits;
}
}
| 13,528 | 34.69657 | 246 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/IotaIOUtils.java
|
package com.iota.iri.utils;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class IotaIOUtils extends IOUtils {
private static final Logger log = LoggerFactory.getLogger(IotaIOUtils.class);
public static void closeQuietly(AutoCloseable... autoCloseables) {
for (AutoCloseable it : autoCloseables) {
try {
if (it != null) {
it.close();
}
} catch (Exception ignored) {
log.debug("Silent exception occured", ignored);
}
}
}
/**
* Parses an INI file with respect to sections, without relying on an external library.
*
* From: https://stackoverflow.com/a/41084504/4512850 -ish
* @param stream The input stream we will be reading the INI from
* @return A map of sections with their properties.
* If there was a section-less start, these will be put in "default".
* @throws IOException
*/
public static Map<String, Properties> parseINI(InputStream stream) throws IOException {
Map<String, Properties> result = new HashMap<>();
@SuppressWarnings("serial")
Properties p = new Properties() {
private Properties section;
@Override
public synchronized Object put(Object key, Object value) {
String header = (((String) key) + " " + value).trim();
if (header.startsWith("[") && header.endsWith("]")) {
return result.put(header.substring(1, header.length() - 1),
section = new Properties());
} else if (section != null){
return section.put(key, value);
} else {
return super.put(key, value);
}
};
};
p.load(stream);
result.put("default", p);
return result;
}
}
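// Usage sketch (illustrative, not part of the original file): given an INI stream
// containing
//
//   max_open_files=100
//   [DBOptions]
//   bytes_per_sync=1048576
//
// section-less keys end up under "default" and bracketed headers open new sections:
//
// Map<String, Properties> sections = IotaIOUtils.parseINI(stream);
// sections.get("default").getProperty("max_open_files"); // "100"
// sections.get("DBOptions").getProperty("bytes_per_sync"); // "1048576"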
| 2,127 | 31.242424 | 95 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/IotaUtils.java
|
package com.iota.iri.utils;
import com.iota.iri.IRI;
import com.iota.iri.model.Hash;
import java.io.FileReader;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.maven.model.Model;
import org.apache.maven.model.io.xpp3.MavenXpp3Reader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class IotaUtils {
private final static long MB_FACTOR = 1000 * 1000;
private final static long MIB_FACTOR = 1024 * 1024;
private final static long GB_FACTOR = 1000 * MB_FACTOR;
private final static long GIB_FACTOR = 1024 * MIB_FACTOR;
private final static long TB_FACTOR = 1000 * GB_FACTOR;
private final static long TIB_FACTOR = 1024 * GIB_FACTOR;
private static final Logger log = LoggerFactory.getLogger(IotaUtils.class);
/**
* Returns the current version IRI is running by reading the Jar manifest.
* If we are not running from a jar, or the manifest is missing, we read straight from the pom
*
* @return the implementation version of IRI
*/
public static String getIriVersion() {
String implementationVersion = IRI.class.getPackage().getImplementationVersion();
//If not in manifest (can happen when running from IDE)
if (implementationVersion == null) {
MavenXpp3Reader reader = new MavenXpp3Reader();
try {
Model model = reader.read(new FileReader("pom.xml"));
implementationVersion = model.getVersion();
} catch (Exception e) {
log.error("Failed to parse version from pom", e);
}
}
return implementationVersion;
}
public static List<String> splitStringToImmutableList(String string, String regexSplit) {
return Arrays.stream(string.split(regexSplit))
.filter(StringUtils::isNoneBlank)
.collect(Collectors
.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
/**
* Used to create low-memory index keys.
*
* @param hash the hash we create the key from
* @param length the length of the desired subhash
* @return a {@link ByteBuffer} that holds a subarray of {@link Hash#bytes()}
* that has the specified {@code length}
*/
public static ByteBuffer getSubHash(Hash hash, int length) {
if (hash == null) {
return null;
}
return ByteBuffer.wrap(Arrays.copyOf(hash.bytes(), length));
}
/**
* @param clazz Class to inspect
* @return All the declared and inherited setter method of {@code clazz}
*/
public static List<Method> getAllSetters(Class<?> clazz) {
List<Method> setters = new ArrayList<>();
while (clazz != Object.class) {
setters.addAll(Stream.of(clazz.getDeclaredMethods())
.filter(method -> method.getName().startsWith("set"))
.collect(Collectors.toList()));
clazz = clazz.getSuperclass();
}
return Collections.unmodifiableList(setters);
}
public static <T> List<T> createImmutableList(T... values) {
return Collections.unmodifiableList(Arrays.asList(values));
}
/**
* Creates a single thread executor service that has an unbounded queue.
* @param name the name to give to the thread
* @return a named single thread executor service
*
* @see com.iota.iri.utils.thread.BoundedScheduledExecutorService
* @see com.iota.iri.utils.thread.DedicatedScheduledExecutorService
*/
public static ExecutorService createNamedSingleThreadExecutor(String name) {
return Executors.newSingleThreadExecutor(r -> new Thread(r, name));
}
/**
* Parses a human readable file size string (tb, gb, mb and their binary variants).
* Expected format: [double][optional space][case insensitive unit modifier]
*
* Kb is not parsed, as it is too small to be useful for a persistence provider.
*
* @return The size of the human readable string in bytes, rounded down.
*/
public static long parseFileSize(String humanReadableSize) {
humanReadableSize = humanReadableSize.replaceAll(",", "").toLowerCase();
int spaceNdx = humanReadableSize.indexOf(" ");
double amount;
//If there is no space, scan past the digits to find the unit
if (spaceNdx == -1) {
spaceNdx = 0;
while (spaceNdx < humanReadableSize.length() &&
(Character.isDigit(humanReadableSize.charAt(spaceNdx)) || humanReadableSize.charAt(spaceNdx) == '.' )) {
spaceNdx++;
}
// Still nothing? The input started with a non-digit character
if (spaceNdx == 0) {
return -1;
}
amount = Double.parseDouble(humanReadableSize.substring(0, spaceNdx));
} else {
// ++ to skip the space
amount = Double.parseDouble(humanReadableSize.substring(0, spaceNdx++));
}
//Default to GB
String sub = spaceNdx == humanReadableSize.length() ? "gb" : humanReadableSize.substring(spaceNdx);
switch (sub) {
case "tb":
return (long) (amount * TB_FACTOR);
case "tib":
return (long) (amount * TIB_FACTOR);
case "gb":
return (long) (amount * GB_FACTOR);
case "gib":
return (long) (amount * GIB_FACTOR);
case "mb":
return (long) (amount * MB_FACTOR);
case "mib":
return (long) (amount * MIB_FACTOR);
default:
return -1;
}
}
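// Worked examples (illustrative, not part of the original file):
//
// IotaUtils.parseFileSize("1.5 GB"); // 1500000000 (decimal unit)
// IotaUtils.parseFileSize("2MiB"); // 2097152 (binary unit, space optional)
// IotaUtils.parseFileSize("15"); // no unit defaults to gb -> 15000000000
// IotaUtils.parseFileSize("foo"); // -1 (unparseable input)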
/**
* @see FileUtils#byteCountToDisplaySize(Long)
*/
public static String bytesToReadableFilesize(long bytes) {
return FileUtils.byteCountToDisplaySize(bytes);
}
}
| 6,242 | 35.723529 | 124 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/MapIdentityManager.java
|
package com.iota.iri.utils;
import com.iota.iri.crypto.Sponge;
import com.iota.iri.crypto.SpongeFactory;
import io.undertow.security.idm.IdentityManager;
import io.undertow.security.idm.Account;
import io.undertow.security.idm.Credential;
import io.undertow.security.idm.PasswordCredential;
import java.security.Principal;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import com.iota.iri.crypto.Curl;
public class MapIdentityManager implements IdentityManager {
private final Map<String, char[]> users;
public MapIdentityManager(final Map<String, char[]> users) {
this.users = users;
}
@Override
public Account verify(Account account) {
// An existing account, so for testing we assume it is still valid.
return account;
}
@Override
public Account verify(String id, Credential credential) {
Account account = getAccount(id);
if (account != null && verifyCredential(account, credential)) {
return account;
}
return null;
}
@Override
public Account verify(Credential credential) {
// TODO Auto-generated method stub
return null;
}
private boolean verifyCredential(Account account, Credential credential) {
if (credential instanceof PasswordCredential) {
char[] givenPassword = ((PasswordCredential) credential).getPassword();
String trytes = Converter.asciiToTrytes(new String(givenPassword));
byte[] inTrits = Converter.allocateTritsForTrytes(trytes.length());
Converter.trits(trytes, inTrits, 0);
byte[] hashTrits = new byte[Curl.HASH_LENGTH];
Sponge curl;
curl = SpongeFactory.create(SpongeFactory.Mode.CURLP81);
curl.absorb(inTrits, 0, inTrits.length);
curl.squeeze(hashTrits, 0, Curl.HASH_LENGTH);
String outTrytes = Converter.trytes(hashTrits);
char[] charOutTrytes = outTrytes.toCharArray();
char[] expectedPassword = users.get(account.getPrincipal().getName());
boolean verified = Arrays.equals(givenPassword, expectedPassword);
// Password can either be clear text or the hash of the password
if (!verified) {
verified = Arrays.equals(charOutTrytes, expectedPassword);
}
return verified;
}
return false;
}
private Account getAccount(final String id) {
if (users.containsKey(id)) {
return new Account() {
private final Principal principal = new Principal() {
@Override
public String getName() {
return id;
}
};
@Override
public Principal getPrincipal() {
return principal;
}
@Override
public Set<String> getRoles() {
return Collections.emptySet();
}
};
}
return null;
}
}
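// Usage sketch (illustrative, not part of the original file): the stored password
// may be either the clear text itself or the Curl hash (as trytes) of that text.
//
// Map<String, char[]> users = new HashMap<>();
// users.put("iota", "secret".toCharArray());
// IdentityManager idm = new MapIdentityManager(users);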
| 3,156 | 30.888889 | 95 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/Pair.java
|
package com.iota.iri.utils;
public class Pair<S, T> {
public final S low;
public final T hi;
public Pair(S k, T v) {
low = k;
hi = v;
}
}
| 172 | 13.416667 | 27 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/SafeUtils.java
|
package com.iota.iri.utils;
import java.util.Collection;
/**
* Null safe utils
*/
public class SafeUtils {
public static <T> boolean isContaining(Collection<T> collection, T element) {
return collection != null && element != null && collection.contains(element);
}
}
| 289 | 18.333333 | 85 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/Serializer.java
|
package com.iota.iri.utils;
/**
* Created by paul on 3/13/17 for iri-testnet.
*/
public class Serializer {
public static byte[] serialize(long value) {
byte[] result = new byte[Long.BYTES];
for (int i = Long.BYTES - 1; i >= 0; i--) {
result[i] = (byte)(value & 0xFF);
value >>= 8;
}
return result;
}
public static byte[] serialize(int integer) {
byte[] result = new byte[Integer.BYTES];
for (int i = Integer.BYTES - 1; i >= 0; i--) {
result[i] = (byte)(integer & 0xFF);
integer >>= 8;
}
return result;
}
public static long getLong(byte[] bytes) {
return getLong(bytes, 0);
}
public static long getLong(byte[] bytes, int start) {
if(bytes == null) {
return 0;
}
int length = Long.BYTES;
long res = 0;
for (int i=0; i< length;i++) {
res |= (bytes[start + i] & 0xFFL) << ((length-i-1) * 8);
}
return res;
}
public static int getInteger(byte[] bytes) {
return getInteger(bytes, 0);
}
public static int getInteger(byte[] bytes, int start) {
if(bytes == null) {
return 0;
}
int length = Integer.BYTES;
int res = 0;
for (int i=0; i< length;i++) {
res |= (bytes[start + i] & 0xFFL) << ((length-i-1) * 8);
}
return res;
}
}
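// Round-trip sketch (illustrative, not part of the original file): values are
// serialized big-endian (most significant byte first).
//
// byte[] bytes = Serializer.serialize(42L); // {0, 0, 0, 0, 0, 0, 0, 42}
// long value = Serializer.getLong(bytes); // 42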
| 1,459 | 25.545455 | 68 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/SlackBotFeed.java
|
package com.iota.iri.utils;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import javax.net.ssl.HttpsURLConnection;
public class SlackBotFeed {
public static void reportToSlack(final String message) {
try {
final String request = "token="
+ URLEncoder.encode("<botToken>", "UTF-8") + "&channel="
+ URLEncoder.encode("#botbox", "UTF-8") + "&text=" + URLEncoder.encode(message, "UTF-8") + "&as_user=true";
final HttpURLConnection connection = (HttpsURLConnection) (new URL("https://slack.com/api/chat.postMessage")).openConnection();
((HttpsURLConnection)connection).setHostnameVerifier((hostname, session) -> true);
connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
connection.setRequestMethod("POST");
connection.setDoOutput(true);
OutputStream out = connection.getOutputStream();
out.write(request.getBytes("UTF-8"));
out.close();
ByteArrayOutputStream result = new ByteArrayOutputStream();
InputStream inputStream = connection.getInputStream();
byte[] buffer = new byte[1024];
int length;
while ((length = inputStream.read(buffer)) != -1) {
result.write(buffer, 0, length);
}
} catch (final Exception e) {
e.printStackTrace();
}
}
}
| 1,597 | 34.511111 | 139 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/TransactionTruncator.java
|
package com.iota.iri.utils;
import com.iota.iri.model.persistables.Transaction;
/**
* Provides utility methods to truncate and expand raw transaction data.
*/
public class TransactionTruncator {
/**
* The max amount of bytes a signature message fragment is made up from.
*/
public final static int SIG_DATA_MAX_BYTES_LENGTH = 1312;
/**
* The amount of bytes making up the non signature message fragment part of a transaction.
*/
public final static int NON_SIG_TX_PART_BYTES_LENGTH = 292;
/**
* Truncates the given byte encoded transaction by removing unneeded bytes from the signature message fragment.
*
* @param txBytes the transaction bytes to truncate
* @return an array containing the truncated transaction data
*/
public static byte[] truncateTransaction(byte[] txBytes) {
// check how many bytes from the signature can be truncated
int bytesToTruncate = 0;
for (int i = SIG_DATA_MAX_BYTES_LENGTH - 1; i >= 0; i--) {
if (txBytes[i] != 0) {
break;
}
bytesToTruncate++;
}
// allocate space for truncated tx
byte[] truncatedTx = new byte[SIG_DATA_MAX_BYTES_LENGTH - bytesToTruncate + NON_SIG_TX_PART_BYTES_LENGTH];
System.arraycopy(txBytes, 0, truncatedTx, 0, SIG_DATA_MAX_BYTES_LENGTH - bytesToTruncate);
System.arraycopy(txBytes, SIG_DATA_MAX_BYTES_LENGTH, truncatedTx, SIG_DATA_MAX_BYTES_LENGTH - bytesToTruncate,
NON_SIG_TX_PART_BYTES_LENGTH);
return truncatedTx;
}
/**
* Expands an array containing a truncated transaction using a given reference size
* to determine the amount of bytes to pad.
*
* @param data the truncated transaction data to be expanded
* @param referenceSize the max size to use as a reference to compute the bytes to be added
* @return an array containing the expanded transaction data
*/
public static byte[] expandTransaction(byte[] data, int referenceSize) {
byte[] txDataBytes = new byte[Transaction.SIZE];
int numOfBytesOfSigMsgFragToExpand = referenceSize - data.length;
byte[] sigMsgFragPadding = new byte[numOfBytesOfSigMsgFragToExpand];
// we deduct the transaction bytes size from the reference to get the correct
// length of signature message bytes we need to copy from the source data
int sigMsgFragBytesToCopy = data.length - (referenceSize - Transaction.SIZE)
- TransactionTruncator.NON_SIG_TX_PART_BYTES_LENGTH;
// build up transaction payload. empty signature message fragment equals padding with 1312x 0 bytes
System.arraycopy(data, 0, txDataBytes, 0, sigMsgFragBytesToCopy);
System.arraycopy(sigMsgFragPadding, 0, txDataBytes, sigMsgFragBytesToCopy, sigMsgFragPadding.length);
System.arraycopy(data, sigMsgFragBytesToCopy, txDataBytes, TransactionTruncator.SIG_DATA_MAX_BYTES_LENGTH,
TransactionTruncator.NON_SIG_TX_PART_BYTES_LENGTH);
return txDataBytes;
}
/**
* Expands an array containing a truncated transaction.
*
* @param data the truncated transaction data to be expanded
* @return an array containing the expanded transaction data
*/
public static byte[] expandTransaction(byte[] data) {
return expandTransaction(data, Transaction.SIZE);
}
}
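// Round-trip sketch (illustrative, not part of the original file): trailing zero
// bytes of the signature message fragment are stripped and later restored.
//
// byte[] raw = new byte[SIG_DATA_MAX_BYTES_LENGTH + NON_SIG_TX_PART_BYTES_LENGTH];
// byte[] truncated = TransactionTruncator.truncateTransaction(raw);
// // truncated.length == NON_SIG_TX_PART_BYTES_LENGTH (the all-zero fragment is gone)
// byte[] restored = TransactionTruncator.expandTransaction(truncated);
// // restored.length == Transaction.SIZE and Arrays.equals(raw, restored) holds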
| 3,444 | 43.166667 | 118 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/collections/impl/BoundedHashSet.java
|
package com.iota.iri.utils.collections.impl;
import com.iota.iri.utils.collections.interfaces.BoundedSet;
import java.util.Collection;
import java.util.HashSet;
import java.util.stream.Collectors;
/**
* A set that doesn't allow to add elements to it once it is full
*
* @param <E> the type parameter
*/
public class BoundedHashSet<E> extends HashSet<E> implements BoundedSet<E>{
final private int maxSize;
/**
* Instantiates a new Bounded hash set.
*
* @param initialCapacity the initial capacity
* @param loadFactor the load factor of the hashmap
* @param maxSize the max size
*/
public BoundedHashSet(int initialCapacity, float loadFactor, int maxSize) {
super(initialCapacity, loadFactor);
this.maxSize = maxSize;
}
/**
* Instantiates a new Bounded hash set.
*
* @param initialCapacity the initial capacity
* @param maxSize the max size
*/
public BoundedHashSet(int initialCapacity, int maxSize) {
super(initialCapacity);
this.maxSize = maxSize;
}
/**
* Instantiates a new Bounded hash set.
*
* @param maxSize the max size
*/
public BoundedHashSet(int maxSize) {
super();
this.maxSize = maxSize;
}
/**
* Instantiates a new Bounded hash set.
*
* @param c the collection from which you create the set from
* @param maxSize the max size
* @throws NullPointerException if the specified collection is null
*/
public BoundedHashSet(Collection<? extends E> c, int maxSize) {
this(maxSize);
c = c.stream()
.limit(maxSize)
.collect(Collectors.toSet());
this.addAll(c);
}
@Override
public int getMaxSize() {
return maxSize;
}
@Override
public boolean add(E e) {
if (isFull()) {
return false;
}
return super.add(e);
}
@Override
public boolean addAll(Collection<? extends E> c) {
if (isFull()) {
return false;
}
if (!canCollectionBeFullyAdded(c)) {
int remainingSize = getMaxSize() - this.size();
c = c.stream()
.limit(remainingSize)
.collect(Collectors.toSet());
}
return super.addAll(c);
}
}
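// Usage sketch (illustrative, not part of the original file): once the bound is
// reached, add() and addAll() silently drop the excess instead of evicting.
//
// BoundedHashSet<Integer> set = new BoundedHashSet<>(2);
// set.add(1); // true
// set.add(2); // true
// set.add(3); // false, the set is full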
| 2,386 | 24.126316 | 79 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/collections/impl/BoundedSetWrapper.java
|
package com.iota.iri.utils.collections.impl;
import com.iota.iri.utils.collections.interfaces.BoundedSet;
import java.util.*;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.stream.Stream;
/**
* A wrapper for a set that allows up to {@code maxSize} elements.
* Once it is full, old elements are removed as new elements are added.
* The way old elements are chosen depends on the underlying set implementation.
*
*
* @param <E> the type parameter
*/
public class BoundedSetWrapper<E> implements BoundedSet<E>{
private final int maxSize;
private final Set<E> delegate;
/**
* Wraps the given set {@code c}
*
* @param c the set which you delegate the actions to
* @param maxSize the max size
* @throws NullPointerException if the {@code c} is null
* @throws IllegalArgumentException if {@code c} is larger than {@code maxSize}
*/
public BoundedSetWrapper(Set<E> c, int maxSize) {
Objects.requireNonNull(c, "trying to wrap a null set");
requireCollectionIsNotAboveMaxSize(c, maxSize);
this.maxSize = maxSize;
this.delegate = c;
}
@Override
public int getMaxSize() {
return maxSize;
}
@Override
public int size() {
return delegate.size();
}
@Override
public boolean isEmpty() {
return delegate.isEmpty();
}
@Override
public boolean contains(Object o) {
return delegate.contains(o);
}
@Override
public Iterator<E> iterator() {
return delegate.iterator();
}
@Override
public void forEach(Consumer<? super E> action) {
delegate.forEach(action);
}
@Override
public Object[] toArray() {
return delegate.toArray();
}
@Override
public <T> T[] toArray(T[] a) {
return delegate.toArray(a);
}
@Override
public boolean add(E e) {
if (isFull()) {
Iterator<E> iterator = delegate.iterator();
iterator.next();
iterator.remove();
}
return delegate.add(e);
}
@Override
public boolean remove(Object o) {
return delegate.remove(o);
}
@Override
public boolean containsAll(Collection<?> c) {
return delegate.containsAll(c);
}
@Override
public boolean addAll(Collection<? extends E> c) {
requireCollectionIsNotAboveMaxSize(c, getMaxSize());
if (canCollectionBeFullyAdded(c)) {
return delegate.addAll(c);
}
else {
int itemsToDelete = delegate.size() + c.size() - getMaxSize();
Iterator<E> iterator = delegate.iterator();
for (int i = 0; i < itemsToDelete; i++) {
iterator.next();
iterator.remove();
}
return delegate.addAll(c);
}
}
@Override
public boolean removeAll(Collection<?> c) {
return delegate.removeAll(c);
}
@Override
public boolean removeIf(Predicate<? super E> filter) {
return delegate.removeIf(filter);
}
@Override
public boolean retainAll(Collection<?> c) {
return delegate.retainAll(c);
}
@Override
public void clear() {
delegate.clear();
}
@Override
public Spliterator<E> spliterator() {
return delegate.spliterator();
}
@Override
public Stream<E> stream() {
return delegate.stream();
}
@Override
public Stream<E> parallelStream() {
return delegate.parallelStream();
}
@Override
public boolean equals(Object obj) {
return delegate.equals(obj);
}
@Override
public int hashCode() {
return delegate.hashCode();
}
@Override
public String toString() {
return delegate.toString();
}
private void requireCollectionIsNotAboveMaxSize(Collection<? extends E> c, int maxSize) {
if (c.size() > maxSize) {
throw new IllegalArgumentException(String.format("The given collection size %d is larger then maxSize %d", c.size(),
maxSize));
}
}
}
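// Usage sketch (illustrative, not part of the original file): wrapping a
// LinkedHashSet yields FIFO eviction, because its iterator returns elements in
// insertion order and add() removes the first element when full.
//
// BoundedSetWrapper<Integer> set = new BoundedSetWrapper<>(new LinkedHashSet<>(), 2);
// set.add(1);
// set.add(2);
// set.add(3); // evicts 1
// set.contains(1); // false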
| 4,163 | 23.069364 | 128 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/collections/impl/TransformingBoundedHashSet.java
|
package com.iota.iri.utils.collections.impl;
import java.util.Collection;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
public class TransformingBoundedHashSet<E> extends BoundedHashSet<E>{
private final UnaryOperator<E> transformer;
public TransformingBoundedHashSet(int maxSize, UnaryOperator<E> transformer) {
super(maxSize);
this.transformer = transformer;
}
public TransformingBoundedHashSet(Collection<E> c, int maxSize, UnaryOperator<E> transformer) {
super(maxSize);
this.transformer = transformer;
this.addAll(c);
}
@Override
public boolean add(E e) {
if (isFull()) {
return false;
}
E el = transformer.apply(e);
return super.add(el);
}
@Override
public boolean addAll(Collection<? extends E> c) {
Collection<? extends E> col = c;
if (!isFull()) {
col = c.stream()
.map(el -> transformer.apply(el))
.collect(Collectors.toSet());
}
return super.addAll(col);
}
}
| 1,122 | 24.522727 | 99 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/collections/impl/TransformingMap.java
|
package com.iota.iri.utils.collections.impl;
import com.iota.iri.utils.collections.interfaces.UnIterableMap;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.function.UnaryOperator;
/**
* A map that performs unary operations on key-value pairs that are inserted into it.
*
* @param <K> key type
* @param <V> value type
*/
public class TransformingMap<K,V> implements UnIterableMap<K,V> {
private Map<K,V> delegateMap;
private UnaryOperator<K> keyOptimizer;
private UnaryOperator<V> valueTransformer;
public TransformingMap(UnaryOperator<K> keyOptimizer, UnaryOperator<V> valueTransformer) {
this(16, keyOptimizer, valueTransformer);
}
public TransformingMap(int initialCapacity, UnaryOperator<K> keyOptimizer, UnaryOperator<V> valueTransformer) {
this.keyOptimizer = keyOptimizer == null ? UnaryOperator.identity() : keyOptimizer;
this.valueTransformer = valueTransformer == null ? UnaryOperator.identity() : valueTransformer;
this.delegateMap = new HashMap<>(initialCapacity);
}
@Override
public int size() {
return delegateMap.size();
}
@Override
public boolean isEmpty() {
return delegateMap.isEmpty();
}
@Override
public boolean containsKey(K key) {
K newKey = keyOptimizer.apply(key);
return delegateMap.containsKey(newKey);
}
@Override
public boolean containsValue(V value) {
return delegateMap.containsValue(value);
}
@Override
public V get(K key) {
K newKey = keyOptimizer.apply(key);
return delegateMap.get(newKey);
}
@Override
public V put(K key, V value) {
key = keyOptimizer.apply(key);
value = valueTransformer.apply(value);
return delegateMap.put(key, value);
}
@Override
public V remove(K key) {
return delegateMap.remove(key);
}
@Override
public void clear() {
delegateMap.clear();
}
@Override
public Collection<V> values() {
return delegateMap.values();
}
}
| 2,112 | 25.08642 | 115 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/collections/interfaces/BoundedCollection.java
|
package com.iota.iri.utils.collections.interfaces;
import java.util.Collection;
/**
* A collection that can't hold more than {@link #getMaxSize()} elements
*
* @author galrogo on 08/02/18
**/
public interface BoundedCollection<E> extends Collection<E> {
/**
*
* @return the maximal number of elements that the collection can hold
*/
int getMaxSize();
/**
* @return true if no more elements can be added
*/
default boolean isFull() {
return getMaxSize() <= this.size();
}
/**
*
* @param c collection to be added
* @return true only if all the elements in {@code c} can be added to this collection
* else return false
*/
default boolean canCollectionBeFullyAdded(Collection<? extends E> c) {
if (isFull()) {
return false;
}
int remainingSize = getMaxSize() - this.size();
return (c.size() <= remainingSize);
}
}
| 954 | 22.875 | 89 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/collections/interfaces/BoundedSet.java
|
package com.iota.iri.utils.collections.interfaces;
import java.util.Set;
/**
* A set that can't hold more than {@link #getMaxSize()} elements
*
* @author galrogo on 08/02/18
**/
public interface BoundedSet<E> extends BoundedCollection<E>, Set<E>{
}
| 255 | 20.333333 | 68 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/collections/interfaces/UnIterableMap.java
|
package com.iota.iri.utils.collections.interfaces;
import java.util.Collection;
import java.util.Map;
/**
* Similar to {@link Map} but hides key retrieval functionality.
* Thus one can't iterate over keys or entries.
* An implementing class may transform keys to save memory.
*
* @param <K> The key type
* @param <V> The value type
*/
public interface UnIterableMap<K,V> {
/**
* @see Map#size()
* @return {@link Map#size()}
*/
int size();
/**
* @see Map#isEmpty()
* @return {@link Map#isEmpty()}
*/
boolean isEmpty();
/**
* @see Map#containsKey(Object)
* @param key {@link Map#containsKey(Object)}
* @return {@link Map#containsKey(Object)}
*/
boolean containsKey(K key);
/**
* @see Map#containsValue(Object)
* @param value {@link Map#containsValue(Object)}
* @return {@link Map#containsValue(Object)}
*/
boolean containsValue(V value);
/**
* @see Map#get(Object)
* @param key {@link Map#get(Object)}
* @return {@link Map#get(Object)}
*/
V get(K key);
/**
* @see Map#put(Object, Object)
* @param key {@link Map#put(Object, Object)}
* @param value {@link Map#put(Object, Object)}
* @return {@link Map#put(Object, Object)}
*/
V put(K key, V value);
/**
* @see Map#remove(Object)
* @param key {@link Map#remove(Object)}
* @return {@link Map#remove(Object)}
*/
V remove(K key);
/**
* @see Map#clear()
*/
void clear();
/**
* @see Map#values()
* @return {@link Map#values()}
*/
Collection<V> values();
}
| 1,656 | 21.093333 | 69 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/comparators/TryteIndexComparator.java
|
package com.iota.iri.utils.comparators;
import com.iota.iri.utils.Converter;
import java.util.Comparator;
import static com.iota.iri.controllers.TransactionViewModel.CURRENT_INDEX_TRINARY_OFFSET;
import static com.iota.iri.controllers.TransactionViewModel.CURRENT_INDEX_TRINARY_SIZE;
import static com.iota.iri.controllers.TransactionViewModel.TRYTES_SIZE;
/**
* A comparator for trytes based on their current index.
*/
public class TryteIndexComparator implements Comparator<String> {
@Override
public int compare(String t1, String t2) {
return Long.compare(getCurrentIndex(t1), getCurrentIndex(t2));
}
private long getCurrentIndex(String tryte) {
byte[] txTrits = Converter.allocateTritsForTrytes(TRYTES_SIZE);
Converter.trits(tryte, txTrits, 0);
return Converter.longValue(txTrits, CURRENT_INDEX_TRINARY_OFFSET, CURRENT_INDEX_TRINARY_SIZE);
}
}
| 896 | 32.222222 | 102 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/dag/DAGHelper.java
|
package com.iota.iri.utils.dag;
import com.iota.iri.controllers.ApproveeViewModel;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
import com.iota.iri.storage.Tangle;
import java.util.*;
import java.util.function.Consumer;
import java.util.function.Predicate;
import pl.touk.throwing.ThrowingConsumer;
/**
* This class offers generic functions for recurring tasks that are related to the tangle and that otherwise would have
* to be implemented over and over again in different parts of the code.
*/
public class DAGHelper {
/**
* Holds references to the singleton DAGHelper instances.
*/
protected static final HashMap<Tangle, DAGHelper> instances = new HashMap<>();
/**
* Holds a reference to the tangle instance which acts as an interface to the used database.
*/
protected Tangle tangle;
/**
* This method allows us to retrieve the DAGHelper instance that corresponds to the given parameters.
*
* To save memory and to be able to use things like caches effectively, we do not allow creating multiple
* instances of this class for the same set of parameters, but instead create a singleton instance.
*
* @param tangle database interface used to retrieve data
* @return DAGHelper instance that allows to use the implemented methods.
*/
public static DAGHelper get(Tangle tangle) {
DAGHelper instance;
if((instance = instances.get(tangle)) == null) {
synchronized(DAGHelper.class) {
if((instance = instances.get(tangle)) == null) {
instance = new DAGHelper(tangle);
instances.put(tangle, instance);
}
}
}
return instance;
}
/**
* Constructor of the class which allows to provide the required dependencies for the implemented methods.
*
* Since the constructor is protected we are not able to manually create instances of this class and have to
* retrieve them through their singleton accessor {@link #get}.
*
* @param tangle database interface used to retrieve data
*/
protected DAGHelper(Tangle tangle) {
this.tangle = tangle;
}
//region TRAVERSE APPROVERS (BOTTOM -> TOP) ////////////////////////////////////////////////////////////////////////
/**
* This method provides a generic interface for traversing all approvers of a transaction up to a given point.
*
* To make the use of this method as easy as possible we provide a broad range of possible parameter signatures, so
* we do not have to manually convert between milestones, transactions and hashes (see other methods with the same
* name).
*
* It uses a non-recursive, iterative algorithm that is able to handle huge chunks of the tangle without running out
* of memory. It creates a queue of transactions that are being examined and processes them one by one. As new
* approvers are found, they will be added to the queue and processed accordingly.
*
* Every found transaction is passed into the provided condition lambda, to determine if it still belongs to the
* desired set of transactions and only then will be passed on to the currentTransactionConsumer lambda.
*
* @param startingTransactionHash the starting point of the traversal
* @param condition predicate that allows us to control how long the traversal should continue (receives the current
* transaction as a parameter)
* @param currentTransactionConsumer a lambda function that allows us to "process" the found transactions
* @param processedTransactions a set of hashes that shall be considered as "processed" already and that will
* consequently be ignored in the traversal
* @throws TraversalException if anything goes wrong while traversing the graph and processing the transactions
*/
public void traverseApprovers(Hash startingTransactionHash,
Predicate<TransactionViewModel> condition,
Consumer<TransactionViewModel> currentTransactionConsumer,
Set<Hash> processedTransactions) throws TraversalException {
Queue<Hash> transactionsToExamine = new ArrayDeque<>(Collections.singleton(startingTransactionHash));
try {
Hash currentTransactionHash;
while((currentTransactionHash = transactionsToExamine.poll()) != null) {
if(currentTransactionHash == startingTransactionHash || processedTransactions.add(currentTransactionHash)) {
TransactionViewModel currentTransaction = TransactionViewModel.fromHash(tangle, currentTransactionHash);
if(
// do not "test" the starting transaction since it is not an "approver"
currentTransactionHash == startingTransactionHash || (
currentTransaction.getType() != TransactionViewModel.PREFILLED_SLOT &&
condition.test(currentTransaction)
)
) {
// do not consume the starting transaction since it is not an "approver"
if(currentTransactionHash != startingTransactionHash) {
currentTransactionConsumer.accept(currentTransaction);
}
transactionsToExamine.addAll(ApproveeViewModel.load(tangle, currentTransactionHash).getHashes());
}
}
}
} catch (Exception e) {
throw new TraversalException("error while traversing the approvers of transaction " + startingTransactionHash, e);
}
}
/**
* Works like {@link DAGHelper#traverseApprovers(Hash, Predicate, Consumer, Set)} but defaults to an empty
* set of processed transactions to consider all transactions.
*
* @see DAGHelper#traverseApprovers(Hash, Predicate, Consumer, Set)
*
* @param startingTransactionHash the starting point of the traversal
* @param condition predicate that allows to control how long the traversal should continue (receives the current
* transaction as a parameter)
* @param currentTransactionConsumer a lambda function that allows us to "process" the found transactions
* @throws TraversalException if anything goes wrong while traversing the graph and processing the transactions
*/
public void traverseApprovers(Hash startingTransactionHash,
Predicate<TransactionViewModel> condition,
Consumer<TransactionViewModel> currentTransactionConsumer) throws TraversalException {
traverseApprovers(startingTransactionHash, condition, currentTransactionConsumer, new HashSet<>());
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
//region TRAVERSE APPROVEES (TOP -> BOTTOM) ////////////////////////////////////////////////////////////////////////
/**
* This method provides a generic interface for traversing all approvees of a transaction down to a given point.
*
* To make the use of this method as easy as possible we provide a broad range of possible parameter signatures, so
* we do not have to manually convert between milestones, transactions and hashes (see other methods with the same
* name).
*
     * It uses a non-recursive, iterative algorithm that is able to handle huge chunks of the tangle without running out
* of memory. It creates a queue of transactions that are being examined and processes them one by one. As new
* approvees are found, they will be added to the queue and processed accordingly.
*
* Every found transaction is passed into the provided condition lambda, to determine if it still belongs to the
* desired set of transactions and only then will be passed on to the currentTransactionConsumer lambda.
*
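     * <p>
     * A sketch of a call that walks from a tip towards the past (predicate and consumer are again purely
     * illustrative); note that, unlike in the approver variant, the consumer may throw a checked exception:
     * </p>
     * <pre>{@code
     * DAGHelper.get(tangle).traverseApprovees(
     *         tipHash,
     *         approvee -> true,                                   // visit every approvee that is known
     *         approvee -> System.out.println(approvee.getHash()), // may also be a throwing operation
     *         new HashSet<>());
     * }</pre>
     *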
* @param startingTransactionHash the starting point of the traversal
* @param condition predicate that allows to control how long the traversal should continue (receives the current
* transaction as a parameter)
* @param currentTransactionConsumer a lambda function that allows us to "process" the found transactions
* @param processedTransactions a set of hashes that shall be considered as "processed" already and that will
* consequently be ignored in the traversal
* @throws TraversalException if anything goes wrong while traversing the graph and processing the transactions
*/
public void traverseApprovees(Hash startingTransactionHash,
Predicate<TransactionViewModel> condition,
ThrowingConsumer<TransactionViewModel, ? extends Exception> currentTransactionConsumer,
Set<Hash> processedTransactions) throws TraversalException {
Queue<Hash> transactionsToExamine = new ArrayDeque<>(Collections.singleton(startingTransactionHash));
try {
Hash currentTransactionHash;
while((currentTransactionHash = transactionsToExamine.poll()) != null) {
if(currentTransactionHash == startingTransactionHash || processedTransactions.add(currentTransactionHash)) {
TransactionViewModel currentTransaction = TransactionViewModel.fromHash(tangle, currentTransactionHash);
if(
                        currentTransaction.getType() != TransactionViewModel.PREFILLED_SLOT && (
// do not "test" the starting transaction since it is not an "approvee"
currentTransactionHash == startingTransactionHash ||
condition.test(currentTransaction)
)
) {
// do not consume the starting transaction since it is not an "approvee"
if(currentTransactionHash != startingTransactionHash) {
currentTransactionConsumer.accept(currentTransaction);
}
transactionsToExamine.add(currentTransaction.getBranchTransactionHash());
transactionsToExamine.add(currentTransaction.getTrunkTransactionHash());
}
}
}
} catch (Exception e) {
throw new TraversalException("error while traversing the approvees of transaction " + startingTransactionHash, e);
}
}
/**
     * Works like {@link DAGHelper#traverseApprovees(Hash, Predicate, ThrowingConsumer, Set)} but defaults to an empty
* set of processed transactions to consider all transactions.
*
     * @see DAGHelper#traverseApprovees(Hash, Predicate, ThrowingConsumer, Set)
*
* @param startingTransactionHash the starting point of the traversal
* @param condition predicate that allows to control how long the traversal should continue (receives the current
* transaction as a parameter)
* @param currentTransactionConsumer a lambda function that allows us to "process" the found transactions
* @throws TraversalException if anything goes wrong while traversing the graph and processing the transactions
*/
public void traverseApprovees(Hash startingTransactionHash,
Predicate<TransactionViewModel> condition,
ThrowingConsumer<TransactionViewModel, ? extends Exception> currentTransactionConsumer)
throws TraversalException {
traverseApprovees(startingTransactionHash, condition, currentTransactionConsumer, new HashSet<>());
}
/**
     * Finds all tail transaction hashes that can be reached by walking down the approvees of
     * {@code startingTransaction}. The traversal does not descend past a tail once one is reached.
*
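     * <p>
     * A minimal usage sketch (assuming {@code transaction} is an existing {@link TransactionViewModel}):
     * </p>
     * <pre>{@code
     * Set<? extends Hash> tails = DAGHelper.get(tangle).findTails(transaction);
     * }</pre>
     *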
* @param startingTransaction starting point
* @return set of the transaction hashes of the tails
* @throws TraversalException if anything goes wrong while traversing the graph
*/
public Set<? extends Hash> findTails(TransactionViewModel startingTransaction) throws TraversalException {
Set<Hash> tailTxs = new HashSet<>();
Queue<Hash> transactionsToExamine = new ArrayDeque<>();
transactionsToExamine.offer(startingTransaction.getTrunkTransactionHash());
transactionsToExamine.offer(startingTransaction.getBranchTransactionHash());
try {
Hash currentTransactionHash;
while ((currentTransactionHash = transactionsToExamine.poll()) != null) {
TransactionViewModel currentTransaction = TransactionViewModel.fromHash(tangle, currentTransactionHash);
if (currentTransaction.getType() != TransactionViewModel.PREFILLED_SLOT) {
// if tail
if (currentTransaction.getCurrentIndex() == 0) {
tailTxs.add(currentTransaction.getHash());
} else {
transactionsToExamine.add(currentTransaction.getBranchTransactionHash());
transactionsToExamine.add(currentTransaction.getTrunkTransactionHash());
}
}
}
} catch (Exception e) {
throw new TraversalException(
"error while traversing the approvees of transaction " + startingTransaction.getHash(), e);
}
return tailTxs;
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
}
| 13,934 | 54.517928 | 126 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/dag/TraversalException.java
|
package com.iota.iri.utils.dag;
/**
* This class is used to wrap exceptions that are specific to traversing the graph.
*
 * It allows us to distinguish between the different kinds of errors that can happen during the execution of the code.
*/
public class TraversalException extends Exception {
/**
* Constructor of the exception which allows us to provide a specific error message and the cause of the error.
*
     * @param message reason why this error occurred
* @param cause wrapped exception that caused this error
*/
public TraversalException(String message, Throwable cause) {
super(message, cause);
}
/**
* Constructor of the exception which allows us to provide a specific error message without having an underlying
* cause.
*
     * @param message reason why this error occurred
*/
public TraversalException(String message) {
super(message);
}
/**
* Constructor of the exception which allows us to wrap the underlying cause of the error without providing a
* specific reason.
*
* @param cause wrapped exception that caused this error
*/
public TraversalException(Throwable cause) {
super(cause);
}
}
| 1,237 | 30.74359 | 116 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/datastructure/CuckooFilter.java
|
package com.iota.iri.utils.datastructure;
/**
* The Cuckoo Filter is a probabilistic data structure that supports fast set membership testing.
*
* It is very similar to a bloom filter in that they both are very fast and space efficient. Both the bloom filter and
* cuckoo filter also report false positives on set membership.
*
* Cuckoo filters are a relatively new data structure, described in a paper in 2014 by Fan, Andersen, Kaminsky, and
* Mitzenmacher (https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf). They improve upon the design of the bloom
* filter by offering deletion, limited counting, and a bounded false positive probability, while still maintaining a
* similar space complexity.
*
* They use cuckoo hashing to resolve collisions and are essentially a compact cuckoo hash table.
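 *
 * <p>
 * A minimal usage sketch (using the {@code CuckooFilterImpl} from this repository; the items are illustrative):
 * </p>
 * <pre>{@code
 * CuckooFilter filter = new CuckooFilterImpl(1000);   // room for at least 1000 items
 * filter.add("ITEM");                                 // insert an element
 * filter.contains("ITEM");                            // true ("probably" in the set)
 * filter.contains("OTHER");                           // false, unless we hit a false positive
 * filter.delete("ITEM");                              // deletion is supported, unlike in bloom filters
 * }</pre>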
*/
public interface CuckooFilter {
/**
     * Adds a new element to the filter, which can then be queried with {@link #contains(String)}.
*
* @param item element that shall be stored in the filter
* @return true if the insertion was successful (if the filter is too full this can return false)
* @throws IndexOutOfBoundsException if we try to add an element to an already too full filter
*/
boolean add(String item) throws IndexOutOfBoundsException;
/**
     * Adds a new element to the filter, which can then be queried with {@link #contains(byte[])}.
*
* @param item element that shall be stored in the filter
* @return true if the insertion was successful (if the filter is too full this can return false)
* @throws IndexOutOfBoundsException if we try to add an element to an already too full filter
*/
boolean add(byte[] item) throws IndexOutOfBoundsException;
/**
* Queries for the existence of an element in the filter.
*
* @param item element that shall be checked
* @return true if it is "probably" in the filter (~3% false positives) or false if it is "definitely" not in there
*/
boolean contains(String item);
/**
* Queries for the existence of an element in the filter.
*
* @param item element that shall be checked
* @return true if it is "probably" in the filter (~3% false positives) or false if it is "definitely" not in there
*/
boolean contains(byte[] item);
/**
* Deletes an element from the filter.
*
* @param item element that shall be deleted from filter
* @return true if something was deleted matching the element or false otherwise
*/
boolean delete(String item);
/**
* Deletes an element from the filter.
*
* @param item element that shall be deleted from filter
* @return true if something was deleted matching the element or false otherwise
*/
boolean delete(byte[] item);
/**
* This method returns the actual capacity of the filter.
*
     * Since the underlying table size has to be a power of two and we aim for a load factor of at most 0.955, the
     * actual capacity is bigger than the number of items we passed into the constructor.
*
* @return the actual capacity of the filter
*/
int getCapacity();
/**
* This method returns the amount of elements that are stored in the filter.
*
     * Since a cuckoo filter can have collisions, the size is not necessarily identical to the number of items that we
     * added.
*
* @return the amount of stored items
*/
int size();
}
| 3,490 | 39.126437 | 119 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/datastructure/impl/CuckooFilterImpl.java
|
package com.iota.iri.utils.datastructure.impl;
import com.iota.iri.utils.BitSetUtils;
import com.iota.iri.utils.datastructure.CuckooFilter;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.BitSet;
/**
* This class implements the basic contract of the {@link CuckooFilter}.
*/
public class CuckooFilterImpl implements CuckooFilter {
/**
* The amount of times we try to kick elements when inserting before we consider the index to be too full.
*/
private static final int MAX_NUM_KICKS = 500;
/**
* A reference to the last element that didn't fit into the filter (used for "soft failure" on first attempt).
*/
private CuckooFilterItem lastVictim;
/**
* The hash function that is used to generate finger prints and indexes (defaults to SHA1).
*/
private MessageDigest hashFunction;
/**
     * The amount of buckets in our table (gets calculated from the itemCount that we want to store).
*/
private int tableSize = 1;
/**
* The amount of items that can be stored in each bucket.
*/
private int bucketSize;
/**
* The amount of bits per fingerprint for each entry (the optimum is around 7 bits with a load of ~0.955)
*/
private int fingerPrintSize;
/**
     * Holds the amount of items that are stored in the filter.
*/
private int storedItems = 0;
/**
* Holds the capacity of the filter.
*/
private int capacity = 1;
/**
* The actual underlying data structure holding the elements.
*/
private CuckooFilterTable cuckooFilterTable;
/**
* Simplified constructor that automatically chooses the values with best space complexity and false positive rate.
*
* The optimal values are a bucket size of 4 and a fingerprint size of 7.2 bits (we round up to 8 bits). For more
* info regarding those values see
* <a href="https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf">cuckoo-conext2014.pdf</a> and
* <a href="https://brilliant.org/wiki/cuckoo-filter/#cuckoo-hashing">Cuckoo Hashing</a>.
*
* NOTE: The actual size will be slightly bigger since the size has to be a power of 2 and take the optimal load
* factor of 0.955 into account.
*
* @param itemCount the minimum amount of items that should fit into the filter
*/
public CuckooFilterImpl(int itemCount) {
this(itemCount, 4, 8);
}
/**
* Advanced constructor that allows for fine tuning of the desired filter.
*
* It first saves a reference to the hash function and then checks the parameters - the finger print size cannot
     * be bigger than 128 bits because SHA1 generates 160 bits, of which up to 128 are used for the fingerprint and
     * the remaining 32 for the index.
*
* After verifying that the passed in parameters are reasonable, we calculate the required size of the
* {@link CuckooFilterTable} by increasing the table size exponentially until we can fit the desired item count with
* a load factor of <= 0.955. Finally we create the {@link CuckooFilterTable} that will hold our data.
*
* NOTE: The actual size will be slightly bigger since the size has to be a power of 2 and take the optimal load
* factor of 0.955 into account.
*
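     * <p>
     * Worked example: for {@code itemCount = 1000} and {@code bucketSize = 4}, a table size of 256 would give
     * {@code 256 * 4 = 1024} slots and a load factor of roughly {@code 1000 / 1024 = 0.977}, which exceeds 0.955,
     * so the table size is doubled once more to 512 buckets ({@code 2048} slots, load roughly {@code 0.488}) and
     * the reported capacity becomes {@code 2049} (the extra slot accounts for the potential victim).
     * </p>
     *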
* @param itemCount the minimum amount of items that should fit into the filter
* @param bucketSize the amount of items that can be stored in each bucket
* @param fingerPrintSize the amount of bits per fingerprint (it has to be bigger than 0 and smaller than 128)
* @throws IllegalArgumentException if the finger print size is too small or too big
* @throws InternalError if the SHA1 hashing function can not be found with this java version [should never happen]
*/
public CuckooFilterImpl(int itemCount, int bucketSize, int fingerPrintSize) throws IllegalArgumentException,
InternalError {
try {
hashFunction = MessageDigest.getInstance("SHA1");
} catch(NoSuchAlgorithmException e) {
throw new InternalError("missing SHA1 support - please check your JAVA installation");
}
if(fingerPrintSize <= 0 || fingerPrintSize > 128) {
            throw new IllegalArgumentException("invalid finger print size \"" + fingerPrintSize +
                    "\" [expected value between 1 and 128]");
}
while((tableSize * bucketSize) < itemCount || itemCount * 1.0 / (tableSize * bucketSize) > 0.955) {
tableSize <<= 1;
}
this.bucketSize = bucketSize;
this.fingerPrintSize = fingerPrintSize;
this.capacity = tableSize * bucketSize + 1;
cuckooFilterTable = new CuckooFilterTable(tableSize, bucketSize, fingerPrintSize);
}
/**
* {@inheritDoc}
*
* It retrieves the necessary details by passing it into the Item class and then executes the internal add logic.
*/
@Override
public boolean add(String item) throws IndexOutOfBoundsException {
return add(new CuckooFilterItem(item));
}
/**
* {@inheritDoc}
*
* It retrieves the necessary details by passing it into the Item class and then executes the internal add logic.
*/
@Override
public boolean add(byte[] item) throws IndexOutOfBoundsException {
return add(new CuckooFilterItem(hashFunction.digest(item)));
}
/**
* {@inheritDoc}
*
* It retrieves the necessary details by passing it into the Item class and then executes the internal contains
* logic.
*/
@Override
public boolean contains(String item) {
return contains(new CuckooFilterItem(item));
}
/**
* {@inheritDoc}
*
* It retrieves the necessary details by passing it into the Item class and then executes the internal contains
* logic.
*/
@Override
public boolean contains(byte[] item) {
return contains(new CuckooFilterItem(hashFunction.digest(item)));
}
/**
* {@inheritDoc}
*
* It retrieves the necessary details by passing it into the Item class and then executes the internal delete logic.
*/
@Override
public boolean delete(String item) {
return delete(new CuckooFilterItem(item));
}
/**
* {@inheritDoc}
*
* It retrieves the necessary details by passing it into the Item class and then executes the internal delete logic.
*/
@Override
public boolean delete(byte[] item) {
return delete(new CuckooFilterItem(hashFunction.digest(item)));
}
/**
* {@inheritDoc}
*/
@Override
public int getCapacity() {
return capacity;
}
/**
* {@inheritDoc}
*/
@Override
public int size() {
return storedItems;
}
/**
     * Adds a new element to the filter that can then be queried for existence.
*
* It first checks if the item is already a part of the filter and skips the insertion if that is the case. If the
* element is not part of the filter, we check if the table is too full already by checking if we have a kicked out
* victim.
*
* If the filter is not too full, we insert the element by trying to place it in its associated position (moving
* existing elements if space is needed).
*
* @param item item to be stored in the filter
* @return true if the insertion was successful (if the filter is too full this can return false)
* @throws IndexOutOfBoundsException if we try to add an item to an already too full filter
*/
private boolean add(CuckooFilterItem item) throws IndexOutOfBoundsException {
if(contains(item)) {
return true;
}
if(lastVictim != null) {
throw new IndexOutOfBoundsException("the filter is too full");
}
        // try to insert the item into the first free slot of one of its two buckets (trivial case)
for(int i = 0; i < bucketSize; i++) {
if(cuckooFilterTable.get(item.index, i) == null) {
cuckooFilterTable.set(item.index, i, item.fingerPrint);
storedItems++;
return true;
}
if(cuckooFilterTable.get(item.altIndex, i) == null) {
cuckooFilterTable.set(item.altIndex, i, item.fingerPrint);
storedItems++;
return true;
}
}
        // both buckets are full -> kick existing items up to MAX_NUM_KICKS times (randomly select the starting bucket)
int indexOfDestinationBucket = Math.random() < 0.5 ? item.index : item.altIndex;
for(int i = 0; i < MAX_NUM_KICKS; i++) {
// select a random item to kick
int indexOfItemToKick = (int) (Math.random() * bucketSize);
// swap the items
BitSet kickedFingerPrint = cuckooFilterTable.get(indexOfDestinationBucket, indexOfItemToKick);
cuckooFilterTable.set(indexOfDestinationBucket, indexOfItemToKick, item.fingerPrint);
item = new CuckooFilterItem(kickedFingerPrint, indexOfDestinationBucket);
indexOfDestinationBucket = item.altIndex;
// try to insert the items into its alternate location
for(int n = 0; n < bucketSize; n++) {
if(cuckooFilterTable.get(indexOfDestinationBucket, n) == null) {
cuckooFilterTable.set(indexOfDestinationBucket, n, item.fingerPrint);
storedItems++;
return true;
}
}
}
// store the last item that didn't fit, so we can provide a soft failure option
lastVictim = item;
storedItems++;
// return false to indicate that the addition failed
return false;
}
/**
* Queries for the existence of an element in the filter.
*
     * It simply checks if the item exists in one of its associated buckets or if it equals the lastVictim, which is set
* in case the filter ever gets too full.
*
* @param item element to be checked
* @return true if it is "probably" in the filter (~3% false positives) or false if it is "definitely" not in there
*/
private boolean contains(CuckooFilterItem item) {
if(lastVictim != null && item.fingerPrint.equals(lastVictim.fingerPrint)) {
return true;
}
// check existence of our finger print in the first index
for(int i = 0; i < bucketSize; i++) {
if(item.fingerPrint.equals(cuckooFilterTable.get(item.index, i))) {
return true;
}
}
        // check existence of our finger print in the alternate index
for(int i = 0; i < bucketSize; i++) {
if(item.fingerPrint.equals(cuckooFilterTable.get(item.altIndex, i))) {
return true;
}
}
return false;
}
/**
* Deletes an element from the filter.
*
     * It first tries to delete the item from the lastVictim slot if it matches, and otherwise cycles through the
     * corresponding buckets to remove a copy of its fingerprint if one is found.
*
* @param item element that shall be deleted from filter
* @return true if something was deleted matching the item
*/
public boolean delete(CuckooFilterItem item) {
if(lastVictim != null && item.fingerPrint.equals(lastVictim.fingerPrint)) {
lastVictim = null;
storedItems--;
return true;
}
// check existence of our finger print in the first index
for(int i = 0; i < bucketSize; i++) {
if(item.fingerPrint.equals(cuckooFilterTable.get(item.index, i))) {
cuckooFilterTable.delete(item.index, i);
storedItems--;
return true;
}
}
        // check existence of our finger print in the alternate index
for(int i = 0; i < bucketSize; i++) {
if(item.fingerPrint.equals(cuckooFilterTable.get(item.altIndex, i))) {
cuckooFilterTable.delete(item.altIndex, i);
storedItems--;
return true;
}
}
return false;
}
/**
* This method derives the index of an element by the full hash of the item.
*
     * It is primarily used to calculate the original position of the item when the item is freshly inserted into the
     * filter. Since we only store the finger print of the item, we have to use the second version of this method to
     * later retrieve the alternate index (through partial-key cuckoo hashing).
*
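     * <p>
     * Illustrative example: for a hash starting with the bytes {@code 0x12 0x34 0x56 0x78} the loop assembles the
     * value {@code 0x12345678}; with a table size of 512 this maps to bucket {@code 0x12345678 % 512 = 120}.
     * </p>
     *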
* @param elementHash hash of the element
* @return the primary index of the element
*/
private int getIndex(byte[] elementHash) {
// initialize the new address with an empty bit sequence
long index = 0;
// process all address bytes (first 4 bytes)
for(int i = 0; i < 4; i++) {
// copy the bits from the hash into the index
index |= (elementHash[i] & 0xff);
// shift the bits to make space for the next iteration
if(i < 3) {
index <<= 8;
}
}
        // extract the relevant last 32 bits
index &= 0x00000000ffffffffL;
// map the result to the domain of possible table addresses
return (int) (index % (long) tableSize);
}
/**
     * This method allows us to retrieve the "alternate" index of an item based on its current position in the table
     * and its fingerprint.
*
* It is used to move items around that are already part of the table and where the original hash value is not known
     * anymore. The mechanism used to derive the new position is called partial-key cuckoo hashing and, while being
     * relatively weak "in theory", it turns out to distribute the entries more evenly in practice than the math
     * would suggest.
*
* The operation is bi-directional, allowing us to also get the original position by passing the alternate index
* into this function.
*
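     * <p>
     * The bi-directionality follows from the XOR: since {@code altIndex = (index ^ hash(fingerPrint)) % tableSize}
     * and the table size is always a power of two, applying the same operation to {@code altIndex} yields the
     * original {@code index} again.
     * </p>
     *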
* @param fingerPrint finger print of the item
* @param oldIndex the old position of the item in the cuckoo hash table
* @return the alternate index of the element
*/
private int getIndex(BitSet fingerPrint, long oldIndex) {
// calculate the hash of the finger print (partial-key cuckoo hashing)
byte[] fingerPrintHash = hashFunction.digest(BitSetUtils.convertBitSetToByteArray(fingerPrint));
// initialize the new address with an empty bit sequence
long index = 0;
// process all address bytes (first 4 bytes)
        for (int i = 0; i < 4; i++) {
// shift the relevant oldIndex byte into position
byte oldIndexByte = (byte) (((0xffL << (i*8)) & (long) oldIndex) >> (i * 8));
// xor the finger print and the oldIndex bytes and insert the result into the new index
index |= (((fingerPrintHash[i] ^ oldIndexByte) & 0xff) << (i * 8));
}
        // extract the relevant last 32 bits
index &= 0x00000000ffffffffL;
// map the result to the domain of possible table addresses
return (int) (index % (long) tableSize);
}
/**
     * This method allows us to calculate the finger print of an item based on its hash value.
*
* It is used when inserting an item for the first time into the filter and to check the existence of items in the
* filter.
*
* @param hash full hash of the item only known when inserting or checking if an item is contained in the filter
     * @return a BitSet holding the {@code fingerPrintSize} bits of the hash, starting at byte offset 4
* @throws IllegalArgumentException if the hash value provided to the method is too short
*/
private BitSet generateFingerPrint(byte[] hash) throws IllegalArgumentException {
if(hash.length < 20) {
throw new IllegalArgumentException("invalid hash [expected hash to contain at least 20 bytes]");
}
// do a simple conversion of the byte array to a BitSet of the desired length
return BitSetUtils.convertByteArrayToBitSet(hash, 4, fingerPrintSize);
}
/**
* Internal helper class to represent items that are stored in the filter.
*
* It bundles the logic for generating the correct indexes and eases the access to all the properties that are
* related to managing those items while moving and inserting them. By having this little wrapper we only have to do
* the expensive calculations (like generating the hashes) once and can then pass them around.
*/
private class CuckooFilterItem {
private BitSet fingerPrint;
private int index;
private int altIndex;
public CuckooFilterItem(String item) {
this(hashFunction.digest(item.getBytes()));
}
public CuckooFilterItem(byte[] hash) {
fingerPrint = generateFingerPrint(hash);
index = getIndex(hash);
altIndex = getIndex(fingerPrint, index);
}
public CuckooFilterItem(BitSet fingerPrint, int index) {
this.fingerPrint = fingerPrint;
this.index = index;
altIndex = getIndex(fingerPrint, index);
}
}
/**
* This class implements a 2 dimensional table holding BitSets, whereas the first dimension represents the bucket
* index and the 2nd dimension represents the slot in the bucket.
*
* It maps this 2-dimensional data structure to a 1-dimensional BitSet holding the actual values so even for huge
* first-level dimensions, we only call the constructor once - making it very fast.
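     *
     * <p>
     * Mapping example: with {@code bucketSize = 4} and {@code bitSetSize = 8}, each slot occupies 9 bits (1
     * presence flag + 8 fingerprint bits), so the slot {@code (bucketIndex = 2, slotIndex = 1)} starts at bit
     * {@code 2 * 4 * 9 + 1 * 9 = 81} of the underlying BitSet.
     * </p>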
*/
private class CuckooFilterTable {
/**
* Holds the actual data in a "flattened" way to improve performance.
*/
private BitSet data;
/**
* Holds the number of buckets (first dimension).
*/
private int bucketAmount;
/**
* Holds the size of the buckets (second dimension).
*/
private int bucketSize;
/**
* Holds the amount of bits stored in each slot of the bucket.
*/
private int bitSetSize;
/**
* This method initializes our underlying data structure and saves all the relevant parameters.
*
* @param bucketAmount number of buckets
* @param bucketSize size of the buckets
* @param bitSetSize amount of bits stored in each slot of the bucket
*/
public CuckooFilterTable(int bucketAmount, int bucketSize, int bitSetSize) {
this.bucketAmount = bucketAmount;
this.bucketSize = bucketSize;
this.bitSetSize = bitSetSize;
data = new BitSet(bucketAmount * bucketSize * (bitSetSize + 1));
}
/**
* This method allows us to retrieve elements from the table.
*
         * It creates a new BitSet with the value that is stored underneath. Every subsequent call of this method
         * creates a new object, so we do not waste any memory on caching objects.
*
* Note: It is not possible to retrieve a bucket as a whole since it gets mapped to the 1-dimensional structure
* but this is also not necessary for the implementation of the filter.
*
* @param bucketIndex index of the bucket (1st dimension)
* @param slotIndex slot in the bucket (2nd dimension)
* @return stored BitSet or null if the slot is empty
*/
public BitSet get(int bucketIndex, int slotIndex) {
// calculates the mapped indexes
int nullIndex = bucketIndex * bucketSize * (bitSetSize + 1) + slotIndex * (bitSetSize + 1);
if(!data.get(nullIndex)) {
return null;
}
// creates the result object
BitSet result = new BitSet(bitSetSize);
// copies the bits from our underlying data structure to the result
for(int i = nullIndex + 1; i <= nullIndex + bitSetSize; i++) {
int relativeIndex = i - (nullIndex + 1);
result.set(relativeIndex, data.get(i));
}
// returns the final result object
return result;
}
/**
* This method allows us to store a new BitSet at the defined location.
*
         * If we pass null as the object to store, the old item gets deleted and the flag representing "if the slot is
         * filled" gets set to false.
*
* @param bucketIndex index of the bucket (1st dimension)
* @param slotIndex slot in the bucket (2nd dimension)
* @param bitSet object to store
* @return the table itself so we can chain calls
*/
public CuckooFilterTable set(int bucketIndex, int slotIndex, BitSet bitSet) {
// calculates the mapped indexes
int nullIndex = bucketIndex * bucketSize * (bitSetSize + 1) + slotIndex * (bitSetSize + 1);
// mark the location as set or unset
data.set(nullIndex, bitSet != null);
// copy the bits of the source BitSet to the mapped data structure
if(bitSet != null) {
for(int i = nullIndex + 1; i <= nullIndex + bitSetSize; i++) {
int relativeIndex = i - (nullIndex + 1);
data.set(i, bitSet.get(relativeIndex));
}
}
return this;
}
/**
* This method allows us to remove elements from the table.
*
* It internally calls the set method with null as the item to store.
*
* @param bucketIndex index of the bucket (1st dimension)
* @param slotIndex slot in the bucket (2nd dimension)
* @return the table itself so we can chain calls
*/
public CuckooFilterTable delete(int bucketIndex, int slotIndex) {
return set(bucketIndex, slotIndex, null);
}
}
}
| 22,306 | 36.808475 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/log/Logger.java
|
package com.iota.iri.utils.log;
/**
 * Represents a wrapper for the logback Logger, which allows us to add additional features to the logging capabilities of
* IRI.
*/
public interface Logger {
/**
* This method allows us to set a new info message that shall get printed to the screen.
*
* @param message info message that shall get printed
* @return the logger itself to allow the chaining of calls
*/
Logger info(String message);
/**
* This method allows us to set a new debug message that shall get printed to the screen.
*
* @param message debug message that shall get printed
* @return the logger itself to allow the chaining of calls
*/
Logger debug(String message);
/**
* This method allows us to set a new error message that shall get printed to the screen.
*
     * @param message error message that shall get printed
* @return the logger itself to allow the chaining of calls
*/
Logger error(String message);
/**
* This method allows us to set a new error message that shall get printed to the screen.
*
     * @param message error message that shall get printed
     * @param cause exception object that caused the error to happen (will be printed with the complete stacktrace)
     * @return the logger itself to allow the chaining of calls
*/
Logger error(String message, Throwable cause);
/**
* This method is the getter for the enabled flag of the logger.
*
* If the logger is disabled it will simply omit showing log messages.
*
* @return true if the logger shall output messages and false otherwise (the logger is enabled by default)
*/
boolean getEnabled();
/**
* This method is the setter for the enabled flag of the logger.
*
* If the logger is disabled it will simply omit showing log messages.
*
* @param enabled true if the logger shall output messages and false otherwise (the logger is enabled by default)
* @return the logger itself to allow the chaining of calls
*/
Logger setEnabled(boolean enabled);
}
| 2,072 | 34.135593 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/log/ProgressLogger.java
|
package com.iota.iri.utils.log;
import org.slf4j.Logger;
/**
* Represents a wrapper for the {@link Logger} used by IRI that implements a logic to report the progress of a single
* task.
*
* The progress is calculated by comparing the current step to the expected step count and shown as a percentage value.
*/
public interface ProgressLogger {
/**
     * Starts the logging by (re)setting the current step to 0 and triggering the output of the current progress.
*
* @return the logger itself to allow the chaining of calls
*/
ProgressLogger start();
/**
* Does the same as {@link #start()} but sets the expected step count to the given value.
*
* This allows us to initialize and start the logging in a single call.
*
* @param stepCount number of steps that the task involves
* @return the logger itself to allow the chaining of calls
*/
ProgressLogger start(int stepCount);
/**
* Increases the current step by one and triggers the output of the current progress.
*
* This should be called whenever we finish a sub task.
*
* @return the logger itself to allow the chaining of calls
*/
ProgressLogger progress();
/**
* Sets the current step to the given value and triggers the output of the current progress.
*
* @param currentStep the current step that was just finished
* @return the logger itself to allow the chaining of calls
*/
ProgressLogger progress(int currentStep);
/**
* Sets the current step to the expected step count and triggers the output of the current progress.
*
* This allows us to dump the progress when the processing of the task succeeds.
*
* @return the logger itself to allow the chaining of calls
*/
ProgressLogger finish();
/**
* Marks the task as failed and triggers the output of a status message informing us about the failure.
*
* @return the logger itself to allow the chaining of calls
*/
ProgressLogger abort();
/**
* Marks the task as failed and triggers the output of a status message informing us about the failure.
*
     * The shown error message will contain the reason for the error given by the passed in {@link Throwable}.
*
* @param reason reason for the failure
* @return the logger itself to allow the chaining of calls
*/
ProgressLogger abort(Throwable reason);
/**
* Returns the current finished step of this logger.
*
* @return current finished step of this logger
*/
int getCurrentStep();
/**
* Sets the current step to the given value without triggering an output.
*
* @param currentStep the current step that was just finished
* @return the logger itself to allow the chaining of calls
*/
ProgressLogger setCurrentStep(int currentStep);
/**
* Returns the expected step count of this logger.
*
* @return the expected step count of this logger
*/
int getStepCount();
/**
* Sets the expected step count to the given value without triggering an output.
*
* @param stepCount the expected step count of this logger
* @return the logger itself to allow the chaining of calls
*/
ProgressLogger setStepCount(int stepCount);
/**
* This method is the getter for the enabled flag of the logger.
*
* If the logger is disabled it will simply omit showing log messages.
*
* @return true if the logger shall output messages and false otherwise (the logger is enabled by default)
*/
boolean getEnabled();
/**
* This method is the setter for the enabled flag of the logger.
*
* If the logger is disabled it will simply omit showing log messages.
*
* @param enabled true if the logger shall output messages and false otherwise (the logger is enabled by default)
* @return the logger itself to allow the chaining of calls
*/
ProgressLogger setEnabled(boolean enabled);
}
| 4,062 | 32.578512 | 119 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/log/interval/IntervalLogger.java
|
package com.iota.iri.utils.log.interval;
import com.iota.iri.utils.log.Logger;
import org.slf4j.LoggerFactory;
import java.util.Objects;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* <p>
 * This class represents a wrapper for the {@link org.slf4j.Logger} used by IRI that implements logic to rate-limit
 * the output on the console.
* </p>
* <p>
* Instead of printing all messages immediately and unnecessarily spamming the console, it only prints messages every
* few seconds and if the message has changed since the last output.
* </p>
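 *
 * <p>
 * A usage sketch (class name and message contents are illustrative):
 * </p>
 * <pre>{@code
 * IntervalLogger log = new IntervalLogger(MyService.class, 3000);
 * for (int i = 0; i < 1_000_000; i++) {
 *     log.info("processed " + i + " transactions"); // shown at most once every 3 seconds
 * }
 * }</pre>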
*/
public class IntervalLogger implements Logger {
/**
     * Holds the default interval in milliseconds in which status messages get printed.
*/
private static final int DEFAULT_LOG_INTERVAL = 3000;
/**
* Holds a reference to the underlying logback logger.
*/
private final org.slf4j.Logger delegate;
/**
     * Holds the interval in milliseconds in which status messages get printed.
*/
private final int logInterval;
/**
* Holds the last message that was received by the logger and shall get printed to the console.
*/
private Message lastReceivedMessage;
/**
* Holds the last status message that was printed.
*/
private Message lastPrintedMessage;
/**
* Holds a flag that is used to determine if an output was scheduled already.
*/
private AtomicBoolean outputScheduled = new AtomicBoolean(false);
/**
* Holds a reference to the scheduled job which allows us to cancel its execution if we force the output before.
*/
private Future<?> scheduledJob;
/**
* Holds the manager for the {@link Thread} that schedules the delayed output.
*/
private final ScheduledExecutorService outputScheduler = Executors.newScheduledThreadPool(1);
/**
     * Holds the timestamp of when the last message was printed.
*/
private long lastLogTime = 0;
/**
* Flag that indicates if the output is enabled.
*/
private boolean enabled = true;
/**
* Does the same as {@link #IntervalLogger(Class, int)} but defaults to the {@link #DEFAULT_LOG_INTERVAL} for the
* interval.
*
* @param clazz class that is used to identify the origin of the log messages
*/
public IntervalLogger(Class<?> clazz) {
this(clazz, DEFAULT_LOG_INTERVAL);
}
/**
* Does the same as {@link #IntervalLogger(org.slf4j.Logger, int)} but defaults to the {@link #DEFAULT_LOG_INTERVAL}
* for the interval.
*
* @param delegate logback logger for issuing the messages
*/
public IntervalLogger(org.slf4j.Logger delegate) {
this(delegate, DEFAULT_LOG_INTERVAL);
}
/**
* Does the same as {@link #IntervalLogger(org.slf4j.Logger, int)} but creates a new logback logger for the given
* class.
*
* @param clazz class that is used to identify the origin of the log messages
* @param logInterval time in milliseconds between log messages
*/
public IntervalLogger(Class<?> clazz, int logInterval) {
this(LoggerFactory.getLogger(clazz), logInterval);
}
/**
* Creates a {@link Logger} for the given class that prints messages only every {@code logInterval} milliseconds.
*
* It stores the passed in parameters in its private properties to be able to access them later on.
*
* @param delegate logback logger for issuing the messages
* @param logInterval time in milliseconds between log messages
*/
public IntervalLogger(org.slf4j.Logger delegate, int logInterval) {
this.delegate = delegate;
this.logInterval = logInterval;
}
/**
* This method returns the underlying logback Logger.
* <p>
* It can be used to issue log entries directly to the underlying logger without interfering with the logic of this
* class.
* </p>
*
* @return the underlying logback Logger
*/
public org.slf4j.Logger delegate() {
return delegate;
}
/**
* {@inheritDoc}
*
* <p>
* It checks if the given message is new and then triggers the output.
* </p>
*
* @param message info message that shall get printed
*/
@Override
public IntervalLogger info(String message) {
Message newMessage = new InfoMessage(message);
if (!newMessage.equals(lastReceivedMessage)) {
lastReceivedMessage = newMessage;
triggerOutput();
}
return this;
}
/**
* {@inheritDoc}
*
* <p>
* It checks if the given message is new and then triggers the output.
* </p>
*/
@Override
public IntervalLogger debug(String message) {
Message newMessage = new DebugMessage(message);
if (!newMessage.equals(lastReceivedMessage)) {
lastReceivedMessage = newMessage;
triggerOutput();
}
return this;
}
/**
* {@inheritDoc}
*
* <p>
* It checks if the given message is new and then triggers the output.
* </p>
* <p>
* Error messages will always get dumped immediately instead of scheduling their output. Since error messages are
* usually a sign of some misbehaviour of the node we want to be able to see them when they appear (to be able to
* track down bugs more easily).
* </p>
*/
@Override
public IntervalLogger error(String message) {
return error(message, null);
}
/**
* {@inheritDoc}
*
* <p>
* It checks if the given message is new and then triggers the output.
* </p>
* <p>
* Error messages will always get dumped immediately instead of scheduling their output. Since error messages are
* usually a sign of some misbehaviour of the node we want to be able to see them when they appear (to be able to
* track down bugs more easily).
* </p>
*/
@Override
public IntervalLogger error(String message, Throwable cause) {
Message newMessage = new ErrorMessage(message, cause);
if (!newMessage.equals(lastReceivedMessage)) {
lastReceivedMessage = newMessage;
triggerOutput(true);
}
return this;
}
/**
* {@inheritDoc}
*/
@Override
public boolean getEnabled() {
return enabled;
}
/**
* {@inheritDoc}
*/
@Override
public IntervalLogger setEnabled(boolean enabled) {
this.enabled = enabled;
return this;
}
/**
* Does the same as {@link #triggerOutput(boolean)} but defaults to letting the logger decide when to print the
* message.
*/
public void triggerOutput() {
triggerOutput(false);
}
/**
* <p>
* Triggers the output of the last received message.
* </p>
* <p>
* It either prints the message immediately (if enough time has passed since the last output or requested by the
* caller) or schedules it for the next interval.
* </p>
*
* @param printImmediately flag indicating if the messages should be scheduled or printed immediately
*/
public void triggerOutput(boolean printImmediately) {
if (lastReceivedMessage != null && (printImmediately ||
System.currentTimeMillis() - lastLogTime >= logInterval)) {
lastReceivedMessage.output();
} else {
scheduleOutput();
}
}
/**
* <p>
* This method schedules the output of the last received message by spawning a {@link Thread} that will print the
* new message after the given timeout.
* </p>
* <p>
* When creating the {@link Thread} it copies its name so the output is "transparent" to the user and the effect
* is the same as dumping the message manually through the {@link org.slf4j.Logger} object itself.
* </p>
*/
private void scheduleOutput() {
if (outputScheduled.compareAndSet(false, true)) {
String currentThreadName = Thread.currentThread().getName();
scheduledJob = outputScheduler.schedule(() -> {
Thread.currentThread().setName(currentThreadName);
triggerOutput(true);
}, Math.max(logInterval - (System.currentTimeMillis() - lastLogTime), 1), TimeUnit.MILLISECONDS);
}
}
/**
* Represents the basic logic that is shared by all message types that are supported by this logger.
*/
private abstract class Message {
/**
* Holds the message that shall get printed.
*/
protected final String message;
/**
* Creates a message that gets managed by this logger.
*
* It stores the provided message in the internal property.
*
* @param message message that shall get printed
*/
public Message(String message) {
this.message = message;
}
/**
* This method triggers the output of the given message.
*
* <p>
* It first cancels any scheduled job because the latest provided message will be printed by this call already
* and then triggers the printing of the message through the instance specific {@link #print()} method.
* </p>
*
         * We only print the message if the same message was not printed already before.
*/
public void output() {
if (scheduledJob != null) {
scheduledJob.cancel(true);
}
if (!this.equals(lastPrintedMessage)) {
if (enabled) {
print();
}
lastPrintedMessage = this;
lastLogTime = System.currentTimeMillis();
}
outputScheduled.set(false);
}
/**
* This method allows us to use the Messages in HashMaps and other collections that use the hashCode method to
         * identify objects. Even though this is not actively used, we implement it anyway to be on the safe side in
         * case we ever need it.
*
* @return a unique int identifier that can be used to address equal objects
*/
@Override
public int hashCode() {
return Objects.hash(getClass(), message);
}
/**
* This method allows us to compare different {@link Message}s for equality and check if we have received or
* processed the same {@link Message} already.
*
* @param obj object that shall be compared to this instance
* @return true if it represents the same message or false otherwise
*/
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj == null || !getClass().equals(obj.getClass())) {
return false;
}
return Objects.equals(message, ((Message) obj).message);
}
/**
* This method handles the actual output of messages through the {@link org.slf4j.Logger} instance.
* <p>
         * It gets called by {@link #output()} once it has been determined that the message actually differs from the
         * last printed message, and simply issues the output through the corresponding log level of the underlying
         * logger.
* </p>
*/
protected abstract void print();
}
/**
* Represents a message for the "info logging level".
*/
private class InfoMessage extends Message {
/**
* Creates a message that gets dumped of the "info logging level".
*
* @param message message that shall get printed
*/
public InfoMessage(String message) {
super(message);
}
/**
* {@inheritDoc}
*/
@Override
protected void print() {
delegate().info(message);
}
}
/**
* Represents a message for the "debug logging level".
*/
private class DebugMessage extends Message {
/**
* Creates a message that gets dumped of the "debug logging level".
*
* @param message message that shall get printed
*/
public DebugMessage(String message) {
super(message);
}
/**
* {@inheritDoc}
*/
@Override
protected void print() {
delegate().debug(message);
}
}
/**
* Represents a message for the "error logging level".
*/
private class ErrorMessage extends Message {
/**
* Holds the cause of the error message.
*/
private final Throwable cause;
/**
* Creates a message that gets dumped of the "error logging level".
*
* @param message message that shall get printed
* @param cause exception object that caused the error to happen (will be printed with the complete stacktrace)
*/
public ErrorMessage(String message, Throwable cause) {
super(message);
this.cause = cause;
}
/**
* {@inheritDoc}
*/
@Override
public int hashCode() {
return Objects.hash(getClass(), message, cause);
}
/**
* {@inheritDoc}
*/
@Override
public boolean equals(Object obj) {
return super.equals(obj) && cause == ((ErrorMessage) obj).cause;
}
/**
* {@inheritDoc}
*
* It also includes a stack trace of the cause of the error if it was provided when creating this instance.
*/
@Override
protected void print() {
if (cause == null) {
delegate().error(message);
} else {
delegate().error(message, cause);
}
}
}
}
| 14,250 | 29.321277 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/log/interval/IntervalProgressLogger.java
|
package com.iota.iri.utils.log.interval;
import com.iota.iri.utils.log.ProgressLogger;
import org.slf4j.Logger;
import java.text.DecimalFormat;
/**
* Implements the basic contract of the {@link ProgressLogger} while showing the progress as messages in the console.
*
* Instead of printing all messages immediately and unnecessarily spamming the console output, it rate limits the amount
* of messages shown and only prints updates in a pre-defined interval.
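 *
 * <p>
 * A usage sketch (task name, class name and step count are illustrative):
 * </p>
 * <pre>{@code
 * ProgressLogger progress = new IntervalProgressLogger("Importing snapshot", MyService.class).start(10_000);
 * for (int i = 0; i < 10_000; i++) {
 *     // ... perform one unit of work ...
 *     progress.progress();  // prints "Importing snapshot: 42.00% ..." at most once per interval
 * }
 * progress.finish();        // prints "Importing snapshot: 100.00% ... [DONE]" immediately
 * }</pre>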
*/
public class IntervalProgressLogger implements ProgressLogger {
/**
* Holds the default interval (in milliseconds) in which the logger shows messages.
*/
private static final int DEFAULT_LOG_INTERVAL = 3000;
/**
* Holds the name of the task that this logger represents.
*/
private final String taskName;
/**
* Holds the logger that takes care of showing our messages.
*/
private final IntervalLogger intervalLogger;
/**
* The last finished step of the task.
*/
private int currentStep = 0;
/**
* The total amount of steps of this task.
*/
private int stepCount = 1;
/**
* Does the same as {@link #IntervalProgressLogger(String, Class, int)} but defaults to
* {@link #DEFAULT_LOG_INTERVAL} for the interval.
*
* @param taskName name of the task that shall be shown on the screen
* @param clazz class that is used to identify the origin of the log messages
*/
public IntervalProgressLogger(String taskName, Class<?> clazz) {
this(taskName, clazz, DEFAULT_LOG_INTERVAL);
}
/**
* Does the same as {@link #IntervalProgressLogger(String, Logger, int)} but defaults to
* {@link #DEFAULT_LOG_INTERVAL} for the interval.
*
* @param taskName name of the task that shall be shown on the screen
* @param logger logback logger for issuing the messages
*/
public IntervalProgressLogger(String taskName, Logger logger) {
this(taskName, logger, DEFAULT_LOG_INTERVAL);
}
/**
* Creates a {@link ProgressLogger} that shows its progress in a pre-defined interval.
*
* @param taskName name of the task that shall be shown on the screen
* @param clazz class that is used to identify the origin of the log messages
* @param logInterval the time in milliseconds between consecutive log messages
*/
public IntervalProgressLogger(String taskName, Class<?> clazz, int logInterval) {
this.taskName = taskName;
this.intervalLogger = new IntervalLogger(clazz, logInterval);
}
/**
* Creates a {@link ProgressLogger} that shows its progress in a pre-defined interval.
*
* @param taskName name of the task that shall be shown on the screen
* @param logger logback logger for issuing the messages
* @param logInterval the time in milliseconds between consecutive log messages
*/
public IntervalProgressLogger(String taskName, Logger logger, int logInterval) {
this.taskName = taskName;
this.intervalLogger = new IntervalLogger(logger, logInterval);
}
/**
* {@inheritDoc}
*/
@Override
public IntervalProgressLogger start() {
return setCurrentStep(0).triggerOutput();
}
/**
* {@inheritDoc}
*/
@Override
public IntervalProgressLogger start(int stepCount) {
return setStepCount(stepCount).setCurrentStep(0).triggerOutput();
}
/**
* {@inheritDoc}
*/
@Override
public IntervalProgressLogger progress() {
return setCurrentStep(++currentStep).triggerOutput();
}
/**
* {@inheritDoc}
*/
@Override
public IntervalProgressLogger progress(int currentStep) {
return setCurrentStep(currentStep).triggerOutput();
}
/**
* {@inheritDoc}
*
* It will show the message immediately instead of waiting for the interval to pass.
*/
@Override
public IntervalProgressLogger finish() {
return setCurrentStep(stepCount).triggerOutput(true);
}
/**
* {@inheritDoc}
*
* It will show the message immediately instead of waiting for the interval to pass.
*/
@Override
public IntervalProgressLogger abort() {
return abort(null);
}
/**
* {@inheritDoc}
*
* It will show the message immediately instead of waiting for the interval to pass.
*/
@Override
public IntervalProgressLogger abort(Throwable cause) {
double progress = Math.min(100d, ((double) currentStep / (double) stepCount) * 100d);
        String logMessage = taskName + ": " + new DecimalFormat("0.00").format(progress) + "% ... [FAILED]";
if (cause != null) {
intervalLogger.error(logMessage, cause);
} else {
intervalLogger.error(logMessage);
}
return this;
}
/**
* {@inheritDoc}
*/
@Override
public int getCurrentStep() {
return currentStep;
}
/**
* {@inheritDoc}
*/
@Override
public IntervalProgressLogger setCurrentStep(int currentStep) {
this.currentStep = currentStep;
return this;
}
/**
* {@inheritDoc}
*/
@Override
public int getStepCount() {
return stepCount;
}
/**
* {@inheritDoc}
*/
@Override
public IntervalProgressLogger setStepCount(int stepCount) {
this.stepCount = stepCount;
return this;
}
/**
* {@inheritDoc}
*/
@Override
public boolean getEnabled() {
return intervalLogger.getEnabled();
}
/**
* {@inheritDoc}
*/
@Override
public IntervalProgressLogger setEnabled(boolean enabled) {
this.intervalLogger.setEnabled(enabled);
return this;
}
/**
     * Does the same as {@link #triggerOutput(boolean)} but defaults to letting the logger decide when to print the
     * message.
*
* @return the logger itself to allow the chaining of calls
*/
private IntervalProgressLogger triggerOutput() {
return triggerOutput(false);
}
/**
* This method triggers the output of the logger.
*
* It calculates the current progress, generates the log message and passes it on to the underlying status logger.
*
* @param printImmediately flag indicating if the message shall be printed immediately
* @return the logger itself to allow the chaining of calls
*/
private IntervalProgressLogger triggerOutput(boolean printImmediately) {
if (stepCount != 0) {
double progress = Math.min(100d, ((double) currentStep / (double) stepCount) * 100d);
String logMessage = taskName + ": " + new DecimalFormat("0.00").format(progress) + "% ..." +
(currentStep >= stepCount ? " [DONE]" : "");
intervalLogger.info(logMessage);
if (printImmediately) {
intervalLogger.triggerOutput(true);
}
}
return this;
}
}
| 7,057 | 27.808163 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/thread/BoundedScheduledExecutorService.java
|
package com.iota.iri.utils.thread;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
/**
* <p>
* This class represents a {@link SilentScheduledExecutorService} that accepts only a pre-defined amount of tasks that
* can be queued or executed at the same time. All tasks exceeding the defined limit will be ignored (instead of being
* queued) by either throwing a {@link RejectedExecutionException} or returning {@code null} depending on the method we
* call (non-silent vs silent).
* </p>
* <p>
* Whenever a non-recurring task finishes (or a recurring one is cancelled through its {@link Future}), it makes space
* for a new task. This is useful for classes like the {@link com.iota.iri.utils.log.interval.IntervalLogger} that want
* to delay an action if and only if there is no other delayed action queued already.
* </p>
* <p>
* Note: In contrast to other existing implementations like the SizedScheduledExecutorService of the apache package,
* this class is thread-safe and will only allow to spawn and queue the exact amount of tasks defined during its
* creation (since it does not rely on approximate numbers like the queue size).
* </p>
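 *
 * <p>
 * A usage sketch (the task bodies are illustrative):
 * </p>
 * <pre>{@code
 * BoundedScheduledExecutorService executor = new BoundedScheduledExecutorService(1);
 * ScheduledFuture<?> first = executor.silentSchedule(() -> {}, 100, TimeUnit.MILLISECONDS);
 * ScheduledFuture<?> second = executor.silentSchedule(() -> {}, 100, TimeUnit.MILLISECONDS);
 * // first != null, second == null: the single slot stays reserved until the first task completes
 * }</pre>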
*/
public class BoundedScheduledExecutorService implements SilentScheduledExecutorService, ReportingExecutorService {
/**
* Holds the maximum amount of tasks that can be submitted for execution.
*/
private final int capacity;
/**
* Holds the underlying {@link ScheduledExecutorService} that manages the Threads in the background.
*/
private final ScheduledExecutorService delegate;
/**
     * Holds a set of scheduled tasks that are going to be executed by this
* {@link ScheduledExecutorService}.
*/
private final Set<TaskDetails> scheduledTasks = ConcurrentHashMap.newKeySet();
/**
     * Thread-safe counter that is used to determine exactly how many tasks have been scheduled already.
* <p>
* Note: Whenever a task finishes, we clean up the used resources and make space for new tasks.
* </p>
*/
private AtomicInteger scheduledTasksCounter = new AtomicInteger(0);
/**
* <p>
     * Creates an executor service that accepts only a pre-defined amount of tasks that can be queued and run at
* the same time.
* </p>
* <p>
* All tasks exceeding the defined limit will be ignored (instead of being queued) by either throwing a
* {@link RejectedExecutionException} or returning {@code null} depending on the method we call (non-silent vs
* silent).
* </p>
*
* @param capacity the amount of tasks that can be scheduled simultaneously
*/
public BoundedScheduledExecutorService(int capacity) {
this.capacity = capacity;
delegate = Executors.newScheduledThreadPool(capacity);
}
//region METHODS OF ReportingExecutorService INTERFACE /////////////////////////////////////////////////////////////
/**
* {@inheritDoc}
*
* <p>
* It simply adds the task to the internal set of scheduled tasks.
* </p>
*/
@Override
public void onScheduleTask(TaskDetails taskDetails) {
scheduledTasks.add(taskDetails);
}
@Override
public void onStartTask(TaskDetails taskDetails) {
// doesn't do anything but allows a child class to hook into this event by overriding this method
}
@Override
public void onFinishTask(TaskDetails taskDetails, Throwable error) {
// doesn't do anything but allows a child class to hook into this event by overriding this method
}
@Override
public void onCancelTask(TaskDetails taskDetails) {
// doesn't do anything but allows a child class to hook into this event by overriding this method
}
/**
* {@inheritDoc}
*
* <p>
* It frees the reserved resources by decrementing the {@link #scheduledTasksCounter} and removing the task from the
* {@link #scheduledTasks} set.
* </p>
*/
@Override
public void onCompleteTask(TaskDetails taskDetails, Throwable error) {
scheduledTasks.remove(taskDetails);
scheduledTasksCounter.decrementAndGet();
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
//region METHODS OF SilentScheduledExecutorService INTERFACE ///////////////////////////////////////////////////////
/**
* {@inheritDoc}
*
* <p>
* Note: Since the {@link ScheduledFuture} returned by this method allows to cancel jobs without their unwinding
* logic being executed, we wrap the returned {@link ScheduledFuture} AND the {@link Runnable} to correctly
* free the resources if its {@link ScheduledFuture#cancel(boolean)} method is called.
* </p>
*/
@Override
public ScheduledFuture<?> silentSchedule(Runnable task, long delay, TimeUnit unit) {
return reserveCapacity(1) ? wrapScheduledFuture(
wrappedTask -> delegate.schedule(wrappedTask, delay, unit),
task,
new TaskDetails()
.setDelay(delay)
.setTimeUnit(unit)
) : null;
}
/**
* {@inheritDoc}
*
* <p>
* Note: Since the {@link ScheduledFuture} returned by this method allows to cancel jobs without their unwinding
* logic being executed, we wrap the returned {@link ScheduledFuture} AND the {@link Callable} to correctly
* free the resources if its {@link ScheduledFuture#cancel(boolean)} method is called.
* </p>
*/
@Override
public <V> ScheduledFuture<V> silentSchedule(Callable<V> task, long delay, TimeUnit unit) {
return reserveCapacity(1) ? wrapScheduledFuture(
wrappedTask -> delegate.schedule(wrappedTask, delay, unit),
task,
new TaskDetails()
.setDelay(delay)
.setTimeUnit(unit)
) : null;
}
/**
* {@inheritDoc}
*
* <p>
* Note: Since the {@link ScheduledFuture} returned by this method allows to cancel jobs without their unwinding
* logic being executed, we wrap the returned {@link ScheduledFuture} AND the {@link Runnable} to correctly
* free the resources if its {@link ScheduledFuture#cancel(boolean)} method is called.
* </p>
*/
@Override
public ScheduledFuture<?> silentScheduleAtFixedRate(Runnable task, long initialDelay, long period,
TimeUnit unit) {
return reserveCapacity(1) ? wrapScheduledFuture(
wrappedTask -> delegate.scheduleAtFixedRate(wrappedTask, initialDelay, period, unit),
task,
new TaskDetails()
.setDelay(initialDelay)
.setInterval(period)
.setTimeUnit(unit)
) : null;
}
/**
* {@inheritDoc}
*
* <p>
* Note: Since the {@link ScheduledFuture} returned by this method allows to cancel jobs without their unwinding
* logic being executed, we wrap the returned {@link ScheduledFuture} AND the {@link Runnable} to correctly
* free the resources if its {@link ScheduledFuture#cancel(boolean)} method is called.
* </p>
*/
@Override
public ScheduledFuture<?> silentScheduleWithFixedDelay(Runnable task, long initialDelay, long delay,
TimeUnit unit) {
return reserveCapacity(1) ? wrapScheduledFuture(
wrappedTask -> delegate.scheduleWithFixedDelay(wrappedTask, initialDelay, delay, unit),
task,
new TaskDetails()
.setDelay(initialDelay)
.setInterval(delay)
.setTimeUnit(unit)
) : null;
}
/**
* {@inheritDoc}
*
* <p>
* Note: Since the {@link Future} returned by this method allows to cancel jobs without their unwinding logic being
* executed, we wrap the returned {@link Future} AND the {@link Callable} to correctly free the resources if
* its {@link Future#cancel(boolean)} method is called.
* </p>
*/
@Override
public <T> Future<T> silentSubmit(Callable<T> task) {
return reserveCapacity(1) ? wrapFuture(
delegate::submit,
task,
new TaskDetails()
) : null;
}
/**
* {@inheritDoc}
*
* <p>
* Note: Since the {@link Future} returned by this method allows to cancel jobs without their unwinding logic being
* executed, we wrap the returned {@link Future} AND the {@link Runnable} to correctly free the resources if
* its {@link Future#cancel(boolean)} method is called.
* </p>
*/
@Override
public Future<?> silentSubmit(Runnable task) {
return reserveCapacity(1) ? wrapFuture(
delegate::submit,
task,
new TaskDetails()
) : null;
}
/**
* {@inheritDoc}
*
* <p>
* Note: Since the {@link Future} returned by this method allows to cancel jobs without their unwinding logic being
* executed, we wrap the returned {@link Future} AND the {@link Runnable} to correctly free the resources if
* its {@link Future#cancel(boolean)} method is called.
* </p>
*/
@Override
public <T> Future<T> silentSubmit(Runnable task, T result) {
return reserveCapacity(1) ? wrapFuture(
wrappedTask -> delegate.submit(wrappedTask, result),
task,
new TaskDetails()
) : null;
}
/**
* {@inheritDoc}
*
* <p>
* Note: Since the {@link Future}s will all be finished (and cannot be cancelled anymore) when the underlying method
* returns, we only wrap the {@link Callable}s to correctly free the resources and omit wrapping the returned
* {@link Future}s.
* </p>
*/
@Override
public <T> List<Future<T>> silentInvokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException {
return reserveCapacity(tasks.size()) ? delegate.invokeAll(wrapTasks(tasks, new TaskDetails())) : null;
}
/**
* {@inheritDoc}
*
* <p>
* Note: Since the {@link Future}s will all be finished (and cannot be cancelled anymore) when the underlying method
* returns, we only wrap the {@link Callable}s to correctly free the resources and omit wrapping the returned
* {@link Future}s.
* </p>
*/
@Override
public <T> List<Future<T>> silentInvokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException {
return reserveCapacity(tasks.size()) ? delegate.invokeAll(
wrapTasks(
tasks,
new TaskDetails()
.setTimeout(timeout)
.setTimeUnit(unit)
),
timeout,
unit
) : null;
}
/**
* {@inheritDoc}
*
* <p>
* Note: Since the {@link Future}s are not passed to the caller, we only wrap the {@link Callable}s to correctly
* free the resources and omit wrapping the {@link Future}s as well.
* </p>
*/
@Override
public <T> T silentInvokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException,
ExecutionException {
return reserveCapacity(tasks.size()) ? delegate.invokeAny(wrapTasks(tasks, new TaskDetails())) : null;
}
/**
* {@inheritDoc}
*
* <p>
* Note: Since the {@link Future}s are not passed to the caller, we only wrap the {@link Callable}s to correctly
* free the resources and omit wrapping the related {@link Future}s as well.
* </p>
*/
@Override
public <T> T silentInvokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws
InterruptedException, ExecutionException, TimeoutException {
return reserveCapacity(tasks.size()) ? delegate.invokeAny(
wrapTasks(
tasks,
new TaskDetails()
.setTimeout(timeout)
.setTimeUnit(unit)
),
timeout,
unit
) : null;
}
/**
* {@inheritDoc}
*
* <p>
* Note: Since there is no {@link Future} passed to the caller, we only wrap the {@link Runnable} to correctly free
* the resources.
* </p>
*/
@Override
public void silentExecute(Runnable task) {
if (reserveCapacity(1)) {
delegate.execute(wrapTask(task, new TaskDetails()));
}
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
//region METHODS OF ScheduledExecutorService INTERFACE /////////////////////////////////////////////////////////////
@Override
public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
return throwCapacityExhaustedIfNull(silentSchedule(command, delay, unit));
}
@Override
public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) {
return throwCapacityExhaustedIfNull(silentSchedule(callable, delay, unit));
}
@Override
public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) {
return throwCapacityExhaustedIfNull(silentScheduleAtFixedRate(command, initialDelay, period, unit));
}
@Override
public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit) {
return throwCapacityExhaustedIfNull(silentScheduleWithFixedDelay(command, initialDelay, delay, unit));
}
@Override
public <T> Future<T> submit(Callable<T> task) {
return throwCapacityExhaustedIfNull(silentSubmit(task));
}
@Override
public <T> Future<T> submit(Runnable task, T result) {
return throwCapacityExhaustedIfNull(silentSubmit(task, result));
}
@Override
public Future<?> submit(Runnable task) {
return throwCapacityExhaustedIfNull(silentSubmit(task));
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException {
return throwCapacityExhaustedIfNull(silentInvokeAll(tasks));
}
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException {
return throwCapacityExhaustedIfNull(silentInvokeAll(tasks, timeout, unit));
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException {
return throwCapacityExhaustedIfNull(silentInvokeAny(tasks));
}
@Override
public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws
InterruptedException, ExecutionException, TimeoutException {
return throwCapacityExhaustedIfNull(silentInvokeAny(tasks, timeout, unit));
}
@Override
public void execute(Runnable command) {
if (reserveCapacity(1)) {
delegate.execute(wrapTask(command, new TaskDetails()));
} else {
throw new RejectedExecutionException("the capacity is exhausted");
}
}
/**
* {@inheritDoc}
*
* <p>
* In addition to delegating the method call to the internal {@link ScheduledExecutorService}, we call the cancel
* logic for recurring tasks because shutdown prevents them from firing again. If these "cancelled" jobs are
* scheduled for execution (and not running right now), we also call their
* {@link #onCompleteTask(TaskDetails, Throwable)} callback to "report" that they have finished (otherwise this will
* be fired inside the wrapped task).
* </p>
*/
@Override
public void shutdown() {
delegate.shutdown();
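// recurring tasks can never fire again after the shutdown, so we treat them as cancelled and, if they
// are currently idle (not mid-execution), report their completion right away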
for (TaskDetails currentTask : scheduledTasks) {
if (currentTask.getInterval() != null) {
onCancelTask(currentTask);
if (currentTask.getScheduledForExecution().compareAndSet(true, false)) {
onCompleteTask(currentTask, null);
}
}
}
}
/**
* {@inheritDoc}
*
* <p>
* Before delegating the method call to the internal {@link ScheduledExecutorService}, we call the
* {@link #onCancelTask(TaskDetails)} callback for all scheduled tasks and fire the
* {@link #onCompleteTask(TaskDetails, Throwable)} callback for all tasks that are not being executed right now
* (otherwise this will be fired inside the wrapped task).
* </p>
*/
@Override
public List<Runnable> shutdownNow() {
if (!delegate.isShutdown()) {
for (TaskDetails currentTask : scheduledTasks) {
onCancelTask(currentTask);
if (currentTask.getScheduledForExecution().compareAndSet(true, false)) {
onCompleteTask(currentTask, null);
}
}
}
return delegate.shutdownNow();
}
@Override
public boolean isShutdown() {
return delegate.isShutdown();
}
@Override
public boolean isTerminated() {
return delegate.isTerminated();
}
@Override
public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
return delegate.awaitTermination(timeout, unit);
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
//region PRIVATE UTILITY METHODS AND MEMBERS ///////////////////////////////////////////////////////////////////////
/**
* <p>
* This method wraps the passed in task to automatically call the callbacks for its lifecycle.
* </p>
* <p>
* The lifecycle methods are for example used to manage the resources that are reserved for this task.
* </p>
*
* @param task the raw task that shall be wrapped with the resource freeing logic
* @param taskDetails metadata holding the relevant information of the task
* @param future the wrapped {@link Future} that this task belongs to - it allows us to check if the task was
* cancelled from the outside
* @param <V> the return type of the task
* @return a wrapped task that cleans up its reserved resources upon completion
*/
private <V> Callable<V> wrapTask(Callable<V> task, TaskDetails taskDetails, Future<V> future) {
onScheduleTask(taskDetails);
return () -> {
if (taskDetails.getScheduledForExecution().compareAndSet(true, false)) {
Throwable error = null;
try {
onStartTask(taskDetails);
taskDetails.getExecutionCount().incrementAndGet();
return task.call();
} catch (Exception e) {
error = e;
throw e;
} finally {
onFinishTask(taskDetails, error);
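// one-shot tasks, crashed tasks, cancelled tasks and tasks of a shut down executor release their
// reserved slot here; a healthy recurring task keeps its slot and is re-armed for the next run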
if (taskDetails.getInterval() == null || error != null || future == null || future.isCancelled() ||
delegate.isShutdown()) {
onCompleteTask(taskDetails, error);
} else {
taskDetails.getScheduledForExecution().set(true);
}
}
}
return null;
};
}
/**
* <p>
* This method does the same as {@link #wrapTask(Callable, TaskDetails, Future)} but defaults to a task that is not
* associated to a user accessible {@link Future}.
* </p>
* <p>
* This method is used whenever the {@link Future} is not returned by the underlying method so we don't need to wrap
* it.
* </p>
*
* @param task the raw task that shall be wrapped with the resource freeing logic
* @param taskDetails metadata holding the relevant information of the task
* @param <V> the return type of the task
* @return a wrapped task that cleans up its reserved resources upon completion
*/
private <V> Callable<V> wrapTask(Callable<V> task, TaskDetails taskDetails) {
return wrapTask(task, taskDetails, null);
}
/**
* <p>
* This method wraps the passed in task to automatically call the callbacks for its lifecycle.
* </p>
* <p>
* The lifecycle methods are for example used to manage the resources that are reserved for this task.
* </p>
*
* @param task the raw task that shall be wrapped with the resource freeing logic
* @param taskDetails metadata holding the relevant information of the task
* @param future the wrapped {@link Future} that this task belongs to - it allows us to check if the task was
* cancelled from the outside
* @param <V> the result type of the {@link Future} that belongs to the task
* @return a wrapped task that cleans up its reserved resources upon completion
*/
private <V> Runnable wrapTask(Runnable task, TaskDetails taskDetails, Future<V> future) {
onScheduleTask(taskDetails);
return () -> {
if (taskDetails.getScheduledForExecution().compareAndSet(true, false)) {
Throwable error = null;
try {
onStartTask(taskDetails);
taskDetails.getExecutionCount().incrementAndGet();
task.run();
} catch (Exception e) {
error = e;
throw e;
} finally {
onFinishTask(taskDetails, error);
if (taskDetails.getInterval() == null || error != null || future == null || future.isCancelled() ||
delegate.isShutdown()) {
onCompleteTask(taskDetails, error);
} else {
taskDetails.getScheduledForExecution().set(true);
}
}
}
};
}
/**
* <p>
* This method does the same as {@link #wrapTask(Runnable, TaskDetails, Future)} but defaults to a task that is not
* associated to a user accessible {@link Future}.
* </p>
* <p>
* This method is used whenever the {@link Future} is not returned by the underlying method so we don't need to wrap
* it.
* </p>
*
* @param task the raw task that shall be wrapped with the resource freeing logic
* @param taskDetails metadata holding the relevant information of the task
* @return a wrapped task that cleans up its reserved resources upon completion
*/
private Runnable wrapTask(Runnable task, TaskDetails taskDetails) {
return wrapTask(task, taskDetails, null);
}
/**
* <p>
* This is a utility method that wraps the task and the resulting {@link Future} in a single call.
* </p>
* <p>
* It creates the {@link WrappedFuture} and the wrapped task and starts the execution of the task by delegating
* the launch through the {@link FutureFactory}.
* </p>
*
* @param futureFactory the lambda that returns the original "unwrapped" future from the wrapped task
* @param task the task that shall be wrapped to clean up its reserved resources upon completion
* @param taskDetails metadata holding the relevant information of the task
* @param <V> the return type of the task
* @return the {@link WrappedFuture} that allows to interact with the task
*/
private <V> Future<V> wrapFuture(FutureFactory<Future<V>, Callable<V>> futureFactory, Callable<V> task,
TaskDetails taskDetails) {
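// create the wrapper first so the wrapped task can already check it for cancellation; the real
// future is only attached afterwards (see WrappedFuture#delegate(Future))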
WrappedFuture<V> wrappedFuture = new WrappedFuture<>(taskDetails);
Callable<V> wrappedCallable = wrapTask(task, taskDetails, wrappedFuture);
return wrappedFuture.delegate(futureFactory.create(wrappedCallable));
}
/**
* <p>
* This is a utility method that wraps the task and the resulting {@link Future} in a single call.
* </p>
* <p>
* It creates the {@link WrappedFuture} and the wrapped task and starts the execution of the task by delegating
* the launch through the {@link FutureFactory}.
* </p>
*
* @param futureFactory the lambda that returns the original "unwrapped" future from the wrapped task
* @param task the task that shall be wrapped to clean up its reserved resources upon completion
* @param taskDetails metadata holding the relevant information of the task
* @param <V> the return type of the task
* @return the {@link WrappedFuture} that allows to interact with the task
*/
private <V> Future<V> wrapFuture(FutureFactory<Future<V>, Runnable> futureFactory, Runnable task,
TaskDetails taskDetails) {
WrappedFuture<V> wrappedFuture = new WrappedFuture<>(taskDetails);
Runnable wrappedTask = wrapTask(task, taskDetails, wrappedFuture);
return wrappedFuture.delegate(futureFactory.create(wrappedTask));
}
/**
* <p>
* This is a utility method that wraps the task and the resulting {@link ScheduledFuture} in a single call.
* </p>
* <p>
* It creates the {@link WrappedScheduledFuture} and the wrapped task and starts the execution of the task by
* delegating the launch through the {@link FutureFactory}.
* </p>
*
* @param futureFactory the lambda that returns the original "unwrapped" future from the wrapped task
* @param task the task that shall be wrapped to clean up its reserved resources upon completion
* @param taskDetails metadata holding the relevant information of the task
* @param <V> the return type of the task
* @return the {@link WrappedFuture} that allows to interact with the task
*/
private <V> ScheduledFuture<V> wrapScheduledFuture(FutureFactory<ScheduledFuture<V>, Callable<V>> futureFactory,
Callable<V> task, TaskDetails taskDetails) {
WrappedScheduledFuture<V> wrappedFuture = new WrappedScheduledFuture<>(taskDetails);
Callable<V> wrappedCallable = wrapTask(task, taskDetails, wrappedFuture);
return wrappedFuture.delegate(futureFactory.create(wrappedCallable));
}
/**
* <p>
* This is a utility method that wraps the task and the resulting {@link ScheduledFuture} in a single call.
* </p>
* <p>
* It creates the {@link WrappedScheduledFuture} and the wrapped task and starts the execution of the task by
* delegating the launch through the {@link FutureFactory}.
* </p>
*
* @param futureFactory the lambda that returns the original "unwrapped" future from the wrapped task
* @param task the task that shall be wrapped to clean up its reserved resources upon completion
* @param taskDetails metadata holding the relevant information of the task
* @param <V> the return type of the task
* @return the {@link WrappedFuture} that allows to interact with the task
*/
private <V> ScheduledFuture<V> wrapScheduledFuture(FutureFactory<ScheduledFuture<V>, Runnable> futureFactory,
Runnable task, TaskDetails taskDetails) {
WrappedScheduledFuture<V> wrappedFuture = new WrappedScheduledFuture<>(taskDetails);
Runnable wrappedRunnable = wrapTask(task, taskDetails, wrappedFuture);
return wrappedFuture.delegate(futureFactory.create(wrappedRunnable));
}
/**
* <p>
* This is a utility method that wraps a {@link Collection} of {@link Callable}s to make them free their resources
* upon completion.
* </p>
* <p>
* It simply iterates over the tasks and wraps them one by one by calling
* {@link #wrapTask(Callable, TaskDetails)}.
* </p>
*
* @param tasks list of jobs that shall be wrapped
* @param taskDetails metadata holding the relevant information of the tasks
* @param <T> the type of the values returned by the {@link Callable}s
* @return wrapped list of jobs that will free their reserved resources upon completion
*/
private <T> Collection<? extends Callable<T>> wrapTasks(Collection<? extends Callable<T>> tasks,
TaskDetails taskDetails) {
List<Callable<T>> wrappedTasks = new ArrayList<>(tasks.size());
for (Callable<T> task : tasks) {
wrappedTasks.add(wrapTask(task, taskDetails));
}
return wrappedTasks;
}
/**
* <p>
* This method checks if we have enough resources to schedule the given amount of tasks and reserves the space if
* the check is successful.
* </p>
* <p>
* The reserved resources will be freed again once the tasks finish their execution or when they are cancelled
* through their corresponding {@link Future}.
* </p>
*
* @param requestedJobCount the amount of tasks that shall be scheduled
* @return true if we could reserve the given space and false otherwise
*/
private boolean reserveCapacity(int requestedJobCount) {
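// optimistically reserve the requested slots and roll the reservation back if it exceeds the
// capacity, so a request is always granted completely or not at all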
if (scheduledTasksCounter.addAndGet(requestedJobCount) <= capacity) {
return true;
} else {
scheduledTasksCounter.addAndGet(-requestedJobCount);
return false;
}
}
/**
* <p>
* This method is a utility method that simply checks the passed in object for being {@code null} and throws a
* {@link RejectedExecutionException} if it is.
* </p>
* <p>
* It is used to turn the silent methods into non-silent ones and conform with the {@link ScheduledExecutorService}
* interface.
* </p>
*
* @param result the object that shall be checked for being null (the result of a "silent" method call)
* @param <V> the type of the result
* @return the passed in object (if it is not null)
* @throws RejectedExecutionException if the passed in object was null
*/
private <V> V throwCapacityExhaustedIfNull(V result) throws RejectedExecutionException {
if (result == null) {
throw new RejectedExecutionException("the capacity is exhausted");
}
return result;
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
//region INNER CLASSES AND INTERFACES ///////////////////////////////////////////////////////////////////////
/**
* This interface is used to generically describe the lambda that is used to create the unwrapped future from the
* delegated {@link ScheduledExecutorService} by passing in the wrapped command.
*
* @param <RESULT> the kind of future returned by this factory ({@link ScheduledFuture} vs {@link Future})
* @param <ARGUMENT> type of the wrapped command that is passed in ({@link Runnable} vs {@link Callable})
*/
@FunctionalInterface
private interface FutureFactory<RESULT, ARGUMENT> {
/**
* This method creates the unwrapped future from the wrapped task by passing it on to the delegated
* {@link ScheduledExecutorService}.
*
* @param task the wrapped task that shall be scheduled
* @return the unwrapped "original" {@link Future} of the delegated {@link ScheduledExecutorService}
*/
RESULT create(ARGUMENT task);
}
/**
* This is a wrapper for the {@link Future}s returned by the {@link ScheduledExecutorService} that allows us to
* override the behaviour of the {@link #cancel(boolean)} method (to be able to free the resources of a task that
* gets cancelled without being executed).
*
* @param <V> the type of the result returned by the task that is the origin of this {@link Future}.
*/
private class WrappedFuture<V> implements Future<V> {
/**
* Holds the metadata of the task that this {@link Future} belongs to.
*/
protected final TaskDetails taskDetails;
/**
* <p>
* "Original" unwrapped {@link Future} that is returned by the delegated {@link ScheduledExecutorService}.
* </p>
* <p>
* Note: All methods except the {@link #cancel(boolean)} are getting passed through without any
* modifications.
* </p>
*/
private Future<V> delegate;
/**
* <p>
* This creates a {@link WrappedFuture} that cleans up the reserved resources when it is cancelled while the
* task is still pending.
* </p>
* <p>
* We do not hand in the {@link #delegate} in the constructor because we need to pass this instance to the
* wrapped task before we "launch" the processing of the task (see {@link #delegate(Future)}).
* </p>
*
* @param taskDetails metadata holding the relevant information of the task
*/
public WrappedFuture(TaskDetails taskDetails) {
this.taskDetails = taskDetails;
}
/**
* <p>
* This method stores the delegated {@link Future} in its internal property.
* </p>
* <p>
* After the delegated {@link Future} is created, the underlying {@link ScheduledExecutorService} starts
* processing the task. To be able to "address" this wrapped future before we start processing the task (the
* wrapped task needs to access it), we populate it lazily (see
* {@link #wrapFuture(FutureFactory, Runnable, TaskDetails)} and
* {@link #wrapFuture(FutureFactory, Callable, TaskDetails)}).
* </p>
*
* @param delegatedFuture the "original" future that handles the logic in the background
* @return the instance itself (since we want to return the {@link WrappedFuture} after launching the underlying
* processing).
*/
public Future<V> delegate(Future<V> delegatedFuture) {
this.delegate = delegatedFuture;
return this;
}
/**
* <p>
* This method returns the delegated future.
* </p>
* <p>
* We define a getter for this property to be able to override it in the extending class and achieve
* polymorphic behavior.
* </p>
*
* @return the original "unwrapped" {@link Future} that is used as a delegate for the methods of this class
*/
public Future<V> delegate() {
return delegate;
}
/**
* {@inheritDoc}
*
* <p>
* This method fires the {@link #onCancelTask(TaskDetails)} if the future has not been cancelled before.
* Afterwards it also fires the {@link #onCompleteTask(TaskDetails, Throwable)} callback if the task is not
* running right now (otherwise this will be fired inside the wrapped task).
* </p>
*/
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
if (!delegate().isCancelled() && !delegate().isDone()) {
onCancelTask(taskDetails);
}
if (taskDetails.getScheduledForExecution().compareAndSet(true, false)) {
onCompleteTask(taskDetails, null);
}
return delegate().cancel(mayInterruptIfRunning);
}
@Override
public boolean isCancelled() {
return delegate().isCancelled();
}
@Override
public boolean isDone() {
return delegate().isDone();
}
@Override
public V get() throws InterruptedException, ExecutionException {
return delegate().get();
}
@Override
public V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
return delegate().get(timeout, unit);
}
}
/**
* This is a wrapper for the {@link ScheduledFuture}s returned by the {@link ScheduledExecutorService} that allows
* us to override the behaviour of the {@link #cancel(boolean)} method (to be able to free the resources of a task
* that gets cancelled without being executed).
*
* @param <V> the type of the result returned by the task that is the origin of this {@link ScheduledFuture}.
*/
private class WrappedScheduledFuture<V> extends WrappedFuture<V> implements ScheduledFuture<V> {
/**
* <p>
* "Original" unwrapped {@link ScheduledFuture} that is returned by the delegated
* {@link ScheduledExecutorService}.
* </p>
* <p>
* Note: All methods except the {@link #cancel(boolean)} are getting passed through without any modifications.
* </p>
*/
private ScheduledFuture<V> delegate;
/**
* <p>
* This creates a {@link ScheduledFuture} that cleans up the reserved resources when it is cancelled while the
* task is still pending.
* </p>
* <p>
* We do not hand in the {@link #delegate} in the constructor because we need to pass this instance to the
* wrapped task before we "launch" the processing of the task (see {@link #delegate(Future)}).
* </p>
*
* @param taskDetails metadata holding the relevant information of the task
*/
private WrappedScheduledFuture(TaskDetails taskDetails) {
super(taskDetails);
}
/**
* <p>
* This method stores the delegated {@link ScheduledFuture} in its internal property.
* </p>
* <p>
* After the delegated {@link ScheduledFuture} is created, the underlying {@link ScheduledExecutorService}
* starts processing the task. To be able to "address" this wrapped future before we start processing the task
* (the wrapped task needs to access it), we populate it lazily (see
* {@link #wrapScheduledFuture(FutureFactory, Runnable, TaskDetails)}).
* </p>
*
* @param delegatedFuture the "original" future that handles the logic in the background
* @return the instance itself (since we want to return the {@link WrappedFuture} immediately after launching
* the underlying processing).
*/
public ScheduledFuture<V> delegate(ScheduledFuture<V> delegatedFuture) {
this.delegate = delegatedFuture;
return this;
}
@Override
public ScheduledFuture<V> delegate() {
return delegate;
}
@Override
public long getDelay(TimeUnit unit) {
return delegate().getDelay(unit);
}
@Override
public int compareTo(Delayed o) {
return delegate().compareTo(o);
}
/**
* {@inheritDoc}
*
* <p>
* This method fires the {@link #onCancelTask(TaskDetails)} if the future has not been cancelled before.
* Afterwards it also fires the {@link #onCompleteTask(TaskDetails, Throwable)} callback if the task is not
* running right now (otherwise this will be fired inside the wrapped task).
* </p>
*/
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
if (!delegate().isCancelled() && !delegate().isDone()) {
onCancelTask(taskDetails);
}
if (taskDetails.getScheduledForExecution().compareAndSet(true, false)) {
onCompleteTask(taskDetails, null);
}
return delegate().cancel(mayInterruptIfRunning);
}
@Override
public boolean isCancelled() {
return delegate().isCancelled();
}
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
}
| 40,543 | 38.24879 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/thread/DedicatedScheduledExecutorService.java
|
package com.iota.iri.utils.thread;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.*;
/**
* <p>
* This class represents a {@link ScheduledExecutorService} that is associated with one specific task for which it
* provides automatic logging capabilities.
* </p>
* <p>
* It informs the user about its lifecycle using the logback loggers used by IRI. In addition it offers "silent" methods
* of the {@link ScheduledExecutorService} that do not throw {@link Exception}s when we try to start the same task
* multiple times. This is handy for implementing the "start" and "shutdown" methods of the background workers of IRI
* that would otherwise have to take care of not starting the same task more than once (when trying to be robust against
* coding errors or tests that start the same thread multiple times).
* </p>
*/
public class DedicatedScheduledExecutorService extends BoundedScheduledExecutorService {
/**
* Default logger for this class allowing us to dump debug and status messages.
* <p>
* Note: The used logger can be overwritten by providing a different logger in the constructor (to have transparent
* log messages that look like they are coming from a different source).
* </p>
*/
private static final Logger DEFAULT_LOGGER = LoggerFactory.getLogger(DedicatedScheduledExecutorService.class);
/**
* Holds a reference to the logger that is used to emit messages.
*/
private final Logger logger;
/**
* Holds the name of the thread that gets started by this class and that gets printed in the log messages.
*/
private final String threadName;
/**
* Flag indicating if we want to issue debug messages (for example whenever a task gets started and finished).
*/
private final boolean debug;
/**
* <p>
* Creates a {@link ScheduledExecutorService} that is associated with one specific task for which it provides
* automatic logging capabilities (using the provided thread name).
* </p>
* <p>
* It informs the user about its lifecycle using the logback loggers used by IRI. In addition it offers "silent"
* methods of the {@link ScheduledExecutorService} that do not throw {@link Exception}s when we try to start the
* same task multiple times. This is handy for implementing the "start" and "shutdown" methods of the background
* workers of IRI that would otherwise have to take care of not starting the same task more than once (when trying
* to be robust against coding errors or tests that start the same thread multiple times).
* </p>
* <p>
* <pre>
* <code>Example:
*
* private final static Logger logger = LoggerFactor.getLogger(MilestoneSolidifier.class);
*
* private DedicatedScheduledExecutorService milestoneSolidifier = new DedicatedScheduledExecutorService(
* "Solidification Thread", logger, false);
*
* // calling this multiple times will only start exactly one background job (ignore additional requests)
* public void start() {
* milestoneSolidifier.silentScheduleAtFixedRate(this::solidificationThread, 10, 50, MILLISECONDS);
* }
*
* // calling this multiple times will only stop the one instance that is running (if it is running)
* public void shutdown() {
* milestoneSolidifier.shutdownNow();
* }
*
* public void solidificationThread() {
* System.out.println("I get executed every 50 milliseconds");
* }
* </code>
* <code>Resulting Log Output:
*
* [main] INFO MilestoneSolidifier - Starting [Milestone Solidifier] (starts in 10ms / runs every 50ms) ...
* [main] INFO MilestoneSolidifier - [Milestone Solidifier] Started (execution #1) ...
* [Milestone Solidifier] INFO c.i.i.s.m.MilestoneSolidifier - I get executed every 50 milliseconds
* [Milestone Solidifier] INFO c.i.i.s.m.MilestoneSolidifier - I get executed every 50 milliseconds
* [Milestone Solidifier] INFO c.i.i.s.m.MilestoneSolidifier - I get executed every 50 milliseconds
* [main] INFO MilestoneSolidifier - Stopping [Milestone Solidifier] ...
* [main] INFO MilestoneSolidifier - [Milestone Solidifier] Stopped (after #4 executions) ...
* </code>
* </pre>
* </p>
*
* @param threadName name of the thread (or null if we want to disable the automatic logging - exceptions will
* always be logged)
* @param logger logback logger that shall be used for the origin of the log messages
* @param debug debug flag that indicates if every "run" should be accompanied with a log message
*/
public DedicatedScheduledExecutorService(String threadName, Logger logger, boolean debug) {
super(1);
this.threadName = threadName;
this.logger = logger;
this.debug = debug;
}
/**
* Does the same as {@link #DedicatedScheduledExecutorService(String, Logger, boolean)} but defaults to the
* {@link #DEFAULT_LOGGER} for the log messages.
*
* @param threadName name of the thread (or null if we want to disable the automatic logging - exceptions will
* always be logged)
* @param debug debug flag that indicates if every "run" should be accompanied with a log message
*/
public DedicatedScheduledExecutorService(String threadName, boolean debug) {
this(threadName, DEFAULT_LOGGER, debug);
}
/**
* Does the same as {@link #DedicatedScheduledExecutorService(String, Logger, boolean)} but defaults to false
* for the debug flag.
*
* @param threadName name of the thread (or null if we want to disable the automatic logging - exceptions will
* always be logged)
* @param logger logback logger that shall be used for the origin of the log messages
*/
public DedicatedScheduledExecutorService(String threadName, Logger logger) {
this(threadName, logger, false);
}
/**
* <p>
* Does the same as {@link #DedicatedScheduledExecutorService(String, Logger, boolean)} but defaults to {@code null}
* for the thread name (which causes only error messages to be printed - unless debug is true).
* </p>
* <p>
* Note: This is for example used by the {@link com.iota.iri.utils.log.interval.IntervalLogger} which does not want
* to inform the user when scheduling a log output, but which still needs the "only run one task" logic.
* </p>
*
* @param logger logback logger that shall be used for the origin of the log messages
* @param debug debug flag that indicates if every "run" should be accompanied with a log message
*/
public DedicatedScheduledExecutorService(Logger logger, boolean debug) {
this(null, logger, debug);
}
/**
* Does the same as {@link #DedicatedScheduledExecutorService(String, Logger, boolean)} but defaults to the
* {@link #DEFAULT_LOGGER} for the log messages and false for the debug flag.
*
* @param threadName name of the thread (or null if we want to disable the automatic logging - exceptions will
* always be logged)
*/
public DedicatedScheduledExecutorService(String threadName) {
this(threadName, DEFAULT_LOGGER, false);
}
/**
* <p>
* Does the same as {@link #DedicatedScheduledExecutorService(String, Logger, boolean)} but defaults to {@code null}
* for the thread name (which causes only error messages to be printed - unless debug is true) and false for the
* debug flag.
* </p>
* <p>
* Note: This is for example used by the {@link com.iota.iri.utils.log.interval.IntervalLogger} which does not want
* to inform the user when scheduling a log output, but which still needs the "only run one task" logic.
* </p>
*
* @param logger logback logger that shall be used for the origin of the log messages
*/
public DedicatedScheduledExecutorService(Logger logger) {
this(null, logger, false);
}
/**
* <p>
* Does the same as {@link #DedicatedScheduledExecutorService(String, Logger, boolean)} but defaults to {@code null}
* for the thread name (which causes only error messages to be printed - unless debug is true) and the
* {@link #DEFAULT_LOGGER} for the log messages.
* </p>
* <p>
* Note: This is for example used by the {@link com.iota.iri.utils.log.interval.IntervalLogger} which does not want
* to inform the user when scheduling a log output, but which still needs the "only run one task" logic.
* </p>
*
* @param debug debug flag that indicates if every "run" should be accompanied with a log message
*/
public DedicatedScheduledExecutorService(boolean debug) {
this(null, DEFAULT_LOGGER, debug);
}
/**
* <p>
* Does the same as {@link #DedicatedScheduledExecutorService(String, Logger, boolean)} but defaults to {@code null}
* for the thread name (which causes only error messages to be printed), the {@link #DEFAULT_LOGGER} for the log
* messages and {@code false} for the debug flag.
* </p>
* <p>
* Note: This is for example used by the {@link com.iota.iri.utils.log.interval.IntervalLogger} which does not want
* to inform the user when scheduling a log output, but which still needs the "only run one task" logic.
* </p>
*/
public DedicatedScheduledExecutorService() {
this(null, DEFAULT_LOGGER, false);
}
/**
* This method is the getter for the name of the thread that gets created by this service.
*
* @return it simply returns the private property of {@link #threadName}.
*/
public String getThreadName() {
return threadName;
}
//region METHODS OF ReportingExecutorService INTERFACE /////////////////////////////////////////////////////////////
/**
* {@inheritDoc}
*
* <p>
* This method shows a message whenever a task gets successfully scheduled.
* </p>
* <p>
* We only show the scheduling message if debugging is enabled or a thread name was defined when creating this
* {@link DedicatedScheduledExecutorService} (to not pollute the CLI with meaningless messages).
* </p>
*
* @param taskDetails metadata holding the relevant information of the task
*/
@Override
public void onScheduleTask(TaskDetails taskDetails) {
super.onScheduleTask(taskDetails);
if (debug || threadName != null) {
printScheduledMessage(taskDetails);
}
}
/**
* {@inheritDoc}
*
* <p>
* This method shows a message whenever a task starts to be processed.
* </p>
* <p>
* We only show the starting message if debug is enabled or if it is the first start of the task in a named
* {@link DedicatedScheduledExecutorService} (displaying it as if it were a {@link Thread} with one start message
* and one stop message - to not pollute the CLI with meaningless messages).
* </p>
* <p>
* To increase the information available for debugging, we change the thread name to the one that initiated the
* start (rather than the randomly assigned one from the executor service) before printing the start message.
* </p>
* <p>
* After the start message was printed, we set the name of the {@link Thread} that will consequently be used for log
* messages from the task itself.
* </p>
*
* @param taskDetails metadata holding the relevant information of the task
*/
@Override
public void onStartTask(TaskDetails taskDetails) {
super.onStartTask(taskDetails);
if (debug || (threadName != null && taskDetails.getExecutionCount().get() == 0)) {
Thread.currentThread().setName(taskDetails.getThreadName());
printStartedMessage(taskDetails);
}
if (threadName != null) {
Thread.currentThread().setName(threadName);
} else {
Thread.currentThread().setName(getPrintableThreadName(taskDetails));
}
}
/**
* {@inheritDoc}
*
* <p>
* This method shows a message when the task finishes its execution (can happen multiple times for recurring
* tasks).
* </p>
* <p>
* We only show the finishing message if debug is enabled and if no error occurred (otherwise the
* {@link #onCompleteTask(TaskDetails, Throwable)} callback will give enough information about the crash).
* </p>
* <p>
* To be consistent with the start message, we change the thread name to the one that initiated the task (this also
* makes it easier to distinguish log messages that are emitted by the "real" logic of the task from the "automated"
* messages about its lifecycle).
* </p>
*
* @param taskDetails metadata holding the relevant information of the task
* @param error the exception that caused this task to terminate or {@code null} if it terminated normally
*/
@Override
public void onFinishTask(TaskDetails taskDetails, Throwable error) {
super.onFinishTask(taskDetails, error);
if (debug && error == null) {
Thread.currentThread().setName(taskDetails.getThreadName());
printFinishedMessage(taskDetails);
}
}
/**
* {@inheritDoc}
*
* <p>
* This method shows an information about the intent to cancel the task.
* </p>
* <p>
* We only show the cancel message if debugging is enabled or a thread name was defined when creating this
* {@link DedicatedScheduledExecutorService} (to not pollute the CLI with meaningless messages).
* </p>
*
* @param taskDetails metadata holding the relevant information of the task
*/
@Override
public void onCancelTask(TaskDetails taskDetails) {
super.onCancelTask(taskDetails);
if (debug || threadName != null) {
printStopMessage(taskDetails);
}
}
/**
* {@inheritDoc}
*
* <p>
* This method shows a stopped message whenever it finally terminates (and doesn't get launched again in case of
* recurring tasks).
* </p>
* <p>
* We only show the stopped message if debug is enabled, an exception occurred (always show unexpected errors) or if
* we have a named {@link DedicatedScheduledExecutorService} (to not pollute the CLI with meaningless
* messages).
* </p>
* <p>
* If the completion of the task was not caused by an outside call to cancel it, we change the thread name to the
* one that initiated the task (this makes it easier to distinguish log messages that are emitted by the "real"
* logic of the task from the "automated" messages about its lifecycle).
* </p>
*
* @param taskDetails metadata holding the relevant information of the task
* @param error the exception that caused this task to terminate or {@code null} if it terminated normally
*/
@Override
public void onCompleteTask(TaskDetails taskDetails, Throwable error) {
super.onCompleteTask(taskDetails, error);
if (debug || threadName != null || error != null) {
String oldThreadName = Thread.currentThread().getName();
if (oldThreadName.equals(getPrintableThreadName(taskDetails))) {
Thread.currentThread().setName(taskDetails.getThreadName());
}
printStoppedMessage(taskDetails, error);
if (!oldThreadName.equals(Thread.currentThread().getName())) {
Thread.currentThread().setName(oldThreadName);
}
}
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
//region PRIVATE UTILITY METHODS ///////////////////////////////////////////////////////////////////////////////////
/**
* <p>
* This method is a utility method that prints the schedule message of the task.
* </p>
* <p>
* It constructs the matching message by passing the task details into the
* {@link #buildScheduledMessage(TaskDetails)} method.
* </p>
*
* @param taskDetails metadata holding the relevant information of the task
*/
private void printScheduledMessage(TaskDetails taskDetails) {
logger.info(buildScheduledMessage(taskDetails));
}
/**
* <p>
* This method is a utility method that prints the started message of the task.
* </p>
* <p>
* It constructs the message by passing the details into the {@link #buildStartedMessage(TaskDetails)} and printing
* it through the logger.
* </p>
*
* @param taskDetails metadata holding the relevant information of the task
*/
private void printStartedMessage(TaskDetails taskDetails) {
logger.info(buildStartedMessage(taskDetails));
}
/**
* <p>
* This method is a utility method that prints the finished message of the task.
* </p>
* <p>
* It constructs the message by passing the details into the {@link #buildFinishedMessage(TaskDetails)} and printing
* it through the logger.
* </p>
*
* @param taskDetails metadata holding the relevant information of the task
*/
private void printFinishedMessage(TaskDetails taskDetails) {
logger.info(buildFinishedMessage(taskDetails));
}
/**
* <p>
* This method is a utility method that prints the stop message of the task.
* </p>
* <p>
* It constructs the message by passing the details into the {@link #buildStopMessage(TaskDetails)} and printing
* it through the logger.
* </p>
*
* @param taskDetails metadata holding the relevant information of the task
*/
private void printStopMessage(TaskDetails taskDetails) {
logger.info(buildStopMessage(taskDetails));
}
/**
* <p>
* This method is a utility method that prints the stopped message of the task.
* </p>
* <p>
* It constructs the message by passing the details into the {@link #buildStoppedMessage(TaskDetails, Throwable)}
* and printing it through the logger. If an error occurred we use the error channel and append the error to
* get a stack trace of what happened.
* </p>
*
* @param taskDetails metadata holding the relevant information of the task
* @param error the exception that caused the task to be stopped
*/
private void printStoppedMessage(TaskDetails taskDetails, Throwable error) {
String stoppedMessage = buildStoppedMessage(taskDetails, error);
if (error != null) {
logger.error(stoppedMessage, error);
} else {
logger.info(stoppedMessage);
}
}
/**
* <p>
* This method is a utility method that generates the thread name that is used in the log messages.
* </p>
* <p>
* It simply returns the thread name (if one is set) or generates a name if this
* {@link DedicatedScheduledExecutorService} is "unnamed".
* </p>
*
* @param taskDetails metadata holding the relevant information of the task
* @return the thread name that is used in the log messages
*/
private String getPrintableThreadName(TaskDetails taskDetails) {
return threadName != null
? threadName
: "UNNAMED THREAD (started by \"" + taskDetails.getThreadName() + "\")";
}
/**
* This method creates the schedule message of the task by first building the temporal parts of the message and
* then appending them to the actual message.
*
* @param taskDetails metadata holding the relevant information of the task
* @return the schedule message that can be used with the logger
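*         (e.g. {@code Starting [Milestone Solidifier] (starts in 10ms / runs every 50ms) ...})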
*/
private String buildScheduledMessage(TaskDetails taskDetails) {
String printableThreadName = getPrintableThreadName(taskDetails);
String timeoutMessageFragment = buildDelayMessageFragment(taskDetails);
String intervalMessageFragment = buildIntervalMessageFragment(taskDetails);
String timeMessageFragment = "";
if (timeoutMessageFragment != null) {
timeMessageFragment += " (" + timeoutMessageFragment + (intervalMessageFragment != null ? " / " : ")");
}
if (intervalMessageFragment != null) {
timeMessageFragment += (timeoutMessageFragment == null ? " (" : "") + intervalMessageFragment + ")";
}
return "Starting [" + printableThreadName + "]" + timeMessageFragment + " ...";
}
/**
* This method creates the started message of the task by simply extracting the relevant information from the
* {@code taskDetails} and concatenating them to the actual message.
*
* @param taskDetails metadata holding the relevant information of the task
* @return the started message that can be used with the logger
*/
private String buildStartedMessage(TaskDetails taskDetails) {
String printableThreadName = getPrintableThreadName(taskDetails);
return "[" + printableThreadName + "] Started (execution #" + (taskDetails.getExecutionCount().get() + 1) +
") ...";
}
/**
* This method creates the finished message of the task by simply extracting the relevant information from the
* {@code taskDetails} and concatenating them to the actual message.
*
* @param taskDetails metadata holding the relevant information of the task
* @return the finished message that can be used with the logger
*/
private String buildFinishedMessage(TaskDetails taskDetails) {
String printableThreadName = getPrintableThreadName(taskDetails);
return "[" + printableThreadName + "] Finished (execution #" + (taskDetails.getExecutionCount().get()) +
") ...";
}
/**
* This method creates the stop message of the task by simply extracting the relevant information from the
* {@code taskDetails} and concatenating them to the actual message.
*
* @param taskDetails metadata holding the relevant information of the task
* @return the stop message that can be used with the logger
*/
private String buildStopMessage(TaskDetails taskDetails) {
String printableThreadName = getPrintableThreadName(taskDetails);
return taskDetails.getExecutionCount().get() > 0 ? "Stopping [" + printableThreadName + "] ..."
: "Cancelling Start [" + printableThreadName + "] ...";
}
/**
* <p>
* This method creates the stopped message of the task by simply extracting the relevant information from the
* {@code taskDetails} and concatenating them to the actual message.
* </p>
* <p>
* We differentiate between different termination ways by giving different reasons in the message.
* </p>
*
* @param taskDetails metadata holding the relevant information of the task
* @param error the exception that caused the task to be stopped or {@code null} if it terminated normally
* @return the stopped message that can be used with the logger
*/
private String buildStoppedMessage(TaskDetails taskDetails, Throwable error) {
String printableThreadName = getPrintableThreadName(taskDetails);
if (error != null) {
return "[" + printableThreadName + "] Crashed (after #" + taskDetails.getExecutionCount().get() +
" executions) ...";
} else if (taskDetails.getExecutionCount().get() > 0) {
return "[" + printableThreadName + "] Stopped (after #" + taskDetails.getExecutionCount().get() +
" executions) ...";
} else {
return "[" + printableThreadName + "] Cancelled Start ...";
}
}
/**
* This method is a utility method that builds the message fragment which expresses the delay of the scheduled
* task.
*
* @param taskDetails metadata holding the relevant information of the task
* @return the message fragment which expresses the delay of the scheduled task
*/
private String buildDelayMessageFragment(TaskDetails taskDetails) {
if (taskDetails.getDelay() == null) {
return null;
} else {
return "starts in " + taskDetails.getDelay() + buildUnitAbbreviation(taskDetails.getTimeUnit());
}
}
/**
* This method is a utility method that builds the message fragment which expresses the interval of the scheduled
* task.
*
* @param taskDetails metadata holding the relevant information of the task
* @return the message fragment which expresses the interval of the scheduled task
*/
private String buildIntervalMessageFragment(TaskDetails taskDetails) {
if (taskDetails.getInterval() == null) {
return null;
} else {
return "runs every " + taskDetails.getInterval() + buildUnitAbbreviation(taskDetails.getTimeUnit());
}
}
/**
* This method is a utility method that creates a human readable abbreviation of the provided
* {@link TimeUnit}.
*
* @param unit the time unit used for the values in the {@link TaskDetails}
* @return a human readable abbreviation of the provided {@link TimeUnit}
*/
private String buildUnitAbbreviation(TimeUnit unit) {
switch (unit) {
case NANOSECONDS: return "ns";
case MICROSECONDS: return "µs";
case MILLISECONDS: return "ms";
case SECONDS: return "s";
case MINUTES: return "min";
case HOURS: return "hours";
case DAYS: return "days";
default: return "";
}
}
//endregion ////////////////////////////////////////////////////////////////////////////////////////////////////////
}
| 26,211 | 41.345719 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/thread/ReportingExecutorService.java
|
package com.iota.iri.utils.thread;
/**
* <p>
* This interface defines a contract for {@link java.util.concurrent.ExecutorService}s that makes them call a
* "reporting" method whenever an important event occurs.
* </p>
* <p>
* This way a child class extending these kind of {@link java.util.concurrent.ExecutorService}s can "hook" into these
* events by overriding the specific method and add additional logic like logging or debugging capabilities.
* </p>
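 * <p>
 * A minimal sketch of such a hook (assuming a subclass of an implementing executor like the
 * BoundedScheduledExecutorService):
 * </p>
 * <pre>
 * <code>
 * public class LoggingExecutorService extends BoundedScheduledExecutorService {
 *     public LoggingExecutorService(int capacity) {
 *         super(capacity);
 *     }
 *
 *     public void onStartTask(TaskDetails taskDetails) {
 *         super.onStartTask(taskDetails);
 *         System.out.println("execution #" + (taskDetails.getExecutionCount().get() + 1) + " started");
 *     }
 * }
 * </code>
 * </pre>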
*/
public interface ReportingExecutorService {
/**
* <p>
* This method gets called whenever a new task is scheduled to run.
* </p>
* <p>
* In contrast to {@link #onStartTask(TaskDetails)} this method gets called only once for "recurring" tasks that are
* scheduled to run in pre-defined intervals.
* </p>
*
* @param taskDetails object containing details about this task
*/
void onScheduleTask(TaskDetails taskDetails);
/**
* <p>
* This method gets called whenever a task is started.
* </p>
* <p>
* For recurring tasks it is called multiple times.
* </p>
*
* @param taskDetails object containing details about this task
*/
void onStartTask(TaskDetails taskDetails);
/**
* <p>
* This method gets called whenever a task is finished.
* </p>
* <p>
* For recurring tasks it is called multiple times.
* </p>
*
* @param taskDetails object containing details about this task
* @param error {@link Exception} that caused the task to complete
*/
void onFinishTask(TaskDetails taskDetails, Throwable error);
/**
* <p>
* This method gets called whenever a task is cancelled through its {@link java.util.concurrent.Future} or through
* the shutdown methods of the {@link java.util.concurrent.ExecutorService}.
* </p>
* <p>
* It only gets called once for every task.
* </p>
*
* @param taskDetails object containing details about this task
*/
void onCancelTask(TaskDetails taskDetails);
/**
* <p>
* This method gets called whenever a task completes.
* </p>
* <p>
* This can be through either raising an exception, cancelling from the outside or by simply terminating in a normal
* manner. For recurring tasks this only gets called once.
* </p>
*
* @param taskDetails object containing details about this task
* @param error {@link Exception} that caused the task to complete
*/
void onCompleteTask(TaskDetails taskDetails, Throwable error);
}
| 2,587 | 31.759494 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/thread/SilentScheduledExecutorService.java
|
package com.iota.iri.utils.thread;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.*;
/**
* <p>
* This interface extends the {@link ScheduledExecutorService} by providing additional methods to enqueue tasks without
* throwing a {@link RejectedExecutionException} exception.
* </p>
* <p>
* This can be useful when preventing additional tasks from running is not an error but an intended design decision in
* the implementing class. In these cases, raising an exception and catching it would cause too much unnecessary
* overhead (using {@link Exception}s for control flow is an anti-pattern).
* </p>
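 * <p>
 * A typical caller therefore checks the returned future for {@code null} instead of catching an exception (a
 * minimal sketch, assuming {@code pollQueue()} is some recurring job of the caller):
 * </p>
 * <pre>
 * <code>
 * if (executorService.silentScheduleAtFixedRate(this::pollQueue, 0, 100, TimeUnit.MILLISECONDS) == null) {
 *     // a task is queued or running already - silently skip this start request
 * }
 * </code>
 * </pre>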
*/
public interface SilentScheduledExecutorService extends ScheduledExecutorService {
/**
* Does the same as {@link ScheduledExecutorService#schedule(Runnable, long, TimeUnit)} but returns {@code null}
* instead of throwing a {@link RejectedExecutionException} if the task cannot be scheduled for execution.
*
* @param command the task to execute
* @param delay the time from now to delay execution
* @param unit the time unit of the delay parameter
* @return a ScheduledFuture representing pending completion of the task and whose {@code get()} method will return
* {@code null} upon completion / {@code null} if the task cannot be scheduled for execution
* @throws NullPointerException if command is null
*/
ScheduledFuture<?> silentSchedule(Runnable command, long delay, TimeUnit unit);
/**
* Does the same as {@link ScheduledExecutorService#schedule(Callable, long, TimeUnit)} but returns {@code null}
* instead of throwing a {@link RejectedExecutionException} if the task cannot be scheduled for
* execution.
*
* @param callable the function to execute
* @param delay the time from now to delay execution
* @param unit the time unit of the delay parameter
* @param <V> the type of the callable's result
* @return a ScheduledFuture that can be used to extract result or cancel / {@code null} if the task cannot be
* scheduled for execution
* @throws NullPointerException if callable is null
*/
<V> ScheduledFuture<V> silentSchedule(Callable<V> callable, long delay, TimeUnit unit);
/**
* Does the same as {@link ScheduledExecutorService#scheduleAtFixedRate(Runnable, long, long, TimeUnit)} but returns
* {@code null} instead of throwing a {@link RejectedExecutionException} if the task cannot be scheduled for
* execution.
*
* @param command the task to execute
* @param initialDelay the time to delay first execution
* @param period the period between successive executions
* @param unit the time unit of the initialDelay and period parameters
* @return a ScheduledFuture representing pending completion of the task, and whose {@code get()} method will throw
* an exception upon cancellation / {@code null} if the task cannot be scheduled for execution
* @throws NullPointerException if command is null
* @throws IllegalArgumentException if period is less than or equal to zero
*/
ScheduledFuture<?> silentScheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit);
/**
* Does the same as
* {@link ScheduledExecutorService#scheduleWithFixedDelay(Runnable, long, long, TimeUnit)} but returns {@code null}
* instead of throwing a {@link RejectedExecutionException} if the task cannot be scheduled for execution.
*
* @param command the task to execute
* @param initialDelay the time to delay first execution
* @param delay the delay between the termination of one execution and the commencement of the next
* @param unit the time unit of the initialDelay and delay parameters
* @return a ScheduledFuture representing pending completion of the task, and whose {@code get()} method will throw
* an exception upon cancellation / {@code null} if the task cannot be scheduled for execution
* @throws NullPointerException if command is null
* @throws IllegalArgumentException if delay is less than or equal to zero
*/
ScheduledFuture<?> silentScheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit);
/**
* Does the same as {@link ScheduledExecutorService#submit(Callable)} but returns {@code null} instead of throwing a
* {@link RejectedExecutionException} if the task cannot be scheduled for execution.
*
* @param task the task to submit
* @param <T> the type of the task's result
* @return a Future representing pending completion of the task / {@code null} if the task cannot be scheduled for execution
* @throws NullPointerException if the task is null
*/
<T> Future<T> silentSubmit(Callable<T> task);
/**
* Does the same as {@link ScheduledExecutorService#submit(Runnable)} but returns {@code null} instead of throwing a
* {@link RejectedExecutionException} if the task cannot be scheduled for execution.
*
* @param task the task to submit
* @return a Future representing pending completion of the task / {@code null} if the task cannot be scheduled for
* execution
* @throws NullPointerException if the task is null
*/
Future<?> silentSubmit(Runnable task);
/**
* Does the same as {@link ScheduledExecutorService#submit(Runnable, Object)} but returns {@code null} instead of
* throwing a {@link RejectedExecutionException} if the task cannot be scheduled for execution.
*
* @param task the task to submit
* @param result the result to return
* @param <T> the type of the result
* @return a Future representing pending completion of the task / {@code null} if the task cannot be scheduled for
* execution
* @throws NullPointerException if the task is null
*/
<T> Future<T> silentSubmit(Runnable task, T result);
/**
* Does the same as {@link ScheduledExecutorService#invokeAll(Collection)} but returns {@code null} instead of
* throwing a {@link RejectedExecutionException} if any task cannot be scheduled for execution.
*
* @param tasks the collection of tasks
* @param <T> the type of the values returned from the tasks
* @return a list of Futures representing the tasks, in the same sequential order as produced by the iterator for
* the given task list, each of which has completed / {@code null} if any task cannot be scheduled for
* execution
* @throws InterruptedException if interrupted while waiting, in which case unfinished tasks are cancelled
* @throws NullPointerException if tasks or any of its elements are {@code null}
*/
<T> List<Future<T>> silentInvokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException;
/**
* Does the same as {@link ScheduledExecutorService#invokeAll(Collection, long, TimeUnit)} but returns {@code null}
* instead of throwing a {@link RejectedExecutionException} if any task cannot be scheduled for execution.
*
* @param tasks the collection of tasks
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @param <T> the type of the values returned from the tasks
* @return a list of Futures representing the tasks, in the same sequential order as produced by the iterator for
* the given task list. If the operation did not time out, each task will have completed. If it did time
* out, some of these tasks will not have completed. It returns {@code null} if any task cannot be scheduled
* for execution.
* @throws InterruptedException if interrupted while waiting, in which case unfinished tasks are cancelled
* @throws NullPointerException if tasks, any of its elements, or unit are {@code null}
*/
<T> List<Future<T>> silentInvokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException;
/**
* Does the same as {@link ScheduledExecutorService#invokeAny(Collection)} but returns {@code null} instead of
* throwing a {@link RejectedExecutionException} if tasks cannot be scheduled for execution.
*
* @param tasks the collection of tasks
* @param <T> the type of the values returned from the tasks
* @return the result returned by one of the tasks or {@code null} if tasks cannot be scheduled for execution
* @throws InterruptedException if interrupted while waiting
* @throws NullPointerException if tasks or any element task subject to execution is {@code null}
* @throws IllegalArgumentException if tasks is empty
* @throws ExecutionException if no task successfully completes
*/
<T> T silentInvokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException;
/**
* Does the same as {@link ScheduledExecutorService#invokeAny(Collection, long, TimeUnit)} but returns {@code null}
* instead of throwing a {@link RejectedExecutionException} if tasks cannot be scheduled for execution.
*
* @param tasks the collection of tasks
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @param <T> the type of the values returned from the tasks
* @return the result returned by one of the tasks or {@code null} if tasks cannot be scheduled for execution
* @throws InterruptedException if interrupted while waiting
* @throws NullPointerException if tasks, or unit, or any element task subject to execution is {@code null}
* @throws TimeoutException if the given timeout elapses before any task successfully completes
* @throws ExecutionException if no task successfully completes
*/
<T> T silentInvokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException;
/**
* Does the same as {@link ScheduledExecutorService#execute(Runnable)} but doesn't throw a
* {@link RejectedExecutionException} if this task cannot be accepted for execution.
*
* @param command the runnable task
* @throws NullPointerException if command is null
*/
void silentExecute(Runnable command);
}
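// Illustrative usage sketch (an addition for clarity, not part of the IRI sources): the null
// return values replace RejectedExecutionException-based control flow. The variable "executor"
// stands for an arbitrary implementation of this interface.
class SilentSchedulingExample {

    static void scheduleHeartbeat(SilentScheduledExecutorService executor) {
        ScheduledFuture<?> future = executor.silentScheduleAtFixedRate(
                () -> System.out.println("heartbeat"), 0, 1, TimeUnit.SECONDS);

        if (future == null) {
            // capacity reached: the task was intentionally dropped, nothing to catch
            System.out.println("heartbeat not scheduled");
        }
    }
}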
| 10,419 | 55.324324 | 162 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/thread/TaskDetails.java
|
package com.iota.iri.utils.thread;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
/**
* <p>
* This class represents a container for the metadata of a task that was scheduled through an
* {@link java.util.concurrent.ExecutorService} that implements the {@link ReportingExecutorService} interface.
* </p>
* <p>
* It can, for example, be used to show detailed log messages or even to implement more sophisticated features like the
* {@link BoundedScheduledExecutorService}.
* </p>
*/
public class TaskDetails {
/**
* Holds the name of the {@link Thread} that created the task.
*/
private final String threadName;
/**
* Holds a thread-safe flag that indicates if the task is currently scheduled for execution (false if running or
* done already).
*/
private final AtomicBoolean scheduledForExecution;
/**
* Holds a thread-safe counter for the number of times this task was executed.
*/
private final AtomicInteger executionCount;
/**
* Holds the initial delay that was provided when scheduling the task (or {@code null} if the task was set to run
* immediately).
*/
private Long delay = null;
/**
* Holds the interval in which the task is repeated (or {@code null} for non-recurring tasks).
*/
private Long interval = null;
/**
* Holds the timeout that a task can take to terminate before it gets interrupted (or {@code null} if none was
* provided).
*/
private Long timeout = null;
/**
* Holds the time unit that the other values are denominated in (or {@code null} if no time based values are
* provided).
*/
private TimeUnit timeUnit = null;
/**
* <p>
* Creates a container for the metadata of a task that was scheduled through an
* {@link java.util.concurrent.ExecutorService}.
* </p>
* <p>
* It is automatically initialized with the name of the calling {@link Thread}, the {@link #scheduledForExecution}
* flag set to {@code true} and the {@link #executionCount} set to 0.
* </p>
*/
public TaskDetails() {
this.threadName = Thread.currentThread().getName();
this.scheduledForExecution = new AtomicBoolean(true);
this.executionCount = new AtomicInteger(0);
}
/**
* Getter for the internal {@link #threadName} property.
*
* @return name of the {@link Thread} that scheduled the task.
*/
public String getThreadName() {
return threadName;
}
/**
* Getter for the internal {@link #scheduledForExecution} property.
* <p>
* Note: There is no setter for this property because it returns a mutable object that is not supposed to be
* overwritten.
* </p>
*
* @return a thread-safe flag that indicates if the task is currently scheduled for execution
*/
public AtomicBoolean getScheduledForExecution() {
return scheduledForExecution;
}
/**
* Getter for the internal {@link #executionCount} property.
* <p>
* Note: There is no setter for this property because it returns a mutable object that is not supposed to be
* overwritten.
* </p>
*
* @return a thread-safe counter for the number of times this task was executed
*/
public AtomicInteger getExecutionCount() {
return executionCount;
}
/**
* Setter for the internal {@link #delay} property.
*
* @param delay the initial delay that was provided when scheduling the task
* @return the instance of TaskDetails itself to allow the chaining of calls
*/
public TaskDetails setDelay(Long delay) {
this.delay = delay;
return this;
}
/**
* Getter for the internal {@link #delay} property.
*
* @return the initial delay that was provided when scheduling the task (or {@code null} if the task was set to run
* immediately)
*/
public Long getDelay() {
return delay;
}
/**
* Setter for the internal {@link #interval} property.
*
* @param interval the interval in which the task is repeated (or {@code null} for non-recurring tasks)
* @return the instance of TaskDetails itself to allow the chaining of calls
*/
public TaskDetails setInterval(Long interval) {
this.interval = interval;
return this;
}
/**
* Getter for the internal {@link #interval} property.
*
* @return the interval in which the task is repeated (or {@code null} for non-recurring tasks)
*/
public Long getInterval() {
return interval;
}
/**
* Setter for the internal {@link #timeout} property.
*
* @param timeout the timeout that a task can take to terminate before it gets interrupted (or {@code null} if none
* was provided)
* @return the instance of TaskDetails itself to allow the chaining of calls
*/
public TaskDetails setTimeout(Long timeout) {
this.timeout = timeout;
return this;
}
/**
* Getter for the internal {@link #timeout} property.
*
* @return the timeout that a task can take to terminate before it gets interrupted (or {@code null} if none was
* provided)
*/
public Long getTimeout() {
return timeout;
}
/**
* Setter for the internal {@link #timeUnit} property.
*
* @param timeUnit the time unit that the other values are denominated in (or {@code null} if no time based values
* are provided)
* @return the instance of TaskDetails itself to allow the chaining of calls
*/
public TaskDetails setTimeUnit(TimeUnit timeUnit) {
this.timeUnit = timeUnit;
return this;
}
/**
* Getter for the internal {@link #timeUnit} property.
*
* @return the time unit that the other values are denominated in (or {@code null} if no time based values are
* provided)
*/
public TimeUnit getTimeUnit() {
return timeUnit;
}
}
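// Illustrative usage sketch (an addition for clarity, not part of the IRI sources): since every
// setter returns the instance itself, the metadata of a recurring task can be assembled in a
// single chain. The concrete values are arbitrary.
class TaskDetailsExample {

    static TaskDetails recurringTaskDetails() {
        return new TaskDetails()
                .setDelay(100L)      // first execution after 100 milliseconds
                .setInterval(1000L)  // then repeat every second
                .setTimeout(500L)    // interrupt a single execution after 500 milliseconds
                .setTimeUnit(TimeUnit.MILLISECONDS);
    }
}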
| 6,174 | 30.345178 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/thread/ThreadIdentifier.java
|
package com.iota.iri.utils.thread;
/**
* Instances of this class are used by the {@link ThreadUtils} to map the managed {@link Thread}s and make the
* corresponding spawn and stop methods thread-safe.
*/
public class ThreadIdentifier {
/**
* Holds the name of the {@link Thread}.
*/
private final String name;
/**
* The constructor simply stores the passed-in name in the private property of this instance.
*
* While the name is not required to identify the {@link Thread}, we still require one to be able to create
* meaningful log messages.
*
* @param name name of the {@link Thread} that gets referenced by this identifier.
*/
public ThreadIdentifier(String name) {
this.name = name;
}
/**
* Getter of the name that was used to create this identifier.
*
* It simply returns the internal property.
*
* @return name of the {@link Thread} that this identifier references
*/
public String getName() {
return name;
}
}
| 1,049 | 28.166667 | 120 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/utils/thread/ThreadUtils.java
|
package com.iota.iri.utils.thread;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* This class contains a collection of methods that make the management of {@link Thread}s a little bit more convenient.
*
* Apart from offering methods to spawn and terminate {@link Thread}s, it also offers methods to {@link #sleep(int)}
* while correctly maintaining the {@link Thread#isInterrupted()} flags.
*/
public class ThreadUtils {
/**
* Logger for this class allowing us to dump debug and status messages.
*/
private static final Logger logger = LoggerFactory.getLogger(ThreadUtils.class);
/**
* Holds a map of {@link Thread}s that are managed by the {@link ThreadUtils}.
*
* This map is used to offer a thread-safe way to deal with spawning and stopping {@link Thread}s. A
* {@link ConcurrentHashMap} is used because the map is also read outside of the synchronized blocks.
*
* @see #spawnThread(Runnable, ThreadIdentifier)
* @see #stopThread(ThreadIdentifier)
*/
private static final Map<Object, Thread> threads = new ConcurrentHashMap<>();
/**
* This method spawns a new {@link Thread} for the given {@link ThreadIdentifier}.
*
* This method is thread-safe and makes sure there is exactly one "non-interrupted" {@link Thread} associated with
* the identifier.
*
* It can therefore be used as a convenient way to offer a {@code start()} method in the corresponding manager
* instance, without having to worry about starting multiple {@link Thread}s when calling the {@code start()} method
* multiple times.
*
* @param runnable method reference that is responsible for processing the thread (e.g. {@code this::myThreadMethod})
* @param threadIdentifier identifier object that is used to associate the {@link Thread} and synchronize the access
* @return the thread that got spawned by this method
*/
public static Thread spawnThread(Runnable runnable, ThreadIdentifier threadIdentifier) {
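// double-checked locking: only enter the synchronized block if a new Thread may actually have to be spawned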
if (threads.get(threadIdentifier) == null || threads.get(threadIdentifier).isInterrupted()) {
synchronized(threadIdentifier) {
if (threads.get(threadIdentifier) == null || threads.get(threadIdentifier).isInterrupted()) {
threads.put(threadIdentifier, spawnThread(runnable, threadIdentifier.getName()));
}
}
}
return threads.get(threadIdentifier);
}
/**
* This method spawns a new {@link Thread} with the given name.
*
* This method does not actively manage the {@link Thread}s and calling it multiple times will create multiple
* {@link Thread}s. It is internally used by {@link #spawnThread(Runnable, ThreadIdentifier)}.
*
* In addition to creating the {@link Thread}, it also dumps some log messages to inform the user about the creation
* and the lifecycle of the {@link Thread}s.
*
* @param runnable method reference that is responsible for processing the thread (e.g. {@code this::threadMethod})
* @param threadName the name of the {@link Thread} that shall be started
* @return the thread that got spawned by this method
*/
public static Thread spawnThread(Runnable runnable, String threadName) {
logger.info("Starting Thread: " + threadName + " ...");
Thread thread = new Thread(() -> {
logger.info(threadName + " [STARTED]");
runnable.run();
logger.info(threadName + " [STOPPED]");
}, threadName);
thread.start();
return thread;
}
/**
* This method stops the {@link Thread} that is associated with the given {@link ThreadIdentifier}.
*
* This method is thread-safe and only calls the {@link Thread#interrupt()} method once per running {@link Thread}.
* If no running {@link Thread} is associated with the given {@link ThreadIdentifier}, it has no effect.
*
* It can therefore be used as a convenient way to offer a {@code shutdown()} method in the corresponding manager
* instance, without having to worry about interrupting the {@link Thread} multiple times when calling the
* {@code shutdown()} method repeatedly.
*
* @param threadIdentifier the identifier of the thread that we want to stop
* @return the {@link Thread} that will be terminated or null if there was no {@link Thread} associated to the given
* identifier
*/
public static Thread stopThread(ThreadIdentifier threadIdentifier) {
if (threads.get(threadIdentifier) != null && !threads.get(threadIdentifier).isInterrupted()) {
synchronized(threadIdentifier) {
if (threads.get(threadIdentifier) != null && !threads.get(threadIdentifier).isInterrupted()) {
logger.info("Stopping Thread: " + threadIdentifier.getName() + " ...");
threads.get(threadIdentifier).interrupt();
}
}
}
return threads.get(threadIdentifier);
}
/**
* This method is a wrapper for the {@link Thread#sleep(long)} method that correctly handles
* {@link InterruptedException}s and the {@link Thread#isInterrupted()} flag of the calling {@link Thread}.
*
* It first checks if the {@link Thread} was interrupted already and otherwise issues a {@link Thread#sleep(long)}.
* If an {@link InterruptedException} is caught, it restores the interrupted flag and returns {@code false}.
*
* See <a href="https://dzone.com/articles/how-to-handle-the-interruptedexception">Handle InterruptedException</a>
*
* @param timeoutInMS milliseconds that the current Thread should wait
* @return true if the sleep was successful and false if it got interrupted
*/
public static boolean sleep(int timeoutInMS) {
if(Thread.currentThread().isInterrupted()) {
return false;
}
try {
Thread.sleep(timeoutInMS);
return true;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return false;
}
}
}
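// Illustrative usage sketch (an addition for clarity, not part of the IRI sources): a manager
// class pairs one ThreadIdentifier with idempotent start() and shutdown() methods. The class
// name "ExampleMonitor" is hypothetical.
class ExampleMonitor {

    private final ThreadIdentifier monitorThread = new ThreadIdentifier("Example Monitor");

    public void start() {
        ThreadUtils.spawnThread(this::monitorLoop, monitorThread);
    }

    public void shutdown() {
        ThreadUtils.stopThread(monitorThread);
    }

    private void monitorLoop() {
        // sleep() returns false once the Thread gets interrupted, ending the loop cleanly
        while (ThreadUtils.sleep(1000)) {
            // periodic work would go here
        }
    }
}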
| 6,145 | 42.588652 | 121 |
java
|
iri
|
iri-master/src/main/java/com/iota/iri/zmq/MessageQ.java
|
package com.iota.iri.zmq;
import com.iota.iri.conf.ZMQConfig;
import com.iota.iri.utils.IotaIOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.zeromq.ZMQ;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
/**
* Allows publishing of IRI events to a ZeroMQ queue.
*
* <p>
* In order to be able to watch IRI events, and possibly build tools on top of them, this class
* allows IRI to publish operational facts to a ZMQ stream.
* </p>
* <p>
* IRI events are published to this queue by prepending the {@code topic} of the event to the body
* of the {@code message}. The topic describes the type or the source of the event and is represented by
* a short lowercase string.
* Some example topics:
* <ol>
* <li><code>tx</code>: transactions events</li>
* <li><code>lm</code>: milestones events</li>
* <li><code>dnscu</code>: neighbors' DNS update events</li>
* </ol>
* To monitor for activity on a specific address, the topic is instead the {@code Address} to watch.
* For a complete list and detailed topic specification please refer to the README.md.
* </p>
*/
class MessageQ {
private final static Logger LOG = LoggerFactory.getLogger(MessageQ.class);
private final ZMQ.Context context;
private final ZMQ.Socket publisher;
private final ExecutorService publisherService = Executors.newSingleThreadExecutor();
public static MessageQ createWith(ZMQConfig config) {
return new MessageQ(config);
}
/**
* Creates and starts a ZMQ publisher.
*
* @param config {@link ZMQConfig} that should be used.
*/
private MessageQ(ZMQConfig config) {
context = ZMQ.context(config.getZmqThreads());
publisher = context.socket(ZMQ.PUB);
if (config.isZmqEnableTcp()) {
publisher.bind(String.format("tcp://*:%d", config.getZmqPort()));
}
if (config.isZmqEnableIpc()) {
publisher.bind(config.getZmqIpc());
}
}
/**
* Publishes an event to the queue
*
* @param message message body, prepended by the topic string
* @param objects arguments referenced by the message body, similar to a format string
*/
public void publish(String message, Object... objects) {
String toSend = String.format(message, objects);
publisherService.submit(() -> publisher.send(toSend));
}
/**
* Gracefully shuts down the ZMQ publisher, waiting up to 5 seconds for pending messages before closing the sockets.
*/
public void shutdown() {
publisherService.shutdown();
try {
publisherService.awaitTermination(5, TimeUnit.SECONDS);
} catch (InterruptedException e) {
// restore the interrupt flag so that callers can detect the interruption
Thread.currentThread().interrupt();
LOG.error("Publisher service shutdown failed.", e);
}
IotaIOUtils.closeQuietly(publisher);
IotaIOUtils.closeQuietly(context);
}
}
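// Illustrative usage sketch (an addition for clarity, not part of the IRI sources): the topic is
// simply the first token of the published message. The index and hash values are made up for the
// example.
class MessageQExample {

    static void publishMilestoneEvent(ZMQConfig config) {
        MessageQ queue = MessageQ.createWith(config);

        // "lm" topic: milestone event carrying an index and a transaction hash
        queue.publish("lm %d %s", 1000000, "MILESTONE9HASH9PLACEHOLDER");

        queue.shutdown();
    }
}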
| 2,956 | 32.224719 | 107 |
java
|