// File: NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/ConcurrentHistogram.java
/**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicLongArray;
import java.util.concurrent.atomic.AtomicLongFieldUpdater;
import java.util.zip.DataFormatException;
/**
* <h3>An integer values High Dynamic Range (HDR) Histogram that supports safe concurrent recording operations.</h3>
* A ConcurrentHistogram guarantees lossless recording of values into the histogram even when the
* histogram is updated by multiple threads, and supports auto-resize and shift operations that may
* result from or occur concurrently with other recording operations.
* <p>
* It is important to note that concurrent recording, auto-sizing, and value shifting are the only thread-safe
* behaviors provided by {@link ConcurrentHistogram}, and that it is not otherwise synchronized. Specifically, {@link
* ConcurrentHistogram} provides no implicit synchronization that would prevent the contents of the histogram
* from changing during queries, iterations, copies, or addition operations on the histogram. Callers wishing to make
* potentially concurrent, multi-threaded updates that would safely work in the presence of queries, copies, or
* additions of histogram objects should either take care to externally synchronize and/or order their access,
* use the {@link SynchronizedHistogram} variant, or (recommended) use {@link Recorder} or
* {@link SingleWriterRecorder} which are intended for this purpose.
* <p>
* Auto-resizing: When constructed with no specified value range (or when auto-resize is turned on with {@link
* Histogram#setAutoResize}) a {@link Histogram} will auto-resize its dynamic range to include recorded values as
* they are encountered. Note that recording calls that cause auto-resizing may take longer to execute, as resizing
* incurs allocation and copying of internal data structures.
* <p>
* See package description for {@link org.HdrHistogram} for details.
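* <p>
* A minimal usage sketch (the 3600000000L range, the 3-digit precision, and the
* {@code latencyInNanoseconds} variable are illustrative assumptions, not requirements),
* recording from any number of threads and querying elsewhere:
* <br><pre><code>
* ConcurrentHistogram histogram = new ConcurrentHistogram(3600000000L, 3);
* histogram.recordValue(latencyInNanoseconds);      // safe to call concurrently
* long p99 = histogram.getValueAtPercentile(99.0);  // query; not synchronized with recording
* </code></pre>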
*/
@SuppressWarnings("unused")
public class ConcurrentHistogram extends Histogram {
static final AtomicLongFieldUpdater<ConcurrentHistogram> totalCountUpdater =
AtomicLongFieldUpdater.newUpdater(ConcurrentHistogram.class, "totalCount");
volatile long totalCount;
volatile ConcurrentArrayWithNormalizingOffset activeCounts;
volatile ConcurrentArrayWithNormalizingOffset inactiveCounts;
transient WriterReaderPhaser wrp = new WriterReaderPhaser();
@Override
void setIntegerToDoubleValueConversionRatio(final double integerToDoubleValueConversionRatio) {
try {
wrp.readerLock();
inactiveCounts.setDoubleToIntegerValueConversionRatio(1.0 / integerToDoubleValueConversionRatio);
// switch active and inactive:
ConcurrentArrayWithNormalizingOffset tmp = activeCounts;
activeCounts = inactiveCounts;
inactiveCounts = tmp;
wrp.flipPhase();
inactiveCounts.setDoubleToIntegerValueConversionRatio(1.0 / integerToDoubleValueConversionRatio);
// switch active and inactive again:
tmp = activeCounts;
activeCounts = inactiveCounts;
inactiveCounts = tmp;
wrp.flipPhase();
// At this point, both active and inactive have normalizingIndexOffset safely set,
// and the switch in each was done without any writers using the wrong value in flight.
} finally {
wrp.readerUnlock();
}
super.setIntegerToDoubleValueConversionRatio(integerToDoubleValueConversionRatio);
}
@Override
long getCountAtIndex(final int index) {
try {
wrp.readerLock();
assert (countsArrayLength == activeCounts.length());
assert (countsArrayLength == inactiveCounts.length());
long activeCount = activeCounts.get(
normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length()));
long inactiveCount = inactiveCounts.get(
normalizeIndex(index, inactiveCounts.getNormalizingIndexOffset(), inactiveCounts.length()));
return activeCount + inactiveCount;
} finally {
wrp.readerUnlock();
}
}
@Override
long getCountAtNormalizedIndex(final int index) {
try {
wrp.readerLock();
assert (countsArrayLength == activeCounts.length());
assert (countsArrayLength == inactiveCounts.length());
long activeCount = activeCounts.get(index);
long inactiveCount = inactiveCounts.get(index);
return activeCount + inactiveCount;
} finally {
wrp.readerUnlock();
}
}
@Override
void incrementCountAtIndex(final int index) {
long criticalValue = wrp.writerCriticalSectionEnter();
try {
activeCounts.atomicIncrement(
normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length()));
} finally {
wrp.writerCriticalSectionExit(criticalValue);
}
}
@Override
void addToCountAtIndex(final int index, final long value) {
long criticalValue = wrp.writerCriticalSectionEnter();
try {
activeCounts.atomicAdd(
normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length()), value);
} finally {
wrp.writerCriticalSectionExit(criticalValue);
}
}
@Override
void setCountAtIndex(final int index, final long value) {
try {
wrp.readerLock();
assert (countsArrayLength == activeCounts.length());
assert (countsArrayLength == inactiveCounts.length());
activeCounts.lazySet(
normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length()), value);
inactiveCounts.lazySet(
normalizeIndex(index, inactiveCounts.getNormalizingIndexOffset(),
inactiveCounts.length()), 0);
} finally {
wrp.readerUnlock();
}
}
@Override
void setCountAtNormalizedIndex(final int index, final long value) {
try {
wrp.readerLock();
assert (countsArrayLength == activeCounts.length());
assert (countsArrayLength == inactiveCounts.length());
inactiveCounts.lazySet(index, value);
activeCounts.lazySet(index, 0);
} finally {
wrp.readerUnlock();
}
}
@Override
void recordConvertedDoubleValue(final double value) {
long criticalValue = wrp.writerCriticalSectionEnter();
try {
long integerValue = (long) (value * activeCounts.getDoubleToIntegerValueConversionRatio());
int index = countsArrayIndex(integerValue);
activeCounts.atomicIncrement(
normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length()));
updateMinAndMax(integerValue);
incrementTotalCount();
} finally {
wrp.writerCriticalSectionExit(criticalValue);
}
}
@Override
public void recordConvertedDoubleValueWithCount(final double value, final long count)
throws ArrayIndexOutOfBoundsException {
long criticalValue = wrp.writerCriticalSectionEnter();
try {
long integerValue = (long) (value * activeCounts.getDoubleToIntegerValueConversionRatio());
int index = countsArrayIndex(integerValue);
activeCounts.atomicAdd(
normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length()), count);
updateMinAndMax(integerValue);
addToTotalCount(count);
} finally {
wrp.writerCriticalSectionExit(criticalValue);
}
}
@Override
int getNormalizingIndexOffset() {
return activeCounts.getNormalizingIndexOffset();
}
@Override
void setNormalizingIndexOffset(final int normalizingIndexOffset) {
setNormalizingIndexOffset(normalizingIndexOffset, 0,
false, getIntegerToDoubleValueConversionRatio());
}
private void setNormalizingIndexOffset(
final int newNormalizingIndexOffset,
final int shiftedAmount,
final boolean lowestHalfBucketPopulated,
final double newIntegerToDoubleValueConversionRatio) {
try {
wrp.readerLock();
assert (countsArrayLength == activeCounts.length());
assert (countsArrayLength == inactiveCounts.length());
assert (activeCounts.getNormalizingIndexOffset() == inactiveCounts.getNormalizingIndexOffset());
if (newNormalizingIndexOffset == activeCounts.getNormalizingIndexOffset()) {
return; // Nothing to do.
}
setNormalizingIndexOffsetForInactive(newNormalizingIndexOffset, shiftedAmount,
lowestHalfBucketPopulated, newIntegerToDoubleValueConversionRatio);
// switch active and inactive:
ConcurrentArrayWithNormalizingOffset tmp = activeCounts;
activeCounts = inactiveCounts;
inactiveCounts = tmp;
wrp.flipPhase();
setNormalizingIndexOffsetForInactive(newNormalizingIndexOffset, shiftedAmount,
lowestHalfBucketPopulated, newIntegerToDoubleValueConversionRatio);
// switch active and inactive again:
tmp = activeCounts;
activeCounts = inactiveCounts;
inactiveCounts = tmp;
wrp.flipPhase();
// At this point, both active and inactive have normalizingIndexOffset safely set,
// and the switch in each was done without any writers using the wrong value in flight.
} finally {
wrp.readerUnlock();
}
}
private void setNormalizingIndexOffsetForInactive(final int newNormalizingIndexOffset,
final int shiftedAmount,
final boolean lowestHalfBucketPopulated,
final double newIntegerToDoubleValueConversionRatio) {
int zeroIndex;
long inactiveZeroValueCount;
// Save and clear the inactive 0 value count:
zeroIndex = normalizeIndex(0, inactiveCounts.getNormalizingIndexOffset(),
inactiveCounts.length());
inactiveZeroValueCount = inactiveCounts.get(zeroIndex);
inactiveCounts.lazySet(zeroIndex, 0);
// Change the normalizingIndexOffset on the current inactiveCounts:
inactiveCounts.setNormalizingIndexOffset(newNormalizingIndexOffset);
// Handle the inactive lowest half bucket:
if ((shiftedAmount > 0) && lowestHalfBucketPopulated) {
shiftLowestInactiveHalfBucketContentsLeft(shiftedAmount, zeroIndex);
}
// Restore the inactive 0 value count:
zeroIndex = normalizeIndex(0, inactiveCounts.getNormalizingIndexOffset(), inactiveCounts.length());
inactiveCounts.lazySet(zeroIndex, inactiveZeroValueCount);
inactiveCounts.setDoubleToIntegerValueConversionRatio(1.0 / newIntegerToDoubleValueConversionRatio);
}
private void shiftLowestInactiveHalfBucketContentsLeft(final int shiftAmount, final int preShiftZeroIndex) {
final int numberOfBinaryOrdersOfMagnitude = shiftAmount >> subBucketHalfCountMagnitude;
// The lowest inactive half-bucket (not including the 0 value) is special: unlike all other half
// buckets, the lowest half bucket values cannot be scaled by simply changing the
// normalizing offset. Instead, they must be individually re-recorded at the new
// scale, and cleared from the current one.
//
// We know that all half buckets "below" the current lowest one are full of 0s, because
// we would have overflowed otherwise. So we need to shift the values in the current
// lowest half bucket into that range (including the current lowest half bucket itself).
// Iterating up from the lowermost non-zero "from slot" and copying values to the newly
// scaled "to slot" (and then zeroing the "from slot"), will work in a single pass,
// because the scaled "to slot" index will always be a lower index than its or any
// preceding non-scaled "from slot" index:
//
// (Note that we specifically avoid slot 0, as it is directly handled in the outer case)
for (int fromIndex = 1; fromIndex < subBucketHalfCount; fromIndex++) {
long toValue = valueFromIndex(fromIndex) << numberOfBinaryOrdersOfMagnitude;
int toIndex = countsArrayIndex(toValue);
int normalizedToIndex =
normalizeIndex(toIndex, inactiveCounts.getNormalizingIndexOffset(), inactiveCounts.length());
long countAtFromIndex = inactiveCounts.get(fromIndex + preShiftZeroIndex);
inactiveCounts.lazySet(normalizedToIndex, countAtFromIndex);
inactiveCounts.lazySet(fromIndex + preShiftZeroIndex, 0);
}
// Note that the above loop only creates O(N) work for histograms that have values in
// the lowest half-bucket (excluding the 0 value). Histograms that never have values
// there (e.g. all integer value histograms used as internal storage in DoubleHistograms)
// will never loop, and their shifts will remain O(1).
}
@Override
void shiftNormalizingIndexByOffset(final int offsetToAdd,
final boolean lowestHalfBucketPopulated,
final double newIntegerToDoubleValueConversionRatio) {
try {
wrp.readerLock();
assert (countsArrayLength == activeCounts.length());
assert (countsArrayLength == inactiveCounts.length());
int newNormalizingIndexOffset = getNormalizingIndexOffset() + offsetToAdd;
setNormalizingIndexOffset(newNormalizingIndexOffset,
offsetToAdd,
lowestHalfBucketPopulated,
newIntegerToDoubleValueConversionRatio
);
} finally {
wrp.readerUnlock();
}
}
ConcurrentArrayWithNormalizingOffset allocateArray(int length, int normalizingIndexOffset) {
return new AtomicLongArrayWithNormalizingOffset(length, normalizingIndexOffset);
}
@Override
void resize(final long newHighestTrackableValue) {
try {
wrp.readerLock();
assert (countsArrayLength == activeCounts.length());
assert (countsArrayLength == inactiveCounts.length());
int newArrayLength = determineArrayLengthNeeded(newHighestTrackableValue);
int countsDelta = newArrayLength - countsArrayLength;
if (countsDelta <= 0) {
// This resize need was already covered by a concurrent resize op.
return;
}
// Allocate both counts arrays here, so if one allocation fails, neither will "take":
ConcurrentArrayWithNormalizingOffset newInactiveCounts1 =
allocateArray(newArrayLength, inactiveCounts.getNormalizingIndexOffset());
ConcurrentArrayWithNormalizingOffset newInactiveCounts2 =
allocateArray(newArrayLength, activeCounts.getNormalizingIndexOffset());
// Resize the current inactiveCounts:
ConcurrentArrayWithNormalizingOffset oldInactiveCounts = inactiveCounts;
inactiveCounts = newInactiveCounts1;
// Copy inactive contents to newly sized inactiveCounts:
copyInactiveCountsContentsOnResize(oldInactiveCounts, countsDelta);
// switch active and inactive:
ConcurrentArrayWithNormalizingOffset tmp = activeCounts;
activeCounts = inactiveCounts;
inactiveCounts = tmp;
wrp.flipPhase();
// Resize the newly inactiveCounts:
oldInactiveCounts = inactiveCounts;
inactiveCounts = newInactiveCounts2;
// Copy inactive contents to newly sized inactiveCounts:
copyInactiveCountsContentsOnResize(oldInactiveCounts, countsDelta);
// switch active and inactive again:
tmp = activeCounts;
activeCounts = inactiveCounts;
inactiveCounts = tmp;
wrp.flipPhase();
// At this point, both active and inactive have been safely resized,
// and the switch in each was done without any writers modifying it in flight.
// We resized things. We can now make the histogram establish size accordingly for future recordings:
establishSize(newHighestTrackableValue);
assert (countsArrayLength == activeCounts.length());
assert (countsArrayLength == inactiveCounts.length());
} finally {
wrp.readerUnlock();
}
}
void copyInactiveCountsContentsOnResize(
ConcurrentArrayWithNormalizingOffset oldInactiveCounts, int countsDelta) {
int oldNormalizedZeroIndex =
normalizeIndex(0,
oldInactiveCounts.getNormalizingIndexOffset(),
oldInactiveCounts.length());
if (oldNormalizedZeroIndex == 0) {
// Copy old inactive contents to (current) newly sized inactiveCounts, in place:
for (int i = 0; i < oldInactiveCounts.length(); i++) {
inactiveCounts.lazySet(i, oldInactiveCounts.get(i));
}
} else {
// We need to shift the stuff from the zero index and up to the end of the array:
// Copy everything up to the oldNormalizedZeroIndex in place:
for (int fromIndex = 0; fromIndex < oldNormalizedZeroIndex; fromIndex++) {
inactiveCounts.lazySet(fromIndex, oldInactiveCounts.get(fromIndex));
}
// Copy everything from the oldNormalizedZeroIndex to the end with an index delta shift:
for (int fromIndex = oldNormalizedZeroIndex; fromIndex < oldInactiveCounts.length(); fromIndex++) {
int toIndex = fromIndex + countsDelta;
inactiveCounts.lazySet(toIndex, oldInactiveCounts.get(fromIndex));
}
}
}
@Override
public void setAutoResize(final boolean autoResize) {
this.autoResize = true;
}
@Override
void clearCounts() {
try {
wrp.readerLock();
assert (countsArrayLength == activeCounts.length());
assert (countsArrayLength == inactiveCounts.length());
for (int i = 0; i < activeCounts.length(); i++) {
activeCounts.lazySet(i, 0);
inactiveCounts.lazySet(i, 0);
}
totalCountUpdater.set(this, 0);
} finally {
wrp.readerUnlock();
}
}
@Override
public ConcurrentHistogram copy() {
ConcurrentHistogram copy = new ConcurrentHistogram(this);
copy.add(this);
return copy;
}
@Override
public ConcurrentHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) {
ConcurrentHistogram toHistogram = new ConcurrentHistogram(this);
toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples);
return toHistogram;
}
@Override
public long getTotalCount() {
return totalCountUpdater.get(this);
}
@Override
void setTotalCount(final long totalCount) {
totalCountUpdater.set(this, totalCount);
}
@Override
void incrementTotalCount() {
totalCountUpdater.incrementAndGet(this);
}
@Override
void addToTotalCount(final long value) {
totalCountUpdater.addAndGet(this, value);
}
@Override
int _getEstimatedFootprintInBytes() {
return (512 + (2 * 8 * activeCounts.length()));
}
/**
* Construct an auto-resizing ConcurrentHistogram with a lowest discernible value of 1 and an auto-adjusting
* highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2).
*
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public ConcurrentHistogram(final int numberOfSignificantValueDigits) {
this(1, 2, numberOfSignificantValueDigits);
setAutoResize(true);
}
/**
* Construct a ConcurrentHistogram given the Highest value to be tracked and a number of significant decimal
* digits. The histogram will be constructed to implicitly track (distinguish from 0) values as low as 1.
*
* @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
* integer that is {@literal >=} 2.
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public ConcurrentHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) {
this(1, highestTrackableValue, numberOfSignificantValueDigits);
}
/**
* Construct a ConcurrentHistogram given the Lowest and Highest values to be tracked and a number of significant
* decimal digits. Providing a lowestDiscernibleValue is useful in situations where the units used
* for the histogram's values are much smaller than the minimal accuracy required. E.g. when tracking
* time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the
* proper value for lowestDiscernibleValue would be 1000.
*
* @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram.
* Must be a positive integer that is {@literal >=} 1. May be internally rounded
* down to nearest power of 2.
* @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
* integer that is {@literal >=} (2 * lowestDiscernibleValue).
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public ConcurrentHistogram(final long lowestDiscernibleValue, final long highestTrackableValue,
final int numberOfSignificantValueDigits) {
this(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits,
true);
}
/**
* Construct a histogram with the same range settings as a given source histogram,
* duplicating the source's start/end timestamps (but NOT its contents)
* @param source The source histogram to duplicate
*/
public ConcurrentHistogram(final AbstractHistogram source) {
this(source, true);
}
ConcurrentHistogram(final AbstractHistogram source, boolean allocateCountsArray) {
super(source,false);
if (allocateCountsArray) {
activeCounts = new AtomicLongArrayWithNormalizingOffset(countsArrayLength, 0);
inactiveCounts = new AtomicLongArrayWithNormalizingOffset(countsArrayLength, 0);
}
wordSizeInBytes = 8;
}
ConcurrentHistogram(final long lowestDiscernibleValue, final long highestTrackableValue,
final int numberOfSignificantValueDigits, boolean allocateCountsArray) {
super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits,
false);
if (allocateCountsArray) {
activeCounts = new AtomicLongArrayWithNormalizingOffset(countsArrayLength, 0);
inactiveCounts = new AtomicLongArrayWithNormalizingOffset(countsArrayLength, 0);
}
wordSizeInBytes = 8;
}
/**
* Construct a new histogram by decoding it from a ByteBuffer.
* @param buffer The buffer to decode from
* @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
* @return The newly constructed histogram
*/
public static ConcurrentHistogram decodeFromByteBuffer(final ByteBuffer buffer,
final long minBarForHighestTrackableValue) {
return decodeFromByteBuffer(buffer, ConcurrentHistogram.class, minBarForHighestTrackableValue);
}
/**
* Construct a new histogram by decoding it from a compressed form in a ByteBuffer.
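* <p>
* A round-trip sketch (a hedged example; {@code source} stands for any already-populated histogram):
* <br><pre><code>
* ByteBuffer buffer = ByteBuffer.allocate(source.getNeededByteBufferCapacity());
* source.encodeIntoCompressedByteBuffer(buffer);
* buffer.rewind();
* ConcurrentHistogram decoded =
*         ConcurrentHistogram.decodeFromCompressedByteBuffer(buffer, 0);
* </code></pre>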
* @param buffer The buffer to decode from
* @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
* @return The newly constructed histogram
* @throws java.util.zip.DataFormatException on error parsing/decompressing the buffer
*/
public static ConcurrentHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer,
final long minBarForHighestTrackableValue)
throws DataFormatException {
return decodeFromCompressedByteBuffer(buffer, ConcurrentHistogram.class, minBarForHighestTrackableValue);
}
/**
* Construct a new ConcurrentHistogram by decoding it from a String containing a base64 encoded
* compressed histogram representation.
*
* @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram
* @return A ConcurrentHistogram decoded from the string
* @throws DataFormatException on error parsing/decompressing the input
*/
public static ConcurrentHistogram fromString(final String base64CompressedHistogramString)
throws DataFormatException {
return decodeFromCompressedByteBuffer(
ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)),
0);
}
private void readObject(final ObjectInputStream o)
throws IOException, ClassNotFoundException {
o.defaultReadObject();
wrp = new WriterReaderPhaser();
}
@Override
synchronized void fillBufferFromCountsArray(final ByteBuffer buffer) {
try {
wrp.readerLock();
super.fillBufferFromCountsArray(buffer);
} finally {
wrp.readerUnlock();
}
}
interface ConcurrentArrayWithNormalizingOffset {
int getNormalizingIndexOffset();
void setNormalizingIndexOffset(int normalizingIndexOffset);
double getDoubleToIntegerValueConversionRatio();
void setDoubleToIntegerValueConversionRatio(double doubleToIntegerValueConversionRatio);
int getEstimatedFootprintInBytes();
long get(int index);
void atomicIncrement(int index);
void atomicAdd(int index, long valueToAdd);
void lazySet(int index, long newValue);
int length();
}
static class AtomicLongArrayWithNormalizingOffset extends AtomicLongArray
implements ConcurrentArrayWithNormalizingOffset {
private int normalizingIndexOffset;
private double doubleToIntegerValueConversionRatio;
AtomicLongArrayWithNormalizingOffset(int length, int normalizingIndexOffset) {
super(length);
this.normalizingIndexOffset = normalizingIndexOffset;
}
@Override
public int getNormalizingIndexOffset() {
return normalizingIndexOffset;
}
@Override
public void setNormalizingIndexOffset(int normalizingIndexOffset) {
this.normalizingIndexOffset = normalizingIndexOffset;
}
@Override
public double getDoubleToIntegerValueConversionRatio() {
return doubleToIntegerValueConversionRatio;
}
@Override
public void setDoubleToIntegerValueConversionRatio(double doubleToIntegerValueConversionRatio) {
this.doubleToIntegerValueConversionRatio = doubleToIntegerValueConversionRatio;
}
@Override
public int getEstimatedFootprintInBytes() {
return 256 + (8 * this.length());
}
@Override
public void atomicIncrement(int index) {
incrementAndGet(index);
}
@Override
public void atomicAdd(int index, long valueToAdd) {
addAndGet(index, valueToAdd);
}
}
}
// File: NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/PackedConcurrentHistogram.java
/**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram;
import org.HdrHistogram.packedarray.ConcurrentPackedLongArray;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.zip.DataFormatException;
/**
* <h3>An integer values High Dynamic Range (HDR) Histogram that uses a packed internal representation
* and supports safe concurrent recording operations.</h3>
* A {@link PackedConcurrentHistogram} guarantees lossless recording of values into the histogram even when the
* histogram is updated by multiple threads, and supports auto-resize and shift operations that may
* result from or occur concurrently with other recording operations.
* <p>
* {@link PackedConcurrentHistogram} tracks value counts in a packed internal representation optimized
* for typical recorded value profiles, where values are sparse in the value range and tend to be incremented in small unit counts.
* This packed representation tends to require significantly smaller amounts of storage when compared to unpacked
* representations, but can incur additional recording cost due to resizing and repacking operations that may
* occur as previously unrecorded values are encountered.
* <p>
* It is important to note that concurrent recording, auto-sizing, and value shifting are the only thread-safe
* behaviors provided by {@link PackedConcurrentHistogram}, and that it is not otherwise synchronized. Specifically,
* {@link PackedConcurrentHistogram} provides no implicit synchronization that would prevent the contents of the
* histogram from changing during queries, iterations, copies, or addition operations on the histogram. Callers
* wishing to make potentially concurrent, multi-threaded updates that would safely work in the presence of
* queries, copies, or additions of histogram objects should either take care to externally synchronize and/or
* order their access, use {@link Recorder} or {@link SingleWriterRecorder} which are intended for
* this purpose.
* <p>
* Auto-resizing: When constructed with no specified value range (or when auto-resize is turned on with {@link
* Histogram#setAutoResize}) a {@link PackedConcurrentHistogram} will auto-resize its dynamic range to include recorded
* values as they are encountered. Note that recording calls that cause auto-resizing may take longer to execute, as
* resizing incurs allocation and copying of internal data structures.
* <p>
* See package description for {@link org.HdrHistogram} for details.
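* <p>
* A minimal usage sketch (3 significant digits and {@code valueToRecord} are illustrative placeholders):
* <br><pre><code>
* PackedConcurrentHistogram histogram = new PackedConcurrentHistogram(3); // auto-resizing, packed storage
* histogram.recordValue(valueToRecord);                                   // safe to call concurrently
* </code></pre>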
*/
public class PackedConcurrentHistogram extends ConcurrentHistogram {
@Override
ConcurrentArrayWithNormalizingOffset allocateArray(int length, int normalizingIndexOffset) {
return new ConcurrentPackedArrayWithNormalizingOffset(length, normalizingIndexOffset);
}
@Override
void clearCounts() {
try {
wrp.readerLock();
assert (countsArrayLength == activeCounts.length());
assert (countsArrayLength == inactiveCounts.length());
for (int i = 0; i < activeCounts.length(); i++) {
activeCounts.lazySet(i, 0);
inactiveCounts.lazySet(i, 0);
}
totalCountUpdater.set(this, 0);
} finally {
wrp.readerUnlock();
}
}
@Override
public PackedConcurrentHistogram copy() {
PackedConcurrentHistogram copy = new PackedConcurrentHistogram(this);
copy.add(this);
return copy;
}
@Override
public PackedConcurrentHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) {
PackedConcurrentHistogram toHistogram = new PackedConcurrentHistogram(this);
toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples);
return toHistogram;
}
@Override
public long getTotalCount() {
return totalCountUpdater.get(this);
}
@Override
void setTotalCount(final long totalCount) {
totalCountUpdater.set(this, totalCount);
}
@Override
void incrementTotalCount() {
totalCountUpdater.incrementAndGet(this);
}
@Override
void addToTotalCount(final long value) {
totalCountUpdater.addAndGet(this, value);
}
@Override
int _getEstimatedFootprintInBytes() {
try {
wrp.readerLock();
return 128 + activeCounts.getEstimatedFootprintInBytes() + inactiveCounts.getEstimatedFootprintInBytes();
} finally {
wrp.readerUnlock();
}
}
/**
* Construct an auto-resizing PackedConcurrentHistogram with a lowest discernible value of 1 and an auto-adjusting
* highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2).
*
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public PackedConcurrentHistogram(final int numberOfSignificantValueDigits) {
this(1, 2, numberOfSignificantValueDigits);
setAutoResize(true);
}
/**
* Construct a PackedConcurrentHistogram given the Highest value to be tracked and a number of significant decimal
* digits. The histogram will be constructed to implicitly track (distinguish from 0) values as low as 1.
*
* @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
* integer that is {@literal >=} 2.
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public PackedConcurrentHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) {
this(1, highestTrackableValue, numberOfSignificantValueDigits);
}
/**
* Construct a PackedConcurrentHistogram given the Lowest and Highest values to be tracked and a number of significant
* decimal digits. Providing a lowestDiscernibleValue is useful in situations where the units used
* for the histogram's values are much smaller than the minimal accuracy required. E.g. when tracking
* time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the
* proper value for lowestDiscernibleValue would be 1000.
*
* @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram.
* Must be a positive integer that is {@literal >=} 1. May be internally rounded
* down to nearest power of 2.
* @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
* integer that is {@literal >=} (2 * lowestDiscernibleValue).
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public PackedConcurrentHistogram(final long lowestDiscernibleValue, final long highestTrackableValue,
final int numberOfSignificantValueDigits) {
this(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits,
true);
}
/**
* Construct a histogram with the same range settings as a given source histogram,
* duplicating the source's start/end timestamps (but NOT its contents)
* @param source The source histogram to duplicate
*/
public PackedConcurrentHistogram(final AbstractHistogram source) {
this(source, true);
}
PackedConcurrentHistogram(final AbstractHistogram source, boolean allocateCountsArray) {
super(source,false);
if (allocateCountsArray) {
activeCounts = new ConcurrentPackedArrayWithNormalizingOffset(countsArrayLength, 0);
inactiveCounts = new ConcurrentPackedArrayWithNormalizingOffset(countsArrayLength, 0);
}
wordSizeInBytes = 8;
}
PackedConcurrentHistogram(final long lowestDiscernibleValue, final long highestTrackableValue,
final int numberOfSignificantValueDigits, boolean allocateCountsArray) {
super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits,
false);
if (allocateCountsArray) {
activeCounts = new ConcurrentPackedArrayWithNormalizingOffset(countsArrayLength, 0);
inactiveCounts = new ConcurrentPackedArrayWithNormalizingOffset(countsArrayLength, 0);
}
wordSizeInBytes = 8;
}
/**
* Construct a new histogram by decoding it from a ByteBuffer.
* @param buffer The buffer to decode from
* @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
* @return The newly constructed histogram
*/
public static PackedConcurrentHistogram decodeFromByteBuffer(final ByteBuffer buffer,
final long minBarForHighestTrackableValue) {
return decodeFromByteBuffer(buffer, PackedConcurrentHistogram.class, minBarForHighestTrackableValue);
}
/**
* Construct a new histogram by decoding it from a compressed form in a ByteBuffer.
* @param buffer The buffer to decode from
* @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
* @return The newly constructed histogram
* @throws DataFormatException on error parsing/decompressing the buffer
*/
public static PackedConcurrentHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer,
final long minBarForHighestTrackableValue)
throws DataFormatException {
return decodeFromCompressedByteBuffer(buffer, PackedConcurrentHistogram.class, minBarForHighestTrackableValue);
}
/**
* Construct a new PackedConcurrentHistogram by decoding it from a String containing a base64 encoded
* compressed histogram representation.
*
* @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram
* @return A PackedConcurrentHistogram decoded from the string
* @throws DataFormatException on error parsing/decompressing the input
*/
public static PackedConcurrentHistogram fromString(final String base64CompressedHistogramString)
throws DataFormatException {
return decodeFromCompressedByteBuffer(
ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)),
0);
}
private void readObject(final ObjectInputStream o)
throws IOException, ClassNotFoundException {
o.defaultReadObject();
wrp = new WriterReaderPhaser();
}
@Override
synchronized void fillBufferFromCountsArray(final ByteBuffer buffer) {
try {
wrp.readerLock();
super.fillBufferFromCountsArray(buffer);
} finally {
wrp.readerUnlock();
}
}
static class ConcurrentPackedArrayWithNormalizingOffset
implements ConcurrentArrayWithNormalizingOffset, Serializable {
private ConcurrentPackedLongArray packedCounts;
private int normalizingIndexOffset;
private double doubleToIntegerValueConversionRatio;
ConcurrentPackedArrayWithNormalizingOffset(int length, int normalizingIndexOffset) {
packedCounts = new ConcurrentPackedLongArray(length);
this.normalizingIndexOffset = normalizingIndexOffset;
}
public int getNormalizingIndexOffset() {
return normalizingIndexOffset;
}
public void setNormalizingIndexOffset(int normalizingIndexOffset) {
this.normalizingIndexOffset = normalizingIndexOffset;
}
public double getDoubleToIntegerValueConversionRatio() {
return doubleToIntegerValueConversionRatio;
}
public void setDoubleToIntegerValueConversionRatio(double doubleToIntegerValueConversionRatio) {
this.doubleToIntegerValueConversionRatio = doubleToIntegerValueConversionRatio;
}
@Override
public long get(int index) {
return packedCounts.get(index);
}
@Override
public void atomicIncrement(int index) {
packedCounts.increment(index);
}
@Override
public void atomicAdd(int index, long valueToAdd) {
packedCounts.add(index, valueToAdd);
}
@Override
public void lazySet(int index, long newValue) {
packedCounts.set(index, newValue);
}
@Override
public int length() {
return packedCounts.length();
}
@Override
public int getEstimatedFootprintInBytes() {
return 128 + (8 * packedCounts.getPhysicalLength());
}
}
}
// File: NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/DoubleValueRecorder.java
package org.HdrHistogram;
public interface DoubleValueRecorder {
/**
* Record a value
*
* @param value The value to be recorded
* @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range
*/
void recordValue(double value) throws ArrayIndexOutOfBoundsException;
/**
* Record a value (adding to the value's current count)
*
* @param value The value to be recorded
* @param count The number of occurrences of this value to record
* @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range
*/
void recordValueWithCount(double value, long count) throws ArrayIndexOutOfBoundsException;
/**
* Record a value.
* <p>
* To compensate for the loss of sampled values when a recorded value is larger than the expected
* interval between value samples, will auto-generate an additional series of decreasingly-smaller
* (down to the expectedIntervalBetweenValueSamples) value records.
* <p>
* Note: This is an at-recording correction method, as opposed to the post-recording correction method provided
* by {@link DoubleHistogram#copyCorrectedForCoordinatedOmission(double)}.
* The two methods are mutually exclusive, and only one of the two should be used on a given data set to correct
* for the same coordinated omission issue.
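* <p>
* For example (illustrative numbers only): recording a value of 400 with an
* expectedIntervalBetweenValueSamples of 100 records the 400 value and auto-generates
* additional records for 300, 200, and 100.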
*
* @param value The value to record
* @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add
* auto-generated value records as appropriate if value is larger
* than expectedIntervalBetweenValueSamples
* @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range
*/
void recordValueWithExpectedInterval(double value, double expectedIntervalBetweenValueSamples)
throws ArrayIndexOutOfBoundsException;
/**
* Reset the contents and collected stats
*/
void reset();
}
// File: NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/Recorder.java
/**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram;
import java.util.concurrent.atomic.AtomicLong;
/**
* Records integer values, and provides stable interval {@link Histogram} samples from
* live recorded data without interrupting or stalling active recording of values. Each interval
* histogram provided contains all value counts accumulated since the previous interval histogram
* was taken.
* <p>
* This pattern is commonly used in logging interval histogram information while recording is ongoing.
* <p>
* {@link Recorder} supports concurrent
* {@link Recorder#recordValue} or
* {@link Recorder#recordValueWithExpectedInterval} calls.
* Recording calls are wait-free on architectures that support atomic increment operations, and
* are lock-free on architectures that do not.
* <p>
* A common pattern for using a {@link Recorder} looks like this:
* <br><pre><code>
* Recorder recorder = new Recorder(2); // Two decimal point accuracy
* Histogram intervalHistogram = null;
* ...
* [start of some loop construct that periodically wants to grab an interval histogram]
* ...
* // Get interval histogram, recycling previous interval histogram:
* intervalHistogram = recorder.getIntervalHistogram(intervalHistogram);
* histogramLogWriter.outputIntervalHistogram(intervalHistogram);
* ...
* [end of loop construct]
* </code></pre>
*
*/
public class Recorder implements ValueRecorder, IntervalHistogramProvider<Histogram> {
private static AtomicLong instanceIdSequencer = new AtomicLong(1);
private final long instanceId = instanceIdSequencer.getAndIncrement();
private final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser();
private volatile Histogram activeHistogram;
private Histogram inactiveHistogram;
/**
* Construct an auto-resizing {@link Recorder} with a lowest discernible value of
* 1 and an auto-adjusting highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2).
* <p>
* Depending on the value of the <b><code>packed</code></b> parameter, {@link Recorder} can be configured to
* track value counts in a packed internal representation optimized for typical recorded value profiles, where values are
* sparse in the value range and tend to be incremented in small unit counts. This packed representation tends
* to require significantly smaller amounts of storage when compared to unpacked representations, but can incur
* additional recording cost due to resizing and repacking operations that may
* occur as previously unrecorded values are encountered.
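* <p>
* A minimal construction sketch (3 significant digits here is an illustrative choice, not a requirement):
* <br><pre><code>
* Recorder packedRecorder = new Recorder(3, true); // auto-resizing, packed internal storage
* </code></pre>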
*
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
* @param packed Specifies whether the recorder will use a packed internal representation or not.
*/
public Recorder(final int numberOfSignificantValueDigits, boolean packed) {
activeHistogram = packed ?
new InternalPackedConcurrentHistogram(instanceId, numberOfSignificantValueDigits) :
new InternalConcurrentHistogram(instanceId, numberOfSignificantValueDigits);
inactiveHistogram = null;
activeHistogram.setStartTimeStamp(System.currentTimeMillis());
}
/**
* Construct an auto-resizing {@link Recorder} with a lowest discernible value of
* 1 and an auto-adjusting highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2).
*
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public Recorder(final int numberOfSignificantValueDigits) {
this(numberOfSignificantValueDigits, false);
}
/**
* Construct a {@link Recorder} given the highest value to be tracked and a number of significant
* decimal digits. The histogram will be constructed to implicitly track (distinguish from 0) values as low as 1.
*
* @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
* integer that is {@literal >=} 2.
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public Recorder(final long highestTrackableValue,
final int numberOfSignificantValueDigits) {
this(1, highestTrackableValue, numberOfSignificantValueDigits);
}
/**
* Construct a {@link Recorder} given the Lowest and highest values to be tracked and a number
* of significant decimal digits. Providing a lowestDiscernibleValue is useful is situations where the units used
* for the histogram's values are much smaller that the minimal accuracy required. E.g. when tracking
* time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the
* proper value for lowestDiscernibleValue would be 1000.
*
* @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram.
* Must be a positive integer that is {@literal >=} 1. May be internally rounded
* down to nearest power of 2.
* @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
* integer that is {@literal >=} (2 * lowestDiscernibleValue).
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public Recorder(final long lowestDiscernibleValue,
final long highestTrackableValue,
final int numberOfSignificantValueDigits) {
activeHistogram = new InternalAtomicHistogram(
instanceId, lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits);
inactiveHistogram = null;
activeHistogram.setStartTimeStamp(System.currentTimeMillis());
}
/**
* Record a value
* @param value the value to record
* @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue
*/
@Override
public void recordValue(final long value) throws ArrayIndexOutOfBoundsException {
long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
try {
activeHistogram.recordValue(value);
} finally {
recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
}
}
/**
* Record a value in the histogram (adding to the value's current count)
*
* @param value The value to be recorded
* @param count The number of occurrences of this value to record
* @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue
*/
@Override
public void recordValueWithCount(final long value, final long count) throws ArrayIndexOutOfBoundsException {
long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
try {
activeHistogram.recordValueWithCount(value, count);
} finally {
recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
}
}
/**
* Record a value
* <p>
* To compensate for the loss of sampled values when a recorded value is larger than the expected
* interval between value samples, Histogram will auto-generate an additional series of decreasingly-smaller
* (down to the expectedIntervalBetweenValueSamples) value records.
* <p>
* See related notes {@link AbstractHistogram#recordValueWithExpectedInterval(long, long)}
* for more explanations about coordinated omission and expected interval correction.
*
* @param value The value to record
* @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add
* auto-generated value records as appropriate if value is larger
* than expectedIntervalBetweenValueSamples
* @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue
*/
@Override
public void recordValueWithExpectedInterval(final long value, final long expectedIntervalBetweenValueSamples)
throws ArrayIndexOutOfBoundsException {
long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
try {
activeHistogram.recordValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples);
} finally {
recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
}
}
@Override
public synchronized Histogram getIntervalHistogram() {
return getIntervalHistogram(null);
}
@Override
public synchronized Histogram getIntervalHistogram(Histogram histogramToRecycle) {
return getIntervalHistogram(histogramToRecycle, true);
}
@Override
public synchronized Histogram getIntervalHistogram(Histogram histogramToRecycle,
boolean enforceContainingInstance) {
// Verify that replacement histogram can validly be used as an inactive histogram replacement:
validateFitAsReplacementHistogram(histogramToRecycle, enforceContainingInstance);
inactiveHistogram = histogramToRecycle;
performIntervalSample();
Histogram sampledHistogram = inactiveHistogram;
inactiveHistogram = null; // Once we expose the sample, we can't reuse it internally until it is recycled
return sampledHistogram;
}
@Override
public synchronized void getIntervalHistogramInto(Histogram targetHistogram) {
performIntervalSample();
inactiveHistogram.copyInto(targetHistogram);
}
/**
* Reset any value counts accumulated thus far.
*/
@Override
public synchronized void reset() {
// the currently inactive histogram is reset each time we flip. So flipping twice resets both:
performIntervalSample();
performIntervalSample();
}
private void performIntervalSample() {
try {
recordingPhaser.readerLock();
// Make sure we have an inactive version to flip in:
if (inactiveHistogram == null) {
if (activeHistogram instanceof InternalAtomicHistogram) {
inactiveHistogram = new InternalAtomicHistogram(
instanceId,
activeHistogram.getLowestDiscernibleValue(),
activeHistogram.getHighestTrackableValue(),
activeHistogram.getNumberOfSignificantValueDigits());
} else if (activeHistogram instanceof InternalConcurrentHistogram) {
inactiveHistogram = new InternalConcurrentHistogram(
instanceId,
activeHistogram.getNumberOfSignificantValueDigits());
} else if (activeHistogram instanceof InternalPackedConcurrentHistogram) {
inactiveHistogram = new InternalPackedConcurrentHistogram(
instanceId,
activeHistogram.getNumberOfSignificantValueDigits());
} else {
throw new IllegalStateException("Unexpected internal histogram type for activeHistogram");
}
}
inactiveHistogram.reset();
// Swap active and inactive histograms:
final Histogram tempHistogram = inactiveHistogram;
inactiveHistogram = activeHistogram;
activeHistogram = tempHistogram;
// Mark end time of previous interval and start time of new one:
long now = System.currentTimeMillis();
activeHistogram.setStartTimeStamp(now);
inactiveHistogram.setEndTimeStamp(now);
// Make sure we are not in the middle of recording a value on the previously active histogram:
// Flip phase to make sure no recordings that were in flight pre-flip are still active:
recordingPhaser.flipPhase(500000L /* yield in 0.5 msec units if needed */);
} finally {
recordingPhaser.readerUnlock();
}
}
private static class InternalAtomicHistogram extends AtomicHistogram {
private final long containingInstanceId;
private InternalAtomicHistogram(long id,
long lowestDiscernibleValue,
long highestTrackableValue,
int numberOfSignificantValueDigits) {
super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits);
this.containingInstanceId = id;
}
}
private static class InternalConcurrentHistogram extends ConcurrentHistogram {
private final long containingInstanceId;
private InternalConcurrentHistogram(long id, int numberOfSignificantValueDigits) {
super(numberOfSignificantValueDigits);
this.containingInstanceId = id;
}
}
private static class InternalPackedConcurrentHistogram extends PackedConcurrentHistogram {
private final long containingInstanceId;
private InternalPackedConcurrentHistogram(long id, int numberOfSignificantValueDigits) {
super(numberOfSignificantValueDigits);
this.containingInstanceId = id;
}
}
private void validateFitAsReplacementHistogram(Histogram replacementHistogram,
boolean enforceContainingInstance) {
boolean bad = true;
if (replacementHistogram == null) {
bad = false;
} else if (replacementHistogram instanceof InternalAtomicHistogram) {
if ((activeHistogram instanceof InternalAtomicHistogram)
&&
((!enforceContainingInstance) ||
(((InternalAtomicHistogram)replacementHistogram).containingInstanceId ==
((InternalAtomicHistogram)activeHistogram).containingInstanceId)
)) {
bad = false;
}
} else if (replacementHistogram instanceof InternalConcurrentHistogram) {
if ((activeHistogram instanceof InternalConcurrentHistogram)
&&
((!enforceContainingInstance) ||
(((InternalConcurrentHistogram)replacementHistogram).containingInstanceId ==
((InternalConcurrentHistogram)activeHistogram).containingInstanceId)
)) {
bad = false;
}
} else if (replacementHistogram instanceof InternalPackedConcurrentHistogram) {
if ((activeHistogram instanceof InternalPackedConcurrentHistogram)
&&
((!enforceContainingInstance) ||
(((InternalPackedConcurrentHistogram)replacementHistogram).containingInstanceId ==
((InternalPackedConcurrentHistogram)activeHistogram).containingInstanceId)
)) {
bad = false;
}
}
if (bad) {
throw new IllegalArgumentException("replacement histogram must have been obtained via a previous" +
" getIntervalHistogram() call from this " + this.getClass().getName() +
(enforceContainingInstance ? " instance" : " class"));
}
}
}
// File: NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/Base64Helper.java
/**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram;
import java.lang.reflect.Method;
/**
* Base64Helper exists to bridge inconsistencies in Java SE support of Base64 encoding and decoding.
* Earlier Java SE platforms (up to and including Java SE 8) supported base64 encode/decode via the
* javax.xml.bind.DatatypeConverter class, which was deprecated and eventually removed in Java SE 9.
* Later Java SE platforms (Java SE 8 and later) support base64 encode/decode via the
* java.util.Base64 class (first introduced in Java SE 8, and not available on e.g. Java SE 6 or 7).
*
* This makes it "hard" to write a single piece of source code that deals with base64 encodings and
* will compile and run on e.g. Java SE 7 AND Java SE 9. And such common source is a common need for
* libraries. This class is intended to encapsulate this "hard"-ness and hide the ugly pretzel-twisting
* needed under the covers.
*
* Base64Helper provides a common API that works across Java SE 6..9 (and beyond hopefully), and
* uses late binding (Reflection) internally to avoid javac-compile-time dependencies on a specific
* Java SE version (e.g. beyond 7 or before 9).
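* <p>
* A round-trip sketch (the byte values are arbitrary placeholders):
* <pre><code>
* byte[] payload = new byte[] {0x48, 0x44, 0x52};
* String encoded = Base64Helper.printBase64Binary(payload);
* byte[] decoded = Base64Helper.parseBase64Binary(encoded);
* </code></pre>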
*
*/
class Base64Helper {
/**
* Converts an array of bytes into a Base64 string.
*
* @param binaryArray A binary encoded input array
* @return a String containing the Base64 encoded equivalent of the binary input
*/
static String printBase64Binary(byte [] binaryArray) {
try {
return (String) encodeMethod.invoke(encoderObj, binaryArray);
} catch (Throwable e) {
throw new UnsupportedOperationException("Failed to use platform's base64 encode method");
}
}
/**
* Converts a Base64 encoded String to a byte array
*
* @param base64input A base64-encoded input String
* @return a byte array containing the binary representation equivalent of the Base64 encoded input
*/
static byte[] parseBase64Binary(String base64input) {
try {
return (byte []) decodeMethod.invoke(decoderObj, base64input);
} catch (Throwable e) {
throw new UnsupportedOperationException("Failed to use platform's base64 decode method");
}
}
private static Method decodeMethod;
private static Method encodeMethod;
// encoderObj and decoderObj are used in non-static method forms, and
// irrelevant for static method forms:
private static Object decoderObj;
private static Object encoderObj;
static {
try {
Class<?> javaUtilBase64Class = Class.forName("java.util.Base64");
Method getDecoderMethod = javaUtilBase64Class.getMethod("getDecoder");
decoderObj = getDecoderMethod.invoke(null);
decodeMethod = decoderObj.getClass().getMethod("decode", String.class);
Method getEncoderMethod = javaUtilBase64Class.getMethod("getEncoder");
encoderObj = getEncoderMethod.invoke(null);
encodeMethod = encoderObj.getClass().getMethod("encodeToString", byte[].class);
} catch (Throwable e) {
decodeMethod = null;
encodeMethod = null;
}
if (encodeMethod == null) {
decoderObj = null;
encoderObj = null;
try {
Class<?> javaxXmlBindDatatypeConverterClass = Class.forName("javax.xml.bind.DatatypeConverter");
decodeMethod = javaxXmlBindDatatypeConverterClass.getMethod("parseBase64Binary", String.class);
encodeMethod = javaxXmlBindDatatypeConverterClass.getMethod("printBase64Binary", byte[].class);
} catch (Throwable e) {
decodeMethod = null;
encodeMethod = null;
}
}
}
}
// File: NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/SynchronizedHistogram.java
/**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.PrintStream;
import java.nio.ByteBuffer;
import java.util.zip.DataFormatException;
/**
* <h3>An integer values High Dynamic Range (HDR) Histogram that is synchronized as a whole</h3>
* <p>
* A {@link SynchronizedHistogram} is a variant of {@link Histogram} that is
* synchronized as a whole, such that queries, copying, and addition operations are atomic with relation to
* modification on the {@link SynchronizedHistogram}, and such that external accessors (e.g. iterations on the
* histogram data) that synchronize on the {@link SynchronizedHistogram} instance can safely assume that no
* modifications to the histogram data occur within their synchronized block.
* <p>
* It is important to note that synchronization can result in blocking recording calls. If non-blocking recording
* operations are required, consider using {@link ConcurrentHistogram}, {@link AtomicHistogram}, or (recommended)
* {@link Recorder} or {@link org.HdrHistogram.SingleWriterRecorder} which were intended for concurrent operations.
* <p>
* See package description for {@link org.HdrHistogram} and {@link org.HdrHistogram.Histogram} for more details.
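 * <p>
 * A minimal usage sketch (illustrative, not part of the original documentation) of an external reader
 * synchronizing on the histogram instance so that concurrently recording threads cannot modify the data
 * while it is being iterated:
 * <br><pre><code>
 * SynchronizedHistogram histogram = new SynchronizedHistogram(3600000000000L, 3);
 * ...
 * // Reader thread (recording threads call histogram.recordValue(value) elsewhere):
 * synchronized (histogram) {
 *     for (HistogramIterationValue v : histogram.recordedValues()) {
 *         System.out.println(v.getValueIteratedTo() + " : " + v.getCountAtValueIteratedTo());
 *     }
 * }
 * </code></pre>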
*/
public class SynchronizedHistogram extends Histogram {
/**
* Construct an auto-resizing SynchronizedHistogram with a lowest discernible value of 1 and an auto-adjusting
     * highestTrackableValue. Can auto-resize to track values up to (Long.MAX_VALUE / 2).
*
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public SynchronizedHistogram(final int numberOfSignificantValueDigits) {
this(1, 2, numberOfSignificantValueDigits);
setAutoResize(true);
}
/**
* Construct a SynchronizedHistogram given the Highest value to be tracked and a number of significant decimal digits. The
* histogram will be constructed to implicitly track (distinguish from 0) values as low as 1.
*
* @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
* integer that is {@literal >=} 2.
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public SynchronizedHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) {
this(1, highestTrackableValue, numberOfSignificantValueDigits);
}
/**
* Construct a SynchronizedHistogram given the Lowest and Highest values to be tracked and a number of significant
     * decimal digits. Providing a lowestDiscernibleValue is useful in situations where the units used
     * for the histogram's values are much smaller than the minimal accuracy required. E.g. when tracking
* time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the
* proper value for lowestDiscernibleValue would be 1000.
*
* @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram.
* Must be a positive integer that is {@literal >=} 1. May be internally rounded
* down to nearest power of 2.
* @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
* integer that is {@literal >=} (2 * lowestDiscernibleValue).
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public SynchronizedHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, final int numberOfSignificantValueDigits) {
super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits);
}
/**
* Construct a histogram with the same range settings as a given source histogram,
     * duplicating the source's start/end timestamps (but NOT its contents)
* @param source The source histogram to duplicate
*/
public SynchronizedHistogram(final AbstractHistogram source) {
super(source);
}
/**
* Construct a new histogram by decoding it from a ByteBuffer.
* @param buffer The buffer to decode from
* @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
* @return The newly constructed histogram
*/
public static SynchronizedHistogram decodeFromByteBuffer(final ByteBuffer buffer,
final long minBarForHighestTrackableValue) {
return decodeFromByteBuffer(buffer, SynchronizedHistogram.class, minBarForHighestTrackableValue);
}
/**
* Construct a new histogram by decoding it from a compressed form in a ByteBuffer.
* @param buffer The buffer to decode from
* @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
* @return The newly constructed histogram
* @throws DataFormatException on error parsing/decompressing the buffer
*/
public static SynchronizedHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer,
final long minBarForHighestTrackableValue) throws DataFormatException {
return decodeFromCompressedByteBuffer(buffer, SynchronizedHistogram.class, minBarForHighestTrackableValue);
}
/**
* Construct a new SynchronizedHistogram by decoding it from a String containing a base64 encoded
* compressed histogram representation.
*
* @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram
* @return A SynchronizedHistogram decoded from the string
* @throws DataFormatException on error parsing/decompressing the input
*/
public static SynchronizedHistogram fromString(final String base64CompressedHistogramString)
throws DataFormatException {
return decodeFromCompressedByteBuffer(
ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)),
0);
}
@Override
public synchronized long getTotalCount() {
return super.getTotalCount();
}
@Override
public synchronized boolean isAutoResize() {
return super.isAutoResize();
}
@Override
public synchronized void setAutoResize(boolean autoResize) {
super.setAutoResize(autoResize);
}
@Override
public synchronized void recordValue(final long value) throws ArrayIndexOutOfBoundsException {
super.recordValue(value);
}
@Override
public synchronized void recordValueWithCount(final long value, final long count) throws ArrayIndexOutOfBoundsException {
super.recordValueWithCount(value, count);
}
@Override
public synchronized void recordValueWithExpectedInterval(final long value, final long expectedIntervalBetweenValueSamples)
throws ArrayIndexOutOfBoundsException {
super.recordValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples);
}
/**
* @deprecated
*/
@SuppressWarnings("deprecation")
@Override
public synchronized void recordValue(final long value, final long expectedIntervalBetweenValueSamples)
throws ArrayIndexOutOfBoundsException {
super.recordValue(value, expectedIntervalBetweenValueSamples);
}
@Override
public synchronized void reset() {
super.reset();
}
@Override
public synchronized SynchronizedHistogram copy() {
SynchronizedHistogram toHistogram = new SynchronizedHistogram(this);
toHistogram.add(this);
return toHistogram;
}
@Override
public synchronized SynchronizedHistogram copyCorrectedForCoordinatedOmission(
final long expectedIntervalBetweenValueSamples) {
SynchronizedHistogram toHistogram = new SynchronizedHistogram(this);
toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples);
return toHistogram;
}
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
@Override
public void copyInto(final AbstractHistogram targetHistogram) {
// Synchronize copyInto(). Avoid deadlocks by synchronizing in order of construction identity count.
if (identity < targetHistogram.identity) {
synchronized (this) {
//noinspection SynchronizationOnLocalVariableOrMethodParameter
synchronized (targetHistogram) {
super.copyInto(targetHistogram);
}
}
} else {
synchronized (targetHistogram) {
synchronized (this) {
super.copyInto(targetHistogram);
}
}
}
}
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
@Override
public void copyIntoCorrectedForCoordinatedOmission(final AbstractHistogram targetHistogram,
final long expectedIntervalBetweenValueSamples) {
// Synchronize copyIntoCorrectedForCoordinatedOmission(). Avoid deadlocks by synchronizing in order
// of construction identity count.
if (identity < targetHistogram.identity) {
synchronized (this) {
synchronized (targetHistogram) {
super.copyIntoCorrectedForCoordinatedOmission(targetHistogram, expectedIntervalBetweenValueSamples);
}
}
} else {
synchronized (targetHistogram) {
synchronized (this) {
super.copyIntoCorrectedForCoordinatedOmission(targetHistogram, expectedIntervalBetweenValueSamples);
}
}
}
}
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
@Override
public void add(final AbstractHistogram otherHistogram) {
// Synchronize add(). Avoid deadlocks by synchronizing in order of construction identity count.
if (identity < otherHistogram.identity) {
synchronized (this) {
synchronized (otherHistogram) {
super.add(otherHistogram);
}
}
} else {
synchronized (otherHistogram) {
synchronized (this) {
super.add(otherHistogram);
}
}
}
}
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
@Override
public void subtract(final AbstractHistogram otherHistogram)
throws ArrayIndexOutOfBoundsException, IllegalArgumentException {
// Synchronize subtract(). Avoid deadlocks by synchronizing in order of construction identity count.
if (identity < otherHistogram.identity) {
synchronized (this) {
synchronized (otherHistogram) {
super.subtract(otherHistogram);
}
}
} else {
synchronized (otherHistogram) {
synchronized (this) {
super.subtract(otherHistogram);
}
}
}
}
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
@Override
public void addWhileCorrectingForCoordinatedOmission(final AbstractHistogram fromHistogram,
final long expectedIntervalBetweenValueSamples) {
// Synchronize addWhileCorrectingForCoordinatedOmission(). Avoid deadlocks by synchronizing in
// order of construction identity count.
if (identity < fromHistogram.identity) {
synchronized (this) {
synchronized (fromHistogram) {
super.addWhileCorrectingForCoordinatedOmission(fromHistogram, expectedIntervalBetweenValueSamples);
}
}
} else {
synchronized (fromHistogram) {
synchronized (this) {
super.addWhileCorrectingForCoordinatedOmission(fromHistogram, expectedIntervalBetweenValueSamples);
}
}
}
}
@Override
public synchronized void shiftValuesLeft(final int numberOfBinaryOrdersOfMagnitude) {
super.shiftValuesLeft(numberOfBinaryOrdersOfMagnitude);
}
@Override
public synchronized void shiftValuesRight(final int numberOfBinaryOrdersOfMagnitude) {
super.shiftValuesRight(numberOfBinaryOrdersOfMagnitude);
}
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
@Override
public boolean equals(final Object other){
if ( this == other ) {
return true;
}
if (other instanceof AbstractHistogram) {
AbstractHistogram otherHistogram = (AbstractHistogram) other;
if (identity < otherHistogram.identity) {
synchronized (this) {
synchronized (otherHistogram) {
return super.equals(otherHistogram);
}
}
} else {
synchronized (otherHistogram) {
synchronized (this) {
return super.equals(otherHistogram);
}
}
}
} else {
synchronized (this) {
return super.equals(other);
}
}
}
@Override
public synchronized int hashCode() {
return super.hashCode();
}
@Override
public synchronized long getLowestDiscernibleValue() {
return super.getLowestDiscernibleValue();
}
@Override
public synchronized long getHighestTrackableValue() {
return super.getHighestTrackableValue();
}
@Override
public synchronized int getNumberOfSignificantValueDigits() {
return super.getNumberOfSignificantValueDigits();
}
@Override
public synchronized long sizeOfEquivalentValueRange(final long value) {
return super.sizeOfEquivalentValueRange(value);
}
@Override
public synchronized long lowestEquivalentValue(final long value) {
return super.lowestEquivalentValue(value);
}
@Override
public synchronized long highestEquivalentValue(final long value) {
return super.highestEquivalentValue(value);
}
@Override
public synchronized long medianEquivalentValue(final long value) {
return super.medianEquivalentValue(value);
}
@Override
public synchronized long nextNonEquivalentValue(final long value) {
return super.nextNonEquivalentValue(value);
}
@Override
public synchronized boolean valuesAreEquivalent(final long value1, final long value2) {
return super.valuesAreEquivalent(value1, value2);
}
@Override
public synchronized int getEstimatedFootprintInBytes() {
return super.getEstimatedFootprintInBytes();
}
@Override
public synchronized long getStartTimeStamp() {
return super.getStartTimeStamp();
}
@Override
public synchronized void setStartTimeStamp(final long timeStampMsec) {
super.setStartTimeStamp(timeStampMsec);
}
@Override
public synchronized long getEndTimeStamp() {
return super.getEndTimeStamp();
}
@Override
public synchronized void setEndTimeStamp(final long timeStampMsec) {
super.setEndTimeStamp(timeStampMsec);
}
@Override
public synchronized long getMinValue() {
return super.getMinValue();
}
@Override
public synchronized long getMaxValue() {
return super.getMaxValue();
}
@Override
public synchronized long getMinNonZeroValue() {
return super.getMinNonZeroValue();
}
@Override
public synchronized double getMaxValueAsDouble() {
return super.getMaxValueAsDouble();
}
@Override
public synchronized double getMean() {
return super.getMean();
}
@Override
public synchronized double getStdDeviation() {
return super.getStdDeviation();
}
@Override
public synchronized long getValueAtPercentile(final double percentile) {
return super.getValueAtPercentile(percentile);
}
@Override
public synchronized double getPercentileAtOrBelowValue(final long value) {
return super.getPercentileAtOrBelowValue(value);
}
@Override
public synchronized long getCountBetweenValues(final long lowValue, final long highValue) throws ArrayIndexOutOfBoundsException {
return super.getCountBetweenValues(lowValue, highValue);
}
@Override
public synchronized long getCountAtValue(final long value) throws ArrayIndexOutOfBoundsException {
return super.getCountAtValue(value);
}
@Override
public synchronized Percentiles percentiles(final int percentileTicksPerHalfDistance) {
return super.percentiles(percentileTicksPerHalfDistance);
}
@Override
public synchronized LinearBucketValues linearBucketValues(final long valueUnitsPerBucket) {
return super.linearBucketValues(valueUnitsPerBucket);
}
@Override
public synchronized LogarithmicBucketValues logarithmicBucketValues(final long valueUnitsInFirstBucket, final double logBase) {
return super.logarithmicBucketValues(valueUnitsInFirstBucket, logBase);
}
@Override
public synchronized RecordedValues recordedValues() {
return super.recordedValues();
}
@Override
public synchronized AllValues allValues() {
return super.allValues();
}
@Override
public synchronized void outputPercentileDistribution(final PrintStream printStream,
final Double outputValueUnitScalingRatio) {
super.outputPercentileDistribution(printStream, outputValueUnitScalingRatio);
}
@Override
public synchronized void outputPercentileDistribution(final PrintStream printStream,
final int percentileTicksPerHalfDistance,
final Double outputValueUnitScalingRatio) {
super.outputPercentileDistribution(printStream, percentileTicksPerHalfDistance, outputValueUnitScalingRatio);
}
@Override
public synchronized void outputPercentileDistribution(final PrintStream printStream,
final int percentileTicksPerHalfDistance,
final Double outputValueUnitScalingRatio,
final boolean useCsvFormat) {
super.outputPercentileDistribution(printStream, percentileTicksPerHalfDistance, outputValueUnitScalingRatio, useCsvFormat);
}
@Override
public synchronized int getNeededByteBufferCapacity() {
return super.getNeededByteBufferCapacity();
}
@Override
public synchronized int encodeIntoByteBuffer(final ByteBuffer buffer) {
return super.encodeIntoByteBuffer(buffer);
}
@Override
public synchronized int encodeIntoCompressedByteBuffer(
final ByteBuffer targetBuffer,
final int compressionLevel) {
return super.encodeIntoCompressedByteBuffer(targetBuffer, compressionLevel);
}
@Override
public synchronized int encodeIntoCompressedByteBuffer(final ByteBuffer targetBuffer) {
return super.encodeIntoCompressedByteBuffer(targetBuffer);
}
private void readObject(final ObjectInputStream o)
throws IOException, ClassNotFoundException {
o.defaultReadObject();
}
} | 20,798 | 38.466793 | 145 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/SingleWriterRecorder.java | /**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram;
import java.util.concurrent.atomic.AtomicLong;
/**
* Records integer values, and provides stable interval {@link Histogram} samples from
* live recorded data without interrupting or stalling active recording of values. Each interval
* histogram provided contains all value counts accumulated since the previous interval histogram
* was taken.
* <p>
* This pattern is commonly used in logging interval histogram information while recording is ongoing.
* <p>
* {@link SingleWriterRecorder} expects only a single thread (the "single writer") to
* call {@link SingleWriterRecorder#recordValue} or
* {@link SingleWriterRecorder#recordValueWithExpectedInterval} at any point in time.
* It DOES NOT safely support concurrent recording calls.
* Recording calls are wait-free on architectures that support atomic increment operations, and
 * are lock-free on architectures that do not.
 * <p>
* A common pattern for using a {@link SingleWriterRecorder} looks like this:
* <br><pre><code>
* SingleWriterRecorder recorder = new SingleWriterRecorder(2); // Two decimal point accuracy
* Histogram intervalHistogram = null;
* ...
* [start of some loop construct that periodically wants to grab an interval histogram]
* ...
* // Get interval histogram, recycling previous interval histogram:
* intervalHistogram = recorder.getIntervalHistogram(intervalHistogram);
* histogramLogWriter.outputIntervalHistogram(intervalHistogram);
* ...
* [end of loop construct]
* </code></pre>
*/
public class SingleWriterRecorder implements ValueRecorder, IntervalHistogramProvider<Histogram> {
private static AtomicLong instanceIdSequencer = new AtomicLong(1);
private final long instanceId = instanceIdSequencer.getAndIncrement();
private final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser();
private volatile Histogram activeHistogram;
private Histogram inactiveHistogram;
/**
* Construct an auto-resizing {@link SingleWriterRecorder} with a lowest discernible value of
     * 1 and an auto-adjusting highestTrackableValue. Can auto-resize to track values up to (Long.MAX_VALUE / 2).
* <p>
     * Depending on the value of the <b><code>packed</code></b> parameter {@link SingleWriterRecorder} can be configured to
     * track value counts in a packed internal representation optimized for typical histogram use cases, in which
     * recorded values are sparse in the value range and tend to be incremented in small unit counts. This packed representation tends
* to require significantly smaller amounts of storage when compared to unpacked representations, but can incur
* additional recording cost due to resizing and repacking operations that may
* occur as previously unrecorded values are encountered.
*
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
* @param packed Specifies whether the recorder will uses a packed internal representation or not.
*/
public SingleWriterRecorder(final int numberOfSignificantValueDigits, final boolean packed) {
activeHistogram = packed ?
new PackedInternalHistogram(instanceId, numberOfSignificantValueDigits) :
new InternalHistogram(instanceId, numberOfSignificantValueDigits);
inactiveHistogram = null;
activeHistogram.setStartTimeStamp(System.currentTimeMillis());
}
/**
* Construct an auto-resizing {@link SingleWriterRecorder} with a lowest discernible value of
     * 1 and an auto-adjusting highestTrackableValue. Can auto-resize to track values up to (Long.MAX_VALUE / 2).
*
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public SingleWriterRecorder(final int numberOfSignificantValueDigits) {
this(numberOfSignificantValueDigits, false);
}
/**
* Construct a {@link SingleWriterRecorder} given the highest value to be tracked and a number
* of significant decimal digits. The histogram will be constructed to implicitly track (distinguish from 0)
* values as low as 1.
*
* @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
* integer that is {@literal >=} 2.
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public SingleWriterRecorder(final long highestTrackableValue,
final int numberOfSignificantValueDigits) {
this(1, highestTrackableValue, numberOfSignificantValueDigits);
}
/**
* Construct a {@link SingleWriterRecorder} given the Lowest and highest values to be tracked
     * and a number of significant decimal digits. Providing a lowestDiscernibleValue is useful in situations where
     * the units used for the histogram's values are much smaller than the minimal accuracy required. E.g. when
* tracking time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the
* proper value for lowestDiscernibleValue would be 1000.
*
* @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram.
* Must be a positive integer that is {@literal >=} 1. May be internally rounded
* down to nearest power of 2.
* @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
* integer that is {@literal >=} (2 * lowestDiscernibleValue).
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public SingleWriterRecorder(final long lowestDiscernibleValue,
final long highestTrackableValue,
final int numberOfSignificantValueDigits) {
activeHistogram = new InternalHistogram(
instanceId, lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits);
inactiveHistogram = null;
activeHistogram.setStartTimeStamp(System.currentTimeMillis());
}
/**
* Record a value
* @param value the value to record
     * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue
*/
@Override
public void recordValue(final long value) {
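        // Enter the recording phaser's writer critical section so that a concurrent
        // getIntervalHistogram()/reset() cannot flip the active/inactive histograms mid-record: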
long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
try {
activeHistogram.recordValue(value);
} finally {
recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
}
}
/**
* Record a value in the histogram (adding to the value's current count)
*
* @param value The value to be recorded
* @param count The number of occurrences of this value to record
     * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue
*/
@Override
public void recordValueWithCount(final long value, final long count) throws ArrayIndexOutOfBoundsException {
long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
try {
activeHistogram.recordValueWithCount(value, count);
} finally {
recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
}
}
/**
* Record a value
* <p>
* To compensate for the loss of sampled values when a recorded value is larger than the expected
* interval between value samples, Histogram will auto-generate an additional series of decreasingly-smaller
* (down to the expectedIntervalBetweenValueSamples) value records.
* <p>
* See related notes {@link AbstractHistogram#recordValueWithExpectedInterval(long, long)}
* for more explanations about coordinated omission and expected interval correction.
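     * <p>
     * A purely illustrative example (the numbers are hypothetical, not from the original documentation):
     * with an expected interval of 10 between samples, a call to
     * {@code recordValueWithExpectedInterval(50, 10)} records the value 50 and then also records the
     * presumed-missed values 40, 30, 20 and 10, compensating for samples that could not be taken while
     * the long operation was in progress.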
     *
* @param value The value to record
* @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add
* auto-generated value records as appropriate if value is larger
* than expectedIntervalBetweenValueSamples
     * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue
*/
@Override
public void recordValueWithExpectedInterval(final long value, final long expectedIntervalBetweenValueSamples)
throws ArrayIndexOutOfBoundsException {
long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
try {
activeHistogram.recordValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples);
} finally {
recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
}
}
@Override
public synchronized Histogram getIntervalHistogram() {
return getIntervalHistogram(null);
}
@Override
public synchronized Histogram getIntervalHistogram(Histogram histogramToRecycle) {
return getIntervalHistogram(histogramToRecycle, true);
}
@Override
public synchronized Histogram getIntervalHistogram(Histogram histogramToRecycle,
boolean enforceContainingInstance) {
// Verify that replacement histogram can validly be used as an inactive histogram replacement:
validateFitAsReplacementHistogram(histogramToRecycle, enforceContainingInstance);
inactiveHistogram = histogramToRecycle;
performIntervalSample();
Histogram sampledHistogram = inactiveHistogram;
inactiveHistogram = null; // Once we expose the sample, we can't reuse it internally until it is recycled
return sampledHistogram;
}
@Override
public synchronized void getIntervalHistogramInto(Histogram targetHistogram) {
performIntervalSample();
inactiveHistogram.copyInto(targetHistogram);
}
/**
* Reset any value counts accumulated thus far.
*/
@Override
public synchronized void reset() {
// the currently inactive histogram is reset each time we flip. So flipping twice resets both:
performIntervalSample();
performIntervalSample();
}
private void performIntervalSample() {
try {
recordingPhaser.readerLock();
// Make sure we have an inactive version to flip in:
if (inactiveHistogram == null) {
if (activeHistogram instanceof InternalHistogram) {
inactiveHistogram = new InternalHistogram((InternalHistogram) activeHistogram);
} else if (activeHistogram instanceof PackedInternalHistogram) {
inactiveHistogram = new PackedInternalHistogram(
instanceId, activeHistogram.getNumberOfSignificantValueDigits());
} else {
throw new IllegalStateException("Unexpected internal histogram type for activeHistogram");
}
}
inactiveHistogram.reset();
// Swap active and inactive histograms:
final Histogram tempHistogram = inactiveHistogram;
inactiveHistogram = activeHistogram;
activeHistogram = tempHistogram;
// Mark end time of previous interval and start time of new one:
long now = System.currentTimeMillis();
activeHistogram.setStartTimeStamp(now);
inactiveHistogram.setEndTimeStamp(now);
// Make sure we are not in the middle of recording a value on the previously active histogram:
// Flip phase to make sure no recordings that were in flight pre-flip are still active:
recordingPhaser.flipPhase(500000L /* yield in 0.5 msec units if needed */);
} finally {
recordingPhaser.readerUnlock();
}
}
private static class InternalHistogram extends Histogram {
private final long containingInstanceId;
private InternalHistogram(long id, int numberOfSignificantValueDigits) {
super(numberOfSignificantValueDigits);
this.containingInstanceId = id;
}
private InternalHistogram(long id,
long lowestDiscernibleValue,
long highestTrackableValue,
int numberOfSignificantValueDigits) {
super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits);
this.containingInstanceId = id;
}
private InternalHistogram(InternalHistogram source) {
super(source);
this.containingInstanceId = source.containingInstanceId;
}
}
private static class PackedInternalHistogram extends PackedHistogram {
private final long containingInstanceId;
private PackedInternalHistogram(long id, int numberOfSignificantValueDigits) {
super(numberOfSignificantValueDigits);
this.containingInstanceId = id;
}
}
private void validateFitAsReplacementHistogram(Histogram replacementHistogram,
boolean enforceContainingInstance) {
boolean bad = true;
if (replacementHistogram == null) {
bad = false;
} else if ((replacementHistogram instanceof InternalHistogram)
&&
((!enforceContainingInstance) ||
(((InternalHistogram) replacementHistogram).containingInstanceId ==
((InternalHistogram) activeHistogram).containingInstanceId)
)) {
bad = false;
} else if ((replacementHistogram instanceof PackedInternalHistogram)
&&
((!enforceContainingInstance) ||
(((PackedInternalHistogram) replacementHistogram).containingInstanceId ==
((PackedInternalHistogram) activeHistogram).containingInstanceId)
)) {
bad = false;
}
if (bad) {
throw new IllegalArgumentException("replacement histogram must have been obtained via a previous " +
"getIntervalHistogram() call from this " + this.getClass().getName() +
(enforceContainingInstance ? " instance" : " class"));
}
}
}
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/PackedDoubleHistogram.java
/**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram;
import java.nio.ByteBuffer;
import java.util.zip.DataFormatException;
/**
* <h3>A floating point values High Dynamic Range (HDR) Histogram that uses a packed internal representation</h3>
* <p>
* It is important to note that {@link PackedDoubleHistogram} is not thread-safe, and does not support safe concurrent
* recording by multiple threads. If concurrent operation is required, consider using
* {@link PackedConcurrentDoubleHistogram}, or (recommended) {@link DoubleRecorder} or
* {@link SingleWriterDoubleRecorder} which are intended for this purpose.
* <p>
 * {@link PackedDoubleHistogram} tracks value counts in a packed internal representation optimized
 * for typical histogram use cases, in which recorded values are sparse in the value range and tend to be incremented in small unit counts.
* This packed representation tends to require significantly smaller amounts of storage when compared to unpacked
* representations, but can incur additional recording cost due to resizing and repacking operations that may
* occur as previously unrecorded values are encountered.
* <p>
* {@link PackedDoubleHistogram} supports the recording and analyzing sampled data value counts across a
* configurable dynamic range of floating point (double) values, with configurable value precision within the range.
* Dynamic range is expressed as a ratio between the highest and lowest non-zero values trackable within the histogram
* at any given time. Value precision is expressed as the number of significant [decimal] digits in the value recording,
* and provides control over value quantization behavior across the value range and the subsequent value resolution at
* any given level.
* <p>
* Auto-ranging: Unlike integer value based histograms, the specific value range tracked by a {@link
* PackedDoubleHistogram} is not specified upfront. Only the dynamic range of values that the histogram can cover is
* (optionally) specified. E.g. When a {@link PackedDoubleHistogram} is created to track a dynamic range of
 * 3600000000000 (enough to track values from a nanosecond to an hour), values could be recorded into it in any
* consistent unit of time as long as the ratio between the highest and lowest non-zero values stays within the
* specified dynamic range, so recording in units of nanoseconds (1.0 thru 3600000000000.0), milliseconds (0.000001
* thru 3600000.0) seconds (0.000000001 thru 3600.0), hours (1/3.6E12 thru 1.0) will all work just as well.
* <p>
* Auto-resizing: When constructed with no specified dynamic range (or when auto-resize is turned on with {@link
* DoubleHistogram#setAutoResize}) a {@link DoubleHistogram} will auto-resize its dynamic range to
* include recorded values as they are encountered. Note that recording calls that cause auto-resizing may take
* longer to execute, as resizing incurs allocation and copying of internal data structures.
* <p>
 * Attempts to record non-zero values that range outside of the specified dynamic range (or exceed the limits of
 * the dynamic range when auto-resizing) may result in {@link ArrayIndexOutOfBoundsException} exceptions, either
* due to overflow or underflow conditions. These exceptions will only be thrown if recording the value would have
* resulted in discarding or losing the required value precision of values already recorded in the histogram.
* <p>
* See package description for {@link org.HdrHistogram} for details.
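 * <p>
 * A minimal usage sketch (values are illustrative, not from the original documentation):
 * <br><pre><code>
 * // Cover a dynamic range of 3600000000000 (e.g. 1 nsec to 1 hour) with 3 significant digits:
 * PackedDoubleHistogram histogram = new PackedDoubleHistogram(3600000000000L, 3);
 * histogram.recordValue(0.042);   // e.g. 42 usec, expressed in msec units
 * histogram.recordValue(1.5);     // 1.5 msec
 * System.out.println(histogram.getValueAtPercentile(99.0));
 * </code></pre>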
*/
public class PackedDoubleHistogram extends DoubleHistogram {
/**
     * Construct a new auto-resizing PackedDoubleHistogram using a precision stated as a number of significant decimal
* digits.
*
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal
* digits to which the histogram will maintain value resolution and
* separation. Must be a non-negative integer between 0 and 5.
*/
public PackedDoubleHistogram(final int numberOfSignificantValueDigits) {
this(2, numberOfSignificantValueDigits);
setAutoResize(true);
}
/**
     * Construct a new PackedDoubleHistogram with the specified dynamic range (provided in {@code highestToLowestValueRatio})
* and using a precision stated as a number of significant decimal digits.
*
* @param highestToLowestValueRatio specifies the dynamic range to use
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal
* digits to which the histogram will maintain value resolution and
* separation. Must be a non-negative integer between 0 and 5.
*/
public PackedDoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits) {
this(highestToLowestValueRatio, numberOfSignificantValueDigits, PackedHistogram.class);
}
/**
* Construct a {@link PackedDoubleHistogram} with the same range settings as a given source,
     * duplicating the source's start/end timestamps (but NOT its contents)
* @param source The source histogram to duplicate
*/
public PackedDoubleHistogram(final DoubleHistogram source) {
super(source);
}
PackedDoubleHistogram(final long highestToLowestValueRatio,
final int numberOfSignificantValueDigits,
final Class<? extends AbstractHistogram> internalCountsHistogramClass) {
super(highestToLowestValueRatio, numberOfSignificantValueDigits, internalCountsHistogramClass);
}
PackedDoubleHistogram(final long highestToLowestValueRatio,
final int numberOfSignificantValueDigits,
final Class<? extends AbstractHistogram> internalCountsHistogramClass,
AbstractHistogram internalCountsHistogram) {
super(
highestToLowestValueRatio,
numberOfSignificantValueDigits,
internalCountsHistogramClass,
internalCountsHistogram
);
}
/**
     * Construct a new PackedDoubleHistogram by decoding it from a ByteBuffer.
* @param buffer The buffer to decode from
* @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high
     * @return The newly constructed PackedDoubleHistogram
*/
public static PackedDoubleHistogram decodeFromByteBuffer(
final ByteBuffer buffer,
final long minBarForHighestToLowestValueRatio) {
try {
int cookie = buffer.getInt();
if (!isNonCompressedDoubleHistogramCookie(cookie)) {
throw new IllegalArgumentException("The buffer does not contain a DoubleHistogram");
}
PackedDoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer,
PackedDoubleHistogram.class, PackedHistogram.class,
minBarForHighestToLowestValueRatio);
return histogram;
} catch (DataFormatException ex) {
throw new RuntimeException(ex);
}
}
/**
     * Construct a new PackedDoubleHistogram by decoding it from a compressed form in a ByteBuffer.
* @param buffer The buffer to decode from
* @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high
     * @return The newly constructed PackedDoubleHistogram
* @throws DataFormatException on error parsing/decompressing the buffer
*/
public static PackedDoubleHistogram decodeFromCompressedByteBuffer(
final ByteBuffer buffer,
final long minBarForHighestToLowestValueRatio) throws DataFormatException {
int cookie = buffer.getInt();
if (!isCompressedDoubleHistogramCookie(cookie)) {
throw new IllegalArgumentException("The buffer does not contain a compressed DoubleHistogram");
}
PackedDoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer,
PackedDoubleHistogram.class, PackedHistogram.class,
minBarForHighestToLowestValueRatio);
return histogram;
}
}
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/Histogram.java
/**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.zip.DataFormatException;
/**
* <h3>A High Dynamic Range (HDR) Histogram</h3>
* <p>
* {@link Histogram} supports the recording and analyzing sampled data value counts across a configurable integer value
* range with configurable value precision within the range. Value precision is expressed as the number of significant
* digits in the value recording, and provides control over value quantization behavior across the value range and the
* subsequent value resolution at any given level.
* <p>
* For example, a Histogram could be configured to track the counts of observed integer values between 0 and
* 3,600,000,000 while maintaining a value precision of 3 significant digits across that range. Value quantization
* within the range will thus be no larger than 1/1,000th (or 0.1%) of any value. This example Histogram could
* be used to track and analyze the counts of observed response times ranging between 1 microsecond and 1 hour
* in magnitude, while maintaining a value resolution of 1 microsecond up to 1 millisecond, a resolution of
* 1 millisecond (or better) up to one second, and a resolution of 1 second (or better) up to 1,000 seconds. At its
* maximum tracked value (1 hour), it would still maintain a resolution of 3.6 seconds (or better).
* <p>
* Histogram tracks value counts in <b><code>long</code></b> fields. Smaller field types are available in the
* {@link IntCountsHistogram} and {@link ShortCountsHistogram} implementations of
* {@link org.HdrHistogram.AbstractHistogram}.
* <p>
 * Auto-resizing: When constructed with no specified value range (or when auto-resize is turned on with {@link
* Histogram#setAutoResize}) a {@link Histogram} will auto-resize its dynamic range to include recorded values as
* they are encountered. Note that recording calls that cause auto-resizing may take longer to execute, as resizing
* incurs allocation and copying of internal data structures.
* <p>
* See package description for {@link org.HdrHistogram} for details.
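 * <p>
 * A minimal usage sketch matching the example above (values are illustrative, not from the original
 * documentation):
 * <br><pre><code>
 * // Track values from 1 to 3,600,000,000 (e.g. 1 usec to 1 hour in usec units) at 3 significant digits:
 * Histogram histogram = new Histogram(3600000000L, 3);
 * histogram.recordValue(42);                    // e.g. a 42 usec response time
 * histogram.recordValueWithCount(1000, 5);      // five observations of 1 msec
 * System.out.println(histogram.getValueAtPercentile(99.9));
 * histogram.outputPercentileDistribution(System.out, 1000.0); // report in msec units
 * </code></pre>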
*/
public class Histogram extends AbstractHistogram {
long totalCount;
long[] counts;
int normalizingIndexOffset;
@Override
long getCountAtIndex(final int index) {
return counts[normalizeIndex(index, normalizingIndexOffset, countsArrayLength)];
}
@Override
long getCountAtNormalizedIndex(final int index) {
return counts[index];
}
@Override
void incrementCountAtIndex(final int index) {
counts[normalizeIndex(index, normalizingIndexOffset, countsArrayLength)]++;
}
@Override
void addToCountAtIndex(final int index, final long value) {
counts[normalizeIndex(index, normalizingIndexOffset, countsArrayLength)] += value;
}
@Override
void setCountAtIndex(int index, long value) {
counts[normalizeIndex(index, normalizingIndexOffset, countsArrayLength)] = value;
}
@Override
void setCountAtNormalizedIndex(int index, long value) {
counts[index] = value;
}
@Override
int getNormalizingIndexOffset() {
return normalizingIndexOffset;
}
@Override
void setNormalizingIndexOffset(int normalizingIndexOffset) {
this.normalizingIndexOffset = normalizingIndexOffset;
}
@Override
void setIntegerToDoubleValueConversionRatio(double integerToDoubleValueConversionRatio) {
nonConcurrentSetIntegerToDoubleValueConversionRatio(integerToDoubleValueConversionRatio);
}
@Override
void shiftNormalizingIndexByOffset(int offsetToAdd,
boolean lowestHalfBucketPopulated,
double newIntegerToDoubleValueConversionRatio) {
nonConcurrentNormalizingIndexShift(offsetToAdd, lowestHalfBucketPopulated);
}
@Override
void clearCounts() {
java.util.Arrays.fill(counts, 0);
totalCount = 0;
}
@Override
public Histogram copy() {
Histogram copy = new Histogram(this);
copy.add(this);
return copy;
}
@Override
public Histogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) {
Histogram copy = new Histogram(this);
copy.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples);
return copy;
}
@Override
public long getTotalCount() {
return totalCount;
}
@Override
void setTotalCount(final long totalCount) {
this.totalCount = totalCount;
}
@Override
void incrementTotalCount() {
totalCount++;
}
@Override
void addToTotalCount(final long value) {
totalCount += value;
}
@Override
int _getEstimatedFootprintInBytes() {
return (512 + (8 * counts.length));
}
@Override
void resize(long newHighestTrackableValue) {
int oldNormalizedZeroIndex = normalizeIndex(0, normalizingIndexOffset, countsArrayLength);
establishSize(newHighestTrackableValue);
int countsDelta = countsArrayLength - counts.length;
counts = Arrays.copyOf(counts, countsArrayLength);
if (oldNormalizedZeroIndex != 0) {
// We need to shift the stuff from the zero index and up to the end of the array:
int newNormalizedZeroIndex = oldNormalizedZeroIndex + countsDelta;
int lengthToCopy = (countsArrayLength - countsDelta) - oldNormalizedZeroIndex;
System.arraycopy(counts, oldNormalizedZeroIndex, counts, newNormalizedZeroIndex, lengthToCopy);
Arrays.fill(counts, oldNormalizedZeroIndex, newNormalizedZeroIndex, 0);
}
}
/**
* Construct an auto-resizing histogram with a lowest discernible value of 1 and an auto-adjusting
     * highestTrackableValue. Can auto-resize to track values up to (Long.MAX_VALUE / 2).
*
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public Histogram(final int numberOfSignificantValueDigits) {
this(1, 2, numberOfSignificantValueDigits);
setAutoResize(true);
}
/**
* Construct a Histogram given the Highest value to be tracked and a number of significant decimal digits. The
* histogram will be constructed to implicitly track (distinguish from 0) values as low as 1.
*
* @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
* integer that is {@literal >=} 2.
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public Histogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) {
this(1, highestTrackableValue, numberOfSignificantValueDigits);
}
/**
* Construct a Histogram given the Lowest and Highest values to be tracked and a number of significant
     * decimal digits. Providing a lowestDiscernibleValue is useful in situations where the units used
     * for the histogram's values are much smaller than the minimal accuracy required. E.g. when tracking
* time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the
* proper value for lowestDiscernibleValue would be 1000.
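     * For example (an illustrative call, not from the original documentation), a histogram tracking
     * nanosecond values from 1 microsecond up to 1 hour at 3 significant digits could be constructed
     * with {@code new Histogram(1000L, 3600000000000L, 3)}.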
*
* @param lowestDiscernibleValue The lowest value that can be discerned (distinguished from 0) by the
* histogram. Must be a positive integer that is {@literal >=} 1. May be
* internally rounded down to nearest power of 2.
* @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive
* integer that is {@literal >=} (2 * lowestDiscernibleValue).
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant
* decimal digits to which the histogram will maintain value resolution
* and separation. Must be a non-negative integer between 0 and 5.
*/
public Histogram(final long lowestDiscernibleValue, final long highestTrackableValue,
final int numberOfSignificantValueDigits) {
this(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits, true);
}
/**
* Construct a histogram with the same range settings as a given source histogram,
* duplicating the source's start/end timestamps (but NOT its contents)
* @param source The source histogram to duplicate
*/
public Histogram(final AbstractHistogram source) {
this(source, true);
}
Histogram(final AbstractHistogram source, boolean allocateCountsArray) {
super(source);
if (allocateCountsArray) {
counts = new long[countsArrayLength];
}
wordSizeInBytes = 8;
}
Histogram(final long lowestDiscernibleValue, final long highestTrackableValue,
final int numberOfSignificantValueDigits, boolean allocateCountsArray) {
super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits);
if (allocateCountsArray) {
counts = new long[countsArrayLength];
}
wordSizeInBytes = 8;
}
/**
* Construct a new histogram by decoding it from a ByteBuffer.
* @param buffer The buffer to decode from
* @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
* @return The newly constructed histogram
*/
public static Histogram decodeFromByteBuffer(final ByteBuffer buffer,
final long minBarForHighestTrackableValue) {
return decodeFromByteBuffer(buffer, Histogram.class, minBarForHighestTrackableValue);
}
/**
* Construct a new histogram by decoding it from a compressed form in a ByteBuffer.
* @param buffer The buffer to decode from
* @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
* @return The newly constructed histogram
* @throws DataFormatException on error parsing/decompressing the buffer
*/
public static Histogram decodeFromCompressedByteBuffer(final ByteBuffer buffer,
final long minBarForHighestTrackableValue)
throws DataFormatException {
return decodeFromCompressedByteBuffer(buffer, Histogram.class, minBarForHighestTrackableValue);
}
private void readObject(final ObjectInputStream o)
throws IOException, ClassNotFoundException {
o.defaultReadObject();
}
/**
* Construct a new Histogram by decoding it from a String containing a base64 encoded
* compressed histogram representation.
*
* @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram
* @return A Histogram decoded from the string
* @throws DataFormatException on error parsing/decompressing the input
*/
public static Histogram fromString(final String base64CompressedHistogramString)
throws DataFormatException {
return decodeFromCompressedByteBuffer(
ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)),
0);
}
}
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/HistogramLogProcessor.java
/**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.PrintStream;
import java.util.*;
/**
* {@link org.HdrHistogram.HistogramLogProcessor} will process an input log and
* [can] generate two separate log files from a single histogram log file: a
* sequential interval log file and a histogram percentile distribution log file.
* <p>
* The sequential interval log file logs a single stats summary line for
* each reporting interval.
* <p>
* The histogram percentile distribution log file includes a detailed percentiles
* and fine grained distribution of the entire log file range processed.
* <p>
* HistogramLogProcessor will process an input log file when provided with
* the {@code -i <filename>} option. When no -i option is provided, standard input
* will be processed.
* <p>
* When provided with an output file name {@code <logfile>} with the -o option
* (e.g. "-o mylog"), HistogramLogProcessor will produce both output files
* under the names {@code <logfile>} and {@code <logfile>.hgrm} (e.g. mylog and mylog.hgrm).
* <p>
* When not provided with an output file name, HistogramLogProcessor will
* produce [only] the histogram percentile distribution log output to
* standard output.
* <p>
 * By default, HistogramLogProcessor only processes hlog file lines
* with no tag specified [aka "default tagged" lines]. An optional -tag
* parameter can be used to process lines of a [single] specific tag. The
* -listtags option can be used to list all the tags found in the input file.
* <p>
* HistogramLogProcessor accepts optional -start and -end time range
* parameters. When provided, the output will only reflect the portion
* of the input log with timestamps that fall within the provided start
* and end time range parameters.
* <p>
 * HistogramLogProcessor also accepts an optional -csv parameter, which
* will cause the output formatting (of both output file forms) to use
* a CSV file format.
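 * <p>
 * A typical invocation might look like the following (jar and file names are illustrative):
 * <br><pre><code>
 * java -cp HdrHistogram.jar org.HdrHistogram.HistogramLogProcessor -i mylog.hlog -o myout -start 10.0 -end 100.0
 * </code></pre>
 * which would produce myout (the interval log) and myout.hgrm (the percentile distribution) covering the
 * 10.0 to 100.0 second range of mylog.hlog.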
*/
public class HistogramLogProcessor extends Thread {
static final String versionString = "Histogram Log Processor version " + Version.version;
private final HistogramLogProcessorConfiguration config;
private HistogramLogReader logReader;
private static class HistogramLogProcessorConfiguration {
boolean verbose = false;
String outputFileName = null;
String inputFileName = null;
String tag = null;
double rangeStartTimeSec = 0.0;
double rangeEndTimeSec = Double.MAX_VALUE;
boolean logFormatCsv = false;
boolean listTags = false;
boolean allTags = false;
boolean movingWindow = false;
double movingWindowPercentileToReport = 99.0;
long movingWindowLengthInMsec = 60000; // 1 minute
int percentilesOutputTicksPerHalf = 5;
Double outputValueUnitRatio = 1000000.0; // default to msec units for output.
double expectedIntervalForCoordinatedOmissionCorrection = 0.0;
String errorMessage = "";
HistogramLogProcessorConfiguration(final String[] args) {
            boolean askedForHelp = false;
try {
for (int i = 0; i < args.length; ++i) {
if (args[i].equals("-csv")) {
logFormatCsv = true;
} else if (args[i].equals("-v")) {
verbose = true;
} else if (args[i].equals("-listtags")) {
listTags = true;
} else if (args[i].equals("-alltags")) {
allTags = true;
} else if (args[i].equals("-i")) {
inputFileName = args[++i]; // lgtm [java/index-out-of-bounds]
} else if (args[i].equals("-tag")) {
tag = args[++i]; // lgtm [java/index-out-of-bounds]
} else if (args[i].equals("-mwp")) {
movingWindowPercentileToReport = Double.parseDouble(args[++i]); // lgtm [java/index-out-of-bounds]
movingWindow = true;
} else if (args[i].equals("-mwpl")) {
movingWindowLengthInMsec = Long.parseLong(args[++i]); // lgtm [java/index-out-of-bounds]
movingWindow = true;
} else if (args[i].equals("-start")) {
rangeStartTimeSec = Double.parseDouble(args[++i]); // lgtm [java/index-out-of-bounds]
} else if (args[i].equals("-end")) {
rangeEndTimeSec = Double.parseDouble(args[++i]); // lgtm [java/index-out-of-bounds]
} else if (args[i].equals("-o")) {
outputFileName = args[++i]; // lgtm [java/index-out-of-bounds]
} else if (args[i].equals("-percentilesOutputTicksPerHalf")) {
percentilesOutputTicksPerHalf = Integer.parseInt(args[++i]); // lgtm [java/index-out-of-bounds]
} else if (args[i].equals("-outputValueUnitRatio")) {
outputValueUnitRatio = Double.parseDouble(args[++i]); // lgtm [java/index-out-of-bounds]
} else if (args[i].equals("-correctLogWithKnownCoordinatedOmission")) {
expectedIntervalForCoordinatedOmissionCorrection =
Double.parseDouble(args[++i]); // lgtm [java/index-out-of-bounds]
} else if (args[i].equals("-h")) {
askedForHelp = true;
throw new Exception("Help: " + args[i]);
} else {
throw new Exception("Invalid args: " + args[i]);
}
}
} catch (Exception e) {
errorMessage = "Error: " + versionString + " launched with the following args:\n";
for (String arg : args) {
errorMessage += arg + " ";
}
if (!askedForHelp) {
errorMessage += "\nWhich was parsed as an error, indicated by the following exception:\n" + e;
System.err.println(errorMessage);
}
final String validArgs =
"\"[-csv] [-v] [-i inputFileName] [-o outputFileName] [-tag tag] " +
"[-start rangeStartTimeSec] [-end rangeEndTimeSec] " +
"[-outputValueUnitRatio r] [-correctLogWithKnownCoordinatedOmission i] [-listtags]";
System.err.println("valid arguments = " + validArgs);
System.err.println(
" [-h] help\n" +
" [-v] Provide verbose error output\n" +
" [-csv] Use CSV format for output log files\n" +
" [-i logFileName] File name of Histogram Log to process (default is standard input)\n" +
" [-o outputFileName] File name to output to (default is standard output)\n" +
" [-tag tag] The tag (default no tag) of the histogram lines to be processed\n" +
" [-start rangeStartTimeSec] The start time for the range in the file, in seconds (default 0.0)\n" +
" [-end rangeEndTimeSec] The end time for the range in the file, in seconds (default is infinite)\n" +
" [-outputValueUnitRatio r] The scaling factor by which to divide histogram recorded values units\n" +
" in output. [default = 1000000.0 (1 msec in nsec)]\n" +
" [-correctLogWithKnownCoordinatedOmission i] When the supplied expected interval i is than 0, performs coordinated\n" +
" omission correction on the input log's interval histograms by adding\n" +
" missing values as appropriate based on the supplied expected interval\n" +
" value i (in whatever units the log histograms were recorded with). This\n" +
" feature should only be used when the input log is known to have been\n" +
" recorded with coordinated omissions, and when an expected interval is known.\n" +
" [-listtags] list all tags found on histogram lines the input file."
);
System.exit(1);
}
}
}
private void outputTimeRange(final PrintStream log, final String title) {
log.format(Locale.US, "#[%s between %.3f and", title, config.rangeStartTimeSec);
if (config.rangeEndTimeSec < Double.MAX_VALUE) {
log.format(" %.3f", config.rangeEndTimeSec);
} else {
log.format(" %s", "<Infinite>");
}
log.format(" seconds (relative to StartTime)]\n");
}
private void outputStartTime(final PrintStream log, final Double startTime) {
log.format(Locale.US, "#[StartTime: %.3f (seconds since epoch), %s]\n",
startTime, (new Date((long) (startTime * 1000))).toString());
}
EncodableHistogram copyCorrectedForCoordinatedOmission(final EncodableHistogram inputHistogram) {
EncodableHistogram histogram = inputHistogram;
if (histogram instanceof DoubleHistogram) {
if (config.expectedIntervalForCoordinatedOmissionCorrection > 0.0) {
histogram = ((DoubleHistogram) histogram).copyCorrectedForCoordinatedOmission(
config.expectedIntervalForCoordinatedOmissionCorrection);
}
} else if (histogram instanceof Histogram) {
long expectedInterval = (long) config.expectedIntervalForCoordinatedOmissionCorrection;
if (expectedInterval > 0) {
histogram = ((Histogram) histogram).copyCorrectedForCoordinatedOmission(expectedInterval);
}
}
return histogram;
}
private int lineNumber = 0;
private EncodableHistogram getIntervalHistogram() {
EncodableHistogram histogram = null;
try {
histogram = logReader.nextIntervalHistogram(config.rangeStartTimeSec, config.rangeEndTimeSec);
if (config.expectedIntervalForCoordinatedOmissionCorrection > 0.0) {
// Apply Coordinated Omission correction to log histograms when arguments indicate that
// such correction is desired, and an expected interval is provided.
histogram = copyCorrectedForCoordinatedOmission(histogram);
}
} catch (RuntimeException ex) {
System.err.println("Log file parsing error at line number " + lineNumber +
": line appears to be malformed.");
if (config.verbose) {
throw ex;
} else {
System.exit(1);
}
}
lineNumber++;
return histogram;
}
private EncodableHistogram getIntervalHistogram(String tag) {
EncodableHistogram histogram;
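        // Keep reading interval histograms until one with the requested tag is found
        // (a null tag selects only the untagged, "default" lines):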
if (tag == null) {
do {
histogram = getIntervalHistogram();
} while ((histogram != null) && histogram.getTag() != null);
} else {
do {
histogram = getIntervalHistogram();
} while ((histogram != null) && !tag.equals(histogram.getTag()));
}
return histogram;
}
/**
* Run the log processor with the currently provided arguments.
*/
@Override
public void run() {
PrintStream timeIntervalLog = null;
PrintStream movingWindowLog = null;
PrintStream histogramPercentileLog = System.out;
double firstStartTime = 0.0;
boolean timeIntervalLogLegendWritten = false;
boolean movingWindowLogLegendWritten = false;
Queue<EncodableHistogram> movingWindowQueue = new LinkedList<>();
if (config.listTags) {
Set<String> tags = new TreeSet<>();
EncodableHistogram histogram;
boolean nullTagFound = false;
while ((histogram = getIntervalHistogram()) != null) {
String tag = histogram.getTag();
if (tag != null) {
tags.add(histogram.getTag());
} else {
nullTagFound = true;
}
}
System.out.println("Tags found in input file:");
if (nullTagFound) {
System.out.println("[NO TAG (default)]");
}
for (String tag : tags) {
System.out.println(tag);
}
// listtags does nothing other than list tags:
return;
}
final String logFormat;
final String movingWindowLogFormat;
if (config.logFormatCsv) {
logFormat = "%.3f,%d,%.3f,%.3f,%.3f,%d,%.3f,%.3f,%.3f,%.3f,%.3f,%.3f\n";
movingWindowLogFormat = "%.3f,%d,%.3f,%.3f\n";
} else {
logFormat = "%4.3f: I:%d ( %7.3f %7.3f %7.3f ) T:%d ( %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f )\n";
movingWindowLogFormat = "%4.3f: I:%d P:%7.3f M:%7.3f\n";
}
try {
if (config.outputFileName != null) {
try {
timeIntervalLog = new PrintStream(new FileOutputStream(config.outputFileName), false);
outputTimeRange(timeIntervalLog, "Interval percentile log");
} catch (FileNotFoundException ex) {
System.err.println("Failed to open output file " + config.outputFileName);
}
String hgrmOutputFileName = config.outputFileName + ".hgrm";
try {
histogramPercentileLog = new PrintStream(new FileOutputStream(hgrmOutputFileName), false);
outputTimeRange(histogramPercentileLog, "Overall percentile distribution");
} catch (FileNotFoundException ex) {
System.err.println("Failed to open percentiles histogram output file " + hgrmOutputFileName);
}
if (config.movingWindow) {
String movingWindowOutputFileName = config.outputFileName + ".mwp";
try {
movingWindowLog = new PrintStream(new FileOutputStream(movingWindowOutputFileName), false);
outputTimeRange(movingWindowLog, "Moving window log for " +
config.movingWindowPercentileToReport + " percentile");
} catch (FileNotFoundException ex) {
System.err.println("Failed to open moving window output file " + movingWindowOutputFileName);
}
}
}
EncodableHistogram intervalHistogram = getIntervalHistogram(config.tag);
boolean logUsesDoubleHistograms = (intervalHistogram instanceof DoubleHistogram);
Histogram accumulatedRegularHistogram = logUsesDoubleHistograms ?
new Histogram(3) :
((Histogram) intervalHistogram).copy();
accumulatedRegularHistogram.reset();
accumulatedRegularHistogram.setAutoResize(true);
DoubleHistogram accumulatedDoubleHistogram = logUsesDoubleHistograms ?
((DoubleHistogram) intervalHistogram).copy() :
new DoubleHistogram(3);
accumulatedDoubleHistogram.reset();
accumulatedDoubleHistogram.setAutoResize(true);
EncodableHistogram movingWindowSumHistogram = logUsesDoubleHistograms ?
new DoubleHistogram(3) :
new Histogram(3);
while (intervalHistogram != null) {
// handle accumulated histogram:
if (intervalHistogram instanceof DoubleHistogram) {
if (!logUsesDoubleHistograms) {
throw new IllegalStateException("Encountered a DoubleHistogram line in a log of Histograms.");
}
accumulatedDoubleHistogram.add((DoubleHistogram) intervalHistogram);
} else {
if (logUsesDoubleHistograms) {
throw new IllegalStateException("Encountered a Histogram line in a log of DoubleHistograms.");
}
accumulatedRegularHistogram.add((Histogram) intervalHistogram);
}
long windowCutOffTimeStamp = intervalHistogram.getEndTimeStamp() - config.movingWindowLengthInMsec;
// handle moving window:
if (config.movingWindow) {
// Add the current interval histogram to the moving window sums:
if ((movingWindowSumHistogram instanceof DoubleHistogram) &&
(intervalHistogram instanceof DoubleHistogram)){
((DoubleHistogram) movingWindowSumHistogram).add((DoubleHistogram) intervalHistogram);
} else if ((movingWindowSumHistogram instanceof Histogram) &&
(intervalHistogram instanceof Histogram)){
((Histogram) movingWindowSumHistogram).add((Histogram) intervalHistogram);
}
// Remove previous, now-out-of-window interval histograms from moving window:
EncodableHistogram head;
while (((head = movingWindowQueue.peek()) != null) &&
(head.getEndTimeStamp() <= windowCutOffTimeStamp)) {
EncodableHistogram prevHist = movingWindowQueue.remove();
if (movingWindowSumHistogram instanceof DoubleHistogram) {
if (prevHist != null) {
((DoubleHistogram) movingWindowSumHistogram).subtract((DoubleHistogram) prevHist);
}
} else if (movingWindowSumHistogram instanceof Histogram) {
if (prevHist != null) {
((Histogram) movingWindowSumHistogram).subtract((Histogram) prevHist);
}
}
}
// Add interval histogram to moving window previous intervals memory:
movingWindowQueue.add(intervalHistogram);
}
if ((firstStartTime == 0.0) && (logReader.getStartTimeSec() != 0.0)) {
firstStartTime = logReader.getStartTimeSec();
outputStartTime(histogramPercentileLog, firstStartTime);
if (timeIntervalLog != null) {
outputStartTime(timeIntervalLog, firstStartTime);
}
}
if (timeIntervalLog != null) {
if (!timeIntervalLogLegendWritten) {
timeIntervalLogLegendWritten = true;
if (config.logFormatCsv) {
timeIntervalLog.println("\"Timestamp\",\"Int_Count\",\"Int_50%\",\"Int_90%\",\"Int_Max\",\"Total_Count\"," +
"\"Total_50%\",\"Total_90%\",\"Total_99%\",\"Total_99.9%\",\"Total_99.99%\",\"Total_Max\"");
} else {
timeIntervalLog.println("Time: IntervalPercentiles:count ( 50% 90% Max ) TotalPercentiles:count ( 50% 90% 99% 99.9% 99.99% Max )");
}
}
if (logUsesDoubleHistograms) {
timeIntervalLog.format(Locale.US, logFormat,
((intervalHistogram.getEndTimeStamp() / 1000.0) - logReader.getStartTimeSec()),
// values recorded during the last reporting interval
((DoubleHistogram) intervalHistogram).getTotalCount(),
((DoubleHistogram) intervalHistogram).getValueAtPercentile(50.0) / config.outputValueUnitRatio,
((DoubleHistogram) intervalHistogram).getValueAtPercentile(90.0) / config.outputValueUnitRatio,
((DoubleHistogram) intervalHistogram).getMaxValue() / config.outputValueUnitRatio,
// values recorded from the beginning until now
accumulatedDoubleHistogram.getTotalCount(),
accumulatedDoubleHistogram.getValueAtPercentile(50.0) / config.outputValueUnitRatio,
accumulatedDoubleHistogram.getValueAtPercentile(90.0) / config.outputValueUnitRatio,
accumulatedDoubleHistogram.getValueAtPercentile(99.0) / config.outputValueUnitRatio,
accumulatedDoubleHistogram.getValueAtPercentile(99.9) / config.outputValueUnitRatio,
accumulatedDoubleHistogram.getValueAtPercentile(99.99) / config.outputValueUnitRatio,
accumulatedDoubleHistogram.getMaxValue() / config.outputValueUnitRatio
);
} else {
timeIntervalLog.format(Locale.US, logFormat,
((intervalHistogram.getEndTimeStamp() / 1000.0) - logReader.getStartTimeSec()),
// values recorded during the last reporting interval
((Histogram) intervalHistogram).getTotalCount(),
((Histogram) intervalHistogram).getValueAtPercentile(50.0) / config.outputValueUnitRatio,
((Histogram) intervalHistogram).getValueAtPercentile(90.0) / config.outputValueUnitRatio,
((Histogram) intervalHistogram).getMaxValue() / config.outputValueUnitRatio,
// values recorded from the beginning until now
accumulatedRegularHistogram.getTotalCount(),
accumulatedRegularHistogram.getValueAtPercentile(50.0) / config.outputValueUnitRatio,
accumulatedRegularHistogram.getValueAtPercentile(90.0) / config.outputValueUnitRatio,
accumulatedRegularHistogram.getValueAtPercentile(99.0) / config.outputValueUnitRatio,
accumulatedRegularHistogram.getValueAtPercentile(99.9) / config.outputValueUnitRatio,
accumulatedRegularHistogram.getValueAtPercentile(99.99) / config.outputValueUnitRatio,
accumulatedRegularHistogram.getMaxValue() / config.outputValueUnitRatio
);
}
}
if (movingWindowLog != null) {
if (!movingWindowLogLegendWritten) {
movingWindowLogLegendWritten = true;
if (config.logFormatCsv) {
movingWindowLog.println("\"Timestamp\",\"Window_Count\",\"" +
config.movingWindowPercentileToReport +"%'ile\",\"Max\"");
} else {
movingWindowLog.println("Time: WindowCount " + config.movingWindowPercentileToReport + "%'ile Max");
}
}
if (intervalHistogram instanceof DoubleHistogram) {
movingWindowLog.format(Locale.US, movingWindowLogFormat,
((intervalHistogram.getEndTimeStamp() / 1000.0) - logReader.getStartTimeSec()),
// values recorded during the last reporting interval
((DoubleHistogram) movingWindowSumHistogram).getTotalCount(),
((DoubleHistogram) movingWindowSumHistogram).getValueAtPercentile(config.movingWindowPercentileToReport) / config.outputValueUnitRatio,
((DoubleHistogram) movingWindowSumHistogram).getMaxValue() / config.outputValueUnitRatio
);
} else {
movingWindowLog.format(Locale.US, movingWindowLogFormat,
((intervalHistogram.getEndTimeStamp() / 1000.0) - logReader.getStartTimeSec()),
// values recorded during the last reporting interval
((Histogram) movingWindowSumHistogram).getTotalCount(),
((Histogram) movingWindowSumHistogram).getValueAtPercentile(config.movingWindowPercentileToReport) / config.outputValueUnitRatio,
((Histogram) movingWindowSumHistogram).getMaxValue() / config.outputValueUnitRatio
);
}
}
intervalHistogram = getIntervalHistogram(config.tag);
}
if (logUsesDoubleHistograms) {
accumulatedDoubleHistogram.outputPercentileDistribution(histogramPercentileLog,
config.percentilesOutputTicksPerHalf, config.outputValueUnitRatio, config.logFormatCsv);
} else {
accumulatedRegularHistogram.outputPercentileDistribution(histogramPercentileLog,
config.percentilesOutputTicksPerHalf, config.outputValueUnitRatio, config.logFormatCsv);
}
} finally {
if (timeIntervalLog != null) {
timeIntervalLog.close();
}
if (movingWindowLog != null) {
movingWindowLog.close();
}
if (histogramPercentileLog != System.out) {
histogramPercentileLog.close();
}
}
}
/**
* Construct a {@link org.HdrHistogram.HistogramLogProcessor} with the given arguments
* (provided in command line style).
* <pre>
* [-h] help
* [-csv] Use CSV format for output log files
* [-i logFileName] File name of Histogram Log to process (default is standard input)
* [-o outputFileName] File name to output to (default is standard output)
* (will replace occurrences of %pid and %date with appropriate information)
     * [-tag tag]                      The tag (default no tag) of the histogram lines to be processed
* [-start rangeStartTimeSec] The start time for the range in the file, in seconds (default 0.0)
* [-end rangeEndTimeSec] The end time for the range in the file, in seconds (default is infinite)
     * [-correctLogWithKnownCoordinatedOmission expectedInterval] When the supplied expected interval i is larger than 0, performs coordinated
* omission correction on the input log's interval histograms by adding
* missing values as appropriate based on the supplied expected interval
* value i (in whatever units the log histograms were recorded with). This
* feature should only be used when the input log is known to have been
* recorded with coordinated omissions, and when an expected interval is known.
* [-outputValueUnitRatio r] The scaling factor by which to divide histogram recorded values units
     *                                 in output. [default = 1000000.0 (1 msec in nsec)]
* </pre>
* @param args command line arguments
* @throws FileNotFoundException if specified input file is not found
*/
public HistogramLogProcessor(final String[] args) throws FileNotFoundException {
this.setName("HistogramLogProcessor");
config = new HistogramLogProcessorConfiguration(args);
if (config.inputFileName != null) {
logReader = new HistogramLogReader(config.inputFileName);
} else {
logReader = new HistogramLogReader(System.in);
}
}
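    /*
     * Example invocations (an illustrative sketch; the jar name and file names below are hypothetical
     * placeholders, and the flags are those documented above):
     *
     *   java -cp HdrHistogram.jar org.HdrHistogram.HistogramLogProcessor -i latency.hlog -o latency_report
     *   java -cp HdrHistogram.jar org.HdrHistogram.HistogramLogProcessor -i latency.hlog -start 60 -end 120 -csv
     *
     * The first form writes an interval percentile log to "latency_report" and an overall percentile
     * distribution to "latency_report.hgrm"; the second form restricts processing to the 60..120 second
     * range (relative to the log's start time) and formats its output as CSV.
     */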
/**
* main() method.
*
* @param args command line arguments
*/
public static void main(final String[] args) {
final HistogramLogProcessor processor;
try {
processor = new HistogramLogProcessor(args);
processor.start();
} catch (FileNotFoundException ex) {
System.err.println("failed to open input file.");
}
}
}
| 30,037 | 54.936685 | 167 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/ConcurrentDoubleHistogram.java | /**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram;
import java.nio.ByteBuffer;
import java.util.zip.DataFormatException;
/**
* <h3>A floating point values High Dynamic Range (HDR) Histogram that supports safe concurrent recording
* operations.</h3>
* <p>
* A {@link ConcurrentDoubleHistogram} is a variant of {@link org.HdrHistogram.DoubleHistogram} that guarantees
* lossless recording of values into the histogram even when the histogram is updated by multiple threads, and
* supports auto-resize and auto-ranging operations that may occur concurrently as a result of recording operations.
* <p>
* It is important to note that concurrent recording, auto-sizing, and value shifting are the only thread-safe behaviors
* provided by {@link ConcurrentDoubleHistogram}, and that it is not otherwise synchronized. Specifically, {@link
* ConcurrentDoubleHistogram} provides no implicit synchronization that would prevent the contents of the histogram
* from changing during queries, iterations, copies, or addition operations on the histogram. Callers wishing to make
* potentially concurrent, multi-threaded updates that would safely work in the presence of queries, copies, or
* additions of histogram objects should either take care to externally synchronize and/or order their access,
* use the {@link SynchronizedDoubleHistogram} variant, or (recommended) use the {@link DoubleRecorder}
* or {@link SingleWriterDoubleRecorder} which are intended for this purpose.
* <p>
* {@link ConcurrentDoubleHistogram} supports the recording and analyzing sampled data value counts across a
* configurable dynamic range of floating point (double) values, with configurable value precision within the range.
* Dynamic range is expressed as a ratio between the highest and lowest non-zero values trackable within the histogram
* at any given time. Value precision is expressed as the number of significant [decimal] digits in the value recording,
* and provides control over value quantization behavior across the value range and the subsequent value resolution at
* any given level.
* <p>
* Auto-ranging: Unlike integer value based histograms, the specific value range tracked by a {@link
* ConcurrentDoubleHistogram} is not specified upfront. Only the dynamic range of values that the histogram can cover is
* (optionally) specified. E.g. When a {@link ConcurrentDoubleHistogram} is created to track a dynamic range of
 * 3600000000000 (enough to track values from a nanosecond to an hour), values could be recorded into it in any
* consistent unit of time as long as the ratio between the highest and lowest non-zero values stays within the
* specified dynamic range, so recording in units of nanoseconds (1.0 thru 3600000000000.0), milliseconds (0.000001
 * thru 3600000.0), seconds (0.000000001 thru 3600.0), or hours (1/3.6E12 thru 1.0) will all work just as well.
* <p>
* Auto-resizing: When constructed with no specified dynamic range (or when auto-resize is turned on with {@link
* ConcurrentDoubleHistogram#setAutoResize}) a {@link ConcurrentDoubleHistogram} will auto-resize its dynamic range to
* include recorded values as they are encountered. Note that recording calls that cause auto-resizing may take
* longer to execute, as resizing incurs allocation and copying of internal data structures.
* <p>
* Attempts to record non-zero values that range outside of the specified dynamic range (or exceed the limits of
 * the dynamic range when auto-resizing) may result in {@link ArrayIndexOutOfBoundsException} exceptions, either
* due to overflow or underflow conditions. These exceptions will only be thrown if recording the value would have
* resulted in discarding or losing the required value precision of values already recorded in the histogram.
* <p>
* See package description for {@link org.HdrHistogram} for details.
*/
public class ConcurrentDoubleHistogram extends DoubleHistogram {
/**
     * Construct a new auto-resizing ConcurrentDoubleHistogram using a precision stated as a number of significant decimal
* digits.
*
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal
* digits to which the histogram will maintain value resolution and
* separation. Must be a non-negative integer between 0 and 5.
*/
public ConcurrentDoubleHistogram(final int numberOfSignificantValueDigits) {
this(2, numberOfSignificantValueDigits);
setAutoResize(true);
}
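    /*
     * A minimal usage sketch (illustrative; the recorded value below is a placeholder): an auto-resizing
     * histogram tracking values to 3 significant decimal digits, shared safely across recording threads.
     *
     *   ConcurrentDoubleHistogram histogram = new ConcurrentDoubleHistogram(3);
     *   histogram.recordValue(0.042);                        // e.g. a latency measured in seconds
     *   double p99 = histogram.getValueAtPercentile(99.0);   // read back the 99th percentile
     */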
/**
     * Construct a new ConcurrentDoubleHistogram with the specified dynamic range (provided in {@code highestToLowestValueRatio})
* and using a precision stated as a number of significant decimal digits.
*
* @param highestToLowestValueRatio specifies the dynamic range to use
* @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal
* digits to which the histogram will maintain value resolution and
* separation. Must be a non-negative integer between 0 and 5.
*/
public ConcurrentDoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits) {
this(highestToLowestValueRatio, numberOfSignificantValueDigits, ConcurrentHistogram.class);
}
/**
* Construct a {@link ConcurrentDoubleHistogram} with the same range settings as a given source,
     * duplicating the source's start/end timestamps (but NOT its contents)
* @param source The source histogram to duplicate
*/
public ConcurrentDoubleHistogram(final DoubleHistogram source) {
super(source);
}
ConcurrentDoubleHistogram(final long highestToLowestValueRatio,
final int numberOfSignificantValueDigits,
final Class<? extends AbstractHistogram> internalCountsHistogramClass) {
super(highestToLowestValueRatio, numberOfSignificantValueDigits, internalCountsHistogramClass);
}
ConcurrentDoubleHistogram(final long highestToLowestValueRatio,
final int numberOfSignificantValueDigits,
final Class<? extends AbstractHistogram> internalCountsHistogramClass,
AbstractHistogram internalCountsHistogram) {
super(
highestToLowestValueRatio,
numberOfSignificantValueDigits,
internalCountsHistogramClass,
internalCountsHistogram
);
}
/**
* Construct a new ConcurrentDoubleHistogram by decoding it from a ByteBuffer.
* @param buffer The buffer to decode from
* @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high
* @return The newly constructed ConcurrentDoubleHistogram
*/
public static ConcurrentDoubleHistogram decodeFromByteBuffer(
final ByteBuffer buffer,
final long minBarForHighestToLowestValueRatio) {
try {
int cookie = buffer.getInt();
if (!isNonCompressedDoubleHistogramCookie(cookie)) {
throw new IllegalArgumentException("The buffer does not contain a DoubleHistogram");
}
ConcurrentDoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer,
ConcurrentDoubleHistogram.class, ConcurrentHistogram.class,
minBarForHighestToLowestValueRatio);
return histogram;
} catch (DataFormatException ex) {
throw new RuntimeException(ex);
}
}
/**
* Construct a new ConcurrentDoubleHistogram by decoding it from a compressed form in a ByteBuffer.
* @param buffer The buffer to decode from
* @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high
* @return The newly constructed ConcurrentDoubleHistogram
* @throws DataFormatException on error parsing/decompressing the buffer
*/
public static ConcurrentDoubleHistogram decodeFromCompressedByteBuffer(
final ByteBuffer buffer,
final long minBarForHighestToLowestValueRatio) throws DataFormatException {
int cookie = buffer.getInt();
if (!isCompressedDoubleHistogramCookie(cookie)) {
throw new IllegalArgumentException("The buffer does not contain a compressed DoubleHistogram");
}
ConcurrentDoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer,
ConcurrentDoubleHistogram.class, ConcurrentHistogram.class,
minBarForHighestToLowestValueRatio);
return histogram;
}
}
| 8,961 | 56.448718 | 120 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/HistogramLogWriter.java | package org.HdrHistogram;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.OutputStream;
import java.io.PrintStream;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Date;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.zip.Deflater;
import static java.nio.ByteOrder.BIG_ENDIAN;
/**
* A histogram log writer.
* <p>
 * Histogram logs are used to capture full fidelity, per-time-interval
* histograms of a recorded value.
* <p>
* For example, a histogram log can be used to capture high fidelity
* reaction-time logs for some measured system or subsystem component.
* Such a log would capture a full reaction time histogram for each
* logged interval, and could be used to later reconstruct a full
* HdrHistogram of the measured reaction time behavior for any arbitrary
* time range within the log, by adding [only] the relevant interval
* histograms.
* <p>
* This log writer will produce histogram logs that adhere to the
 * histogram log format (see {@link HistogramLogReader} for log format
* details). Optional comments, start time, legend, and format version
* can be logged.
* <p>
* By convention, it is typical for the logging application
* to use a comment to indicate the logging application at the head
* of the log, followed by the log format version, a start time,
* and a legend (in that order).
*
*/
public class HistogramLogWriter {
private static final String HISTOGRAM_LOG_FORMAT_VERSION = "1.3";
private static Pattern containsDelimiterPattern = Pattern.compile(".[, \\r\\n].");
private Matcher containsDelimiterMatcher = containsDelimiterPattern.matcher("");
private final PrintStream log;
private ByteBuffer targetBuffer;
private long baseTime = 0;
/**
* Constructs a new HistogramLogWriter around a newly created file with the specified file name.
* @param outputFileName The name of the file to create
* @throws FileNotFoundException when unable to open outputFileName
*/
public HistogramLogWriter(final String outputFileName) throws FileNotFoundException {
log = new PrintStream(outputFileName);
}
/**
* Constructs a new HistogramLogWriter that will write into the specified file.
* @param outputFile The File to write to
* @throws FileNotFoundException when unable to open outputFile
*/
public HistogramLogWriter(final File outputFile) throws FileNotFoundException {
log = new PrintStream(outputFile);
}
/**
* Constructs a new HistogramLogWriter that will write into the specified output stream.
* @param outputStream The OutputStream to write to
*/
public HistogramLogWriter(final OutputStream outputStream) {
log = new PrintStream(outputStream);
}
/**
* Constructs a new HistogramLogWriter that will write into the specified print stream.
* @param printStream The PrintStream to write to
*/
public HistogramLogWriter(final PrintStream printStream) {
log = printStream;
}
/**
* Closes the file or output stream for this log writer.
*/
public void close() {
log.close();
}
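    /*
     * A minimal usage sketch (illustrative; the application name and file name are placeholders), following
     * the conventional header order described in the class comment: an application comment, the log format
     * version, a start time, and a legend, followed by interval histograms as they are produced.
     *
     *   HistogramLogWriter writer = new HistogramLogWriter("latency.hlog");
     *   writer.outputComment("[Logged with ExampleLoadGenerator 1.0]");
     *   writer.outputLogFormatVersion();
     *   long now = System.currentTimeMillis();
     *   writer.outputStartTime(now);
     *   writer.setBaseTime(now);      // subsequent interval timestamps are logged relative to this point
     *   writer.outputLegend();
     *   // for each interval histogram produced (e.g. by a Recorder):
     *   //     writer.outputIntervalHistogram(intervalHistogram);
     *   writer.close();
     */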
/**
* Output an interval histogram, with the given timestamp information and the [optional] tag
* associated with the histogram, using a configurable maxValueUnitRatio. (note that the
* specified timestamp information will be used, and the timestamp information in the actual
* histogram will be ignored).
* The max value reported with the interval line will be scaled by the given maxValueUnitRatio.
* @param startTimeStampSec The start timestamp to log with the interval histogram, in seconds.
* @param endTimeStampSec The end timestamp to log with the interval histogram, in seconds.
* @param histogram The interval histogram to log.
* @param maxValueUnitRatio The ratio by which to divide the histogram's max value when reporting on it.
*/
public synchronized void outputIntervalHistogram(final double startTimeStampSec,
final double endTimeStampSec,
final EncodableHistogram histogram,
final double maxValueUnitRatio) {
if ((targetBuffer == null) || targetBuffer.capacity() < histogram.getNeededByteBufferCapacity()) {
targetBuffer = ByteBuffer.allocate(histogram.getNeededByteBufferCapacity()).order(BIG_ENDIAN);
}
targetBuffer.clear();
int compressedLength = histogram.encodeIntoCompressedByteBuffer(targetBuffer, Deflater.BEST_COMPRESSION);
byte[] compressedArray = Arrays.copyOf(targetBuffer.array(), compressedLength);
String tag = histogram.getTag();
if (tag == null) {
log.format(Locale.US, "%.3f,%.3f,%.3f,%s\n",
startTimeStampSec,
endTimeStampSec - startTimeStampSec,
histogram.getMaxValueAsDouble() / maxValueUnitRatio,
Base64Helper.printBase64Binary(compressedArray)
);
} else {
containsDelimiterMatcher.reset(tag);
if (containsDelimiterMatcher.matches()) {
throw new IllegalArgumentException("Tag string cannot contain commas, spaces, or line breaks");
}
log.format(Locale.US, "Tag=%s,%.3f,%.3f,%.3f,%s\n",
tag,
startTimeStampSec,
endTimeStampSec - startTimeStampSec,
histogram.getMaxValueAsDouble() / maxValueUnitRatio,
Base64Helper.printBase64Binary(compressedArray)
);
}
}
/**
* Output an interval histogram, with the given timestamp information, and the [optional] tag
* associated with the histogram. (note that the specified timestamp information will be used,
* and the timestamp information in the actual histogram will be ignored).
* The max value in the histogram will be reported scaled down by a default maxValueUnitRatio of
* 1,000,000 (which is the msec : nsec ratio). Caller should use the direct form specifying
     * maxValueUnitRatio if some other ratio is needed for the max value output.
* @param startTimeStampSec The start timestamp to log with the interval histogram, in seconds.
* @param endTimeStampSec The end timestamp to log with the interval histogram, in seconds.
* @param histogram The interval histogram to log.
*/
public void outputIntervalHistogram(final double startTimeStampSec,
final double endTimeStampSec,
final EncodableHistogram histogram) {
outputIntervalHistogram(startTimeStampSec, endTimeStampSec, histogram, 1000000.0);
}
/**
* Output an interval histogram, using the start/end timestamp indicated in the histogram,
* and the [optional] tag associated with the histogram.
* The histogram start and end timestamps are assumed to be in msec units. Logging will be
     * in seconds, relative to a base time (if set via {@link org.HdrHistogram.HistogramLogWriter#setBaseTime}).
* The default base time is 0.
* <p>
* By convention, histogram start/end time are generally stamped with absolute times in msec
* since the epoch. For logging with absolute time stamps, the base time would remain zero. For
* logging with relative time stamps (time since a start point), the base time should be set
* with {@link org.HdrHistogram.HistogramLogWriter#setBaseTime}.
* <p>
* The max value in the histogram will be reported scaled down by a default maxValueUnitRatio of
* 1,000,000 (which is the msec : nsec ratio). Caller should use the direct form specifying
* maxValueUnitRatio if some other ratio is needed for the max value output.
* @param histogram The interval histogram to log.
*/
public void outputIntervalHistogram(final EncodableHistogram histogram) {
outputIntervalHistogram((histogram.getStartTimeStamp() - baseTime)/1000.0,
(histogram.getEndTimeStamp() - baseTime)/1000.0,
histogram);
}
/**
* Log a start time in the log.
* @param startTimeMsec time (in milliseconds) since the absolute start time (the epoch)
*/
public void outputStartTime(final long startTimeMsec) {
log.format(Locale.US, "#[StartTime: %.3f (seconds since epoch), %s]\n",
startTimeMsec / 1000.0,
(new Date(startTimeMsec)).toString());
}
/**
* Log a base time in the log.
* @param baseTimeMsec time (in milliseconds) since the absolute start time (the epoch)
*/
public void outputBaseTime(final long baseTimeMsec) {
log.format(Locale.US, "#[BaseTime: %.3f (seconds since epoch)]\n",
baseTimeMsec/1000.0);
}
/**
* Log a comment to the log.
     * Comments will be preceded with the '#' character.
* @param comment the comment string.
*/
public void outputComment(final String comment) {
log.format("#%s\n", comment);
}
/**
* Output a legend line to the log.
*/
public void outputLegend() {
log.println("\"StartTimestamp\",\"Interval_Length\",\"Interval_Max\",\"Interval_Compressed_Histogram\"");
}
/**
* Output a log format version to the log.
*/
public void outputLogFormatVersion() {
outputComment("[Histogram log format version " + HISTOGRAM_LOG_FORMAT_VERSION +"]");
}
/**
* Set a base time to subtract from supplied histogram start/end timestamps when
* logging based on histogram timestamps.
* Base time is expected to be in msec since the epoch, as histogram start/end times
* are typically stamped with absolute times in msec since the epoch.
* @param baseTimeMsec base time to calculate timestamp deltas from
*/
public void setBaseTime(long baseTimeMsec) {
this.baseTime = baseTimeMsec;
}
/**
* return the current base time offset (see {@link org.HdrHistogram.HistogramLogWriter#setBaseTime}).
* @return the current base time
*/
public long getBaseTime() {
return baseTime;
}
}
| 10,432 | 41.583673 | 113 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/AllValuesIterator.java | /**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
/**
* Used for iterating through histogram values using the finest granularity steps supported by the underlying
* representation. The iteration steps through all possible unit value levels, regardless of whether or not
* there were recorded values for that value level, and terminates when all recorded histogram values are exhausted.
*/
public class AllValuesIterator extends AbstractHistogramIterator implements Iterator<HistogramIterationValue> {
int visitedIndex;
/**
* Reset iterator for re-use in a fresh iteration over the same histogram data set.
*/
public void reset() {
reset(histogram);
}
private void reset(final AbstractHistogram histogram) {
super.resetIterator(histogram);
visitedIndex = -1;
}
/**
* @param histogram The histogram this iterator will operate on
*/
public AllValuesIterator(final AbstractHistogram histogram) {
reset(histogram);
}
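    /*
     * This iterator is usually obtained through AbstractHistogram.allValues() rather than constructed
     * directly. A minimal sketch (illustrative):
     *
     *   for (HistogramIterationValue v : histogram.allValues()) {
     *       if (v.getCountAtValueIteratedTo() > 0) {
     *           System.out.println(v.getValueIteratedTo() + " : " + v.getCountAtValueIteratedTo());
     *       }
     *   }
     */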
@Override
void incrementIterationLevel() {
visitedIndex = currentIndex;
}
@Override
boolean reachedIterationLevel() {
return (visitedIndex != currentIndex);
}
@Override
public boolean hasNext() {
if (histogram.getTotalCount() != arrayTotalCount) {
throw new ConcurrentModificationException();
}
// Unlike other iterators AllValuesIterator is only done when we've exhausted the indices:
return (currentIndex < (histogram.countsArrayLength - 1));
}
}
| 1,787 | 28.8 | 116 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/LogarithmicIterator.java | /**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram;
import java.util.Iterator;
/**
* Used for iterating through histogram values in logarithmically increasing levels. The iteration is
* performed in steps that start at <i>valueUnitsInFirstBucket</i> and increase exponentially according to
* <i>logBase</i>, terminating when all recorded histogram values are exhausted. Note that each iteration "bucket"
* includes values up to and including the next bucket boundary value.
*/
public class LogarithmicIterator extends AbstractHistogramIterator implements Iterator<HistogramIterationValue> {
long valueUnitsInFirstBucket;
double logBase;
double nextValueReportingLevel;
long currentStepHighestValueReportingLevel;
long currentStepLowestValueReportingLevel;
/**
* Reset iterator for re-use in a fresh iteration over the same histogram data set.
* @param valueUnitsInFirstBucket the size (in value units) of the first value bucket step
* @param logBase the multiplier by which the bucket size is expanded in each iteration step.
*/
public void reset(final long valueUnitsInFirstBucket, final double logBase) {
reset(histogram, valueUnitsInFirstBucket, logBase);
}
private void reset(final AbstractHistogram histogram, final long valueUnitsInFirstBucket, final double logBase) {
super.resetIterator(histogram);
this.logBase = logBase;
this.valueUnitsInFirstBucket = valueUnitsInFirstBucket;
nextValueReportingLevel = valueUnitsInFirstBucket;
this.currentStepHighestValueReportingLevel = ((long) nextValueReportingLevel) - 1;
this.currentStepLowestValueReportingLevel = histogram.lowestEquivalentValue(currentStepHighestValueReportingLevel);
}
/**
* @param histogram The histogram this iterator will operate on
* @param valueUnitsInFirstBucket the size (in value units) of the first value bucket step
* @param logBase the multiplier by which the bucket size is expanded in each iteration step.
*/
public LogarithmicIterator(final AbstractHistogram histogram, final long valueUnitsInFirstBucket, final double logBase) {
reset(histogram, valueUnitsInFirstBucket, logBase);
}
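    /*
     * This iterator is usually obtained through AbstractHistogram.logarithmicBucketValues() rather than
     * constructed directly. A minimal sketch (illustrative): buckets start at 1 value unit and double in
     * size on each step.
     *
     *   for (HistogramIterationValue v : histogram.logarithmicBucketValues(1, 2.0)) {
     *       System.out.println("<= " + v.getValueIteratedTo() + " : " + v.getCountAddedInThisIterationStep());
     *   }
     */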
@Override
public boolean hasNext() {
if (super.hasNext()) {
return true;
}
        // If the next iterate will not move to the next sub bucket index (which is empty if
        // we reached this point), then we are not yet done iterating (we want to iterate
        // until we are no longer on a value that has a count, rather than until we first reach
// the last value that has a count. The difference is subtle but important)...
return (histogram.lowestEquivalentValue((long) nextValueReportingLevel) < nextValueAtIndex);
}
@Override
void incrementIterationLevel() {
nextValueReportingLevel *= logBase;
this.currentStepHighestValueReportingLevel = ((long)nextValueReportingLevel) - 1;
currentStepLowestValueReportingLevel = histogram.lowestEquivalentValue(currentStepHighestValueReportingLevel);
}
@Override
long getValueIteratedTo() {
return currentStepHighestValueReportingLevel;
}
@Override
boolean reachedIterationLevel() {
return ((currentValueAtIndex >= currentStepLowestValueReportingLevel) ||
(currentIndex >= histogram.countsArrayLength - 1)) ;
}
}
| 3,600 | 42.914634 | 125 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/ZigZagEncoding.java | /**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram;
import java.nio.ByteBuffer;
/**
* This class provides encoding and decoding methods for writing and reading
* ZigZag-encoded LEB128-64b9B-variant (Little Endian Base 128) values to/from a
* {@link ByteBuffer}. LEB128's variable length encoding provides for using a
* smaller number of bytes for smaller values, and the use of ZigZag encoding
* allows small (closer to zero) negative values to use fewer bytes. Details
* on both LEB128 and ZigZag can be readily found elsewhere.
*
* The LEB128-64b9B-variant encoding used here diverges from the "original"
* LEB128 as it extends to 64 bit values: In the original LEB128, a 64 bit
* value can take up to 10 bytes in the stream, where this variant's encoding
* of a 64 bit values will max out at 9 bytes.
*
* As such, this encoder/decoder should NOT be used for encoding or decoding
* "standard" LEB128 formats (e.g. Google Protocol Buffers).
*/
class ZigZagEncoding {
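    /*
     * A round-trip sketch (illustrative): small-magnitude values, including small negative ones, encode
     * into a single byte. For example, -3 zig-zags to 5, which fits in one byte.
     *
     *   ByteBuffer buffer = ByteBuffer.allocate(9);   // 9 bytes is the worst case for a 64 bit value
     *   ZigZagEncoding.putLong(buffer, -3L);          // writes a single byte (0x05)
     *   buffer.flip();
     *   long value = ZigZagEncoding.getLong(buffer);  // reads back -3L
     */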
/**
* Writes a long value to the given buffer in LEB128 ZigZag encoded format
* @param buffer the buffer to write to
* @param value the value to write to the buffer
*/
static void putLong(ByteBuffer buffer, long value) {
value = (value << 1) ^ (value >> 63);
if (value >>> 7 == 0) {
buffer.put((byte) value);
} else {
buffer.put((byte) ((value & 0x7F) | 0x80));
if (value >>> 14 == 0) {
buffer.put((byte) (value >>> 7));
} else {
buffer.put((byte) (value >>> 7 | 0x80));
if (value >>> 21 == 0) {
buffer.put((byte) (value >>> 14));
} else {
buffer.put((byte) (value >>> 14 | 0x80));
if (value >>> 28 == 0) {
buffer.put((byte) (value >>> 21));
} else {
buffer.put((byte) (value >>> 21 | 0x80));
if (value >>> 35 == 0) {
buffer.put((byte) (value >>> 28));
} else {
buffer.put((byte) (value >>> 28 | 0x80));
if (value >>> 42 == 0) {
buffer.put((byte) (value >>> 35));
} else {
buffer.put((byte) (value >>> 35 | 0x80));
if (value >>> 49 == 0) {
buffer.put((byte) (value >>> 42));
} else {
buffer.put((byte) (value >>> 42 | 0x80));
if (value >>> 56 == 0) {
buffer.put((byte) (value >>> 49));
} else {
buffer.put((byte) (value >>> 49 | 0x80));
buffer.put((byte) (value >>> 56));
}
}
}
}
}
}
}
}
}
/**
* Writes an int value to the given buffer in LEB128-64b9B ZigZag encoded format
* @param buffer the buffer to write to
* @param value the value to write to the buffer
*/
static void putInt(ByteBuffer buffer, int value) {
value = (value << 1) ^ (value >> 31);
if (value >>> 7 == 0) {
buffer.put((byte) value);
} else {
buffer.put((byte) ((value & 0x7F) | 0x80));
if (value >>> 14 == 0) {
buffer.put((byte) (value >>> 7));
} else {
buffer.put((byte) (value >>> 7 | 0x80));
if (value >>> 21 == 0) {
buffer.put((byte) (value >>> 14));
} else {
buffer.put((byte) (value >>> 14 | 0x80));
if (value >>> 28 == 0) {
buffer.put((byte) (value >>> 21));
} else {
buffer.put((byte) (value >>> 21 | 0x80));
buffer.put((byte) (value >>> 28));
}
}
}
}
}
/**
* Read an LEB128-64b9B ZigZag encoded long value from the given buffer
* @param buffer the buffer to read from
* @return the value read from the buffer
*/
static long getLong(ByteBuffer buffer) {
long v = buffer.get();
long value = v & 0x7F;
if ((v & 0x80) != 0) {
v = buffer.get();
value |= (v & 0x7F) << 7;
if ((v & 0x80) != 0) {
v = buffer.get();
value |= (v & 0x7F) << 14;
if ((v & 0x80) != 0) {
v = buffer.get();
value |= (v & 0x7F) << 21;
if ((v & 0x80) != 0) {
v = buffer.get();
value |= (v & 0x7F) << 28;
if ((v & 0x80) != 0) {
v = buffer.get();
value |= (v & 0x7F) << 35;
if ((v & 0x80) != 0) {
v = buffer.get();
value |= (v & 0x7F) << 42;
if ((v & 0x80) != 0) {
v = buffer.get();
value |= (v & 0x7F) << 49;
if ((v & 0x80) != 0) {
v = buffer.get();
value |= v << 56;
}
}
}
}
}
}
}
}
value = (value >>> 1) ^ (-(value & 1));
return value;
}
/**
* Read an LEB128-64b9B ZigZag encoded int value from the given buffer
* @param buffer the buffer to read from
* @return the value read from the buffer
*/
    static int getInt(ByteBuffer buffer) {
int v = buffer.get();
int value = v & 0x7F;
if ((v & 0x80) != 0) {
v = buffer.get();
value |= (v & 0x7F) << 7;
if ((v & 0x80) != 0) {
v = buffer.get();
value |= (v & 0x7F) << 14;
if ((v & 0x80) != 0) {
v = buffer.get();
value |= (v & 0x7F) << 21;
if ((v & 0x80) != 0) {
v = buffer.get();
value |= (v & 0x7F) << 28;
}
}
}
}
value = (value >>> 1) ^ (-(value & 1));
return value;
}
}
| 7,056 | 37.774725 | 84 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/packedarray/AbstractPackedArrayContext.java | package org.HdrHistogram.packedarray;
import java.io.Serializable;
import java.util.Iterator;
import java.util.NoSuchElementException;
/**
* A packed-value, sparse array context used for storing 64 bit signed values.
* <p>
* An array context is optimised for tracking sparsely set (as in mostly zeros) values that tend to not make use of the
* full 64 bit value range even when they are non-zero. The array context's internal representation is such that the
* packed value at each virtual array index may be represented by 0-8 bytes of actual storage.
* <p>
* An array context encodes the packed values in 8 "set trees" with each set tree representing one byte of the packed
* value at the virtual index in question. The {@link #getPackedIndex(int, int, boolean)} method is used to look up the
* byte-index corresponding to the given (set tree) value byte of the given virtual index, and can be used to add
* entries to represent that byte as needed. As a successful {@link #getPackedIndex(int, int, boolean)} may require a
* resizing of the array, it can throw a {@link ResizeException} to indicate that the requested packed index cannot be
* found or added without a resize of the physical storage.
*/
abstract class AbstractPackedArrayContext implements Serializable {
/*
*
* The physical representation uses an insert-at-the-end mechanism for adding contents to the array. Any
* insertion will occur at the very end of the array, and any expansion of an element will move it to the end,
* leaving an empty slot behind.
*
* Terminology:
*
* long-word: a 64-bit-aligned 64 bit word
* short-word: a 16-bit-aligned 16 bit word
* byte: an 8-bit-aligned byte
*
* long-index: an index of a 64-bit-aligned word within the overall array (i.e. in multiples of 8 bytes)
* short-index: an index of a 16-bit aligned short within the overall array (i.e. in multiples of 2 bytes)
* byte-index: an index of an 8-bit aligned byte within the overall array (i.e. in multiples of 1 byte)
*
* The storage array stores long (64 bit) words. Lookups for the various sizes are done as such:
*
* long getAtLongIndex(int longIndex) { return array[longIndex]; }
     * short getAtShortIndex(int shortIndex) { return (short)((array[shortIndex >> 2] >> ((shortIndex & 0x3) << 4)) & 0xffff);}
     * byte getAtByteIndex(int byteIndex) { return (byte)((array[byteIndex >> 3] >> ((byteIndex & 0x7) << 3)) & 0xff); }
*
* [Therefore there is no dependence on byte endianness of the underlying architecture]
*
* Structure:
*
* The packed array captures values at virtual indexes in a collection of striped "set trees" (also called "sets"),
* with each set tree representing one byte of the value at the virtual index in question. As such, there are 8
* sets in the array, each corresponding to a byte in the overall value being stored. Set 0 contains the LSByte
* of the value, and Set 7 contains the MSByte of the value.
*
     * The array contents are composed of three types of entries:
*
* - The root indexes: A fixed size 8 short-words array of short indexes at the start of the array, containing
* the short-index of the root entry of each of the 8 set trees.
*
     *   - Non-Leaf Entries: Variable sized, 2-18 short-word entries representing non-leaf entries in a set tree.
     *     Non-Leaf entries consist of a 2 short-word header containing a packed slot indicators bitmask and the
     *     (optional non-zero) index of the previous version of the entry, followed by an array of 0-16 short-words.
* The shortword found at a given slot in this array holds an index to an entry in the next level of
* the set tree.
*
* - Leaf Entries: comprised of long-words. Each byte [0-7] in the longword holds an actual value. Specifically,
* the byte-index of that LeafEntry byte in the array is the byte-index for the given set's byte value of a
* virtual index.
*
* If a given virtual index for a given set has no entry in a given set tree, the byte value for that set of
     *    that virtual index is interpreted as 0. If a given set tree does not have an entry for a given virtual index,
     *    it is safe to assume that no higher-significance set tree has one either.
**
* Non-leaf entries structure and mutation protocols:
*
* The structure of a Non-Leaf entry in the array can be roughly described in terms of this C-style struct:
*
* struct nonLeafEntry {
* short packedSlotIndicators;
* short previousVersionIndex;
* short[] entrySlotsIndexes;
* }
*
* Non-leaf entries are 2-18 short-words in length, with the length determined by the number of bits set in
* the packedSlotIndicators short-word in the entry. The packed slot indicators short-word is a bit mask which
* represents the 16 possible next-level entries below the given entry, and has a bit set (to '1') for each slot
* that is actually populated with a next level entry. Each of the short-words in the entrySlots is
* associated with a specific active ('1') bit in the packedSlotIndicators short-word, and holds the index
     * to the next level's entry associated with a given path in the tree. [Note: the values in entrySlotsIndexes[]
* are short-indexes if the next level is not a leaf level, and long-indexes if the next level is
* a leaf.]
*
* Summary of Non-leaf entry use and replacement protocol:
*
* - No value in any entrySlotsIndexes[] array is ever initialized to a zero value. Zero values in
* entrySlotsIndexes[] can only appear through consolidation (see below). Once an entrySlotsIndexes[]
* slot is observed to contain a zero, it cannot change to a non-zero value.
*
* - Zero values encountered in entrySlotsIndexes[] arrays are never followed. If a zero value is found
* when looking for the index to a lower level entry during a tree walk, the tree walking operation is
* restarted from the root.
*
* - A Non-Leaf entry with an active (non zero index) previous version is never followed or expanded.
* Instead, any thread encountering a Non-leaf entry with an active previous version will consolidate
     *    the previous version with the current one. The consolidation operation will clear (zero) the
* previousVersionIndex, which will then allow the caller to continue with whatever use the thread was
* attempting to make of the entry.
*
* - Expansion of entries: Since entries hold only enough storage to represent currently populated paths
* below them in the set tree, any addition of entries at a lower level requires the expansion of the entry
* to make room for a larger entrySlotsIndexes array. The expansion of an entry in order to add a new
     *       next-level entry under it follows these steps:
*
* - Allocate a new and larger entry structure (initializes all slots to -1)
*
* - Populate the newly inserted slot with an index to a newly allocated next-level entry
*
* - Link the newly expanded entry to the previous entry structure via the previousVersionIndex field
*
* - Publish the newly expanded entry by [atomically] replacing the "pointer index" to the previous
* entry (located at a higher level entry's slot, or in the root indexes) with a "pointer index" to
* the newly expanded entry structure
*
* A failure to atomically publish a newly expanded entry (e.g. if the "pointer index" being replaced
* holds a value other than that in our not-yet-published previousVersionIndex) will restart the expansion
* operation from the beginning.
*
* When first published, a newly-visible expanded entry is not immediately "usable" because it has an
* active, "not yet consolidated" previous version entry, and any user of the entry will first have to
* consolidate it. The expansion will follow publication of the expanded entry with a consolidation of
* the previous entry into the new one, clearing the previousVersionIndex field in the process, and
* enabling normal use of the expanded entry.
*
* - Concurrent consolidation: While expansion and consolidation are ongoing, other threads can be
* concurrently walking the set trees. Per the protocol stated here, any tree walk encountering a Non-Leaf
     *       entry with an active previous version will consolidate the entry before using it. Consolidation
     *       of a given entry can occur concurrently by an expanding thread and by multiple walking threads.
*
     *     - Consolidation of a previous version entry into a current one is done by:
*
* - For each non-zero index in the previous version entry, copy that index to the new associated
* entry slot in the entry, and CAS a zero in the old entry slot. If the CAS fails, repeat (including
* the zero check).
*
* - Once all entry slots in the previous version entry have been consolidated and zeroed, zero
* the index to the previous version entry.
*/
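    /*
     * A worked indexing example (illustrative), matching the short/byte accessors below: for shortIndex = 5,
     * the containing long-word is array[5 >> 2] = array[1], and the short occupies bits 16..31 of that word,
     * since (5 & 0x3) << 4 == 16. Similarly, for byteIndex = 11, the containing long-word is array[11 >> 3]
     * = array[1], and the byte occupies bits 24..31, since (11 & 0x7) << 3 == 24.
     */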
private static final int PACKED_ARRAY_GROWTH_INCREMENT = 16;
private static final int PACKED_ARRAY_GROWTH_FRACTION_POW2 = 4;
private static final int SET_0_START_INDEX = 0;
private static final int NUMBER_OF_SETS = 8;
private static final int LEAF_LEVEL_SHIFT = 3;
private static final int NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS = 2;
private static final int NON_LEAF_ENTRY_SLOT_INDICATORS_OFFSET = 0;
private static final int NON_LEAF_ENTRY_PREVIOUS_VERSION_OFFSET = 1;
static final int MINIMUM_INITIAL_PACKED_ARRAY_CAPACITY = 16;
static final int MAX_SUPPORTED_PACKED_COUNTS_ARRAY_LENGTH = (Short.MAX_VALUE / 4);
private final boolean isPacked;
private int physicalLength;
private int virtualLength = 0;
private int topLevelShift = Integer.MAX_VALUE; // Make it nonsensical until properly initialized.
AbstractPackedArrayContext(final int virtualLength, final int initialPhysicalLength) {
physicalLength = Math.max(initialPhysicalLength, MINIMUM_INITIAL_PACKED_ARRAY_CAPACITY);
isPacked = (physicalLength <= AbstractPackedArrayContext.MAX_SUPPORTED_PACKED_COUNTS_ARRAY_LENGTH);
if (!isPacked) {
physicalLength = virtualLength;
}
}
void init(final int virtualLength) {
if (!isPacked()) {
// Deal with non-packed context init:
this.virtualLength = virtualLength;
return;
}
// room for the 8 shorts root indexes:
boolean success;
do {
success = casPopulatedShortLength(getPopulatedShortLength(), SET_0_START_INDEX + 8);
} while (!success);
// Populate empty root entries, and point to them from the root indexes:
for (int i = 0; i < NUMBER_OF_SETS; i++) {
setAtShortIndex(SET_0_START_INDEX + i, (short) 0);
}
setVirtualLength(virtualLength);
}
//
// ### ######## ###### ######## ######## ### ###### ######## ######
// ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
// ## ## ## ## ## ## ## ## ## ## ## ## ##
// ## ## ######## ###### ## ######## ## ## ## ## ######
// ######### ## ## ## ## ## ## ######### ## ## ##
// ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
// ## ## ######## ###### ## ## ## ## ## ###### ## ######
//
abstract int length();
abstract int getPopulatedShortLength();
abstract boolean casPopulatedShortLength(int expectedPopulatedShortLength, int newPopulatedShortLength);
abstract boolean casPopulatedLongLength(int expectedPopulatedShortLength, int newPopulatedShortLength);
abstract long getAtLongIndex(int longIndex);
abstract boolean casAtLongIndex(int longIndex, long expectedValue, long newValue);
abstract void lazySetAtLongIndex(int longIndex, long newValue);
abstract void clearContents();
abstract void resizeArray(int newLength);
abstract long getAtUnpackedIndex(int index);
abstract void setAtUnpackedIndex(int index, long newValue);
abstract void lazySetAtUnpackedIndex(int index, long newValue);
abstract long incrementAndGetAtUnpackedIndex(int index);
abstract long addAndGetAtUnpackedIndex(int index, long valueToAdd);
abstract String unpackedToString();
//
// ######## ######## #### ## ## #### ######## #### ## ## ######## ####### ######## ######
// ## ## ## ## ## ### ### ## ## ## ## ## ## ## ## ## ## ## ##
// ## ## ## ## ## #### #### ## ## ## ## ## ## ## ## ## ## ##
// ######## ######## ## ## ### ## ## ## ## ## ## ###### ## ## ######## ######
// ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
// ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
// ## ## ## #### ## ## #### ## #### ### ######## ####### ## ######
//
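    // CAS-retry helper: atomically replaces the bits selected by valuePartMask within the long-word at
    // longIndex with valuePartAsLong (shifted into position by valuePartShift), leaving all other bits
    // of that long-word unchanged.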
void setValuePart(final int longIndex,
final long valuePartAsLong,
final long valuePartMask,
final int valuePartShift) {
boolean success;
do {
long currentLongValue = getAtLongIndex(longIndex);
long newLongValue = (currentLongValue & ~valuePartMask) | (valuePartAsLong << valuePartShift);
success = casAtLongIndex(longIndex, currentLongValue, newLongValue);
}
while (!success);
}
short getAtShortIndex(final int shortIndex) {
return (short) ((getAtLongIndex(shortIndex >> 2) >> ((shortIndex & 0x3) << 4)) & 0xffff);
}
short getIndexAtShortIndex(final int shortIndex) {
return (short) ((getAtLongIndex(shortIndex >> 2) >> ((shortIndex & 0x3) << 4)) & 0x7fff);
}
void setAtShortIndex(final int shortIndex, final short value) {
int longIndex = shortIndex >> 2;
int shortShift = (shortIndex & 0x3) << 4;
long shortMask = ((long) 0xffff) << shortShift;
long shortValueAsLong = ((long) value) & 0xffff;
setValuePart(longIndex, shortValueAsLong, shortMask, shortShift);
}
boolean casAtShortIndex(final int shortIndex, final short expectedValue, final short newValue) {
int longIndex = shortIndex >> 2;
int shortShift = (shortIndex & 0x3) << 4;
long shortMask = ~(((long) 0xffff) << shortShift);
long newShortValueAsLong = ((long) newValue) & 0xffff;
long expectedShortValueAsLong = ((long) expectedValue) & 0xffff;
boolean success;
do {
long currentLongValue = getAtLongIndex(longIndex);
long currentShortValueAsLong = (currentLongValue >> shortShift) & 0xffff;
if (currentShortValueAsLong != expectedShortValueAsLong) {
return false;
}
long newLongValue = (currentLongValue & shortMask) | (newShortValueAsLong << shortShift);
success = casAtLongIndex(longIndex, currentLongValue, newLongValue);
}
while (!success);
return true;
}
byte getAtByteIndex(final int byteIndex) {
return (byte) ((getAtLongIndex(byteIndex >> 3) >> ((byteIndex & 0x7) << 3)) & 0xff);
}
void setAtByteIndex(final int byteIndex, final byte value) {
int longIndex = byteIndex >> 3;
int byteShift = (byteIndex & 0x7) << 3;
long byteMask = ((long) 0xff) << byteShift;
long byteValueAsLong = ((long) value) & 0xff;
setValuePart(longIndex, byteValueAsLong, byteMask, byteShift);
}
/**
* add a byte value to a current byte value in the array
*
* @param byteIndex index of byte value to add to
* @param valueToAdd byte value to add
* @return the afterAddValue. ((afterAddValue & 0x100) != 0) indicates a carry.
*/
long addAtByteIndex(final int byteIndex, final byte valueToAdd) {
int longIndex = byteIndex >> 3;
int byteShift = (byteIndex & 0x7) << 3;
long byteMask = ((long) 0xff) << byteShift;
boolean success;
long newValue;
do {
long currentLongValue = getAtLongIndex(longIndex);
long byteValueAsLong = (currentLongValue >> byteShift) & 0xff;
newValue = byteValueAsLong + (((long) valueToAdd) & 0xff);
long newByteValueAsLong = newValue & 0xff;
long newLongValue = (currentLongValue & ~byteMask) | (newByteValueAsLong << byteShift);
success = casAtLongIndex(longIndex, currentLongValue, newLongValue);
}
while (!success);
return newValue;
}
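    // For example (illustrative): if the byte at byteIndex currently holds 0xff and valueToAdd is 1, the
    // stored byte wraps to 0x00 and the returned afterAddValue is 0x100; ((afterAddValue & 0x100) != 0)
    // signals that a carry into the next-higher-significance byte (the next set tree up) is needed.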
//
// ######## ## ## ######## ######## ## ## ######## #### ######## ## ######## ######
// ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ##
// ## #### ## ## ## ## #### ## ## ## ## ## ## ##
// ###### ## ## ## ## ######## ## ###### ## ###### ## ## ## ######
// ## ## #### ## ## ## ## ## ## ## ## ## ## ##
// ## ## ### ## ## ## ## ## ## ## ## ## ## ## ##
// ######## ## ## ## ## ## ## ## #### ######## ######## ######## ######
//
private int getPackedSlotIndicators(final int entryIndex) {
return ((int) getAtShortIndex(entryIndex + NON_LEAF_ENTRY_SLOT_INDICATORS_OFFSET)) & 0xffff;
}
private void setPackedSlotIndicators(final int entryIndex, final short newPackedSlotIndicators) {
setAtShortIndex(entryIndex + NON_LEAF_ENTRY_SLOT_INDICATORS_OFFSET, newPackedSlotIndicators);
}
private short getPreviousVersionIndex(final int entryIndex) {
return getAtShortIndex(entryIndex + NON_LEAF_ENTRY_PREVIOUS_VERSION_OFFSET);
}
private void setPreviousVersionIndex(final int entryIndex, final short newPreviousVersionIndex) {
setAtShortIndex(entryIndex + NON_LEAF_ENTRY_PREVIOUS_VERSION_OFFSET, newPreviousVersionIndex);
}
private short getIndexAtEntrySlot(final int entryIndex, final int slot) {
return getAtShortIndex(entryIndex + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + slot);
}
private void setIndexAtEntrySlot(final int entryIndex, final int slot, final short newIndexValue) {
setAtShortIndex(entryIndex + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + slot, newIndexValue);
}
private boolean casIndexAtEntrySlot(final int entryIndex,
final int slot,
final short expectedIndexValue,
final short newIndexValue) {
return casAtShortIndex(entryIndex + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + slot,
expectedIndexValue, newIndexValue);
}
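    // Used during consolidation: copies newIndexValue into the slot only if the slot currently holds a
    // non-zero index that is smaller than newIndexValue (a zero slot means the slot was already consolidated
    // and cleared, and an equal-or-larger index means a newer value has already been copied in).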
private boolean casIndexAtEntrySlotIfNonZeroAndLessThan(final int entryIndex,
final int slot,
final short newIndexValue) {
boolean success;
do {
short existingIndexValue = getIndexAtEntrySlot(entryIndex, slot);
if (existingIndexValue == 0) return false;
if (newIndexValue <= existingIndexValue) return false;
success = casIndexAtEntrySlot(entryIndex, slot, existingIndexValue, newIndexValue);
} while (!success);
return true;
}
//
// ######## ## ## ######## ######## ## ## ####### ######## ######
// ## ### ## ## ## ## ## ## ## ## ## ## ## ##
// ## #### ## ## ## ## #### ## ## ## ## ##
// ###### ## ## ## ## ######## ## ## ## ######## ######
// ## ## #### ## ## ## ## ## ## ## ##
// ## ## ### ## ## ## ## ## ## ## ## ##
// ######## ## ## ## ## ## ## ####### ## ######
//
private void expandArrayIfNeeded(final int entryLengthInLongs) throws ResizeException {
final int currentLength = length();
if (length() < getPopulatedLongLength() + entryLengthInLongs) {
int growthIncrement = Math.max(entryLengthInLongs, PACKED_ARRAY_GROWTH_INCREMENT);
growthIncrement = Math.max(growthIncrement, getPopulatedLongLength() >> PACKED_ARRAY_GROWTH_FRACTION_POW2);
throw new ResizeException(currentLength + growthIncrement);
}
}
private int newEntry(final int entryLengthInShorts) throws ResizeException {
// Add entry at the end of the array:
int newEntryIndex;
boolean success;
do {
newEntryIndex = getPopulatedShortLength();
expandArrayIfNeeded((entryLengthInShorts >> 2) + 1);
success = casPopulatedShortLength(newEntryIndex, (newEntryIndex + entryLengthInShorts));
} while (!success);
for (int i = 0; i < entryLengthInShorts; i++) {
setAtShortIndex(newEntryIndex + i, (short) -1); // Poison value -1. Must be overridden before read
}
return newEntryIndex;
}
private int newLeafEntry() throws ResizeException {
// Add entry at the end of the array:
int newEntryIndex;
boolean success;
do {
newEntryIndex = getPopulatedLongLength();
expandArrayIfNeeded(1);
success = casPopulatedLongLength(newEntryIndex, (newEntryIndex + 1));
} while (!success);
lazySetAtLongIndex(newEntryIndex, 0);
return newEntryIndex;
}
/**
* Consolidate entry with previous entry version if one exists
*
* @param entryIndex The shortIndex of the entry to be consolidated
*/
private void consolidateEntry(final int entryIndex) {
int previousVersionIndex = getPreviousVersionIndex(entryIndex);
if (previousVersionIndex == 0) return;
if (getPreviousVersionIndex(previousVersionIndex) != 0) {
throw new IllegalStateException("Encountered Previous Version Entry that is not itself consolidated.");
}
int previousVersionPackedSlotsIndicators = getPackedSlotIndicators(previousVersionIndex);
// Previous version exists, needs consolidation
int packedSlotsIndicators = getPackedSlotIndicators(entryIndex);
int insertedSlotMask = packedSlotsIndicators ^ previousVersionPackedSlotsIndicators; // only bit that differs
int slotsBelowBitNumber = packedSlotsIndicators & (insertedSlotMask - 1);
int insertedSlotIndex = Integer.bitCount(slotsBelowBitNumber);
int numberOfSlotsInEntry = Integer.bitCount(packedSlotsIndicators);
// Copy the entry slots from previous version, skipping the newly inserted slot in the target:
int sourceSlot = 0;
for (int targetSlot = 0; targetSlot < numberOfSlotsInEntry; targetSlot++) {
if (targetSlot != insertedSlotIndex) {
boolean success = true;
do {
short indexAtSlot = getIndexAtEntrySlot(previousVersionIndex, sourceSlot);
if (indexAtSlot != 0) {
// Copy observed index at slot to current entry
// (only copy value in if previous value is less than new one AND is non-zero)
casIndexAtEntrySlotIfNonZeroAndLessThan(entryIndex, targetSlot, indexAtSlot);
// CAS the previous version slot to 0.
// (Succeeds only if the index in that slot has not changed. Retry if it did).
success = casIndexAtEntrySlot(previousVersionIndex, sourceSlot, indexAtSlot, (short) 0);
}
}
while (!success);
sourceSlot++;
}
}
setPreviousVersionIndex(entryIndex, (short) 0);
}
/**
* Expand entry as indicated.
*
* @param existingEntryIndex the index of the entry
* @param entryPointerIndex index to the slot pointing to the entry (needs to be fixed up)
* @param insertedSlotIndex relative [packed] index of slot being inserted into entry
     * @param insertedSlotMask mask value of slot being inserted
* @param nextLevelIsLeaf the level below this one is a leaf level
* @return the updated index of the entry (-1 if expansion failed due to conflict)
* @throws RetryException if expansion fails due to concurrent conflict, and caller should try again.
*/
private int expandEntry(final int existingEntryIndex,
final int entryPointerIndex,
final int insertedSlotIndex,
final int insertedSlotMask,
final boolean nextLevelIsLeaf) throws RetryException, ResizeException {
int packedSlotIndicators = ((int) getAtShortIndex(existingEntryIndex)) & 0xffff;
packedSlotIndicators |= insertedSlotMask;
int numberOfSlotsInExpandedEntry = Integer.bitCount(packedSlotIndicators);
if (insertedSlotIndex >= numberOfSlotsInExpandedEntry) {
throw new IllegalStateException("inserted slot index is out of range given provided masks");
}
int expandedEntryLength = numberOfSlotsInExpandedEntry + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS;
// Create new next-level entry to refer to from slot at this level:
int indexOfNewNextLevelEntry = 0;
if (nextLevelIsLeaf) {
indexOfNewNextLevelEntry = newLeafEntry(); // Establish long-index to new leaf entry
} else {
// TODO: Optimize this by creating the whole sub-tree here, rather than a step that will immediately expand
// Create a new 1 word (empty, no slots set) entry for the next level:
indexOfNewNextLevelEntry = newEntry(NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS); // Establish index to new entry
setPackedSlotIndicators(indexOfNewNextLevelEntry, (short) 0);
setPreviousVersionIndex(indexOfNewNextLevelEntry, (short) 0);
}
short insertedSlotValue = (short) indexOfNewNextLevelEntry;
int expandedEntryIndex = newEntry(expandedEntryLength);
// populate the packed indicators word:
setPackedSlotIndicators(expandedEntryIndex, (short) packedSlotIndicators);
setPreviousVersionIndex(expandedEntryIndex, (short) existingEntryIndex);
// Populate the inserted slot with the index of the new next level entry:
setIndexAtEntrySlot(expandedEntryIndex, insertedSlotIndex, insertedSlotValue);
// Copy of previous version entries is deferred to later consolidateEntry() call.
// Set the pointer to the updated entry index. If CAS fails, discard by throwing retry exception.
boolean success = casAtShortIndex(entryPointerIndex, (short) existingEntryIndex, (short) expandedEntryIndex);
if (!success) {
throw new RetryException();
}
// Expanded entry is published, now consolidate it:
consolidateEntry(expandedEntryIndex);
return expandedEntryIndex;
}
//
// ###### ######## ######## ## ## ### ## ## #### ## ## ######## ######## ## ##
// ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ## ##
// ## ## ## ## ## ## ## ## ## ## #### ## ## ## ## ## ##
// ## #### ###### ## ## ## ## ## ## ## ## ## ## ## ## ## ###### ###
// ## ## ## ## ## ## ######### ## ## ## ## #### ## ## ## ## ##
// ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## ## ##
// ###### ######## ## ### ## ## ######## ## #### ## ## ######## ######## ## ##
//
private int getRootEntry(final int setNumber) {
try {
return getRootEntry(setNumber, false);
} catch (RetryException | ResizeException ex) {
throw new IllegalStateException("Should not Resize or Retry exceptions on real-only read: ", ex);
}
}
private int getRootEntry(final int setNumber, boolean insertAsNeeded) throws RetryException, ResizeException {
int entryPointerIndex = SET_0_START_INDEX + setNumber;
int entryIndex = getIndexAtShortIndex(entryPointerIndex);
if (entryIndex == 0) {
if (!insertAsNeeded) {
return 0; // Index does not currently exist in packed array;
}
entryIndex = newEntry(NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS);
// Create a new empty (no slots set) entry for the next level:
setPackedSlotIndicators(entryIndex, (short) 0);
setPreviousVersionIndex(entryIndex, (short) 0);
boolean success = casAtShortIndex(entryPointerIndex, (short) 0, (short) entryIndex);
if (!success) {
throw new RetryException();
}
}
if (((getTopLevelShift() != LEAF_LEVEL_SHIFT)) && getPreviousVersionIndex(entryIndex) != 0) {
consolidateEntry(entryIndex);
}
return entryIndex;
}
/**
     * Get the byte-index (into the packed array) corresponding to a given (set tree) value byte of a given virtual index.
* Inserts new set tree nodes as needed if indicated.
*
* @param setNumber The set tree number (0-7, 0 corresponding with the LSByte set tree)
* @param virtualIndex The virtual index into the PackedArray
* @param insertAsNeeded If true, will insert new set tree nodes as needed if they do not already exist
* @return the byte-index corresponding to the given (set tree) value byte of the given virtual index
*/
int getPackedIndex(final int setNumber, final int virtualIndex, final boolean insertAsNeeded)
throws ResizeException {
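        // Each of the NUMBER_OF_SETS (8) set trees tracks one byte of the logical 64 bit values:
        // set 0 holds the least significant byte, set 7 the most significant. The returned byte-index
        // points at that byte within an 8-byte leaf entry (see contextLocalGetValueAtIndex() below
        // for how a full value is reassembled from the per-set bytes).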
int byteIndex = 0; // Must be overwritten to finish. Will retry until non-zero.
do {
try {
assert (setNumber >= 0 && setNumber < NUMBER_OF_SETS);
if (virtualIndex >= getVirtualLength()) {
throw new ArrayIndexOutOfBoundsException(
String.format("Attempting access at index %d, beyond virtualLength %d",
virtualIndex, getVirtualLength()));
}
int entryPointerIndex = SET_0_START_INDEX + setNumber;
int entryIndex = getRootEntry(setNumber, insertAsNeeded);
if (entryIndex == 0) {
return -1; // Index does not currently exist in packed array;
}
// Work down the levels of non-leaf entries:
for (int indexShift = getTopLevelShift(); indexShift >= LEAF_LEVEL_SHIFT; indexShift -= 4) {
boolean nextLevelIsLeaf = (indexShift == LEAF_LEVEL_SHIFT);
// Target is a packedSlotIndicators entry
int packedSlotIndicators = getPackedSlotIndicators(entryIndex);
int slotBitNumber = (virtualIndex >>> indexShift) & 0xf;
int slotMask = 1 << slotBitNumber;
int slotsBelowBitNumber = packedSlotIndicators & (slotMask - 1);
int slotNumber = Integer.bitCount(slotsBelowBitNumber);
if ((packedSlotIndicators & slotMask) == 0) {
// The entryIndex slot does not have the contents we want
if (!insertAsNeeded) {
return -1; // Index does not currently exist in packed array;
}
// Expand the entry, adding the index to new entry at the proper slot:
entryIndex = expandEntry(entryIndex, entryPointerIndex, slotNumber, slotMask, nextLevelIsLeaf);
}
// Next level's entry pointer index is in the appropriate slot in the entries array in this entry:
entryPointerIndex = entryIndex + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + slotNumber;
entryIndex = getIndexAtShortIndex(entryPointerIndex);
if (entryIndex == 0) {
throw new RetryException();
}
if ((!nextLevelIsLeaf) && getPreviousVersionIndex(entryIndex) != 0) {
consolidateEntry(entryIndex);
}
                // entryIndex either holds the long-index of a leaf entry, or the short-index of the next
// level entry's packed slot indicators short-word.
}
// entryIndex is the long-index of a leaf entry that contains the value byte for the given set
byteIndex = (entryIndex << 3) + (virtualIndex & 0x7); // Determine byte index offset within leaf entry
} catch (RetryException ignored) {
// Retry will happen automatically since byteIndex was not set to non-zero value;
}
}
while (byteIndex == 0);
return byteIndex;
}
private long contextLocalGetValueAtIndex(final int virtualIndex) {
long value = 0;
for (int byteNum = 0; byteNum < NUMBER_OF_SETS; byteNum++) {
int packedIndex = 0;
long byteValueAtPackedIndex;
do {
try {
packedIndex = getPackedIndex(byteNum, virtualIndex, false);
if (packedIndex < 0) {
return value;
}
byteValueAtPackedIndex = (((long) getAtByteIndex(packedIndex)) & 0xff) << (byteNum << 3);
} catch (ResizeException ex) {
throw new IllegalStateException("Should never encounter a resize exception without inserts");
}
} while (packedIndex == 0);
value += byteValueAtPackedIndex;
}
return value;
}
//
// ## ## ######## ####### ######## ## ## ## ### ######## ########
// ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
// ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
// ### ####### ######## ## ## ######## ## ## ## ## ## ## ######
// ## ## ## ## ## ## ## ## ## ######### ## ##
// ## ## ## ## ## ## ## ## ## ## ## ## ##
// ## ## ## ####### ## ####### ######## ## ## ## ########
//
void populateEquivalentEntriesWithZerosFromOther(final AbstractPackedArrayContext other) {
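        // Build, in this (target) context, a set-tree structure that mirrors other's, with all leaf
        // values zeroed. If this context's virtual length calls for a deeper tree, additional top
        // levels are inserted above the copied structure (each with only slot 0 populated).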
if (getVirtualLength() < other.getVirtualLength()) {
throw new IllegalStateException("Cannot populate array of smaller virtual length");
}
for (int i = 0; i < NUMBER_OF_SETS; i++) {
int otherEntryIndex = other.getAtShortIndex(SET_0_START_INDEX + i);
if (otherEntryIndex == 0) continue; // No tree to duplicate
int entryIndexPointer = SET_0_START_INDEX + i;
for (int j = getTopLevelShift(); j > other.getTopLevelShift(); j -= 4) {
// for each inserted level:
                // Allocate entry in this (target) context:
int sizeOfEntry = NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + 1;
int newEntryIndex = 0;
do {
try {
newEntryIndex = newEntry(sizeOfEntry);
} catch (ResizeException ex) {
resizeArray(ex.getNewSize());
}
}
while (newEntryIndex == 0);
// Link new level in.
setAtShortIndex(entryIndexPointer, (short) newEntryIndex);
// Populate new level entry, use pointer to slot 0 as place to populate under:
setPackedSlotIndicators(newEntryIndex, (short) 0x1); // Slot 0 populated
setPreviousVersionIndex(newEntryIndex, (short) 0); // No previous version
entryIndexPointer = newEntryIndex + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS; // Where slot 0 index goes.
}
copyEntriesAtLevelFromOther(other, otherEntryIndex,
entryIndexPointer, other.getTopLevelShift());
}
}
private void copyEntriesAtLevelFromOther(final AbstractPackedArrayContext other,
final int otherLevelEntryIndex,
final int levelEntryIndexPointer,
final int otherIndexShift) {
boolean nextLevelIsLeaf = (otherIndexShift == LEAF_LEVEL_SHIFT);
int packedSlotIndicators = other.getPackedSlotIndicators(otherLevelEntryIndex);
int numberOfSlots = Integer.bitCount(packedSlotIndicators);
int sizeOfEntry = NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + numberOfSlots;
// Allocate entry:
int entryIndex = 0;
do {
try {
entryIndex = newEntry(sizeOfEntry);
} catch (ResizeException ex) {
resizeArray(ex.getNewSize());
}
}
while (entryIndex == 0);
setAtShortIndex(levelEntryIndexPointer, (short) entryIndex);
setAtShortIndex(entryIndex + NON_LEAF_ENTRY_SLOT_INDICATORS_OFFSET, (short) packedSlotIndicators);
setAtShortIndex(entryIndex + NON_LEAF_ENTRY_PREVIOUS_VERSION_OFFSET, (short) 0);
for (int i = 0; i < numberOfSlots; i++) {
if (nextLevelIsLeaf) {
                // Make leaf in this (target) context:
int leafEntryIndex = 0;
do {
try {
leafEntryIndex = newLeafEntry();
} catch (ResizeException ex) {
resizeArray(ex.getNewSize());
}
}
while (leafEntryIndex == 0);
setIndexAtEntrySlot(entryIndex, i, (short) leafEntryIndex);
lazySetAtLongIndex(leafEntryIndex, 0);
} else {
int otherNextLevelEntryIndex = other.getIndexAtEntrySlot(otherLevelEntryIndex, i);
copyEntriesAtLevelFromOther(other, otherNextLevelEntryIndex,
(entryIndex + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + i),
otherIndexShift - 4);
}
}
}
//
// #### ######## ######## ######## ### ######## #### ####### ## ##
// ## ## ## ## ## ## ## ## ## ## ## ### ##
// ## ## ## ## ## ## ## ## ## ## ## #### ##
// ## ## ###### ######## ## ## ## ## ## ## ## ## ##
// ## ## ## ## ## ######### ## ## ## ## ## ####
// ## ## ## ## ## ## ## ## ## ## ## ## ###
// #### ## ######## ## ## ## ## ## #### ####### ## ##
//
// Recorded Value iteration:
private int seekToPopulatedVirtualIndexStartingAtLevel(final int startingVirtualIndex,
final int levelEntryIndex,
final int indexShift) throws RetryException {
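        // Returns the next potentially-populated virtual index at or after startingVirtualIndex under
        // this level's entry. A negative return value is the (negated) virtual index at which the caller
        // should resume the search one level up (i.e. this level had no further populated slots).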
int virtualIndex = startingVirtualIndex;
int firstVirtualIndexPastThisLevel = (((virtualIndex >>> indexShift) | 0xf) + 1) << indexShift;
boolean nextLevelIsLeaf = (indexShift == LEAF_LEVEL_SHIFT);
do {
// Target is a packedSlotIndicators entry
int packedSlotIndicators = getPackedSlotIndicators(levelEntryIndex);
int startingSlotBitNumber = (virtualIndex >>> indexShift) & 0xf;
int slotMask = 1 << startingSlotBitNumber;
int slotsAtAndAboveBitNumber = packedSlotIndicators & ~(slotMask - 1);
int nextActiveSlotBitNumber = Integer.numberOfTrailingZeros(slotsAtAndAboveBitNumber);
if (nextActiveSlotBitNumber > 15) {
// this level has no more set bits, pop back up a level.
int indexShiftAbove = indexShift + 4;
virtualIndex += 1 << indexShiftAbove;
virtualIndex &= ~((1 << indexShiftAbove) - 1); // Start at the beginning of the next slot a level above.
return -virtualIndex; // Negative value indicates a skip to a different index.
}
// Drill into bit.
if (nextActiveSlotBitNumber != startingSlotBitNumber) {
virtualIndex += (nextActiveSlotBitNumber - startingSlotBitNumber) << indexShift;
virtualIndex &= ~((1 << indexShift) - 1); // Start at the beginning of the next slot of this level
}
if (nextLevelIsLeaf) {
// There is recorded value here. No need to look.
return virtualIndex;
}
// Next level is not a leaf. Drill into it:
int nextSlotMask = 1 << nextActiveSlotBitNumber;
int slotsBelowNextBitNumber = packedSlotIndicators & (nextSlotMask - 1);
int nextSlotNumber = Integer.bitCount(slotsBelowNextBitNumber);
if ((packedSlotIndicators & nextSlotMask) == 0) {
throw new IllegalStateException("Unexpected 0 at slot index");
}
int entryPointerIndex = levelEntryIndex + NON_LEAF_ENTRY_HEADER_SIZE_IN_SHORTS + nextSlotNumber;
int nextLevelEntryIndex = getIndexAtShortIndex(entryPointerIndex);
if (nextLevelEntryIndex == 0) {
throw new RetryException();
}
if (getPreviousVersionIndex(nextLevelEntryIndex) != 0) {
consolidateEntry(nextLevelEntryIndex);
}
virtualIndex =
seekToPopulatedVirtualIndexStartingAtLevel(virtualIndex, nextLevelEntryIndex, indexShift - 4);
if (virtualIndex < 0) {
virtualIndex = -virtualIndex;
} else {
return virtualIndex;
}
} while (virtualIndex < firstVirtualIndexPastThisLevel);
return virtualIndex;
}
private int findFirstPotentiallyPopulatedVirtualIndexStartingAt(final int startingVirtualIndex) {
int nextVirtualIndex = -1;
// Look for a populated virtual index in set 0:
boolean retry;
do {
retry = false;
try {
int entryIndex = getRootEntry(0);
if (entryIndex == 0) return getVirtualLength(); // Nothing under the root
nextVirtualIndex =
seekToPopulatedVirtualIndexStartingAtLevel(startingVirtualIndex, entryIndex,
getTopLevelShift());
} catch (RetryException ex) {
retry = true;
}
} while (retry);
// Don't drill to value if out of range:
if ((nextVirtualIndex < 0) || (nextVirtualIndex >= getVirtualLength())) {
return getVirtualLength();
}
return nextVirtualIndex;
}
// Recorded values iteration:
class NonZeroValuesIterator implements Iterator<IterationValue> {
int nextVirtualIndex = 0;
long nextValue;
final IterationValue currentIterationValue = new IterationValue();
private void findFirstNonZeroValueVirtualIndexStartingAt(final int startingVirtualIndex) {
if (!isPacked()) {
// Look for non-zero value in unpacked context:
for (nextVirtualIndex = startingVirtualIndex;
nextVirtualIndex < getVirtualLength();
nextVirtualIndex++) {
if ((nextValue = getAtUnpackedIndex(nextVirtualIndex)) != 0) {
return;
}
}
return;
}
// Context is packed:
nextVirtualIndex = startingVirtualIndex;
do {
nextVirtualIndex = findFirstPotentiallyPopulatedVirtualIndexStartingAt(nextVirtualIndex);
if (nextVirtualIndex >= getVirtualLength()) break;
if ((nextValue = contextLocalGetValueAtIndex(nextVirtualIndex)) != 0) break;
nextVirtualIndex++;
} while (true);
}
@Override
public IterationValue next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
currentIterationValue.set(nextVirtualIndex, nextValue);
findFirstNonZeroValueVirtualIndexStartingAt(nextVirtualIndex + 1);
return currentIterationValue;
}
@Override
public boolean hasNext() {
return ((nextVirtualIndex >= 0) &&
(nextVirtualIndex < getVirtualLength()));
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
NonZeroValuesIterator() {
findFirstNonZeroValueVirtualIndexStartingAt(0);
}
}
/**
* An Iterator over all non-Zero values in the array
*
* @return an Iterator over all non-Zero values in the array
*/
Iterable<IterationValue> nonZeroValues() {
return new Iterable<IterationValue>() {
public Iterator<IterationValue> iterator() {
return new NonZeroValuesIterator();
}
};
}
//
// ###### #### ######## ######## #### ###### ## ## #### ######## ########
// ## ## ## ## ## ## ## ## ## ## ## ## ## ##
// ## ## ## ## #### ## ## ## ## ## ##
// ###### ## ## ###### #### ###### ######### ## ###### ##
// ## ## ## ## ## ## ## ## ## ## ## ## ##
// ## ## ## ## ## ## ## ## ## ## ## ## ## ##
// ###### #### ######## ######## #### ## ###### ## ## #### ## ##
//
boolean isPacked() {
return isPacked;
}
int getPhysicalLength() {
return physicalLength;
}
int getVirtualLength() {
return virtualLength;
}
int determineTopLevelShiftForVirtualLength(final int virtualLength) {
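        // The packed representation is a radix tree of 16-way (4 index bits per level) non-leaf entries
        // above 8-byte leaves (3 index bits). Determine the shift of the top-most level needed to cover
        // ceil(log2(virtualLength)) index bits, with an enforced minimum tree depth.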
int sizeMagnitude = (int) Math.ceil(Math.log(virtualLength) / Math.log(2));
int eightsSizeMagnitude = sizeMagnitude - 3;
int multipleOfFourSizeMagnitude = (int) Math.ceil(eightsSizeMagnitude / 4.0) * 4;
multipleOfFourSizeMagnitude = Math.max(multipleOfFourSizeMagnitude, 8);
int topLevelShiftNeeded = (multipleOfFourSizeMagnitude - 4) + 3;
return topLevelShiftNeeded;
}
void setVirtualLength(final int virtualLength) {
if (!isPacked()) {
throw new IllegalStateException("Should never be adjusting the virtual size of a non-packed context");
}
int newTopLevelShift = determineTopLevelShiftForVirtualLength(virtualLength);
setTopLevelShift(newTopLevelShift);
this.virtualLength = virtualLength;
}
int getTopLevelShift() {
return topLevelShift;
}
private void setTopLevelShift(final int topLevelShift) {
this.topLevelShift = topLevelShift;
}
int getPopulatedLongLength() {
return (getPopulatedShortLength() + 3) >> 2; // round up
}
int getPopulatedByteLength() {
return getPopulatedShortLength() << 1;
}
//
// ######## ####### ###### ######## ######## #### ## ## ######
// ## ## ## ## ## ## ## ## ## ### ## ## ##
// ## ## ## ## ## ## ## ## #### ## ##
// ## ## ## ####### ###### ## ######## ## ## ## ## ## ####
// ## ## ## ## ## ## ## ## ## #### ## ##
// ## ## ## ## ## ## ## ## ## ## ### ## ##
// ## ####### ###### ## ## ## #### ## ## ######
//
private String nonLeafEntryToString(final int entryIndex,
final int indexShift,
final int indentLevel) {
String output = "";
for (int i = 0; i < indentLevel; i++) {
output += " ";
}
try {
final int packedSlotIndicators = getPackedSlotIndicators(entryIndex);
output += String.format("slotIndicators: 0x%02x, prevVersionIndex: %3d: [ ",
packedSlotIndicators,
getPreviousVersionIndex(entryIndex));
final int numberOfSlotsInEntry = Integer.bitCount(packedSlotIndicators);
for (int i = 0; i < numberOfSlotsInEntry; i++) {
output += String.format("%d", getIndexAtEntrySlot(entryIndex, i));
if (i < numberOfSlotsInEntry - 1) {
output += ", ";
}
}
output += String.format(" ] (indexShift = %d)\n", indexShift);
final boolean nextLevelIsLeaf = (indexShift == LEAF_LEVEL_SHIFT);
for (int i = 0; i < numberOfSlotsInEntry; i++) {
final int nextLevelEntryIndex = getIndexAtEntrySlot(entryIndex, i);
if (nextLevelIsLeaf) {
output += leafEntryToString(nextLevelEntryIndex, indentLevel + 4);
} else {
output += nonLeafEntryToString(nextLevelEntryIndex,
indexShift - 4, indentLevel + 4);
}
}
} catch (Exception ex) {
output += String.format("Exception thrown at nonLeafEntry at index %d with indexShift %d\n",
entryIndex, indexShift);
}
return output;
}
private String leafEntryToString(final int entryIndex, final int indentLevel) {
String output = "";
for (int i = 0; i < indentLevel; i++) {
output += " ";
}
try {
output += "Leaf bytes : ";
for (int i = 56; i >= 0; i -= 8) {
output += String.format("0x%02x ", (getAtLongIndex(entryIndex) >>> i) & 0xff);
}
output += "\n";
} catch (Exception ex) {
output += String.format("Exception thrown at leafEntry at index %d\n", entryIndex);
}
return output;
}
private String recordedValuesToString() {
String output = "";
try {
for (IterationValue v : nonZeroValues()) {
output += String.format("[%d] : %d\n", v.getIndex(), v.getValue());
}
return output;
} catch (Exception ex) {
output += "!!! Exception thrown in value iteration...\n";
}
return output;
}
@Override
public String toString() {
String output = "PackedArrayContext:\n";
if (!isPacked()) {
return output + "Context is unpacked:\n" + unpackedToString();
}
for (int setNumber = 0; setNumber < NUMBER_OF_SETS; setNumber++) {
try {
int entryPointerIndex = SET_0_START_INDEX + setNumber;
int entryIndex = getIndexAtShortIndex(entryPointerIndex);
output += String.format("Set %d: root = %d \n", setNumber, entryIndex);
if (entryIndex == 0) continue;
output += nonLeafEntryToString(entryIndex, getTopLevelShift(), 4);
} catch (Exception ex) {
output += String.format("Exception thrown in set %d\n", setNumber);
}
}
output += recordedValuesToString();
return output;
}
private static class RetryException extends Exception {}
}
| 53,321 | 47.298913 | 120 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/packedarray/ResizeException.java | package org.HdrHistogram.packedarray;
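/**
 * Internal exception used to request that a packed array context's physical backing store be grown to
 * (at least) {@code newSize} longs. The resize itself is performed by the caller, outside of the
 * critical section in which the need for more space was detected.
 */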
class ResizeException extends Exception {
private final int newSize;
ResizeException(final int newSize) {
this.newSize = newSize;
}
int getNewSize() {
return newSize;
}
}
| 248 | 16.785714 | 41 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/packedarray/ConcurrentPackedArrayContext.java | package org.HdrHistogram.packedarray;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.concurrent.atomic.AtomicLongArray;
class ConcurrentPackedArrayContext extends PackedArrayContext {
ConcurrentPackedArrayContext(final int virtualLength,
final int initialPhysicalLength,
final boolean allocateArray) {
super(virtualLength, initialPhysicalLength, false);
if (allocateArray) {
array = new AtomicLongArray(getPhysicalLength());
init(virtualLength);
}
}
ConcurrentPackedArrayContext(final int virtualLength,
final int initialPhysicalLength) {
this(virtualLength, initialPhysicalLength, true);
}
ConcurrentPackedArrayContext(final int newVirtualCountsArraySize,
final AbstractPackedArrayContext from,
final int arrayLength) {
this(newVirtualCountsArraySize, arrayLength);
if (isPacked()) {
populateEquivalentEntriesWithZerosFromOther(from);
}
}
private AtomicLongArray array;
private volatile int populatedShortLength;
private static final AtomicIntegerFieldUpdater<ConcurrentPackedArrayContext> populatedShortLengthUpdater =
AtomicIntegerFieldUpdater.newUpdater(ConcurrentPackedArrayContext.class, "populatedShortLength");
@Override
int length() {
return array.length();
}
@Override
int getPopulatedShortLength() {
return populatedShortLength;
}
@Override
boolean casPopulatedShortLength(final int expectedPopulatedShortLength, final int newPopulatedShortLength) {
return populatedShortLengthUpdater.compareAndSet(this, expectedPopulatedShortLength, newPopulatedShortLength);
}
@Override
boolean casPopulatedLongLength(final int expectedPopulatedLongLength, final int newPopulatedLongLength) {
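        // Express the long-length CAS as an equivalent short-length CAS (there are 4 shorts per long;
        // the populated short length is the underlying source of truth).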
int existingShortLength = getPopulatedShortLength();
int existingLongLength = (existingShortLength + 3) >> 2;
if (existingLongLength != expectedPopulatedLongLength) return false;
return casPopulatedShortLength(existingShortLength, newPopulatedLongLength << 2);
}
@Override
long getAtLongIndex(final int longIndex) {
return array.get(longIndex);
}
@Override
boolean casAtLongIndex(final int longIndex, final long expectedValue, final long newValue) {
return array.compareAndSet(longIndex, expectedValue, newValue);
}
@Override
void lazySetAtLongIndex(final int longIndex, final long newValue) {
array.lazySet(longIndex, newValue);
}
@Override
void clearContents() {
for (int i = 0; i < array.length(); i++) {
array.lazySet(i, 0);
}
init(getVirtualLength());
}
@Override
void resizeArray(final int newLength) {
final AtomicLongArray newArray = new AtomicLongArray(newLength);
int copyLength = Math.min(array.length(), newLength);
for (int i = 0; i < copyLength; i++) {
newArray.lazySet(i, array.get(i));
}
array = newArray;
}
@Override
long getAtUnpackedIndex(final int index) {
return array.get(index);
}
@Override
void setAtUnpackedIndex(final int index, final long newValue) {
array.set(index, newValue);
}
@Override
void lazySetAtUnpackedIndex(final int index, final long newValue) {
array.lazySet(index, newValue);
}
@Override
long incrementAndGetAtUnpackedIndex(final int index) {
return array.incrementAndGet(index);
}
@Override
long addAndGetAtUnpackedIndex(final int index, final long valueToAdd) {
return array.addAndGet(index, valueToAdd);
}
@Override
String unpackedToString() {
return array.toString();
}
}
| 3,964 | 30.72 | 118 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/packedarray/AbstractPackedLongArray.java | package org.HdrHistogram.packedarray;
import java.io.Serializable;
import java.util.Iterator;
import java.util.NoSuchElementException;
/**
 * A packed array of signed 64 bit values, supporting {@link #get get()}, {@link #set set()},
* {@link #add add()} and {@link #increment increment()} operations on the logical contents of the array.
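 * <p>
 * A minimal usage sketch (illustrative only, assuming the concrete {@link PackedLongArray} subclass
 * and its {@code PackedLongArray(int virtualLength)} constructor):
 * <br><pre><code>
 * PackedLongArray array = new PackedLongArray(1024); // virtual length of 1024 entries
 * array.set(17, 500);
 * array.add(17, 25);
 * array.increment(999);
 * long v = array.get(17); // 525
 * for (IterationValue iv : array.nonZeroValues()) {
 *     System.out.println(iv.getIndex() + " : " + iv.getValue());
 * }
 * </code></pre>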
*/
abstract class AbstractPackedLongArray implements Iterable<Long>, Serializable {
/**
     * An {@link AbstractPackedLongArray} uses an {@link AbstractPackedArrayContext} to track
* the array's logical contents. Contexts may be switched when a context requires resizing
* to complete logical array operations (get, set, add, increment). Contexts are
* established and used within critical sections in order to facilitate concurrent
* implementors.
*/
private static final int NUMBER_OF_SETS = 8;
private AbstractPackedArrayContext arrayContext;
private long startTimeStampMsec = Long.MAX_VALUE;
private long endTimeStampMsec = 0;
AbstractPackedArrayContext getArrayContext() {
return arrayContext;
}
void setArrayContext(AbstractPackedArrayContext newArrayContext) {
arrayContext = newArrayContext;
}
/**
* get the start time stamp [optionally] stored with this array
* @return the start time stamp [optionally] stored with this array
*/
public long getStartTimeStamp() {
return startTimeStampMsec;
}
/**
* Set the start time stamp value associated with this array to a given value.
* @param timeStampMsec the value to set the time stamp to, [by convention] in msec since the epoch.
*/
public void setStartTimeStamp(final long timeStampMsec) {
this.startTimeStampMsec = timeStampMsec;
}
/**
* get the end time stamp [optionally] stored with this array
* @return the end time stamp [optionally] stored with this array
*/
public long getEndTimeStamp() {
return endTimeStampMsec;
}
/**
* Set the end time stamp value associated with this array to a given value.
* @param timeStampMsec the value to set the time stamp to, [by convention] in msec since the epoch.
*/
public void setEndTimeStamp(final long timeStampMsec) {
this.endTimeStampMsec = timeStampMsec;
}
/**
* Set a new virtual length for the array.
     * @param newVirtualArrayLength the new (virtual) length to apply to the array
*/
abstract public void setVirtualLength(final int newVirtualArrayLength);
/**
* Create a copy of this array, complete with data and everything.
*
* @return A distinct copy of this array.
*/
abstract public AbstractPackedLongArray copy();
abstract void resizeStorageArray(int newPhysicalLengthInLongs);
abstract void clearContents();
abstract long criticalSectionEnter();
abstract void criticalSectionExit(long criticalValueAtEnter);
@Override
public String toString() {
String output = "PackedArray:\n";
AbstractPackedArrayContext arrayContext = getArrayContext();
output += arrayContext.toString();
return output;
}
/**
* Get value at virtual index in the array
* @param index the virtual array index
* @return the array value at the virtual index given
*/
public long get(final int index) {
long value = 0;
for (int byteNum = 0; byteNum < NUMBER_OF_SETS; byteNum ++) {
int packedIndex = 0;
long byteValueAtPackedIndex = 0;
do {
int newArraySize = 0;
long criticalValue = criticalSectionEnter();
try {
// Establish context within: critical section
AbstractPackedArrayContext arrayContext = getArrayContext();
// Deal with unpacked context:
if (!arrayContext.isPacked()) {
return arrayContext.getAtUnpackedIndex(index);
}
// Context is packed:
packedIndex = arrayContext.getPackedIndex(byteNum, index, false);
if (packedIndex < 0) {
return value;
}
byteValueAtPackedIndex =
(((long)arrayContext.getAtByteIndex(packedIndex)) & 0xff) << (byteNum << 3);
} catch (ResizeException ex) {
newArraySize = ex.getNewSize(); // Resize outside of critical section
} finally {
criticalSectionExit(criticalValue);
if (newArraySize != 0) {
resizeStorageArray(newArraySize);
}
}
} while (packedIndex == 0);
value += byteValueAtPackedIndex;
}
return value;
}
/**
* Increment value at a virtual index in the array
* @param index virtual index of value to increment
*/
public void increment(final int index) {
add(index, 1);
}
/**
* Add to a value at a virtual index in the array
* @param index the virtual index of the value to be added to
* @param value the value to add
*/
public void add(final int index, final long value) {
if (value == 0) {
return;
}
long remainingValueToAdd = value;
do {
try {
long byteMask = 0xff;
for (int byteNum = 0, byteShift = 0;
byteNum < NUMBER_OF_SETS;
byteNum++, byteShift += 8, byteMask <<= 8) {
final long criticalValue = criticalSectionEnter();
try {
// Establish context within: critical section
AbstractPackedArrayContext arrayContext = getArrayContext();
// Deal with unpacked context:
if (!arrayContext.isPacked()) {
arrayContext.addAndGetAtUnpackedIndex(index, remainingValueToAdd);
return;
}
// Context is packed:
int packedIndex = arrayContext.getPackedIndex(byteNum, index, true);
long amountToAddAtSet = remainingValueToAdd & byteMask;
byte byteToAdd = (byte) (amountToAddAtSet >> byteShift);
long afterAddByteValue = arrayContext.addAtByteIndex(packedIndex, byteToAdd);
// Reduce remaining value to add by amount just added:
remainingValueToAdd -= amountToAddAtSet;
// Account for carry:
long carryAmount = afterAddByteValue & 0x100;
remainingValueToAdd += carryAmount << byteShift;
if (remainingValueToAdd == 0) {
return; // nothing to add to higher magnitudes
}
} finally {
criticalSectionExit(criticalValue);
}
}
return;
} catch (ResizeException ex){
resizeStorageArray(ex.getNewSize()); // Resize outside of critical section
}
} while (true);
}
/**
* Set the value at a virtual index in the array
* @param index the virtual index of the value to set
* @param value the value to set
*/
public void set(final int index, final long value) {
int bytesAlreadySet = 0;
do {
long valueForNextLevels = value;
try {
for (int byteNum = 0; byteNum < NUMBER_OF_SETS; byteNum++) {
long criticalValue = criticalSectionEnter();
try {
// Establish context within: critical section
AbstractPackedArrayContext arrayContext = getArrayContext();
// Deal with unpacked context:
if (!arrayContext.isPacked()) {
arrayContext.setAtUnpackedIndex(index, value);
return;
}
// Context is packed:
if (valueForNextLevels == 0) {
// Special-case zeros to avoid inflating packed array for no reason
int packedIndex = arrayContext.getPackedIndex(byteNum, index, false);
if (packedIndex < 0) {
return; // no need to create entries for zero values if they don't already exist
}
}
// Make sure byte is populated:
int packedIndex = arrayContext.getPackedIndex(byteNum, index, true);
// Determine value to write, and prepare for next levels
byte byteToWrite = (byte) (valueForNextLevels & 0xff);
valueForNextLevels >>= 8;
if (byteNum < bytesAlreadySet) {
// We want to avoid writing to the same byte twice when not doing so for the
// entire 64 bit value atomically, as doing so opens a race with e.g. concurrent
// adders. So don't actually write the byte if has been written before.
continue;
}
arrayContext.setAtByteIndex(packedIndex, byteToWrite);
bytesAlreadySet++;
} finally {
criticalSectionExit(criticalValue);
}
}
return;
} catch (ResizeException ex) {
resizeStorageArray(ex.getNewSize()); // Resize outside of critical section
}
} while (true);
}
/**
* Add the contents of the other array to this one
*
     * @param other The other array whose contents are to be added to this array
*/
public void add(final AbstractPackedLongArray other) {
for (IterationValue v : other.nonZeroValues()) {
add(v.getIndex(), v.getValue());
}
}
/**
* Clear the array contents
*/
public void clear() {
clearContents();
}
/**
* Get the current physical length (in longs) of the array's backing storage
* @return the current physical length (in longs) of the array's current backing storage
*/
public int getPhysicalLength() {
return getArrayContext().length();
}
/**
* Get the (virtual) length of the array
* @return the (virtual) length of the array
*/
public int length() {
return getArrayContext().getVirtualLength();
}
    // Regular array iteration (iterates over all virtual indexes, zero-value or not):
class AllValuesIterator implements Iterator<Long> {
int nextVirtualIndex = 0;
@Override
public Long next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
return get(nextVirtualIndex++);
}
@Override
public boolean hasNext() {
return ((nextVirtualIndex >= 0) &&
(nextVirtualIndex < length()));
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
/**
* An Iterator over all values in the array
* @return an Iterator over all values in the array
*/
public Iterator<Long> iterator() {
return new AllValuesIterator();
}
/**
* An Iterator over all non-Zero values in the array
* @return an Iterator over all non-Zero values in the array
*/
public Iterable<IterationValue> nonZeroValues() {
return getArrayContext().nonZeroValues();
}
/**
* Determine if this array is equivalent to another.
*
* @param other the other array to compare to
     * @return True if this array is equivalent to the other.
*/
@Override
public boolean equals(final Object other) {
if (this == other) {
return true;
}
if (!(other instanceof AbstractPackedLongArray)) {
return false;
}
AbstractPackedLongArray that = (AbstractPackedLongArray) other;
if (length() != that.length()) {
return false;
}
if (this.arrayContext.isPacked() || that.arrayContext.isPacked()) {
// If at least one of the arrays is packed, comparing only the
// non-zero values that exist in both arrays, using two passes,
// will likely be more efficient than a single all-index pass:
// - If both are packed, it will obviously be much faster.
// - If one is packed and the other is not, we would be visiting
// every index in the non-packed array, in one of the passes,
// but would still only visit the non-zero elements in the
// packed one.
for (IterationValue v : this.nonZeroValues()) {
if (that.get(v.getIndex()) != v.getValue()) {
return false;
}
}
for (IterationValue v : that.nonZeroValues()) {
if (this.get(v.getIndex()) != v.getValue()) {
return false;
}
}
} else {
for (int i = 0; i < this.length(); i++) {
if (this.get(i) != that.get(i)) {
return false;
}
}
}
return true;
}
static final int NUMBER_OF_NON_ZEROS_TO_HASH = 8;
@Override
public int hashCode() {
int h = 0;
h = oneAtATimeHashStep(h, length());
int count = 0;
// Include the first NUMBER_OF_NON_ZEROS_TO_HASH non-zeros in the hash:
for (IterationValue v : nonZeroValues()) {
if (++count > NUMBER_OF_NON_ZEROS_TO_HASH) {
break;
}
h = oneAtATimeHashStep(h, (int) v.getIndex());
h = oneAtATimeHashStep(h, (int) v.getValue());
}
h += (h << 3);
h ^= (h >> 11);
h += (h << 15);
return h;
}
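    // A single accumulation step of the "one-at-a-time" hash (Bob Jenkins); the corresponding
    // finalization steps are applied at the end of hashCode() above.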
private int oneAtATimeHashStep(final int incomingHash, final int v) {
int h = incomingHash;
h += v;
h += (h << 10);
h ^= (h >> 6);
return h;
}
}
| 14,675 | 35.058968 | 112 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/packedarray/PackedArrayRecorder.java | /**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram.packedarray;
import org.HdrHistogram.*;
import java.util.concurrent.atomic.AtomicLong;
/**
* Records increments and adds of integer values at indexes of a logical array of 64 bit signed integer values, and
* provides stable interval {@link PackedLongArray} samples from live recorded data without interrupting or stalling
* active recording of values. Each interval array provided contains all values accumulated since the previous
* interval array was taken.
* <p>
 * This pattern is commonly used for logging interval accumulator information while recording is ongoing.
* <p>
* {@link PackedArrayRecorder} supports fully concurrent
* {@link PackedArrayRecorder#increment(int)} and
* {@link PackedArrayRecorder#add(int, long)} calls.
* While the {@link #increment increment()} and {@link #add add()} methods are not quite wait-free, they
* come "close" to that behavior in the sense that a given thread will incur a total of no more than a capped
* fixed number (e.g. 74 in a current implementation) of non-wait-free add or increment operations during
* the lifetime of an interval array (including across recycling of that array across intervals within the
* same recorder), regardless of the number of operations done.
* <p>
* A common pattern for using a {@link PackedArrayRecorder} looks like this:
* <br><pre><code>
 * PackedArrayRecorder recorder = new PackedArrayRecorder(virtualLength); // e.g. with the desired (virtual) array length
* PackedLongArray intervalArray = null;
* ...
* [start of some loop construct that periodically wants to grab an interval array]
* ...
* // Get interval array, recycling previous interval array:
* intervalArray = recorder.getIntervalArray(intervalArray);
* // Process the interval array, which is nice and stable here:
* myLogWriter.logArrayContents(intervalArray);
* ...
* [end of loop construct]
* </code></pre>
*
*/
public class PackedArrayRecorder {
private static AtomicLong instanceIdSequencer = new AtomicLong(1);
private final long instanceId = instanceIdSequencer.getAndIncrement();
private final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser();
private volatile PackedLongArray activeArray;
/**
* Construct a {@link PackedArrayRecorder} with a given (virtual) array length.
*
* @param virtualLength The (virtual) array length
*/
public PackedArrayRecorder(final int virtualLength) {
activeArray = new InternalConcurrentPackedLongArray(instanceId, virtualLength);
activeArray.setStartTimeStamp(System.currentTimeMillis());
}
/**
* Construct a {@link PackedArrayRecorder} with a given (virtual) array length, starting with a given
* initial physical backing store length
*
* @param virtualLength The (virtual) array length
* @param initialPhysicalLength The initial physical backing store length
*/
public PackedArrayRecorder(final int virtualLength, final int initialPhysicalLength) {
activeArray = new InternalConcurrentPackedLongArray(instanceId, virtualLength, initialPhysicalLength);
activeArray.setStartTimeStamp(System.currentTimeMillis());
}
/**
* Returns the virtual length of the array represented by this recorder
* @return The virtual length of the array represented by this recorder
*/
public int length() {
return activeArray.length();
}
/**
     * Change the (virtual) length of the array represented by this recorder
* @param newVirtualLength the new (virtual) length to use
*/
public void setVirtualLength(int newVirtualLength) {
try {
recordingPhaser.readerLock();
// We don't care about concurrent modifications to the array, as setVirtualLength() in the
// ConcurrentPackedLongArray takes care of those. However, we must perform the change of virtual
// length under the recorder's readerLock protection to prevent mid-change observations:
activeArray.setVirtualLength(newVirtualLength);
} finally {
recordingPhaser.readerUnlock();
}
}
/**
* Increment a value at a given index in the array
* @param index the index of the value to be incremented
     * @throws ArrayIndexOutOfBoundsException (may throw) if index exceeds length()
*/
public void increment(final int index) throws ArrayIndexOutOfBoundsException {
long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
try {
activeArray.increment(index);
} finally {
recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
}
}
/**
* Add to a value at a given index in the array
* @param index The index of value to add to
* @param valueToAdd The amount to add to the value at the given index
     * @throws ArrayIndexOutOfBoundsException (may throw) if index exceeds length()
*/
public void add(final int index, final long valueToAdd) throws ArrayIndexOutOfBoundsException {
long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
try {
activeArray.add(index, valueToAdd);
} finally {
recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
}
}
/**
* Get an interval array, which will include a stable, consistent view of all values
* accumulated since the last interval array was taken.
* <p>
* Calling this method is equivalent to calling {@code getIntervalArray(null)}. It is generally recommended
* that the {@link PackedArrayRecorder#getIntervalArray(PackedLongArray arrayToRecycle)
     * getIntervalArray(arrayToRecycle)} form be used for
* regular interval array sampling, as that form accepts a previously returned interval array that can be
* recycled internally to avoid allocation and content copying operations, and is therefore significantly
* more efficient for repeated use than {@link PackedArrayRecorder#getIntervalArray()}.
* <p>
* Calling {@link PackedArrayRecorder#getIntervalArray()} will reset the values at
* all indexes of the array tracked by the recorder, and start accumulating values for the next interval.
*
* @return an array containing the values accumulated since the last interval array was taken.
*/
public synchronized PackedLongArray getIntervalArray() {
return getIntervalArray(null);
}
/**
* Get an interval array, which will include a stable, consistent view of all values
* accumulated since the last interval array was taken.
* <p>
* {@link PackedArrayRecorder#getIntervalArray(PackedLongArray arrayToRecycle)
* getIntervalArray(arrayToRecycle)}
* accepts a previously returned interval array that can be recycled internally to avoid allocation
* and content copying operations, and is therefore significantly more efficient for repeated use than
* {@link PackedArrayRecorder#getIntervalArray()}. The provided {@code arrayToRecycle} must
     * either be null or an interval array returned by a previous call to
* {@link PackedArrayRecorder#getIntervalArray(PackedLongArray arrayToRecycle)
* getIntervalArray(arrayToRecycle)} or
* {@link PackedArrayRecorder#getIntervalArray()}.
* <p>
* NOTE: The caller is responsible for not recycling the same returned interval array more than once. If
* the same interval array instance is recycled more than once, behavior is undefined.
* <p>
* Calling {@link PackedArrayRecorder#getIntervalArray(PackedLongArray arrayToRecycle)
* getIntervalArray(arrayToRecycle)} will reset the values at all indexes of the array
* tracked by the recorder, and start accumulating values for the next interval.
*
* @param arrayToRecycle a previously returned interval array (from this instance of
* {@link PackedArrayRecorder}) that may be recycled to avoid allocation and
* copy operations.
* @return an array containing the values accumulated since the last interval array was taken.
*/
public synchronized PackedLongArray getIntervalArray(final PackedLongArray arrayToRecycle) {
return getIntervalArray(arrayToRecycle, true);
}
/**
* Get an interval array, which will include a stable, consistent view of all values
* accumulated since the last interval array was taken.
* <p>
* {@link PackedArrayRecorder#getIntervalArray(PackedLongArray arrayToRecycle)
* getIntervalArray(arrayToRecycle)}
* accepts a previously returned interval array that can be recycled internally to avoid allocation
* and content copying operations, and is therefore significantly more efficient for repeated use than
* {@link PackedArrayRecorder#getIntervalArray()}. The provided {@code arrayToRecycle} must
     * either be null or an interval array returned by a previous call to
* {@link PackedArrayRecorder#getIntervalArray(PackedLongArray arrayToRecycle)
* getIntervalArray(arrayToRecycle)} or
* {@link PackedArrayRecorder#getIntervalArray()}.
* <p>
* NOTE: The caller is responsible for not recycling the same returned interval array more than once. If
* the same interval array instance is recycled more than once, behavior is undefined.
* <p>
* Calling {@link PackedArrayRecorder#getIntervalArray(PackedLongArray arrayToRecycle)
     * getIntervalArray(arrayToRecycle, enforceContainingInstance)} will reset the values at all indexes
* of the array tracked by the recorder, and start accumulating values for the next interval.
*
* @param arrayToRecycle a previously returned interval array that may be recycled to avoid allocation and
* copy operations.
* @param enforceContainingInstance if true, will only allow recycling of arrays previously returned from this
* instance of {@link PackedArrayRecorder}. If false, will allow recycling arrays
* previously returned by other instances of {@link PackedArrayRecorder}.
* @return an array containing the values accumulated since the last interval array was taken.
*/
public synchronized PackedLongArray getIntervalArray(final PackedLongArray arrayToRecycle,
final boolean enforceContainingInstance) {
// Verify that replacement array can validly be used as an inactive array replacement:
validateFitAsReplacementArray(arrayToRecycle, enforceContainingInstance);
PackedLongArray sampledArray = performIntervalSample(arrayToRecycle);
return sampledArray;
}
/**
* Reset the array contents to all zeros.
*/
public synchronized void reset() {
// the currently active array is reset each time we flip:
performIntervalSample(null);
}
private PackedLongArray performIntervalSample(final PackedLongArray arrayToRecycle) {
PackedLongArray inactiveArray = arrayToRecycle;
try {
recordingPhaser.readerLock();
// Make sure we have an inactive version to flip in:
if (inactiveArray == null) {
if (activeArray instanceof InternalConcurrentPackedLongArray) {
inactiveArray = new InternalConcurrentPackedLongArray(instanceId, activeArray.length());
} else {
throw new IllegalStateException("Unexpected internal array type for activeArray");
}
} else {
inactiveArray.clear();
}
// Swap active and inactive arrays:
final PackedLongArray tempArray = inactiveArray;
inactiveArray = activeArray;
activeArray = tempArray;
// Mark end time of previous interval and start time of new one:
long now = System.currentTimeMillis();
activeArray.setStartTimeStamp(now);
inactiveArray.setEndTimeStamp(now);
// Make sure we are not in the middle of recording a value on the previously active array:
// Flip phase to make sure no recordings that were in flight pre-flip are still active:
recordingPhaser.flipPhase(500000L /* yield in 0.5 msec units if needed */);
} finally {
recordingPhaser.readerUnlock();
}
return inactiveArray;
}
private static class InternalConcurrentPackedLongArray extends ConcurrentPackedLongArray {
private final long containingInstanceId;
private InternalConcurrentPackedLongArray(final long id, int virtualLength, final int initialPhysicalLength) {
super(virtualLength, initialPhysicalLength);
this.containingInstanceId = id;
}
private InternalConcurrentPackedLongArray(final long id, final int virtualLength) {
super(virtualLength);
this.containingInstanceId = id;
}
}
private void validateFitAsReplacementArray(final PackedLongArray replacementArray,
final boolean enforceContainingInstance) {
boolean bad = true;
if (replacementArray == null) {
bad = false;
} else if (replacementArray instanceof InternalConcurrentPackedLongArray) {
if ((activeArray instanceof InternalConcurrentPackedLongArray)
&&
((!enforceContainingInstance) ||
(((InternalConcurrentPackedLongArray)replacementArray).containingInstanceId ==
((InternalConcurrentPackedLongArray) activeArray).containingInstanceId)
)) {
bad = false;
}
}
if (bad) {
throw new IllegalArgumentException("replacement array must have been obtained via a previous" +
" getIntervalArray() call from this " + this.getClass().getName() +
(enforceContainingInstance ? " instance" : " class"));
}
}
}
| 14,419 | 47.552189 | 118 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/packedarray/IterationValue.java | package org.HdrHistogram.packedarray;
/**
* An iteration value representing the index iterated to, and the value found at that index
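 * <p>
 * Note: iterators in this package may reuse a single {@link IterationValue} instance across calls to
 * {@code next()}, so callers that need to retain values beyond the current iteration step should copy
 * the primitives out rather than hold on to the object itself. A hedged sketch (names illustrative):
 * <pre><code>
 * for (IterationValue v : array.nonZeroValues()) {
 *     indexes.add(v.getIndex()); // copy out the index...
 *     values.add(v.getValue()); // ...and the value, instead of storing v
 * }
 * </code></pre>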
*/
public class IterationValue {
IterationValue() {
}
void set(final int index, final long value) {
this.index = index;
this.value = value;
}
/**
* The index iterated to
* @return the index iterated to
*/
public int getIndex() {
return index;
}
/**
* The value at the index iterated to
* @return the value at the index iterated to
*/
public long getValue() {
return value;
}
private int index;
private long value;
}
| 666 | 18.617647 | 91 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/packedarray/PackedArraySingleWriterRecorder.java | /**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram.packedarray;
import org.HdrHistogram.SingleWriterRecorder;
import org.HdrHistogram.WriterReaderPhaser;
import java.util.concurrent.atomic.AtomicLong;
/**
* Records increments and adds of integer values at indexes of a logical array of 64 bit signed integer values, and
* provides stable interval {@link PackedLongArray} samples from live recorded data without interrupting or stalling
* active recording of values. Each interval array provided contains all values accumulated since the previous
* interval array was taken.
* <p>
 * This pattern is commonly used for logging interval accumulator information while recording is ongoing.
* <p>
* {@link PackedArraySingleWriterRecorder} expects only a single thread (the "single writer") to call
* {@link PackedArraySingleWriterRecorder#increment(int)} or
* {@link PackedArraySingleWriterRecorder#add(int, long)} at any point in time.
* It DOES NOT safely support concurrent increment or add calls.
* While the {@link #increment increment()} and {@link #add add()} methods are not quite wait-free, they
* come "close" to that behavior in the sense that a given thread will incur a total of no more than a capped
* fixed number (e.g. 74 in a current implementation) of non-wait-free add or increment operations during
* the lifetime of an interval array (including across recycling of that array across intervals within the
* same recorder), regardless of the number of operations done.
* <p>
* A common pattern for using a {@link PackedArraySingleWriterRecorder} looks like this:
* <br><pre><code>
 * PackedArraySingleWriterRecorder recorder = new PackedArraySingleWriterRecorder(virtualLength); // e.g. with the desired (virtual) array length
* PackedLongArray intervalArray = null;
* ...
* [start of some loop construct that periodically wants to grab an interval array]
* ...
* // Get interval array, recycling previous interval array:
* intervalArray = recorder.getIntervalArray(intervalArray);
* // Process the interval array, which is nice and stable here:
* myLogWriter.logArrayContents(intervalArray);
* ...
* [end of loop construct]
* </code></pre>
*
*/
public class PackedArraySingleWriterRecorder {
private static final AtomicLong instanceIdSequencer = new AtomicLong(1);
private final long instanceId = instanceIdSequencer.getAndIncrement();
private final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser();
private volatile PackedLongArray activeArray;
/**
* Construct a {@link PackedArraySingleWriterRecorder} with a given (virtual) array length.
*
* @param virtualLength The (virtual) array length
*/
public PackedArraySingleWriterRecorder(final int virtualLength) {
activeArray = new InternalPackedLongArray(instanceId, virtualLength);
activeArray.setStartTimeStamp(System.currentTimeMillis());
}
/**
* Construct a {@link PackedArraySingleWriterRecorder} with a given (virtual) array length, starting with a given
* initial physical backing store length
*
* @param virtualLength The (virtual) array length
* @param initialPhysicalLength The initial physical backing store length
*/
public PackedArraySingleWriterRecorder(final int virtualLength, final int initialPhysicalLength) {
activeArray = new InternalPackedLongArray(instanceId, virtualLength, initialPhysicalLength);
activeArray.setStartTimeStamp(System.currentTimeMillis());
}
/**
* Returns the virtual length of the array represented by this recorder
* @return The virtual length of the array represented by this recorder
*/
public int length() {
return activeArray.length();
}
/**
     * Change the (virtual) length of the array represented by this recorder
* @param newVirtualLength the new (virtual) length to use
*/
public void setVirtualLength(int newVirtualLength) {
try {
recordingPhaser.readerLock();
// We don't care about concurrent modifications to the array, as setVirtualLength() in the
// ConcurrentPackedLongArray takes care of those. However, we must perform the change of virtual
// length under the recorder's readerLock protection to prevent mid-change observations:
activeArray.setVirtualLength(newVirtualLength);
} finally {
recordingPhaser.readerUnlock();
}
}
/**
* Increment a value at a given index in the array
* @param index the index of the value to be incremented
     * @throws ArrayIndexOutOfBoundsException (may throw) if index exceeds length()
*/
public void increment(final int index) throws ArrayIndexOutOfBoundsException {
long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
try {
activeArray.increment(index);
} finally {
recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
}
}
/**
* Add to a value at a given index in the array
* @param index The index of value to add to
* @param valueToAdd The amount to add to the value at the given index
     * @throws ArrayIndexOutOfBoundsException (may throw) if index exceeds length()
*/
public void add(final int index, final long valueToAdd) throws ArrayIndexOutOfBoundsException {
long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
try {
activeArray.add(index, valueToAdd);
} finally {
recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
}
}
/**
* Get an interval array, which will include a stable, consistent view of all values
* accumulated since the last interval array was taken.
* <p>
* Calling this method is equivalent to calling {@code getIntervalArray(null)}. It is generally recommended
* that the {@link PackedArraySingleWriterRecorder#getIntervalArray(PackedLongArray arrayToRecycle)
     * getIntervalArray(arrayToRecycle)} form be used for
* regular interval array sampling, as that form accepts a previously returned interval array that can be
* recycled internally to avoid allocation and content copying operations, and is therefore significantly
* more efficient for repeated use than {@link PackedArraySingleWriterRecorder#getIntervalArray()}.
* <p>
* Calling {@link PackedArraySingleWriterRecorder#getIntervalArray()} will reset the values at
* all indexes of the array tracked by the recorder, and start accumulating values for the next interval.
*
* @return an array containing the values accumulated since the last interval array was taken.
*/
public synchronized PackedLongArray getIntervalArray() {
return getIntervalArray(null);
}
/**
* Get an interval array, which will include a stable, consistent view of all values
* accumulated since the last interval array was taken.
* <p>
* {@link PackedArraySingleWriterRecorder#getIntervalArray(PackedLongArray arrayToRecycle)
* getIntervalArray(arrayToRecycle)}
* accepts a previously returned interval array that can be recycled internally to avoid allocation
* and content copying operations, and is therefore significantly more efficient for repeated use than
* {@link PackedArraySingleWriterRecorder#getIntervalArray()}. The provided {@code arrayToRecycle} must
     * be either null or an interval array returned by a previous call to
* {@link PackedArraySingleWriterRecorder#getIntervalArray(PackedLongArray arrayToRecycle)
* getIntervalArray(arrayToRecycle)} or
* {@link PackedArraySingleWriterRecorder#getIntervalArray()}.
* <p>
* NOTE: The caller is responsible for not recycling the same returned interval array more than once. If
* the same interval array instance is recycled more than once, behavior is undefined.
* <p>
* Calling {@link PackedArraySingleWriterRecorder#getIntervalArray(PackedLongArray arrayToRecycle)
* getIntervalArray(arrayToRecycle)} will reset the values at all indexes of the array
* tracked by the recorder, and start accumulating values for the next interval.
*
* @param arrayToRecycle a previously returned interval array (from this instance of
* {@link PackedArraySingleWriterRecorder}) that may be recycled to avoid allocation and
* copy operations.
* @return an array containing the values accumulated since the last interval array was taken.
*/
public synchronized PackedLongArray getIntervalArray(final PackedLongArray arrayToRecycle) {
return getIntervalArray(arrayToRecycle, true);
}
/**
* Get an interval array, which will include a stable, consistent view of all values
* accumulated since the last interval array was taken.
* <p>
* {@link PackedArraySingleWriterRecorder#getIntervalArray(PackedLongArray arrayToRecycle)
* getIntervalArray(arrayToRecycle)}
* accepts a previously returned interval array that can be recycled internally to avoid allocation
* and content copying operations, and is therefore significantly more efficient for repeated use than
* {@link PackedArraySingleWriterRecorder#getIntervalArray()}. The provided {@code arrayToRecycle} must
     * be either null or an interval array returned by a previous call to
* {@link PackedArraySingleWriterRecorder#getIntervalArray(PackedLongArray arrayToRecycle)
* getIntervalArray(arrayToRecycle)} or
* {@link PackedArraySingleWriterRecorder#getIntervalArray()}.
* <p>
* NOTE: The caller is responsible for not recycling the same returned interval array more than once. If
* the same interval array instance is recycled more than once, behavior is undefined.
* <p>
* Calling {@link PackedArraySingleWriterRecorder#getIntervalArray(PackedLongArray arrayToRecycle)
     * getIntervalArray(arrayToRecycle, enforceContainingInstance)} will reset the values at all indexes
* of the array tracked by the recorder, and start accumulating values for the next interval.
*
* @param arrayToRecycle a previously returned interval array that may be recycled to avoid allocation and
* copy operations.
* @param enforceContainingInstance if true, will only allow recycling of arrays previously returned from this
* instance of {@link PackedArraySingleWriterRecorder}. If false, will allow recycling arrays
* previously returned by other instances of {@link PackedArraySingleWriterRecorder}.
* @return an array containing the values accumulated since the last interval array was taken.
*/
public synchronized PackedLongArray getIntervalArray(final PackedLongArray arrayToRecycle,
final boolean enforceContainingInstance) {
// Verify that replacement array can validly be used as an inactive array replacement:
validateFitAsReplacementArray(arrayToRecycle, enforceContainingInstance);
PackedLongArray sampledArray = performIntervalSample(arrayToRecycle);
return sampledArray;
}
/**
* Reset the array contents to all zeros.
*/
public synchronized void reset() {
// the currently active array is reset each time we flip:
performIntervalSample(null);
}
private PackedLongArray performIntervalSample(final PackedLongArray arrayToRecycle) {
PackedLongArray inactiveArray = arrayToRecycle;
try {
recordingPhaser.readerLock();
// Make sure we have an inactive version to flip in:
if (inactiveArray == null) {
if (activeArray instanceof InternalPackedLongArray) {
inactiveArray = new InternalPackedLongArray(instanceId, activeArray.length());
} else {
throw new IllegalStateException("Unexpected internal array type for activeArray");
}
} else {
inactiveArray.clear();
}
// Swap active and inactive arrays:
final PackedLongArray tempArray = inactiveArray;
inactiveArray = activeArray;
activeArray = tempArray;
// Mark end time of previous interval and start time of new one:
long now = System.currentTimeMillis();
activeArray.setStartTimeStamp(now);
inactiveArray.setEndTimeStamp(now);
// Make sure we are not in the middle of recording a value on the previously active array:
// Flip phase to make sure no recordings that were in flight pre-flip are still active:
recordingPhaser.flipPhase(500000L /* yield in 0.5 msec units if needed */);
} finally {
recordingPhaser.readerUnlock();
}
return inactiveArray;
}
private static class InternalPackedLongArray extends PackedLongArray {
private final long containingInstanceId;
private InternalPackedLongArray(final long id, int virtualLength, final int initialPhysicalLength) {
super(virtualLength, initialPhysicalLength);
this.containingInstanceId = id;
}
private InternalPackedLongArray(final long id, final int virtualLength) {
super(virtualLength);
this.containingInstanceId = id;
}
}
private void validateFitAsReplacementArray(final PackedLongArray replacementArray,
final boolean enforceContainingInstance) {
boolean bad = true;
if (replacementArray == null) {
bad = false;
} else if (replacementArray instanceof InternalPackedLongArray) {
if ((activeArray instanceof InternalPackedLongArray)
&&
((!enforceContainingInstance) ||
(((InternalPackedLongArray)replacementArray).containingInstanceId ==
((InternalPackedLongArray) activeArray).containingInstanceId)
)) {
bad = false;
}
}
if (bad) {
throw new IllegalArgumentException("replacement array must have been obtained via a previous" +
" getIntervalArray() call from this " + this.getClass().getName() +
(enforceContainingInstance ? " instance" : " class"));
}
}
}
| 14,804 | 48.51505 | 129 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/packedarray/PackedArrayContext.java | package org.HdrHistogram.packedarray;
import java.util.Arrays;
/**
 * A non-concurrent array context. No atomics are used; the cas* methods are plain
 * check-and-set operations, relying on the absence of concurrent access.
*/
class PackedArrayContext extends AbstractPackedArrayContext {
PackedArrayContext(final int virtualLength,
final int initialPhysicalLength,
final boolean allocateArray) {
super(virtualLength, initialPhysicalLength);
if (allocateArray) {
array = new long[getPhysicalLength()];
init(virtualLength);
}
}
PackedArrayContext(final int virtualLength,
final int initialPhysicalLength) {
this(virtualLength, initialPhysicalLength, true);
}
PackedArrayContext(final int virtualLength,
final AbstractPackedArrayContext from,
final int newPhysicalArrayLength) {
this(virtualLength, newPhysicalArrayLength);
if (isPacked()) {
populateEquivalentEntriesWithZerosFromOther(from);
}
}
private long[] array;
private int populatedShortLength = 0;
@Override
int length() {
return array.length;
}
@Override
int getPopulatedShortLength() {
return populatedShortLength;
}
@Override
boolean casPopulatedShortLength(final int expectedPopulatedShortLength, final int newPopulatedShortLength) {
if (this.populatedShortLength != expectedPopulatedShortLength) return false;
this.populatedShortLength = newPopulatedShortLength;
return true;
}
@Override
boolean casPopulatedLongLength(final int expectedPopulatedLongLength, final int newPopulatedLongLength) {
if (getPopulatedLongLength() != expectedPopulatedLongLength) return false;
return casPopulatedShortLength(populatedShortLength, newPopulatedLongLength << 2);
}
@Override
long getAtLongIndex(final int longIndex) {
return array[longIndex];
}
@Override
boolean casAtLongIndex(final int longIndex, final long expectedValue, final long newValue) {
if (array[longIndex] != expectedValue) return false;
array[longIndex] = newValue;
return true;
}
@Override
void lazySetAtLongIndex(final int longIndex, final long newValue) {
array[longIndex] = newValue;
}
@Override
void clearContents() {
        Arrays.fill(array, 0);
init(getVirtualLength());
}
@Override
void resizeArray(final int newLength) {
array = Arrays.copyOf(array, newLength);
}
@Override
long getAtUnpackedIndex(final int index) {
return array[index];
}
@Override
void setAtUnpackedIndex(final int index, final long newValue) {
array[index] = newValue;
}
@Override
void lazySetAtUnpackedIndex(final int index, final long newValue) {
array[index] = newValue;
}
@Override
long incrementAndGetAtUnpackedIndex(final int index) {
array[index]++;
return array[index];
}
@Override
long addAndGetAtUnpackedIndex(final int index, final long valueToAdd) {
array[index] += valueToAdd;
return array[index];
}
@Override
String unpackedToString() {
return Arrays.toString(array);
}
}
| 3,304 | 26.541667 | 112 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/packedarray/PackedLongArray.java | package org.HdrHistogram.packedarray;
/**
 * A Packed array of signed 64 bit values that supports {@link #get get()}, {@link #set set()},
* {@link #add add()} and {@link #increment increment()} operations on the logical contents of the array.
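 * <p>
 * A minimal usage sketch (the index and values below are purely illustrative):
 * <pre><code>
 *   PackedLongArray array = new PackedLongArray(1000000);
 *   array.increment(12345);
 *   array.add(12345, 41);
 *   long count = array.get(12345); // 42
 * </code></pre>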
*/
public class PackedLongArray extends AbstractPackedLongArray {
PackedLongArray() {}
public PackedLongArray(final int virtualLength) {
this(virtualLength, AbstractPackedArrayContext.MINIMUM_INITIAL_PACKED_ARRAY_CAPACITY);
}
public PackedLongArray(final int virtualLength, final int initialPhysicalLength) {
setArrayContext(new PackedArrayContext(virtualLength, initialPhysicalLength));
}
@Override
void resizeStorageArray(final int newPhysicalLengthInLongs) {
AbstractPackedArrayContext oldArrayContext = getArrayContext();
PackedArrayContext newArrayContext =
new PackedArrayContext(oldArrayContext.getVirtualLength(), oldArrayContext, newPhysicalLengthInLongs);
setArrayContext(newArrayContext);
for (IterationValue v : oldArrayContext.nonZeroValues()) {
set(v.getIndex(), v.getValue());
}
}
@Override
public void setVirtualLength(final int newVirtualArrayLength) {
if (newVirtualArrayLength < length()) {
throw new IllegalArgumentException(
"Cannot set virtual length, as requested length " + newVirtualArrayLength +
" is smaller than the current virtual length " + length());
}
AbstractPackedArrayContext currentArrayContext = getArrayContext();
if (currentArrayContext.isPacked() &&
(currentArrayContext.determineTopLevelShiftForVirtualLength(newVirtualArrayLength) ==
currentArrayContext.getTopLevelShift())) {
// No changes to the array context contents is needed. Just change the virtual length.
currentArrayContext.setVirtualLength(newVirtualArrayLength);
return;
}
AbstractPackedArrayContext oldArrayContext = currentArrayContext;
setArrayContext(new PackedArrayContext(newVirtualArrayLength, oldArrayContext, oldArrayContext.length()));
for (IterationValue v : oldArrayContext.nonZeroValues()) {
set(v.getIndex(), v.getValue());
}
}
@Override
public PackedLongArray copy() {
PackedLongArray copy = new PackedLongArray(this.length(), this.getPhysicalLength());
copy.add(this);
return copy;
}
@Override
void clearContents() {
getArrayContext().clearContents();
}
@Override
long criticalSectionEnter() {
return 0;
}
@Override
void criticalSectionExit(final long criticalValueAtEnter) {
}
}
| 2,792 | 36.743243 | 118 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/packedarray/ConcurrentPackedLongArray.java | package org.HdrHistogram.packedarray;
import org.HdrHistogram.WriterReaderPhaser;
import java.io.IOException;
import java.io.ObjectInputStream;
/**
* A Packed array of signed 64 bit values that supports {@link #get get()}, {@link #set set()}, {@link #add add()} and
 * {@link #increment increment()} operations on the logical contents of the array.
* <p>
* {@link ConcurrentPackedLongArray} supports concurrent accumulation, with the {@link #add add()} and {@link #increment
* increment()} methods providing lossless atomic accumulation in the presence of multiple writers. However, it is
* important to note that {@link #add add()} and {@link #increment increment()} are the *only* safe concurrent
* operations, and that all other operations, including {@link #get get()}, {@link #set set()} and {@link #clear()} may
* produce "surprising" results if used on an array that is not at rest.
* <p>
* While the {@link #add add()} and {@link #increment increment()} methods are not quite wait-free, they come "close"
 * to that behavior in the sense that a given thread will incur a total of no more than a capped fixed number (e.g. 74 in a
* current implementation) of non-wait-free add or increment operations during the lifetime of an array, regardless of
* the number of operations done.
* </p>
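 * <p>
 * An illustrative accumulation sketch (thread management elided; index and values are illustrative):
 * <pre><code>
 *   ConcurrentPackedLongArray array = new ConcurrentPackedLongArray(1000000);
 *   // add() and increment() may be called concurrently from any number of threads:
 *   array.increment(12345);
 *   array.add(678, 42);
 *   // Read results only once writers are quiescent:
 *   long count = array.get(12345);
 * </code></pre>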
*/
public class ConcurrentPackedLongArray extends PackedLongArray {
public ConcurrentPackedLongArray(final int virtualLength) {
this(virtualLength, AbstractPackedArrayContext.MINIMUM_INITIAL_PACKED_ARRAY_CAPACITY);
}
public ConcurrentPackedLongArray(final int virtualLength, final int initialPhysicalLength) {
super();
setArrayContext(new ConcurrentPackedArrayContext(virtualLength, initialPhysicalLength));
}
transient WriterReaderPhaser wrp = new WriterReaderPhaser();
@Override
void resizeStorageArray(final int newPhysicalLengthInLongs) {
AbstractPackedArrayContext inactiveArrayContext;
try {
wrp.readerLock();
// Create a new array context, mimicking the structure of the currently active
// context, but without actually populating any values.
ConcurrentPackedArrayContext newArrayContext =
new ConcurrentPackedArrayContext(
getArrayContext().getVirtualLength(),
getArrayContext(), newPhysicalLengthInLongs
);
// Flip the current live array context and the newly created one:
inactiveArrayContext = getArrayContext();
setArrayContext(newArrayContext);
wrp.flipPhase();
// The now inactive array context is stable, and the new array context is active.
// We don't want to try to record values from the inactive into the new array context
// here (under the wrp reader lock) because we could deadlock if resizing is needed.
// Instead, value recording will be done after we release the read lock.
} finally {
wrp.readerUnlock();
}
// Record all contents from the now inactive array to new live one:
for (IterationValue v : inactiveArrayContext.nonZeroValues()) {
add(v.getIndex(), v.getValue());
}
// inactive array contents is fully committed into the newly resized live array. It can now die in peace.
}
@Override
public void setVirtualLength(final int newVirtualArrayLength) {
if (newVirtualArrayLength < length()) {
throw new IllegalArgumentException(
"Cannot set virtual length, as requested length " + newVirtualArrayLength +
" is smaller than the current virtual length " + length());
}
AbstractPackedArrayContext inactiveArrayContext;
try {
wrp.readerLock();
AbstractPackedArrayContext currentArrayContext = getArrayContext();
if (currentArrayContext.isPacked() &&
(currentArrayContext.determineTopLevelShiftForVirtualLength(newVirtualArrayLength) ==
currentArrayContext.getTopLevelShift())) {
// No changes to the array context contents is needed. Just change the virtual length.
currentArrayContext.setVirtualLength(newVirtualArrayLength);
return;
}
inactiveArrayContext = currentArrayContext;
setArrayContext(
new ConcurrentPackedArrayContext(
newVirtualArrayLength,
inactiveArrayContext,
inactiveArrayContext.length()
));
wrp.flipPhase();
// The now inactive array context is stable, and the new array context is active.
// We don't want to try to record values from the inactive into the new array context
// here (under the wrp reader lock) because we could deadlock if resizing is needed.
// Instead, value recording will be done after we release the read lock.
} finally {
wrp.readerUnlock();
}
for (IterationValue v : inactiveArrayContext.nonZeroValues()) {
add(v.getIndex(), v.getValue());
}
}
@Override
public ConcurrentPackedLongArray copy() {
ConcurrentPackedLongArray copy = new ConcurrentPackedLongArray(this.length(), this.getPhysicalLength());
copy.add(this);
return copy;
}
@Override
void clearContents() {
try {
wrp.readerLock();
getArrayContext().clearContents();
} finally {
wrp.readerUnlock();
}
}
@Override
long criticalSectionEnter() {
return wrp.writerCriticalSectionEnter();
}
@Override
void criticalSectionExit(long criticalValueAtEnter) {
wrp.writerCriticalSectionExit(criticalValueAtEnter);
}
@Override
public String toString() {
try {
wrp.readerLock();
return super.toString();
} finally {
wrp.readerUnlock();
}
}
@Override
public void clear() {
try {
wrp.readerLock();
super.clear();
} finally {
wrp.readerUnlock();
}
}
private void readObject(final ObjectInputStream o)
throws IOException, ClassNotFoundException {
o.defaultReadObject();
wrp = new WriterReaderPhaser();
}
}
| 6,572 | 37.664706 | 120 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/HdrHistogram-benchmarks/src/main/java/org/HdrHistogram/SkinnyHistogram.java | /**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package org.HdrHistogram;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.zip.Deflater;
/**
 * This is a Java port of the Scala SkinnyHistogram encoding logic from the Khronus
 * project on GitHub. The port only covers the encoding side, since it is (currently) mainly used
 * to gauge the compression volume (and not for correctness). It includes ZigZag putLong and putInt
* implementations to avoid dependencies on other libs (including HdrHistogram), so that benchmarking
* against older versions will be possible.
*/
public class SkinnyHistogram extends Histogram {
private static int encodingCompressedCookieBase = 130;
private static int defaultCompressionLevel = Deflater.DEFAULT_COMPRESSION;
public SkinnyHistogram(long max, int digits) {
super(max, digits);
}
public SkinnyHistogram(int digits) {
super(digits);
}
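    // Compressed encoding layout produced by encodeIntoCompressedByteBuffer() below
    // (byte offsets into the target buffer):
    //   [0..3]   encoding cookie (encodingCompressedCookieBase)
    //   [4..7]   compressed payload length (back-patched after deflation)
    //   [8..11]  uncompressed payload length
    //   [12.. ]  Deflater output of the encodeIntoByteBuffer() payload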
    public synchronized int encodeIntoCompressedByteBuffer(final ByteBuffer targetBuffer) {
ByteBuffer intermediateUncompressedByteBuffer = ByteBuffer.allocate(this.getNeededByteBufferCapacity());
int uncompressedLength = this.encodeIntoByteBuffer(intermediateUncompressedByteBuffer);
targetBuffer.putInt(SkinnyHistogram.encodingCompressedCookieBase);
targetBuffer.putInt(0);
targetBuffer.putInt(uncompressedLength);
Deflater compressor = new Deflater(defaultCompressionLevel);
compressor.setInput(intermediateUncompressedByteBuffer.array(), 0, uncompressedLength);
compressor.finish();
byte[] targetArray = targetBuffer.array();
int compressedDataLength = compressor.deflate(targetArray, 12, targetArray.length - 12);
compressor.reset();
targetBuffer.putInt(4, compressedDataLength);
return compressedDataLength + 12;
}
    public synchronized int encodeIntoByteBuffer(final ByteBuffer buffer) {
// val output = new Output(buffer.array())
long maxValue = getMaxValue();
int initialPosition = buffer.position();
buffer.putInt(normalizingIndexOffset);
buffer.putInt(getNumberOfSignificantValueDigits());
buffer.putLong(getLowestDiscernibleValue());
buffer.putLong(getHighestTrackableValue());
buffer.putDouble(integerToDoubleValueConversionRatio);
buffer.putLong(getTotalCount());
int seqPairBufferPosition = buffer.position();
buffer.putInt(0);
int seqPairLength = writeCountsDiffs(buffer);
buffer.putInt(seqPairBufferPosition, seqPairLength);
return (buffer.position() - initialPosition);
}
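    // writeCountsDiffs() below emits only the non-zero count slots, each as a
    // (ZigZag-encoded index delta, ZigZag-encoded count delta) pair relative to the
    // previous non-zero slot; this delta form is what keeps the encoding compact for
    // sparse histograms.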
private int writeCountsDiffs(ByteBuffer buffer) {
long lastValue = 0;
int lastIdx = 0;
int seqLength = 0;
for (int i = 0; i < counts.length; i++) {
long value = counts[i];
if (value > 0) {
putInt(buffer, i - lastIdx);
putLong(buffer, value - lastValue);
lastIdx = i;
lastValue = value;
seqLength++;
}
}
return seqLength;
}
/**
* Writes a long value to the given buffer in LEB128 ZigZag encoded format
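     * <p>
     * ZigZag first maps the signed value to an unsigned form (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...)
     * so that values of small magnitude encode into few LEB128 bytes.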
* @param buffer the buffer to write to
* @param value the value to write to the buffer
*/
static void putLong(ByteBuffer buffer, long value) {
value = (value << 1) ^ (value >> 63);
if (value >>> 7 == 0) {
buffer.put((byte) value);
} else {
buffer.put((byte) ((value & 0x7F) | 0x80));
if (value >>> 14 == 0) {
buffer.put((byte) (value >>> 7));
} else {
buffer.put((byte) (value >>> 7 | 0x80));
if (value >>> 21 == 0) {
buffer.put((byte) (value >>> 14));
} else {
buffer.put((byte) (value >>> 14 | 0x80));
if (value >>> 28 == 0) {
buffer.put((byte) (value >>> 21));
} else {
buffer.put((byte) (value >>> 21 | 0x80));
if (value >>> 35 == 0) {
buffer.put((byte) (value >>> 28));
} else {
buffer.put((byte) (value >>> 28 | 0x80));
if (value >>> 42 == 0) {
buffer.put((byte) (value >>> 35));
} else {
buffer.put((byte) (value >>> 35 | 0x80));
if (value >>> 49 == 0) {
buffer.put((byte) (value >>> 42));
} else {
buffer.put((byte) (value >>> 42 | 0x80));
if (value >>> 56 == 0) {
buffer.put((byte) (value >>> 49));
} else {
buffer.put((byte) (value >>> 49 | 0x80));
buffer.put((byte) (value >>> 56));
}
}
}
}
}
}
}
}
}
/**
* Writes an int value to the given buffer in LEB128 ZigZag encoded format
* @param buffer the buffer to write to
* @param value the value to write to the buffer
*/
static void putInt(ByteBuffer buffer, int value) {
value = (value << 1) ^ (value >> 31);
if (value >>> 7 == 0) {
buffer.put((byte) value);
} else {
buffer.put((byte) ((value & 0x7F) | 0x80));
if (value >>> 14 == 0) {
buffer.put((byte) (value >>> 7));
} else {
buffer.put((byte) (value >>> 7 | 0x80));
if (value >>> 21 == 0) {
buffer.put((byte) (value >>> 14));
} else {
buffer.put((byte) (value >>> 14 | 0x80));
if (value >>> 28 == 0) {
buffer.put((byte) (value >>> 21));
} else {
buffer.put((byte) (value >>> 21 | 0x80));
buffer.put((byte) (value >>> 28));
}
}
}
}
}
}
| 6,631 | 36.897143 | 112 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/HdrHistogram-benchmarks/src/main/java/bench/HdrHistogramEncodingBench.java | /**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package bench;
import org.HdrHistogram.*;
import org.openjdk.jmh.annotations.*;
import java.nio.ByteBuffer;
import java.util.concurrent.TimeUnit;
import java.util.zip.DataFormatException;
/*
Run all benchmarks:
$ java -jar target/benchmarks.jar
Run selected benchmarks:
$ java -jar target/benchmarks.jar (regexp)
Run the profiling (Linux only):
$ java -Djmh.perfasm.events=cycles,cache-misses -jar target/benchmarks.jar -f 1 -prof perfasm
*/
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(3)
@State(Scope.Thread)
public class HdrHistogramEncodingBench {
@Param({"case1", "case2", "case3", "sparsed1", "sparsed2", "quadratic", "cubic",
"case1PlusSparsed2", "longestjHiccupLine", "shortestjHiccupLine", "sumOfjHiccupLines"})
String latencySeriesName;
@Param({ "2", "3" })
int numberOfSignificantValueDigits;
AbstractHistogram histogram;
SkinnyHistogram skinnyHistogram;
ByteBuffer buffer;
@Setup
public void setup() throws NoSuchMethodException {
histogram = new Histogram(numberOfSignificantValueDigits);
skinnyHistogram = new SkinnyHistogram(numberOfSignificantValueDigits);
Iterable<Long> latencySeries = HistogramData.data.get(latencySeriesName);
for (long latency : latencySeries) {
histogram.recordValue(latency);
skinnyHistogram.recordValue(latency);
}
buffer = ByteBuffer.allocate(histogram.getNeededByteBufferCapacity());
}
@Benchmark
public void encodeIntoCompressedByteBuffer() {
buffer.clear();
histogram.encodeIntoCompressedByteBuffer(buffer);
}
@Benchmark
public void skinnyEncodeIntoCompressedByteBuffer() {
buffer.clear();
skinnyHistogram.encodeIntoCompressedByteBuffer(buffer);
}
@Benchmark
public void roundtripCompressed() throws DataFormatException {
buffer.clear();
histogram.encodeIntoCompressedByteBuffer(buffer);
buffer.rewind();
Histogram.decodeFromCompressedByteBuffer(buffer, 0);
}
}
| 2,333 | 28.923077 | 99 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/HdrHistogram-benchmarks/src/main/java/bench/HistogramData.java | /**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package bench;
import org.HdrHistogram.*;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
public class HistogramData {
static Iterable<Integer> case1IntLatencies = Arrays.asList(
1623, 1752, 3215, 1437, 154, 1358, 625, 217, 698, 6862, 1167, 1948, 1215, 665, 1372,
889, 767, 2135, 3163, 573, 1839, 922, 475, 1233, 1013, 434, 140, 684, 400, 879,
621, 1167, 1518, 534, 420, 9, 906, 1060, 646, 1181, 661, 2661, 844, 1132, 1169,
614, 904, 3531, 1830, 941, 1641, 546, 729, 10, 254, 779, 1233, 499, 439, 2597,
263, 1009, 469, 621, 1238, 1623, 2911, 380, 4654, 1105, 501, 771, 692, 3493, 2120,
2959, 2931, 895, 835, 483, 1274, 1551, 617, 666, 1296, 1041, 2639, 10, 290, 1289,
720, 190, 1320, 663, 520, 3646, 653, 1691, 201, 2959, 183, 2534, 632, 565, 2844,
3421, 1645, 480, 894, 290, 1465, 1972, 752, 623, 1500, 2422, 1708, 755, 287, 1116,
1806, 88, 676, 2118, 533, 766, 1090, 1066, 97, 437, 103, 1148, 684, 645, 2673,
738, 1151, 757, 459, 2302, 671, 1080, 2775, 663, 762, 11448, 1442, 2726, 942, 1203,
3435, 3509, 788, 1149, 3363, 1495, 3758, 4678, 5421, 493, 1072, 1702, 603, 1191, 726,
3878, 866, 1136, 291, 1025, 863, 443, 786, 615, 676, 962, 136, 681, 1031, 970,
822, 712, 735, 387, 596, 248, 1175, 275, 955, 1300, 677, 323, 408, 633, 745,
623, 550, 522, 719, 334, 1614, 1238, 546, 296, 1090, 392, 828, 519, 2444, 257,
973, 461, 997, 728, 748, 610, 595, 2012, 3476, 374, 2929, 429, 435, 1141, 665,
677, 3022, 1400, 965, 406, 530, 518, 255, 435, 880, 968, 1132, 1365, 314, 2987,
704, 688, 1398, 610, 741, 339, 1333, 746, 551, 621, 348, 571, 1420, 2360, 1099,
485, 224, 521, 1849, 1144, 750, 2156, 1792, 11, 867, 740, 771, 1198, 625, 1202,
894, 3372, 3061, 845, 535, 3036, 680, 2240, 1394, 1776, 1010, 3556, 647, 617, 892,
397, 972, 99, 1848, 815, 1493, 715, 1279, 131, 1101, 1025, 10, 2963, 547, 383,
1039, 1024, 847, 966, 1871, 341, 774, 3093, 391, 2391, 1899, 810, 172, 709, 645,
584, 10, 2142, 1562, 549, 431, 489, 1254, 1249, 1733, 2775, 2455, 531, 413, 300,
320, 521, 1184, 438, 564, 630, 500, 655, 530, 1028, 1575, 362, 530, 642, 526,
542, 13, 638, 751, 830, 783, 628, 533, 599, 224, 1581, 1174, 338, 1231, 223,
1234, 9, 19775, 2639, 1304, 754, 2010, 132, 355, 626, 717, 3014, 2053, 1667, 936,
512, 249, 523, 604, 682, 1285, 626, 2203, 1537, 494, 462, 428, 3209, 890, 410,
1317, 3257, 614, 2740, 265, 771, 740, 1004, 919, 543, 444, 1062, 834, 152, 1275,
558, 2205, 428, 446, 264, 146, 503, 463, 283, 143, 816, 943, 1014, 556, 864,
691, 648, 805, 1546, 530, 996, 1370, 1277, 506, 2140, 449, 1283, 571, 3348, 2436,
158, 514, 1278, 537, 733, 553, 1005, 773, 565, 147, 1298, 117, 1926, 2095, 319,
329, 3376, 539, 1495, 14459, 116, 293, 393, 143, 248, 146, 866, 1645, 1178, 208,
13427, 1796, 461, 3538, 763, 837, 1399, 3307, 1304, 403, 585, 182, 439, 2393, 368,
341, 385, 2114, 2919, 160, 280, 1679, 252, 1545, 919, 359, 721, 1372, 489, 451,
1447, 684, 2414, 118, 1477, 883, 771, 1289, 655, 1975, 5604, 2918, 724, 95, 1046,
906, 478, 412, 1688, 3305, 1063, 1115, 1518, 973, 771, 382, 1672, 1062, 3910, 1522,
828, 346, 285, 3023, 2133, 1777, 1279, 5463, 3587, 1036, 1551, 694, 872, 150, 587,
180, 237, 2646, 770, 3438, 1184, 859, 2672, 1377, 176, 419, 164, 1342, 1428, 605,
743, 1060, 1451, 1193, 3126, 567, 999, 492, 562, 1027, 534, 1135, 401, 589, 767,
849, 684, 619, 761, 701, 1707, 2210, 735, 740, 388, 2665, 1088, 426, 2530, 382,
589, 591, 1733, 1124, 1176, 290, 1247, 2854, 534, 2120, 885, 617, 526, 394, 1266,
3110, 9868, 822, 1551, 790, 519, 812, 721, 594, 935, 586, 1323, 700, 1632, 1084,
932, 153, 764, 1165, 3121, 212, 178, 1362, 603, 258, 1225, 10, 465, 538, 518,
2947, 775, 398, 2364, 306, 857, 389, 5339, 1869, 2242, 460, 146, 1045, 490, 781,
1487, 1945, 520, 1417, 1674, 199, 1897, 2460, 941, 1446, 910, 683, 1056, 1243, 555,
231, 752, 500, 479, 839, 1301, 1096, 226, 506, 404, 148, 1490, 776, 402, 974,
965, 781, 464, 1117, 993, 433, 1060, 703, 1079, 1082, 528, 436, 392, 684, 820,
10553, 326, 1150, 792, 318, 929, 2275, 2012, 520, 2732, 1024, 808, 441, 218, 510,
961, 1940, 2418, 948, 213, 1691, 1178, 635, 170, 658, 2494, 1190, 2917, 2652, 363,
2101, 1409, 984, 3725, 519, 420, 964, 1555, 1177, 1180, 406, 363, 484, 1652, 2729,
642, 489, 1539, 549, 1214, 248, 1805, 679, 12352, 727, 224, 1198, 3077, 965, 848,
348, 226, 453, 349, 493, 1134, 411, 442, 1378, 3752, 982, 938, 1483, 793, 781,
514, 521, 1755, 2093, 440, 2101, 4215, 1004, 578, 2544, 1777, 622, 266, 9, 93,
783, 179, 578, 3655, 675, 717, 884, 2339, 328, 1070, 1450, 1171, 1940, 1247, 2496,
3164, 362, 786, 1903, 1606, 1428, 3027, 135, 268, 9, 8, 372, 863, 499, 233,
1732, 337, 1116, 1152, 813, 359, 2944, 893, 261, 620, 375, 2891, 391, 2858, 1569,
499, 2672, 601, 883, 92, 954, 522, 517, 1831, 637, 670, 1163, 487, 459, 1246,
685, 741, 906, 880, 2245, 1805, 579, 1077, 693, 727, 708, 1301, 10, 1470, 351,
2872, 744, 129, 1852, 657, 1403, 869, 460, 4478, 2549, 437, 873, 719, 3370, 552,
1075, 1586, 1271, 152, 1303, 10, 8, 230, 1105, 837, 450, 206, 735, 747, 562,
1039, 2065, 1894, 115, 976, 1180, 3171, 693, 801, 1199, 690, 519, 592, 147, 2180,
726, 1457, 759, 392, 1068, 1934, 398, 601, 669, 458, 918, 466, 861, 481, 1786,
22603, 483, 2211, 633, 597, 631, 481, 746, 1118, 476, 439, 466, 327, 1104, 825,
305, 17, 1141, 453, 3389, 324, 782, 866, 2637, 657, 468, 3432, 474, 1046, 493,
2082, 557, 588, 445, 293, 427, 622, 1902, 3047, 10, 337, 899, 489, 295, 1041,
837, 831, 1065, 314, 1147, 502, 1013, 4753, 908, 1007, 1449, 762, 181, 1529, 908,
3910, 471, 2920, 397, 668, 922, 493, 3339, 1030, 1270, 1805, 613, 847, 601, 893,
508, 114, 2248, 2936, 2297, 289, 830, 578, 360, 4733, 536, 2042, 1998, 875, 477,
479, 592, 205, 1574, 1305, 849, 660, 4853, 2138, 404, 514, 1210, 1130, 1268, 629,
236, 308, 464, 2297, 221, 964, 1185, 710, 4388, 464, 866, 575, 1417, 4019, 1189,
1296, 835, 186, 886, 2276, 890, 1168, 3468, 767, 1283, 394, 736, 530, 699, 398,
1830, 1833, 680, 1324, 3636, 1922, 230, 480, 1949, 844, 1388, 519, 1062, 681, 673,
942, 359, 458, 639, 1139, 1267, 686, 1771, 1452, 94, 973, 530, 1134, 1186, 512,
1254, 412, 456, 1710, 960, 650, 197, 1283, 791, 105, 1521, 1196, 396, 3583, 1619,
2497, 1027, 593, 744, 1191, 715, 442, 1425, 1112, 580, 1158, 2397, 622, 2720, 704,
887, 544, 4209, 607, 5361, 505, 678, 482, 933, 1359, 602, 1547, 80, 9, 2596,
715, 1823, 1730, 1842, 146, 966, 454, 1014, 2916, 552, 1174, 915, 275, 1429, 1850,
309, 553, 372, 492, 619, 1405, 1772, 204, 975, 584, 3240, 3176, 3573, 3409, 769,
436, 2069, 103, 338, 553, 1118, 1338, 1353, 635, 572, 340, 645, 2607, 1128, 580,
2349, 1063, 2740, 12, 202, 570, 1162, 1326, 1973, 205, 693, 2568, 340, 711, 1230,
115, 558, 3503, 201, 759, 1859, 919, 601, 657, 1333, 229, 2940, 463, 1045, 498,
736, 741, 633, 532, 201, 886, 1001, 917, 164, 1064, 2585, 2512, 1078, 809, 1953,
2231, 550, 1044, 1247, 1404, 759, 808, 1079, 771, 547, 1161, 1349, 509, 534, 239,
675, 580, 560, 507, 952, 408, 522, 1959, 1776, 1836, 253, 3580, 2106, 4104, 486,
678, 1723, 1535, 1369, 802, 473, 236, 1918, 668, 895, 1037, 688, 1114, 594, 1038,
817, 258, 973, 2231, 1193, 1117, 449, 728, 482, 3180, 1107, 3624, 1041, 666, 973,
475, 1083, 254, 650, 373, 2535, 666, 3343, 713, 998, 374, 204, 581, 503, 726,
293, 1585, 1038, 480, 451, 2074, 600, 522, 761, 656, 332, 638, 815, 734, 1358,
506, 1846, 324, 364, 538, 452, 479, 667, 2720, 240, 734, 466, 609, 352, 305,
83, 13569, 247, 1061, 163, 1886, 1125, 634, 1647, 419, 219, 2116, 1123, 1094, 523,
504, 125, 1951, 2731, 1183, 2495, 461, 189, 722, 1652, 2321, 2597, 1040, 662, 2841,
323, 69, 999, 659, 915, 701, 881, 492, 1148, 2408, 1623, 1612, 1015, 478, 2229,
1368, 1006, 1643, 1780, 1942, 806, 3176, 458, 1711, 11, 1100, 790, 1500, 591, 519,
2294, 1718, 358, 733, 427, 509, 609, 1305, 579, 2443, 2342, 2869, 1490, 939, 628,
10, 10, 108, 1048, 1815, 233, 691, 1071, 1348, 1995, 16, 667, 373, 2780, 10,
607, 1197, 696, 1715, 1051, 2094, 2801, 1204, 606, 3048, 1523, 856, 1295, 175, 906,
445, 930, 6525, 590, 659, 626, 2403, 630, 886, 556, 87, 1515, 408, 1820, 437,
366, 690, 683, 373, 2664, 4202, 709, 1035, 677, 3500, 442, 1005, 632, 582, 3749,
597, 790, 1137, 1652, 2091, 372, 1325, 227, 190, 249, 441, 1839, 1046, 607, 776,
534, 1502, 10, 669, 827, 567, 1765, 744, 2889, 489, 486, 447, 503, 511, 9,
857, 3319, 1119, 10, 4683, 797, 224, 441, 10990, 984, 850, 653, 3790, 1105, 4321,
940, 686, 910, 260, 1393, 266, 923, 3213, 14929, 1525, 2679, 672, 964, 226, 268,
897, 2579, 3039, 941, 623, 702, 1585, 91, 2207, 985, 759, 859, 2541, 1208, 539,
2264, 1033, 823, 953, 421, 934, 496, 1717, 455, 653, 1833, 699, 626, 651, 1206,
102, 2865, 453, 137, 631, 513, 183, 561, 727, 606, 2350, 467, 1519, 1089, 1270,
349, 1649, 560, 576, 934, 924, 294, 366, 2666, 498, 411, 913, 707, 262, 419,
1003, 543, 475, 1169, 152, 217, 521, 221, 2239, 952, 526, 514, 2414, 387, 771,
739, 1600, 503, 123, 948, 2078, 390, 1675, 563, 1470, 4583, 537, 2501, 557, 3184,
589, 503, 1853, 2247, 2131, 2687, 621, 2180, 760, 977, 698, 2333, 1849, 12, 6816,
1042, 3926, 2414, 158, 361, 278, 2074, 1204, 1812, 918, 441, 974, 1803);
static ArrayList<Long> case1Latencies = new ArrayList<Long>();
static {
for (int latency : case1IntLatencies) {
case1Latencies.add((long)latency);
}
}
static ArrayList<Long> case2Latencies = new ArrayList<Long>();
static {
for (long i = 1; i <= 10000; i++) {
case2Latencies.add(1000 * i);
}
}
static ArrayList<Long> case3Latencies = new ArrayList<Long>();
static {
for (long i = 1; i <= 100000; i++) {
case3Latencies.add(1000 * i);
}
}
static ArrayList<Long> sparsed1Latencies = new ArrayList<Long>();
static {
for (long i = 1; i <= 5; i++) {
sparsed1Latencies.add((long) Math.pow(i, i));
}
}
static ArrayList<Long> sparsed2Latencies = new ArrayList<Long>();
static {
for (long i = 1; i <= 8; i++) {
sparsed2Latencies.add((long) Math.pow(i, i));
}
}
static ArrayList<Long> quadratic = new ArrayList<Long>();
static {
for (long i = 1; i <= 10000; i++) {
long value = (long)Math.pow(i, 2);
if (value < Integer.MAX_VALUE) {
quadratic.add(value);
}
}
}
static ArrayList<Long> cubic = new ArrayList<Long>();
static {
for (long i = 1; i <= 10000; i++) {
long value = (long)Math.pow(i, 3);
if (value < Integer.MAX_VALUE) {
cubic.add(value);
}
}
}
static ArrayList<Long> case1PlusSparsed2 = new ArrayList<Long>();
static {
case1PlusSparsed2.addAll(case1Latencies);
case1PlusSparsed2.addAll(sparsed2Latencies);
}
static ArrayList<Long> longestjHiccupLine = new ArrayList<Long>();
static ArrayList<Long> shortestjHiccupLine = new ArrayList<Long>();
static ArrayList<Long> sumOfjHiccupLines = new ArrayList<Long>();
static {
InputStream readerStream = HistogramData.class.getResourceAsStream("jHiccup-2.0.6.logV1.hlog");
HistogramLogReader reader = new HistogramLogReader(readerStream);
Histogram histogram;
long maxCount = 0;
long minCount = Long.MAX_VALUE;
Histogram longestHistogram = null;
Histogram shortestHistogram = null;
        Histogram accumulatedHistogram = new Histogram(3);
while ((histogram = (Histogram) reader.nextIntervalHistogram()) != null) {
if (histogram.getTotalCount() == 0) {
continue;
}
if (histogram.getTotalCount() > maxCount) {
longestHistogram = histogram;
maxCount = histogram.getTotalCount();
}
if (histogram.getTotalCount() < minCount) {
shortestHistogram = histogram;
minCount = histogram.getTotalCount();
}
            accumulatedHistogram.add(histogram);
}
if (longestHistogram != null) {
for (HistogramIterationValue v : longestHistogram.recordedValues()) {
for (long i = 0; i < v.getCountAtValueIteratedTo(); i++) {
longestjHiccupLine.add(v.getValueIteratedTo());
}
}
}
if (shortestHistogram != null) {
for (HistogramIterationValue v : shortestHistogram.recordedValues()) {
for (long i = 0; i < v.getCountAtValueIteratedTo(); i++) {
shortestjHiccupLine.add(v.getValueIteratedTo());
}
}
}
        for (HistogramIterationValue v : accumulatedHistogram.recordedValues()) {
for (long i = 0; i < v.getCountAtValueIteratedTo(); i++) {
sumOfjHiccupLines.add(v.getValueIteratedTo());
}
}
}
static final HashMap<String, Iterable<Long>> data = new HashMap<String, Iterable<Long>>();
static {
data.put("case1", case1Latencies);
data.put("case2", case2Latencies);
data.put("case3", case3Latencies);
data.put("sparsed1", sparsed1Latencies);
data.put("sparsed2", sparsed2Latencies);
data.put("case1PlusSparsed2", case1PlusSparsed2);
data.put("quadratic", quadratic);
data.put("cubic", cubic);
data.put("longestjHiccupLine", longestjHiccupLine);
data.put("shortestjHiccupLine", shortestjHiccupLine);
data.put("sumOfjHiccupLines", sumOfjHiccupLines);
}
static {
System.out.println();
for (String seriesName :
new String[] {
"case1", "case2", "case3", "sparsed1", "sparsed2", "quadratic", "cubic",
"case1PlusSparsed2", "longestjHiccupLine", "shortestjHiccupLine", "sumOfjHiccupLines"
}) {
Iterable<Long> latencies = data.get(seriesName);
for (int digits = 2; digits <= 3; digits++) {
Histogram histogram = new Histogram(digits);
SkinnyHistogram skinnyHistogram = new SkinnyHistogram(digits);
for (long latency : latencies) {
histogram.recordValueWithCount(latency, 1);
skinnyHistogram.recordValueWithCount(latency, 1);
}
ByteBuffer buffer = ByteBuffer.allocate(histogram.getNeededByteBufferCapacity());
int histogramBytes = histogram.encodeIntoByteBuffer(buffer);
buffer.rewind();
int histogramCompressedBytes = histogram.encodeIntoCompressedByteBuffer(buffer);
buffer.rewind();
int skinnyBytes = skinnyHistogram.encodeIntoByteBuffer(buffer);
buffer.rewind();
int skinnyCompressBytes = skinnyHistogram.encodeIntoCompressedByteBuffer(buffer);
System.out.format(
"%20s [%1d] (Histogram/Skinny/%%Reduction): " +
"[%6d /%6d /%7.2f%%] %5d /%5d /%7.2f%%\n",
seriesName, digits,
histogramBytes, skinnyBytes,
(100.0 - 100.0 * (histogramBytes / (skinnyBytes * 1.0))),
histogramCompressedBytes, skinnyCompressBytes,
(100.0 - 100.0 * (histogramCompressedBytes / (skinnyCompressBytes * 1.0)))
);
}
}
}
}
| 16,918 | 56.547619 | 109 | java |
null | NearPMSW-main/baseline/logging/YCSB/HdrHistogram/HdrHistogram-benchmarks/src/main/java/bench/HdrHistogramRecordingBench.java | /**
* Written by Gil Tene of Azul Systems, and released to the public domain,
* as explained at http://creativecommons.org/publicdomain/zero/1.0/
*
* @author Gil Tene
*/
package bench;
import org.HdrHistogram.*;
import org.HdrHistogram.AbstractHistogram;
import org.HdrHistogram.Recorder;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import java.lang.reflect.Constructor;
import java.util.Random;
import java.util.concurrent.TimeUnit;
/*
Run all benchmarks:
$ java -jar target/benchmarks.jar
Run selected benchmarks:
$ java -jar target/benchmarks.jar (regexp)
Run the profiling (Linux only):
$ java -Djmh.perfasm.events=cycles,cache-misses -jar target/benchmarks.jar -f 1 -prof perfasm
*/
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(5)
@State(Scope.Thread)
public class HdrHistogramRecordingBench {
static final long highestTrackableValue = 3600L * 1000 * 1000; // e.g. for 1 hr in usec units
static final int numberOfSignificantValueDigits = 3;
static final long testValueLevel = 12340;
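    // Note: the (i++ & 0x800) term used by the benchmark methods below alternates the recorded
    // value between testValueLevel and testValueLevel + 2048 in runs of 2048 iterations,
    // presumably so that recording does not always hit the same bucket.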
AbstractHistogram histogram;
AbstractHistogram synchronizedHistogram;
AbstractHistogram atomicHistogram;
AbstractHistogram concurrentHistogram;
Recorder recorder;
SingleWriterRecorder singleWriterRecorder;
DoubleHistogram doubleHistogram;
DoubleRecorder doubleRecorder;
SingleWriterDoubleRecorder singleWriterDoubleRecorder;
int i;
@Setup
public void setup() throws NoSuchMethodException {
histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits);
synchronizedHistogram = new SynchronizedHistogram(highestTrackableValue, numberOfSignificantValueDigits);
atomicHistogram = new AtomicHistogram(highestTrackableValue, numberOfSignificantValueDigits);
concurrentHistogram = new ConcurrentHistogram(highestTrackableValue, numberOfSignificantValueDigits);
recorder = new Recorder(highestTrackableValue, numberOfSignificantValueDigits);
singleWriterRecorder = new SingleWriterRecorder(highestTrackableValue, numberOfSignificantValueDigits);
doubleHistogram = new DoubleHistogram(highestTrackableValue, numberOfSignificantValueDigits);
doubleRecorder = new DoubleRecorder(highestTrackableValue, numberOfSignificantValueDigits);
singleWriterDoubleRecorder = new SingleWriterDoubleRecorder(highestTrackableValue, numberOfSignificantValueDigits);
}
@Benchmark
public void rawRecordingSpeed() {
histogram.recordValue(testValueLevel + (i++ & 0x800));
}
@Benchmark
    public void rawSynchronizedRecordingSpeed() {
synchronizedHistogram.recordValue(testValueLevel + (i++ & 0x800));
}
@Benchmark
public void rawAtomicRecordingSpeed() {
atomicHistogram.recordValue(testValueLevel + (i++ & 0x800));
}
@Benchmark
public void rawConcurrentRecordingSpeed() {
concurrentHistogram.recordValue(testValueLevel + (i++ & 0x800));
}
@Benchmark
public void singleWriterRecorderRecordingSpeed() {
        singleWriterRecorder.recordValue(testValueLevel + (i++ & 0x800));
}
@Benchmark
public void recorderRecordingSpeed() {
        recorder.recordValue(testValueLevel + (i++ & 0x800));
}
@Benchmark
public void rawDoubleRecordingSpeed() {
        doubleHistogram.recordValue(testValueLevel + (i++ & 0x800));
}
@Benchmark
public void doubleRecorderRecordingSpeed() {
        doubleRecorder.recordValue(testValueLevel + (i++ & 0x800));
}
@Benchmark
public void singleWriterDoubleRecorderRecordingSpeed() {
singleWriterDoubleRecorder.recordValue(testValueLevel + (i++ & 0x800));
}
}
| 4,046 | 32.725 | 123 | java |
null | NearPMSW-main/baseline/logging/YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/package-info.java | /**
* Copyright (c) 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="https://www.arangodb.com/">ArangoDB</a>.
*/
package site.ycsb.db.arangodb;
| 773 | 32.652174 | 73 | java |
null | NearPMSW-main/baseline/logging/YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java | /**
* Copyright (c) 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.arangodb;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicInteger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.arangodb.ArangoCursor;
import com.arangodb.ArangoDB;
import com.arangodb.ArangoDBException;
import com.arangodb.Protocol;
import com.arangodb.entity.BaseDocument;
import com.arangodb.model.DocumentCreateOptions;
import com.arangodb.model.TransactionOptions;
import com.arangodb.util.MapBuilder;
import com.arangodb.velocypack.VPackBuilder;
import com.arangodb.velocypack.VPackSlice;
import com.arangodb.velocypack.ValueType;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
/**
* ArangoDB binding for YCSB framework using the ArangoDB Inc. <a
* href="https://github.com/arangodb/arangodb-java-driver">driver</a>
* <p>
* See the <code>README.md</code> for configuration information.
* </p>
*
* @see <a href="https://github.com/arangodb/arangodb-java-driver">ArangoDB Inc.
* driver</a>
*/
public class ArangoDBClient extends DB {
private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class);
/**
* Count the number of times initialized to teardown on the last
* {@link #cleanup()}.
*/
private static final AtomicInteger INIT_COUNT = new AtomicInteger(0);
/** ArangoDB Driver related, Singleton. */
private ArangoDB arangoDB;
private String databaseName = "ycsb";
private String collectionName;
private Boolean dropDBBeforeRun;
private Boolean waitForSync = false;
private Boolean transactionUpdate = false;
/**
* Initialize any state for this DB. Called once per DB instance; there is
* one DB instance per client thread.
*
   * In practice, one client process will share one DB instance here
   * (coinciding with the MongoDB driver binding).
*/
@Override
public void init() throws DBException {
synchronized (ArangoDBClient.class) {
Properties props = getProperties();
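      // Configuration keys read below, with their default values (shown here for illustration):
      //   table=usertable
      //   arangodb.ip=localhost, arangodb.port=8529, arangodb.protocol=VST
      //   arangodb.dropDBBeforeRun=false, arangodb.waitForSync=false, arangodb.transactionUpdate=false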
collectionName = props.getProperty("table", "usertable");
// Set the DB address
String ip = props.getProperty("arangodb.ip", "localhost");
String portStr = props.getProperty("arangodb.port", "8529");
int port = Integer.parseInt(portStr);
// Set network protocol
String protocolStr = props.getProperty("arangodb.protocol", "VST");
Protocol protocol = Protocol.valueOf(protocolStr);
// If clear db before run
String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false");
dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr);
// Set the sync mode
String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false");
waitForSync = Boolean.parseBoolean(waitForSyncStr);
// Set if transaction for update
String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false");
transactionUpdate = Boolean.parseBoolean(transactionUpdateStr);
// Init ArangoDB connection
try {
arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build();
} catch (Exception e) {
logger.error("Failed to initialize ArangoDB", e);
System.exit(-1);
}
      if (INIT_COUNT.getAndIncrement() == 0) {
// Init the database
if (dropDBBeforeRun) {
// Try delete first
try {
arangoDB.db(databaseName).drop();
} catch (ArangoDBException e) {
logger.info("Fail to delete DB: {}", databaseName);
}
}
try {
arangoDB.createDatabase(databaseName);
logger.info("Database created: " + databaseName);
} catch (ArangoDBException e) {
logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString());
}
try {
arangoDB.db(databaseName).createCollection(collectionName);
logger.info("Collection created: " + collectionName);
} catch (ArangoDBException e) {
logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString());
}
logger.info("ArangoDB client connection created to {}:{}", ip, port);
// Log the configuration
logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};"
+ " waitForSync: {}; transactionUpdate: {};",
dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate);
}
}
}
/**
* Cleanup any state for this DB. Called once per DB instance; there is one
* DB instance per client thread.
*
   * In practice, one client process will share one DB instance here
   * (coinciding with the MongoDB driver binding).
*/
@Override
public void cleanup() throws DBException {
if (INIT_COUNT.decrementAndGet() == 0) {
arangoDB.shutdown();
arangoDB = null;
logger.info("Local cleaned up.");
}
}
/**
* Insert a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key.
*
* @param table
* The name of the table
* @param key
* The record key of the record to insert.
* @param values
* A HashMap of field/value pairs to insert in the record
* @return Zero on success, a non-zero error code on error. See the
* {@link DB} class's description for a discussion of error codes.
*/
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
try {
BaseDocument toInsert = new BaseDocument(key);
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue()));
}
DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync);
arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options);
return Status.OK;
} catch (ArangoDBException e) {
logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString());
}
return Status.ERROR;
}
/**
* Read a record from the database. Each field/value pair from the result
* will be stored in a HashMap.
*
* @param table
* The name of the table
* @param key
* The record key of the record to read.
* @param fields
* The list of fields to read, or null for all of them
* @param result
* A HashMap of field/value pairs for the result
* @return Zero on success, a non-zero error code on error or "not found".
*/
@Override
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
try {
VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null);
if (!this.fillMap(result, document, fields)) {
return Status.ERROR;
}
return Status.OK;
} catch (ArangoDBException e) {
logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString());
}
return Status.ERROR;
}
/**
* Update a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key, overwriting any existing values with the same field name.
*
* @param table
* The name of the table
* @param key
* The record key of the record to write.
* @param values
* A HashMap of field/value pairs to update in the record
* @return Zero on success, a non-zero error code on error. See this class's
* description for a discussion of error codes.
*/
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
try {
if (!transactionUpdate) {
BaseDocument updateDoc = new BaseDocument();
for (Entry<String, ByteIterator> field : values.entrySet()) {
updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue()));
}
arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc);
return Status.OK;
} else {
// id for documentHandle
String transactionAction = "function (id) {"
// use internal database functions
+ "var db = require('internal').db;"
// collection.update(document, data, overwrite, keepNull, waitForSync)
+ String.format("db._update(id, %s, true, false, %s);}",
mapToJson(values), Boolean.toString(waitForSync).toLowerCase());
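        // With hypothetical field/value pairs, the action string above renders roughly as (one line):
        //   function (id) {var db = require('internal').db;
        //                  db._update(id, {"field0":"value0"}, true, false, false);}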
TransactionOptions options = new TransactionOptions();
options.writeCollections(table);
options.params(createDocumentHandle(table, key));
arangoDB.db(databaseName).transaction(transactionAction, Void.class, options);
return Status.OK;
}
} catch (ArangoDBException e) {
logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString());
}
return Status.ERROR;
}
/**
* Delete a record from the database.
*
* @param table
* The name of the table
* @param key
* The record key of the record to delete.
* @return Zero on success, a non-zero error code on error. See the
* {@link DB} class's description for a discussion of error codes.
*/
@Override
public Status delete(String table, String key) {
try {
arangoDB.db(databaseName).collection(table).deleteDocument(key);
return Status.OK;
} catch (ArangoDBException e) {
logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString());
}
return Status.ERROR;
}
/**
* Perform a range scan for a set of records in the database. Each
* field/value pair from the result will be stored in a HashMap.
*
* @param table
* The name of the table
* @param startkey
* The record key of the first record to read.
* @param recordcount
* The number of records to read
* @param fields
* The list of fields to read, or null for all of them
* @param result
* A Vector of HashMaps, where each HashMap is a set field/value
* pairs for one record
* @return Zero on success, a non-zero error code on error. See the
* {@link DB} class's description for a discussion of error codes.
*/
@Override
public Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
ArangoCursor<VPackSlice> cursor = null;
try {
String aqlQuery = String.format(
"FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table,
recordcount, constructReturnForAQL(fields, "target"));
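      // With the default table and no field projection this expands to something like
      // (the LIMIT value is illustrative):
      //   FOR target IN usertable FILTER target._key >= @key SORT target._key ASC LIMIT 1000 RETURN target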
Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get();
cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class);
while (cursor.hasNext()) {
VPackSlice aDocument = cursor.next();
HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size());
if (!this.fillMap(aMap, aDocument)) {
return Status.ERROR;
}
result.add(aMap);
}
return Status.OK;
} catch (Exception e) {
logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString());
} finally {
if (cursor != null) {
try {
cursor.close();
} catch (IOException e) {
logger.error("Fail to close cursor", e);
}
}
}
return Status.ERROR;
}
private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException {
validateCollectionName(collection);
return collection + "/" + documentKey;
}
private void validateCollectionName(String name) throws ArangoDBException {
if (name.indexOf('/') != -1) {
throw new ArangoDBException("does not allow '/' in name.");
}
}
private String constructReturnForAQL(Set<String> fields, String targetName) {
// Construct the AQL query string.
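    // For example, fields {field0, field1} with targetName "target" yield roughly:
    //   { "field0" : target.field0, "field1" : target.field1 }
    // while a null or empty field set returns the whole document via "target".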
String resultDes = targetName;
if (fields != null && fields.size() != 0) {
StringBuilder builder = new StringBuilder("{");
for (String field : fields) {
builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field));
}
      // Replace the last ',' with a newline.
builder.setCharAt(builder.length() - 1, '\n');
builder.append("}");
resultDes = builder.toString();
}
return resultDes;
}
private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) {
return fillMap(resultMap, document, null);
}
/**
* Fills the map with the properties from the BaseDocument.
*
   * @param resultMap
   *            The map to fill.
* @param document
* The record to read from
* @param fields
* The list of fields to read, or null for all of them
* @return isSuccess
*/
private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) {
if (fields == null || fields.size() == 0) {
for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) {
Entry<String, VPackSlice> next = iterator.next();
VPackSlice value = next.getValue();
if (value.isString()) {
resultMap.put(next.getKey(), stringToByteIterator(value.getAsString()));
} else if (!value.isCustom()) {
logger.error("Error! Not the format expected! Actually is {}",
value.getClass().getName());
return false;
}
}
} else {
for (String field : fields) {
VPackSlice value = document.get(field);
if (value.isString()) {
resultMap.put(field, stringToByteIterator(value.getAsString()));
} else if (!value.isCustom()) {
logger.error("Error! Not the format expected! Actually is {}",
value.getClass().getName());
return false;
}
}
}
return true;
}
private String byteIteratorToString(ByteIterator byteIter) {
return new String(byteIter.toArray());
}
private ByteIterator stringToByteIterator(String content) {
return new StringByteIterator(content);
}
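  // mapToJson (below) builds a VelocyPack object from the field map and converts it to its JSON
  // string form, e.g. {"field0":"value0","field1":"value1"} (illustrative example).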
private String mapToJson(Map<String, ByteIterator> values) {
VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT);
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
builder.add(entry.getKey(), byteIteratorToString(entry.getValue()));
}
builder.close();
return arangoDB.util().deserialize(builder.slice(), String.class);
}
}
| 15,726 | 35.405093 | 114 | java |
null | NearPMSW-main/baseline/logging/YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/package-info.java | /*
* Copyright (c) 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for Google's <a href="https://cloud.google.com/spanner/">
* Cloud Spanner</a>.
*/
package site.ycsb.db.cloudspanner;
| 801 | 33.869565 | 77 | java |
null | NearPMSW-main/baseline/logging/YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java | /**
* Copyright (c) 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.cloudspanner;
import com.google.common.base.Joiner;
import com.google.cloud.spanner.DatabaseId;
import com.google.cloud.spanner.DatabaseClient;
import com.google.cloud.spanner.Key;
import com.google.cloud.spanner.KeySet;
import com.google.cloud.spanner.KeyRange;
import com.google.cloud.spanner.Mutation;
import com.google.cloud.spanner.Options;
import com.google.cloud.spanner.ResultSet;
import com.google.cloud.spanner.SessionPoolOptions;
import com.google.cloud.spanner.Spanner;
import com.google.cloud.spanner.SpannerOptions;
import com.google.cloud.spanner.Statement;
import com.google.cloud.spanner.Struct;
import com.google.cloud.spanner.StructReader;
import com.google.cloud.spanner.TimestampBound;
import site.ycsb.ByteIterator;
import site.ycsb.Client;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.workloads.CoreWorkload;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.concurrent.TimeUnit;
/**
* YCSB Client for Google's Cloud Spanner.
*/
public class CloudSpannerClient extends DB {
/**
* The names of properties which can be specified in the config files and flags.
*/
public static final class CloudSpannerProperties {
private CloudSpannerProperties() {}
/**
* The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'.
*/
static final String DATABASE = "cloudspanner.database";
/**
* The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'.
*/
static final String INSTANCE = "cloudspanner.instance";
/**
* Choose between 'read' and 'query'. Affects both read() and scan() operations.
*/
static final String READ_MODE = "cloudspanner.readmode";
/**
* The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching
* is done. Recommended value during data load is 1000.
*/
static final String BATCH_INSERTS = "cloudspanner.batchinserts";
/**
* Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default).
* For performance gains, this should be set to 10 seconds.
*/
static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness";
// The properties below usually do not need to be set explicitly.
/**
* The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly
* necessary and can often be inferred from the environment.
*/
static final String PROJECT = "cloudspanner.project";
/**
* The Cloud Spanner host name to use in the YCSB run.
*/
static final String HOST = "cloudspanner.host";
/**
     * Number of Cloud Spanner client channels to use. It's recommended to leave this at the default value.
*/
static final String NUM_CHANNELS = "cloudspanner.channels";
}
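  // Illustrative YCSB invocation overriding some of the properties above (instance, database and
  // batch size values are examples only):
  //   ./bin/ycsb load cloudspanner -P workloads/workloada \
  //     -p cloudspanner.instance=ycsb-instance -p cloudspanner.database=ycsb-database \
  //     -p cloudspanner.batchinserts=1000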
private static int fieldCount;
private static boolean queriesForReads;
private static int batchInserts;
private static TimestampBound timestampBound;
private static String standardQuery;
private static String standardScan;
private static final ArrayList<String> STANDARD_FIELDS = new ArrayList<>();
private static final String PRIMARY_KEY_COLUMN = "id";
private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName());
// Static lock for the class.
private static final Object CLASS_LOCK = new Object();
// Single Spanner client per process.
private static Spanner spanner = null;
// Single database client per process.
private static DatabaseClient dbClient = null;
// Buffered mutations on a per object/thread basis for batch inserts.
// Note that we have a separate CloudSpannerClient object per thread.
private final ArrayList<Mutation> bufferedMutations = new ArrayList<>();
private static void constructStandardQueriesAndFields(Properties properties) {
String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT);
final String fieldprefix = properties.getProperty(CoreWorkload.FIELD_NAME_PREFIX,
CoreWorkload.FIELD_NAME_PREFIX_DEFAULT);
standardQuery = new StringBuilder()
.append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString();
standardScan = new StringBuilder()
.append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString();
for (int i = 0; i < fieldCount; i++) {
STANDARD_FIELDS.add(fieldprefix + i);
}
}
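  // With the default table name "usertable", the statements built above are (illustrative):
  //   standardQuery: SELECT * FROM usertable WHERE id=@key
  //   standardScan:  SELECT * FROM usertable WHERE id>=@startKey LIMIT @count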
private static Spanner getSpanner(Properties properties, String host, String project) {
if (spanner != null) {
return spanner;
}
String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS);
int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1"));
SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder()
.setSessionPoolOption(SessionPoolOptions.newBuilder()
.setMinSessions(numThreads)
// Since we have no read-write transactions, we can set the write session fraction to 0.
.setWriteSessionsFraction(0)
.build());
if (host != null) {
optionsBuilder.setHost(host);
}
if (project != null) {
optionsBuilder.setProjectId(project);
}
if (numChannels != null) {
optionsBuilder.setNumChannels(Integer.parseInt(numChannels));
}
spanner = optionsBuilder.build().getService();
Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") {
@Override
public void run() {
spanner.close();
}
});
return spanner;
}
@Override
public void init() throws DBException {
synchronized (CLASS_LOCK) {
if (dbClient != null) {
return;
}
Properties properties = getProperties();
String host = properties.getProperty(CloudSpannerProperties.HOST);
String project = properties.getProperty(CloudSpannerProperties.PROJECT);
String instance = properties.getProperty(CloudSpannerProperties.INSTANCE, "ycsb-instance");
String database = properties.getProperty(CloudSpannerProperties.DATABASE, "ycsb-database");
fieldCount = Integer.parseInt(properties.getProperty(
CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT));
queriesForReads = properties.getProperty(CloudSpannerProperties.READ_MODE, "query").equals("query");
batchInserts = Integer.parseInt(properties.getProperty(CloudSpannerProperties.BATCH_INSERTS, "1"));
constructStandardQueriesAndFields(properties);
int boundedStalenessSeconds = Integer.parseInt(properties.getProperty(
CloudSpannerProperties.BOUNDED_STALENESS, "0"));
timestampBound = (boundedStalenessSeconds <= 0) ?
TimestampBound.strong() : TimestampBound.ofMaxStaleness(boundedStalenessSeconds, TimeUnit.SECONDS);
try {
spanner = getSpanner(properties, host, project);
if (project == null) {
project = spanner.getOptions().getProjectId();
}
dbClient = spanner.getDatabaseClient(DatabaseId.of(project, instance, database));
} catch (Exception e) {
LOGGER.log(Level.SEVERE, "init()", e);
throw new DBException(e);
}
LOGGER.log(Level.INFO, new StringBuilder()
.append("\nHost: ").append(spanner.getOptions().getHost())
.append("\nProject: ").append(project)
.append("\nInstance: ").append(instance)
.append("\nDatabase: ").append(database)
.append("\nUsing queries for reads: ").append(queriesForReads)
.append("\nBatching inserts: ").append(batchInserts)
.append("\nBounded staleness seconds: ").append(boundedStalenessSeconds)
.toString());
}
}
private Status readUsingQuery(
String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
Statement query;
Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields;
if (fields == null || fields.size() == fieldCount) {
query = Statement.newBuilder(standardQuery).bind("key").to(key).build();
} else {
Joiner joiner = Joiner.on(',');
query = Statement.newBuilder("SELECT ")
.append(joiner.join(fields))
.append(" FROM ")
.append(table)
.append(" WHERE id=@key")
.bind("key").to(key)
.build();
}
try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) {
resultSet.next();
decodeStruct(columns, resultSet, result);
if (resultSet.next()) {
throw new Exception("Expected exactly one row for each read.");
}
return Status.OK;
} catch (Exception e) {
LOGGER.log(Level.INFO, "readUsingQuery()", e);
return Status.ERROR;
}
}
@Override
public Status read(
String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
if (queriesForReads) {
return readUsingQuery(table, key, fields, result);
}
Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields;
try {
Struct row = dbClient.singleUse(timestampBound).readRow(table, Key.of(key), columns);
decodeStruct(columns, row, result);
return Status.OK;
} catch (Exception e) {
LOGGER.log(Level.INFO, "read()", e);
return Status.ERROR;
}
}
private Status scanUsingQuery(
String table, String startKey, int recordCount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields;
Statement query;
if (fields == null || fields.size() == fieldCount) {
query = Statement.newBuilder(standardScan).bind("startKey").to(startKey).bind("count").to(recordCount).build();
} else {
Joiner joiner = Joiner.on(',');
query = Statement.newBuilder("SELECT ")
.append(joiner.join(fields))
.append(" FROM ")
.append(table)
.append(" WHERE id>=@startKey LIMIT @count")
.bind("startKey").to(startKey)
.bind("count").to(recordCount)
.build();
}
try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) {
while (resultSet.next()) {
HashMap<String, ByteIterator> row = new HashMap<>();
decodeStruct(columns, resultSet, row);
result.add(row);
}
return Status.OK;
} catch (Exception e) {
LOGGER.log(Level.INFO, "scanUsingQuery()", e);
return Status.ERROR;
}
}
@Override
public Status scan(
String table, String startKey, int recordCount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
if (queriesForReads) {
return scanUsingQuery(table, startKey, recordCount, fields, result);
}
Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields;
KeySet keySet =
KeySet.newBuilder().addRange(KeyRange.closedClosed(Key.of(startKey), Key.of())).build();
try (ResultSet resultSet = dbClient.singleUse(timestampBound)
.read(table, keySet, columns, Options.limit(recordCount))) {
while (resultSet.next()) {
HashMap<String, ByteIterator> row = new HashMap<>();
decodeStruct(columns, resultSet, row);
result.add(row);
}
return Status.OK;
} catch (Exception e) {
LOGGER.log(Level.INFO, "scan()", e);
return Status.ERROR;
}
}
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table);
m.set(PRIMARY_KEY_COLUMN).to(key);
for (Map.Entry<String, ByteIterator> e : values.entrySet()) {
m.set(e.getKey()).to(e.getValue().toString());
}
try {
dbClient.writeAtLeastOnce(Arrays.asList(m.build()));
} catch (Exception e) {
LOGGER.log(Level.INFO, "update()", e);
return Status.ERROR;
}
return Status.OK;
}
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
if (bufferedMutations.size() < batchInserts) {
Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table);
m.set(PRIMARY_KEY_COLUMN).to(key);
for (Map.Entry<String, ByteIterator> e : values.entrySet()) {
m.set(e.getKey()).to(e.getValue().toString());
}
bufferedMutations.add(m.build());
} else {
LOGGER.log(Level.INFO, "Limit of cached mutations reached. The given mutation with key " + key +
" is ignored. Is this a retry?");
}
if (bufferedMutations.size() < batchInserts) {
return Status.BATCHED_OK;
}
try {
dbClient.writeAtLeastOnce(bufferedMutations);
bufferedMutations.clear();
} catch (Exception e) {
LOGGER.log(Level.INFO, "insert()", e);
return Status.ERROR;
}
return Status.OK;
}
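  // Note on insert() above: mutations are buffered per client thread and reported as BATCHED_OK
  // until "cloudspanner.batchinserts" of them have accumulated; the whole batch is then flushed
  // with a single writeAtLeastOnce() call, and any remainder is flushed by cleanup().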
@Override
public void cleanup() {
try {
if (bufferedMutations.size() > 0) {
dbClient.writeAtLeastOnce(bufferedMutations);
bufferedMutations.clear();
}
} catch (Exception e) {
LOGGER.log(Level.INFO, "cleanup()", e);
}
}
@Override
public Status delete(String table, String key) {
try {
dbClient.writeAtLeastOnce(Arrays.asList(Mutation.delete(table, Key.of(key))));
} catch (Exception e) {
LOGGER.log(Level.INFO, "delete()", e);
return Status.ERROR;
}
return Status.OK;
}
private static void decodeStruct(
Iterable<String> columns, StructReader structReader, Map<String, ByteIterator> result) {
for (String col : columns) {
result.put(col, new StringByteIterator(structReader.getString(col)));
}
}
}
| 14,857 | 36.145 | 117 | java |
null | NearPMSW-main/baseline/logging/YCSB/ignite/src/test/java/site/ycsb/db/ignite/IgniteClientTestBase.java | /**
* Copyright (c) 2013-2018 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License. See accompanying LICENSE file.
* <p>
*/
package site.ycsb.db.ignite;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import org.apache.ignite.Ignite;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Test;
import java.util.*;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
/**
* Common test class.
*/
public class IgniteClientTestBase {
protected static final String DEFAULT_CACHE_NAME = "usertable";
/** */
protected static Ignite cluster;
/** */
protected DB client;
/**
*
*/
@After
public void tearDown() throws Exception {
client.cleanup();
}
/**
*
*/
@AfterClass
public static void afterClass() {
cluster.close();
try {
Thread.sleep(1000);
}
catch (InterruptedException e) {
e.printStackTrace();
}
}
@Test
public void scanNotImplemented() {
cluster.cache(DEFAULT_CACHE_NAME).clear();
final String key = "key";
final Map<String, String> input = new HashMap<>();
input.put("field0", "value1");
input.put("field1", "value2");
final Status status = client.insert(DEFAULT_CACHE_NAME, key, StringByteIterator.getByteIteratorMap(input));
assertThat(status, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(1));
final Vector<HashMap<String, ByteIterator>> results = new Vector<>();
final Status scan = client.scan(DEFAULT_CACHE_NAME, key, 1, null, results);
assertThat(scan, is(Status.NOT_IMPLEMENTED));
}
}
| 2,236 | 25.630952 | 111 | java |
null | NearPMSW-main/baseline/logging/YCSB/ignite/src/test/java/site/ycsb/db/ignite/IgniteClientTest.java | /**
* Copyright (c) 2018 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.ignite;
import site.ycsb.ByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.measurements.Measurements;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.logger.log4j2.Log4J2Logger;
import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.*;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.is;
/**
* Integration tests for the Ignite client
*/
public class IgniteClientTest extends IgniteClientTestBase {
private final static String HOST = "127.0.0.1";
private final static String PORTS = "47500..47509";
private final static String SERVER_NODE_NAME = "YCSB Server Node";
private static TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true);
@BeforeClass
public static void beforeTest() throws IgniteCheckedException {
IgniteConfiguration igcfg = new IgniteConfiguration();
igcfg.setIgniteInstanceName(SERVER_NODE_NAME);
igcfg.setClientMode(false);
TcpDiscoverySpi disco = new TcpDiscoverySpi();
Collection<String> adders = new LinkedHashSet<>();
adders.add(HOST + ":" + PORTS);
((TcpDiscoveryVmIpFinder) ipFinder).setAddresses(adders);
disco.setIpFinder(ipFinder);
igcfg.setDiscoverySpi(disco);
igcfg.setNetworkTimeout(2000);
CacheConfiguration ccfg = new CacheConfiguration().setName(DEFAULT_CACHE_NAME);
igcfg.setCacheConfiguration(ccfg);
Log4J2Logger logger = new Log4J2Logger(IgniteClientTest.class.getClassLoader().getResource("log4j2.xml"));
igcfg.setGridLogger(logger);
cluster = Ignition.start(igcfg);
    cluster.active(true);
}
@Before
public void setUp() throws Exception {
Properties p = new Properties();
p.setProperty("hosts", HOST);
p.setProperty("ports", PORTS);
Measurements.setProperties(p);
client = new IgniteClient();
client.setProperties(p);
client.init();
}
@Test
public void testInsert() throws Exception {
cluster.cache(DEFAULT_CACHE_NAME).clear();
final String key = "key";
final Map<String, String> input = new HashMap<>();
input.put("field0", "value1");
input.put("field1", "value2");
final Status status = client.insert(DEFAULT_CACHE_NAME, key, StringByteIterator.getByteIteratorMap(input));
assertThat(status, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(1));
}
@Test
public void testDelete() throws Exception {
cluster.cache(DEFAULT_CACHE_NAME).clear();
final String key1 = "key1";
final Map<String, String> input1 = new HashMap<>();
input1.put("field0", "value1");
input1.put("field1", "value2");
final Status status1 = client.insert(DEFAULT_CACHE_NAME, key1, StringByteIterator.getByteIteratorMap(input1));
assertThat(status1, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(1));
final String key2 = "key2";
final Map<String, String> input2 = new HashMap<>();
input2.put("field0", "value1");
input2.put("field1", "value2");
final Status status2 = client.insert(DEFAULT_CACHE_NAME, key2, StringByteIterator.getByteIteratorMap(input2));
assertThat(status2, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(2));
final Status status3 = client.delete(DEFAULT_CACHE_NAME, key2);
assertThat(status3, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(1));
}
@Test
public void testRead() throws Exception {
cluster.cache(DEFAULT_CACHE_NAME).clear();
final String key = "key";
final Map<String, String> input = new HashMap<>();
input.put("field0", "value1");
input.put("field1", "value2A");
input.put("field3", null);
final Status sPut = client.insert(DEFAULT_CACHE_NAME, key, StringByteIterator.getByteIteratorMap(input));
assertThat(sPut, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(1));
final Set<String> fld = new TreeSet<>();
fld.add("field0");
fld.add("field1");
fld.add("field3");
final HashMap<String, ByteIterator> result = new HashMap<>();
final Status sGet = client.read(DEFAULT_CACHE_NAME, key, fld, result);
assertThat(sGet, is(Status.OK));
final HashMap<String, String> strResult = new HashMap<String, String>();
for (final Map.Entry<String, ByteIterator> e : result.entrySet()) {
if (e.getValue() != null) {
strResult.put(e.getKey(), e.getValue().toString());
}
}
assertThat(strResult, hasEntry("field0", "value1"));
assertThat(strResult, hasEntry("field1", "value2A"));
}
@Test
public void testReadAllFields() throws Exception {
cluster.cache(DEFAULT_CACHE_NAME).clear();
final String key = "key";
final Map<String, String> input = new HashMap<>();
input.put("field0", "value1");
input.put("field1", "value2A");
input.put("field3", null);
final Status sPut = client.insert(DEFAULT_CACHE_NAME, key, StringByteIterator.getByteIteratorMap(input));
assertThat(sPut, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(1));
final Set<String> fld = new TreeSet<>();
final HashMap<String, ByteIterator> result1 = new HashMap<>();
final Status sGet = client.read(DEFAULT_CACHE_NAME, key, fld, result1);
assertThat(sGet, is(Status.OK));
final HashMap<String, String> strResult = new HashMap<String, String>();
for (final Map.Entry<String, ByteIterator> e : result1.entrySet()) {
if (e.getValue() != null) {
strResult.put(e.getKey(), e.getValue().toString());
}
}
assertThat(strResult, hasEntry("field0", "value1"));
assertThat(strResult, hasEntry("field1", "value2A"));
}
@Test
public void testReadNotPresent() throws Exception {
cluster.cache(DEFAULT_CACHE_NAME).clear();
final String key = "key";
final Map<String, String> input = new HashMap<>();
input.put("field0", "value1");
input.put("field1", "value2A");
input.put("field3", null);
final Status sPut = client.insert(DEFAULT_CACHE_NAME, key, StringByteIterator.getByteIteratorMap(input));
assertThat(sPut, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(1));
final Set<String> fld = new TreeSet<>();
final String newKey = "newKey";
final HashMap<String, ByteIterator> result1 = new HashMap<>();
final Status sGet = client.read(DEFAULT_CACHE_NAME, newKey, fld, result1);
assertThat(sGet, is(Status.NOT_FOUND));
}
}
| 7,651 | 35.966184 | 114 | java |
null | NearPMSW-main/baseline/logging/YCSB/ignite/src/test/java/site/ycsb/db/ignite/IgniteSqlClientTest.java | /**
* Copyright (c) 2018 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.ignite;
import site.ycsb.ByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.measurements.Measurements;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.QueryEntity;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.logger.log4j2.Log4J2Logger;
import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.is;
/**
* Integration tests for the Ignite client
*/
public class IgniteSqlClientTest extends IgniteClientTestBase {
private static final String TABLE_NAME = "usertable";
private final static String HOST = "127.0.0.1";
private final static String PORTS = "47500..47509";
private final static String SERVER_NODE_NAME = "YCSB Server Node";
private static TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true);
/**
*
*/
@BeforeClass
public static void beforeTest() throws IgniteCheckedException {
IgniteConfiguration igcfg = new IgniteConfiguration();
igcfg.setIgniteInstanceName(SERVER_NODE_NAME);
igcfg.setClientMode(false);
TcpDiscoverySpi disco = new TcpDiscoverySpi();
Collection<String> adders = new LinkedHashSet<>();
adders.add(HOST + ":" + PORTS);
QueryEntity qe = new QueryEntity("java.lang.String", "UserTableType")
.addQueryField("ycsb_key", "java.lang.String", null)
.addQueryField("field0", "java.lang.String", null)
.addQueryField("field1", "java.lang.String", null)
.addQueryField("field2", "java.lang.String", null)
.addQueryField("field3", "java.lang.String", null)
.addQueryField("field4", "java.lang.String", null)
.addQueryField("field5", "java.lang.String", null)
.addQueryField("field6", "java.lang.String", null)
.addQueryField("field7", "java.lang.String", null)
.addQueryField("field8", "java.lang.String", null)
.addQueryField("field9", "java.lang.String", null)
.setKeyFieldName("ycsb_key");
qe.setTableName("usertable");
CacheConfiguration ccfg = new CacheConfiguration().setQueryEntities(Collections.singleton(qe))
.setName(DEFAULT_CACHE_NAME);
igcfg.setCacheConfiguration(ccfg);
((TcpDiscoveryVmIpFinder) ipFinder).setAddresses(adders);
disco.setIpFinder(ipFinder);
igcfg.setDiscoverySpi(disco);
igcfg.setNetworkTimeout(2000);
Log4J2Logger logger = new Log4J2Logger(IgniteSqlClientTest.class.getClassLoader().getResource("log4j2.xml"));
igcfg.setGridLogger(logger);
cluster = Ignition.start(igcfg);
cluster.active();
}
@Before
public void setUp() throws Exception {
Properties p = new Properties();
p.setProperty("hosts", HOST);
p.setProperty("ports", PORTS);
Measurements.setProperties(p);
client = new IgniteSqlClient();
client.setProperties(p);
client.init();
}
@Test
public void testInsert() throws Exception {
cluster.cache(DEFAULT_CACHE_NAME).clear();
final String key = "key";
final Map<String, String> input = new HashMap<>();
input.put("field0", "value1");
input.put("field1", "value2");
final Status status = client.insert(TABLE_NAME, key, StringByteIterator.getByteIteratorMap(input));
assertThat(status, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(1));
}
@Test
public void testDelete() throws Exception {
cluster.cache(DEFAULT_CACHE_NAME).clear();
final String key1 = "key1";
final Map<String, String> input1 = new HashMap<>();
input1.put("field0", "value1");
input1.put("field1", "value2");
final Status status1 = client.insert(TABLE_NAME, key1, StringByteIterator.getByteIteratorMap(input1));
assertThat(status1, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(1));
final String key2 = "key2";
final Map<String, String> input2 = new HashMap<>();
input2.put("field0", "value1");
input2.put("field1", "value2");
final Status status2 = client.insert(TABLE_NAME, key2, StringByteIterator.getByteIteratorMap(input2));
assertThat(status2, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(2));
final Status status3 = client.delete(TABLE_NAME, key2);
assertThat(status3, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(1));
}
@Test
public void testRead() throws Exception {
cluster.cache(DEFAULT_CACHE_NAME).clear();
final String key = "key";
final Map<String, String> input = new HashMap<>();
input.put("field0", "value1");
input.put("field1", "value2A");
input.put("field3", null);
final Status sPut = client.insert(TABLE_NAME, key, StringByteIterator.getByteIteratorMap(input));
assertThat(sPut, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(1));
final Set<String> fld = new TreeSet<>();
fld.add("field0");
fld.add("field1");
fld.add("field3");
final HashMap<String, ByteIterator> result = new HashMap<>();
final Status sGet = client.read(TABLE_NAME, key, fld, result);
assertThat(sGet, is(Status.OK));
final HashMap<String, String> strResult = new HashMap<String, String>();
for (final Map.Entry<String, ByteIterator> e : result.entrySet()) {
if (e.getValue() != null) {
strResult.put(e.getKey(), e.getValue().toString());
}
}
assertThat(strResult, hasEntry("field0", "value1"));
assertThat(strResult, hasEntry("field1", "value2A"));
}
@Test
public void testUpdate() throws Exception {
cluster.cache(DEFAULT_CACHE_NAME).clear();
final String key = "key";
final Map<String, String> input = new HashMap<>();
input.put("field0", "value1");
input.put("field1", "value2A");
input.put("field3", null);
client.insert(TABLE_NAME, key, StringByteIterator.getByteIteratorMap(input));
input.put("field1", "value2B");
input.put("field4", "value4A");
final Status sUpd = client.update(TABLE_NAME, key, StringByteIterator.getByteIteratorMap(input));
assertThat(sUpd, is(Status.OK));
final Set<String> fld = new TreeSet<>();
fld.add("field0");
fld.add("field1");
fld.add("field3");
fld.add("field4");
final HashMap<String, ByteIterator> result = new HashMap<>();
final Status sGet = client.read(TABLE_NAME, key, fld, result);
assertThat(sGet, is(Status.OK));
final HashMap<String, String> strResult = new HashMap<String, String>();
for (final Map.Entry<String, ByteIterator> e : result.entrySet()) {
if (e.getValue() != null) {
strResult.put(e.getKey(), e.getValue().toString());
}
}
assertThat(strResult, hasEntry("field0", "value1"));
assertThat(strResult, hasEntry("field1", "value2B"));
assertThat(strResult, hasEntry("field4", "value4A"));
}
@Test
public void testConcurrentUpdate() throws Exception {
cluster.cache(DEFAULT_CACHE_NAME).clear();
final String key = "key";
final Map<String, String> input = new HashMap<>();
input.put("field0", "value1");
input.put("field1", "value2A");
input.put("field3", null);
client.insert(TABLE_NAME, key, StringByteIterator.getByteIteratorMap(input));
input.put("field1", "value2B");
input.put("field4", "value4A");
ExecutorService exec = Executors.newCachedThreadPool();
final AtomicLong l = new AtomicLong(0);
final Boolean[] updError = {false};
Runnable task = new Runnable() {
@Override
public void run() {
for (int i = 0; i < 100; ++i) {
input.put("field1", "value2B_" + l.incrementAndGet());
final Status sUpd = client.update(TABLE_NAME, key, StringByteIterator.getByteIteratorMap(input));
if (!sUpd.isOk()) {
updError[0] = true;
break;
}
}
}
};
for (int i = 0; i < 32; ++i) {
exec.execute(task);
}
    exec.shutdown();
    exec.awaitTermination(60, TimeUnit.SECONDS);
assertThat(updError[0], is(false));
}
@Test
public void testReadAllFields() throws Exception {
cluster.cache(DEFAULT_CACHE_NAME).clear();
final String key = "key";
final Map<String, String> input = new HashMap<>();
input.put("field0", "value1");
input.put("field1", "value2A");
input.put("field3", null);
final Status sPut = client.insert(DEFAULT_CACHE_NAME, key, StringByteIterator.getByteIteratorMap(input));
assertThat(sPut, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(1));
final Set<String> fld = new TreeSet<>();
final HashMap<String, ByteIterator> result1 = new HashMap<>();
final Status sGet = client.read(TABLE_NAME, key, fld, result1);
assertThat(sGet, is(Status.OK));
final HashMap<String, String> strResult = new HashMap<String, String>();
for (final Map.Entry<String, ByteIterator> e : result1.entrySet()) {
if (e.getValue() != null) {
strResult.put(e.getKey(), e.getValue().toString());
}
}
assertThat(strResult, hasEntry("field0", "value1"));
assertThat(strResult, hasEntry("field1", "value2A"));
}
@Test
public void testReadNotPresent() throws Exception {
cluster.cache(DEFAULT_CACHE_NAME).clear();
final String key = "key";
final Map<String, String> input = new HashMap<>();
input.put("field0", "value1");
input.put("field1", "value2A");
input.put("field3", null);
final Status sPut = client.insert(TABLE_NAME, key, StringByteIterator.getByteIteratorMap(input));
assertThat(sPut, is(Status.OK));
assertThat(cluster.cache(DEFAULT_CACHE_NAME).size(), is(1));
final Set<String> fld = new TreeSet<>();
final String newKey = "newKey";
final HashMap<String, ByteIterator> result1 = new HashMap<>();
final Status sGet = client.read(TABLE_NAME, newKey, fld, result1);
assertThat(sGet, is(Status.NOT_FOUND));
}
}
| 11,331 | 35.204473 | 113 | java |
null | NearPMSW-main/baseline/logging/YCSB/ignite/src/main/java/site/ycsb/db/ignite/package-info.java | /*
* Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="http://ignite.apache.org/">Ignite</a>.
* Naive implementation.
*/
package site.ycsb.db.ignite;
| 788 | 31.875 | 71 | java |
null | NearPMSW-main/baseline/logging/YCSB/ignite/src/main/java/site/ycsb/db/ignite/IgniteAbstractClient.java | /**
* Copyright (c) 2013-2018 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License. See accompanying LICENSE file.
* <p>
*/
package site.ycsb.db.ignite;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.logger.log4j2.Log4J2Logger;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.binary.BinaryObject;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
* Ignite abstract client.
* <p>
* See {@code ignite/README.md} for details.
*/
public abstract class IgniteAbstractClient extends DB {
/** */
protected static Logger log = LogManager.getLogger(IgniteAbstractClient.class);
protected static final String DEFAULT_CACHE_NAME = "usertable";
protected static final String HOSTS_PROPERTY = "hosts";
protected static final String PORTS_PROPERTY = "ports";
protected static final String CLIENT_NODE_NAME = "YCSB client node";
protected static final String PORTS_DEFAULTS = "47500..47509";
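  // Illustrative YCSB properties for pointing this client at a local Ignite node:
  //   -p hosts=127.0.0.1 -p ports=47500..47509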
/**
   * Count the number of initializations so that client state can be torn down on the last
   * {@link #cleanup()}.
*/
protected static final AtomicInteger INIT_COUNT = new AtomicInteger(0);
/** Ignite cluster. */
protected static Ignite cluster = null;
/** Ignite cache to store key-values. */
protected static IgniteCache<String, BinaryObject> cache = null;
/** Debug flag. */
protected static boolean debug = false;
protected static TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true);
/**
* Initialize any state for this DB. Called once per DB instance; there is one
* DB instance per client thread.
*/
@Override
public void init() throws DBException {
// Keep track of number of calls to init (for later cleanup)
INIT_COUNT.incrementAndGet();
// Synchronized so that we only have a single
// cluster/session instance for all the threads.
synchronized (INIT_COUNT) {
// Check if the cluster has already been initialized
if (cluster != null) {
return;
}
try {
debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false"));
IgniteConfiguration igcfg = new IgniteConfiguration();
igcfg.setIgniteInstanceName(CLIENT_NODE_NAME);
String host = getProperties().getProperty(HOSTS_PROPERTY);
if (host == null) {
throw new DBException(String.format(
"Required property \"%s\" missing for Ignite Cluster",
HOSTS_PROPERTY));
}
String ports = getProperties().getProperty(PORTS_PROPERTY, PORTS_DEFAULTS);
if (ports == null) {
throw new DBException(String.format(
"Required property \"%s\" missing for Ignite Cluster",
PORTS_PROPERTY));
}
System.setProperty("IGNITE_QUIET", "false");
TcpDiscoverySpi disco = new TcpDiscoverySpi();
Collection<String> addrs = new LinkedHashSet<>();
addrs.add(host + ":" + ports);
((TcpDiscoveryVmIpFinder) ipFinder).setAddresses(addrs);
disco.setIpFinder(ipFinder);
igcfg.setDiscoverySpi(disco);
igcfg.setNetworkTimeout(2000);
igcfg.setClientMode(true);
Log4J2Logger logger = new Log4J2Logger(this.getClass().getClassLoader().getResource("log4j2.xml"));
igcfg.setGridLogger(logger);
log.info("Start Ignite client node.");
cluster = Ignition.start(igcfg);
log.info("Activate Ignite cluster.");
cluster.active(true);
cache = cluster.cache(DEFAULT_CACHE_NAME).withKeepBinary();
        if (cache == null) {
throw new DBException(new IgniteCheckedException("Failed to find cache " + DEFAULT_CACHE_NAME));
}
} catch (Exception e) {
throw new DBException(e);
}
} // synchronized
}
/**
* Cleanup any state for this DB. Called once per DB instance; there is one DB
* instance per client thread.
*/
@Override
public void cleanup() throws DBException {
synchronized (INIT_COUNT) {
final int curInitCount = INIT_COUNT.decrementAndGet();
if (curInitCount <= 0) {
cluster.close();
cluster = null;
}
if (curInitCount < 0) {
// This should never happen.
throw new DBException(
String.format("initCount is negative: %d", curInitCount));
}
}
}
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
return Status.NOT_IMPLEMENTED;
}
}
| 5,765 | 31.761364 | 107 | java |
null | NearPMSW-main/baseline/logging/YCSB/ignite/src/main/java/site/ycsb/db/ignite/IgniteClient.java | /**
* Copyright (c) 2013-2018 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License. See accompanying LICENSE file.
* <p>
*/
package site.ycsb.db.ignite;
import site.ycsb.*;
import org.apache.ignite.binary.BinaryField;
import org.apache.ignite.binary.BinaryObject;
import org.apache.ignite.binary.BinaryObjectBuilder;
import org.apache.ignite.binary.BinaryType;
import org.apache.ignite.cache.CacheEntryProcessor;
import org.apache.ignite.internal.util.typedef.F;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.MutableEntry;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
* Ignite client.
* <p>
* See {@code ignite/README.md} for details.
*/
public class IgniteClient extends IgniteAbstractClient {
/** */
private static Logger log = LogManager.getLogger(IgniteClient.class);
/** Cached binary type. */
private BinaryType binType = null;
/** Cached binary type's fields. */
private final ConcurrentHashMap<String, BinaryField> fieldsCache = new ConcurrentHashMap<>();
/**
* Read a record from the database. Each field/value pair from the result will
* be stored in a HashMap.
*
* @param table The name of the table
* @param key The record key of the record to read.
* @param fields The list of fields to read, or null for all of them
* @param result A HashMap of field/value pairs for the result
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
try {
BinaryObject po = cache.get(key);
if (po == null) {
return Status.NOT_FOUND;
}
if (binType == null) {
binType = po.type();
}
for (String s : F.isEmpty(fields) ? binType.fieldNames() : fields) {
BinaryField bfld = fieldsCache.get(s);
if (bfld == null) {
bfld = binType.field(s);
fieldsCache.put(s, bfld);
}
String val = bfld.value(po);
if (val != null) {
result.put(s, new StringByteIterator(val));
}
if (debug) {
log.info("table:{" + table + "}, key:{" + key + "}" + ", fields:{" + fields + "}");
log.info("fields in po{" + binType.fieldNames() + "}");
log.info("result {" + result + "}");
}
}
return Status.OK;
} catch (Exception e) {
log.error(String.format("Error reading key: %s", key), e);
return Status.ERROR;
}
}
/**
* Update a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key, overwriting any existing values with the same field name.
*
* @param table The name of the table
* @param key The record key of the record to write.
* @param values A HashMap of field/value pairs to update in the record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status update(String table, String key,
Map<String, ByteIterator> values) {
try {
cache.invoke(key, new Updater(values));
return Status.OK;
} catch (Exception e) {
log.error(String.format("Error updating key: %s", key), e);
return Status.ERROR;
}
}
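  // update() above delegates to the Updater entry processor (defined at the bottom of this class),
  // which merges the new field values into the stored BinaryObject on the node that owns the entry.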
/**
* Insert a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key.
*
* @param table The name of the table
* @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status insert(String table, String key,
Map<String, ByteIterator> values) {
try {
BinaryObjectBuilder bob = cluster.binary().builder("CustomType");
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
bob.setField(entry.getKey(), entry.getValue().toString());
if (debug) {
log.info(entry.getKey() + ":" + entry.getValue());
}
}
BinaryObject bo = bob.build();
if (table.equals(DEFAULT_CACHE_NAME)) {
cache.put(key, bo);
} else {
throw new UnsupportedOperationException("Unexpected table name: " + table);
}
return Status.OK;
} catch (Exception e) {
log.error(String.format("Error inserting key: %s", key), e);
return Status.ERROR;
}
}
/**
* Delete a record from the database.
*
* @param table The name of the table
* @param key The record key of the record to delete.
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status delete(String table, String key) {
try {
cache.remove(key);
return Status.OK;
} catch (Exception e) {
log.error(String.format("Error deleting key: %s ", key), e);
}
return Status.ERROR;
}
/**
* Entry processor to update values.
*/
public static class Updater implements CacheEntryProcessor<String, BinaryObject, Object> {
private String[] flds;
private String[] vals;
/**
* @param values Updated fields.
*/
Updater(Map<String, ByteIterator> values) {
flds = new String[values.size()];
vals = new String[values.size()];
int idx = 0;
for (Map.Entry<String, ByteIterator> e : values.entrySet()) {
flds[idx] = e.getKey();
vals[idx] = e.getValue().toString();
++idx;
}
}
/**
* {@inheritDoc}
*/
@Override
public Object process(MutableEntry<String, BinaryObject> mutableEntry, Object... objects)
throws EntryProcessorException {
BinaryObjectBuilder bob = mutableEntry.getValue().toBuilder();
for (int i = 0; i < flds.length; ++i) {
bob.setField(flds[i], vals[i]);
}
mutableEntry.setValue(bob.build());
return null;
}
}
}
| 6,725 | 28.116883 | 95 | java |
null | NearPMSW-main/baseline/logging/YCSB/ignite/src/main/java/site/ycsb/db/ignite/IgniteSqlClient.java | /**
* Copyright (c) 2013-2018 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License. See accompanying LICENSE file.
* <p>
*/
package site.ycsb.db.ignite;
import site.ycsb.*;
import org.apache.ignite.cache.query.FieldsQueryCursor;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.internal.util.typedef.F;
import javax.cache.CacheException;
import java.util.*;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
 * Ignite SQL client.
* <p>
* See {@code ignite/README.md} for details.
*/
public class IgniteSqlClient extends IgniteAbstractClient {
/** */
private static Logger log = LogManager.getLogger(IgniteSqlClient.class);
/** */
private static final String PRIMARY_KEY = "YCSB_KEY";
/**
* Read a record from the database. Each field/value pair from the result will
* be stored in a HashMap.
*
* @param table The name of the table
* @param key The record key of the record to read.
* @param fields The list of fields to read, or null for all of them
* @param result A HashMap of field/value pairs for the result
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
try {
StringBuilder sb = new StringBuilder("SELECT * FROM ").append(table)
.append(" WHERE ").append(PRIMARY_KEY).append("=?");
SqlFieldsQuery qry = new SqlFieldsQuery(sb.toString());
qry.setArgs(key);
FieldsQueryCursor<List<?>> cur = cache.query(qry);
Iterator<List<?>> it = cur.iterator();
if (!it.hasNext()) {
return Status.NOT_FOUND;
}
String[] colNames = new String[cur.getColumnsCount()];
for (int i = 0; i < colNames.length; ++i) {
String colName = cur.getFieldName(i);
if (F.isEmpty(fields)) {
colNames[i] = colName.toLowerCase();
} else {
for (String f : fields) {
if (f.equalsIgnoreCase(colName)) {
colNames[i] = f;
}
}
}
}
while (it.hasNext()) {
List<?> row = it.next();
for (int i = 0; i < colNames.length; ++i) {
if (colNames[i] != null) {
result.put(colNames[i], new StringByteIterator((String) row.get(i)));
}
}
}
return Status.OK;
} catch (Exception e) {
log.error(String.format("Error in processing read from table: %s", table), e);
return Status.ERROR;
}
}
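  // Illustrative statement built by read() above for the default table:
  //   SELECT * FROM usertable WHERE YCSB_KEY=?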
/**
* Update a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key, overwriting any existing values with the same field name.
*
* @param table The name of the table
* @param key The record key of the record to write.
* @param values A HashMap of field/value pairs to update in the record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status update(String table, String key,
Map<String, ByteIterator> values) {
while (true) {
try {
UpdateData updData = new UpdateData(key, values);
StringBuilder sb = new StringBuilder("UPDATE ").append(table).append(" SET ");
for (int i = 0; i < updData.getFields().length; ++i) {
sb.append(updData.getFields()[i]).append("=?");
if (i < updData.getFields().length - 1) {
sb.append(", ");
}
}
sb.append(" WHERE ").append(PRIMARY_KEY).append("=?");
SqlFieldsQuery qry = new SqlFieldsQuery(sb.toString());
qry.setArgs(updData.getArgs());
cache.query(qry).getAll();
return Status.OK;
} catch (CacheException e) {
if (!e.getMessage().contains("Failed to update some keys because they had been modified concurrently")) {
log.error(String.format("Error in processing update table: %s", table), e);
return Status.ERROR;
}
} catch (Exception e) {
log.error(String.format("Error in processing update table: %s", table), e);
return Status.ERROR;
}
}
}
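  // Illustrative statement built by update() above for two fields:
  //   UPDATE usertable SET field0=?, field1=? WHERE YCSB_KEY=?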
/**
* Insert a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key.
*
* @param table The name of the table
* @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
try {
InsertData insertData = new InsertData(key, values);
StringBuilder sb = new StringBuilder("INSERT INTO ").append(table).append(" (")
.append(insertData.getInsertFields()).append(") VALUES (")
.append(insertData.getInsertParams()).append(')');
SqlFieldsQuery qry = new SqlFieldsQuery(sb.toString());
qry.setArgs(insertData.getArgs());
cache.query(qry).getAll();
return Status.OK;
} catch (Exception e) {
log.error(String.format("Error in processing insert to table: %s", table), e);
return Status.ERROR;
}
}
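  // Illustrative statement built by insert() above for two fields:
  //   INSERT INTO usertable (YCSB_KEY,field0,field1) VALUES (?, ?, ?)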
/**
* Delete a record from the database.
*
* @param table The name of the table
* @param key The record key of the record to delete.
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status delete(String table, String key) {
try {
StringBuilder sb = new StringBuilder("DELETE FROM ").append(table)
.append(" WHERE ").append(PRIMARY_KEY).append(" = ?");
SqlFieldsQuery qry = new SqlFieldsQuery(sb.toString());
qry.setArgs(key);
cache.query(qry).getAll();
return Status.OK;
} catch (Exception e) {
log.error(String.format("Error in processing read from table: %s", table), e);
return Status.ERROR;
}
}
/**
* Field and values for insert queries.
*/
private static class InsertData {
private final Object[] args;
private final String insertFields;
private final String insertParams;
/**
* @param key Key.
* @param values Field values.
*/
InsertData(String key, Map<String, ByteIterator> values) {
args = new String[values.size() + 1];
int idx = 0;
args[idx++] = key;
StringBuilder sbFields = new StringBuilder(PRIMARY_KEY);
StringBuilder sbParams = new StringBuilder("?");
for (Map.Entry<String, ByteIterator> e : values.entrySet()) {
args[idx++] = e.getValue().toString();
sbFields.append(',').append(e.getKey());
sbParams.append(", ?");
}
insertFields = sbFields.toString();
insertParams = sbParams.toString();
}
public Object[] getArgs() {
return args;
}
public String getInsertFields() {
return insertFields;
}
public String getInsertParams() {
return insertParams;
}
}
/**
* Field and values for update queries.
*/
private static class UpdateData {
private final Object[] args;
private final String[] fields;
/**
* @param key Key.
* @param values Field values.
*/
UpdateData(String key, Map<String, ByteIterator> values) {
args = new String[values.size() + 1];
fields = new String[values.size()];
int idx = 0;
for (Map.Entry<String, ByteIterator> e : values.entrySet()) {
args[idx] = e.getValue().toString();
fields[idx++] = e.getKey();
}
args[idx] = key;
}
public Object[] getArgs() {
return args;
}
public String[] getFields() {
return fields;
}
}
}
| 8,437 | 29.243728 | 113 | java |
null | NearPMSW-main/baseline/logging/YCSB/couchbase2/src/main/java/site/ycsb/db/couchbase2/package-info.java | /*
* Copyright (c) 2015 - 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="http://www.couchbase.com/">Couchbase</a>, new driver.
*/
package site.ycsb.db.couchbase2;
| 794 | 33.565217 | 86 | java |
null | NearPMSW-main/baseline/logging/YCSB/couchbase2/src/main/java/site/ycsb/db/couchbase2/Couchbase2Client.java | /**
* Copyright (c) 2016 Yahoo! Inc. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.couchbase2;
import com.couchbase.client.core.env.DefaultCoreEnvironment;
import com.couchbase.client.core.env.resources.IoPoolShutdownHook;
import com.couchbase.client.core.logging.CouchbaseLogger;
import com.couchbase.client.core.logging.CouchbaseLoggerFactory;
import com.couchbase.client.core.metrics.DefaultLatencyMetricsCollectorConfig;
import com.couchbase.client.core.metrics.DefaultMetricsCollectorConfig;
import com.couchbase.client.core.metrics.LatencyMetricsCollectorConfig;
import com.couchbase.client.core.metrics.MetricsCollectorConfig;
import com.couchbase.client.deps.com.fasterxml.jackson.core.JsonFactory;
import com.couchbase.client.deps.com.fasterxml.jackson.core.JsonGenerator;
import com.couchbase.client.deps.com.fasterxml.jackson.databind.JsonNode;
import com.couchbase.client.deps.com.fasterxml.jackson.databind.node.ObjectNode;
import com.couchbase.client.deps.io.netty.channel.DefaultSelectStrategyFactory;
import com.couchbase.client.deps.io.netty.channel.EventLoopGroup;
import com.couchbase.client.deps.io.netty.channel.SelectStrategy;
import com.couchbase.client.deps.io.netty.channel.SelectStrategyFactory;
import com.couchbase.client.deps.io.netty.channel.epoll.EpollEventLoopGroup;
import com.couchbase.client.deps.io.netty.channel.nio.NioEventLoopGroup;
import com.couchbase.client.deps.io.netty.util.IntSupplier;
import com.couchbase.client.deps.io.netty.util.concurrent.DefaultThreadFactory;
import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.CouchbaseCluster;
import com.couchbase.client.java.PersistTo;
import com.couchbase.client.java.ReplicateTo;
import com.couchbase.client.java.document.Document;
import com.couchbase.client.java.document.RawJsonDocument;
import com.couchbase.client.java.document.json.JsonArray;
import com.couchbase.client.java.document.json.JsonObject;
import com.couchbase.client.java.env.CouchbaseEnvironment;
import com.couchbase.client.java.env.DefaultCouchbaseEnvironment;
import com.couchbase.client.java.error.TemporaryFailureException;
import com.couchbase.client.java.query.*;
import com.couchbase.client.java.transcoder.JacksonTransformers;
import com.couchbase.client.java.util.Blocking;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import rx.Observable;
import rx.Subscriber;
import rx.functions.Action1;
import rx.functions.Func1;
import java.io.StringWriter;
import java.io.Writer;
import java.nio.channels.spi.SelectorProvider;
import java.util.*;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;
/**
* A class that wraps the 2.x Couchbase SDK to be used with YCSB.
*
* <p> The following options can be passed when using this database client to override the defaults.
*
* <ul>
* <li><b>couchbase.host=127.0.0.1</b> The hostname from one server.</li>
* <li><b>couchbase.bucket=default</b> The bucket name to use.</li>
* <li><b>couchbase.password=</b> The password of the bucket.</li>
* <li><b>couchbase.syncMutationResponse=true</b> If mutations should wait for the response to complete.</li>
* <li><b>couchbase.persistTo=0</b> Persistence durability requirement</li>
* <li><b>couchbase.replicateTo=0</b> Replication durability requirement</li>
* <li><b>couchbase.upsert=false</b> Use upsert instead of insert or replace.</li>
* <li><b>couchbase.adhoc=false</b> If set to true, prepared statements are not used.</li>
* <li><b>couchbase.kv=true</b> If set to false, mutation operations will also be performed through N1QL.</li>
* <li><b>couchbase.maxParallelism=1</b> The server parallelism for all n1ql queries.</li>
* <li><b>couchbase.kvEndpoints=1</b> The number of KV sockets to open per server.</li>
 * <li><b>couchbase.queryEndpoints=1</b> The number of N1QL Query sockets to open per server.</li>
 * <li><b>couchbase.epoll=false</b> If Epoll instead of NIO should be used (only available for Linux).</li>
* <li><b>couchbase.boost=3</b> If > 0 trades CPU for higher throughput. N is the number of event loops, ideally
* set to the number of physical cores. Setting higher than that will likely degrade performance.</li>
* <li><b>couchbase.networkMetricsInterval=0</b> The interval in seconds when latency metrics will be logged.</li>
* <li><b>couchbase.runtimeMetricsInterval=0</b> The interval in seconds when runtime metrics will be logged.</li>
* <li><b>couchbase.documentExpiry=0</b> Document Expiry is the amount of time until a document expires in
* Couchbase.</li>
* </ul>
*/
public class Couchbase2Client extends DB {
static {
// No need to send the full encoded_plan for this benchmark workload, less network overhead!
System.setProperty("com.couchbase.query.encodedPlanEnabled", "false");
}
private static final String SEPARATOR = ":";
private static final CouchbaseLogger LOGGER = CouchbaseLoggerFactory.getInstance(Couchbase2Client.class);
private static final Object INIT_COORDINATOR = new Object();
private static volatile CouchbaseEnvironment env = null;
private Cluster cluster;
private Bucket bucket;
private String bucketName;
private boolean upsert;
private PersistTo persistTo;
private ReplicateTo replicateTo;
private boolean syncMutResponse;
private boolean epoll;
private long kvTimeout;
private boolean adhoc;
private boolean kv;
private int maxParallelism;
private String host;
private int kvEndpoints;
private int queryEndpoints;
private int boost;
private int networkMetricsInterval;
private int runtimeMetricsInterval;
private String scanAllQuery;
private int documentExpiry;
@Override
public void init() throws DBException {
Properties props = getProperties();
host = props.getProperty("couchbase.host", "127.0.0.1");
bucketName = props.getProperty("couchbase.bucket", "default");
String bucketPassword = props.getProperty("couchbase.password", "");
upsert = props.getProperty("couchbase.upsert", "false").equals("true");
persistTo = parsePersistTo(props.getProperty("couchbase.persistTo", "0"));
replicateTo = parseReplicateTo(props.getProperty("couchbase.replicateTo", "0"));
syncMutResponse = props.getProperty("couchbase.syncMutationResponse", "true").equals("true");
adhoc = props.getProperty("couchbase.adhoc", "false").equals("true");
kv = props.getProperty("couchbase.kv", "true").equals("true");
maxParallelism = Integer.parseInt(props.getProperty("couchbase.maxParallelism", "1"));
kvEndpoints = Integer.parseInt(props.getProperty("couchbase.kvEndpoints", "1"));
queryEndpoints = Integer.parseInt(props.getProperty("couchbase.queryEndpoints", "1"));
epoll = props.getProperty("couchbase.epoll", "false").equals("true");
boost = Integer.parseInt(props.getProperty("couchbase.boost", "3"));
networkMetricsInterval = Integer.parseInt(props.getProperty("couchbase.networkMetricsInterval", "0"));
runtimeMetricsInterval = Integer.parseInt(props.getProperty("couchbase.runtimeMetricsInterval", "0"));
documentExpiry = Integer.parseInt(props.getProperty("couchbase.documentExpiry", "0"));
scanAllQuery = "SELECT RAW meta().id FROM `" + bucketName +
"` WHERE meta().id >= '$1' ORDER BY meta().id LIMIT $2";
try {
synchronized (INIT_COORDINATOR) {
if (env == null) {
LatencyMetricsCollectorConfig latencyConfig = networkMetricsInterval <= 0
? DefaultLatencyMetricsCollectorConfig.disabled()
: DefaultLatencyMetricsCollectorConfig
.builder()
.emitFrequency(networkMetricsInterval)
.emitFrequencyUnit(TimeUnit.SECONDS)
.build();
MetricsCollectorConfig runtimeConfig = runtimeMetricsInterval <= 0
? DefaultMetricsCollectorConfig.disabled()
: DefaultMetricsCollectorConfig.create(runtimeMetricsInterval, TimeUnit.SECONDS);
DefaultCouchbaseEnvironment.Builder builder = DefaultCouchbaseEnvironment
.builder()
.queryEndpoints(queryEndpoints)
.callbacksOnIoPool(true)
.runtimeMetricsCollectorConfig(runtimeConfig)
.networkLatencyMetricsCollectorConfig(latencyConfig)
.socketConnectTimeout(10000) // 10 secs socket connect timeout
.connectTimeout(30000) // 30 secs overall bucket open timeout
.kvTimeout(10000) // 10 instead of 2.5s for KV ops
.kvEndpoints(kvEndpoints);
// Tune boosting and epoll based on settings
SelectStrategyFactory factory = boost > 0 ?
new BackoffSelectStrategyFactory() : DefaultSelectStrategyFactory.INSTANCE;
int poolSize = boost > 0 ? boost : Integer.parseInt(
System.getProperty("com.couchbase.ioPoolSize", Integer.toString(DefaultCoreEnvironment.IO_POOL_SIZE))
);
ThreadFactory threadFactory = new DefaultThreadFactory("cb-io", true);
EventLoopGroup group = epoll ? new EpollEventLoopGroup(poolSize, threadFactory, factory)
: new NioEventLoopGroup(poolSize, threadFactory, SelectorProvider.provider(), factory);
builder.ioPool(group, new IoPoolShutdownHook(group));
env = builder.build();
logParams();
}
}
cluster = CouchbaseCluster.create(env, host);
bucket = cluster.openBucket(bucketName, bucketPassword);
kvTimeout = env.kvTimeout();
} catch (Exception ex) {
throw new DBException("Could not connect to Couchbase Bucket.", ex);
}
if (!kv && !syncMutResponse) {
throw new DBException("Not waiting for N1QL responses on mutations not yet implemented.");
}
}
/**
* Helper method to log the CLI params so that on the command line debugging is easier.
*/
private void logParams() {
StringBuilder sb = new StringBuilder();
sb.append("host=").append(host);
sb.append(", bucket=").append(bucketName);
sb.append(", upsert=").append(upsert);
sb.append(", persistTo=").append(persistTo);
sb.append(", replicateTo=").append(replicateTo);
sb.append(", syncMutResponse=").append(syncMutResponse);
sb.append(", adhoc=").append(adhoc);
sb.append(", kv=").append(kv);
sb.append(", maxParallelism=").append(maxParallelism);
sb.append(", queryEndpoints=").append(queryEndpoints);
sb.append(", kvEndpoints=").append(kvEndpoints);
sb.append(", queryEndpoints=").append(queryEndpoints);
sb.append(", epoll=").append(epoll);
sb.append(", boost=").append(boost);
sb.append(", networkMetricsInterval=").append(networkMetricsInterval);
sb.append(", runtimeMetricsInterval=").append(runtimeMetricsInterval);
LOGGER.info("===> Using Params: " + sb.toString());
}
@Override
public Status read(final String table, final String key, Set<String> fields,
final Map<String, ByteIterator> result) {
try {
String docId = formatId(table, key);
if (kv) {
return readKv(docId, fields, result);
} else {
return readN1ql(docId, fields, result);
}
} catch (Exception ex) {
ex.printStackTrace();
return Status.ERROR;
}
}
/**
* Performs the {@link #read(String, String, Set, Map)} operation via Key/Value ("get").
*
* @param docId the document ID
* @param fields the fields to be loaded
* @param result the result map where the doc needs to be converted into
* @return The result of the operation.
*/
private Status readKv(final String docId, final Set<String> fields, final Map<String, ByteIterator> result)
throws Exception {
RawJsonDocument loaded = bucket.get(docId, RawJsonDocument.class);
if (loaded == null) {
return Status.NOT_FOUND;
}
decode(loaded.content(), fields, result);
return Status.OK;
}
/**
* Performs the {@link #read(String, String, Set, Map)} operation via N1QL ("SELECT").
*
* If this option should be used, the "-p couchbase.kv=false" property must be set.
*
* @param docId the document ID
* @param fields the fields to be loaded
* @param result the result map where the doc needs to be converted into
* @return The result of the operation.
*/
private Status readN1ql(final String docId, Set<String> fields, final Map<String, ByteIterator> result)
throws Exception {
String readQuery = "SELECT " + joinFields(fields) + " FROM `" + bucketName + "` USE KEYS [$1]";
N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
readQuery,
JsonArray.from(docId),
N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
));
if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
throw new DBException("Error while parsing N1QL Result. Query: " + readQuery
+ ", Errors: " + queryResult.errors());
}
N1qlQueryRow row;
try {
row = queryResult.rows().next();
} catch (NoSuchElementException ex) {
return Status.NOT_FOUND;
}
JsonObject content = row.value();
if (fields == null) {
content = content.getObject(bucketName); // n1ql result set scoped under *.bucketName
fields = content.getNames();
}
for (String field : fields) {
Object value = content.get(field);
result.put(field, new StringByteIterator(value != null ? value.toString() : ""));
}
return Status.OK;
}
@Override
public Status update(final String table, final String key, final Map<String, ByteIterator> values) {
if (upsert) {
return upsert(table, key, values);
}
try {
String docId = formatId(table, key);
if (kv) {
return updateKv(docId, values);
} else {
return updateN1ql(docId, values);
}
} catch (Exception ex) {
ex.printStackTrace();
return Status.ERROR;
}
}
/**
* Performs the {@link #update(String, String, Map)} operation via Key/Value ("replace").
*
* @param docId the document ID
* @param values the values to update the document with.
* @return The result of the operation.
*/
private Status updateKv(final String docId, final Map<String, ByteIterator> values) {
waitForMutationResponse(bucket.async().replace(
RawJsonDocument.create(docId, documentExpiry, encode(values)),
persistTo,
replicateTo
));
return Status.OK;
}
/**
* Performs the {@link #update(String, String, Map)} operation via N1QL ("UPDATE").
*
* If this option should be used, the "-p couchbase.kv=false" property must be set.
*
* @param docId the document ID
* @param values the values to update the document with.
* @return The result of the operation.
*/
private Status updateN1ql(final String docId, final Map<String, ByteIterator> values)
throws Exception {
String fields = encodeN1qlFields(values);
String updateQuery = "UPDATE `" + bucketName + "` USE KEYS [$1] SET " + fields;
N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
updateQuery,
JsonArray.from(docId),
N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
));
if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
throw new DBException("Error while parsing N1QL Result. Query: " + updateQuery
+ ", Errors: " + queryResult.errors());
}
return Status.OK;
}
@Override
public Status insert(final String table, final String key, final Map<String, ByteIterator> values) {
if (upsert) {
return upsert(table, key, values);
}
try {
String docId = formatId(table, key);
if (kv) {
return insertKv(docId, values);
} else {
return insertN1ql(docId, values);
}
} catch (Exception ex) {
ex.printStackTrace();
return Status.ERROR;
}
}
/**
* Performs the {@link #insert(String, String, Map)} operation via Key/Value ("INSERT").
*
* Note that during the "load" phase it makes sense to retry TMPFAILS (so that even if the server is
* overloaded temporarily the ops will succeed eventually). The current code will retry TMPFAILs
   * for a maximum of one minute and then bubble up the error.
*
* @param docId the document ID
* @param values the values to update the document with.
* @return The result of the operation.
*/
private Status insertKv(final String docId, final Map<String, ByteIterator> values) {
int tries = 60; // roughly 60 seconds with the 1 second sleep, not 100% accurate.
for(int i = 0; i < tries; i++) {
try {
waitForMutationResponse(bucket.async().insert(
RawJsonDocument.create(docId, documentExpiry, encode(values)),
persistTo,
replicateTo
));
return Status.OK;
} catch (TemporaryFailureException ex) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted while sleeping on TMPFAIL backoff.", ex);
}
}
}
throw new RuntimeException("Still receiving TMPFAIL from the server after trying " + tries + " times. " +
"Check your server.");
}
/**
* Performs the {@link #insert(String, String, Map)} operation via N1QL ("INSERT").
*
* If this option should be used, the "-p couchbase.kv=false" property must be set.
*
* @param docId the document ID
* @param values the values to update the document with.
* @return The result of the operation.
*/
private Status insertN1ql(final String docId, final Map<String, ByteIterator> values)
throws Exception {
String insertQuery = "INSERT INTO `" + bucketName + "`(KEY,VALUE) VALUES ($1,$2)";
N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
insertQuery,
JsonArray.from(docId, valuesToJsonObject(values)),
N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
));
if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
throw new DBException("Error while parsing N1QL Result. Query: " + insertQuery
+ ", Errors: " + queryResult.errors());
}
return Status.OK;
}
/**
* Performs an upsert instead of insert or update using either Key/Value or N1QL.
*
* If this option should be used, the "-p couchbase.upsert=true" property must be set.
*
* @param table The name of the table
* @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record
* @return The result of the operation.
*/
private Status upsert(final String table, final String key, final Map<String, ByteIterator> values) {
try {
String docId = formatId(table, key);
if (kv) {
return upsertKv(docId, values);
} else {
return upsertN1ql(docId, values);
}
} catch (Exception ex) {
ex.printStackTrace();
return Status.ERROR;
}
}
/**
* Performs the {@link #upsert(String, String, Map)} operation via Key/Value ("upsert").
*
* If this option should be used, the "-p couchbase.upsert=true" property must be set.
*
* @param docId the document ID
* @param values the values to update the document with.
* @return The result of the operation.
*/
private Status upsertKv(final String docId, final Map<String, ByteIterator> values) {
waitForMutationResponse(bucket.async().upsert(
RawJsonDocument.create(docId, documentExpiry, encode(values)),
persistTo,
replicateTo
));
return Status.OK;
}
/**
* Performs the {@link #upsert(String, String, Map)} operation via N1QL ("UPSERT").
*
* If this option should be used, the "-p couchbase.upsert=true -p couchbase.kv=false" properties must be set.
*
* @param docId the document ID
* @param values the values to update the document with.
* @return The result of the operation.
*/
private Status upsertN1ql(final String docId, final Map<String, ByteIterator> values)
throws Exception {
String upsertQuery = "UPSERT INTO `" + bucketName + "`(KEY,VALUE) VALUES ($1,$2)";
N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
upsertQuery,
JsonArray.from(docId, valuesToJsonObject(values)),
N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
));
if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
throw new DBException("Error while parsing N1QL Result. Query: " + upsertQuery
+ ", Errors: " + queryResult.errors());
}
return Status.OK;
}
@Override
public Status delete(final String table, final String key) {
try {
String docId = formatId(table, key);
if (kv) {
return deleteKv(docId);
} else {
return deleteN1ql(docId);
}
} catch (Exception ex) {
ex.printStackTrace();
return Status.ERROR;
}
}
/**
   * Performs the {@link #delete(String, String)} operation via Key/Value ("remove").
*
* @param docId the document ID.
* @return The result of the operation.
*/
private Status deleteKv(final String docId) {
waitForMutationResponse(bucket.async().remove(
docId,
persistTo,
replicateTo
));
return Status.OK;
}
/**
   * Performs the {@link #delete(String, String)} operation via N1QL ("DELETE").
*
* If this option should be used, the "-p couchbase.kv=false" property must be set.
*
* @param docId the document ID.
* @return The result of the operation.
*/
private Status deleteN1ql(final String docId) throws Exception {
String deleteQuery = "DELETE FROM `" + bucketName + "` USE KEYS [$1]";
N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
deleteQuery,
JsonArray.from(docId),
N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
));
if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
throw new DBException("Error while parsing N1QL Result. Query: " + deleteQuery
+ ", Errors: " + queryResult.errors());
}
return Status.OK;
}
@Override
public Status scan(final String table, final String startkey, final int recordcount, final Set<String> fields,
final Vector<HashMap<String, ByteIterator>> result) {
try {
if (fields == null || fields.isEmpty()) {
return scanAllFields(table, startkey, recordcount, result);
} else {
return scanSpecificFields(table, startkey, recordcount, fields, result);
}
} catch (Exception ex) {
ex.printStackTrace();
return Status.ERROR;
}
}
/**
* Performs the {@link #scan(String, String, int, Set, Vector)} operation, optimized for all fields.
*
   * Since the full document bodies need to be loaded anyway, it makes sense to just grab the document IDs
   * from N1QL and then perform the bulk loading via KV for better performance. This is a common pattern with
   * Couchbase and shows the benefits of using both N1QL and KV together.
*
* @param table The name of the table
* @param startkey The record key of the first record to read.
* @param recordcount The number of records to read
* @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
* @return The result of the operation.
*/
private Status scanAllFields(final String table, final String startkey, final int recordcount,
final Vector<HashMap<String, ByteIterator>> result) {
final List<HashMap<String, ByteIterator>> data = new ArrayList<HashMap<String, ByteIterator>>(recordcount);
bucket.async()
.query(N1qlQuery.parameterized(
scanAllQuery,
JsonArray.from(formatId(table, startkey), recordcount),
N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
))
.doOnNext(new Action1<AsyncN1qlQueryResult>() {
@Override
public void call(AsyncN1qlQueryResult result) {
if (!result.parseSuccess()) {
throw new RuntimeException("Error while parsing N1QL Result. Query: " + scanAllQuery
+ ", Errors: " + result.errors());
}
}
})
.flatMap(new Func1<AsyncN1qlQueryResult, Observable<AsyncN1qlQueryRow>>() {
@Override
public Observable<AsyncN1qlQueryRow> call(AsyncN1qlQueryResult result) {
return result.rows();
}
})
.flatMap(new Func1<AsyncN1qlQueryRow, Observable<RawJsonDocument>>() {
@Override
public Observable<RawJsonDocument> call(AsyncN1qlQueryRow row) {
String id = new String(row.byteValue()).trim();
return bucket.async().get(id.substring(1, id.length()-1), RawJsonDocument.class);
}
})
.map(new Func1<RawJsonDocument, HashMap<String, ByteIterator>>() {
@Override
public HashMap<String, ByteIterator> call(RawJsonDocument document) {
HashMap<String, ByteIterator> tuple = new HashMap<String, ByteIterator>();
decode(document.content(), null, tuple);
return tuple;
}
})
.toBlocking()
.forEach(new Action1<HashMap<String, ByteIterator>>() {
@Override
public void call(HashMap<String, ByteIterator> tuple) {
data.add(tuple);
}
});
result.addAll(data);
return Status.OK;
}
/**
   * Performs the {@link #scan(String, String, int, Set, Vector)} operation via N1QL only, for a subset of the fields.
*
* @param table The name of the table
* @param startkey The record key of the first record to read.
* @param recordcount The number of records to read
* @param fields The list of fields to read, or null for all of them
* @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
* @return The result of the operation.
*/
private Status scanSpecificFields(final String table, final String startkey, final int recordcount,
final Set<String> fields, final Vector<HashMap<String, ByteIterator>> result) {
String scanSpecQuery = "SELECT " + joinFields(fields) + " FROM `" + bucketName
+ "` WHERE meta().id >= '$1' LIMIT $2";
N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
scanSpecQuery,
JsonArray.from(formatId(table, startkey), recordcount),
N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
));
if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
throw new RuntimeException("Error while parsing N1QL Result. Query: " + scanSpecQuery
+ ", Errors: " + queryResult.errors());
}
boolean allFields = fields == null || fields.isEmpty();
result.ensureCapacity(recordcount);
for (N1qlQueryRow row : queryResult) {
JsonObject value = row.value();
if (fields == null) {
value = value.getObject(bucketName);
}
Set<String> f = allFields ? value.getNames() : fields;
HashMap<String, ByteIterator> tuple = new HashMap<String, ByteIterator>(f.size());
for (String field : f) {
tuple.put(field, new StringByteIterator(value.getString(field)));
}
result.add(tuple);
}
return Status.OK;
}
/**
* Helper method to block on the response, depending on the property set.
*
   * By default, since YCSB is synchronous, the code will always wait for the operation to complete. In some
* cases it can be useful to just "drive load" and disable the waiting. Note that when the
* "-p couchbase.syncMutationResponse=false" option is used, the measured results by YCSB can basically
* be thrown away. Still helpful sometimes during load phases to speed them up :)
*
* @param input the async input observable.
*/
private void waitForMutationResponse(final Observable<? extends Document<?>> input) {
if (!syncMutResponse) {
((Observable<Document<?>>)input).subscribe(new Subscriber<Document<?>>() {
@Override
public void onCompleted() {
}
@Override
public void onError(Throwable e) {
}
@Override
public void onNext(Document<?> document) {
}
});
} else {
Blocking.blockForSingle(input, kvTimeout, TimeUnit.MILLISECONDS);
}
}
/**
* Helper method to turn the values into a String, used with {@link #upsertN1ql(String, Map)}.
*
* @param values the values to encode.
* @return the encoded string.
*/
private static String encodeN1qlFields(final Map<String, ByteIterator> values) {
if (values.isEmpty()) {
return "";
}
StringBuilder sb = new StringBuilder();
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
String raw = entry.getValue().toString();
String escaped = raw.replace("\"", "\\\"").replace("\'", "\\\'");
sb.append(entry.getKey()).append("=\"").append(escaped).append("\" ");
}
String toReturn = sb.toString();
return toReturn.substring(0, toReturn.length() - 1);
}
/**
* Helper method to turn the map of values into a {@link JsonObject} for further use.
*
* @param values the values to transform.
* @return the created json object.
*/
private static JsonObject valuesToJsonObject(final Map<String, ByteIterator> values) {
JsonObject result = JsonObject.create();
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
result.put(entry.getKey(), entry.getValue().toString());
}
return result;
}
/**
* Helper method to join the set of fields into a String suitable for N1QL.
*
* @param fields the fields to join.
* @return the joined fields as a String.
*/
private static String joinFields(final Set<String> fields) {
if (fields == null || fields.isEmpty()) {
return "*";
}
StringBuilder builder = new StringBuilder();
for (String f : fields) {
builder.append("`").append(f).append("`").append(",");
}
String toReturn = builder.toString();
return toReturn.substring(0, toReturn.length() - 1);
}
/**
* Helper method to turn the prefix and key into a proper document ID.
*
* @param prefix the prefix (table).
* @param key the key itself.
* @return a document ID that can be used with Couchbase.
*/
private static String formatId(final String prefix, final String key) {
return prefix + SEPARATOR + key;
}
/**
* Helper method to parse the "ReplicateTo" property on startup.
*
   * @param property the property to parse.
* @return the parsed setting.
*/
private static ReplicateTo parseReplicateTo(final String property) throws DBException {
int value = Integer.parseInt(property);
switch (value) {
case 0:
return ReplicateTo.NONE;
case 1:
return ReplicateTo.ONE;
case 2:
return ReplicateTo.TWO;
case 3:
return ReplicateTo.THREE;
default:
throw new DBException("\"couchbase.replicateTo\" must be between 0 and 3");
}
}
/**
* Helper method to parse the "PersistTo" property on startup.
*
   * @param property the property to parse.
* @return the parsed setting.
*/
private static PersistTo parsePersistTo(final String property) throws DBException {
int value = Integer.parseInt(property);
switch (value) {
case 0:
return PersistTo.NONE;
case 1:
return PersistTo.ONE;
case 2:
return PersistTo.TWO;
case 3:
return PersistTo.THREE;
case 4:
return PersistTo.FOUR;
default:
throw new DBException("\"couchbase.persistTo\" must be between 0 and 4");
}
}
/**
* Decode the String from server and pass it into the decoded destination.
*
* @param source the loaded object.
* @param fields the fields to check.
* @param dest the result passed back to YCSB.
*/
private void decode(final String source, final Set<String> fields,
final Map<String, ByteIterator> dest) {
try {
JsonNode json = JacksonTransformers.MAPPER.readTree(source);
boolean checkFields = fields != null && !fields.isEmpty();
for (Iterator<Map.Entry<String, JsonNode>> jsonFields = json.fields(); jsonFields.hasNext();) {
Map.Entry<String, JsonNode> jsonField = jsonFields.next();
String name = jsonField.getKey();
if (checkFields && !fields.contains(name)) {
continue;
}
JsonNode jsonValue = jsonField.getValue();
if (jsonValue != null && !jsonValue.isNull()) {
dest.put(name, new StringByteIterator(jsonValue.asText()));
}
}
} catch (Exception e) {
throw new RuntimeException("Could not decode JSON");
}
}
/**
* Encode the source into a String for storage.
*
* @param source the source value.
* @return the encoded string.
*/
private String encode(final Map<String, ByteIterator> source) {
Map<String, String> stringMap = StringByteIterator.getStringMap(source);
ObjectNode node = JacksonTransformers.MAPPER.createObjectNode();
for (Map.Entry<String, String> pair : stringMap.entrySet()) {
node.put(pair.getKey(), pair.getValue());
}
JsonFactory jsonFactory = new JsonFactory();
Writer writer = new StringWriter();
try {
JsonGenerator jsonGenerator = jsonFactory.createGenerator(writer);
JacksonTransformers.MAPPER.writeTree(jsonGenerator, node);
} catch (Exception e) {
throw new RuntimeException("Could not encode JSON value");
}
return writer.toString();
}
}
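/**
 * A minimal usage sketch tying together the properties documented on {@link Couchbase2Client}.
 * It is only an illustration: the table, key and field names below are hypothetical, a reachable
 * Couchbase cluster on 127.0.0.1 with a "default" bucket is assumed, and error handling is
 * omitted for brevity.
 */
final class Couchbase2ClientUsageSketch {
  private Couchbase2ClientUsageSketch() {
  }
  static void run() throws Exception {
    Properties props = new Properties();
    props.setProperty("couchbase.host", "127.0.0.1"); // one node of the target cluster
    props.setProperty("couchbase.bucket", "default"); // bucket to benchmark against
    props.setProperty("couchbase.kv", "true"); // use the KV API rather than N1QL for CRUD
    Couchbase2Client client = new Couchbase2Client();
    client.setProperties(props);
    client.init();
    try {
      Map<String, ByteIterator> values = new HashMap<String, ByteIterator>();
      values.put("field0", new StringByteIterator("value0"));
      Status insertStatus = client.insert("usertable", "user1", values); // KV insert, since couchbase.kv=true
      Map<String, ByteIterator> row = new HashMap<String, ByteIterator>();
      Status readStatus = client.read("usertable", "user1", null, row); // KV get of the same document
      System.out.println("insert=" + insertStatus + ", read=" + readStatus + ", fields=" + row.keySet());
    } finally {
      client.cleanup();
    }
  }
}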
/**
* Factory for the {@link BackoffSelectStrategy} to be used with boosting.
*/
class BackoffSelectStrategyFactory implements SelectStrategyFactory {
@Override
public SelectStrategy newSelectStrategy() {
return new BackoffSelectStrategy();
}
}
/**
* Custom IO select strategy which trades CPU for throughput, used with the boost setting.
*/
class BackoffSelectStrategy implements SelectStrategy {
private int counter = 0;
@Override
public int calculateStrategy(final IntSupplier supplier, final boolean hasTasks) throws Exception {
int selectNowResult = supplier.get();
if (hasTasks || selectNowResult != 0) {
counter = 0;
return selectNowResult;
}
    counter++;
    // Escalating backoff: check the largest threshold first so every stage is reachable
    // (with ascending else-if checks, everything past the first branch would be dead code).
    if (counter > 5000) {
      // defer to blocking select
      counter = 0;
      return SelectStrategy.SELECT;
    } else if (counter > 4000) {
      LockSupport.parkNanos(1000);
    } else if (counter > 3000) {
      Thread.yield();
    } else if (counter > 2000) {
      LockSupport.parkNanos(1);
    }
return SelectStrategy.CONTINUE;
}
}
| 35,589 | 36.821467 | 115 | java |
null | NearPMSW-main/baseline/logging/YCSB/riak/src/test/java/site/ycsb/db/riak/RiakKVClientTest.java | /**
* Copyright (c) 2016 YCSB contributors All rights reserved.
* Copyright 2014 Basho Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.riak;
import java.util.*;
import site.ycsb.ByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assume.assumeNoException;
import static org.junit.Assume.assumeThat;
/**
* Integration tests for the Riak KV client.
*/
public class RiakKVClientTest {
private static RiakKVClient riakClient;
private static final String bucket = "testBucket";
private static final String keyPrefix = "testKey";
private static final int recordsToInsert = 20;
private static final int recordsToScan = 7;
private static final String firstField = "Key number";
private static final String secondField = "Key number doubled";
private static final String thirdField = "Key number square";
private static boolean testStarted = false;
/**
* Creates a cluster for testing purposes.
*/
@BeforeClass
public static void setUpClass() throws Exception {
riakClient = new RiakKVClient();
riakClient.init();
// Set the test bucket environment with the appropriate parameters.
try {
riakClient.setTestEnvironment(bucket);
} catch(Exception e) {
assumeNoException("Unable to configure Riak KV for test, aborting.", e);
}
// Just add some records to work on...
for (int i = 0; i < recordsToInsert; i++) {
// Abort the entire test whenever the dataset population operation fails.
assumeThat("Riak KV is NOT RUNNING, aborting test.",
riakClient.insert(bucket, keyPrefix + String.valueOf(i), StringByteIterator.getByteIteratorMap(
createExpectedHashMap(i))),
is(Status.OK));
}
// Variable to check to determine whether the test has started or not.
testStarted = true;
}
/**
* Shuts down the cluster created.
*/
@AfterClass
public static void tearDownClass() throws Exception {
// Delete all added keys before cleanup ONLY IF TEST ACTUALLY STARTED.
if (testStarted) {
for (int i = 0; i <= recordsToInsert; i++) {
delete(keyPrefix + Integer.toString(i));
}
}
riakClient.cleanup();
}
/**
* Test method for read transaction. It is designed to read two of the three fields stored for each key, to also test
* if the createResultHashMap() function implemented in RiakKVClient.java works as expected.
*/
@Test
public void testRead() {
// Choose a random key to read, among the available ones.
int readKeyNumber = new Random().nextInt(recordsToInsert);
// Prepare two fields to read.
Set<String> fields = new HashSet<>();
fields.add(firstField);
fields.add(thirdField);
// Prepare an expected result.
HashMap<String, String> expectedValue = new HashMap<>();
expectedValue.put(firstField, Integer.toString(readKeyNumber));
expectedValue.put(thirdField, Integer.toString(readKeyNumber * readKeyNumber));
// Define a HashMap to store the actual result.
HashMap<String, ByteIterator> readValue = new HashMap<>();
// If a read transaction has been properly done, then one has to receive a Status.OK return from the read()
// function. Moreover, the actual returned result MUST match the expected one.
assertEquals("Read transaction FAILED.",
Status.OK,
riakClient.read(bucket, keyPrefix + Integer.toString(readKeyNumber), fields, readValue));
assertEquals("Read test FAILED. Actual read transaction value is NOT MATCHING the expected one.",
expectedValue.toString(),
readValue.toString());
}
/**
* Test method for scan transaction. A scan transaction has to be considered successfully completed only if all the
* requested values are read (i.e. scan transaction returns with Status.OK). Moreover, one has to check if the
* obtained results match the expected ones.
*/
@Test
public void testScan() {
// Choose, among the available ones, a random key as starting point for the scan transaction.
int startScanKeyNumber = new Random().nextInt(recordsToInsert - recordsToScan);
// Prepare a HashMap vector to store the scan transaction results.
Vector<HashMap<String, ByteIterator>> scannedValues = new Vector<>();
// Check whether the scan transaction is correctly performed or not.
assertEquals("Scan transaction FAILED.",
Status.OK,
riakClient.scan(bucket, keyPrefix + Integer.toString(startScanKeyNumber), recordsToScan, null,
scannedValues));
// After the scan transaction completes, compare the obtained results with the expected ones.
for (int i = 0; i < recordsToScan; i++) {
assertEquals("Scan test FAILED: the current scanned key is NOT MATCHING the expected one.",
createExpectedHashMap(startScanKeyNumber + i).toString(),
scannedValues.get(i).toString());
}
}
/**
   * Test method for the update transaction. The test updates a key and then restores its previous value. It is
   * considered correct when, after performing the update transaction, one reads back the values just provided.
*/
@Test
public void testUpdate() {
// Choose a random key to read, among the available ones.
int updateKeyNumber = new Random().nextInt(recordsToInsert);
// Define a HashMap to save the previously stored values for eventually restoring them.
HashMap<String, ByteIterator> readValueBeforeUpdate = new HashMap<>();
riakClient.read(bucket, keyPrefix + Integer.toString(updateKeyNumber), null, readValueBeforeUpdate);
// Prepare an update HashMap to store.
HashMap<String, String> updateValue = new HashMap<>();
updateValue.put(firstField, "UPDATED");
updateValue.put(secondField, "UPDATED");
updateValue.put(thirdField, "UPDATED");
// First of all, perform the update and check whether it's failed or not.
assertEquals("Update transaction FAILED.",
Status.OK,
riakClient.update(bucket, keyPrefix + Integer.toString(updateKeyNumber), StringByteIterator
.getByteIteratorMap(updateValue)));
// Then, read the key again and...
HashMap<String, ByteIterator> readValueAfterUpdate = new HashMap<>();
assertEquals("Update test FAILED. Unable to read key value.",
Status.OK,
riakClient.read(bucket, keyPrefix + Integer.toString(updateKeyNumber), null, readValueAfterUpdate));
// ...compare the result with the new one!
assertEquals("Update transaction NOT EXECUTED PROPERLY. Values DID NOT CHANGE.",
updateValue.toString(),
readValueAfterUpdate.toString());
// Finally, restore the previously read key.
assertEquals("Update test FAILED. Unable to restore previous key value.",
Status.OK,
riakClient.update(bucket, keyPrefix + Integer.toString(updateKeyNumber), readValueBeforeUpdate));
}
/**
   * Test method for insert transaction. It is designed to insert a key just after the last key inserted in the
   * setUpClass() phase.
*/
@Test
public void testInsert() {
// Define a HashMap to insert and another one for the comparison operation.
HashMap<String, String> insertValue = createExpectedHashMap(recordsToInsert);
HashMap<String, ByteIterator> readValue = new HashMap<>();
// Check whether the insertion transaction was performed or not.
assertEquals("Insert transaction FAILED.",
Status.OK,
riakClient.insert(bucket, keyPrefix + Integer.toString(recordsToInsert), StringByteIterator.
getByteIteratorMap(insertValue)));
// Finally, compare the insertion performed with the one expected by reading the key.
assertEquals("Insert test FAILED. Unable to read inserted value.",
Status.OK,
riakClient.read(bucket, keyPrefix + Integer.toString(recordsToInsert), null, readValue));
assertEquals("Insert test FAILED. Actual read transaction value is NOT MATCHING the inserted one.",
insertValue.toString(),
readValue.toString());
}
/**
* Test method for delete transaction. The test deletes a key, then performs a read that should give a
* Status.NOT_FOUND response. Finally, it restores the previously read key.
*/
@Test
public void testDelete() {
// Choose a random key to delete, among the available ones.
int deleteKeyNumber = new Random().nextInt(recordsToInsert);
// Define a HashMap to save the previously stored values for its eventual restore.
HashMap<String, ByteIterator> readValueBeforeDelete = new HashMap<>();
riakClient.read(bucket, keyPrefix + Integer.toString(deleteKeyNumber), null, readValueBeforeDelete);
// First of all, delete the key.
assertEquals("Delete transaction FAILED.",
Status.OK,
delete(keyPrefix + Integer.toString(deleteKeyNumber)));
// Then, check if the deletion was actually achieved.
assertEquals("Delete test FAILED. Key NOT deleted.",
Status.NOT_FOUND,
riakClient.read(bucket, keyPrefix + Integer.toString(deleteKeyNumber), null, null));
// Finally, restore the previously deleted key.
assertEquals("Delete test FAILED. Unable to restore previous key value.",
Status.OK,
riakClient.insert(bucket, keyPrefix + Integer.toString(deleteKeyNumber), readValueBeforeDelete));
}
private static Status delete(String key) {
return riakClient.delete(bucket, key);
}
private static HashMap<String, String> createExpectedHashMap(int value) {
HashMap<String, String> values = new HashMap<>();
values.put(firstField, Integer.toString(value));
values.put(secondField, Integer.toString(2 * value));
values.put(thirdField, Integer.toString(value * value));
return values;
}
}
| 10,509 | 38.660377 | 119 | java |
null | NearPMSW-main/baseline/logging/YCSB/riak/src/main/java/site/ycsb/db/riak/package-info.java | /**
* Copyright (c) 2016 YCSB contributors All rights reserved.
* Copyright 2014 Basho Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="http://basho.com/products/riak-kv/">Riak KV</a> 2.x.y.
*
*/
package site.ycsb.db.riak;
| 827 | 33.5 | 87 | java |
null | NearPMSW-main/baseline/logging/YCSB/riak/src/main/java/site/ycsb/db/riak/RiakKVClient.java | /**
* Copyright (c) 2016 YCSB contributors All rights reserved.
* Copyright 2014 Basho Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.riak;
import com.basho.riak.client.api.commands.buckets.StoreBucketProperties;
import com.basho.riak.client.api.commands.kv.StoreValue;
import com.basho.riak.client.api.commands.kv.UpdateValue;
import com.basho.riak.client.core.RiakFuture;
import com.basho.riak.client.core.query.RiakObject;
import com.basho.riak.client.core.query.indexes.LongIntIndex;
import com.basho.riak.client.core.util.BinaryValue;
import site.ycsb.*;
import java.io.IOException;
import java.io.InputStream;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import com.basho.riak.client.api.RiakClient;
import com.basho.riak.client.api.cap.Quorum;
import com.basho.riak.client.api.commands.indexes.IntIndexQuery;
import com.basho.riak.client.api.commands.kv.DeleteValue;
import com.basho.riak.client.api.commands.kv.FetchValue;
import com.basho.riak.client.core.RiakCluster;
import com.basho.riak.client.core.RiakNode;
import com.basho.riak.client.core.query.Location;
import com.basho.riak.client.core.query.Namespace;
import static site.ycsb.db.riak.RiakUtils.createResultHashMap;
import static site.ycsb.db.riak.RiakUtils.getKeyAsLong;
import static site.ycsb.db.riak.RiakUtils.serializeTable;
/**
* Riak KV 2.x.y client for YCSB framework.
*
*/
public class RiakKVClient extends DB {
private static final String HOST_PROPERTY = "riak.hosts";
private static final String PORT_PROPERTY = "riak.port";
private static final String BUCKET_TYPE_PROPERTY = "riak.bucket_type";
private static final String R_VALUE_PROPERTY = "riak.r_val";
private static final String W_VALUE_PROPERTY = "riak.w_val";
private static final String READ_RETRY_COUNT_PROPERTY = "riak.read_retry_count";
private static final String WAIT_TIME_BEFORE_RETRY_PROPERTY = "riak.wait_time_before_retry";
private static final String TRANSACTION_TIME_LIMIT_PROPERTY = "riak.transaction_time_limit";
private static final String STRONG_CONSISTENCY_PROPERTY = "riak.strong_consistency";
private static final String STRONG_CONSISTENT_SCANS_BUCKET_TYPE_PROPERTY = "riak.strong_consistent_scans_bucket_type";
private static final String DEBUG_PROPERTY = "riak.debug";
private static final Status TIME_OUT = new Status("TIME_OUT", "Cluster didn't respond after maximum wait time.");
private String[] hosts;
private int port;
private String bucketType;
private String bucketType2i;
private Quorum rvalue;
private Quorum wvalue;
private int readRetryCount;
private int waitTimeBeforeRetry;
private int transactionTimeLimit;
private boolean strongConsistency;
private String strongConsistentScansBucketType;
private boolean performStrongConsistentScans;
private boolean debug;
private RiakClient riakClient;
private RiakCluster riakCluster;
private void loadDefaultProperties() {
InputStream propFile = RiakKVClient.class.getClassLoader().getResourceAsStream("riak.properties");
Properties propsPF = new Properties(System.getProperties());
try {
propsPF.load(propFile);
} catch (IOException e) {
e.printStackTrace();
}
hosts = propsPF.getProperty(HOST_PROPERTY).split(",");
port = Integer.parseInt(propsPF.getProperty(PORT_PROPERTY));
bucketType = propsPF.getProperty(BUCKET_TYPE_PROPERTY);
rvalue = new Quorum(Integer.parseInt(propsPF.getProperty(R_VALUE_PROPERTY)));
wvalue = new Quorum(Integer.parseInt(propsPF.getProperty(W_VALUE_PROPERTY)));
readRetryCount = Integer.parseInt(propsPF.getProperty(READ_RETRY_COUNT_PROPERTY));
waitTimeBeforeRetry = Integer.parseInt(propsPF.getProperty(WAIT_TIME_BEFORE_RETRY_PROPERTY));
transactionTimeLimit = Integer.parseInt(propsPF.getProperty(TRANSACTION_TIME_LIMIT_PROPERTY));
strongConsistency = Boolean.parseBoolean(propsPF.getProperty(STRONG_CONSISTENCY_PROPERTY));
strongConsistentScansBucketType = propsPF.getProperty(STRONG_CONSISTENT_SCANS_BUCKET_TYPE_PROPERTY);
debug = Boolean.parseBoolean(propsPF.getProperty(DEBUG_PROPERTY));
}
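  // A sketch of what the bundled riak.properties file read above might contain. The keys mirror the
  // *_PROPERTY constants defined at the top of this class; the values shown are illustrative defaults
  // (8087 is the usual Riak protocol-buffers port), not necessarily the ones shipped with the binding:
  //
  //   riak.hosts=127.0.0.1
  //   riak.port=8087
  //   riak.bucket_type=ycsb
  //   riak.r_val=2
  //   riak.w_val=2
  //   riak.read_retry_count=5
  //   riak.wait_time_before_retry=200
  //   riak.transaction_time_limit=10
  //   riak.strong_consistency=false
  //   riak.strong_consistent_scans_bucket_type=
  //   riak.debug=false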
private void loadProperties() {
// First, load the default properties...
loadDefaultProperties();
// ...then, check for some props set at command line!
Properties props = getProperties();
String portString = props.getProperty(PORT_PROPERTY);
if (portString != null) {
port = Integer.parseInt(portString);
}
String hostsString = props.getProperty(HOST_PROPERTY);
if (hostsString != null) {
hosts = hostsString.split(",");
}
String bucketTypeString = props.getProperty(BUCKET_TYPE_PROPERTY);
if (bucketTypeString != null) {
bucketType = bucketTypeString;
}
String rValueString = props.getProperty(R_VALUE_PROPERTY);
if (rValueString != null) {
rvalue = new Quorum(Integer.parseInt(rValueString));
}
String wValueString = props.getProperty(W_VALUE_PROPERTY);
if (wValueString != null) {
wvalue = new Quorum(Integer.parseInt(wValueString));
}
String readRetryCountString = props.getProperty(READ_RETRY_COUNT_PROPERTY);
if (readRetryCountString != null) {
readRetryCount = Integer.parseInt(readRetryCountString);
}
String waitTimeBeforeRetryString = props.getProperty(WAIT_TIME_BEFORE_RETRY_PROPERTY);
if (waitTimeBeforeRetryString != null) {
waitTimeBeforeRetry = Integer.parseInt(waitTimeBeforeRetryString);
}
String transactionTimeLimitString = props.getProperty(TRANSACTION_TIME_LIMIT_PROPERTY);
if (transactionTimeLimitString != null) {
transactionTimeLimit = Integer.parseInt(transactionTimeLimitString);
}
String strongConsistencyString = props.getProperty(STRONG_CONSISTENCY_PROPERTY);
if (strongConsistencyString != null) {
strongConsistency = Boolean.parseBoolean(strongConsistencyString);
}
String strongConsistentScansBucketTypeString = props.getProperty(STRONG_CONSISTENT_SCANS_BUCKET_TYPE_PROPERTY);
if (strongConsistentScansBucketTypeString != null) {
strongConsistentScansBucketType = strongConsistentScansBucketTypeString;
}
String debugString = props.getProperty(DEBUG_PROPERTY);
if (debugString != null) {
debug = Boolean.parseBoolean(debugString);
}
}
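  // Any of the defaults above can also be overridden per run from the YCSB command line with -p,
  // for example (assuming the binding is invoked under the name "riak"):
  //   bin/ycsb run riak -P workloads/workloada -p riak.hosts=10.0.0.1,10.0.0.2 -p riak.strong_consistency=true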
public void init() throws DBException {
loadProperties();
RiakNode.Builder builder = new RiakNode.Builder().withRemotePort(port);
List<RiakNode> nodes = RiakNode.Builder.buildNodes(builder, Arrays.asList(hosts));
riakCluster = new RiakCluster.Builder(nodes).build();
try {
riakCluster.start();
riakClient = new RiakClient(riakCluster);
} catch (Exception e) {
System.err.println("Unable to properly start up the cluster. Reason: " + e.toString());
throw new DBException(e);
}
// If strong consistency is in use, we need to change the bucket-type where the 2i indexes will be stored.
if (strongConsistency && !strongConsistentScansBucketType.isEmpty()) {
      // The 2i indexes have to be stored in the purposely created strongConsistentScansBucketType: this, however, has
// to be done only if the user actually created it! So, if the latter doesn't exist, then the scan transactions
// will not be performed at all.
bucketType2i = strongConsistentScansBucketType;
performStrongConsistentScans = true;
} else {
// If instead eventual consistency is in use, then the 2i indexes have to be stored in the bucket-type
// indicated with the bucketType variable.
bucketType2i = bucketType;
performStrongConsistentScans = false;
}
if (debug) {
System.err.println("DEBUG ENABLED. Configuration parameters:");
System.err.println("-----------------------------------------");
System.err.println("Hosts: " + Arrays.toString(hosts));
System.err.println("Port: " + port);
System.err.println("Bucket Type: " + bucketType);
System.err.println("R Val: " + rvalue.toString());
System.err.println("W Val: " + wvalue.toString());
System.err.println("Read Retry Count: " + readRetryCount);
System.err.println("Wait Time Before Retry: " + waitTimeBeforeRetry + " ms");
System.err.println("Transaction Time Limit: " + transactionTimeLimit + " s");
System.err.println("Consistency model: " + (strongConsistency ? "Strong" : "Eventual"));
if (strongConsistency) {
System.err.println("Strong Consistent Scan Transactions " + (performStrongConsistentScans ? "" : "NOT ") +
"allowed.");
}
}
}
/**
* Read a record from the database. Each field/value pair from the result will be stored in a HashMap.
*
* @param table The name of the table (Riak bucket)
* @param key The record key of the record to read.
* @param fields The list of fields to read, or null for all of them
* @param result A HashMap of field/value pairs for the result
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
Location location = new Location(new Namespace(bucketType, table), key);
FetchValue fv = new FetchValue.Builder(location).withOption(FetchValue.Option.R, rvalue).build();
FetchValue.Response response;
try {
response = fetch(fv);
if (response.isNotFound()) {
if (debug) {
System.err.println("Unable to read key " + key + ". Reason: NOT FOUND");
}
return Status.NOT_FOUND;
}
} catch (TimeoutException e) {
if (debug) {
System.err.println("Unable to read key " + key + ". Reason: TIME OUT");
}
return TIME_OUT;
} catch (Exception e) {
if (debug) {
System.err.println("Unable to read key " + key + ". Reason: " + e.toString());
}
return Status.ERROR;
}
// Create the result HashMap.
HashMap<String, ByteIterator> partialResult = new HashMap<>();
createResultHashMap(fields, response, partialResult);
result.putAll(partialResult);
return Status.OK;
}
/**
* Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored in
* a HashMap.
* Note: The scan operation requires the use of secondary indexes (2i) and LevelDB.
*
* @param table The name of the table (Riak bucket)
* @param startkey The record key of the first record to read.
* @param recordcount The number of records to read
* @param fields The list of fields to read, or null for all of them
* @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
if (strongConsistency && !performStrongConsistentScans) {
return Status.NOT_IMPLEMENTED;
}
    // The strong consistent bucket-type is not capable of storing 2i indexes. So, we need to read them from the fake
    // objects (used only to store indexes) kept in the eventually consistent bucket-type. This is why, when using
    // such a consistency model, the bucketType2i variable is set to strongConsistentScansBucketType.
IntIndexQuery iiq = new IntIndexQuery
.Builder(new Namespace(bucketType2i, table), "key", getKeyAsLong(startkey), Long.MAX_VALUE)
.withMaxResults(recordcount)
.withPaginationSort(true)
.build();
Location location;
RiakFuture<IntIndexQuery.Response, IntIndexQuery> future = riakClient.executeAsync(iiq);
try {
IntIndexQuery.Response response = future.get(transactionTimeLimit, TimeUnit.SECONDS);
List<IntIndexQuery.Response.Entry> entries = response.getEntries();
// If no entries were retrieved, then something bad happened...
if (entries.size() == 0) {
if (debug) {
System.err.println("Unable to scan any record starting from key " + startkey + ", aborting transaction. " +
"Reason: NOT FOUND");
}
return Status.NOT_FOUND;
}
for (IntIndexQuery.Response.Entry entry : entries) {
// If strong consistency is in use, then the actual location of the object we want to read is obtained by
// fetching the key from the one retrieved with the 2i indexes search operation.
if (strongConsistency) {
location = new Location(new Namespace(bucketType, table), entry.getRiakObjectLocation().getKeyAsString());
} else {
location = entry.getRiakObjectLocation();
}
FetchValue fv = new FetchValue.Builder(location)
.withOption(FetchValue.Option.R, rvalue)
.build();
FetchValue.Response keyResponse = fetch(fv);
if (keyResponse.isNotFound()) {
if (debug) {
System.err.println("Unable to scan all requested records starting from key " + startkey + ", aborting " +
"transaction. Reason: NOT FOUND");
}
return Status.NOT_FOUND;
}
// Create the partial result to add to the result vector.
HashMap<String, ByteIterator> partialResult = new HashMap<>();
createResultHashMap(fields, keyResponse, partialResult);
result.add(partialResult);
}
} catch (TimeoutException e) {
if (debug) {
System.err.println("Unable to scan all requested records starting from key " + startkey + ", aborting " +
"transaction. Reason: TIME OUT");
}
return TIME_OUT;
} catch (Exception e) {
if (debug) {
System.err.println("Unable to scan all records starting from key " + startkey + ", aborting transaction. " +
"Reason: " + e.toString());
}
return Status.ERROR;
}
return Status.OK;
}
/**
   * Tries to perform a read and, whenever it fails, retries it. It tries as many times as indicated,
   * even if the function riakClient.execute(fv) throws an exception. This is needed for those situations in which the
   * cluster is unable to respond properly due to overload. Note however that if the cluster doesn't respond after
* transactionTimeLimit, the transaction is discarded immediately.
*
* @param fv The value to fetch from the cluster.
*/
private FetchValue.Response fetch(FetchValue fv) throws TimeoutException {
FetchValue.Response response = null;
for (int i = 0; i < readRetryCount; i++) {
RiakFuture<FetchValue.Response, Location> future = riakClient.executeAsync(fv);
try {
response = future.get(transactionTimeLimit, TimeUnit.SECONDS);
if (!response.isNotFound()) {
break;
}
} catch (TimeoutException e) {
// Let the callee decide how to handle this exception...
throw new TimeoutException();
} catch (Exception e) {
// Sleep for a few ms before retrying...
try {
Thread.sleep(waitTimeBeforeRetry);
} catch (InterruptedException e1) {
e1.printStackTrace();
}
}
}
return response;
}
/**
* Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key. Also creates a secondary index (2i) for each record consisting of the key
* converted to long to be used for the scan operation.
*
* @param table The name of the table (Riak bucket)
* @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
Location location = new Location(new Namespace(bucketType, table), key);
RiakObject object = new RiakObject();
// Strong consistency doesn't support secondary indexing, but eventually consistent model does. So, we can mock a
// 2i usage by creating a fake object stored in an eventually consistent bucket-type with the SAME KEY THAT THE
// ACTUAL OBJECT HAS. This latter is obviously stored in the strong consistent bucket-type indicated with the
// riak.bucket_type property.
if (strongConsistency && performStrongConsistentScans) {
// Create a fake object to store in the default bucket-type just to keep track of the 2i indices.
Location fakeLocation = new Location(new Namespace(strongConsistentScansBucketType, table), key);
      // Obviously, we want the fake object to contain as little data as possible. We can't create an empty object,
      // so we have to choose the minimum data size allowed: one byte.
RiakObject fakeObject = new RiakObject();
fakeObject.setValue(BinaryValue.create(new byte[]{0x00}));
fakeObject.getIndexes().getIndex(LongIntIndex.named("key_int")).add(getKeyAsLong(key));
StoreValue fakeStore = new StoreValue.Builder(fakeObject)
.withLocation(fakeLocation)
.build();
      // We don't wait for the operation to complete, because doing so would slow down the client and make our
      // solution too heavy to be seen as a valid compromise. This obviously means that under heavy load conditions a
      // scan operation could fail due to an unfinished "fakeStore".
riakClient.executeAsync(fakeStore);
} else if (!strongConsistency) {
      // The next operation is useless when using the strong consistency model, so it's OK to perform it only when
      // using eventual consistency.
object.getIndexes().getIndex(LongIntIndex.named("key_int")).add(getKeyAsLong(key));
}
// Store proper values into the object.
object.setValue(BinaryValue.create(serializeTable(values)));
StoreValue store = new StoreValue.Builder(object)
.withOption(StoreValue.Option.W, wvalue)
.withLocation(location)
.build();
RiakFuture<StoreValue.Response, Location> future = riakClient.executeAsync(store);
try {
future.get(transactionTimeLimit, TimeUnit.SECONDS);
} catch (TimeoutException e) {
if (debug) {
System.err.println("Unable to insert key " + key + ". Reason: TIME OUT");
}
return TIME_OUT;
} catch (Exception e) {
if (debug) {
System.err.println("Unable to insert key " + key + ". Reason: " + e.toString());
}
return Status.ERROR;
}
return Status.OK;
}
/**
* Auxiliary class needed for object substitution within the update operation. It is a fundamental part of the
   * fetch-update (locally)-store cycle described by Basho to properly perform a strongly consistent update.
*/
private static final class UpdateEntity extends UpdateValue.Update<RiakObject> {
private final RiakObject object;
private UpdateEntity(RiakObject object) {
this.object = object;
}
//Simply returns the object.
@Override
public RiakObject apply(RiakObject original) {
return object;
}
}
/**
* Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key, overwriting any existing values with the same field name.
*
* @param table The name of the table (Riak bucket)
* @param key The record key of the record to write.
* @param values A HashMap of field/value pairs to update in the record
   * @return {@code Status.OK} on success, an error {@code Status} otherwise.
*/
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
    // If the eventual consistency model is in use, then an update operation is practically equivalent to an insert.
if (!strongConsistency) {
return insert(table, key, values);
}
Location location = new Location(new Namespace(bucketType, table), key);
UpdateValue update = new UpdateValue.Builder(location)
.withUpdate(new UpdateEntity(new RiakObject().setValue(BinaryValue.create(serializeTable(values)))))
.build();
RiakFuture<UpdateValue.Response, Location> future = riakClient.executeAsync(update);
try {
// For some reason, the update transaction doesn't throw any exception when no cluster has been started, so one
// needs to check whether it was done or not. When calling the wasUpdated() function with no nodes available, a
// NullPointerException is thrown.
      // Moreover, such an exception could be thrown when multiple threads are trying to update the same key or, more
      // generally, when the system is being queried by many clients (i.e. overloaded). This is a known limitation of
// Riak KV's strong consistency implementation.
future.get(transactionTimeLimit, TimeUnit.SECONDS).wasUpdated();
} catch (TimeoutException e) {
if (debug) {
System.err.println("Unable to update key " + key + ". Reason: TIME OUT");
}
return TIME_OUT;
} catch (Exception e) {
if (debug) {
System.err.println("Unable to update key " + key + ". Reason: " + e.toString());
}
return Status.ERROR;
}
return Status.OK;
}
/**
* Delete a record from the database.
*
* @param table The name of the table (Riak bucket)
* @param key The record key of the record to delete.
   * @return {@code Status.OK} on success, an error {@code Status} otherwise.
*/
@Override
public Status delete(String table, String key) {
Location location = new Location(new Namespace(bucketType, table), key);
DeleteValue dv = new DeleteValue.Builder(location).build();
RiakFuture<Void, Location> future = riakClient.executeAsync(dv);
try {
future.get(transactionTimeLimit, TimeUnit.SECONDS);
} catch (TimeoutException e) {
if (debug) {
System.err.println("Unable to delete key " + key + ". Reason: TIME OUT");
}
return TIME_OUT;
} catch (Exception e) {
if (debug) {
System.err.println("Unable to delete key " + key + ". Reason: " + e.toString());
}
return Status.ERROR;
}
return Status.OK;
}
public void cleanup() throws DBException {
try {
riakCluster.shutdown();
} catch (Exception e) {
System.err.println("Unable to properly shutdown the cluster. Reason: " + e.toString());
throw new DBException(e);
}
}
/**
   * Auxiliary function needed for testing. It configures the default bucket-type to take care of the consistency
   * problem by disallowing sibling creation. Moreover, it disables strong consistency, because we don't have the
   * possibility to create a proper bucket-type with which to fake 2i index usage.
*
* @param bucket The bucket name.
* @throws Exception Thrown if something bad happens.
*/
void setTestEnvironment(String bucket) throws Exception {
bucketType = "default";
bucketType2i = bucketType;
strongConsistency = false;
Namespace ns = new Namespace(bucketType, bucket);
StoreBucketProperties newBucketProperties = new StoreBucketProperties.Builder(ns).withAllowMulti(false).build();
riakClient.execute(newBucketProperties);
}
}
| 23,996 | 39.263423 | 120 | java |
null | NearPMSW-main/baseline/logging/YCSB/riak/src/main/java/site/ycsb/db/riak/RiakUtils.java | /**
* Copyright (c) 2016 YCSB contributors All rights reserved.
* Copyright 2014 Basho Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.riak;
import java.io.*;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import com.basho.riak.client.api.commands.kv.FetchValue;
import site.ycsb.ByteArrayByteIterator;
import site.ycsb.ByteIterator;
import static com.google.common.base.Preconditions.checkArgument;
/**
* Utility class for Riak KV Client.
*
*/
final class RiakUtils {
private RiakUtils() {
super();
}
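  /**
   * Encodes an int into a 4-byte big-endian array; used as the length prefix in the serialized table format.
   */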
private static byte[] toBytes(final int anInteger) {
byte[] aResult = new byte[4];
aResult[0] = (byte) (anInteger >> 24);
aResult[1] = (byte) (anInteger >> 16);
aResult[2] = (byte) (anInteger >> 8);
aResult[3] = (byte) (anInteger /* >> 0 */);
return aResult;
}
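  /**
   * Decodes a 4-byte big-endian array back into an int; the inverse of {@link #toBytes(int)}.
   */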
private static int fromBytes(final byte[] aByteArray) {
checkArgument(aByteArray.length == 4);
return (aByteArray[0] << 24) | (aByteArray[1] & 0xFF) << 16 | (aByteArray[2] & 0xFF) << 8 | (aByteArray[3] & 0xFF);
}
private static void close(final OutputStream anOutputStream) {
try {
anOutputStream.close();
} catch (IOException e) {
e.printStackTrace();
}
}
private static void close(final InputStream anInputStream) {
try {
anInputStream.close();
} catch (IOException e) {
e.printStackTrace();
}
}
/**
   * Serializes a Map, transforming the contained list of (String, ByteIterator) pairs into a byte array.
   *
   * @param aTable A Map to serialize.
   * @return A byte array containing the serialized table, encoded as repeated
   *         [length][column name][length][column value] records.
*/
static byte[] serializeTable(Map<String, ByteIterator> aTable) {
final ByteArrayOutputStream anOutputStream = new ByteArrayOutputStream();
final Set<Map.Entry<String, ByteIterator>> theEntries = aTable.entrySet();
try {
for (final Map.Entry<String, ByteIterator> anEntry : theEntries) {
final byte[] aColumnName = anEntry.getKey().getBytes();
anOutputStream.write(toBytes(aColumnName.length));
anOutputStream.write(aColumnName);
final byte[] aColumnValue = anEntry.getValue().toArray();
anOutputStream.write(toBytes(aColumnValue.length));
anOutputStream.write(aColumnValue);
}
return anOutputStream.toByteArray();
} catch (IOException e) {
throw new IllegalStateException(e);
} finally {
close(anOutputStream);
}
}
/**
* Deserializes an input byte array, transforming it into a list of (String, ByteIterator) pairs (i.e. a Map).
*
* @param aValue A byte array containing the table to deserialize.
* @param theResult A Map containing the deserialized table.
*/
private static void deserializeTable(final byte[] aValue, final Map<String, ByteIterator> theResult) {
final ByteArrayInputStream anInputStream = new ByteArrayInputStream(aValue);
byte[] aSizeBuffer = new byte[4];
try {
while (anInputStream.available() > 0) {
anInputStream.read(aSizeBuffer);
final int aColumnNameLength = fromBytes(aSizeBuffer);
final byte[] aColumnNameBuffer = new byte[aColumnNameLength];
anInputStream.read(aColumnNameBuffer);
anInputStream.read(aSizeBuffer);
final int aColumnValueLength = fromBytes(aSizeBuffer);
final byte[] aColumnValue = new byte[aColumnValueLength];
anInputStream.read(aColumnValue);
theResult.put(new String(aColumnNameBuffer), new ByteArrayByteIterator(aColumnValue));
}
} catch (Exception e) {
throw new IllegalStateException(e);
} finally {
close(anInputStream);
}
}
/**
* Obtains a Long number from a key string. This will be the key used by Riak for all the transactions.
*
* @param key The key to convert from String to Long.
* @return A Long number parsed from the key String.
*/
static Long getKeyAsLong(String key) {
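    // Strip any leading alphabetic prefix (e.g. "user" in "user123") so only the numeric part is parsed.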
String keyString = key.replaceFirst("[a-zA-Z]*", "");
return Long.parseLong(keyString);
}
/**
   * Function that retrieves all the fields requested by a read or scan operation and puts them in the result
   * HashMap.
   *
   * @param fields The list of fields to read, or null for all of them.
   * @param response The FetchValue.Response object whose retrieved value contains the serialized record.
   * @param resultHashMap The HashMap to return as result.
*/
static void createResultHashMap(Set<String> fields, FetchValue.Response response,
HashMap<String, ByteIterator>resultHashMap) {
// If everything went fine, then a result must be given. Such an object is a hash table containing the (field,
// value) pairs based on the requested fields. Note that in a read operation, ONLY ONE OBJECT IS RETRIEVED!
    // The following line retrieves the previously serialized table which was stored with an insert transaction.
byte[] responseFieldsAndValues = response.getValues().get(0).getValue().getValue();
// Deserialize the stored response table.
HashMap<String, ByteIterator> deserializedTable = new HashMap<>();
deserializeTable(responseFieldsAndValues, deserializedTable);
// If only specific fields are requested, then only these should be put in the result object!
if (fields != null) {
// Populate the HashMap to provide as result.
for (Object field : fields.toArray()) {
// Comparison between a requested field and the ones retrieved. If they're equal (i.e. the get() operation
// DOES NOT return a null value), then proceed to store the pair in the resultHashMap.
ByteIterator value = deserializedTable.get(field);
if (value != null) {
resultHashMap.put((String) field, value);
}
}
} else {
// If, instead, no field is specified, then all those retrieved must be provided as result.
for (String field : deserializedTable.keySet()) {
resultHashMap.put(field, deserializedTable.get(field));
}
}
}
}
| 6,643 | 34.153439 | 119 | java |
null | NearPMSW-main/baseline/logging/YCSB/infinispan/src/main/java/site/ycsb/db/package-info.java | /*
* Copyright (c) 2015-2016 YCSB Contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="http://infinispan.org/">Infinispan</a>.
*/
package site.ycsb.db;
| 767 | 32.391304 | 72 | java |
null | NearPMSW-main/baseline/logging/YCSB/infinispan/src/main/java/site/ycsb/db/RemoteCacheManagerHolder.java | /**
* Copyright (c) 2015-2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import java.util.Properties;
import org.infinispan.client.hotrod.RemoteCacheManager;
/**
* Utility class to ensure only a single RemoteCacheManager is created.
*/
final class RemoteCacheManagerHolder {
private static volatile RemoteCacheManager cacheManager = null;
private RemoteCacheManagerHolder() {
}
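  /**
   * Returns the shared RemoteCacheManager, creating it lazily with double-checked locking so that
   * all client threads reuse a single instance.
   */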
static RemoteCacheManager getInstance(Properties props) {
RemoteCacheManager result = cacheManager;
if (result == null) {
synchronized (RemoteCacheManagerHolder.class) {
result = cacheManager;
if (result == null) {
          result = new RemoteCacheManager(props);
          // Publish the same instance that is returned; creating a second manager here would leak a connection pool.
          cacheManager = result;
}
}
}
return result;
}
}
| 1,407 | 28.333333 | 71 | java |
null | NearPMSW-main/baseline/logging/YCSB/infinispan/src/main/java/site/ycsb/db/InfinispanClient.java | /**
* Copyright (c) 2012-2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import org.infinispan.Cache;
import org.infinispan.atomic.AtomicMap;
import org.infinispan.atomic.AtomicMapLookup;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
/**
* This is a client implementation for Infinispan 5.x.
*/
public class InfinispanClient extends DB {
private static final Log LOGGER = LogFactory.getLog(InfinispanClient.class);
// An optimisation for clustered mode
private final boolean clustered;
private EmbeddedCacheManager infinispanManager;
public InfinispanClient() {
clustered = Boolean.getBoolean("infinispan.clustered");
}
public void init() throws DBException {
try {
infinispanManager = new DefaultCacheManager("infinispan-config.xml");
} catch (IOException e) {
throw new DBException(e);
}
}
public void cleanup() {
infinispanManager.stop();
infinispanManager = null;
}
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
try {
Map<String, String> row;
if (clustered) {
row = AtomicMapLookup.getAtomicMap(infinispanManager.getCache(table), key, false);
} else {
Cache<String, Map<String, String>> cache = infinispanManager.getCache(table);
row = cache.get(key);
}
if (row != null) {
result.clear();
if (fields == null || fields.isEmpty()) {
StringByteIterator.putAllAsByteIterators(result, row);
} else {
for (String field : fields) {
result.put(field, new StringByteIterator(row.get(field)));
}
}
}
return Status.OK;
} catch (Exception e) {
LOGGER.error(e);
return Status.ERROR;
}
}
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
LOGGER.warn("Infinispan does not support scan semantics");
return Status.OK;
}
public Status update(String table, String key, Map<String, ByteIterator> values) {
try {
if (clustered) {
AtomicMap<String, String> row = AtomicMapLookup.getAtomicMap(infinispanManager.getCache(table), key);
StringByteIterator.putAllAsStrings(row, values);
} else {
Cache<String, Map<String, String>> cache = infinispanManager.getCache(table);
Map<String, String> row = cache.get(key);
if (row == null) {
row = StringByteIterator.getStringMap(values);
cache.put(key, row);
} else {
StringByteIterator.putAllAsStrings(row, values);
}
}
return Status.OK;
} catch (Exception e) {
LOGGER.error(e);
return Status.ERROR;
}
}
public Status insert(String table, String key, Map<String, ByteIterator> values) {
try {
if (clustered) {
AtomicMap<String, String> row = AtomicMapLookup.getAtomicMap(infinispanManager.getCache(table), key);
row.clear();
StringByteIterator.putAllAsStrings(row, values);
} else {
infinispanManager.getCache(table).put(key, values);
}
return Status.OK;
} catch (Exception e) {
LOGGER.error(e);
return Status.ERROR;
}
}
public Status delete(String table, String key) {
try {
if (clustered) {
AtomicMapLookup.removeAtomicMap(infinispanManager.getCache(table), key);
} else {
infinispanManager.getCache(table).remove(key);
}
return Status.OK;
} catch (Exception e) {
LOGGER.error(e);
return Status.ERROR;
}
}
}
| 4,646 | 29.175325 | 109 | java |
null | NearPMSW-main/baseline/logging/YCSB/infinispan/src/main/java/site/ycsb/db/InfinispanRemoteClient.java | /**
* Copyright (c) 2015-2016 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import site.ycsb.*;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.RemoteCacheManager;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
/**
* This is a client implementation for Infinispan 5.x in client-server mode.
*/
public class InfinispanRemoteClient extends DB {
private static final Log LOGGER = LogFactory.getLog(InfinispanRemoteClient.class);
private RemoteCacheManager remoteIspnManager;
private String cacheName = null;
@Override
public void init() throws DBException {
remoteIspnManager = RemoteCacheManagerHolder.getInstance(getProperties());
cacheName = getProperties().getProperty("cache");
}
@Override
public void cleanup() {
remoteIspnManager.stop();
remoteIspnManager = null;
}
@Override
public Status insert(String table, String recordKey, Map<String, ByteIterator> values) {
String compositKey = createKey(table, recordKey);
Map<String, String> stringValues = new HashMap<>();
StringByteIterator.putAllAsStrings(stringValues, values);
try {
cache().put(compositKey, stringValues);
return Status.OK;
} catch (Exception e) {
LOGGER.error(e);
return Status.ERROR;
}
}
@Override
public Status read(String table, String recordKey, Set<String> fields, Map<String, ByteIterator> result) {
String compositKey = createKey(table, recordKey);
try {
Map<String, String> values = cache().get(compositKey);
if (values == null || values.isEmpty()) {
return Status.NOT_FOUND;
}
if (fields == null) { //get all field/value pairs
StringByteIterator.putAllAsByteIterators(result, values);
} else {
for (String field : fields) {
String value = values.get(field);
if (value != null) {
result.put(field, new StringByteIterator(value));
}
}
}
return Status.OK;
} catch (Exception e) {
LOGGER.error(e);
return Status.ERROR;
}
}
@Override
public Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
LOGGER.warn("Infinispan does not support scan semantics");
return Status.NOT_IMPLEMENTED;
}
@Override
public Status update(String table, String recordKey, Map<String, ByteIterator> values) {
String compositKey = createKey(table, recordKey);
try {
Map<String, String> stringValues = new HashMap<>();
StringByteIterator.putAllAsStrings(stringValues, values);
cache().put(compositKey, stringValues);
return Status.OK;
} catch (Exception e) {
LOGGER.error(e);
return Status.ERROR;
}
}
@Override
public Status delete(String table, String recordKey) {
String compositKey = createKey(table, recordKey);
try {
cache().remove(compositKey);
return Status.OK;
} catch (Exception e) {
LOGGER.error(e);
return Status.ERROR;
}
}
private RemoteCache<String, Map<String, String>> cache() {
if (this.cacheName != null) {
return remoteIspnManager.getCache(cacheName);
} else {
return remoteIspnManager.getCache();
}
}
private String createKey(String table, String recordKey) {
return table + "-" + recordKey;
}
}
| 4,164 | 28.75 | 108 | java |
null | NearPMSW-main/baseline/logging/YCSB/rocksdb/src/test/java/site/ycsb/db/rocksdb/RocksDBClientTest.java | /*
* Copyright (c) 2018 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.rocksdb;
import site.ycsb.ByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.workloads.CoreWorkload;
import org.junit.*;
import org.junit.rules.TemporaryFolder;
import java.util.*;
import static org.junit.Assert.assertEquals;
public class RocksDBClientTest {
@Rule
public TemporaryFolder tmpFolder = new TemporaryFolder();
private static final String MOCK_TABLE = "ycsb";
private static final String MOCK_KEY0 = "0";
private static final String MOCK_KEY1 = "1";
private static final String MOCK_KEY2 = "2";
private static final String MOCK_KEY3 = "3";
private static final int NUM_RECORDS = 10;
private static final String FIELD_PREFIX = CoreWorkload.FIELD_NAME_PREFIX_DEFAULT;
private static final Map<String, ByteIterator> MOCK_DATA;
static {
MOCK_DATA = new HashMap<>(NUM_RECORDS);
for (int i = 0; i < NUM_RECORDS; i++) {
MOCK_DATA.put(FIELD_PREFIX + i, new StringByteIterator("value" + i));
}
}
private RocksDBClient instance;
@Before
public void setup() throws Exception {
instance = new RocksDBClient();
final Properties properties = new Properties();
properties.setProperty(RocksDBClient.PROPERTY_ROCKSDB_DIR, tmpFolder.getRoot().getAbsolutePath());
instance.setProperties(properties);
instance.init();
}
@After
public void tearDown() throws Exception {
instance.cleanup();
}
@Test
public void insertAndRead() throws Exception {
final Status insertResult = instance.insert(MOCK_TABLE, MOCK_KEY0, MOCK_DATA);
assertEquals(Status.OK, insertResult);
final Set<String> fields = MOCK_DATA.keySet();
final Map<String, ByteIterator> resultParam = new HashMap<>(NUM_RECORDS);
final Status readResult = instance.read(MOCK_TABLE, MOCK_KEY0, fields, resultParam);
assertEquals(Status.OK, readResult);
}
@Test
public void insertAndDelete() throws Exception {
final Status insertResult = instance.insert(MOCK_TABLE, MOCK_KEY1, MOCK_DATA);
assertEquals(Status.OK, insertResult);
final Status result = instance.delete(MOCK_TABLE, MOCK_KEY1);
assertEquals(Status.OK, result);
}
@Test
public void insertUpdateAndRead() throws Exception {
final Map<String, ByteIterator> newValues = new HashMap<>(NUM_RECORDS);
final Status insertResult = instance.insert(MOCK_TABLE, MOCK_KEY2, MOCK_DATA);
assertEquals(Status.OK, insertResult);
for (int i = 0; i < NUM_RECORDS; i++) {
newValues.put(FIELD_PREFIX + i, new StringByteIterator("newvalue" + i));
}
final Status result = instance.update(MOCK_TABLE, MOCK_KEY2, newValues);
assertEquals(Status.OK, result);
//validate that the values changed
final Map<String, ByteIterator> resultParam = new HashMap<>(NUM_RECORDS);
instance.read(MOCK_TABLE, MOCK_KEY2, MOCK_DATA.keySet(), resultParam);
for (int i = 0; i < NUM_RECORDS; i++) {
assertEquals("newvalue" + i, resultParam.get(FIELD_PREFIX + i).toString());
}
}
@Test
public void insertAndScan() throws Exception {
final Status insertResult = instance.insert(MOCK_TABLE, MOCK_KEY3, MOCK_DATA);
assertEquals(Status.OK, insertResult);
final Set<String> fields = MOCK_DATA.keySet();
final Vector<HashMap<String, ByteIterator>> resultParam = new Vector<>(NUM_RECORDS);
final Status result = instance.scan(MOCK_TABLE, MOCK_KEY3, NUM_RECORDS, fields, resultParam);
assertEquals(Status.OK, result);
}
}
| 4,146 | 32.443548 | 102 | java |
null | NearPMSW-main/baseline/logging/YCSB/rocksdb/src/test/java/site/ycsb/db/rocksdb/RocksDBOptionsFileTest.java | /*
* Copyright (c) 2019 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.rocksdb;
import org.junit.*;
import org.junit.rules.TemporaryFolder;
import org.rocksdb.*;
import java.util.*;
import static org.junit.Assert.assertEquals;
public class RocksDBOptionsFileTest {
@Rule
public TemporaryFolder tmpFolder = new TemporaryFolder();
private RocksDBClient instance;
@Test
public void loadOptionsFromFile() throws Exception {
final String optionsPath = RocksDBClient.class.getClassLoader().getResource("testcase.ini").getPath();
final String dbPath = tmpFolder.getRoot().getAbsolutePath();
initDbWithOptionsFile(dbPath, optionsPath);
checkOptions(dbPath);
}
private void initDbWithOptionsFile(final String dbPath, final String optionsPath) throws Exception {
instance = new RocksDBClient();
final Properties properties = new Properties();
properties.setProperty(RocksDBClient.PROPERTY_ROCKSDB_DIR, dbPath);
properties.setProperty(RocksDBClient.PROPERTY_ROCKSDB_OPTIONS_FILE, optionsPath);
instance.setProperties(properties);
instance.init();
instance.cleanup();
}
private void checkOptions(final String dbPath) throws Exception {
final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
final DBOptions dbOptions = new DBOptions();
RocksDB.loadLibrary();
OptionsUtil.loadLatestOptions(dbPath, Env.getDefault(), dbOptions, cfDescriptors);
try {
assertEquals(dbOptions.walSizeLimitMB(), 42);
// the two CFs should be "default" and "usertable"
assertEquals(cfDescriptors.size(), 2);
assertEquals(cfDescriptors.get(0).getOptions().ttl(), 42);
assertEquals(cfDescriptors.get(1).getOptions().ttl(), 42);
}
finally {
dbOptions.close();
}
}
}
| 2,398 | 30.565789 | 106 | java |
null | NearPMSW-main/baseline/logging/YCSB/rocksdb/src/main/java/site/ycsb/db/rocksdb/package-info.java | /*
* Copyright (c) 2018 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The RocksDB Java binding for <a href="http://rocksdb.org/">RocksDB</a>.
*/
package site.ycsb.db.rocksdb;
| 772 | 32.608696 | 74 | java |
null | NearPMSW-main/baseline/logging/YCSB/rocksdb/src/main/java/site/ycsb/db/rocksdb/RocksDBClient.java | /*
* Copyright (c) 2018 - 2019 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.rocksdb;
import site.ycsb.*;
import site.ycsb.Status;
import net.jcip.annotations.GuardedBy;
import org.rocksdb.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.nio.ByteBuffer;
import java.nio.file.*;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import static java.nio.charset.StandardCharsets.UTF_8;
/**
* RocksDB binding for <a href="http://rocksdb.org/">RocksDB</a>.
*
* See {@code rocksdb/README.md} for details.
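 * <p>
 * The database directory is taken from the {@code rocksdb.dir} property; an optional RocksDB
 * options file can be supplied via the {@code rocksdb.optionsfile} property.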
*/
public class RocksDBClient extends DB {
static final String PROPERTY_ROCKSDB_DIR = "rocksdb.dir";
static final String PROPERTY_ROCKSDB_OPTIONS_FILE = "rocksdb.optionsfile";
private static final String COLUMN_FAMILY_NAMES_FILENAME = "CF_NAMES";
private static final Logger LOGGER = LoggerFactory.getLogger(RocksDBClient.class);
@GuardedBy("RocksDBClient.class") private static Path rocksDbDir = null;
@GuardedBy("RocksDBClient.class") private static Path optionsFile = null;
@GuardedBy("RocksDBClient.class") private static RocksObject dbOptions = null;
@GuardedBy("RocksDBClient.class") private static RocksDB rocksDb = null;
@GuardedBy("RocksDBClient.class") private static int references = 0;
private static final ConcurrentMap<String, ColumnFamily> COLUMN_FAMILIES = new ConcurrentHashMap<>();
private static final ConcurrentMap<String, Lock> COLUMN_FAMILY_LOCKS = new ConcurrentHashMap<>();
@Override
public void init() throws DBException {
synchronized(RocksDBClient.class) {
if(rocksDb == null) {
rocksDbDir = Paths.get(getProperties().getProperty(PROPERTY_ROCKSDB_DIR));
LOGGER.info("RocksDB data dir: " + rocksDbDir);
String optionsFileString = getProperties().getProperty(PROPERTY_ROCKSDB_OPTIONS_FILE);
if (optionsFileString != null) {
optionsFile = Paths.get(optionsFileString);
LOGGER.info("RocksDB options file: " + optionsFile);
}
try {
if (optionsFile != null) {
rocksDb = initRocksDBWithOptionsFile();
} else {
rocksDb = initRocksDB();
}
} catch (final IOException | RocksDBException e) {
throw new DBException(e);
}
}
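      // The RocksDB handle is shared by every DB client instance; count references so that
      // cleanup() only closes the database when the last instance is released.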
references++;
}
}
/**
* Initializes and opens the RocksDB database.
*
   * Should only be called from within a {@code synchronized(RocksDBClient.class)} block.
*
* @return The initialized and open RocksDB instance.
*/
private RocksDB initRocksDBWithOptionsFile() throws IOException, RocksDBException {
if(!Files.exists(rocksDbDir)) {
Files.createDirectories(rocksDbDir);
}
final DBOptions options = new DBOptions();
final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
RocksDB.loadLibrary();
OptionsUtil.loadOptionsFromFile(optionsFile.toAbsolutePath().toString(), Env.getDefault(), options, cfDescriptors);
dbOptions = options;
final RocksDB db = RocksDB.open(options, rocksDbDir.toAbsolutePath().toString(), cfDescriptors, cfHandles);
for(int i = 0; i < cfDescriptors.size(); i++) {
String cfName = new String(cfDescriptors.get(i).getName());
final ColumnFamilyHandle cfHandle = cfHandles.get(i);
final ColumnFamilyOptions cfOptions = cfDescriptors.get(i).getOptions();
COLUMN_FAMILIES.put(cfName, new ColumnFamily(cfHandle, cfOptions));
}
return db;
}
/**
* Initializes and opens the RocksDB database.
*
   * Should only be called from within a {@code synchronized(RocksDBClient.class)} block.
*
* @return The initialized and open RocksDB instance.
*/
private RocksDB initRocksDB() throws IOException, RocksDBException {
if(!Files.exists(rocksDbDir)) {
Files.createDirectories(rocksDbDir);
}
final List<String> cfNames = loadColumnFamilyNames();
final List<ColumnFamilyOptions> cfOptionss = new ArrayList<>();
final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
for(final String cfName : cfNames) {
final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
.optimizeLevelStyleCompaction();
final ColumnFamilyDescriptor cfDescriptor = new ColumnFamilyDescriptor(
cfName.getBytes(UTF_8),
cfOptions
);
cfOptionss.add(cfOptions);
cfDescriptors.add(cfDescriptor);
}
final int rocksThreads = Runtime.getRuntime().availableProcessors() * 2;
if(cfDescriptors.isEmpty()) {
final Options options = new Options()
.optimizeLevelStyleCompaction()
.setCreateIfMissing(true)
.setCreateMissingColumnFamilies(true)
.setIncreaseParallelism(rocksThreads)
.setMaxBackgroundCompactions(rocksThreads)
.setInfoLogLevel(InfoLogLevel.INFO_LEVEL);
dbOptions = options;
return RocksDB.open(options, rocksDbDir.toAbsolutePath().toString());
} else {
final DBOptions options = new DBOptions()
.setCreateIfMissing(true)
.setCreateMissingColumnFamilies(true)
.setIncreaseParallelism(rocksThreads)
.setMaxBackgroundCompactions(rocksThreads)
.setInfoLogLevel(InfoLogLevel.INFO_LEVEL);
dbOptions = options;
final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
final RocksDB db = RocksDB.open(options, rocksDbDir.toAbsolutePath().toString(), cfDescriptors, cfHandles);
for(int i = 0; i < cfNames.size(); i++) {
COLUMN_FAMILIES.put(cfNames.get(i), new ColumnFamily(cfHandles.get(i), cfOptionss.get(i)));
}
return db;
}
}
@Override
public void cleanup() throws DBException {
super.cleanup();
synchronized (RocksDBClient.class) {
try {
if (references == 1) {
for (final ColumnFamily cf : COLUMN_FAMILIES.values()) {
cf.getHandle().close();
}
rocksDb.close();
rocksDb = null;
dbOptions.close();
dbOptions = null;
for (final ColumnFamily cf : COLUMN_FAMILIES.values()) {
cf.getOptions().close();
}
saveColumnFamilyNames();
COLUMN_FAMILIES.clear();
rocksDbDir = null;
}
} catch (final IOException e) {
throw new DBException(e);
} finally {
references--;
}
}
}
@Override
public Status read(final String table, final String key, final Set<String> fields,
final Map<String, ByteIterator> result) {
try {
if (!COLUMN_FAMILIES.containsKey(table)) {
createColumnFamily(table);
}
final ColumnFamilyHandle cf = COLUMN_FAMILIES.get(table).getHandle();
final byte[] values = rocksDb.get(cf, key.getBytes(UTF_8));
if(values == null) {
return Status.NOT_FOUND;
}
deserializeValues(values, fields, result);
return Status.OK;
} catch(final RocksDBException e) {
LOGGER.error(e.getMessage(), e);
return Status.ERROR;
}
}
@Override
public Status scan(final String table, final String startkey, final int recordcount, final Set<String> fields,
final Vector<HashMap<String, ByteIterator>> result) {
try {
if (!COLUMN_FAMILIES.containsKey(table)) {
createColumnFamily(table);
}
final ColumnFamilyHandle cf = COLUMN_FAMILIES.get(table).getHandle();
try(final RocksIterator iterator = rocksDb.newIterator(cf)) {
int iterations = 0;
for (iterator.seek(startkey.getBytes(UTF_8)); iterator.isValid() && iterations < recordcount;
iterator.next()) {
final HashMap<String, ByteIterator> values = new HashMap<>();
deserializeValues(iterator.value(), fields, values);
result.add(values);
iterations++;
}
}
return Status.OK;
} catch(final RocksDBException e) {
LOGGER.error(e.getMessage(), e);
return Status.ERROR;
}
}
@Override
public Status update(final String table, final String key, final Map<String, ByteIterator> values) {
//TODO(AR) consider if this would be faster with merge operator
try {
if (!COLUMN_FAMILIES.containsKey(table)) {
createColumnFamily(table);
}
final ColumnFamilyHandle cf = COLUMN_FAMILIES.get(table).getHandle();
final Map<String, ByteIterator> result = new HashMap<>();
final byte[] currentValues = rocksDb.get(cf, key.getBytes(UTF_8));
if(currentValues == null) {
return Status.NOT_FOUND;
}
deserializeValues(currentValues, null, result);
//update
result.putAll(values);
//store
rocksDb.put(cf, key.getBytes(UTF_8), serializeValues(result));
return Status.OK;
} catch(final RocksDBException | IOException e) {
LOGGER.error(e.getMessage(), e);
return Status.ERROR;
}
}
@Override
public Status insert(final String table, final String key, final Map<String, ByteIterator> values) {
try {
if (!COLUMN_FAMILIES.containsKey(table)) {
createColumnFamily(table);
}
final ColumnFamilyHandle cf = COLUMN_FAMILIES.get(table).getHandle();
rocksDb.put(cf, key.getBytes(UTF_8), serializeValues(values));
return Status.OK;
} catch(final RocksDBException | IOException e) {
LOGGER.error(e.getMessage(), e);
return Status.ERROR;
}
}
@Override
public Status delete(final String table, final String key) {
try {
if (!COLUMN_FAMILIES.containsKey(table)) {
createColumnFamily(table);
}
final ColumnFamilyHandle cf = COLUMN_FAMILIES.get(table).getHandle();
rocksDb.delete(cf, key.getBytes(UTF_8));
return Status.OK;
} catch(final RocksDBException e) {
LOGGER.error(e.getMessage(), e);
return Status.ERROR;
}
}
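  /**
   * Persists the default column family name plus all known column family names to the CF_NAMES
   * file in the data directory, so a later run can reopen the database with the same families.
   */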
private void saveColumnFamilyNames() throws IOException {
final Path file = rocksDbDir.resolve(COLUMN_FAMILY_NAMES_FILENAME);
try(final PrintWriter writer = new PrintWriter(Files.newBufferedWriter(file, UTF_8))) {
writer.println(new String(RocksDB.DEFAULT_COLUMN_FAMILY, UTF_8));
for(final String cfName : COLUMN_FAMILIES.keySet()) {
writer.println(cfName);
}
}
}
private List<String> loadColumnFamilyNames() throws IOException {
final List<String> cfNames = new ArrayList<>();
final Path file = rocksDbDir.resolve(COLUMN_FAMILY_NAMES_FILENAME);
if(Files.exists(file)) {
try (final LineNumberReader reader =
new LineNumberReader(Files.newBufferedReader(file, UTF_8))) {
String line = null;
while ((line = reader.readLine()) != null) {
cfNames.add(line);
}
}
}
return cfNames;
}
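  /**
   * Decodes a value produced by {@link #serializeValues(Map)}: a sequence of
   * [4-byte field-name length][field name][4-byte value length][field value] records.
   */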
private Map<String, ByteIterator> deserializeValues(final byte[] values, final Set<String> fields,
final Map<String, ByteIterator> result) {
final ByteBuffer buf = ByteBuffer.allocate(4);
int offset = 0;
while(offset < values.length) {
buf.put(values, offset, 4);
buf.flip();
final int keyLen = buf.getInt();
buf.clear();
offset += 4;
      final String key = new String(values, offset, keyLen, UTF_8);
offset += keyLen;
buf.put(values, offset, 4);
buf.flip();
final int valueLen = buf.getInt();
buf.clear();
offset += 4;
if(fields == null || fields.contains(key)) {
result.put(key, new ByteArrayByteIterator(values, offset, valueLen));
}
offset += valueLen;
}
return result;
}
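  /**
   * Encodes all fields of a record into a single byte array as a sequence of
   * [4-byte field-name length][field name][4-byte value length][field value] records.
   */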
private byte[] serializeValues(final Map<String, ByteIterator> values) throws IOException {
try(final ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
final ByteBuffer buf = ByteBuffer.allocate(4);
for(final Map.Entry<String, ByteIterator> value : values.entrySet()) {
final byte[] keyBytes = value.getKey().getBytes(UTF_8);
final byte[] valueBytes = value.getValue().toArray();
buf.putInt(keyBytes.length);
baos.write(buf.array());
baos.write(keyBytes);
buf.clear();
buf.putInt(valueBytes.length);
baos.write(buf.array());
baos.write(valueBytes);
buf.clear();
}
return baos.toByteArray();
}
}
private ColumnFamilyOptions getDefaultColumnFamilyOptions(final String destinationCfName) {
final ColumnFamilyOptions cfOptions;
if (COLUMN_FAMILIES.containsKey("default")) {
LOGGER.warn("no column family options for \"" + destinationCfName + "\" " +
"in options file - using options from \"default\"");
cfOptions = COLUMN_FAMILIES.get("default").getOptions();
} else {
LOGGER.warn("no column family options for either \"" + destinationCfName + "\" or " +
"\"default\" in options file - initializing with empty configuration");
cfOptions = new ColumnFamilyOptions();
}
LOGGER.warn("Add a CFOptions section for \"" + destinationCfName + "\" to the options file, " +
"or subsequent runs on this DB will fail.");
return cfOptions;
}
private void createColumnFamily(final String name) throws RocksDBException {
COLUMN_FAMILY_LOCKS.putIfAbsent(name, new ReentrantLock());
final Lock l = COLUMN_FAMILY_LOCKS.get(name);
l.lock();
try {
if(!COLUMN_FAMILIES.containsKey(name)) {
final ColumnFamilyOptions cfOptions;
if (optionsFile != null) {
// RocksDB requires all options files to include options for the "default" column family;
// apply those options to this column family
cfOptions = getDefaultColumnFamilyOptions(name);
} else {
cfOptions = new ColumnFamilyOptions().optimizeLevelStyleCompaction();
}
final ColumnFamilyHandle cfHandle = rocksDb.createColumnFamily(
new ColumnFamilyDescriptor(name.getBytes(UTF_8), cfOptions)
);
COLUMN_FAMILIES.put(name, new ColumnFamily(cfHandle, cfOptions));
}
} finally {
l.unlock();
}
}
private static final class ColumnFamily {
private final ColumnFamilyHandle handle;
private final ColumnFamilyOptions options;
private ColumnFamily(final ColumnFamilyHandle handle, final ColumnFamilyOptions options) {
this.handle = handle;
this.options = options;
}
public ColumnFamilyHandle getHandle() {
return handle;
}
public ColumnFamilyOptions getOptions() {
return options;
}
}
}
| 15,353 | 31.807692 | 119 | java |
null | NearPMSW-main/baseline/logging/YCSB/memcached/src/main/java/site/ycsb/db/package-info.java | /**
* Copyright (c) 2015 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* YCSB binding for memcached.
*/
package site.ycsb.db;
| 720 | 31.772727 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB/memcached/src/main/java/site/ycsb/db/MemcachedClient.java | /**
* Copyright (c) 2014-2015 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;
import java.net.InetSocketAddress;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import net.spy.memcached.ConnectionFactoryBuilder;
import net.spy.memcached.FailureMode;
// We also use `net.spy.memcached.MemcachedClient`; it is not imported
// explicitly and referred to with its full path to avoid conflicts with the
// class of the same name in this file.
import net.spy.memcached.internal.GetFuture;
import net.spy.memcached.internal.OperationFuture;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.node.ObjectNode;
import org.apache.log4j.Logger;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
/**
* Concrete Memcached client implementation.
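 * <p>
 * Server addresses are read from the {@code memcached.hosts} property as a comma-separated list of
 * {@code host[:port]} entries (the port defaults to 11211); the other {@code memcached.*} properties
 * declared below tune timeouts, the protocol and the failure mode.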
*/
public class MemcachedClient extends DB {
private final Logger logger = Logger.getLogger(getClass());
protected static final ObjectMapper MAPPER = new ObjectMapper();
private boolean checkOperationStatus;
private long shutdownTimeoutMillis;
private int objectExpirationTime;
public static final String HOSTS_PROPERTY = "memcached.hosts";
public static final int DEFAULT_PORT = 11211;
private static final String TEMPORARY_FAILURE_MSG = "Temporary failure";
private static final String CANCELLED_MSG = "cancelled";
public static final String SHUTDOWN_TIMEOUT_MILLIS_PROPERTY =
"memcached.shutdownTimeoutMillis";
public static final String DEFAULT_SHUTDOWN_TIMEOUT_MILLIS = "30000";
public static final String OBJECT_EXPIRATION_TIME_PROPERTY =
"memcached.objectExpirationTime";
public static final String DEFAULT_OBJECT_EXPIRATION_TIME =
String.valueOf(Integer.MAX_VALUE);
public static final String CHECK_OPERATION_STATUS_PROPERTY =
"memcached.checkOperationStatus";
public static final String CHECK_OPERATION_STATUS_DEFAULT = "true";
public static final String READ_BUFFER_SIZE_PROPERTY =
"memcached.readBufferSize";
public static final String DEFAULT_READ_BUFFER_SIZE = "3000000";
public static final String OP_TIMEOUT_PROPERTY = "memcached.opTimeoutMillis";
public static final String DEFAULT_OP_TIMEOUT = "60000";
public static final String FAILURE_MODE_PROPERTY = "memcached.failureMode";
public static final FailureMode FAILURE_MODE_PROPERTY_DEFAULT =
FailureMode.Redistribute;
public static final String PROTOCOL_PROPERTY = "memcached.protocol";
public static final ConnectionFactoryBuilder.Protocol DEFAULT_PROTOCOL =
ConnectionFactoryBuilder.Protocol.TEXT;
/**
* The MemcachedClient implementation that will be used to communicate
* with the memcached server.
*/
private net.spy.memcached.MemcachedClient client;
/**
   * @return The underlying memcached protocol client, implemented by
* SpyMemcached.
*/
protected net.spy.memcached.MemcachedClient memcachedClient() {
return client;
}
@Override
public void init() throws DBException {
try {
client = createMemcachedClient();
checkOperationStatus = Boolean.parseBoolean(
getProperties().getProperty(CHECK_OPERATION_STATUS_PROPERTY,
CHECK_OPERATION_STATUS_DEFAULT));
objectExpirationTime = Integer.parseInt(
getProperties().getProperty(OBJECT_EXPIRATION_TIME_PROPERTY,
DEFAULT_OBJECT_EXPIRATION_TIME));
shutdownTimeoutMillis = Integer.parseInt(
getProperties().getProperty(SHUTDOWN_TIMEOUT_MILLIS_PROPERTY,
DEFAULT_SHUTDOWN_TIMEOUT_MILLIS));
} catch (Exception e) {
throw new DBException(e);
}
}
protected net.spy.memcached.MemcachedClient createMemcachedClient()
throws Exception {
ConnectionFactoryBuilder connectionFactoryBuilder =
new ConnectionFactoryBuilder();
connectionFactoryBuilder.setReadBufferSize(Integer.parseInt(
getProperties().getProperty(READ_BUFFER_SIZE_PROPERTY,
DEFAULT_READ_BUFFER_SIZE)));
connectionFactoryBuilder.setOpTimeout(Integer.parseInt(
getProperties().getProperty(OP_TIMEOUT_PROPERTY, DEFAULT_OP_TIMEOUT)));
String protocolString = getProperties().getProperty(PROTOCOL_PROPERTY);
connectionFactoryBuilder.setProtocol(
protocolString == null ? DEFAULT_PROTOCOL
: ConnectionFactoryBuilder.Protocol.valueOf(protocolString.toUpperCase()));
String failureString = getProperties().getProperty(FAILURE_MODE_PROPERTY);
connectionFactoryBuilder.setFailureMode(
failureString == null ? FAILURE_MODE_PROPERTY_DEFAULT
: FailureMode.valueOf(failureString));
// Note: this only works with IPv4 addresses due to its assumption of
// ":" being the separator of hostname/IP and port; this is not the case
// when dealing with IPv6 addresses.
//
// TODO(mbrukman): fix this.
List<InetSocketAddress> addresses = new ArrayList<InetSocketAddress>();
String[] hosts = getProperties().getProperty(HOSTS_PROPERTY).split(",");
for (String address : hosts) {
int colon = address.indexOf(":");
int port = DEFAULT_PORT;
String host = address;
if (colon != -1) {
port = Integer.parseInt(address.substring(colon + 1));
host = address.substring(0, colon);
}
addresses.add(new InetSocketAddress(host, port));
}
return new net.spy.memcached.MemcachedClient(
connectionFactoryBuilder.build(), addresses);
}
@Override
public Status read(
String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
key = createQualifiedKey(table, key);
try {
GetFuture<Object> future = memcachedClient().asyncGet(key);
Object document = future.get();
if (document != null) {
fromJson((String) document, fields, result);
}
return Status.OK;
} catch (Exception e) {
logger.error("Error encountered for key: " + key, e);
return Status.ERROR;
}
}
@Override
public Status scan(
String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result){
return Status.NOT_IMPLEMENTED;
}
@Override
public Status update(
String table, String key, Map<String, ByteIterator> values) {
key = createQualifiedKey(table, key);
try {
OperationFuture<Boolean> future =
memcachedClient().replace(key, objectExpirationTime, toJson(values));
return getReturnCode(future);
} catch (Exception e) {
logger.error("Error updating value with key: " + key, e);
return Status.ERROR;
}
}
@Override
public Status insert(
String table, String key, Map<String, ByteIterator> values) {
key = createQualifiedKey(table, key);
try {
OperationFuture<Boolean> future =
memcachedClient().add(key, objectExpirationTime, toJson(values));
return getReturnCode(future);
} catch (Exception e) {
logger.error("Error inserting value", e);
return Status.ERROR;
}
}
@Override
public Status delete(String table, String key) {
key = createQualifiedKey(table, key);
try {
OperationFuture<Boolean> future = memcachedClient().delete(key);
return getReturnCode(future);
} catch (Exception e) {
logger.error("Error deleting value", e);
return Status.ERROR;
}
}
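  /**
   * Translates the outcome of a SpyMemcached operation future into a YCSB {@link Status}; always
   * reports OK when status checking is disabled via the memcached.checkOperationStatus property.
   */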
protected Status getReturnCode(OperationFuture<Boolean> future) {
if (!checkOperationStatus) {
return Status.OK;
}
if (future.getStatus().isSuccess()) {
return Status.OK;
} else if (TEMPORARY_FAILURE_MSG.equals(future.getStatus().getMessage())) {
return new Status("TEMPORARY_FAILURE", TEMPORARY_FAILURE_MSG);
} else if (CANCELLED_MSG.equals(future.getStatus().getMessage())) {
return new Status("CANCELLED_MSG", CANCELLED_MSG);
}
return new Status("ERROR", future.getStatus().getMessage());
}
@Override
public void cleanup() throws DBException {
if (client != null) {
memcachedClient().shutdown(shutdownTimeoutMillis, MILLISECONDS);
}
}
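  /**
   * Prefixes the record key with the table name ("table-key") so that records from different
   * tables cannot collide in memcached's flat key space.
   */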
protected static String createQualifiedKey(String table, String key) {
return MessageFormat.format("{0}-{1}", table, key);
}
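  /**
   * Parses a JSON document produced by {@link #toJson(Map)} back into field/value pairs, keeping
   * only the requested fields when a non-empty field set is given.
   */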
protected static void fromJson(
String value, Set<String> fields,
Map<String, ByteIterator> result) throws IOException {
JsonNode json = MAPPER.readTree(value);
boolean checkFields = fields != null && !fields.isEmpty();
for (Iterator<Map.Entry<String, JsonNode>> jsonFields = json.getFields();
jsonFields.hasNext();
/* increment in loop body */) {
Map.Entry<String, JsonNode> jsonField = jsonFields.next();
String name = jsonField.getKey();
if (checkFields && !fields.contains(name)) {
continue;
}
JsonNode jsonValue = jsonField.getValue();
if (jsonValue != null && !jsonValue.isNull()) {
result.put(name, new StringByteIterator(jsonValue.asText()));
}
}
}
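  /**
   * Serializes the field/value map into a flat JSON object string, which is what gets stored as
   * the memcached value.
   */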
protected static String toJson(Map<String, ByteIterator> values)
throws IOException {
ObjectNode node = MAPPER.createObjectNode();
Map<String, String> stringMap = StringByteIterator.getStringMap(values);
for (Map.Entry<String, String> pair : stringMap.entrySet()) {
node.put(pair.getKey(), pair.getValue());
}
JsonFactory jsonFactory = new JsonFactory();
Writer writer = new StringWriter();
JsonGenerator jsonGenerator = jsonFactory.createJsonGenerator(writer);
MAPPER.writeTree(jsonGenerator, node);
return writer.toString();
}
}
| 10,775 | 34.447368 | 100 | java |
null | NearPMSW-main/baseline/logging/YCSB/elasticsearch/src/test/java/site/ycsb/db/ElasticsearchClientTest.java | /**
* Copyright (c) 2012-2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import site.ycsb.ByteIterator;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.workloads.CoreWorkload;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.util.HashMap;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
import static org.junit.Assert.assertEquals;
public class ElasticsearchClientTest {
@ClassRule public final static TemporaryFolder temp = new TemporaryFolder();
private final static ElasticsearchClient instance = new ElasticsearchClient();
private final static HashMap<String, ByteIterator> MOCK_DATA;
private final static String MOCK_TABLE = "MOCK_TABLE";
private final static String MOCK_KEY0 = "0";
private final static String MOCK_KEY1 = "1";
private final static String MOCK_KEY2 = "2";
private final static String FIELD_PREFIX = CoreWorkload.FIELD_NAME_PREFIX_DEFAULT;
static {
MOCK_DATA = new HashMap<>(10);
for (int i = 1; i <= 10; i++) {
MOCK_DATA.put(FIELD_PREFIX + i, new StringByteIterator("value" + i));
}
}
@BeforeClass
public static void setUpClass() throws DBException {
final Properties props = new Properties();
props.put("path.home", temp.getRoot().toString());
instance.setProperties(props);
instance.init();
}
@AfterClass
public static void tearDownClass() throws DBException {
instance.cleanup();
}
@Before
public void setUp() {
instance.insert(MOCK_TABLE, MOCK_KEY1, MOCK_DATA);
instance.insert(MOCK_TABLE, MOCK_KEY2, MOCK_DATA);
}
@After
public void tearDown() {
instance.delete(MOCK_TABLE, MOCK_KEY1);
instance.delete(MOCK_TABLE, MOCK_KEY2);
}
/**
* Test of insert method, of class ElasticsearchClient.
*/
@Test
public void testInsert() {
Status result = instance.insert(MOCK_TABLE, MOCK_KEY0, MOCK_DATA);
assertEquals(Status.OK, result);
}
/**
* Test of delete method, of class ElasticsearchClient.
*/
@Test
public void testDelete() {
Status result = instance.delete(MOCK_TABLE, MOCK_KEY1);
assertEquals(Status.OK, result);
}
/**
* Test of read method, of class ElasticsearchClient.
*/
@Test
public void testRead() {
Set<String> fields = MOCK_DATA.keySet();
HashMap<String, ByteIterator> resultParam = new HashMap<>(10);
Status result = instance.read(MOCK_TABLE, MOCK_KEY1, fields, resultParam);
assertEquals(Status.OK, result);
}
/**
* Test of update method, of class ElasticsearchClient.
*/
@Test
public void testUpdate() {
int i;
HashMap<String, ByteIterator> newValues = new HashMap<>(10);
for (i = 1; i <= 10; i++) {
newValues.put(FIELD_PREFIX + i, new StringByteIterator("newvalue" + i));
}
Status result = instance.update(MOCK_TABLE, MOCK_KEY1, newValues);
assertEquals(Status.OK, result);
    // validate that the values changed
HashMap<String, ByteIterator> resultParam = new HashMap<>(10);
instance.read(MOCK_TABLE, MOCK_KEY1, MOCK_DATA.keySet(), resultParam);
for (i = 1; i <= 10; i++) {
assertEquals("newvalue" + i, resultParam.get(FIELD_PREFIX + i).toString());
}
}
/**
* Test of scan method, of class ElasticsearchClient.
*/
@Test
public void testScan() {
int recordcount = 10;
Set<String> fields = MOCK_DATA.keySet();
Vector<HashMap<String, ByteIterator>> resultParam = new Vector<>(10);
Status result = instance.scan(MOCK_TABLE, MOCK_KEY1, recordcount, fields, resultParam);
assertEquals(Status.OK, result);
}
}
| 4,761 | 30.328947 | 95 | java |
null | NearPMSW-main/baseline/logging/YCSB/elasticsearch/src/main/java/site/ycsb/db/package-info.java | /*
* Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for
* <a href="https://www.elastic.co/products/elasticsearch">Elasticsearch</a>.
*/
package site.ycsb.db;
| 787 | 31.833333 | 77 | java |
null | NearPMSW-main/baseline/logging/YCSB/elasticsearch/src/main/java/site/ycsb/db/ElasticsearchClient.java | /**
* Copyright (c) 2012 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import static org.elasticsearch.common.settings.Settings.Builder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.RangeQueryBuilder;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.SearchHit;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
/**
* Elasticsearch client for YCSB framework.
*
* <p>
* Default properties to set:
* </p>
* <ul>
* <li>cluster.name = es.ycsb.cluster
* <li>es.index.key = es.ycsb
* <li>es.number_of_shards = 1
* <li>es.number_of_replicas = 0
* </ul>
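 * <p>
 * A minimal embedded-mode sketch, assuming the property names defined in this class
 * (the {@code path.home} value is illustrative only):
 * </p>
 * <pre>{@code
 * Properties props = new Properties();
 * props.put("path.home", "/tmp/es-ycsb-home"); // required when es.remote=false
 * props.put("es.index.key", "es.ycsb");
 * ElasticsearchClient client = new ElasticsearchClient();
 * client.setProperties(props);
 * client.init(); // may throw DBException
 * }</pre>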
*/
public class ElasticsearchClient extends DB {
private static final String DEFAULT_CLUSTER_NAME = "es.ycsb.cluster";
private static final String DEFAULT_INDEX_KEY = "es.ycsb";
private static final String DEFAULT_REMOTE_HOST = "localhost:9300";
private static final int NUMBER_OF_SHARDS = 1;
private static final int NUMBER_OF_REPLICAS = 0;
private Node node;
private Client client;
private String indexKey;
private Boolean remoteMode;
/**
* Initialize any state for this DB. Called once per DB instance; there is one
* DB instance per client thread.
*/
@Override
public void init() throws DBException {
final Properties props = getProperties();
// Check if transport client needs to be used (To connect to multiple
// elasticsearch nodes)
remoteMode = Boolean.parseBoolean(props.getProperty("es.remote", "false"));
final String pathHome = props.getProperty("path.home");
// when running in embedded mode, require path.home
if (!remoteMode && (pathHome == null || pathHome.isEmpty())) {
throw new IllegalArgumentException("path.home must be specified when running in embedded mode");
}
this.indexKey = props.getProperty("es.index.key", DEFAULT_INDEX_KEY);
int numberOfShards = parseIntegerProperty(props, "es.number_of_shards", NUMBER_OF_SHARDS);
int numberOfReplicas = parseIntegerProperty(props, "es.number_of_replicas", NUMBER_OF_REPLICAS);
Boolean newdb = Boolean.parseBoolean(props.getProperty("es.newdb", "false"));
Builder settings = Settings.settingsBuilder()
.put("cluster.name", DEFAULT_CLUSTER_NAME)
.put("node.local", Boolean.toString(!remoteMode))
.put("path.home", pathHome);
    // If the properties file contains user-defined Elasticsearch settings,
    // add them to the settings builder (overwriting the defaults above).
settings.put(props);
final String clusterName = settings.get("cluster.name");
System.err.println("Elasticsearch starting node = " + clusterName);
System.err.println("Elasticsearch node path.home = " + settings.get("path.home"));
System.err.println("Elasticsearch Remote Mode = " + remoteMode);
// Remote mode support for connecting to remote elasticsearch cluster
if (remoteMode) {
settings.put("client.transport.sniff", true)
.put("client.transport.ignore_cluster_name", false)
.put("client.transport.ping_timeout", "30s")
.put("client.transport.nodes_sampler_interval", "30s");
// Default it to localhost:9300
String[] nodeList = props.getProperty("es.hosts.list", DEFAULT_REMOTE_HOST).split(",");
System.out.println("Elasticsearch Remote Hosts = " + props.getProperty("es.hosts.list", DEFAULT_REMOTE_HOST));
TransportClient tClient = TransportClient.builder().settings(settings).build();
for (String h : nodeList) {
String[] nodes = h.split(":");
try {
tClient.addTransportAddress(new InetSocketTransportAddress(
InetAddress.getByName(nodes[0]),
Integer.parseInt(nodes[1])
));
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Unable to parse port number.", e);
} catch (UnknownHostException e) {
throw new IllegalArgumentException("Unable to Identify host.", e);
}
}
client = tClient;
} else { // Start node only if transport client mode is disabled
node = nodeBuilder().clusterName(clusterName).settings(settings).node();
node.start();
client = node.client();
}
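    // Create the target index if needed: with es.newdb=true any existing index is dropped
    // and recreated; otherwise an existing index is reused as-is.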
final boolean exists =
client.admin().indices()
.exists(Requests.indicesExistsRequest(indexKey)).actionGet()
.isExists();
if (exists && newdb) {
client.admin().indices().prepareDelete(indexKey).execute().actionGet();
}
if (!exists || newdb) {
client.admin().indices().create(
new CreateIndexRequest(indexKey)
.settings(
Settings.builder()
.put("index.number_of_shards", numberOfShards)
.put("index.number_of_replicas", numberOfReplicas)
.put("index.mapping._id.indexed", true)
)).actionGet();
}
client.admin().cluster().health(new ClusterHealthRequest().waitForGreenStatus()).actionGet();
}
private int parseIntegerProperty(Properties properties, String key, int defaultValue) {
String value = properties.getProperty(key);
return value == null ? defaultValue : Integer.parseInt(value);
}
@Override
public void cleanup() throws DBException {
if (!remoteMode) {
if (!node.isClosed()) {
client.close();
node.close();
}
} else {
client.close();
}
}
/**
* Insert a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key.
*
* @param table
* The name of the table
* @param key
* The record key of the record to insert.
* @param values
* A HashMap of field/value pairs to insert in the record
* @return Zero on success, a non-zero error code on error. See this class's
* description for a discussion of error codes.
*/
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
try {
final XContentBuilder doc = jsonBuilder().startObject();
for (Entry<String, String> entry : StringByteIterator.getStringMap(values).entrySet()) {
doc.field(entry.getKey(), entry.getValue());
}
doc.endObject();
client.prepareIndex(indexKey, table, key).setSource(doc).execute().actionGet();
return Status.OK;
} catch (Exception e) {
e.printStackTrace();
return Status.ERROR;
}
}
/**
* Delete a record from the database.
*
* @param table
* The name of the table
* @param key
* The record key of the record to delete.
* @return Zero on success, a non-zero error code on error. See this class's
* description for a discussion of error codes.
*/
@Override
public Status delete(String table, String key) {
try {
DeleteResponse response = client.prepareDelete(indexKey, table, key).execute().actionGet();
if (response.isFound()) {
return Status.OK;
} else {
return Status.NOT_FOUND;
}
} catch (Exception e) {
e.printStackTrace();
return Status.ERROR;
}
}
/**
* Read a record from the database. Each field/value pair from the result will
* be stored in a HashMap.
*
* @param table
* The name of the table
* @param key
* The record key of the record to read.
* @param fields
* The list of fields to read, or null for all of them
* @param result
* A HashMap of field/value pairs for the result
* @return Zero on success, a non-zero error code on error or "not found".
*/
@Override
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
try {
final GetResponse response = client.prepareGet(indexKey, table, key).execute().actionGet();
if (response.isExists()) {
if (fields != null) {
for (String field : fields) {
result.put(field, new StringByteIterator(
(String) response.getSource().get(field)));
}
} else {
for (String field : response.getSource().keySet()) {
result.put(field, new StringByteIterator(
(String) response.getSource().get(field)));
}
}
return Status.OK;
} else {
return Status.NOT_FOUND;
}
} catch (Exception e) {
e.printStackTrace();
return Status.ERROR;
}
}
/**
* Update a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key, overwriting any existing values with the same field name.
*
* @param table
* The name of the table
* @param key
* The record key of the record to write.
* @param values
* A HashMap of field/value pairs to update in the record
* @return Zero on success, a non-zero error code on error. See this class's
* description for a discussion of error codes.
*/
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
try {
final GetResponse response = client.prepareGet(indexKey, table, key).execute().actionGet();
if (response.isExists()) {
for (Entry<String, String> entry : StringByteIterator.getStringMap(values).entrySet()) {
response.getSource().put(entry.getKey(), entry.getValue());
}
client.prepareIndex(indexKey, table, key).setSource(response.getSource()).execute().actionGet();
return Status.OK;
} else {
return Status.NOT_FOUND;
}
} catch (Exception e) {
e.printStackTrace();
return Status.ERROR;
}
}
/**
* Perform a range scan for a set of records in the database. Each field/value
* pair from the result will be stored in a HashMap.
*
* @param table
* The name of the table
* @param startkey
* The record key of the first record to read.
* @param recordcount
* The number of records to read
* @param fields
* The list of fields to read, or null for all of them
* @param result
* A Vector of HashMaps, where each HashMap is a set field/value
* pairs for one record
* @return Zero on success, a non-zero error code on error. See this class's
* description for a discussion of error codes.
*/
@Override
public Status scan(
String table,
String startkey,
int recordcount,
Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
try {
final RangeQueryBuilder rangeQuery = rangeQuery("_id").gte(startkey);
final SearchResponse response = client.prepareSearch(indexKey)
.setTypes(table)
.setQuery(rangeQuery)
.setSize(recordcount)
.execute()
.actionGet();
      HashMap<String, ByteIterator> entry;
      for (SearchHit hit : response.getHits()) {
        // fields may be null, which per the contract above means "read all fields".
        final Set<String> fieldsToRead = (fields != null) ? fields : hit.getSource().keySet();
        entry = new HashMap<>(fieldsToRead.size());
        for (String field : fieldsToRead) {
          entry.put(field, new StringByteIterator((String) hit.getSource().get(field)));
        }
result.add(entry);
}
return Status.OK;
} catch (Exception e) {
e.printStackTrace();
return Status.ERROR;
}
}
}
| 13,255 | 34.730458 | 116 | java |
null | NearPMSW-main/baseline/logging/YCSB/nosqldb/src/main/java/site/ycsb/db/package-info.java | /*
* Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License. See accompanying LICENSE file.
*/
/**
* The YCSB binding for <a href=
* "http://www.oracle.com/us/products/database/nosql/overview/index.html">Oracle
* 's NoSQL DB</a>.
*/
package site.ycsb.db;
| 814 | 34.434783 | 80 | java |
null | NearPMSW-main/baseline/logging/YCSB/nosqldb/src/main/java/site/ycsb/db/NoSqlDbClient.java | /**
* Copyright (c) 2012 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.SortedMap;
import java.util.Vector;
import java.util.concurrent.TimeUnit;
import oracle.kv.Consistency;
import oracle.kv.Durability;
import oracle.kv.FaultException;
import oracle.kv.KVStore;
import oracle.kv.KVStoreConfig;
import oracle.kv.KVStoreFactory;
import oracle.kv.Key;
import oracle.kv.RequestLimitConfig;
import oracle.kv.Value;
import oracle.kv.ValueVersion;
import site.ycsb.ByteArrayByteIterator;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
/**
* A database interface layer for Oracle NoSQL Database.
*/
public class NoSqlDbClient extends DB {
private KVStore store;
private int getPropertyInt(Properties properties, String key,
int defaultValue) throws DBException {
String p = properties.getProperty(key);
int i = defaultValue;
if (p != null) {
try {
i = Integer.parseInt(p);
} catch (NumberFormatException e) {
throw new DBException("Illegal number format in " + key + " property");
}
}
return i;
}
@Override
public void init() throws DBException {
Properties properties = getProperties();
/* Mandatory properties */
String storeName = properties.getProperty("storeName", "kvstore");
String[] helperHosts =
properties.getProperty("helperHost", "localhost:5000").split(",");
KVStoreConfig config = new KVStoreConfig(storeName, helperHosts);
/* Optional properties */
String p;
p = properties.getProperty("consistency");
if (p != null) {
if (p.equalsIgnoreCase("ABSOLUTE")) {
config.setConsistency(Consistency.ABSOLUTE);
} else if (p.equalsIgnoreCase("NONE_REQUIRED")) {
config.setConsistency(Consistency.NONE_REQUIRED);
} else {
throw new DBException("Illegal value in consistency property");
}
}
p = properties.getProperty("durability");
if (p != null) {
if (p.equalsIgnoreCase("COMMIT_NO_SYNC")) {
config.setDurability(Durability.COMMIT_NO_SYNC);
} else if (p.equalsIgnoreCase("COMMIT_SYNC")) {
config.setDurability(Durability.COMMIT_SYNC);
} else if (p.equalsIgnoreCase("COMMIT_WRITE_NO_SYNC")) {
config.setDurability(Durability.COMMIT_WRITE_NO_SYNC);
} else {
throw new DBException("Illegal value in durability property");
}
}
int maxActiveRequests =
getPropertyInt(properties, "requestLimit.maxActiveRequests",
RequestLimitConfig.DEFAULT_MAX_ACTIVE_REQUESTS);
int requestThresholdPercent =
getPropertyInt(properties, "requestLimit.requestThresholdPercent",
RequestLimitConfig.DEFAULT_REQUEST_THRESHOLD_PERCENT);
int nodeLimitPercent =
getPropertyInt(properties, "requestLimit.nodeLimitPercent",
RequestLimitConfig.DEFAULT_NODE_LIMIT_PERCENT);
RequestLimitConfig requestLimitConfig;
    /*
     * The Javadoc states that this constructor can throw NodeRequestLimitException, but
     * that exception class is not available in the client library, so the handling below
     * is left commented out.
     */
// try {
requestLimitConfig = new RequestLimitConfig(maxActiveRequests,
requestThresholdPercent, nodeLimitPercent);
// } catch (NodeRequestLimitException e) {
// throw new DBException(e);
// }
config.setRequestLimit(requestLimitConfig);
p = properties.getProperty("requestTimeout");
if (p != null) {
long timeout = 1;
try {
timeout = Long.parseLong(p);
} catch (NumberFormatException e) {
throw new DBException(
"Illegal number format in requestTimeout property");
}
try {
// TODO Support other TimeUnit
config.setRequestTimeout(timeout, TimeUnit.SECONDS);
} catch (IllegalArgumentException e) {
throw new DBException(e);
}
}
try {
store = KVStoreFactory.getStore(config);
} catch (FaultException e) {
throw new DBException(e);
}
}
@Override
public void cleanup() throws DBException {
store.close();
}
/**
* Create a key object. We map "table" and (YCSB's) "key" to a major component
* of the oracle.kv.Key, and "field" to a minor component.
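   * For example, table {@code usertable}, key {@code user1} and field {@code field0}
   * (illustrative names) become a Key with major path {@code [usertable, user1]} and
   * minor path {@code [field0]}.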
*
* @return An oracle.kv.Key object.
*/
private static Key createKey(String table, String key, String field) {
List<String> majorPath = new ArrayList<String>();
majorPath.add(table);
majorPath.add(key);
if (field == null) {
return Key.createKey(majorPath);
}
return Key.createKey(majorPath, field);
}
private static Key createKey(String table, String key) {
return createKey(table, key, null);
}
private static String getFieldFromKey(Key key) {
return key.getMinorPath().get(0);
}
@Override
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
Key kvKey = createKey(table, key);
SortedMap<Key, ValueVersion> kvResult;
try {
kvResult = store.multiGet(kvKey, null, null);
} catch (FaultException e) {
System.err.println(e);
return Status.ERROR;
}
for (Map.Entry<Key, ValueVersion> entry : kvResult.entrySet()) {
/* If fields is null, read all fields */
String field = getFieldFromKey(entry.getKey());
if (fields != null && !fields.contains(field)) {
continue;
}
result.put(field,
new ByteArrayByteIterator(entry.getValue().getValue().getValue()));
}
return Status.OK;
}
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
System.err.println("Oracle NoSQL Database does not support Scan semantics");
return Status.ERROR;
}
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
Key kvKey = createKey(table, key, entry.getKey());
Value kvValue = Value.createValue(entry.getValue().toArray());
try {
store.put(kvKey, kvValue);
} catch (FaultException e) {
System.err.println(e);
return Status.ERROR;
}
}
return Status.OK;
}
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
return update(table, key, values);
}
@Override
public Status delete(String table, String key) {
Key kvKey = createKey(table, key);
try {
store.multiDelete(kvKey, null, null);
} catch (FaultException e) {
System.err.println(e);
return Status.ERROR;
}
return Status.OK;
}
}
| 7,466 | 29.108871 | 102 | java |
null | NearPMSW-main/baseline/logging/YCSB2/scylla/src/test/java/site/ycsb/db/scylla/ScyllaCQLClientTest.java | /*
* Copyright (c) 2020 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License. See accompanying LICENSE file.
*/
package site.ycsb.db.scylla;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import com.google.common.collect.Sets;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.Insert;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select;
import site.ycsb.ByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.measurements.Measurements;
import site.ycsb.workloads.CoreWorkload;
import org.cassandraunit.CassandraCQLUnit;
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
/**
* Integration tests for the Scylla client
*/
public class ScyllaCQLClientTest {
// Change the default Scylla timeout from 10s to 120s for slow CI machines
private final static long timeout = 120000L;
private final static String TABLE = "usertable";
private final static String HOST = "localhost";
private final static int PORT = 9142;
private final static String DEFAULT_ROW_KEY = "user1";
private ScyllaCQLClient client;
private Session session;
@ClassRule
public static CassandraCQLUnit unit = new CassandraCQLUnit(
new ClassPathCQLDataSet("ycsb.cql", "ycsb"), null, timeout);
@Before
public void setUp() throws Exception {
session = unit.getSession();
Properties p = new Properties();
p.setProperty("scylla.hosts", HOST);
p.setProperty("scylla.port", Integer.toString(PORT));
p.setProperty("scylla.table", TABLE);
Measurements.setProperties(p);
final CoreWorkload workload = new CoreWorkload();
workload.init(p);
client = new ScyllaCQLClient();
client.setProperties(p);
client.init();
}
@After
public void tearDownClient() throws Exception {
if (client != null) {
client.cleanup();
}
client = null;
}
@After
public void clearTable() {
// Clear the table so that each test starts fresh.
final Statement truncate = QueryBuilder.truncate(TABLE);
if (unit != null) {
unit.getSession().execute(truncate);
}
}
@Test
public void testReadMissingRow() {
final HashMap<String, ByteIterator> result = new HashMap<>();
final Status status = client.read(TABLE, "Missing row", null, result);
assertThat(result.size(), is(0));
assertThat(status, is(Status.NOT_FOUND));
}
private void insertRow() {
Insert insertStmt = QueryBuilder.insertInto(TABLE);
insertStmt.value(ScyllaCQLClient.YCSB_KEY, DEFAULT_ROW_KEY);
insertStmt.value("field0", "value1");
insertStmt.value("field1", "value2");
session.execute(insertStmt);
}
@Test
public void testRead() {
insertRow();
final HashMap<String, ByteIterator> result = new HashMap<>();
final Status status = client.read(TABLE, DEFAULT_ROW_KEY, null, result);
assertThat(status, is(Status.OK));
assertThat(result.entrySet(), hasSize(11));
assertThat(result, hasEntry("field2", null));
final HashMap<String, String> strResult = new HashMap<>();
for (final Map.Entry<String, ByteIterator> e : result.entrySet()) {
if (e.getValue() != null) {
strResult.put(e.getKey(), e.getValue().toString());
}
}
assertThat(strResult, hasEntry(ScyllaCQLClient.YCSB_KEY, DEFAULT_ROW_KEY));
assertThat(strResult, hasEntry("field0", "value1"));
assertThat(strResult, hasEntry("field1", "value2"));
}
@Test
public void testReadSingleColumn() {
insertRow();
final HashMap<String, ByteIterator> result = new HashMap<>();
final Set<String> fields = Sets.newHashSet("field1");
final Status status = client.read(TABLE, DEFAULT_ROW_KEY, fields, result);
assertThat(status, is(Status.OK));
assertThat(result.entrySet(), hasSize(1));
final Map<String, String> strResult = StringByteIterator.getStringMap(result);
assertThat(strResult, hasEntry("field1", "value2"));
}
@Test
public void testInsert() {
final String key = "key";
final Map<String, String> input = new HashMap<>();
input.put("field0", "value1");
input.put("field1", "value2");
final Status status = client.insert(TABLE, key, StringByteIterator.getByteIteratorMap(input));
assertThat(status, is(Status.OK));
// Verify result
final Select selectStmt =
QueryBuilder.select("field0", "field1")
.from(TABLE)
.where(QueryBuilder.eq(ScyllaCQLClient.YCSB_KEY, key))
.limit(1);
final ResultSet rs = session.execute(selectStmt);
final Row row = rs.one();
assertThat(row, notNullValue());
assertThat(rs.isExhausted(), is(true));
assertThat(row.getString("field0"), is("value1"));
assertThat(row.getString("field1"), is("value2"));
}
@Test
public void testUpdate() {
insertRow();
final Map<String, String> input = new HashMap<>();
input.put("field0", "new-value1");
input.put("field1", "new-value2");
final Status status = client.update(TABLE,
DEFAULT_ROW_KEY,
StringByteIterator.getByteIteratorMap(input));
assertThat(status, is(Status.OK));
// Verify result
final Select selectStmt =
QueryBuilder.select("field0", "field1")
.from(TABLE)
.where(QueryBuilder.eq(ScyllaCQLClient.YCSB_KEY, DEFAULT_ROW_KEY))
.limit(1);
final ResultSet rs = session.execute(selectStmt);
final Row row = rs.one();
assertThat(row, notNullValue());
assertThat(rs.isExhausted(), is(true));
assertThat(row.getString("field0"), is("new-value1"));
assertThat(row.getString("field1"), is("new-value2"));
}
@Test
public void testDelete() {
insertRow();
final Status status = client.delete(TABLE, DEFAULT_ROW_KEY);
assertThat(status, is(Status.OK));
// Verify result
final Select selectStmt =
QueryBuilder.select("field0", "field1")
.from(TABLE)
.where(QueryBuilder.eq(ScyllaCQLClient.YCSB_KEY, DEFAULT_ROW_KEY))
.limit(1);
final ResultSet rs = session.execute(selectStmt);
final Row row = rs.one();
assertThat(row, nullValue());
}
@Test
public void testPreparedStatements() {
final int LOOP_COUNT = 3;
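    // Repeat the CRUD tests so iterations after the first exercise the cached prepared
    // statements rather than preparing new ones.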
for (int i = 0; i < LOOP_COUNT; i++) {
testInsert();
testUpdate();
testRead();
testReadSingleColumn();
testReadMissingRow();
testDelete();
}
}
}
| 7,630 | 30.533058 | 98 | java |
null | NearPMSW-main/baseline/logging/YCSB2/scylla/src/main/java/site/ycsb/db/scylla/package-info.java | /*
* Copyright (c) 2020 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="https://www.scylladb.com/">scylla</a>
* via CQL.
*/
package site.ycsb.db.scylla;
| 779 | 31.5 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/scylla/src/main/java/site/ycsb/db/scylla/ScyllaCQLClient.java | /*
* Copyright (c) 2020 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License. See accompanying LICENSE file.
*/
package site.ycsb.db.scylla;
import com.datastax.driver.core.*;
import com.datastax.driver.core.policies.DCAwareRoundRobinPolicy;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import com.datastax.driver.core.policies.TokenAwarePolicy;
import com.datastax.driver.core.querybuilder.*;
import site.ycsb.ByteArrayByteIterator;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.helpers.MessageFormatter;
/**
* Scylla DB implementation.
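 * <p>
 * Connection settings come from the {@code scylla.*} properties declared below:
 * {@code scylla.hosts} (required), {@code scylla.port} (default 9042),
 * {@code scylla.keyspace} (default ycsb), plus optional credentials and tuning knobs.
 * A run might pass {@code -p scylla.hosts=10.0.0.1,10.0.0.2} on the YCSB command line
 * (addresses illustrative only).
 * </p>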
*/
public class ScyllaCQLClient extends DB {
private static final Logger LOGGER = LoggerFactory.getLogger(ScyllaCQLClient.class);
private static Cluster cluster = null;
private static Session session = null;
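  // Prepared statements are cached per requested field set (and in AtomicReferences for
  // the field-independent statements) so that each distinct query shape is prepared once
  // and then shared by all client threads.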
private static final ConcurrentMap<Set<String>, PreparedStatement> READ_STMTS = new ConcurrentHashMap<>();
private static final ConcurrentMap<Set<String>, PreparedStatement> SCAN_STMTS = new ConcurrentHashMap<>();
private static final ConcurrentMap<Set<String>, PreparedStatement> INSERT_STMTS = new ConcurrentHashMap<>();
private static final ConcurrentMap<Set<String>, PreparedStatement> UPDATE_STMTS = new ConcurrentHashMap<>();
private static final AtomicReference<PreparedStatement> READ_ALL_STMT = new AtomicReference<>();
private static final AtomicReference<PreparedStatement> SCAN_ALL_STMT = new AtomicReference<>();
private static final AtomicReference<PreparedStatement> DELETE_STMT = new AtomicReference<>();
private static ConsistencyLevel readConsistencyLevel = ConsistencyLevel.QUORUM;
private static ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.QUORUM;
private static boolean lwt = false;
public static final String YCSB_KEY = "y_id";
public static final String KEYSPACE_PROPERTY = "scylla.keyspace";
public static final String KEYSPACE_PROPERTY_DEFAULT = "ycsb";
public static final String USERNAME_PROPERTY = "scylla.username";
public static final String PASSWORD_PROPERTY = "scylla.password";
public static final String HOSTS_PROPERTY = "scylla.hosts";
public static final String PORT_PROPERTY = "scylla.port";
public static final String PORT_PROPERTY_DEFAULT = "9042";
public static final String READ_CONSISTENCY_LEVEL_PROPERTY = "scylla.readconsistencylevel";
public static final String READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = readConsistencyLevel.name();
public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY = "scylla.writeconsistencylevel";
public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = writeConsistencyLevel.name();
public static final String MAX_CONNECTIONS_PROPERTY = "scylla.maxconnections";
public static final String CORE_CONNECTIONS_PROPERTY = "scylla.coreconnections";
public static final String CONNECT_TIMEOUT_MILLIS_PROPERTY = "scylla.connecttimeoutmillis";
public static final String READ_TIMEOUT_MILLIS_PROPERTY = "scylla.readtimeoutmillis";
public static final String SCYLLA_LWT = "scylla.lwt";
public static final String TOKEN_AWARE_LOCAL_DC = "scylla.local_dc";
public static final String TRACING_PROPERTY = "scylla.tracing";
public static final String TRACING_PROPERTY_DEFAULT = "false";
public static final String USE_SSL_CONNECTION = "scylla.useSSL";
private static final String DEFAULT_USE_SSL_CONNECTION = "false";
/**
* Count the number of times initialized to teardown on the last
* {@link #cleanup()}.
*/
private static final AtomicInteger INIT_COUNT = new AtomicInteger(0);
private static boolean debug = false;
private static boolean trace = false;
/**
* Initialize any state for this DB. Called once per DB instance; there is one
* DB instance per client thread.
*/
@Override
public void init() throws DBException {
// Keep track of number of calls to init (for later cleanup)
INIT_COUNT.incrementAndGet();
// Synchronized so that we only have a single
// cluster/session instance for all the threads.
synchronized (INIT_COUNT) {
// Check if the cluster has already been initialized
if (cluster != null) {
return;
}
try {
debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false"));
trace = Boolean.parseBoolean(getProperties().getProperty(TRACING_PROPERTY, TRACING_PROPERTY_DEFAULT));
String host = getProperties().getProperty(HOSTS_PROPERTY);
if (host == null) {
throw new DBException(String.format("Required property \"%s\" missing for scyllaCQLClient", HOSTS_PROPERTY));
}
String[] hosts = host.split(",");
String port = getProperties().getProperty(PORT_PROPERTY, PORT_PROPERTY_DEFAULT);
String username = getProperties().getProperty(USERNAME_PROPERTY);
String password = getProperties().getProperty(PASSWORD_PROPERTY);
String keyspace = getProperties().getProperty(KEYSPACE_PROPERTY, KEYSPACE_PROPERTY_DEFAULT);
readConsistencyLevel = ConsistencyLevel.valueOf(
getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT));
writeConsistencyLevel = ConsistencyLevel.valueOf(
getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT));
boolean useSSL = Boolean.parseBoolean(
getProperties().getProperty(USE_SSL_CONNECTION, DEFAULT_USE_SSL_CONNECTION));
Cluster.Builder builder;
if ((username != null) && !username.isEmpty()) {
builder = Cluster.builder().withCredentials(username, password)
.addContactPoints(hosts).withPort(Integer.parseInt(port));
if (useSSL) {
builder = builder.withSSL();
}
} else {
builder = Cluster.builder().withPort(Integer.parseInt(port))
.addContactPoints(hosts);
}
final String localDC = getProperties().getProperty(TOKEN_AWARE_LOCAL_DC);
if (localDC != null && !localDC.isEmpty()) {
final LoadBalancingPolicy local = DCAwareRoundRobinPolicy.builder().withLocalDc(localDC).build();
final TokenAwarePolicy tokenAware = new TokenAwarePolicy(local);
builder = builder.withLoadBalancingPolicy(tokenAware);
LOGGER.info("Using local datacenter with token awareness: {}\n", localDC);
          // If not overridden explicitly, default to LOCAL_QUORUM
if (getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY) == null) {
readConsistencyLevel = ConsistencyLevel.LOCAL_QUORUM;
}
if (getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY) == null) {
writeConsistencyLevel = ConsistencyLevel.LOCAL_QUORUM;
}
}
cluster = builder.build();
String maxConnections = getProperties().getProperty(
MAX_CONNECTIONS_PROPERTY);
if (maxConnections != null) {
cluster.getConfiguration().getPoolingOptions()
.setMaxConnectionsPerHost(HostDistance.LOCAL, Integer.parseInt(maxConnections));
}
String coreConnections = getProperties().getProperty(
CORE_CONNECTIONS_PROPERTY);
if (coreConnections != null) {
cluster.getConfiguration().getPoolingOptions()
.setCoreConnectionsPerHost(HostDistance.LOCAL, Integer.parseInt(coreConnections));
}
String connectTimeoutMillis = getProperties().getProperty(
CONNECT_TIMEOUT_MILLIS_PROPERTY);
if (connectTimeoutMillis != null) {
cluster.getConfiguration().getSocketOptions()
.setConnectTimeoutMillis(Integer.parseInt(connectTimeoutMillis));
}
String readTimeoutMillis = getProperties().getProperty(
READ_TIMEOUT_MILLIS_PROPERTY);
if (readTimeoutMillis != null) {
cluster.getConfiguration().getSocketOptions()
.setReadTimeoutMillis(Integer.parseInt(readTimeoutMillis));
}
Metadata metadata = cluster.getMetadata();
LOGGER.info("Connected to cluster: {}\n", metadata.getClusterName());
for (Host discoveredHost : metadata.getAllHosts()) {
LOGGER.info("Datacenter: {}; Host: {}; Rack: {}\n",
discoveredHost.getDatacenter(), discoveredHost.getEndPoint().resolve().getAddress(),
discoveredHost.getRack());
}
session = cluster.connect(keyspace);
if (Boolean.parseBoolean(getProperties().getProperty(SCYLLA_LWT, Boolean.toString(lwt)))) {
LOGGER.info("Using LWT\n");
lwt = true;
readConsistencyLevel = ConsistencyLevel.SERIAL;
writeConsistencyLevel = ConsistencyLevel.ANY;
} else {
LOGGER.info("Not using LWT\n");
}
LOGGER.info("Read consistency: {}, Write consistency: {}\n",
readConsistencyLevel.name(),
writeConsistencyLevel.name());
} catch (Exception e) {
throw new DBException(e);
}
} // synchronized
}
/**
* Cleanup any state for this DB. Called once per DB instance; there is one DB
* instance per client thread.
*/
@Override
public void cleanup() throws DBException {
synchronized (INIT_COUNT) {
final int curInitCount = INIT_COUNT.decrementAndGet();
if (curInitCount <= 0) {
READ_STMTS.clear();
SCAN_STMTS.clear();
INSERT_STMTS.clear();
UPDATE_STMTS.clear();
READ_ALL_STMT.set(null);
SCAN_ALL_STMT.set(null);
DELETE_STMT.set(null);
if (session != null) {
session.close();
session = null;
}
if (cluster != null) {
cluster.close();
cluster = null;
}
}
if (curInitCount < 0) {
// This should never happen.
throw new DBException(String.format("initCount is negative: %d", curInitCount));
}
}
}
/**
* Read a record from the database. Each field/value pair from the result will
* be stored in a HashMap.
*
* @param table
* The name of the table
* @param key
* The record key of the record to read.
* @param fields
* The list of fields to read, or null for all of them
* @param result
* A HashMap of field/value pairs for the result
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
try {
PreparedStatement stmt = (fields == null) ? READ_ALL_STMT.get() : READ_STMTS.get(fields);
// Prepare statement on demand
if (stmt == null) {
Select.Builder selectBuilder;
if (fields == null) {
selectBuilder = QueryBuilder.select().all();
} else {
selectBuilder = QueryBuilder.select();
for (String col : fields) {
((Select.Selection) selectBuilder).column(col);
}
}
stmt = session.prepare(selectBuilder.from(table)
.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker()))
.limit(1));
stmt.setConsistencyLevel(readConsistencyLevel);
if (trace) {
stmt.enableTracing();
}
PreparedStatement prevStmt = (fields == null) ?
READ_ALL_STMT.getAndSet(stmt) :
READ_STMTS.putIfAbsent(new HashSet<>(fields), stmt);
if (prevStmt != null) {
stmt = prevStmt;
}
}
LOGGER.debug(stmt.getQueryString());
LOGGER.debug("key = {}", key);
ResultSet rs = session.execute(stmt.bind(key));
if (rs.isExhausted()) {
return Status.NOT_FOUND;
}
// Should be only 1 row
Row row = rs.one();
ColumnDefinitions cd = row.getColumnDefinitions();
for (ColumnDefinitions.Definition def : cd) {
ByteBuffer val = row.getBytesUnsafe(def.getName());
if (val != null) {
result.put(def.getName(), new ByteArrayByteIterator(val.array()));
} else {
result.put(def.getName(), null);
}
}
return Status.OK;
} catch (Exception e) {
LOGGER.error(MessageFormatter.format("Error reading key: {}", key).getMessage(), e);
return Status.ERROR;
}
}
/**
* Perform a range scan for a set of records in the database. Each field/value
* pair from the result will be stored in a HashMap.
*
* scylla CQL uses "token" method for range scan which doesn't always yield
* intuitive results.
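   *
   * As a sketch, the generated statement has the shape
   * {@code SELECT ... FROM usertable WHERE token(y_id) >= token(?) LIMIT ?}
   * (with {@code usertable} standing in for the table argument and {@code y_id}
   * being the key column used by this client).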
*
* @param table
* The name of the table
* @param startkey
* The record key of the first record to read.
* @param recordcount
* The number of records to read
* @param fields
* The list of fields to read, or null for all of them
* @param result
* A Vector of HashMaps, where each HashMap is a set field/value
* pairs for one record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
try {
PreparedStatement stmt = (fields == null) ? SCAN_ALL_STMT.get() : SCAN_STMTS.get(fields);
// Prepare statement on demand
if (stmt == null) {
Select.Builder selectBuilder;
if (fields == null) {
selectBuilder = QueryBuilder.select().all();
} else {
selectBuilder = QueryBuilder.select();
for (String col : fields) {
((Select.Selection) selectBuilder).column(col);
}
}
Select selectStmt = selectBuilder.from(table);
        // The Select builder does not produce the desired token()-based WHERE clause here,
        // so the scan query string is assembled manually.
String initialStmt = selectStmt.toString();
String scanStmt = initialStmt.substring(0, initialStmt.length() - 1) +
" WHERE " + QueryBuilder.token(YCSB_KEY) +
" >= token(" + QueryBuilder.bindMarker() + ")" +
" LIMIT " + QueryBuilder.bindMarker();
stmt = session.prepare(scanStmt);
stmt.setConsistencyLevel(readConsistencyLevel);
if (trace) {
stmt.enableTracing();
}
PreparedStatement prevStmt = (fields == null) ?
SCAN_ALL_STMT.getAndSet(stmt) :
SCAN_STMTS.putIfAbsent(new HashSet<>(fields), stmt);
if (prevStmt != null) {
stmt = prevStmt;
}
}
LOGGER.debug(stmt.getQueryString());
LOGGER.debug("startKey = {}, recordcount = {}", startkey, recordcount);
ResultSet rs = session.execute(stmt.bind(startkey, recordcount));
HashMap<String, ByteIterator> tuple;
while (!rs.isExhausted()) {
Row row = rs.one();
tuple = new HashMap<>();
ColumnDefinitions cd = row.getColumnDefinitions();
for (ColumnDefinitions.Definition def : cd) {
ByteBuffer val = row.getBytesUnsafe(def.getName());
if (val != null) {
tuple.put(def.getName(), new ByteArrayByteIterator(val.array()));
} else {
tuple.put(def.getName(), null);
}
}
result.add(tuple);
}
return Status.OK;
} catch (Exception e) {
LOGGER.error(
MessageFormatter.format("Error scanning with startkey: {}", startkey).getMessage(), e);
return Status.ERROR;
}
}
/**
* Update a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key, overwriting any existing values with the same field name.
*
* @param table
* The name of the table
* @param key
* The record key of the record to write.
* @param values
* A HashMap of field/value pairs to update in the record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
try {
Set<String> fields = values.keySet();
PreparedStatement stmt = UPDATE_STMTS.get(fields);
// Prepare statement on demand
if (stmt == null) {
Update updateStmt = QueryBuilder.update(table);
// Add fields
for (String field : fields) {
updateStmt.with(QueryBuilder.set(field, QueryBuilder.bindMarker()));
}
// Add key
updateStmt.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker()));
if (lwt) {
updateStmt.where().ifExists();
}
stmt = session.prepare(updateStmt);
stmt.setConsistencyLevel(writeConsistencyLevel);
if (trace) {
stmt.enableTracing();
}
PreparedStatement prevStmt = UPDATE_STMTS.putIfAbsent(new HashSet<>(fields), stmt);
if (prevStmt != null) {
stmt = prevStmt;
}
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(stmt.getQueryString());
LOGGER.debug("key = {}", key);
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
LOGGER.debug("{} = {}", entry.getKey(), entry.getValue());
}
}
// Add fields
ColumnDefinitions vars = stmt.getVariables();
BoundStatement boundStmt = stmt.bind();
for (int i = 0; i < vars.size() - 1; i++) {
boundStmt.setString(i, values.get(vars.getName(i)).toString());
}
// Add key
boundStmt.setString(vars.size() - 1, key);
session.execute(boundStmt);
return Status.OK;
} catch (Exception e) {
LOGGER.error(MessageFormatter.format("Error updating key: {}", key).getMessage(), e);
}
return Status.ERROR;
}
/**
* Insert a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key.
*
* @param table
* The name of the table
* @param key
* The record key of the record to insert.
* @param values
* A HashMap of field/value pairs to insert in the record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
try {
Set<String> fields = values.keySet();
PreparedStatement stmt = INSERT_STMTS.get(fields);
// Prepare statement on demand
if (stmt == null) {
Insert insertStmt = QueryBuilder.insertInto(table);
// Add key
insertStmt.value(YCSB_KEY, QueryBuilder.bindMarker());
// Add fields
for (String field : fields) {
insertStmt.value(field, QueryBuilder.bindMarker());
}
if (lwt) {
insertStmt.ifNotExists();
}
stmt = session.prepare(insertStmt);
stmt.setConsistencyLevel(writeConsistencyLevel);
if (trace) {
stmt.enableTracing();
}
PreparedStatement prevStmt = INSERT_STMTS.putIfAbsent(new HashSet<>(fields), stmt);
if (prevStmt != null) {
stmt = prevStmt;
}
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(stmt.getQueryString());
LOGGER.debug("key = {}", key);
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
LOGGER.debug("{} = {}", entry.getKey(), entry.getValue());
}
}
// Add key
BoundStatement boundStmt = stmt.bind().setString(0, key);
// Add fields
ColumnDefinitions vars = stmt.getVariables();
for (int i = 1; i < vars.size(); i++) {
boundStmt.setString(i, values.get(vars.getName(i)).toString());
}
session.execute(boundStmt);
return Status.OK;
} catch (Exception e) {
LOGGER.error(MessageFormatter.format("Error inserting key: {}", key).getMessage(), e);
}
return Status.ERROR;
}
/**
* Delete a record from the database.
*
* @param table
* The name of the table
* @param key
* The record key of the record to delete.
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status delete(String table, String key) {
try {
PreparedStatement stmt = DELETE_STMT.get();
// Prepare statement on demand
if (stmt == null) {
Delete s = QueryBuilder.delete().from(table);
s.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker()));
if (lwt) {
s.ifExists();
}
stmt = session.prepare(s);
stmt.setConsistencyLevel(writeConsistencyLevel);
if (trace) {
stmt.enableTracing();
}
PreparedStatement prevStmt = DELETE_STMT.getAndSet(stmt);
if (prevStmt != null) {
stmt = prevStmt;
}
}
LOGGER.debug(stmt.getQueryString());
LOGGER.debug("key = {}", key);
session.execute(stmt.bind(key));
return Status.OK;
} catch (Exception e) {
LOGGER.error(MessageFormatter.format("Error deleting key: {}", key).getMessage(), e);
}
return Status.ERROR;
}
}
| 22,304 | 33.315385 | 119 | java |
null | NearPMSW-main/baseline/logging/YCSB2/zookeeper/src/test/java/site/ycsb/db/zookeeper/ZKClientTest.java | /**
* Copyright (c) 2020 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.zookeeper;
import org.apache.curator.test.TestingServer;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import site.ycsb.ByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.measurements.Measurements;
import site.ycsb.workloads.CoreWorkload;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import static junit.framework.TestCase.assertEquals;
import static org.junit.Assert.fail;
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY;
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT;
/**
* Integration tests for the YCSB ZooKeeper client.
*/
public class ZKClientTest {
private static TestingServer zkTestServer;
private ZKClient client;
private String tableName;
private final static String path = "benchmark";
private static final int PORT = 2181;
@BeforeClass
public static void setUpClass() throws Exception {
zkTestServer = new TestingServer(PORT);
zkTestServer.start();
}
@AfterClass
public static void tearDownClass() throws Exception {
zkTestServer.stop();
}
@Before
public void setUp() throws Exception {
client = new ZKClient();
Properties p = new Properties();
p.setProperty("zookeeper.connectString", "127.0.0.1:" + String.valueOf(PORT));
Measurements.setProperties(p);
final CoreWorkload workload = new CoreWorkload();
workload.init(p);
tableName = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
client.setProperties(p);
client.init();
}
@After
public void tearDown() throws Exception {
client.cleanup();
}
@Test
public void testZKClient() {
// insert
Map<String, String> m = new HashMap<>();
String field1 = "field_1";
String value1 = "value_1";
m.put(field1, value1);
Map<String, ByteIterator> result = StringByteIterator.getByteIteratorMap(m);
client.insert(tableName, path, result);
// read
result.clear();
Status status = client.read(tableName, path, null, result);
assertEquals(Status.OK, status);
assertEquals(1, result.size());
assertEquals(value1, result.get(field1).toString());
// update(the same field)
m.clear();
result.clear();
String newVal = "value_new";
m.put(field1, newVal);
result = StringByteIterator.getByteIteratorMap(m);
client.update(tableName, path, result);
assertEquals(1, result.size());
// Verify result
result.clear();
status = client.read(tableName, path, null, result);
assertEquals(Status.OK, status);
// here we only have one field: field_1
assertEquals(1, result.size());
assertEquals(newVal, result.get(field1).toString());
// update(two different field)
m.clear();
result.clear();
String field2 = "field_2";
String value2 = "value_2";
m.put(field2, value2);
result = StringByteIterator.getByteIteratorMap(m);
client.update(tableName, path, result);
assertEquals(1, result.size());
// Verify result
result.clear();
status = client.read(tableName, path, null, result);
assertEquals(Status.OK, status);
// here we have two field: field_1 and field_2
assertEquals(2, result.size());
assertEquals(value2, result.get(field2).toString());
assertEquals(newVal, result.get(field1).toString());
// delete
status = client.delete(tableName, path);
assertEquals(Status.OK, status);
// Verify result
result.clear();
status = client.read(tableName, path, null, result);
    // reading a node that no longer exists raises NoNode, which the client maps to ERROR
assertEquals(Status.ERROR, status);
assertEquals(0, result.size());
}
@Test
@Ignore("Not yet implemented")
public void testScan() {
fail("Not yet implemented");
}
}
| 4,549 | 27.797468 | 82 | java |
null | NearPMSW-main/baseline/logging/YCSB2/zookeeper/src/main/java/site/ycsb/db/zookeeper/package-info.java | /*
* Copyright (c) 2020 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="https://zookeeper.apache.org/">ZooKeeper</a>.
*/
package site.ycsb.db.zookeeper;
| 778 | 32.869565 | 78 | java |
null | NearPMSW-main/baseline/logging/YCSB2/zookeeper/src/main/java/site/ycsb/db/zookeeper/ZKClient.java | /**
* Copyright (c) 2020 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
* <p>
* ZooKeeper client binding for YCSB.
* <p>
*/
package site.ycsb.db.zookeeper;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.TimeUnit;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* YCSB binding for <a href="https://zookeeper.apache.org/">ZooKeeper</a>.
*
* See {@code zookeeper/README.md} for details.
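 *
 * <p>Each record is stored as a single znode: the record key becomes the node path
 * (key {@code user1} is stored at {@code /user1}) and the field/value map is
 * serialized into the node's data as a JSON object such as
 * {@code {"field0":"value0"}} (names and values illustrative only).</p>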
*/
public class ZKClient extends DB {
private ZooKeeper zk;
private Watcher watcher;
private static final String CONNECT_STRING = "zookeeper.connectString";
private static final String DEFAULT_CONNECT_STRING = "127.0.0.1:2181";
private static final String SESSION_TIMEOUT_PROPERTY = "zookeeper.sessionTimeout";
private static final long DEFAULT_SESSION_TIMEOUT = TimeUnit.SECONDS.toMillis(30L);
private static final String WATCH_FLAG = "zookeeper.watchFlag";
private static final Charset UTF_8 = Charset.forName("UTF-8");
private static final Logger LOG = LoggerFactory.getLogger(ZKClient.class);
public void init() throws DBException {
Properties props = getProperties();
String connectString = props.getProperty(CONNECT_STRING);
if (connectString == null || connectString.length() == 0) {
connectString = DEFAULT_CONNECT_STRING;
}
if(Boolean.parseBoolean(props.getProperty(WATCH_FLAG))) {
watcher = new SimpleWatcher();
} else {
watcher = null;
}
long sessionTimeout;
String sessionTimeoutString = props.getProperty(SESSION_TIMEOUT_PROPERTY);
if (sessionTimeoutString != null) {
sessionTimeout = Integer.parseInt(sessionTimeoutString);
} else {
sessionTimeout = DEFAULT_SESSION_TIMEOUT;
}
try {
zk = new ZooKeeper(connectString, (int) sessionTimeout, new SimpleWatcher());
} catch (IOException e) {
throw new DBException("Creating connection failed.");
}
}
public void cleanup() throws DBException {
try {
zk.close();
} catch (InterruptedException e) {
throw new DBException("Closing connection failed.");
}
}
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
String path = getPath(key);
try {
byte[] data = zk.getData(path, watcher, null);
if (data == null || data.length == 0) {
return Status.NOT_FOUND;
}
deserializeValues(data, fields, result);
return Status.OK;
} catch (KeeperException | InterruptedException e) {
LOG.error("Error when reading a path:{},tableName:{}", path, table, e);
return Status.ERROR;
}
}
@Override
public Status insert(String table, String key,
Map<String, ByteIterator> values) {
String path = getPath(key);
String data = getJsonStrFromByteMap(values);
try {
zk.create(path, data.getBytes(UTF_8), ZooDefs.Ids.OPEN_ACL_UNSAFE,
CreateMode.PERSISTENT);
return Status.OK;
} catch (KeeperException.NodeExistsException e1) {
return Status.OK;
} catch (KeeperException | InterruptedException e2) {
LOG.error("Error when inserting a path:{},tableName:{}", path, table, e2);
return Status.ERROR;
}
}
@Override
public Status delete(String table, String key) {
String path = getPath(key);
try {
zk.delete(path, -1);
return Status.OK;
} catch (InterruptedException | KeeperException e) {
LOG.error("Error when deleting a path:{},tableName:{}", path, table, e);
return Status.ERROR;
}
}
@Override
public Status update(String table, String key,
Map<String, ByteIterator> values) {
String path = getPath(key);
try {
      // we have to do a read operation here before setData to meet YCSB's update semantics:
// update a single record in the database, adding or replacing the specified fields.
byte[] data = zk.getData(path, watcher, null);
if (data == null || data.length == 0) {
return Status.NOT_FOUND;
}
final Map<String, ByteIterator> result = new HashMap<>();
deserializeValues(data, null, result);
result.putAll(values);
// update
zk.setData(path, getJsonStrFromByteMap(result).getBytes(UTF_8), -1);
return Status.OK;
} catch (KeeperException | InterruptedException e) {
LOG.error("Error when updating a path:{},tableName:{}", path, table, e);
return Status.ERROR;
}
}
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
return Status.NOT_IMPLEMENTED;
}
private String getPath(String key) {
return key.startsWith("/") ? key : "/" + key;
}
/**
* converting the key:values map to JSON Strings.
*/
private static String getJsonStrFromByteMap(Map<String, ByteIterator> map) {
Map<String, String> stringMap = StringByteIterator.getStringMap(map);
return JSONValue.toJSONString(stringMap);
}
private Map<String, ByteIterator> deserializeValues(final byte[] data, final Set<String> fields,
final Map<String, ByteIterator> result) {
JSONObject jsonObject = (JSONObject)JSONValue.parse(new String(data, UTF_8));
Iterator<String> iterator = jsonObject.keySet().iterator();
while(iterator.hasNext()) {
String field = iterator.next();
String value = jsonObject.get(field).toString();
if(fields == null || fields.contains(field)) {
result.put(field, new StringByteIterator(value));
}
}
return result;
}
private static class SimpleWatcher implements Watcher {
public void process(WatchedEvent e) {
if (e.getType() == Event.EventType.None) {
return;
}
if (e.getState() == Event.KeeperState.SyncConnected) {
//do nothing
}
}
}
}
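// Illustrative usage sketch (added for clarity, not part of the original file). It relies on the
// site.ycsb.DB lifecycle used by the YCSB client (setProperties/init/cleanup); the table, key,
// and field names are hypothetical.
//
//   Properties p = new Properties();
//   p.setProperty("zookeeper.connectString", "127.0.0.1:2181");
//   ZKClient client = new ZKClient();
//   client.setProperties(p);
//   client.init();
//   Map<String, ByteIterator> values = new HashMap<>();
//   values.put("field0", new StringByteIterator("value0"));
//   client.insert("usertable", "user1", values);   // stored under the znode path "/user1"
//   Map<String, ByteIterator> row = new HashMap<>();
//   client.read("usertable", "user1", null, row);  // a null field set returns all fields
//   client.cleanup();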
| 7,198 | 31.282511 | 98 | java |
null | NearPMSW-main/baseline/logging/YCSB2/couchbase/src/main/java/site/ycsb/db/package-info.java | /*
* Copyright (c) 2015 - 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="http://www.couchbase.com/">Couchbase</a>.
*/
package site.ycsb.db;
| 771 | 32.565217 | 74 | java |
null | NearPMSW-main/baseline/logging/YCSB2/couchbase/src/main/java/site/ycsb/db/CouchbaseClient.java | /**
* Copyright (c) 2013 - 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import com.couchbase.client.protocol.views.*;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import net.spy.memcached.PersistTo;
import net.spy.memcached.ReplicateTo;
import net.spy.memcached.internal.OperationFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.StringWriter;
import java.io.Writer;
import java.net.URI;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
/**
* A class that wraps the CouchbaseClient to allow it to be interfaced with YCSB.
* This class extends {@link DB} and implements the database interface used by YCSB client.
*/
public class CouchbaseClient extends DB {
public static final String URL_PROPERTY = "couchbase.url";
public static final String BUCKET_PROPERTY = "couchbase.bucket";
public static final String PASSWORD_PROPERTY = "couchbase.password";
public static final String CHECKF_PROPERTY = "couchbase.checkFutures";
public static final String PERSIST_PROPERTY = "couchbase.persistTo";
public static final String REPLICATE_PROPERTY = "couchbase.replicateTo";
public static final String JSON_PROPERTY = "couchbase.json";
public static final String DESIGN_DOC_PROPERTY = "couchbase.ddoc";
public static final String VIEW_PROPERTY = "couchbase.view";
public static final String STALE_PROPERTY = "couchbase.stale";
public static final String SCAN_PROPERTY = "scanproportion";
public static final String STALE_PROPERTY_DEFAULT = Stale.OK.name();
public static final String SCAN_PROPERTY_DEFAULT = "0.0";
protected static final ObjectMapper JSON_MAPPER = new ObjectMapper();
private com.couchbase.client.CouchbaseClient client;
private PersistTo persistTo;
private ReplicateTo replicateTo;
private boolean checkFutures;
private boolean useJson;
private String designDoc;
private String viewName;
private Stale stale;
private View view;
private final Logger log = LoggerFactory.getLogger(getClass());
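  // Illustrative configuration sketch (added for clarity, not part of the original source).
  // These are the property keys defined above and read in init(); the values shown are
  // hypothetical examples, with defaults from init() noted in parentheses.
  //
  //   couchbase.url=http://127.0.0.1:8091/pools   (default)
  //   couchbase.bucket=default                    (default)
  //   couchbase.password=
  //   couchbase.checkFutures=true                 (default true)
  //   couchbase.persistTo=0                       (0 to 4)
  //   couchbase.replicateTo=0                     (0 to 3)
  //   couchbase.json=true                         (default true)
  //   couchbase.ddoc=myDesignDoc                  (used only when scanproportion > 0)
  //   couchbase.view=myView                       (used only when scanproportion > 0)
  //   couchbase.stale=OK                          (default)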
@Override
public void init() throws DBException {
Properties props = getProperties();
String url = props.getProperty(URL_PROPERTY, "http://127.0.0.1:8091/pools");
String bucket = props.getProperty(BUCKET_PROPERTY, "default");
String password = props.getProperty(PASSWORD_PROPERTY, "");
checkFutures = props.getProperty(CHECKF_PROPERTY, "true").equals("true");
useJson = props.getProperty(JSON_PROPERTY, "true").equals("true");
persistTo = parsePersistTo(props.getProperty(PERSIST_PROPERTY, "0"));
replicateTo = parseReplicateTo(props.getProperty(REPLICATE_PROPERTY, "0"));
designDoc = getProperties().getProperty(DESIGN_DOC_PROPERTY);
viewName = getProperties().getProperty(VIEW_PROPERTY);
stale = Stale.valueOf(getProperties().getProperty(STALE_PROPERTY, STALE_PROPERTY_DEFAULT).toUpperCase());
Double scanproportion = Double.valueOf(props.getProperty(SCAN_PROPERTY, SCAN_PROPERTY_DEFAULT));
Properties systemProperties = System.getProperties();
systemProperties.put("net.spy.log.LoggerImpl", "net.spy.memcached.compat.log.SLF4JLogger");
System.setProperties(systemProperties);
try {
client = new com.couchbase.client.CouchbaseClient(Arrays.asList(new URI(url)), bucket, password);
} catch (Exception e) {
throw new DBException("Could not create CouchbaseClient object.", e);
}
if (scanproportion > 0) {
try {
view = client.getView(designDoc, viewName);
} catch (Exception e) {
throw new DBException(String.format("%s=%s and %s=%s provided, unable to connect to view.",
DESIGN_DOC_PROPERTY, designDoc, VIEW_PROPERTY, viewName), e.getCause());
}
}
}
/**
* Parse the replicate property into the correct enum.
*
* @param property the stringified property value.
* @throws DBException if parsing the property did fail.
* @return the correct enum.
*/
private ReplicateTo parseReplicateTo(final String property) throws DBException {
int value = Integer.parseInt(property);
switch (value) {
case 0:
return ReplicateTo.ZERO;
case 1:
return ReplicateTo.ONE;
case 2:
return ReplicateTo.TWO;
case 3:
return ReplicateTo.THREE;
default:
throw new DBException(REPLICATE_PROPERTY + " must be between 0 and 3");
}
}
/**
* Parse the persist property into the correct enum.
*
* @param property the stringified property value.
* @throws DBException if parsing the property did fail.
* @return the correct enum.
*/
private PersistTo parsePersistTo(final String property) throws DBException {
int value = Integer.parseInt(property);
switch (value) {
case 0:
return PersistTo.ZERO;
case 1:
return PersistTo.ONE;
case 2:
return PersistTo.TWO;
case 3:
return PersistTo.THREE;
case 4:
return PersistTo.FOUR;
default:
throw new DBException(PERSIST_PROPERTY + " must be between 0 and 4");
}
}
/**
* Shutdown the client.
*/
@Override
public void cleanup() {
client.shutdown();
}
@Override
public Status read(final String table, final String key, final Set<String> fields,
final Map<String, ByteIterator> result) {
String formattedKey = formatKey(table, key);
try {
Object loaded = client.get(formattedKey);
if (loaded == null) {
return Status.ERROR;
}
decode(loaded, fields, result);
return Status.OK;
} catch (Exception e) {
if (log.isErrorEnabled()) {
log.error("Could not read value for key " + formattedKey, e);
}
return Status.ERROR;
}
}
@Override
public Status scan(final String table, final String startkey, final int recordcount, final Set<String> fields,
final Vector<HashMap<String, ByteIterator>> result) {
try {
Query query = new Query().setRangeStart(startkey)
.setLimit(recordcount)
.setIncludeDocs(true)
.setStale(stale);
ViewResponse response = client.query(view, query);
for (ViewRow row : response) {
        HashMap<String, ByteIterator> rowMap = new HashMap<>();
decode(row.getDocument(), fields, rowMap);
result.add(rowMap);
}
return Status.OK;
} catch (Exception e) {
log.error(e.getMessage());
}
return Status.ERROR;
}
@Override
public Status update(final String table, final String key, final Map<String, ByteIterator> values) {
String formattedKey = formatKey(table, key);
try {
final OperationFuture<Boolean> future = client.replace(formattedKey, encode(values), persistTo, replicateTo);
return checkFutureStatus(future);
} catch (Exception e) {
if (log.isErrorEnabled()) {
log.error("Could not update value for key " + formattedKey, e);
}
return Status.ERROR;
}
}
@Override
public Status insert(final String table, final String key, final Map<String, ByteIterator> values) {
String formattedKey = formatKey(table, key);
try {
final OperationFuture<Boolean> future = client.add(formattedKey, encode(values), persistTo, replicateTo);
return checkFutureStatus(future);
} catch (Exception e) {
if (log.isErrorEnabled()) {
log.error("Could not insert value for key " + formattedKey, e);
}
return Status.ERROR;
}
}
@Override
public Status delete(final String table, final String key) {
String formattedKey = formatKey(table, key);
try {
final OperationFuture<Boolean> future = client.delete(formattedKey, persistTo, replicateTo);
return checkFutureStatus(future);
} catch (Exception e) {
if (log.isErrorEnabled()) {
log.error("Could not delete value for key " + formattedKey, e);
}
return Status.ERROR;
}
}
/**
* Prefix the key with the given prefix, to establish a unique namespace.
*
* @param prefix the prefix to use.
* @param key the actual key.
* @return the formatted and prefixed key.
*/
private String formatKey(final String prefix, final String key) {
return prefix + ":" + key;
}
/**
* Wrapper method that either inspects the future or not.
*
* @param future the future to potentially verify.
* @return the status of the future result.
*/
private Status checkFutureStatus(final OperationFuture<?> future) {
if (checkFutures) {
return future.getStatus().isSuccess() ? Status.OK : Status.ERROR;
} else {
return Status.OK;
}
}
/**
* Decode the object from server into the storable result.
*
* @param source the loaded object.
* @param fields the fields to check.
* @param dest the result passed back to the ycsb core.
*/
private void decode(final Object source, final Set<String> fields, final Map<String, ByteIterator> dest) {
if (useJson) {
try {
JsonNode json = JSON_MAPPER.readTree((String) source);
boolean checkFields = fields != null && !fields.isEmpty();
for (Iterator<Map.Entry<String, JsonNode>> jsonFields = json.fields(); jsonFields.hasNext();) {
Map.Entry<String, JsonNode> jsonField = jsonFields.next();
String name = jsonField.getKey();
          if (checkFields && !fields.contains(name)) {
continue;
}
JsonNode jsonValue = jsonField.getValue();
if (jsonValue != null && !jsonValue.isNull()) {
dest.put(name, new StringByteIterator(jsonValue.asText()));
}
}
} catch (Exception e) {
        throw new RuntimeException("Could not decode JSON", e);
}
} else {
Map<String, String> converted = (HashMap<String, String>) source;
for (Map.Entry<String, String> entry : converted.entrySet()) {
dest.put(entry.getKey(), new StringByteIterator(entry.getValue()));
}
}
}
/**
* Encode the object for couchbase storage.
*
* @param source the source value.
* @return the storable object.
*/
private Object encode(final Map<String, ByteIterator> source) {
Map<String, String> stringMap = StringByteIterator.getStringMap(source);
if (!useJson) {
return stringMap;
}
ObjectNode node = JSON_MAPPER.createObjectNode();
for (Map.Entry<String, String> pair : stringMap.entrySet()) {
node.put(pair.getKey(), pair.getValue());
}
JsonFactory jsonFactory = new JsonFactory();
Writer writer = new StringWriter();
try {
JsonGenerator jsonGenerator = jsonFactory.createGenerator(writer);
JSON_MAPPER.writeTree(jsonGenerator, node);
} catch (Exception e) {
      throw new RuntimeException("Could not encode JSON value", e);
}
return writer.toString();
}
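  // Example of the stored document shape produced by encode() when couchbase.json=true
  // (illustrative only; field names and values are hypothetical):
  //
  //   {"field0":"value0","field1":"value1"}
  //
  // With couchbase.json=false the flat Map<String, String> is handed to the client instead.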
}
| 11,866 | 32.148045 | 115 | java |
null | NearPMSW-main/baseline/logging/YCSB2/hbase2/src/test/java/site/ycsb/db/hbase2/HBaseClient2Test.java | /**
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.hbase2;
import static org.junit.Assert.assertArrayEquals;
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY;
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
import site.ycsb.ByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.measurements.Measurements;
import site.ycsb.workloads.CoreWorkload;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Properties;
import java.util.Vector;
/**
* Integration tests for the YCSB HBase 2 client using an HBase minicluster.
*/
public class HBaseClient2Test {
private final static String COLUMN_FAMILY = "cf";
private static HBaseTestingUtility testingUtil;
private HBaseClient2 client;
private Table table = null;
private String tableName;
private static boolean isWindows() {
final String os = System.getProperty("os.name");
return os.startsWith("Windows");
}
/**
* Creates a mini-cluster for use in these tests.
* This is a heavy-weight operation, so invoked only once for the test class.
*/
@BeforeClass
public static void setUpClass() throws Exception {
// Minicluster setup fails on Windows with an UnsatisfiedLinkError.
// Skip if windows.
assumeTrue(!isWindows());
testingUtil = HBaseTestingUtility.createLocalHTU();
testingUtil.startMiniCluster();
}
/**
* Tears down mini-cluster.
*/
@AfterClass
public static void tearDownClass() throws Exception {
if (testingUtil != null) {
testingUtil.shutdownMiniCluster();
}
}
/**
* Re-create the table for each test. Using default properties.
*/
public void setUp() throws Exception {
setUp(new Properties());
}
/**
* Re-create the table for each test. Using custom properties.
*/
public void setUp(Properties p) throws Exception {
client = new HBaseClient2();
client.setConfiguration(new Configuration(testingUtil.getConfiguration()));
p.setProperty("columnfamily", COLUMN_FAMILY);
Measurements.setProperties(p);
final CoreWorkload workload = new CoreWorkload();
workload.init(p);
tableName = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
table = testingUtil.createTable(TableName.valueOf(tableName), Bytes.toBytes(COLUMN_FAMILY));
client.setProperties(p);
client.init();
}
@After
public void tearDown() throws Exception {
table.close();
testingUtil.deleteTable(TableName.valueOf(tableName));
}
@Test
public void testRead() throws Exception {
setUp();
final String rowKey = "row1";
final Put p = new Put(Bytes.toBytes(rowKey));
p.addColumn(Bytes.toBytes(COLUMN_FAMILY),
Bytes.toBytes("column1"), Bytes.toBytes("value1"));
p.addColumn(Bytes.toBytes(COLUMN_FAMILY),
Bytes.toBytes("column2"), Bytes.toBytes("value2"));
table.put(p);
final HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
final Status status = client.read(tableName, rowKey, null, result);
assertEquals(Status.OK, status);
assertEquals(2, result.size());
assertEquals("value1", result.get("column1").toString());
assertEquals("value2", result.get("column2").toString());
}
@Test
public void testReadMissingRow() throws Exception {
setUp();
final HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
final Status status = client.read(tableName, "Missing row", null, result);
assertEquals(Status.NOT_FOUND, status);
assertEquals(0, result.size());
}
@Test
public void testScan() throws Exception {
setUp();
// Fill with data
final String colStr = "row_number";
final byte[] col = Bytes.toBytes(colStr);
final int n = 10;
final List<Put> puts = new ArrayList<Put>(n);
for(int i = 0; i < n; i++) {
final byte[] key = Bytes.toBytes(String.format("%05d", i));
final byte[] value = java.nio.ByteBuffer.allocate(4).putInt(i).array();
final Put p = new Put(key);
p.addColumn(Bytes.toBytes(COLUMN_FAMILY), col, value);
puts.add(p);
}
table.put(puts);
// Test
final Vector<HashMap<String, ByteIterator>> result =
new Vector<HashMap<String, ByteIterator>>();
// Scan 5 records, skipping the first
client.scan(tableName, "00001", 5, null, result);
assertEquals(5, result.size());
for(int i = 0; i < 5; i++) {
final HashMap<String, ByteIterator> row = result.get(i);
assertEquals(1, row.size());
assertTrue(row.containsKey(colStr));
final byte[] bytes = row.get(colStr).toArray();
final ByteBuffer buf = ByteBuffer.wrap(bytes);
final int rowNum = buf.getInt();
assertEquals(i + 1, rowNum);
}
}
@Test
public void testScanWithValueFilteringUsingDefaultProperties() throws Exception {
testScanWithValueFiltering(null, null, 100, new byte[][] {
Bytes.fromHex("0000"), Bytes.fromHex("1111"), Bytes.fromHex("2222"), Bytes.fromHex("3333"),
Bytes.fromHex("4444"), Bytes.fromHex("5555"), Bytes.fromHex("6666"), Bytes.fromHex("7777"),
});
}
@Test
public void testScanWithValueFilteringOperationLessOrEqual() throws Exception {
testScanWithValueFiltering("less_or_equal", "3333", 100, new byte[][] {
Bytes.fromHex("0000"), Bytes.fromHex("1111"), Bytes.fromHex("2222"), Bytes.fromHex("3333"),
});
}
@Test
public void testScanWithValueFilteringOperationEqual() throws Exception {
testScanWithValueFiltering("equal", "AAAA", 100, new byte[][]{
Bytes.fromHex("AAAA")
});
}
@Test
public void testScanWithValueFilteringOperationNotEqual() throws Exception {
testScanWithValueFiltering("not_equal", "AAAA", 100 , new byte[][]{
Bytes.fromHex("0000"), Bytes.fromHex("1111"), Bytes.fromHex("2222"), Bytes.fromHex("3333"),
Bytes.fromHex("4444"), Bytes.fromHex("5555"), Bytes.fromHex("6666"), Bytes.fromHex("7777"),
Bytes.fromHex("8888"), Bytes.fromHex("9999"), Bytes.fromHex("BBBB"),
Bytes.fromHex("CCCC"), Bytes.fromHex("DDDD"), Bytes.fromHex("EEEE"), Bytes.fromHex("FFFF")
});
}
@Test
public void testScanWithValueFilteringAndRowLimit() throws Exception {
testScanWithValueFiltering("greater", "8887", 3, new byte[][] {
Bytes.fromHex("8888"), Bytes.fromHex("9999"), Bytes.fromHex("AAAA")
});
}
@Test
  public void testUpdate() throws Exception {
setUp();
final String key = "key";
final HashMap<String, String> input = new HashMap<String, String>();
input.put("column1", "value1");
input.put("column2", "value2");
final Status status = client.insert(tableName, key, StringByteIterator.getByteIteratorMap(input));
assertEquals(Status.OK, status);
// Verify result
final Get get = new Get(Bytes.toBytes(key));
final Result result = this.table.get(get);
assertFalse(result.isEmpty());
assertEquals(2, result.size());
for(final java.util.Map.Entry<String, String> entry : input.entrySet()) {
assertEquals(entry.getValue(),
new String(result.getValue(Bytes.toBytes(COLUMN_FAMILY),
Bytes.toBytes(entry.getKey()))));
}
}
@Test
@Ignore("Not yet implemented")
public void testDelete() {
fail("Not yet implemented");
}
private void testScanWithValueFiltering(String operation, String filterValue, int scanRowLimit,
byte[][] expectedValuesReturned) throws Exception {
Properties properties = new Properties();
properties.setProperty("hbase.usescanvaluefiltering", String.valueOf(true));
if(operation != null) {
properties.setProperty("hbase.scanfilteroperator", operation);
}
if(filterValue != null) {
properties.setProperty("hbase.scanfiltervalue", filterValue);
}
// setup the client and fill two columns with data
setUp(properties);
setupTableColumnWithHexValues("col_1");
setupTableColumnWithHexValues("col_2");
Vector<HashMap<String, ByteIterator>> result = new Vector<>();
// first scan the whole table (both columns)
client.scan(tableName, "00000", scanRowLimit, null, result);
assertEquals(expectedValuesReturned.length, result.size());
for(int i = 0; i < expectedValuesReturned.length; i++) {
final HashMap<String, ByteIterator> row = result.get(i);
assertEquals(2, row.size());
assertTrue(row.containsKey("col_1") && row.containsKey("col_2"));
assertArrayEquals(expectedValuesReturned[i], row.get("col_1").toArray());
assertArrayEquals(expectedValuesReturned[i], row.get("col_2").toArray());
}
// now scan only a single column (the filter should work here too)
result = new Vector<>();
client.scan(tableName, "00000", scanRowLimit, Collections.singleton("col_1"), result);
assertEquals(expectedValuesReturned.length, result.size());
for(int i = 0; i < expectedValuesReturned.length; i++) {
final HashMap<String, ByteIterator> row = result.get(i);
assertEquals(1, row.size());
assertTrue(row.containsKey("col_1"));
assertArrayEquals(expectedValuesReturned[i], row.get("col_1").toArray());
}
}
private void setupTableColumnWithHexValues(String colStr) throws Exception {
final byte[] col = Bytes.toBytes(colStr);
final byte[][] values = {
Bytes.fromHex("0000"), Bytes.fromHex("1111"), Bytes.fromHex("2222"), Bytes.fromHex("3333"),
Bytes.fromHex("4444"), Bytes.fromHex("5555"), Bytes.fromHex("6666"), Bytes.fromHex("7777"),
Bytes.fromHex("8888"), Bytes.fromHex("9999"), Bytes.fromHex("AAAA"), Bytes.fromHex("BBBB"),
Bytes.fromHex("CCCC"), Bytes.fromHex("DDDD"), Bytes.fromHex("EEEE"), Bytes.fromHex("FFFF")
};
final List<Put> puts = new ArrayList<>(16);
for(int i = 0; i < 16; i++) {
final byte[] key = Bytes.toBytes(String.format("%05d", i));
final byte[] value = values[i];
final Put p = new Put(key);
p.addColumn(Bytes.toBytes(COLUMN_FAMILY), col, value);
puts.add(p);
}
table.put(puts);
}
}
| 11,470 | 34.513932 | 102 | java |
null | NearPMSW-main/baseline/logging/YCSB2/hbase2/src/main/java/site/ycsb/db/hbase2/package-info.java | /*
* Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="https://hbase.apache.org/">HBase</a>
* using the HBase 2 shaded API.
*/
package site.ycsb.db.hbase2;
| 795 | 32.166667 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/hbase2/src/main/java/site/ycsb/db/hbase2/HBaseClient2.java | /**
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.hbase2;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.ValueFilter;
import site.ycsb.ByteArrayByteIterator;
import site.ycsb.ByteIterator;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.measurements.Measurements;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;
import java.util.ConcurrentModificationException;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.atomic.AtomicInteger;
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY;
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT;
/**
* HBase 2 client for YCSB framework.
*
* Intended for use with HBase's shaded client.
*/
public class HBaseClient2 extends site.ycsb.DB {
private static final AtomicInteger THREAD_COUNT = new AtomicInteger(0);
private Configuration config = HBaseConfiguration.create();
private boolean debug = false;
private String tableName = "";
/**
* A Cluster Connection instance that is shared by all running ycsb threads.
* Needs to be initialized late so we pick up command-line configs if any.
   * To ensure one instance only in a multi-threaded context, access is
   * guarded by synchronizing on {@link #THREAD_COUNT} (see init() and cleanup()).
*/
private static Connection connection = null;
// Depending on the value of clientSideBuffering, either bufferedMutator
// (clientSideBuffering) or currentTable (!clientSideBuffering) will be used.
private Table currentTable = null;
private BufferedMutator bufferedMutator = null;
private String columnFamily = "";
private byte[] columnFamilyBytes;
/**
* Durability to use for puts and deletes.
*/
private Durability durability = Durability.USE_DEFAULT;
/** Whether or not a page filter should be used to limit scan length. */
private boolean usePageFilter = true;
/**
* If true, buffer mutations on the client. This is the default behavior for
* HBaseClient. For measuring insert/update/delete latencies, client side
* buffering should be disabled.
*/
private boolean clientSideBuffering = false;
private long writeBufferSize = 1024 * 1024 * 12;
/**
* If true, we will configure server-side value filtering during scans.
*/
private boolean useScanValueFiltering = false;
private CompareOperator scanFilterOperator;
private static final String DEFAULT_SCAN_FILTER_OPERATOR = "less_or_equal";
private ByteArrayComparable scanFilterValue;
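  // Illustrative configuration sketch (added for clarity, not part of the original source).
  // These are the property names read in init() below; every value shown is a hypothetical
  // example.
  //
  //   columnfamily=cf                          (required)
  //   clientbuffering=false                    (use a BufferedMutator for writes when true)
  //   writebuffersize=12582912                 (bytes; only relevant with clientbuffering=true)
  //   durability=USE_DEFAULT                   (any org.apache.hadoop.hbase.client.Durability name)
  //   hbase.usepagefilter=true
  //   hbase.usescanvaluefiltering=false
  //   hbase.scanfilteroperator=less_or_equal   (a CompareOperator name; used only when filtering)
  //   hbase.scanfiltervalue=00FF               (hex string compared against cell values)
  //   principal=ycsb@EXAMPLE.COM               (Kerberos-secured clusters only)
  //   keytab=/etc/security/ycsb.keytab         (Kerberos-secured clusters only)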
private static final String DEFAULT_SCAN_FILTER_VALUE = // 200 hexadecimal chars translated into 100 bytes
"7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" +
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF";
/**
* Initialize any state for this DB. Called once per DB instance; there is one
* DB instance per client thread.
*/
@Override
public void init() throws DBException {
if ("true"
.equals(getProperties().getProperty("clientbuffering", "false"))) {
this.clientSideBuffering = true;
}
if (getProperties().containsKey("writebuffersize")) {
writeBufferSize =
Long.parseLong(getProperties().getProperty("writebuffersize"));
}
if (getProperties().getProperty("durability") != null) {
this.durability =
Durability.valueOf(getProperties().getProperty("durability"));
}
if ("kerberos".equalsIgnoreCase(config.get("hbase.security.authentication"))) {
config.set("hadoop.security.authentication", "Kerberos");
UserGroupInformation.setConfiguration(config);
}
if ((getProperties().getProperty("principal") != null)
&& (getProperties().getProperty("keytab") != null)) {
try {
UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"),
getProperties().getProperty("keytab"));
} catch (IOException e) {
System.err.println("Keytab file is not readable or not found");
throw new DBException(e);
}
}
String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
try {
THREAD_COUNT.getAndIncrement();
synchronized (THREAD_COUNT) {
if (connection == null) {
// Initialize if not set up already.
connection = ConnectionFactory.createConnection(config);
// Terminate right now if table does not exist, since the client
// will not propagate this error upstream once the workload
// starts.
final TableName tName = TableName.valueOf(table);
try (Admin admin = connection.getAdmin()) {
if (!admin.tableExists(tName)) {
              throw new DBException("Table " + tName + " does not exist");
}
}
}
}
} catch (java.io.IOException e) {
throw new DBException(e);
}
if ((getProperties().getProperty("debug") != null)
&& (getProperties().getProperty("debug").compareTo("true") == 0)) {
debug = true;
}
usePageFilter = isBooleanParamSet("hbase.usepagefilter", usePageFilter);
if (isBooleanParamSet("hbase.usescanvaluefiltering", false)) {
useScanValueFiltering=true;
String operator = getProperties().getProperty("hbase.scanfilteroperator");
operator = operator == null || operator.trim().isEmpty() ? DEFAULT_SCAN_FILTER_OPERATOR : operator;
scanFilterOperator = CompareOperator.valueOf(operator.toUpperCase());
String filterValue = getProperties().getProperty("hbase.scanfiltervalue");
filterValue = filterValue == null || filterValue.trim().isEmpty() ? DEFAULT_SCAN_FILTER_VALUE : filterValue;
scanFilterValue = new BinaryComparator(Bytes.fromHex(filterValue));
}
columnFamily = getProperties().getProperty("columnfamily");
if (columnFamily == null) {
System.err.println("Error, must specify a columnfamily for HBase table");
throw new DBException("No columnfamily specified");
}
columnFamilyBytes = Bytes.toBytes(columnFamily);
}
/**
* Cleanup any state for this DB. Called once per DB instance; there is one DB
* instance per client thread.
*/
@Override
public void cleanup() throws DBException {
// Get the measurements instance as this is the only client that should
// count clean up time like an update if client-side buffering is
// enabled.
Measurements measurements = Measurements.getMeasurements();
try {
long st = System.nanoTime();
if (bufferedMutator != null) {
bufferedMutator.close();
}
if (currentTable != null) {
currentTable.close();
}
long en = System.nanoTime();
final String type = clientSideBuffering ? "UPDATE" : "CLEANUP";
measurements.measure(type, (int) ((en - st) / 1000));
int threadCount = THREAD_COUNT.decrementAndGet();
if (threadCount <= 0) {
// Means we are done so ok to shut down the Connection.
synchronized (THREAD_COUNT) {
if (connection != null) {
connection.close();
connection = null;
}
}
}
} catch (IOException e) {
throw new DBException(e);
}
}
public void getHTable(String table) throws IOException {
final TableName tName = TableName.valueOf(table);
this.currentTable = connection.getTable(tName);
if (clientSideBuffering) {
final BufferedMutatorParams p = new BufferedMutatorParams(tName);
p.writeBufferSize(writeBufferSize);
this.bufferedMutator = connection.getBufferedMutator(p);
}
}
/**
* Read a record from the database. Each field/value pair from the result will
* be stored in a HashMap.
*
* @param table
* The name of the table
* @param key
* The record key of the record to read.
* @param fields
* The list of fields to read, or null for all of them
* @param result
* A HashMap of field/value pairs for the result
* @return Zero on success, a non-zero error code on error
*/
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
// if this is a "new" table, init HTable object. Else, use existing one
if (!tableName.equals(table)) {
currentTable = null;
try {
getHTable(table);
tableName = table;
} catch (IOException e) {
System.err.println("Error accessing HBase table: " + e);
return Status.ERROR;
}
}
Result r = null;
try {
if (debug) {
System.out
.println("Doing read from HBase columnfamily " + columnFamily);
System.out.println("Doing read for key: " + key);
}
Get g = new Get(Bytes.toBytes(key));
if (fields == null) {
g.addFamily(columnFamilyBytes);
} else {
for (String field : fields) {
g.addColumn(columnFamilyBytes, Bytes.toBytes(field));
}
}
r = currentTable.get(g);
} catch (IOException e) {
if (debug) {
System.err.println("Error doing get: " + e);
}
return Status.ERROR;
} catch (ConcurrentModificationException e) {
// do nothing for now...need to understand HBase concurrency model better
return Status.ERROR;
}
if (r.isEmpty()) {
return Status.NOT_FOUND;
}
while (r.advance()) {
final Cell c = r.current();
result.put(Bytes.toString(CellUtil.cloneQualifier(c)),
new ByteArrayByteIterator(CellUtil.cloneValue(c)));
if (debug) {
System.out.println(
"Result for field: " + Bytes.toString(CellUtil.cloneQualifier(c))
+ " is: " + Bytes.toString(CellUtil.cloneValue(c)));
}
}
return Status.OK;
}
/**
* Perform a range scan for a set of records in the database. Each field/value
* pair from the result will be stored in a HashMap.
*
* @param table
* The name of the table
* @param startkey
* The record key of the first record to read.
* @param recordcount
* The number of records to read
* @param fields
* The list of fields to read, or null for all of them
* @param result
* A Vector of HashMaps, where each HashMap is a set field/value
* pairs for one record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
// if this is a "new" table, init HTable object. Else, use existing one
if (!tableName.equals(table)) {
currentTable = null;
try {
getHTable(table);
tableName = table;
} catch (IOException e) {
System.err.println("Error accessing HBase table: " + e);
return Status.ERROR;
}
}
Scan s = new Scan(Bytes.toBytes(startkey));
// HBase has no record limit. Here, assume recordcount is small enough to
// bring back in one call.
// We get back recordcount records
FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
s.setCaching(recordcount);
if (this.usePageFilter) {
filterList.addFilter(new PageFilter(recordcount));
}
// add specified fields or else all fields
if (fields == null) {
s.addFamily(columnFamilyBytes);
} else {
for (String field : fields) {
s.addColumn(columnFamilyBytes, Bytes.toBytes(field));
}
}
// define value filter if needed
if (useScanValueFiltering){
filterList.addFilter(new ValueFilter(scanFilterOperator, scanFilterValue));
}
s.setFilter(filterList);
// get results
ResultScanner scanner = null;
try {
scanner = currentTable.getScanner(s);
int numResults = 0;
for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
// get row key
String key = Bytes.toString(rr.getRow());
if (debug) {
System.out.println("Got scan result for key: " + key);
}
HashMap<String, ByteIterator> rowResult =
new HashMap<String, ByteIterator>();
while (rr.advance()) {
final Cell cell = rr.current();
rowResult.put(Bytes.toString(CellUtil.cloneQualifier(cell)),
new ByteArrayByteIterator(CellUtil.cloneValue(cell)));
}
// add rowResult to result vector
result.add(rowResult);
numResults++;
// PageFilter does not guarantee that the number of results is <=
// pageSize, so this
// break is required.
if (numResults >= recordcount) {// if hit recordcount, bail out
break;
}
} // done with row
} catch (IOException e) {
if (debug) {
System.out.println("Error in getting/parsing scan result: " + e);
}
return Status.ERROR;
} finally {
if (scanner != null) {
scanner.close();
}
}
return Status.OK;
}
/**
* Update a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key, overwriting any existing values with the same field name.
*
* @param table
* The name of the table
* @param key
* The record key of the record to write
* @param values
* A HashMap of field/value pairs to update in the record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status update(String table, String key,
Map<String, ByteIterator> values) {
// if this is a "new" table, init HTable object. Else, use existing one
if (!tableName.equals(table)) {
currentTable = null;
try {
getHTable(table);
tableName = table;
} catch (IOException e) {
System.err.println("Error accessing HBase table: " + e);
return Status.ERROR;
}
}
if (debug) {
System.out.println("Setting up put for key: " + key);
}
Put p = new Put(Bytes.toBytes(key));
p.setDurability(durability);
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
byte[] value = entry.getValue().toArray();
if (debug) {
System.out.println("Adding field/value " + entry.getKey() + "/"
+ Bytes.toStringBinary(value) + " to put request");
}
p.addColumn(columnFamilyBytes, Bytes.toBytes(entry.getKey()), value);
}
try {
if (clientSideBuffering) {
// removed Preconditions.checkNotNull, which throws NPE, in favor of NPE on next line
bufferedMutator.mutate(p);
} else {
currentTable.put(p);
}
} catch (IOException e) {
if (debug) {
System.err.println("Error doing put: " + e);
}
return Status.ERROR;
} catch (ConcurrentModificationException e) {
// do nothing for now...hope this is rare
return Status.ERROR;
}
return Status.OK;
}
/**
* Insert a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key.
*
* @param table
* The name of the table
* @param key
* The record key of the record to insert.
* @param values
* A HashMap of field/value pairs to insert in the record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status insert(String table, String key,
Map<String, ByteIterator> values) {
return update(table, key, values);
}
/**
* Delete a record from the database.
*
* @param table
* The name of the table
* @param key
* The record key of the record to delete.
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status delete(String table, String key) {
// if this is a "new" table, init HTable object. Else, use existing one
if (!tableName.equals(table)) {
currentTable = null;
try {
getHTable(table);
tableName = table;
} catch (IOException e) {
System.err.println("Error accessing HBase table: " + e);
return Status.ERROR;
}
}
if (debug) {
System.out.println("Doing delete for key: " + key);
}
final Delete d = new Delete(Bytes.toBytes(key));
d.setDurability(durability);
try {
if (clientSideBuffering) {
// removed Preconditions.checkNotNull, which throws NPE, in favor of NPE on next line
bufferedMutator.mutate(d);
} else {
currentTable.delete(d);
}
} catch (IOException e) {
if (debug) {
System.err.println("Error doing delete: " + e);
}
return Status.ERROR;
}
return Status.OK;
}
// Only non-private for testing.
void setConfiguration(final Configuration newConfig) {
this.config = newConfig;
}
private boolean isBooleanParamSet(String param, boolean defaultValue){
return Boolean.parseBoolean(getProperties().getProperty(param, Boolean.toString(defaultValue)));
}
}
/*
 * For customized vim control
 * set autoindent
 * set si
 * set shiftwidth=4
*/
| 19,193 | 32.732865 | 114 | java |
null | NearPMSW-main/baseline/logging/YCSB2/dynamodb/src/main/java/site/ycsb/db/package-info.java | /*
* Copyright 2015-2016 YCSB Contributors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="https://aws.amazon.com/dynamodb/">DynamoDB</a>.
*/
package site.ycsb.db;
| 771 | 32.565217 | 80 | java |
null | NearPMSW-main/baseline/logging/YCSB2/dynamodb/src/main/java/site/ycsb/db/DynamoDBClient.java | /*
* Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* Copyright 2015-2016 YCSB Contributors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package site.ycsb.db;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.PropertiesCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.dynamodbv2.model.*;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import site.ycsb.*;
import java.io.File;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.Vector;
/**
* DynamoDB client for YCSB.
*/
public class DynamoDBClient extends DB {
/**
* Defines the primary key type used in this particular DB instance.
* <p>
   * By default, the primary key type is "HASH". Optionally, the user can
   * choose the HASH_AND_RANGE key type. See documentation in the
* DynamoDB.Properties file for more details.
*/
private enum PrimaryKeyType {
HASH,
HASH_AND_RANGE
}
private AmazonDynamoDB dynamoDB;
private String primaryKeyName;
private PrimaryKeyType primaryKeyType = PrimaryKeyType.HASH;
// If the user choose to use HASH_AND_RANGE as primary key type, then
// the following two variables become relevant. See documentation in the
// DynamoDB.Properties file for more details.
private String hashKeyValue;
private String hashKeyName;
private boolean consistentRead = false;
private String region = "us-east-1";
private String endpoint = null;
private int maxConnects = 50;
private static final Logger LOGGER = Logger.getLogger(DynamoDBClient.class);
private static final Status CLIENT_ERROR = new Status("CLIENT_ERROR", "An error occurred on the client.");
private static final String DEFAULT_HASH_KEY_VALUE = "YCSB_0";
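  // Illustrative configuration sketch (added for clarity, not part of the original source).
  // These are the property names read in init() below; all values are hypothetical examples.
  //
  //   dynamodb.primaryKey=firstname                (required)
  //   dynamodb.primaryKeyType=HASH                 (HASH or HASH_AND_RANGE)
  //   dynamodb.hashKeyName=type                    (required for HASH_AND_RANGE)
  //   dynamodb.hashKeyValue=YCSB_0                 (default YCSB_0)
  //   dynamodb.awsCredentialsFile=conf/AWSCredentials.properties
  //   dynamodb.endpoint=http://localhost:8000      (optional; otherwise dynamodb.region is used)
  //   dynamodb.region=us-east-1                    (default us-east-1)
  //   dynamodb.consistentReads=false
  //   dynamodb.connectMax=50                       (default 50)
  //   dynamodb.debug=false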
@Override
public void init() throws DBException {
String debug = getProperties().getProperty("dynamodb.debug", null);
if (null != debug && "true".equalsIgnoreCase(debug)) {
LOGGER.setLevel(Level.DEBUG);
}
String configuredEndpoint = getProperties().getProperty("dynamodb.endpoint", null);
String credentialsFile = getProperties().getProperty("dynamodb.awsCredentialsFile", null);
String primaryKey = getProperties().getProperty("dynamodb.primaryKey", null);
String primaryKeyTypeString = getProperties().getProperty("dynamodb.primaryKeyType", null);
String consistentReads = getProperties().getProperty("dynamodb.consistentReads", null);
String connectMax = getProperties().getProperty("dynamodb.connectMax", null);
String configuredRegion = getProperties().getProperty("dynamodb.region", null);
if (null != connectMax) {
this.maxConnects = Integer.parseInt(connectMax);
}
if (null != consistentReads && "true".equalsIgnoreCase(consistentReads)) {
this.consistentRead = true;
}
if (null != configuredEndpoint) {
this.endpoint = configuredEndpoint;
}
if (null == primaryKey || primaryKey.length() < 1) {
throw new DBException("Missing primary key attribute name, cannot continue");
}
if (null != primaryKeyTypeString) {
try {
this.primaryKeyType = PrimaryKeyType.valueOf(primaryKeyTypeString.trim().toUpperCase());
} catch (IllegalArgumentException e) {
throw new DBException("Invalid primary key mode specified: " + primaryKeyTypeString +
". Expecting HASH or HASH_AND_RANGE.");
}
}
if (this.primaryKeyType == PrimaryKeyType.HASH_AND_RANGE) {
// When the primary key type is HASH_AND_RANGE, keys used by YCSB
// are range keys so we can benchmark performance of individual hash
// partitions. In this case, the user must specify the hash key's name
// and optionally can designate a value for the hash key.
String configuredHashKeyName = getProperties().getProperty("dynamodb.hashKeyName", null);
if (null == configuredHashKeyName || configuredHashKeyName.isEmpty()) {
throw new DBException("Must specify a non-empty hash key name when the primary key type is HASH_AND_RANGE.");
}
this.hashKeyName = configuredHashKeyName;
this.hashKeyValue = getProperties().getProperty("dynamodb.hashKeyValue", DEFAULT_HASH_KEY_VALUE);
}
if (null != configuredRegion && configuredRegion.length() > 0) {
region = configuredRegion;
}
try {
AmazonDynamoDBClientBuilder dynamoDBBuilder = AmazonDynamoDBClientBuilder.standard();
dynamoDBBuilder = null == endpoint ?
dynamoDBBuilder.withRegion(this.region) :
dynamoDBBuilder.withEndpointConfiguration(
new AwsClientBuilder.EndpointConfiguration(this.endpoint, this.region)
);
dynamoDB = dynamoDBBuilder
.withClientConfiguration(
new ClientConfiguration()
.withTcpKeepAlive(true)
.withMaxConnections(this.maxConnects)
)
.withCredentials(new AWSStaticCredentialsProvider(new PropertiesCredentials(new File(credentialsFile))))
.build();
primaryKeyName = primaryKey;
LOGGER.info("dynamodb connection created with " + this.endpoint);
} catch (Exception e1) {
LOGGER.error("DynamoDBClient.init(): Could not initialize DynamoDB client.", e1);
}
}
@Override
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("readkey: " + key + " from table: " + table);
}
GetItemRequest req = new GetItemRequest(table, createPrimaryKey(key));
req.setAttributesToGet(fields);
req.setConsistentRead(consistentRead);
GetItemResult res;
try {
res = dynamoDB.getItem(req);
} catch (AmazonServiceException ex) {
LOGGER.error(ex);
return Status.ERROR;
} catch (AmazonClientException ex) {
LOGGER.error(ex);
return CLIENT_ERROR;
}
if (null != res.getItem()) {
result.putAll(extractResult(res.getItem()));
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Result: " + res.toString());
}
}
return Status.OK;
}
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("scan " + recordcount + " records from key: " + startkey + " on table: " + table);
}
/*
* on DynamoDB's scan, startkey is *exclusive* so we need to
* getItem(startKey) and then use scan for the res
*/
GetItemRequest greq = new GetItemRequest(table, createPrimaryKey(startkey));
greq.setAttributesToGet(fields);
GetItemResult gres;
try {
gres = dynamoDB.getItem(greq);
} catch (AmazonServiceException ex) {
LOGGER.error(ex);
return Status.ERROR;
} catch (AmazonClientException ex) {
LOGGER.error(ex);
return CLIENT_ERROR;
}
if (null != gres.getItem()) {
result.add(extractResult(gres.getItem()));
}
int count = 1; // startKey is done, rest to go.
Map<String, AttributeValue> startKey = createPrimaryKey(startkey);
ScanRequest req = new ScanRequest(table);
req.setAttributesToGet(fields);
while (count < recordcount) {
req.setExclusiveStartKey(startKey);
req.setLimit(recordcount - count);
ScanResult res;
try {
res = dynamoDB.scan(req);
} catch (AmazonServiceException ex) {
LOGGER.error(ex);
return Status.ERROR;
} catch (AmazonClientException ex) {
LOGGER.error(ex);
return CLIENT_ERROR;
}
count += res.getCount();
for (Map<String, AttributeValue> items : res.getItems()) {
result.add(extractResult(items));
}
startKey = res.getLastEvaluatedKey();
}
return Status.OK;
}
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("updatekey: " + key + " from table: " + table);
}
Map<String, AttributeValueUpdate> attributes = new HashMap<>(values.size());
for (Entry<String, ByteIterator> val : values.entrySet()) {
AttributeValue v = new AttributeValue(val.getValue().toString());
attributes.put(val.getKey(), new AttributeValueUpdate().withValue(v).withAction("PUT"));
}
UpdateItemRequest req = new UpdateItemRequest(table, createPrimaryKey(key), attributes);
try {
dynamoDB.updateItem(req);
} catch (AmazonServiceException ex) {
LOGGER.error(ex);
return Status.ERROR;
} catch (AmazonClientException ex) {
LOGGER.error(ex);
return CLIENT_ERROR;
}
return Status.OK;
}
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("insertkey: " + primaryKeyName + "-" + key + " from table: " + table);
}
Map<String, AttributeValue> attributes = createAttributes(values);
// adding primary key
attributes.put(primaryKeyName, new AttributeValue(key));
if (primaryKeyType == PrimaryKeyType.HASH_AND_RANGE) {
// If the primary key type is HASH_AND_RANGE, then what has been put
// into the attributes map above is the range key part of the primary
// key, we still need to put in the hash key part here.
attributes.put(hashKeyName, new AttributeValue(hashKeyValue));
}
PutItemRequest putItemRequest = new PutItemRequest(table, attributes);
try {
dynamoDB.putItem(putItemRequest);
} catch (AmazonServiceException ex) {
LOGGER.error(ex);
return Status.ERROR;
} catch (AmazonClientException ex) {
LOGGER.error(ex);
return CLIENT_ERROR;
}
return Status.OK;
}
@Override
public Status delete(String table, String key) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("deletekey: " + key + " from table: " + table);
}
DeleteItemRequest req = new DeleteItemRequest(table, createPrimaryKey(key));
try {
dynamoDB.deleteItem(req);
} catch (AmazonServiceException ex) {
LOGGER.error(ex);
return Status.ERROR;
} catch (AmazonClientException ex) {
LOGGER.error(ex);
return CLIENT_ERROR;
}
return Status.OK;
}
private static Map<String, AttributeValue> createAttributes(Map<String, ByteIterator> values) {
Map<String, AttributeValue> attributes = new HashMap<>(values.size() + 1);
for (Entry<String, ByteIterator> val : values.entrySet()) {
attributes.put(val.getKey(), new AttributeValue(val.getValue().toString()));
}
return attributes;
}
private HashMap<String, ByteIterator> extractResult(Map<String, AttributeValue> item) {
if (null == item) {
return null;
}
HashMap<String, ByteIterator> rItems = new HashMap<>(item.size());
for (Entry<String, AttributeValue> attr : item.entrySet()) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(String.format("Result- key: %s, value: %s", attr.getKey(), attr.getValue()));
}
rItems.put(attr.getKey(), new StringByteIterator(attr.getValue().getS()));
}
return rItems;
}
private Map<String, AttributeValue> createPrimaryKey(String key) {
Map<String, AttributeValue> k = new HashMap<>();
if (primaryKeyType == PrimaryKeyType.HASH) {
k.put(primaryKeyName, new AttributeValue().withS(key));
} else if (primaryKeyType == PrimaryKeyType.HASH_AND_RANGE) {
k.put(hashKeyName, new AttributeValue().withS(hashKeyValue));
k.put(primaryKeyName, new AttributeValue().withS(key));
} else {
throw new RuntimeException("Assertion Error: impossible primary key type");
}
return k;
}
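  // Illustrative shape of the key map built above (added for clarity; the attribute names come
  // from the configured properties and the key value is hypothetical):
  //
  //   HASH:            { primaryKeyName -> "user1" }
  //   HASH_AND_RANGE:  { hashKeyName -> hashKeyValue, primaryKeyName -> "user1" }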
}
| 12,595 | 34.184358 | 117 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/test/java/site/ycsb/TestNumericByteIterator.java | /**
* Copyright (c) 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import org.testng.annotations.Test;
import static org.testng.AssertJUnit.*;
public class TestNumericByteIterator {
@Test
public void testLong() throws Exception {
NumericByteIterator it = new NumericByteIterator(42L);
assertFalse(it.isFloatingPoint());
assertEquals(42L, it.getLong());
try {
it.getDouble();
fail("Expected IllegalStateException.");
} catch (IllegalStateException e) { }
try {
it.next();
fail("Expected UnsupportedOperationException.");
} catch (UnsupportedOperationException e) { }
assertEquals(8, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(7, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(6, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(5, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(4, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(3, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(2, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(1, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 42, (byte) it.nextByte());
assertEquals(0, it.bytesLeft());
assertFalse(it.hasNext());
it.reset();
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
}
@Test
public void testDouble() throws Exception {
NumericByteIterator it = new NumericByteIterator(42.75);
assertTrue(it.isFloatingPoint());
assertEquals(42.75, it.getDouble(), 0.001);
try {
it.getLong();
fail("Expected IllegalStateException.");
} catch (IllegalStateException e) { }
try {
it.next();
fail("Expected UnsupportedOperationException.");
} catch (UnsupportedOperationException e) { }
assertEquals(8, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 64, (byte) it.nextByte());
assertEquals(7, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 69, (byte) it.nextByte());
assertEquals(6, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 96, (byte) it.nextByte());
assertEquals(5, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(4, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(3, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(2, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(1, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(0, it.bytesLeft());
assertFalse(it.hasNext());
it.reset();
assertTrue(it.hasNext());
assertEquals((byte) 64, (byte) it.nextByte());
}
}
| 3,935 | 32.355932 | 70 | java |
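// Illustrative sketch, not part of the original sources: where the expected byte values in the
// assertions above come from, assuming NumericByteIterator emits the value big-endian (most
// significant byte first). 42L is 0x000000000000002A, and Double.doubleToLongBits(42.75) is
// 0x4045600000000000, i.e. the bytes 64, 69, 96 followed by zeros.
public final class NumericBytesDemo {
  private NumericBytesDemo() {
  }

  public static void main(String[] args) {
    printBigEndian(42L);                            // 0 0 0 0 0 0 0 42
    printBigEndian(Double.doubleToLongBits(42.75)); // 64 69 96 0 0 0 0 0
  }

  private static void printBigEndian(long v) {
    StringBuilder sb = new StringBuilder();
    for (int shift = 56; shift >= 0; shift -= 8) {
      sb.append((byte) (v >>> shift)).append(' ');
    }
    System.out.println(sb.toString().trim());
  }
}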
null | NearPMSW-main/baseline/logging/YCSB2/core/src/test/java/site/ycsb/TestStatus.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import org.testng.annotations.Test;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
/**
* Test class for {@link Status}.
*/
public class TestStatus {
@Test
public void testAcceptableStatus() {
assertTrue(Status.OK.isOk());
assertTrue(Status.BATCHED_OK.isOk());
assertFalse(Status.BAD_REQUEST.isOk());
assertFalse(Status.ERROR.isOk());
assertFalse(Status.FORBIDDEN.isOk());
assertFalse(Status.NOT_FOUND.isOk());
assertFalse(Status.NOT_IMPLEMENTED.isOk());
assertFalse(Status.SERVICE_UNAVAILABLE.isOk());
assertFalse(Status.UNEXPECTED_STATE.isOk());
}
}
| 1,318 | 30.404762 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/test/java/site/ycsb/TestUtils.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
import java.util.Arrays;
import org.testng.annotations.Test;
public class TestUtils {
@Test
public void bytesToFromLong() throws Exception {
byte[] bytes = new byte[8];
assertEquals(Utils.bytesToLong(bytes), 0L);
assertArrayEquals(Utils.longToBytes(0), bytes);
bytes[7] = 1;
assertEquals(Utils.bytesToLong(bytes), 1L);
assertArrayEquals(Utils.longToBytes(1L), bytes);
bytes = new byte[] { 127, -1, -1, -1, -1, -1, -1, -1 };
assertEquals(Utils.bytesToLong(bytes), Long.MAX_VALUE);
assertArrayEquals(Utils.longToBytes(Long.MAX_VALUE), bytes);
bytes = new byte[] { -128, 0, 0, 0, 0, 0, 0, 0 };
assertEquals(Utils.bytesToLong(bytes), Long.MIN_VALUE);
assertArrayEquals(Utils.longToBytes(Long.MIN_VALUE), bytes);
bytes = new byte[] { (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF,
(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF };
assertEquals(Utils.bytesToLong(bytes), -1L);
assertArrayEquals(Utils.longToBytes(-1L), bytes);
// if the array is too long we just skip the remainder
bytes = new byte[] { 0, 0, 0, 0, 0, 0, 0, 1, 42, 42, 42 };
assertEquals(Utils.bytesToLong(bytes), 1L);
}
@Test
public void bytesToFromDouble() throws Exception {
byte[] bytes = new byte[8];
assertEquals(Utils.bytesToDouble(bytes), 0, 0.0001);
assertArrayEquals(Utils.doubleToBytes(0), bytes);
bytes = new byte[] { 63, -16, 0, 0, 0, 0, 0, 0 };
assertEquals(Utils.bytesToDouble(bytes), 1, 0.0001);
assertArrayEquals(Utils.doubleToBytes(1), bytes);
bytes = new byte[] { -65, -16, 0, 0, 0, 0, 0, 0 };
assertEquals(Utils.bytesToDouble(bytes), -1, 0.0001);
assertArrayEquals(Utils.doubleToBytes(-1), bytes);
bytes = new byte[] { 127, -17, -1, -1, -1, -1, -1, -1 };
assertEquals(Utils.bytesToDouble(bytes), Double.MAX_VALUE, 0.0001);
assertArrayEquals(Utils.doubleToBytes(Double.MAX_VALUE), bytes);
bytes = new byte[] { 0, 0, 0, 0, 0, 0, 0, 1 };
assertEquals(Utils.bytesToDouble(bytes), Double.MIN_VALUE, 0.0001);
assertArrayEquals(Utils.doubleToBytes(Double.MIN_VALUE), bytes);
bytes = new byte[] { 127, -8, 0, 0, 0, 0, 0, 0 };
assertTrue(Double.isNaN(Utils.bytesToDouble(bytes)));
assertArrayEquals(Utils.doubleToBytes(Double.NaN), bytes);
bytes = new byte[] { 63, -16, 0, 0, 0, 0, 0, 0, 42, 42, 42 };
assertEquals(Utils.bytesToDouble(bytes), 1, 0.0001);
}
@Test (expectedExceptions = NullPointerException.class)
public void bytesToLongNull() throws Exception {
Utils.bytesToLong(null);
}
@Test (expectedExceptions = IndexOutOfBoundsException.class)
public void bytesToLongTooShort() throws Exception {
Utils.bytesToLong(new byte[] { 0, 0, 0, 0, 0, 0, 0 });
}
@Test (expectedExceptions = IllegalArgumentException.class)
public void bytesToDoubleTooShort() throws Exception {
Utils.bytesToDouble(new byte[] { 0, 0, 0, 0, 0, 0, 0 });
}
@Test
public void jvmUtils() throws Exception {
// This should ALWAYS return at least one thread.
assertTrue(Utils.getActiveThreadCount() > 0);
// This should always be greater than 0 or something is goofed up in the JVM.
assertTrue(Utils.getUsedMemoryBytes() > 0);
// Some operating systems may not implement this so we don't have a good
// test. Just make sure it doesn't throw an exception.
Utils.getSystemLoadAverage();
// This will probably be zero but should never be negative.
assertTrue(Utils.getGCTotalCollectionCount() >= 0);
// Could be zero similar to GC total collection count
assertTrue(Utils.getGCTotalTime() >= 0);
// Could be empty
assertTrue(Utils.getGCStatst().size() >= 0);
}
/**
* Since this version of TestNG doesn't appear to have an assertArrayEquals,
* this will compare the two to make sure they're the same.
* @param actual Actual array to validate
* @param expected What the array should contain
* @throws AssertionError if the test fails.
*/
public void assertArrayEquals(final byte[] actual, final byte[] expected) {
if (actual == null && expected != null) {
throw new AssertionError("Expected " + Arrays.toString(expected) +
" but found [null]");
}
if (actual != null && expected == null) {
throw new AssertionError("Expected [null] but found " +
Arrays.toString(actual));
}
if (actual.length != expected.length) {
throw new AssertionError("Expected length " + expected.length +
" but found " + actual.length);
}
for (int i = 0; i < expected.length; i++) {
if (actual[i] != expected[i]) {
throw new AssertionError("Expected byte [" + expected[i] +
"] at index " + i + " but found [" + actual[i] + "]");
}
}
}
} | 5,606 | 37.40411 | 81 | java |
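// Illustrative sketch, not part of the original sources: the byte order the assertions above
// pin down for site.ycsb.Utils: eight bytes, big-endian, extra trailing bytes ignored. The
// helpers below reproduce that contract with java.nio.ByteBuffer; they are not the actual
// Utils implementation.
import java.nio.ByteBuffer;

public final class BigEndianLongCodec {
  private BigEndianLongCodec() {
  }

  static byte[] longToBytes(long v) {
    return ByteBuffer.allocate(8).putLong(v).array(); // ByteBuffer defaults to big-endian
  }

  static long bytesToLong(byte[] b) {
    return ByteBuffer.wrap(b, 0, 8).getLong();        // reads only the first 8 bytes
  }

  public static void main(String[] args) {
    System.out.println(bytesToLong(longToBytes(Long.MAX_VALUE)) == Long.MAX_VALUE);   // true
    System.out.println(bytesToLong(new byte[] {0, 0, 0, 0, 0, 0, 0, 1, 42, 42, 42})); // 1
  }
}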
null | NearPMSW-main/baseline/logging/YCSB2/core/src/test/java/site/ycsb/TestByteIterator.java | /**
* Copyright (c) 2012 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import org.testng.annotations.Test;
import static org.testng.AssertJUnit.*;
public class TestByteIterator {
@Test
public void testRandomByteIterator() {
int size = 100;
ByteIterator itor = new RandomByteIterator(size);
assertTrue(itor.hasNext());
assertEquals(size, itor.bytesLeft());
assertEquals(size, itor.toString().getBytes().length);
assertFalse(itor.hasNext());
assertEquals(0, itor.bytesLeft());
itor = new RandomByteIterator(size);
assertEquals(size, itor.toArray().length);
assertFalse(itor.hasNext());
assertEquals(0, itor.bytesLeft());
}
}
| 1,283 | 31.1 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/test/java/site/ycsb/measurements/exporter/TestMeasurementsExporter.java | /**
* Copyright (c) 2015 Yahoo! Inc. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements.exporter;
import site.ycsb.generator.ZipfianGenerator;
import site.ycsb.measurements.Measurements;
import site.ycsb.measurements.OneMeasurementHistogram;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.testng.annotations.Test;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Properties;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
public class TestMeasurementsExporter {
@Test
public void testJSONArrayMeasurementsExporter() throws IOException {
Properties props = new Properties();
props.put(Measurements.MEASUREMENT_TYPE_PROPERTY, "histogram");
props.put(OneMeasurementHistogram.VERBOSE_PROPERTY, "true");
Measurements mm = new Measurements(props);
ByteArrayOutputStream out = new ByteArrayOutputStream();
JSONArrayMeasurementsExporter export = new JSONArrayMeasurementsExporter(out);
long min = 5000;
long max = 100000;
ZipfianGenerator zipfian = new ZipfianGenerator(min, max);
for (int i = 0; i < 1000; i++) {
int rnd = zipfian.nextValue().intValue();
mm.measure("UPDATE", rnd);
}
mm.exportMeasurements(export);
export.close();
ObjectMapper mapper = new ObjectMapper();
JsonNode json = mapper.readTree(out.toString("UTF-8"));
assertTrue(json.isArray());
assertEquals(json.get(0).get("measurement").asText(), "Operations");
assertEquals(json.get(4).get("measurement").asText(), "MaxLatency(us)");
assertEquals(json.get(11).get("measurement").asText(), "4");
}
}
| 2,394 | 37.629032 | 86 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/test/java/site/ycsb/workloads/TestCoreWorkload.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.workloads;
import static org.testng.Assert.assertTrue;
import java.util.Properties;
import org.testng.annotations.Test;
import site.ycsb.generator.DiscreteGenerator;
public class TestCoreWorkload {
@Test
public void createOperationChooser() {
final Properties p = new Properties();
p.setProperty(CoreWorkload.READ_PROPORTION_PROPERTY, "0.20");
p.setProperty(CoreWorkload.UPDATE_PROPORTION_PROPERTY, "0.20");
p.setProperty(CoreWorkload.INSERT_PROPORTION_PROPERTY, "0.20");
p.setProperty(CoreWorkload.SCAN_PROPORTION_PROPERTY, "0.20");
p.setProperty(CoreWorkload.READMODIFYWRITE_PROPORTION_PROPERTY, "0.20");
final DiscreteGenerator generator = CoreWorkload.createOperationGenerator(p);
final int[] counts = new int[5];
for (int i = 0; i < 100; ++i) {
switch (generator.nextString()) {
case "READ":
++counts[0];
break;
case "UPDATE":
++counts[1];
break;
case "INSERT":
++counts[2];
break;
case "SCAN":
++counts[3];
break;
default:
++counts[4];
}
}
for (int i : counts) {
// Doesn't do a wonderful job of equal distribution, but in a hundred, if we
      // don't see at least one of each operation then the generator is really broken.
assertTrue(i > 1);
}
}
@Test (expectedExceptions = IllegalArgumentException.class)
public void createOperationChooserNullProperties() {
CoreWorkload.createOperationGenerator(null);
}
} | 4,257 | 59.828571 | 186 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/test/java/site/ycsb/workloads/TestTimeSeriesWorkload.java | /**
* Copyright (c) 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.workloads;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import static org.testng.Assert.fail;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.TreeMap;
import java.util.Vector;
import site.ycsb.ByteIterator;
import site.ycsb.Client;
import site.ycsb.DB;
import site.ycsb.NumericByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.Utils;
import site.ycsb.WorkloadException;
import site.ycsb.measurements.Measurements;
import org.testng.annotations.Test;
public class TestTimeSeriesWorkload {
@Test
public void twoThreads() throws Exception {
final Properties p = getUTProperties();
Measurements.setProperties(p);
final TimeSeriesWorkload wl = new TimeSeriesWorkload();
wl.init(p);
Object threadState = wl.initThread(p, 0, 2);
MockDB db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
long timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAA");
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertNotNull(db.values.get(i).get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT));
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
timestamp += 60;
}
}
threadState = wl.initThread(p, 1, 2);
db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAB");
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertNotNull(db.values.get(i).get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT));
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
timestamp += 60;
}
}
}
@Test (expectedExceptions = WorkloadException.class)
public void badTimeUnit() throws Exception {
final Properties p = new Properties();
p.put(TimeSeriesWorkload.TIMESTAMP_UNITS_PROPERTY, "foobar");
getWorkload(p, true);
}
@Test (expectedExceptions = WorkloadException.class)
public void failedToInitWorkloadBeforeThreadInit() throws Exception {
final Properties p = getUTProperties();
final TimeSeriesWorkload wl = getWorkload(p, false);
//wl.init(p); // <-- we NEED this :(
final Object threadState = wl.initThread(p, 0, 2);
final MockDB db = new MockDB();
wl.doInsert(db, threadState);
}
@Test (expectedExceptions = IllegalStateException.class)
public void failedToInitThread() throws Exception {
final Properties p = getUTProperties();
final TimeSeriesWorkload wl = getWorkload(p, true);
final MockDB db = new MockDB();
wl.doInsert(db, null);
}
@Test
public void insertOneKeyOneTagCardinalityOne() throws Exception {
final Properties p = getUTProperties();
p.put(CoreWorkload.FIELD_COUNT_PROPERTY, "1");
p.put(TimeSeriesWorkload.TAG_COUNT_PROPERTY, "1");
p.put(TimeSeriesWorkload.TAG_CARDINALITY_PROPERTY, "1");
final TimeSeriesWorkload wl = getWorkload(p, true);
final Object threadState = wl.initThread(p, 0, 1);
final MockDB db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
long timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAA");
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertTrue(((NumericByteIterator) db.values.get(i)
.get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT)).isFloatingPoint());
timestamp += 60;
}
}
@Test
public void insertOneKeyTwoTagsLowCardinality() throws Exception {
final Properties p = getUTProperties();
p.put(CoreWorkload.FIELD_COUNT_PROPERTY, "1");
final TimeSeriesWorkload wl = getWorkload(p, true);
final Object threadState = wl.initThread(p, 0, 1);
final MockDB db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
long timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAA");
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertTrue(((NumericByteIterator) db.values.get(i)
.get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT)).isFloatingPoint());
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
timestamp += 60;
}
}
}
@Test
public void insertTwoKeysTwoTagsLowCardinality() throws Exception {
final Properties p = getUTProperties();
final TimeSeriesWorkload wl = getWorkload(p, true);
final Object threadState = wl.initThread(p, 0, 1);
final MockDB db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
long timestamp = 1451606400;
int metricCtr = 0;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertTrue(((NumericByteIterator) db.values.get(i)
.get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT)).isFloatingPoint());
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
}
if (metricCtr++ > 1) {
assertEquals(db.keys.get(i), "AAAB");
if (metricCtr >= 4) {
metricCtr = 0;
timestamp += 60;
}
} else {
assertEquals(db.keys.get(i), "AAAA");
}
}
}
@Test
public void insertTwoKeysTwoThreads() throws Exception {
final Properties p = getUTProperties();
final TimeSeriesWorkload wl = getWorkload(p, true);
Object threadState = wl.initThread(p, 0, 2);
MockDB db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
long timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAA"); // <-- key 1
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertTrue(((NumericByteIterator) db.values.get(i)
.get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT)).isFloatingPoint());
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
timestamp += 60;
}
}
threadState = wl.initThread(p, 1, 2);
db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAB"); // <-- key 2
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertNotNull(db.values.get(i).get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT));
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
timestamp += 60;
}
}
}
@Test
public void insertThreeKeysTwoThreads() throws Exception {
// To make sure the distribution doesn't miss any metrics
final Properties p = getUTProperties();
p.put(CoreWorkload.FIELD_COUNT_PROPERTY, "3");
final TimeSeriesWorkload wl = getWorkload(p, true);
Object threadState = wl.initThread(p, 0, 2);
MockDB db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
long timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAA");
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertTrue(((NumericByteIterator) db.values.get(i)
.get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT)).isFloatingPoint());
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
timestamp += 60;
}
}
threadState = wl.initThread(p, 1, 2);
db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
timestamp = 1451606400;
int metricCtr = 0;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertNotNull(db.values.get(i).get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT));
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
}
if (metricCtr++ > 1) {
assertEquals(db.keys.get(i), "AAAC");
if (metricCtr >= 4) {
metricCtr = 0;
timestamp += 60;
}
} else {
assertEquals(db.keys.get(i), "AAAB");
}
}
}
@Test
public void insertWithValidation() throws Exception {
final Properties p = getUTProperties();
p.put(CoreWorkload.FIELD_COUNT_PROPERTY, "1");
p.put(CoreWorkload.DATA_INTEGRITY_PROPERTY, "true");
p.put(TimeSeriesWorkload.VALUE_TYPE_PROPERTY, "integers");
final TimeSeriesWorkload wl = getWorkload(p, true);
final Object threadState = wl.initThread(p, 0, 1);
final MockDB db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
long timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAA");
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertFalse(((NumericByteIterator) db.values.get(i)
.get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT)).isFloatingPoint());
// validation check
final TreeMap<String, String> validationTags = new TreeMap<String, String>();
for (final Entry<String, ByteIterator> entry : db.values.get(i).entrySet()) {
if (entry.getKey().equals(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT) ||
entry.getKey().equals(TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT)) {
continue;
}
validationTags.put(entry.getKey(), entry.getValue().toString());
}
assertEquals(wl.validationFunction(db.keys.get(i), timestamp, validationTags),
((NumericByteIterator) db.values.get(i).get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT)).getLong());
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
timestamp += 60;
}
}
}
@Test
public void read() throws Exception {
final Properties p = getUTProperties();
final TimeSeriesWorkload wl = getWorkload(p, true);
final Object threadState = wl.initThread(p, 0, 1);
final MockDB db = new MockDB();
for (int i = 0; i < 20; i++) {
wl.doTransactionRead(db, threadState);
}
}
@Test
public void verifyRow() throws Exception {
final Properties p = getUTProperties();
final TimeSeriesWorkload wl = getWorkload(p, true);
final TreeMap<String, String> validationTags = new TreeMap<String, String>();
final HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
validationTags.put("AA", "AAAA");
cells.put("AA", new StringByteIterator("AAAA"));
validationTags.put("AB", "AAAB");
cells.put("AB", new StringByteIterator("AAAB"));
long hash = wl.validationFunction("AAAA", 1451606400L, validationTags);
cells.put(TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT, new NumericByteIterator(1451606400L));
cells.put(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT, new NumericByteIterator(hash));
assertEquals(wl.verifyRow("AAAA", cells), Status.OK);
// tweak the last value a bit
for (final ByteIterator it : cells.values()) {
it.reset();
}
cells.put(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT, new NumericByteIterator(hash + 1));
assertEquals(wl.verifyRow("AAAA", cells), Status.UNEXPECTED_STATE);
// no value cell, returns an unexpected state
for (final ByteIterator it : cells.values()) {
it.reset();
}
cells.remove(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT);
assertEquals(wl.verifyRow("AAAA", cells), Status.UNEXPECTED_STATE);
}
@Test
public void validateSettingsDataIntegrity() throws Exception {
Properties p = getUTProperties();
// data validation incompatibilities
p.setProperty(CoreWorkload.DATA_INTEGRITY_PROPERTY, "true");
try {
getWorkload(p, true);
fail("Expected WorkloadException");
} catch (WorkloadException e) { }
p.setProperty(TimeSeriesWorkload.VALUE_TYPE_PROPERTY, "integers"); // now it's ok
p.setProperty(TimeSeriesWorkload.GROUPBY_PROPERTY, "sum"); // now it's not
try {
getWorkload(p, true);
fail("Expected WorkloadException");
} catch (WorkloadException e) { }
p.setProperty(TimeSeriesWorkload.GROUPBY_PROPERTY, "");
p.setProperty(TimeSeriesWorkload.DOWNSAMPLING_FUNCTION_PROPERTY, "sum");
p.setProperty(TimeSeriesWorkload.DOWNSAMPLING_INTERVAL_PROPERTY, "60");
try {
getWorkload(p, true);
fail("Expected WorkloadException");
} catch (WorkloadException e) { }
p.setProperty(TimeSeriesWorkload.DOWNSAMPLING_FUNCTION_PROPERTY, "");
p.setProperty(TimeSeriesWorkload.DOWNSAMPLING_INTERVAL_PROPERTY, "");
p.setProperty(TimeSeriesWorkload.QUERY_TIMESPAN_PROPERTY, "60");
try {
getWorkload(p, true);
fail("Expected WorkloadException");
} catch (WorkloadException e) { }
p = getUTProperties();
p.setProperty(CoreWorkload.DATA_INTEGRITY_PROPERTY, "true");
p.setProperty(TimeSeriesWorkload.VALUE_TYPE_PROPERTY, "integers");
p.setProperty(TimeSeriesWorkload.RANDOMIZE_TIMESERIES_ORDER_PROPERTY, "true");
try {
getWorkload(p, true);
fail("Expected WorkloadException");
} catch (WorkloadException e) { }
p.setProperty(TimeSeriesWorkload.RANDOMIZE_TIMESERIES_ORDER_PROPERTY, "false");
p.setProperty(TimeSeriesWorkload.INSERT_START_PROPERTY, "");
try {
getWorkload(p, true);
fail("Expected WorkloadException");
} catch (WorkloadException e) { }
}
/** Helper method that generates unit testing defaults for the properties map */
private Properties getUTProperties() {
final Properties p = new Properties();
p.put(Client.RECORD_COUNT_PROPERTY, "10");
p.put(CoreWorkload.FIELD_COUNT_PROPERTY, "2");
p.put(CoreWorkload.FIELD_LENGTH_PROPERTY, "4");
p.put(TimeSeriesWorkload.TAG_KEY_LENGTH_PROPERTY, "2");
p.put(TimeSeriesWorkload.TAG_VALUE_LENGTH_PROPERTY, "4");
p.put(TimeSeriesWorkload.TAG_COUNT_PROPERTY, "2");
p.put(TimeSeriesWorkload.TAG_CARDINALITY_PROPERTY, "1,2");
p.put(CoreWorkload.INSERT_START_PROPERTY, "1451606400");
p.put(TimeSeriesWorkload.DELAYED_SERIES_PROPERTY, "0");
p.put(TimeSeriesWorkload.RANDOMIZE_TIMESERIES_ORDER_PROPERTY, "false");
return p;
}
/** Helper to setup the workload for testing. */
private TimeSeriesWorkload getWorkload(final Properties p, final boolean init)
throws WorkloadException {
Measurements.setProperties(p);
if (!init) {
return new TimeSeriesWorkload();
} else {
final TimeSeriesWorkload workload = new TimeSeriesWorkload();
workload.init(p);
return workload;
}
}
static class MockDB extends DB {
final List<String> keys = new ArrayList<String>();
final List<Map<String, ByteIterator>> values =
new ArrayList<Map<String, ByteIterator>>();
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
return Status.OK;
}
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
// TODO Auto-generated method stub
return Status.OK;
}
@Override
public Status update(String table, String key,
Map<String, ByteIterator> values) {
// TODO Auto-generated method stub
return Status.OK;
}
@Override
public Status insert(String table, String key,
Map<String, ByteIterator> values) {
keys.add(key);
this.values.add(values);
return Status.OK;
}
@Override
public Status delete(String table, String key) {
// TODO Auto-generated method stub
return Status.OK;
}
public void dumpStdout() {
for (int i = 0; i < keys.size(); i++) {
System.out.print("[" + i + "] Key: " + keys.get(i) + " Values: {");
int x = 0;
for (final Entry<String, ByteIterator> entry : values.get(i).entrySet()) {
if (x++ > 0) {
System.out.print(", ");
}
System.out.print("{" + entry.getKey() + " => ");
if (entry.getKey().equals("YCSBV")) {
            System.out.print(Utils.bytesToDouble(entry.getValue().toArray()) + "}");
          } else if (entry.getKey().equals("YCSBTS")) {
            System.out.print(Utils.bytesToLong(entry.getValue().toArray()) + "}");
} else {
System.out.print(new String(entry.getValue().toArray()) + "}");
}
}
System.out.println("}");
}
}
}
} | 21,195 | 35.734835 | 113 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/test/java/site/ycsb/generator/TestUnixEpochTimestampGenerator.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import static org.testng.Assert.assertEquals;
import java.util.concurrent.TimeUnit;
import org.testng.annotations.Test;
public class TestUnixEpochTimestampGenerator {
@Test
public void defaultCtor() throws Exception {
final UnixEpochTimestampGenerator generator =
new UnixEpochTimestampGenerator();
final long startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + 60);
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + 120);
assertEquals((long) generator.lastValue(), startTime + 60);
assertEquals((long) generator.nextValue(), startTime + 180);
}
@Test
public void ctorWithIntervalAndUnits() throws Exception {
final UnixEpochTimestampGenerator generator =
new UnixEpochTimestampGenerator(120, TimeUnit.SECONDS);
final long startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + 120);
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + 240);
assertEquals((long) generator.lastValue(), startTime + 120);
}
@Test
public void ctorWithIntervalAndUnitsAndStart() throws Exception {
final UnixEpochTimestampGenerator generator =
new UnixEpochTimestampGenerator(120, TimeUnit.SECONDS, 1072915200L);
assertEquals((long) generator.nextValue(), 1072915200L);
assertEquals((long) generator.lastValue(), 1072915200L - 120);
assertEquals((long) generator.nextValue(), 1072915200L + 120);
assertEquals((long) generator.lastValue(), 1072915200L);
}
@Test
public void variousIntervalsAndUnits() throws Exception {
// negatives could happen, just start and roll back in time
UnixEpochTimestampGenerator generator =
new UnixEpochTimestampGenerator(-60, TimeUnit.SECONDS);
long startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime - 60);
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime - 120);
assertEquals((long) generator.lastValue(), startTime - 60);
generator = new UnixEpochTimestampGenerator(100, TimeUnit.NANOSECONDS);
startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + 100);
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + 200);
assertEquals((long) generator.lastValue(), startTime + 100);
generator = new UnixEpochTimestampGenerator(100, TimeUnit.MICROSECONDS);
startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + 100);
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + 200);
assertEquals((long) generator.lastValue(), startTime + 100);
generator = new UnixEpochTimestampGenerator(100, TimeUnit.MILLISECONDS);
startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + 100);
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + 200);
assertEquals((long) generator.lastValue(), startTime + 100);
generator = new UnixEpochTimestampGenerator(100, TimeUnit.SECONDS);
startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + 100);
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + 200);
assertEquals((long) generator.lastValue(), startTime + 100);
generator = new UnixEpochTimestampGenerator(1, TimeUnit.MINUTES);
startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + (1 * 60));
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + (2 * 60));
assertEquals((long) generator.lastValue(), startTime + (1 * 60));
generator = new UnixEpochTimestampGenerator(1, TimeUnit.HOURS);
startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + (1 * 60 * 60));
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + (2 * 60 * 60));
assertEquals((long) generator.lastValue(), startTime + (1 * 60 * 60));
generator = new UnixEpochTimestampGenerator(1, TimeUnit.DAYS);
startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + (1 * 60 * 60 * 24));
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + (2 * 60 * 60 * 24));
assertEquals((long) generator.lastValue(), startTime + (1 * 60 * 60 * 24));
}
// TODO - With PowerMockito we could UT the initializeTimestamp(long) call.
// Otherwise it would involve creating more functions and that would get ugly.
}
| 7,813 | 62.528455 | 186 | java |
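// Illustrative arithmetic check, not generator internals: the offsets asserted above for the
// coarse units correspond to the interval expressed in seconds, which is why a 1-unit step
// advances the timestamp by 60, 3600 and 86400 for MINUTES, HOURS and DAYS respectively.
import java.util.concurrent.TimeUnit;

public final class IntervalArithmetic {
  private IntervalArithmetic() {
  }

  public static void main(String[] args) {
    System.out.println(TimeUnit.MINUTES.toSeconds(1)); // 60
    System.out.println(TimeUnit.HOURS.toSeconds(1));   // 3600
    System.out.println(TimeUnit.DAYS.toSeconds(1));    // 86400
  }
}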
null | NearPMSW-main/baseline/logging/YCSB2/core/src/test/java/site/ycsb/generator/TestZipfianGenerator.java | /**
* Copyright (c) 2010 Yahoo! Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import org.testng.annotations.Test;
import static org.testng.AssertJUnit.assertFalse;
public class TestZipfianGenerator {
@Test
public void testMinAndMaxParameter() {
long min = 5;
long max = 10;
ZipfianGenerator zipfian = new ZipfianGenerator(min, max);
for (int i = 0; i < 10000; i++) {
long rnd = zipfian.nextValue();
assertFalse(rnd < min);
assertFalse(rnd > max);
}
}
}
| 1,150 | 27.775 | 70 | java |
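// Illustrative sketch, not part of the original sources: it extends the bounds check above by
// tallying how often each value in [min, max] is drawn. With a Zipfian distribution the low
// end of the range is expected to dominate the counts; the exact numbers vary run to run.
import java.util.TreeMap;
import site.ycsb.generator.ZipfianGenerator;

public final class ZipfianSkewDemo {
  private ZipfianSkewDemo() {
  }

  public static void main(String[] args) {
    ZipfianGenerator zipfian = new ZipfianGenerator(5, 10);
    TreeMap<Long, Integer> counts = new TreeMap<>();
    for (int i = 0; i < 10000; i++) {
      counts.merge(zipfian.nextValue(), 1, Integer::sum);
    }
    System.out.println(counts); // e.g. {5=..., 6=..., ..., 10=...} with 5 the most frequent
  }
}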
null | NearPMSW-main/baseline/logging/YCSB2/core/src/test/java/site/ycsb/generator/AcknowledgedCounterGeneratorTest.java | /**
* Copyright (c) 2015-2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.Random;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import org.testng.annotations.Test;
/**
* Tests for the AcknowledgedCounterGenerator class.
*/
public class AcknowledgedCounterGeneratorTest {
/**
* Test that advancing past {@link Integer#MAX_VALUE} works.
*/
@Test
public void testIncrementPastIntegerMaxValue() {
final long toTry = AcknowledgedCounterGenerator.WINDOW_SIZE * 3;
AcknowledgedCounterGenerator generator =
new AcknowledgedCounterGenerator(Integer.MAX_VALUE - 1000);
Random rand = new Random(System.currentTimeMillis());
BlockingQueue<Long> pending = new ArrayBlockingQueue<Long>(1000);
for (long i = 0; i < toTry; ++i) {
long value = generator.nextValue();
while (!pending.offer(value)) {
Long first = pending.poll();
// Don't always advance by one.
if (rand.nextBoolean()) {
generator.acknowledge(first);
} else {
Long second = pending.poll();
pending.add(first);
generator.acknowledge(second);
}
}
}
}
}
| 1,835 | 28.612903 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/test/java/site/ycsb/generator/TestIncrementingPrintableStringGenerator.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertNull;
import static org.testng.Assert.fail;
import java.util.NoSuchElementException;
import org.testng.annotations.Test;
public class TestIncrementingPrintableStringGenerator {
private final static int[] ATOC = new int[] { 65, 66, 67 };
@Test
public void rolloverOK() throws Exception {
final IncrementingPrintableStringGenerator gen =
new IncrementingPrintableStringGenerator(2, ATOC);
assertNull(gen.lastValue());
assertEquals(gen.nextValue(), "AA");
assertEquals(gen.lastValue(), "AA");
assertEquals(gen.nextValue(), "AB");
assertEquals(gen.lastValue(), "AB");
assertEquals(gen.nextValue(), "AC");
assertEquals(gen.lastValue(), "AC");
assertEquals(gen.nextValue(), "BA");
assertEquals(gen.lastValue(), "BA");
assertEquals(gen.nextValue(), "BB");
assertEquals(gen.lastValue(), "BB");
assertEquals(gen.nextValue(), "BC");
assertEquals(gen.lastValue(), "BC");
assertEquals(gen.nextValue(), "CA");
assertEquals(gen.lastValue(), "CA");
assertEquals(gen.nextValue(), "CB");
assertEquals(gen.lastValue(), "CB");
assertEquals(gen.nextValue(), "CC");
assertEquals(gen.lastValue(), "CC");
assertEquals(gen.nextValue(), "AA"); // <-- rollover
assertEquals(gen.lastValue(), "AA");
}
@Test
public void rolloverOneCharacterOK() throws Exception {
// It would be silly to create a generator with one character.
final IncrementingPrintableStringGenerator gen =
new IncrementingPrintableStringGenerator(2, new int[] { 65 });
for (int i = 0; i < 5; i++) {
assertEquals(gen.nextValue(), "AA");
}
}
@Test
public void rolloverException() throws Exception {
final IncrementingPrintableStringGenerator gen =
new IncrementingPrintableStringGenerator(2, ATOC);
gen.setThrowExceptionOnRollover(true);
int i = 0;
try {
while(i < 11) {
++i;
gen.nextValue();
}
fail("Expected NoSuchElementException");
} catch (NoSuchElementException e) {
assertEquals(i, 10);
}
}
@Test
public void rolloverOneCharacterException() throws Exception {
// It would be silly to create a generator with one character.
final IncrementingPrintableStringGenerator gen =
new IncrementingPrintableStringGenerator(2, new int[] { 65 });
gen.setThrowExceptionOnRollover(true);
int i = 0;
try {
while(i < 3) {
++i;
gen.nextValue();
}
fail("Expected NoSuchElementException");
} catch (NoSuchElementException e) {
assertEquals(i, 2);
}
}
@Test
public void invalidLengths() throws Exception {
try {
new IncrementingPrintableStringGenerator(0, ATOC);
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) { }
try {
new IncrementingPrintableStringGenerator(-42, ATOC);
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) { }
}
@Test
public void invalidCharacterSets() throws Exception {
try {
new IncrementingPrintableStringGenerator(2, null);
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) { }
try {
new IncrementingPrintableStringGenerator(2, new int[] {});
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) { }
}
}
| 4,188 | 30.977099 | 70 | java |
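// Illustrative sketch, not the generator's actual code: an odometer-style rendering of the
// rollover arithmetic the tests above exercise. The rightmost position cycles through the
// character set and carries into the position on its left, so with {A, B, C} and width 2 the
// sequence runs AA, AB, AC, BA, ... CC and then wraps back to AA.
public final class OdometerStringDemo {
  private OdometerStringDemo() {
  }

  public static void main(String[] args) {
    char[] charset = {'A', 'B', 'C'};
    int[] positions = new int[2];              // one index into charset per output character
    for (int i = 0; i < 10; i++) {
      StringBuilder sb = new StringBuilder();
      for (int p : positions) {
        sb.append(charset[p]);
      }
      System.out.println(sb);                  // AA AB AC BA BB BC CA CB CC AA
      for (int p = positions.length - 1; p >= 0; p--) {
        positions[p]++;                        // increment with carry, rightmost first
        if (positions[p] < charset.length) {
          break;
        }
        positions[p] = 0;                      // rollover: carry into the next position
      }
    }
  }
}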
null | NearPMSW-main/baseline/logging/YCSB2/core/src/test/java/site/ycsb/generator/TestRandomDiscreteTimestampGenerator.java | /**
* Copyright (c) 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.fail;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.testng.annotations.Test;
import org.testng.collections.Lists;
public class TestRandomDiscreteTimestampGenerator {
@Test
public void systemTime() throws Exception {
final RandomDiscreteTimestampGenerator generator =
new RandomDiscreteTimestampGenerator(60, TimeUnit.SECONDS, 60);
List<Long> generated = Lists.newArrayList();
for (int i = 0; i < 60; i++) {
generated.add(generator.nextValue());
}
assertEquals(generated.size(), 60);
try {
generator.nextValue();
fail("Expected IllegalStateException");
} catch (IllegalStateException e) { }
}
@Test
public void withStartTime() throws Exception {
final RandomDiscreteTimestampGenerator generator =
new RandomDiscreteTimestampGenerator(60, TimeUnit.SECONDS, 1072915200L, 60);
List<Long> generated = Lists.newArrayList();
for (int i = 0; i < 60; i++) {
generated.add(generator.nextValue());
}
assertEquals(generated.size(), 60);
Collections.sort(generated);
long ts = 1072915200L - 60; // starts 1 interval in the past
for (final long t : generated) {
assertEquals(t, ts);
ts += 60;
}
try {
generator.nextValue();
fail("Expected IllegalStateException");
} catch (IllegalStateException e) { }
}
@Test (expectedExceptions = IllegalArgumentException.class)
public void tooLarge() throws Exception {
new RandomDiscreteTimestampGenerator(60, TimeUnit.SECONDS,
RandomDiscreteTimestampGenerator.MAX_INTERVALS + 1);
}
//TODO - With PowerMockito we could UT the initializeTimestamp(long) call.
// Otherwise it would involve creating more functions and that would get ugly.
}
| 2,582 | 32.986842 | 84 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/UnknownDBException.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
/**
* Could not create the specified DB.
*/
public class UnknownDBException extends Exception {
  /** Serialization version UID. */
private static final long serialVersionUID = 459099842269616836L;
public UnknownDBException(String message) {
super(message);
}
public UnknownDBException() {
super();
}
public UnknownDBException(String message, Throwable cause) {
super(message, cause);
}
public UnknownDBException(Throwable cause) {
super(cause);
}
}
| 1,185 | 24.782609 | 83 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/DBFactory.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import org.apache.htrace.core.Tracer;
import java.util.Properties;
/**
* Creates a DB layer by dynamically classloading the specified DB class.
*/
public final class DBFactory {
private DBFactory() {
// not used
}
public static DB newDB(String dbname, Properties properties, final Tracer tracer) throws UnknownDBException {
ClassLoader classLoader = DBFactory.class.getClassLoader();
DB ret;
try {
      // dynamically load the requested binding class by name and instantiate it
      // via its no-argument constructor
      Class<?> dbclass = classLoader.loadClass(dbname);
      ret = (DB) dbclass.newInstance();
} catch (Exception e) {
e.printStackTrace();
return null;
}
ret.setProperties(properties);
return new DBWrapper(ret, tracer);
}
}
| 1,397 | 25.884615 | 111 | java |
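// Hedged usage sketch, not part of the original sources: it assumes an htrace Tracer instance
// is already available and that dbClassName names a site.ycsb.DB implementation on the
// classpath. DBFactory.newDB returns the binding wrapped in a DBWrapper, or null when the
// class cannot be loaded or instantiated.
import java.util.Properties;
import org.apache.htrace.core.Tracer;
import site.ycsb.DB;
import site.ycsb.DBFactory;
import site.ycsb.UnknownDBException;

public final class DbFactoryUsage {
  private DbFactoryUsage() {
  }

  static DB createDb(String dbClassName, Properties props, Tracer tracer) throws UnknownDBException {
    DB db = DBFactory.newDB(dbClassName, props, tracer);
    if (db == null) {
      throw new UnknownDBException("Could not load DB class: " + dbClassName);
    }
    // callers typically invoke db.init() once per client thread before issuing operations
    return db;
  }
}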
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/TerminatorThread.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.Collection;
/**
* A thread that waits for the maximum specified time and then interrupts all the client
* threads passed at initialization of this thread.
*
* The maximum execution time passed is assumed to be in seconds.
*
*/
public class TerminatorThread extends Thread {
private final Collection<? extends Thread> threads;
private long maxExecutionTime;
private Workload workload;
private long waitTimeOutInMS;
public TerminatorThread(long maxExecutionTime, Collection<? extends Thread> threads,
Workload workload) {
this.maxExecutionTime = maxExecutionTime;
this.threads = threads;
this.workload = workload;
waitTimeOutInMS = 2000;
System.err.println("Maximum execution time specified as: " + maxExecutionTime + " secs");
}
public void run() {
try {
Thread.sleep(maxExecutionTime * 1000);
} catch (InterruptedException e) {
System.err.println("Could not wait until max specified time, TerminatorThread interrupted.");
return;
}
System.err.println("Maximum time elapsed. Requesting stop for the workload.");
workload.requestStop();
System.err.println("Stop requested for workload. Now Joining!");
for (Thread t : threads) {
while (t.isAlive()) {
try {
t.join(waitTimeOutInMS);
if (t.isAlive()) {
System.out.println("Still waiting for thread " + t.getName() + " to complete. " +
"Workload status: " + workload.isStopRequested());
}
} catch (InterruptedException e) {
// Do nothing. Don't know why I was interrupted.
}
}
}
}
}
| 2,383 | 33.550725 | 99 | java |
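// Illustrative wiring only, not part of the original sources; the YCSB client normally does
// this itself. The terminator receives the client threads and the shared Workload, and after
// maxExecutionSeconds it requests a stop on the workload and joins the clients.
import java.util.List;
import site.ycsb.TerminatorThread;
import site.ycsb.Workload;

public final class TerminatorUsage {
  private TerminatorUsage() {
  }

  static Thread startTerminator(long maxExecutionSeconds, List<Thread> clientThreads, Workload workload) {
    TerminatorThread terminator = new TerminatorThread(maxExecutionSeconds, clientThreads, workload);
    terminator.setDaemon(true); // assumption: do not keep the JVM alive once the clients finish
    terminator.start();
    return terminator;
  }
}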
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/package-info.java | /*
* Copyright (c) 2015 - 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB core package.
*/
package site.ycsb;
| 719 | 30.304348 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/ByteArrayByteIterator.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
/**
* A ByteIterator that iterates through a byte array.
*/
public class ByteArrayByteIterator extends ByteIterator {
private final int originalOffset;
private final byte[] str;
private int off;
private final int len;
public ByteArrayByteIterator(byte[] s) {
this.str = s;
this.off = 0;
this.len = s.length;
originalOffset = 0;
}
public ByteArrayByteIterator(byte[] s, int off, int len) {
this.str = s;
this.off = off;
this.len = off + len;
originalOffset = off;
}
@Override
public boolean hasNext() {
return off < len;
}
@Override
public byte nextByte() {
byte ret = str[off];
off++;
return ret;
}
@Override
public long bytesLeft() {
return len - off;
}
@Override
public void reset() {
off = originalOffset;
}
@Override
public byte[] toArray() {
int size = (int) bytesLeft();
byte[] bytes = new byte[size];
System.arraycopy(str, off, bytes, 0, size);
off = len;
return bytes;
}
}
| 1,726 | 22.337838 | 83 | java |
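// Small usage sketch, not part of the original sources: a slice of the backing array is
// wrapped without copying; toArray() drains the remaining bytes and reset() rewinds to the
// offset the iterator was constructed with.
import java.nio.charset.StandardCharsets;
import site.ycsb.ByteArrayByteIterator;

public final class ByteArrayByteIteratorUsage {
  private ByteArrayByteIteratorUsage() {
  }

  public static void main(String[] args) {
    byte[] backing = "hello world".getBytes(StandardCharsets.UTF_8);
    ByteArrayByteIterator it = new ByteArrayByteIterator(backing, 6, 5);  // wraps "world"
    System.out.println(it.bytesLeft());                                   // 5
    System.out.println(new String(it.toArray(), StandardCharsets.UTF_8)); // world
    it.reset();                                                           // back to offset 6
    System.out.println(it.hasNext());                                     // true
  }
}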
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/GoodBadUglyDB.java | /**
* Copyright (c) 2010 Yahoo! Inc. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.LockSupport;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import static java.util.concurrent.TimeUnit.MICROSECONDS;
/**
* Basic DB that just prints out the requested operations, instead of doing them against a database.
*/
public class GoodBadUglyDB extends DB {
public static final String SIMULATE_DELAY = "gbudb.delays";
public static final String SIMULATE_DELAY_DEFAULT = "200,1000,10000,50000,100000";
private static final ReadWriteLock DB_ACCESS = new ReentrantReadWriteLock();
private long[] delays;
public GoodBadUglyDB() {
delays = new long[]{200, 1000, 10000, 50000, 200000};
}
private void delay() {
final Random random = ThreadLocalRandom.current();
double p = random.nextDouble();
int mod;
if (p < 0.9) {
mod = 0;
} else if (p < 0.99) {
mod = 1;
} else if (p < 0.9999) {
mod = 2;
} else {
mod = 3;
}
// this will make mod 3 pauses global
Lock lock = mod == 3 ? DB_ACCESS.writeLock() : DB_ACCESS.readLock();
if (mod == 3) {
System.out.println("OUCH");
}
lock.lock();
try {
final long baseDelayNs = MICROSECONDS.toNanos(delays[mod]);
final int delayRangeNs = (int) (MICROSECONDS.toNanos(delays[mod + 1]) - baseDelayNs);
final long delayNs = baseDelayNs + random.nextInt(delayRangeNs);
final long deadline = System.nanoTime() + delayNs;
do {
LockSupport.parkNanos(deadline - System.nanoTime());
} while (System.nanoTime() < deadline && !Thread.interrupted());
} finally {
lock.unlock();
}
}
/**
* Initialize any state for this DB. Called once per DB instance; there is one DB instance per client thread.
*/
public void init() {
int i = 0;
for (String delay : getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT).split(",")) {
delays[i++] = Long.parseLong(delay);
}
}
/**
* Read a record from the database. Each field/value pair from the result will be stored in a HashMap.
*
* @param table The name of the table
* @param key The record key of the record to read.
* @param fields The list of fields to read, or null for all of them
* @param result A HashMap of field/value pairs for the result
* @return Zero on success, a non-zero error code on error
*/
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
delay();
return Status.OK;
}
/**
* Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored
* in a HashMap.
*
* @param table The name of the table
* @param startkey The record key of the first record to read.
* @param recordcount The number of records to read
* @param fields The list of fields to read, or null for all of them
* @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
* @return Zero on success, a non-zero error code on error
*/
public Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
delay();
return Status.OK;
}
/**
* Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key, overwriting any existing values with the same field name.
*
* @param table The name of the table
* @param key The record key of the record to write.
* @param values A HashMap of field/value pairs to update in the record
* @return Zero on success, a non-zero error code on error
*/
public Status update(String table, String key, Map<String, ByteIterator> values) {
delay();
return Status.OK;
}
/**
* Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key.
*
* @param table The name of the table
* @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record
* @return Zero on success, a non-zero error code on error
*/
public Status insert(String table, String key, Map<String, ByteIterator> values) {
delay();
return Status.OK;
}
/**
* Delete a record from the database.
*
* @param table The name of the table
* @param key The record key of the record to delete.
* @return Zero on success, a non-zero error code on error
*/
public Status delete(String table, String key) {
delay();
return Status.OK;
}
}
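// Illustrative sketch (not part of the original YCSB sources): configures the delay
// buckets through the gbudb.delays property before running a single read. It assumes
// the standard setProperties(...)/init() plumbing of the YCSB DB base class; the class
// name GoodBadUglyDbExample is hypothetical.
final class GoodBadUglyDbExample {
  private GoodBadUglyDbExample() {
  }

  public static void main(String[] args) {
    java.util.Properties props = new java.util.Properties();
    // Five comma-separated microsecond values: consecutive pairs bound the
    // good/bad/ugly/horrible delay ranges picked in delay().
    props.setProperty(GoodBadUglyDB.SIMULATE_DELAY, "100,500,5000,20000,50000");

    GoodBadUglyDB db = new GoodBadUglyDB();
    db.setProperties(props);
    db.init();

    long start = System.nanoTime();
    Status status = db.read("usertable", "user1", null, new HashMap<>());
    long tookUs = (System.nanoTime() - start) / 1000;
    System.out.println(status + " after ~" + tookUs + " us");
  }
}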
| 5,612 | 33.648148 | 116 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/NumericByteIterator.java | /**
* Copyright (c) 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
/**
* A byte iterator that handles encoding and decoding numeric values.
* Currently this iterator can handle 64 bit signed values and double precision
* floating point values.
*/
public class NumericByteIterator extends ByteIterator {
private final byte[] payload;
private final boolean floatingPoint;
private int off;
public NumericByteIterator(final long value) {
floatingPoint = false;
payload = Utils.longToBytes(value);
off = 0;
}
public NumericByteIterator(final double value) {
floatingPoint = true;
payload = Utils.doubleToBytes(value);
off = 0;
}
@Override
public boolean hasNext() {
return off < payload.length;
}
@Override
public byte nextByte() {
return payload[off++];
}
@Override
public long bytesLeft() {
return payload.length - off;
}
@Override
public void reset() {
off = 0;
}
public long getLong() {
if (floatingPoint) {
throw new IllegalStateException("Byte iterator is of the type double");
}
return Utils.bytesToLong(payload);
}
public double getDouble() {
if (!floatingPoint) {
throw new IllegalStateException("Byte iterator is of the type long");
}
return Utils.bytesToDouble(payload);
}
public boolean isFloatingPoint() {
return floatingPoint;
}
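  // Illustrative sketch (not part of the original YCSB sources): round-trips a
  // long and a double to show how getLong()/getDouble() pair with the
  // floating-point flag. A main method like this is not in the upstream class.
  public static void main(String[] args) {
    NumericByteIterator longIt = new NumericByteIterator(42L);
    System.out.println(longIt.isFloatingPoint()); // false
    System.out.println(longIt.getLong());         // 42

    NumericByteIterator doubleIt = new NumericByteIterator(3.14d);
    System.out.println(doubleIt.isFloatingPoint()); // true
    System.out.println(doubleIt.getDouble());       // 3.14
  }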
}
| 2,002 | 24.35443 | 79 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/Status.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
/**
* The result of an operation.
*/
public class Status {
private final String name;
private final String description;
/**
* @param name A short name for the status.
* @param description A description of the status.
*/
public Status(String name, String description) {
super();
this.name = name;
this.description = description;
}
public String getName() {
return name;
}
public String getDescription() {
return description;
}
@Override
public String toString() {
return "Status [name=" + name + ", description=" + description + "]";
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((description == null) ? 0 : description.hashCode());
result = prime * result + ((name == null) ? 0 : name.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Status other = (Status) obj;
if (description == null) {
if (other.description != null) {
return false;
}
} else if (!description.equals(other.description)) {
return false;
}
if (name == null) {
if (other.name != null) {
return false;
}
} else if (!name.equals(other.name)) {
return false;
}
return true;
}
/**
* Is {@code this} a passing state for the operation: {@link Status#OK} or {@link Status#BATCHED_OK}.
* @return true if the operation is successful, false otherwise
*/
public boolean isOk() {
return this == OK || this == BATCHED_OK;
}
public static final Status OK = new Status("OK", "The operation completed successfully.");
public static final Status ERROR = new Status("ERROR", "The operation failed.");
public static final Status NOT_FOUND = new Status("NOT_FOUND", "The requested record was not found.");
public static final Status NOT_IMPLEMENTED = new Status("NOT_IMPLEMENTED", "The operation is not " +
"implemented for the current binding.");
public static final Status UNEXPECTED_STATE = new Status("UNEXPECTED_STATE", "The operation reported" +
" success, but the result was not as expected.");
public static final Status BAD_REQUEST = new Status("BAD_REQUEST", "The request was not valid.");
public static final Status FORBIDDEN = new Status("FORBIDDEN", "The operation is forbidden.");
  public static final Status SERVICE_UNAVAILABLE = new Status("SERVICE_UNAVAILABLE", "Dependent " +
"service for the current binding is not available.");
public static final Status BATCHED_OK = new Status("BATCHED_OK", "The operation has been batched by " +
"the binding to be executed later.");
}
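// Illustrative sketch (not part of the original YCSB sources): shows how callers
// typically branch on a Status returned by a DB operation. The class name
// StatusExample is hypothetical.
final class StatusExample {
  private StatusExample() {
  }

  static void report(Status status) {
    if (status.isOk()) {
      // OK and BATCHED_OK both count as success.
      System.out.println("success: " + status.getName());
    } else if (Status.NOT_FOUND.equals(status)) {
      System.out.println("record missing");
    } else {
      System.out.println("failed: " + status.getDescription());
    }
  }

  public static void main(String[] args) {
    report(Status.OK);
    report(Status.BATCHED_OK);
    report(Status.NOT_FOUND);
    report(Status.ERROR);
  }
}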
| 3,541 | 30.90991 | 105 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/ByteIterator.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.Charset;
import java.util.Iterator;
/**
* YCSB-specific buffer class. ByteIterators are designed to support
* efficient field generation, and to allow backend drivers that can stream
* fields (instead of materializing them in RAM) to do so.
* <p>
 * YCSB originally used String objects to represent field values. This led to
* two performance issues.
* </p><p>
* First, it leads to unnecessary conversions between UTF-16 and UTF-8, both
* during field generation, and when passing data to byte-based backend
* drivers.
* </p><p>
* Second, Java strings are represented internally using UTF-16, and are
* built by appending to a growable array type (StringBuilder or
* StringBuffer), then calling a toString() method. This leads to a 4x memory
* overhead as field values are being built, which prevented YCSB from
* driving large object stores.
* </p>
* The StringByteIterator class contains a number of convenience methods for
* backend drivers that convert between Map<String,String> and
 * Map<String,ByteIterator>.
*
*/
public abstract class ByteIterator implements Iterator<Byte> {
@Override
public abstract boolean hasNext();
@Override
public Byte next() {
throw new UnsupportedOperationException();
}
public abstract byte nextByte();
/** @return byte offset immediately after the last valid byte */
public int nextBuf(byte[] buf, int bufOff) {
int sz = bufOff;
while (sz < buf.length && hasNext()) {
buf[sz] = nextByte();
sz++;
}
return sz;
}
public abstract long bytesLeft();
@Override
public void remove() {
throw new UnsupportedOperationException();
}
/** Resets the iterator so that it can be consumed again. Not all
* implementations support this call.
* @throws UnsupportedOperationException if the implementation hasn't implemented
* the method.
*/
public void reset() {
throw new UnsupportedOperationException();
}
/** Consumes remaining contents of this object, and returns them as a string. */
public String toString() {
Charset cset = Charset.forName("UTF-8");
CharBuffer cb = cset.decode(ByteBuffer.wrap(this.toArray()));
return cb.toString();
}
/** Consumes remaining contents of this object, and returns them as a byte array. */
public byte[] toArray() {
long left = bytesLeft();
if (left != (int) left) {
throw new ArrayIndexOutOfBoundsException("Too much data to fit in one array!");
}
byte[] ret = new byte[(int) left];
for (int i = 0; i < ret.length; i++) {
ret[i] = nextByte();
}
return ret;
}
}
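// Illustrative sketch (not part of the original YCSB sources): a minimal
// ByteIterator implementation that streams a fixed byte a given number of times,
// showing the three abstract methods a field generator or backend must provide.
// The class name FixedByteIterator is hypothetical.
final class FixedByteIterator extends ByteIterator {
  private final byte value;
  private final long len;
  private long consumed;

  FixedByteIterator(byte value, long len) {
    this.value = value;
    this.len = len;
  }

  @Override
  public boolean hasNext() {
    return consumed < len;
  }

  @Override
  public byte nextByte() {
    consumed++;
    return value;
  }

  @Override
  public long bytesLeft() {
    return len - consumed;
  }

  public static void main(String[] args) {
    // toString() is inherited: it drains the iterator and decodes the bytes as UTF-8.
    System.out.println(new FixedByteIterator((byte) 'x', 5)); // "xxxxx"
  }
}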
| 3,407 | 31.150943 | 86 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/Utils.java | /**
* Copyright (c) 2010 Yahoo! Inc., 2016 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;
/**
* Utility functions.
*/
public final class Utils {
private Utils() {
// not used
}
/**
* Hash an integer value.
*/
public static long hash(long val) {
return fnvhash64(val);
}
public static final long FNV_OFFSET_BASIS_64 = 0xCBF29CE484222325L;
public static final long FNV_PRIME_64 = 1099511628211L;
/**
* 64 bit FNV hash. Produces more "random" hashes than (say) String.hashCode().
*
* @param val The value to hash.
* @return The hash value
*/
public static long fnvhash64(long val) {
//from http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash
long hashval = FNV_OFFSET_BASIS_64;
for (int i = 0; i < 8; i++) {
long octet = val & 0x00ff;
val = val >> 8;
hashval = hashval ^ octet;
hashval = hashval * FNV_PRIME_64;
//hashval = hashval ^ octet;
}
return Math.abs(hashval);
}
/**
   * Reads a big-endian 8-byte long from the start of the given array.
* @param bytes The array to read from.
* @return A long integer.
* @throws IndexOutOfBoundsException if the byte array is too small.
* @throws NullPointerException if the byte array is null.
*/
public static long bytesToLong(final byte[] bytes) {
return (bytes[0] & 0xFFL) << 56
| (bytes[1] & 0xFFL) << 48
| (bytes[2] & 0xFFL) << 40
| (bytes[3] & 0xFFL) << 32
| (bytes[4] & 0xFFL) << 24
| (bytes[5] & 0xFFL) << 16
| (bytes[6] & 0xFFL) << 8
| (bytes[7] & 0xFFL) << 0;
}
/**
   * Encodes the value as a big-endian 8-byte array.
   * @param val The value to encode.
   * @return A newly allocated byte array of length 8.
*/
public static byte[] longToBytes(final long val) {
final byte[] bytes = new byte[8];
bytes[0] = (byte) (val >>> 56);
bytes[1] = (byte) (val >>> 48);
bytes[2] = (byte) (val >>> 40);
bytes[3] = (byte) (val >>> 32);
bytes[4] = (byte) (val >>> 24);
bytes[5] = (byte) (val >>> 16);
bytes[6] = (byte) (val >>> 8);
bytes[7] = (byte) (val >>> 0);
return bytes;
}
/**
* Parses the byte array into a double.
* The byte array must be at least 8 bytes long and have been encoded using
* {@link #doubleToBytes}. If the array is longer than 8 bytes, only the
* first 8 bytes are parsed.
* @param bytes The byte array to parse, at least 8 bytes.
* @return A double value read from the byte array.
* @throws IllegalArgumentException if the byte array is not 8 bytes wide.
*/
public static double bytesToDouble(final byte[] bytes) {
if (bytes.length < 8) {
throw new IllegalArgumentException("Byte array must be 8 bytes wide.");
}
return Double.longBitsToDouble(bytesToLong(bytes));
}
/**
* Encodes the double value as an 8 byte array.
* @param val The double value to encode.
* @return A byte array of length 8.
*/
public static byte[] doubleToBytes(final double val) {
return longToBytes(Double.doubleToRawLongBits(val));
}
/**
* Measure the estimated active thread count in the current thread group.
   * Since this calls {@link Thread#activeCount()} it should be called from the
* main thread or one started by the main thread. Threads included in the
* count can be in any state.
   * For a more accurate count we could use {@code Thread.getAllStackTraces().size()},
   * but that freezes the JVM and incurs a high overhead.
* @return An estimated thread count, good for showing the thread count
* over time.
*/
public static int getActiveThreadCount() {
return Thread.activeCount();
}
/** @return The currently used memory in bytes */
public static long getUsedMemoryBytes() {
final Runtime runtime = Runtime.getRuntime();
return runtime.totalMemory() - runtime.freeMemory();
}
/** @return The currently used memory in megabytes. */
public static int getUsedMemoryMegaBytes() {
return (int) (getUsedMemoryBytes() / 1024 / 1024);
}
/** @return The current system load average if supported by the JDK.
* If it's not supported, the value will be negative. */
public static double getSystemLoadAverage() {
final OperatingSystemMXBean osBean =
ManagementFactory.getOperatingSystemMXBean();
return osBean.getSystemLoadAverage();
}
/** @return The total number of garbage collections executed for all
* memory pools. */
public static long getGCTotalCollectionCount() {
final List<GarbageCollectorMXBean> gcBeans =
ManagementFactory.getGarbageCollectorMXBeans();
long count = 0;
for (final GarbageCollectorMXBean bean : gcBeans) {
if (bean.getCollectionCount() < 0) {
continue;
}
count += bean.getCollectionCount();
}
return count;
}
/** @return The total time, in milliseconds, spent in GC. */
public static long getGCTotalTime() {
final List<GarbageCollectorMXBean> gcBeans =
ManagementFactory.getGarbageCollectorMXBeans();
long time = 0;
for (final GarbageCollectorMXBean bean : gcBeans) {
if (bean.getCollectionTime() < 0) {
continue;
}
time += bean.getCollectionTime();
}
return time;
}
/**
* Returns a map of garbage collectors and their stats.
* The first object in the array is the total count since JVM start and the
* second is the total time (ms) since JVM start.
   * If a garbage collector does not support the collector MXBean, then it
* will not be represented in the map.
* @return A non-null map of garbage collectors and their metrics. The map
* may be empty.
*/
public static Map<String, Long[]> getGCStatst() {
final List<GarbageCollectorMXBean> gcBeans =
ManagementFactory.getGarbageCollectorMXBeans();
final Map<String, Long[]> map = new HashMap<String, Long[]>(gcBeans.size());
for (final GarbageCollectorMXBean bean : gcBeans) {
if (!bean.isValid() || bean.getCollectionCount() < 0 ||
bean.getCollectionTime() < 0) {
continue;
}
final Long[] measurements = new Long[]{
bean.getCollectionCount(),
bean.getCollectionTime()
};
map.put(bean.getName().replace(" ", "_"), measurements);
}
return map;
}
/**
* Simple Fisher-Yates array shuffle to randomize discrete sets.
* @param array The array to randomly shuffle.
* @return The shuffled array.
*/
public static <T> T [] shuffleArray(final T[] array) {
for (int i = array.length -1; i > 0; i--) {
final int idx = ThreadLocalRandom.current().nextInt(i + 1);
final T temp = array[idx];
array[idx] = array[i];
array[i] = temp;
}
return array;
}
}
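// Illustrative sketch (not part of the original YCSB sources): round-trips the
// big-endian codecs and shows that the FNV hash is deterministic. The class name
// UtilsExample is hypothetical.
final class UtilsExample {
  private UtilsExample() {
  }

  public static void main(String[] args) {
    long decodedLong = Utils.bytesToLong(Utils.longToBytes(123456789L));
    double decodedDouble = Utils.bytesToDouble(Utils.doubleToBytes(2.5d));
    System.out.println(decodedLong);   // 123456789
    System.out.println(decodedDouble); // 2.5
    // The same input always yields the same hash value.
    System.out.println(Utils.hash(42L) == Utils.fnvhash64(42L)); // true
  }
}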
| 7,671 | 31.927039 | 85 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/Workload.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.Properties;
/**
* One experiment scenario. One object of this type will
* be instantiated and shared among all client threads. This class
* should be constructed using a no-argument constructor, so we can
* load it dynamically. Any argument-based initialization should be
* done by init().
*
* If you extend this class, you should support the "insertstart" property. This
 * allows the load to be driven from multiple Client instances on different machines, in case
 * a single client is the bottleneck. For example, if we want to load 1 million records from
* 2 machines, the first machine should have insertstart=0 and the second insertstart=500000. Additionally,
* the "insertcount" property, which is interpreted by Client, can be used to tell each instance of the
* client how many inserts to do. In the example above, both clients should have insertcount=500000.
*/
public abstract class Workload {
public static final String INSERT_START_PROPERTY = "insertstart";
public static final String INSERT_COUNT_PROPERTY = "insertcount";
public static final String INSERT_START_PROPERTY_DEFAULT = "0";
private volatile AtomicBoolean stopRequested = new AtomicBoolean(false);
/** Operations available for a database. */
public enum Operation {
READ,
UPDATE,
INSERT,
SCAN,
DELETE
}
/**
* Initialize the scenario. Create any generators and other shared objects here.
* Called once, in the main client thread, before any operations are started.
*/
public void init(Properties p) throws WorkloadException {
}
/**
* Initialize any state for a particular client thread. Since the scenario object
* will be shared among all threads, this is the place to create any state that is specific
* to one thread. To be clear, this means the returned object should be created anew on each
* call to initThread(); do not return the same object multiple times.
* The returned object will be passed to invocations of doInsert() and doTransaction()
* for this thread. There should be no side effects from this call; all state should be encapsulated
* in the returned object. If you have no state to retain for this thread, return null. (But if you have
* no state to retain for this thread, probably you don't need to override initThread().)
*
   * @return the per-thread state object that will be passed to doInsert() and doTransaction()
   * for this thread, or null if the thread needs no state.
*/
public Object initThread(Properties p, int mythreadid, int threadcount) throws WorkloadException {
return null;
}
/**
* Cleanup the scenario. Called once, in the main client thread, after all operations have completed.
*/
public void cleanup() throws WorkloadException {
}
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads, this
* function must be thread safe. However, avoid synchronized, or the threads will block waiting for each
* other, and it will be difficult to reach the target throughput. Ideally, this function would have no side
* effects other than DB operations and mutations on threadstate. Mutations to threadstate do not need to be
* synchronized, since each thread has its own threadstate instance.
*/
public abstract boolean doInsert(DB db, Object threadstate);
/**
* Do one transaction operation. Because it will be called concurrently from multiple client threads, this
* function must be thread safe. However, avoid synchronized, or the threads will block waiting for each
* other, and it will be difficult to reach the target throughput. Ideally, this function would have no side
* effects other than DB operations and mutations on threadstate. Mutations to threadstate do not need to be
* synchronized, since each thread has its own threadstate instance.
*
* @return false if the workload knows it is done for this thread. Client will terminate the thread.
* Return true otherwise. Return true for workloads that rely on operationcount. For workloads that read
* traces from a file, return true when there are more to do, false when you are done.
*/
public abstract boolean doTransaction(DB db, Object threadstate);
/**
* Allows scheduling a request to stop the workload.
*/
public void requestStop() {
stopRequested.set(true);
}
/**
* Check the status of the stop request flag.
* @return true if stop was requested, false otherwise.
*/
public boolean isStopRequested() {
return stopRequested.get();
}
}
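// Illustrative sketch (not part of the original YCSB sources): about the smallest
// useful Workload subclass, inserting and then reading back one fixed record. The
// class name SingleRecordWorkload and the literals "usertable", "user0", "field0"
// and "value0" are hypothetical placeholders.
class SingleRecordWorkload extends Workload {
  @Override
  public boolean doInsert(DB db, Object threadstate) {
    java.util.Map<String, ByteIterator> values = new java.util.HashMap<>();
    values.put("field0", new StringByteIterator("value0"));
    return db.insert("usertable", "user0", values).isOk();
  }

  @Override
  public boolean doTransaction(DB db, Object threadstate) {
    java.util.Map<String, ByteIterator> result = new java.util.HashMap<>();
    return db.read("usertable", "user0", null, result).isOk();
  }
}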
| 5,488 | 43.626016 | 110 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/StatusThread.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import site.ycsb.measurements.Measurements;
import java.text.DecimalFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
/**
* A thread to periodically show the status of the experiment to reassure you that progress is being made.
*/
public class StatusThread extends Thread {
// Counts down each of the clients completing
private final CountDownLatch completeLatch;
// Stores the measurements for the run
private final Measurements measurements;
// Whether or not to track the JVM stats per run
private final boolean trackJVMStats;
// The clients that are running.
private final List<ClientThread> clients;
private final String label;
private final boolean standardstatus;
// The interval for reporting status.
private long sleeptimeNs;
// JVM max/mins
private int maxThreads;
private int minThreads = Integer.MAX_VALUE;
private long maxUsedMem;
private long minUsedMem = Long.MAX_VALUE;
private double maxLoadAvg;
private double minLoadAvg = Double.MAX_VALUE;
private long lastGCCount = 0;
private long lastGCTime = 0;
/**
* Creates a new StatusThread without JVM stat tracking.
*
* @param completeLatch The latch that each client thread will {@link CountDownLatch#countDown()}
* as they complete.
* @param clients The clients to collect metrics from.
* @param label The label for the status.
* @param standardstatus If true the status is printed to stdout in addition to stderr.
* @param statusIntervalSeconds The number of seconds between status updates.
*/
public StatusThread(CountDownLatch completeLatch, List<ClientThread> clients,
String label, boolean standardstatus, int statusIntervalSeconds) {
this(completeLatch, clients, label, standardstatus, statusIntervalSeconds, false);
}
/**
* Creates a new StatusThread.
*
* @param completeLatch The latch that each client thread will {@link CountDownLatch#countDown()}
* as they complete.
* @param clients The clients to collect metrics from.
* @param label The label for the status.
* @param standardstatus If true the status is printed to stdout in addition to stderr.
* @param statusIntervalSeconds The number of seconds between status updates.
* @param trackJVMStats Whether or not to track JVM stats.
*/
public StatusThread(CountDownLatch completeLatch, List<ClientThread> clients,
String label, boolean standardstatus, int statusIntervalSeconds,
boolean trackJVMStats) {
this.completeLatch = completeLatch;
this.clients = clients;
this.label = label;
this.standardstatus = standardstatus;
sleeptimeNs = TimeUnit.SECONDS.toNanos(statusIntervalSeconds);
measurements = Measurements.getMeasurements();
this.trackJVMStats = trackJVMStats;
}
/**
* Run and periodically report status.
*/
@Override
public void run() {
final long startTimeMs = System.currentTimeMillis();
final long startTimeNanos = System.nanoTime();
long deadline = startTimeNanos + sleeptimeNs;
long startIntervalMs = startTimeMs;
long lastTotalOps = 0;
boolean alldone;
do {
long nowMs = System.currentTimeMillis();
lastTotalOps = computeStats(startTimeMs, startIntervalMs, nowMs, lastTotalOps);
if (trackJVMStats) {
measureJVM();
}
alldone = waitForClientsUntil(deadline);
startIntervalMs = nowMs;
deadline += sleeptimeNs;
}
while (!alldone);
if (trackJVMStats) {
measureJVM();
}
// Print the final stats.
computeStats(startTimeMs, startIntervalMs, System.currentTimeMillis(), lastTotalOps);
}
/**
* Computes and prints the stats.
*
* @param startTimeMs The start time of the test.
* @param startIntervalMs The start time of this interval.
* @param endIntervalMs The end time (now) for the interval.
* @param lastTotalOps The last total operations count.
* @return The current operation count.
*/
private long computeStats(final long startTimeMs, long startIntervalMs, long endIntervalMs,
long lastTotalOps) {
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss:SSS");
long totalops = 0;
long todoops = 0;
// Calculate the total number of operations completed.
for (ClientThread t : clients) {
totalops += t.getOpsDone();
todoops += t.getOpsTodo();
}
long interval = endIntervalMs - startTimeMs;
double throughput = 1000.0 * (((double) totalops) / (double) interval);
double curthroughput = 1000.0 * (((double) (totalops - lastTotalOps)) /
((double) (endIntervalMs - startIntervalMs)));
long estremaining = (long) Math.ceil(todoops / throughput);
DecimalFormat d = new DecimalFormat("#.##");
String labelString = this.label + format.format(new Date());
StringBuilder msg = new StringBuilder(labelString).append(" ").append(interval / 1000).append(" sec: ");
msg.append(totalops).append(" operations; ");
if (totalops != 0) {
msg.append(d.format(curthroughput)).append(" current ops/sec; ");
}
if (todoops != 0) {
msg.append("est completion in ").append(RemainingFormatter.format(estremaining));
}
msg.append(Measurements.getMeasurements().getSummary());
System.err.println(msg);
if (standardstatus) {
System.out.println(msg);
}
return totalops;
}
/**
   * Waits for all of the clients to finish or the deadline to expire.
*
* @param deadline The current deadline.
* @return True if all of the clients completed.
*/
private boolean waitForClientsUntil(long deadline) {
boolean alldone = false;
long now = System.nanoTime();
while (!alldone && now < deadline) {
try {
alldone = completeLatch.await(deadline - now, TimeUnit.NANOSECONDS);
} catch (InterruptedException ie) {
// If we are interrupted the thread is being asked to shutdown.
// Return true to indicate that and reset the interrupt state
// of the thread.
Thread.currentThread().interrupt();
alldone = true;
}
now = System.nanoTime();
}
return alldone;
}
/**
* Executes the JVM measurements.
*/
private void measureJVM() {
final int threads = Utils.getActiveThreadCount();
if (threads < minThreads) {
minThreads = threads;
}
if (threads > maxThreads) {
maxThreads = threads;
}
measurements.measure("THREAD_COUNT", threads);
// TODO - once measurements allow for other number types, switch to using
// the raw bytes. Otherwise we can track in MB to avoid negative values
// when faced with huge heaps.
final int usedMem = Utils.getUsedMemoryMegaBytes();
if (usedMem < minUsedMem) {
minUsedMem = usedMem;
}
if (usedMem > maxUsedMem) {
maxUsedMem = usedMem;
}
measurements.measure("USED_MEM_MB", usedMem);
// Some JVMs may not implement this feature so if the value is less than
    // zero, just omit it.
final double systemLoad = Utils.getSystemLoadAverage();
if (systemLoad >= 0) {
// TODO - store the double if measurements allows for them
measurements.measure("SYS_LOAD_AVG", (int) systemLoad);
if (systemLoad > maxLoadAvg) {
maxLoadAvg = systemLoad;
}
if (systemLoad < minLoadAvg) {
minLoadAvg = systemLoad;
}
}
final long gcs = Utils.getGCTotalCollectionCount();
measurements.measure("GCS", (int) (gcs - lastGCCount));
final long gcTime = Utils.getGCTotalTime();
measurements.measure("GCS_TIME", (int) (gcTime - lastGCTime));
lastGCCount = gcs;
lastGCTime = gcTime;
}
/**
* @return The maximum threads running during the test.
*/
public int getMaxThreads() {
return maxThreads;
}
/**
* @return The minimum threads running during the test.
*/
public int getMinThreads() {
return minThreads;
}
/**
* @return The maximum memory used during the test.
*/
public long getMaxUsedMem() {
return maxUsedMem;
}
/**
* @return The minimum memory used during the test.
*/
public long getMinUsedMem() {
return minUsedMem;
}
/**
* @return The maximum load average during the test.
*/
public double getMaxLoadAvg() {
return maxLoadAvg;
}
/**
* @return The minimum load average during the test.
*/
public double getMinLoadAvg() {
return minLoadAvg;
}
/**
* @return Whether or not the thread is tracking JVM stats.
*/
public boolean trackJVMStats() {
return trackJVMStats;
}
}
| 9,635 | 30.184466 | 108 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/BasicTSDB.java | /**
* Copyright (c) 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import site.ycsb.workloads.TimeSeriesWorkload;
/**
* Basic DB for printing out time series workloads and/or tracking the distribution
* of keys and fields.
*/
public class BasicTSDB extends BasicDB {
/** Time series workload specific counters. */
protected static Map<Long, Integer> timestamps;
protected static Map<Integer, Integer> floats;
protected static Map<Integer, Integer> integers;
private String timestampKey;
private String valueKey;
private String tagPairDelimiter;
private String queryTimeSpanDelimiter;
private long lastTimestamp;
@Override
public void init() {
super.init();
synchronized (MUTEX) {
if (timestamps == null) {
timestamps = new HashMap<Long, Integer>();
floats = new HashMap<Integer, Integer>();
integers = new HashMap<Integer, Integer>();
}
}
timestampKey = getProperties().getProperty(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY,
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT);
valueKey = getProperties().getProperty(
TimeSeriesWorkload.VALUE_KEY_PROPERTY,
TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT);
tagPairDelimiter = getProperties().getProperty(
TimeSeriesWorkload.PAIR_DELIMITER_PROPERTY,
TimeSeriesWorkload.PAIR_DELIMITER_PROPERTY_DEFAULT);
queryTimeSpanDelimiter = getProperties().getProperty(
TimeSeriesWorkload.QUERY_TIMESPAN_DELIMITER_PROPERTY,
TimeSeriesWorkload.QUERY_TIMESPAN_DELIMITER_PROPERTY_DEFAULT);
}
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
delay();
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("READ ").append(table).append(" ").append(key).append(" [ ");
if (fields != null) {
for (String f : fields) {
sb.append(f).append(" ");
}
} else {
sb.append("<all fields>");
}
sb.append("]");
System.out.println(sb);
}
if (count) {
Set<String> filtered = null;
if (fields != null) {
filtered = new HashSet<String>();
for (final String field : fields) {
if (field.startsWith(timestampKey)) {
String[] parts = field.split(tagPairDelimiter);
if (parts[1].contains(queryTimeSpanDelimiter)) {
parts = parts[1].split(queryTimeSpanDelimiter);
lastTimestamp = Long.parseLong(parts[0]);
} else {
lastTimestamp = Long.parseLong(parts[1]);
}
synchronized(timestamps) {
Integer ctr = timestamps.get(lastTimestamp);
if (ctr == null) {
timestamps.put(lastTimestamp, 1);
} else {
timestamps.put(lastTimestamp, ctr + 1);
}
}
} else {
filtered.add(field);
}
}
}
incCounter(reads, hash(table, key, filtered));
}
return Status.OK;
}
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
delay();
boolean isFloat = false;
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ ");
if (values != null) {
final TreeMap<String, ByteIterator> tree = new TreeMap<String, ByteIterator>(values);
for (Map.Entry<String, ByteIterator> entry : tree.entrySet()) {
if (entry.getKey().equals(timestampKey)) {
sb.append(entry.getKey()).append("=")
.append(Utils.bytesToLong(entry.getValue().toArray())).append(" ");
} else if (entry.getKey().equals(valueKey)) {
final NumericByteIterator it = (NumericByteIterator) entry.getValue();
isFloat = it.isFloatingPoint();
sb.append(entry.getKey()).append("=")
.append(isFloat ? it.getDouble() : it.getLong()).append(" ");
} else {
sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" ");
}
}
}
sb.append("]");
System.out.println(sb);
}
if (count) {
if (!verbose) {
isFloat = ((NumericByteIterator) values.get(valueKey)).isFloatingPoint();
}
int hash = hash(table, key, values);
incCounter(updates, hash);
synchronized(timestamps) {
Integer ctr = timestamps.get(lastTimestamp);
if (ctr == null) {
timestamps.put(lastTimestamp, 1);
} else {
timestamps.put(lastTimestamp, ctr + 1);
}
}
if (isFloat) {
incCounter(floats, hash);
} else {
incCounter(integers, hash);
}
}
return Status.OK;
}
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
delay();
boolean isFloat = false;
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("INSERT ").append(table).append(" ").append(key).append(" [ ");
if (values != null) {
final TreeMap<String, ByteIterator> tree = new TreeMap<String, ByteIterator>(values);
for (Map.Entry<String, ByteIterator> entry : tree.entrySet()) {
if (entry.getKey().equals(timestampKey)) {
sb.append(entry.getKey()).append("=")
.append(Utils.bytesToLong(entry.getValue().toArray())).append(" ");
} else if (entry.getKey().equals(valueKey)) {
final NumericByteIterator it = (NumericByteIterator) entry.getValue();
isFloat = it.isFloatingPoint();
sb.append(entry.getKey()).append("=")
.append(isFloat ? it.getDouble() : it.getLong()).append(" ");
} else {
sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" ");
}
}
}
sb.append("]");
System.out.println(sb);
}
if (count) {
if (!verbose) {
isFloat = ((NumericByteIterator) values.get(valueKey)).isFloatingPoint();
}
int hash = hash(table, key, values);
incCounter(inserts, hash);
synchronized(timestamps) {
Integer ctr = timestamps.get(lastTimestamp);
if (ctr == null) {
timestamps.put(lastTimestamp, 1);
} else {
timestamps.put(lastTimestamp, ctr + 1);
}
}
if (isFloat) {
incCounter(floats, hash);
} else {
incCounter(integers, hash);
}
}
return Status.OK;
}
@Override
public void cleanup() {
super.cleanup();
if (count && counter < 1) {
System.out.println("[TIMESTAMPS], Unique, " + timestamps.size());
System.out.println("[FLOATS], Unique series, " + floats.size());
System.out.println("[INTEGERS], Unique series, " + integers.size());
long minTs = Long.MAX_VALUE;
long maxTs = Long.MIN_VALUE;
for (final long ts : timestamps.keySet()) {
if (ts > maxTs) {
maxTs = ts;
}
if (ts < minTs) {
minTs = ts;
}
}
System.out.println("[TIMESTAMPS], Min, " + minTs);
System.out.println("[TIMESTAMPS], Max, " + maxTs);
}
}
@Override
protected int hash(final String table, final String key, final Map<String, ByteIterator> values) {
final TreeMap<String, ByteIterator> sorted = new TreeMap<String, ByteIterator>();
for (final Entry<String, ByteIterator> entry : values.entrySet()) {
if (entry.getKey().equals(valueKey)) {
continue;
} else if (entry.getKey().equals(timestampKey)) {
lastTimestamp = ((NumericByteIterator) entry.getValue()).getLong();
entry.getValue().reset();
continue;
}
sorted.put(entry.getKey(), entry.getValue());
}
// yeah it's ugly but gives us a unique hash without having to add hashers
// to all of the ByteIterators.
StringBuilder buf = new StringBuilder().append(table).append(key);
for (final Entry<String, ByteIterator> entry : sorted.entrySet()) {
entry.getValue().reset();
buf.append(entry.getKey())
.append(entry.getValue().toString());
}
return buf.toString().hashCode();
}
}
| 9,106 | 32.358974 | 102 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/RandomByteIterator.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.concurrent.ThreadLocalRandom;
/**
* A ByteIterator that generates a random sequence of bytes.
*/
public class RandomByteIterator extends ByteIterator {
private final long len;
private long off;
private int bufOff;
private final byte[] buf;
@Override
public boolean hasNext() {
return (off + bufOff) < len;
}
private void fillBytesImpl(byte[] buffer, int base) {
    int bytes = ThreadLocalRandom.current().nextInt();
    // Unpack up to six printable ASCII bytes from a single random int;
    // the cases below intentionally fall through.
    switch (buffer.length - base) {
default:
buffer[base + 5] = (byte) (((bytes >> 25) & 95) + ' ');
case 5:
buffer[base + 4] = (byte) (((bytes >> 20) & 63) + ' ');
case 4:
buffer[base + 3] = (byte) (((bytes >> 15) & 31) + ' ');
case 3:
buffer[base + 2] = (byte) (((bytes >> 10) & 95) + ' ');
case 2:
buffer[base + 1] = (byte) (((bytes >> 5) & 63) + ' ');
case 1:
buffer[base + 0] = (byte) (((bytes) & 31) + ' ');
case 0:
break;
}
}
private void fillBytes() {
if (bufOff == buf.length) {
fillBytesImpl(buf, 0);
bufOff = 0;
off += buf.length;
}
}
public RandomByteIterator(long len) {
this.len = len;
this.buf = new byte[6];
this.bufOff = buf.length;
fillBytes();
this.off = 0;
}
public byte nextByte() {
fillBytes();
bufOff++;
return buf[bufOff - 1];
}
@Override
public int nextBuf(byte[] buffer, int bufOffset) {
int ret;
if (len - off < buffer.length - bufOffset) {
ret = (int) (len - off);
} else {
ret = buffer.length - bufOffset;
}
int i;
for (i = 0; i < ret; i += 6) {
fillBytesImpl(buffer, i + bufOffset);
}
off += ret;
return ret + bufOffset;
}
@Override
public long bytesLeft() {
return len - off - bufOff;
}
@Override
public void reset() {
off = 0;
}
/** Consumes remaining contents of this object, and returns them as a byte array. */
public byte[] toArray() {
long left = bytesLeft();
if (left != (int) left) {
throw new ArrayIndexOutOfBoundsException("Too much data to fit in one array!");
}
byte[] ret = new byte[(int) left];
int bufOffset = 0;
while (bufOffset < ret.length) {
bufOffset = nextBuf(ret, bufOffset);
}
return ret;
}
}
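// Illustrative sketch (not part of the original YCSB sources): generates a
// fixed-length random field value the way YCSB workloads do, then materializes it.
// The class name RandomByteIteratorExample is hypothetical.
final class RandomByteIteratorExample {
  private RandomByteIteratorExample() {
  }

  public static void main(String[] args) {
    ByteIterator field = new RandomByteIterator(16);
    byte[] value = field.toArray();   // drains the iterator
    System.out.println(value.length); // 16
    // The generated bytes are in the printable ASCII range, so decoding is safe.
    System.out.println(new String(value, java.nio.charset.StandardCharsets.UTF_8));
  }
}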
| 3,004 | 24.252101 | 86 | java |