repo
stringlengths 1
191
⌀ | file
stringlengths 23
351
| code
stringlengths 0
5.32M
| file_length
int64 0
5.32M
| avg_line_length
float64 0
2.9k
| max_line_length
int64 0
288k
| extension_type
stringclasses 1
value |
---|---|---|---|---|---|---|
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/Client.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import site.ycsb.measurements.Measurements;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import site.ycsb.measurements.exporter.TextMeasurementsExporter;
import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.*;
import java.util.Map.Entry;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
/**
* Turn seconds remaining into more useful units.
* i.e. if there are hours or days worth of seconds, use them.
*/
final class RemainingFormatter {
private RemainingFormatter() {
// not used
}
public static StringBuilder format(long seconds) {
StringBuilder time = new StringBuilder();
long days = TimeUnit.SECONDS.toDays(seconds);
if (days > 0) {
time.append(days).append(days == 1 ? " day " : " days ");
seconds -= TimeUnit.DAYS.toSeconds(days);
}
long hours = TimeUnit.SECONDS.toHours(seconds);
if (hours > 0) {
time.append(hours).append(hours == 1 ? " hour " : " hours ");
seconds -= TimeUnit.HOURS.toSeconds(hours);
}
/* Only include minute granularity if we're < 1 day. */
if (days < 1) {
long minutes = TimeUnit.SECONDS.toMinutes(seconds);
if (minutes > 0) {
time.append(minutes).append(minutes == 1 ? " minute " : " minutes ");
seconds -= TimeUnit.MINUTES.toSeconds(seconds);
}
}
/* Only bother to include seconds if we're < 1 minute */
if (time.length() == 0) {
time.append(seconds).append(time.length() == 1 ? " second " : " seconds ");
}
return time;
}
}
/**
* Main class for executing YCSB.
*/
public final class Client {
private Client() {
//not used
}
public static final String DEFAULT_RECORD_COUNT = "0";
/**
* The target number of operations to perform.
*/
public static final String OPERATION_COUNT_PROPERTY = "operationcount";
/**
* The number of records to load into the database initially.
*/
public static final String RECORD_COUNT_PROPERTY = "recordcount";
/**
* The workload class to be loaded.
*/
public static final String WORKLOAD_PROPERTY = "workload";
/**
* The database class to be used.
*/
public static final String DB_PROPERTY = "db";
/**
* The exporter class to be used. The default is
* site.ycsb.measurements.exporter.TextMeasurementsExporter.
*/
public static final String EXPORTER_PROPERTY = "exporter";
/**
* If set to the path of a file, YCSB will write all output to this file
* instead of STDOUT.
*/
public static final String EXPORT_FILE_PROPERTY = "exportfile";
/**
* The number of YCSB client threads to run.
*/
public static final String THREAD_COUNT_PROPERTY = "threadcount";
/**
* Indicates how many inserts to do if less than recordcount.
* Useful for partitioning the load among multiple servers if the client is the bottleneck.
* Additionally workloads should support the "insertstart" property which tells them which record to start at.
*/
public static final String INSERT_COUNT_PROPERTY = "insertcount";
/**
* Target number of operations per second.
*/
public static final String TARGET_PROPERTY = "target";
/**
* The maximum amount of time (in seconds) for which the benchmark will be run.
*/
public static final String MAX_EXECUTION_TIME = "maxexecutiontime";
/**
* Whether or not this is the transaction phase (run) or not (load).
*/
public static final String DO_TRANSACTIONS_PROPERTY = "dotransactions";
/**
* Whether or not to show status during run.
*/
public static final String STATUS_PROPERTY = "status";
/**
* Use label for status (e.g. to label one experiment out of a whole batch).
*/
public static final String LABEL_PROPERTY = "label";
/**
* An optional thread used to track progress and measure JVM stats.
*/
private static StatusThread statusthread = null;
// HTrace integration related constants.
/**
* All keys for configuring the tracing system start with this prefix.
*/
private static final String HTRACE_KEY_PREFIX = "htrace.";
private static final String CLIENT_WORKLOAD_INIT_SPAN = "Client#workload_init";
private static final String CLIENT_INIT_SPAN = "Client#init";
private static final String CLIENT_WORKLOAD_SPAN = "Client#workload";
private static final String CLIENT_CLEANUP_SPAN = "Client#cleanup";
private static final String CLIENT_EXPORT_MEASUREMENTS_SPAN = "Client#export_measurements";
public static void usageMessage() {
System.out.println("Usage: java site.ycsb.Client [options]");
System.out.println("Options:");
System.out.println(" -threads n: execute using n threads (default: 1) - can also be specified as the \n" +
" \"threadcount\" property using -p");
System.out.println(" -target n: attempt to do n operations per second (default: unlimited) - can also\n" +
" be specified as the \"target\" property using -p");
System.out.println(" -load: run the loading phase of the workload");
System.out.println(" -t: run the transactions phase of the workload (default)");
System.out.println(" -db dbname: specify the name of the DB to use (default: site.ycsb.BasicDB) - \n" +
" can also be specified as the \"db\" property using -p");
System.out.println(" -P propertyfile: load properties from the given file. Multiple files can");
System.out.println(" be specified, and will be processed in the order specified");
System.out.println(" -p name=value: specify a property to be passed to the DB and workloads;");
System.out.println(" multiple properties can be specified, and override any");
System.out.println(" values in the propertyfile");
System.out.println(" -s: show status during run (default: no status)");
System.out.println(" -l label: use label for status (e.g. to label one experiment out of a whole batch)");
System.out.println("");
System.out.println("Required properties:");
System.out.println(" " + WORKLOAD_PROPERTY + ": the name of the workload class to use (e.g. " +
"site.ycsb.workloads.CoreWorkload)");
System.out.println("");
System.out.println("To run the transaction phase from multiple servers, start a separate client on each.");
System.out.println("To run the load phase from multiple servers, start a separate client on each; additionally,");
System.out.println("use the \"insertcount\" and \"insertstart\" properties to divide up the records " +
"to be inserted");
}
public static boolean checkRequiredProperties(Properties props) {
if (props.getProperty(WORKLOAD_PROPERTY) == null) {
System.out.println("Missing property: " + WORKLOAD_PROPERTY);
return false;
}
return true;
}
/**
* Exports the measurements to either sysout or a file using the exporter
* loaded from conf.
*
* @throws IOException Either failed to write to output stream or failed to close it.
*/
private static void exportMeasurements(Properties props, int opcount, long runtime)
throws IOException {
MeasurementsExporter exporter = null;
try {
// if no destination file is provided the results will be written to stdout
OutputStream out;
String exportFile = props.getProperty(EXPORT_FILE_PROPERTY);
if (exportFile == null) {
out = System.out;
} else {
out = new FileOutputStream(exportFile);
}
// if no exporter is provided the default text one will be used
String exporterStr = props.getProperty(EXPORTER_PROPERTY,
"site.ycsb.measurements.exporter.TextMeasurementsExporter");
try {
exporter = (MeasurementsExporter) Class.forName(exporterStr).getConstructor(OutputStream.class)
.newInstance(out);
} catch (Exception e) {
System.err.println("Could not find exporter " + exporterStr
+ ", will use default text reporter.");
e.printStackTrace();
exporter = new TextMeasurementsExporter(out);
}
exporter.write("OVERALL", "RunTime(ms)", runtime);
double throughput = 1000.0 * (opcount) / (runtime);
exporter.write("OVERALL", "Throughput(ops/sec)", throughput);
final Map<String, Long[]> gcs = Utils.getGCStatst();
long totalGCCount = 0;
long totalGCTime = 0;
for (final Entry<String, Long[]> entry : gcs.entrySet()) {
exporter.write("TOTAL_GCS_" + entry.getKey(), "Count", entry.getValue()[0]);
exporter.write("TOTAL_GC_TIME_" + entry.getKey(), "Time(ms)", entry.getValue()[1]);
exporter.write("TOTAL_GC_TIME_%_" + entry.getKey(), "Time(%)",
((double) entry.getValue()[1] / runtime) * (double) 100);
totalGCCount += entry.getValue()[0];
totalGCTime += entry.getValue()[1];
}
exporter.write("TOTAL_GCs", "Count", totalGCCount);
exporter.write("TOTAL_GC_TIME", "Time(ms)", totalGCTime);
exporter.write("TOTAL_GC_TIME_%", "Time(%)", ((double) totalGCTime / runtime) * (double) 100);
if (statusthread != null && statusthread.trackJVMStats()) {
exporter.write("MAX_MEM_USED", "MBs", statusthread.getMaxUsedMem());
exporter.write("MIN_MEM_USED", "MBs", statusthread.getMinUsedMem());
exporter.write("MAX_THREADS", "Count", statusthread.getMaxThreads());
exporter.write("MIN_THREADS", "Count", statusthread.getMinThreads());
exporter.write("MAX_SYS_LOAD_AVG", "Load", statusthread.getMaxLoadAvg());
exporter.write("MIN_SYS_LOAD_AVG", "Load", statusthread.getMinLoadAvg());
}
Measurements.getMeasurements().exportMeasurements(exporter);
} finally {
if (exporter != null) {
exporter.close();
}
}
}
@SuppressWarnings("unchecked")
public static void main(String[] args) {
Properties props = parseArguments(args);
boolean status = Boolean.valueOf(props.getProperty(STATUS_PROPERTY, String.valueOf(false)));
String label = props.getProperty(LABEL_PROPERTY, "");
long maxExecutionTime = Integer.parseInt(props.getProperty(MAX_EXECUTION_TIME, "0"));
//get number of threads, target and db
int threadcount = Integer.parseInt(props.getProperty(THREAD_COUNT_PROPERTY, "1"));
String dbname = props.getProperty(DB_PROPERTY, "site.ycsb.BasicDB");
int target = Integer.parseInt(props.getProperty(TARGET_PROPERTY, "0"));
//compute the target throughput
double targetperthreadperms = -1;
if (target > 0) {
double targetperthread = ((double) target) / ((double) threadcount);
targetperthreadperms = targetperthread / 1000.0;
}
Thread warningthread = setupWarningThread();
warningthread.start();
Measurements.setProperties(props);
Workload workload = getWorkload(props);
final Tracer tracer = getTracer(props, workload);
initWorkload(props, warningthread, workload, tracer);
System.err.println("Starting test.");
final CountDownLatch completeLatch = new CountDownLatch(threadcount);
final List<ClientThread> clients = initDb(dbname, props, threadcount, targetperthreadperms,
workload, tracer, completeLatch);
if (status) {
boolean standardstatus = false;
if (props.getProperty(Measurements.MEASUREMENT_TYPE_PROPERTY, "").compareTo("timeseries") == 0) {
standardstatus = true;
}
int statusIntervalSeconds = Integer.parseInt(props.getProperty("status.interval", "10"));
boolean trackJVMStats = props.getProperty(Measurements.MEASUREMENT_TRACK_JVM_PROPERTY,
Measurements.MEASUREMENT_TRACK_JVM_PROPERTY_DEFAULT).equals("true");
statusthread = new StatusThread(completeLatch, clients, label, standardstatus, statusIntervalSeconds,
trackJVMStats);
statusthread.start();
}
Thread terminator = null;
long st;
long en;
int opsDone;
try (final TraceScope span = tracer.newScope(CLIENT_WORKLOAD_SPAN)) {
final Map<Thread, ClientThread> threads = new HashMap<>(threadcount);
for (ClientThread client : clients) {
threads.put(new Thread(tracer.wrap(client, "ClientThread")), client);
}
st = System.currentTimeMillis();
for (Thread t : threads.keySet()) {
t.start();
}
if (maxExecutionTime > 0) {
terminator = new TerminatorThread(maxExecutionTime, threads.keySet(), workload);
terminator.start();
}
opsDone = 0;
for (Map.Entry<Thread, ClientThread> entry : threads.entrySet()) {
try {
entry.getKey().join();
opsDone += entry.getValue().getOpsDone();
} catch (InterruptedException ignored) {
// ignored
}
}
en = System.currentTimeMillis();
}
try {
try (final TraceScope span = tracer.newScope(CLIENT_CLEANUP_SPAN)) {
if (terminator != null && !terminator.isInterrupted()) {
terminator.interrupt();
}
if (status) {
// wake up status thread if it's asleep
statusthread.interrupt();
// at this point we assume all the monitored threads are already gone as per above join loop.
try {
statusthread.join();
} catch (InterruptedException ignored) {
// ignored
}
}
workload.cleanup();
}
} catch (WorkloadException e) {
e.printStackTrace();
e.printStackTrace(System.out);
System.exit(0);
}
try {
try (final TraceScope span = tracer.newScope(CLIENT_EXPORT_MEASUREMENTS_SPAN)) {
exportMeasurements(props, opsDone, en - st);
}
} catch (IOException e) {
System.err.println("Could not export measurements, error: " + e.getMessage());
e.printStackTrace();
System.exit(-1);
}
System.exit(0);
}
private static List<ClientThread> initDb(String dbname, Properties props, int threadcount,
double targetperthreadperms, Workload workload, Tracer tracer,
CountDownLatch completeLatch) {
boolean initFailed = false;
boolean dotransactions = Boolean.valueOf(props.getProperty(DO_TRANSACTIONS_PROPERTY, String.valueOf(true)));
final List<ClientThread> clients = new ArrayList<>(threadcount);
try (final TraceScope span = tracer.newScope(CLIENT_INIT_SPAN)) {
int opcount;
if (dotransactions) {
opcount = Integer.parseInt(props.getProperty(OPERATION_COUNT_PROPERTY, "0"));
} else {
if (props.containsKey(INSERT_COUNT_PROPERTY)) {
opcount = Integer.parseInt(props.getProperty(INSERT_COUNT_PROPERTY, "0"));
} else {
opcount = Integer.parseInt(props.getProperty(RECORD_COUNT_PROPERTY, DEFAULT_RECORD_COUNT));
}
}
if (threadcount > opcount && opcount > 0){
threadcount = opcount;
System.out.println("Warning: the threadcount is bigger than recordcount, the threadcount will be recordcount!");
}
for (int threadid = 0; threadid < threadcount; threadid++) {
DB db;
try {
db = DBFactory.newDB(dbname, props, tracer);
} catch (UnknownDBException e) {
System.out.println("Unknown DB " + dbname);
initFailed = true;
break;
}
int threadopcount = opcount / threadcount;
// ensure correct number of operations, in case opcount is not a multiple of threadcount
if (threadid < opcount % threadcount) {
++threadopcount;
}
ClientThread t = new ClientThread(db, dotransactions, workload, props, threadopcount, targetperthreadperms,
completeLatch);
t.setThreadId(threadid);
t.setThreadCount(threadcount);
clients.add(t);
}
if (initFailed) {
System.err.println("Error initializing datastore bindings.");
System.exit(0);
}
}
return clients;
}
private static Tracer getTracer(Properties props, Workload workload) {
return new Tracer.Builder("YCSB " + workload.getClass().getSimpleName())
.conf(getHTraceConfiguration(props))
.build();
}
private static void initWorkload(Properties props, Thread warningthread, Workload workload, Tracer tracer) {
try {
try (final TraceScope span = tracer.newScope(CLIENT_WORKLOAD_INIT_SPAN)) {
workload.init(props);
warningthread.interrupt();
}
} catch (WorkloadException e) {
e.printStackTrace();
e.printStackTrace(System.out);
System.exit(0);
}
}
private static HTraceConfiguration getHTraceConfiguration(Properties props) {
final Map<String, String> filteredProperties = new HashMap<>();
for (String key : props.stringPropertyNames()) {
if (key.startsWith(HTRACE_KEY_PREFIX)) {
filteredProperties.put(key.substring(HTRACE_KEY_PREFIX.length()), props.getProperty(key));
}
}
return HTraceConfiguration.fromMap(filteredProperties);
}
private static Thread setupWarningThread() {
//show a warning message that creating the workload is taking a while
//but only do so if it is taking longer than 2 seconds
//(showing the message right away if the setup wasn't taking very long was confusing people)
return new Thread() {
@Override
public void run() {
try {
sleep(2000);
} catch (InterruptedException e) {
return;
}
System.err.println(" (might take a few minutes for large data sets)");
}
};
}
private static Workload getWorkload(Properties props) {
ClassLoader classLoader = Client.class.getClassLoader();
try {
Properties projectProp = new Properties();
projectProp.load(classLoader.getResourceAsStream("project.properties"));
System.err.println("YCSB Client " + projectProp.getProperty("version"));
} catch (IOException e) {
System.err.println("Unable to retrieve client version.");
}
System.err.println();
System.err.println("Loading workload...");
try {
Class workloadclass = classLoader.loadClass(props.getProperty(WORKLOAD_PROPERTY));
return (Workload) workloadclass.newInstance();
} catch (Exception e) {
e.printStackTrace();
e.printStackTrace(System.out);
System.exit(0);
}
return null;
}
private static Properties parseArguments(String[] args) {
Properties props = new Properties();
System.err.print("Command line:");
for (String arg : args) {
System.err.print(" " + arg);
}
System.err.println();
Properties fileprops = new Properties();
int argindex = 0;
if (args.length == 0) {
usageMessage();
System.out.println("At least one argument specifying a workload is required.");
System.exit(0);
}
while (args[argindex].startsWith("-")) {
if (args[argindex].compareTo("-threads") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.out.println("Missing argument value for -threads.");
System.exit(0);
}
int tcount = Integer.parseInt(args[argindex]);
props.setProperty(THREAD_COUNT_PROPERTY, String.valueOf(tcount));
argindex++;
} else if (args[argindex].compareTo("-target") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.out.println("Missing argument value for -target.");
System.exit(0);
}
int ttarget = Integer.parseInt(args[argindex]);
props.setProperty(TARGET_PROPERTY, String.valueOf(ttarget));
argindex++;
} else if (args[argindex].compareTo("-load") == 0) {
props.setProperty(DO_TRANSACTIONS_PROPERTY, String.valueOf(false));
argindex++;
} else if (args[argindex].compareTo("-t") == 0) {
props.setProperty(DO_TRANSACTIONS_PROPERTY, String.valueOf(true));
argindex++;
} else if (args[argindex].compareTo("-s") == 0) {
props.setProperty(STATUS_PROPERTY, String.valueOf(true));
argindex++;
} else if (args[argindex].compareTo("-db") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.out.println("Missing argument value for -db.");
System.exit(0);
}
props.setProperty(DB_PROPERTY, args[argindex]);
argindex++;
} else if (args[argindex].compareTo("-l") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.out.println("Missing argument value for -l.");
System.exit(0);
}
props.setProperty(LABEL_PROPERTY, args[argindex]);
argindex++;
} else if (args[argindex].compareTo("-P") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.out.println("Missing argument value for -P.");
System.exit(0);
}
String propfile = args[argindex];
argindex++;
Properties myfileprops = new Properties();
try {
myfileprops.load(new FileInputStream(propfile));
} catch (IOException e) {
System.out.println("Unable to open the properties file " + propfile);
System.out.println(e.getMessage());
System.exit(0);
}
//Issue #5 - remove call to stringPropertyNames to make compilable under Java 1.5
for (Enumeration e = myfileprops.propertyNames(); e.hasMoreElements();) {
String prop = (String) e.nextElement();
fileprops.setProperty(prop, myfileprops.getProperty(prop));
}
} else if (args[argindex].compareTo("-p") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.out.println("Missing argument value for -p");
System.exit(0);
}
int eq = args[argindex].indexOf('=');
if (eq < 0) {
usageMessage();
System.out.println("Argument '-p' expected to be in key=value format (e.g., -p operationcount=99999)");
System.exit(0);
}
String name = args[argindex].substring(0, eq);
String value = args[argindex].substring(eq + 1);
props.put(name, value);
argindex++;
} else {
usageMessage();
System.out.println("Unknown option " + args[argindex]);
System.exit(0);
}
if (argindex >= args.length) {
break;
}
}
if (argindex != args.length) {
usageMessage();
if (argindex < args.length) {
System.out.println("An argument value without corresponding argument specifier (e.g., -p, -s) was found. "
+ "We expected an argument specifier and instead found " + args[argindex]);
} else {
System.out.println("An argument specifier without corresponding value was found at the end of the supplied " +
"command line arguments.");
}
System.exit(0);
}
//overwrite file properties with properties from the command line
//Issue #5 - remove call to stringPropertyNames to make compilable under Java 1.5
for (Enumeration e = props.propertyNames(); e.hasMoreElements();) {
String prop = (String) e.nextElement();
fileprops.setProperty(prop, props.getProperty(prop));
}
props = fileprops;
if (!checkRequiredProperties(props)) {
System.out.println("Failed check required properties.");
System.exit(0);
}
return props;
}
}
| 24,543 | 35.094118 | 120 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/ClientThread.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import site.ycsb.measurements.Measurements;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.locks.LockSupport;
/**
* A thread for executing transactions or data inserts to the database.
*/
public class ClientThread implements Runnable {
// Counts down each of the clients completing.
private final CountDownLatch completeLatch;
private static boolean spinSleep;
private DB db;
private boolean dotransactions;
private Workload workload;
private int opcount;
private double targetOpsPerMs;
private int opsdone;
private int threadid;
private int threadcount;
private Object workloadstate;
private Properties props;
private long targetOpsTickNs;
private final Measurements measurements;
/**
* Constructor.
*
* @param db the DB implementation to use
* @param dotransactions true to do transactions, false to insert data
* @param workload the workload to use
* @param props the properties defining the experiment
* @param opcount the number of operations (transactions or inserts) to do
* @param targetperthreadperms target number of operations per thread per ms
* @param completeLatch The latch tracking the completion of all clients.
*/
public ClientThread(DB db, boolean dotransactions, Workload workload, Properties props, int opcount,
double targetperthreadperms, CountDownLatch completeLatch) {
this.db = db;
this.dotransactions = dotransactions;
this.workload = workload;
this.opcount = opcount;
opsdone = 0;
if (targetperthreadperms > 0) {
targetOpsPerMs = targetperthreadperms;
targetOpsTickNs = (long) (1000000 / targetOpsPerMs);
}
this.props = props;
measurements = Measurements.getMeasurements();
spinSleep = Boolean.valueOf(this.props.getProperty("spin.sleep", "false"));
this.completeLatch = completeLatch;
}
public void setThreadId(final int threadId) {
threadid = threadId;
}
public void setThreadCount(final int threadCount) {
threadcount = threadCount;
}
public int getOpsDone() {
return opsdone;
}
@Override
public void run() {
try {
db.init();
} catch (DBException e) {
e.printStackTrace();
e.printStackTrace(System.out);
return;
}
try {
workloadstate = workload.initThread(props, threadid, threadcount);
} catch (WorkloadException e) {
e.printStackTrace();
e.printStackTrace(System.out);
return;
}
//NOTE: Switching to using nanoTime and parkNanos for time management here such that the measurements
// and the client thread have the same view on time.
//spread the thread operations out so they don't all hit the DB at the same time
// GH issue 4 - throws exception if _target>1 because random.nextInt argument must be >0
// and the sleep() doesn't make sense for granularities < 1 ms anyway
if ((targetOpsPerMs > 0) && (targetOpsPerMs <= 1.0)) {
long randomMinorDelay = ThreadLocalRandom.current().nextInt((int) targetOpsTickNs);
sleepUntil(System.nanoTime() + randomMinorDelay);
}
try {
if (dotransactions) {
long startTimeNanos = System.nanoTime();
while (((opcount == 0) || (opsdone < opcount)) && !workload.isStopRequested()) {
if (!workload.doTransaction(db, workloadstate)) {
break;
}
opsdone++;
throttleNanos(startTimeNanos);
}
} else {
long startTimeNanos = System.nanoTime();
while (((opcount == 0) || (opsdone < opcount)) && !workload.isStopRequested()) {
if (!workload.doInsert(db, workloadstate)) {
break;
}
opsdone++;
throttleNanos(startTimeNanos);
}
}
} catch (Exception e) {
e.printStackTrace();
e.printStackTrace(System.out);
System.exit(0);
}
try {
measurements.setIntendedStartTimeNs(0);
db.cleanup();
} catch (DBException e) {
e.printStackTrace();
e.printStackTrace(System.out);
} finally {
completeLatch.countDown();
}
}
private static void sleepUntil(long deadline) {
while (System.nanoTime() < deadline) {
if (!spinSleep) {
LockSupport.parkNanos(deadline - System.nanoTime());
}
}
}
private void throttleNanos(long startTimeNanos) {
//throttle the operations
if (targetOpsPerMs > 0) {
// delay until next tick
long deadline = startTimeNanos + opsdone * targetOpsTickNs;
sleepUntil(deadline);
measurements.setIntendedStartTimeNs(deadline);
}
}
/**
* The total amount of work this thread is still expected to do.
*/
int getOpsTodo() {
int todo = opcount - opsdone;
return todo < 0 ? 0 : todo;
}
}
| 5,667 | 29.31016 | 105 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/DBWrapper.java | /**
* Copyright (c) 2010 Yahoo! Inc., 2016-2020 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.Map;
import site.ycsb.measurements.Measurements;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Wrapper around a "real" DB that measures latencies and counts return codes.
* Also reports latency separately between OK and failed operations.
*/
public class DBWrapper extends DB {
private final DB db;
private final Measurements measurements;
private final Tracer tracer;
private boolean reportLatencyForEachError = false;
private Set<String> latencyTrackedErrors = new HashSet<String>();
private static final String REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY = "reportlatencyforeacherror";
private static final String REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY_DEFAULT = "false";
private static final String LATENCY_TRACKED_ERRORS_PROPERTY = "latencytrackederrors";
private static final AtomicBoolean LOG_REPORT_CONFIG = new AtomicBoolean(false);
private final String scopeStringCleanup;
private final String scopeStringDelete;
private final String scopeStringInit;
private final String scopeStringInsert;
private final String scopeStringRead;
private final String scopeStringScan;
private final String scopeStringUpdate;
public DBWrapper(final DB db, final Tracer tracer) {
this.db = db;
measurements = Measurements.getMeasurements();
this.tracer = tracer;
final String simple = db.getClass().getSimpleName();
scopeStringCleanup = simple + "#cleanup";
scopeStringDelete = simple + "#delete";
scopeStringInit = simple + "#init";
scopeStringInsert = simple + "#insert";
scopeStringRead = simple + "#read";
scopeStringScan = simple + "#scan";
scopeStringUpdate = simple + "#update";
}
/**
* Set the properties for this DB.
*/
public void setProperties(Properties p) {
db.setProperties(p);
}
/**
* Get the set of properties for this DB.
*/
public Properties getProperties() {
return db.getProperties();
}
/**
* Initialize any state for this DB.
* Called once per DB instance; there is one DB instance per client thread.
*/
public void init() throws DBException {
try (final TraceScope span = tracer.newScope(scopeStringInit)) {
db.init();
this.reportLatencyForEachError = Boolean.parseBoolean(getProperties().
getProperty(REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY,
REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY_DEFAULT));
if (!reportLatencyForEachError) {
String latencyTrackedErrorsProperty = getProperties().getProperty(LATENCY_TRACKED_ERRORS_PROPERTY, null);
if (latencyTrackedErrorsProperty != null) {
this.latencyTrackedErrors = new HashSet<String>(Arrays.asList(
latencyTrackedErrorsProperty.split(",")));
}
}
if (LOG_REPORT_CONFIG.compareAndSet(false, true)) {
System.err.println("DBWrapper: report latency for each error is " +
this.reportLatencyForEachError + " and specific error codes to track" +
" for latency are: " + this.latencyTrackedErrors.toString());
}
}
}
/**
* Cleanup any state for this DB.
* Called once per DB instance; there is one DB instance per client thread.
*/
public void cleanup() throws DBException {
try (final TraceScope span = tracer.newScope(scopeStringCleanup)) {
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.cleanup();
long en = System.nanoTime();
measure("CLEANUP", Status.OK, ist, st, en);
}
}
/**
* Read a record from the database. Each field/value pair from the result
* will be stored in a HashMap.
*
* @param table The name of the table
* @param key The record key of the record to read.
* @param fields The list of fields to read, or null for all of them
* @param result A HashMap of field/value pairs for the result
* @return The result of the operation.
*/
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
try (final TraceScope span = tracer.newScope(scopeStringRead)) {
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
Status res = db.read(table, key, fields, result);
long en = System.nanoTime();
measure("READ", res, ist, st, en);
measurements.reportStatus("READ", res);
return res;
}
}
/**
* Perform a range scan for a set of records in the database.
* Each field/value pair from the result will be stored in a HashMap.
*
* @param table The name of the table
* @param startkey The record key of the first record to read.
* @param recordcount The number of records to read
* @param fields The list of fields to read, or null for all of them
* @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
* @return The result of the operation.
*/
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
try (final TraceScope span = tracer.newScope(scopeStringScan)) {
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
Status res = db.scan(table, startkey, recordcount, fields, result);
long en = System.nanoTime();
measure("SCAN", res, ist, st, en);
measurements.reportStatus("SCAN", res);
return res;
}
}
private void measure(String op, Status result, long intendedStartTimeNanos,
long startTimeNanos, long endTimeNanos) {
String measurementName = op;
if (result == null || !result.isOk()) {
if (this.reportLatencyForEachError ||
this.latencyTrackedErrors.contains(result.getName())) {
measurementName = op + "-" + result.getName();
} else {
measurementName = op + "-FAILED";
}
}
measurements.measure(measurementName,
(int) ((endTimeNanos - startTimeNanos) / 1000));
measurements.measureIntended(measurementName,
(int) ((endTimeNanos - intendedStartTimeNanos) / 1000));
}
/**
* Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key, overwriting any existing values with the same field name.
*
* @param table The name of the table
* @param key The record key of the record to write.
* @param values A HashMap of field/value pairs to update in the record
* @return The result of the operation.
*/
public Status update(String table, String key,
Map<String, ByteIterator> values) {
try (final TraceScope span = tracer.newScope(scopeStringUpdate)) {
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
Status res = db.update(table, key, values);
long en = System.nanoTime();
measure("UPDATE", res, ist, st, en);
measurements.reportStatus("UPDATE", res);
return res;
}
}
/**
* Insert a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified
* record key.
*
* @param table The name of the table
* @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record
* @return The result of the operation.
*/
public Status insert(String table, String key,
Map<String, ByteIterator> values) {
try (final TraceScope span = tracer.newScope(scopeStringInsert)) {
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
Status res = db.insert(table, key, values);
long en = System.nanoTime();
measure("INSERT", res, ist, st, en);
measurements.reportStatus("INSERT", res);
return res;
}
}
/**
* Delete a record from the database.
*
* @param table The name of the table
* @param key The record key of the record to delete.
* @return The result of the operation.
*/
public Status delete(String table, String key) {
try (final TraceScope span = tracer.newScope(scopeStringDelete)) {
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
Status res = db.delete(table, key);
long en = System.nanoTime();
measure("DELETE", res, ist, st, en);
measurements.reportStatus("DELETE", res);
return res;
}
}
}
| 9,369 | 35.889764 | 116 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/CommandLine.java | /**
* Copyright (c) 2010 Yahoo! Inc. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import site.ycsb.workloads.CoreWorkload;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.*;
/**
* A simple command line client to a database, using the appropriate site.ycsb.DB implementation.
*/
public final class CommandLine {
private CommandLine() {
//not used
}
public static final String DEFAULT_DB = "site.ycsb.BasicDB";
public static void usageMessage() {
System.out.println("YCSB Command Line Client");
System.out.println("Usage: java site.ycsb.CommandLine [options]");
System.out.println("Options:");
System.out.println(" -P filename: Specify a property file");
System.out.println(" -p name=value: Specify a property value");
System.out.println(" -db classname: Use a specified DB class (can also set the \"db\" property)");
System.out.println(" -table tablename: Use the table name instead of the default \"" +
CoreWorkload.TABLENAME_PROPERTY_DEFAULT + "\"");
System.out.println();
}
public static void help() {
System.out.println("Commands:");
System.out.println(" read key [field1 field2 ...] - Read a record");
System.out.println(" scan key recordcount [field1 field2 ...] - Scan starting at key");
System.out.println(" insert key name1=value1 [name2=value2 ...] - Insert a new record");
System.out.println(" update key name1=value1 [name2=value2 ...] - Update a record");
System.out.println(" delete key - Delete a record");
System.out.println(" table [tablename] - Get or [set] the name of the table");
System.out.println(" quit - Quit");
}
public static void main(String[] args) {
Properties props = new Properties();
Properties fileprops = new Properties();
parseArguments(args, props, fileprops);
for (Enumeration e = props.propertyNames(); e.hasMoreElements();) {
String prop = (String) e.nextElement();
fileprops.setProperty(prop, props.getProperty(prop));
}
props = fileprops;
System.out.println("YCSB Command Line client");
System.out.println("Type \"help\" for command line help");
System.out.println("Start with \"-help\" for usage info");
String table = props.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT);
//create a DB
String dbname = props.getProperty(Client.DB_PROPERTY, DEFAULT_DB);
ClassLoader classLoader = CommandLine.class.getClassLoader();
DB db = null;
try {
Class dbclass = classLoader.loadClass(dbname);
db = (DB) dbclass.newInstance();
} catch (Exception e) {
e.printStackTrace();
System.exit(0);
}
db.setProperties(props);
try {
db.init();
} catch (DBException e) {
e.printStackTrace();
System.exit(0);
}
System.out.println("Connected.");
//main loop
BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
for (;;) {
//get user input
System.out.print("> ");
String input = null;
try {
input = br.readLine();
} catch (IOException e) {
e.printStackTrace();
System.exit(1);
}
if (input.compareTo("") == 0) {
continue;
}
if (input.compareTo("help") == 0) {
help();
continue;
}
if (input.compareTo("quit") == 0) {
break;
}
String[] tokens = input.split(" ");
long st = System.currentTimeMillis();
//handle commands
if (tokens[0].compareTo("table") == 0) {
handleTable(tokens, table);
} else if (tokens[0].compareTo("read") == 0) {
handleRead(tokens, table, db);
} else if (tokens[0].compareTo("scan") == 0) {
handleScan(tokens, table, db);
} else if (tokens[0].compareTo("update") == 0) {
handleUpdate(tokens, table, db);
} else if (tokens[0].compareTo("insert") == 0) {
handleInsert(tokens, table, db);
} else if (tokens[0].compareTo("delete") == 0) {
handleDelete(tokens, table, db);
} else {
System.out.println("Error: unknown command \"" + tokens[0] + "\"");
}
System.out.println((System.currentTimeMillis() - st) + " ms");
}
}
private static void parseArguments(String[] args, Properties props, Properties fileprops) {
int argindex = 0;
while ((argindex < args.length) && (args[argindex].startsWith("-"))) {
if ((args[argindex].compareTo("-help") == 0) ||
(args[argindex].compareTo("--help") == 0) ||
(args[argindex].compareTo("-?") == 0) ||
(args[argindex].compareTo("--?") == 0)) {
usageMessage();
System.exit(0);
}
if (args[argindex].compareTo("-db") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.exit(0);
}
props.setProperty(Client.DB_PROPERTY, args[argindex]);
argindex++;
} else if (args[argindex].compareTo("-P") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.exit(0);
}
String propfile = args[argindex];
argindex++;
Properties myfileprops = new Properties();
try {
myfileprops.load(new FileInputStream(propfile));
} catch (IOException e) {
System.out.println(e.getMessage());
System.exit(0);
}
for (Enumeration e = myfileprops.propertyNames(); e.hasMoreElements();) {
String prop = (String) e.nextElement();
fileprops.setProperty(prop, myfileprops.getProperty(prop));
}
} else if (args[argindex].compareTo("-p") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.exit(0);
}
int eq = args[argindex].indexOf('=');
if (eq < 0) {
usageMessage();
System.exit(0);
}
String name = args[argindex].substring(0, eq);
String value = args[argindex].substring(eq + 1);
props.put(name, value);
argindex++;
} else if (args[argindex].compareTo("-table") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.exit(0);
}
props.put(CoreWorkload.TABLENAME_PROPERTY, args[argindex]);
argindex++;
} else {
System.out.println("Unknown option " + args[argindex]);
usageMessage();
System.exit(0);
}
if (argindex >= args.length) {
break;
}
}
if (argindex != args.length) {
usageMessage();
System.exit(0);
}
}
private static void handleDelete(String[] tokens, String table, DB db) {
if (tokens.length != 2) {
System.out.println("Error: syntax is \"delete keyname\"");
} else {
Status ret = db.delete(table, tokens[1]);
System.out.println("Return result: " + ret.getName());
}
}
private static void handleInsert(String[] tokens, String table, DB db) {
if (tokens.length < 3) {
System.out.println("Error: syntax is \"insert keyname name1=value1 [name2=value2 ...]\"");
} else {
HashMap<String, ByteIterator> values = new HashMap<>();
for (int i = 2; i < tokens.length; i++) {
String[] nv = tokens[i].split("=");
values.put(nv[0], new StringByteIterator(nv[1]));
}
Status ret = db.insert(table, tokens[1], values);
System.out.println("Result: " + ret.getName());
}
}
private static void handleUpdate(String[] tokens, String table, DB db) {
if (tokens.length < 3) {
System.out.println("Error: syntax is \"update keyname name1=value1 [name2=value2 ...]\"");
} else {
HashMap<String, ByteIterator> values = new HashMap<>();
for (int i = 2; i < tokens.length; i++) {
String[] nv = tokens[i].split("=");
values.put(nv[0], new StringByteIterator(nv[1]));
}
Status ret = db.update(table, tokens[1], values);
System.out.println("Result: " + ret.getName());
}
}
private static void handleScan(String[] tokens, String table, DB db) {
if (tokens.length < 3) {
System.out.println("Error: syntax is \"scan keyname scanlength [field1 field2 ...]\"");
} else {
Set<String> fields = null;
if (tokens.length > 3) {
fields = new HashSet<>();
fields.addAll(Arrays.asList(tokens).subList(3, tokens.length));
}
Vector<HashMap<String, ByteIterator>> results = new Vector<>();
Status ret = db.scan(table, tokens[1], Integer.parseInt(tokens[2]), fields, results);
System.out.println("Result: " + ret.getName());
int record = 0;
if (results.isEmpty()) {
System.out.println("0 records");
} else {
System.out.println("--------------------------------");
}
for (Map<String, ByteIterator> result : results) {
System.out.println("Record " + (record++));
for (Map.Entry<String, ByteIterator> ent : result.entrySet()) {
System.out.println(ent.getKey() + "=" + ent.getValue());
}
System.out.println("--------------------------------");
}
}
}
private static void handleRead(String[] tokens, String table, DB db) {
if (tokens.length == 1) {
System.out.println("Error: syntax is \"read keyname [field1 field2 ...]\"");
} else {
Set<String> fields = null;
if (tokens.length > 2) {
fields = new HashSet<>();
fields.addAll(Arrays.asList(tokens).subList(2, tokens.length));
}
HashMap<String, ByteIterator> result = new HashMap<>();
Status ret = db.read(table, tokens[1], fields, result);
System.out.println("Return code: " + ret.getName());
for (Map.Entry<String, ByteIterator> ent : result.entrySet()) {
System.out.println(ent.getKey() + "=" + ent.getValue());
}
}
}
private static void handleTable(String[] tokens, String table) {
if (tokens.length == 1) {
System.out.println("Using table \"" + table + "\"");
} else if (tokens.length == 2) {
table = tokens[1];
System.out.println("Using table \"" + table + "\"");
} else {
System.out.println("Error: syntax is \"table tablename\"");
}
}
}
| 11,031 | 30.52 | 111 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/WorkloadException.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
/**
* The workload tried to do something bad.
*/
public class WorkloadException extends Exception {
/**
*
*/
private static final long serialVersionUID = 8844396756042772132L;
public WorkloadException(String message) {
super(message);
}
public WorkloadException() {
super();
}
public WorkloadException(String message, Throwable cause) {
super(message, cause);
}
public WorkloadException(Throwable cause) {
super(cause);
}
}
| 1,186 | 24.804348 | 83 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/BasicDB.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.*;
import java.util.Map.Entry;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;
/**
* Basic DB that just prints out the requested operations, instead of doing them against a database.
*/
public class BasicDB extends DB {
public static final String COUNT = "basicdb.count";
public static final String COUNT_DEFAULT = "false";
public static final String VERBOSE = "basicdb.verbose";
public static final String VERBOSE_DEFAULT = "true";
public static final String SIMULATE_DELAY = "basicdb.simulatedelay";
public static final String SIMULATE_DELAY_DEFAULT = "0";
public static final String RANDOMIZE_DELAY = "basicdb.randomizedelay";
public static final String RANDOMIZE_DELAY_DEFAULT = "true";
protected static final Object MUTEX = new Object();
protected static int counter = 0;
protected static Map<Integer, Integer> reads;
protected static Map<Integer, Integer> scans;
protected static Map<Integer, Integer> updates;
protected static Map<Integer, Integer> inserts;
protected static Map<Integer, Integer> deletes;
protected boolean verbose;
protected boolean randomizedelay;
protected int todelay;
protected boolean count;
public BasicDB() {
todelay = 0;
}
protected void delay() {
if (todelay > 0) {
long delayNs;
if (randomizedelay) {
delayNs = TimeUnit.MILLISECONDS.toNanos(ThreadLocalRandom.current().nextInt(todelay));
if (delayNs == 0) {
return;
}
} else {
delayNs = TimeUnit.MILLISECONDS.toNanos(todelay);
}
final long deadline = System.nanoTime() + delayNs;
do {
LockSupport.parkNanos(deadline - System.nanoTime());
} while (System.nanoTime() < deadline && !Thread.interrupted());
}
}
/**
* Initialize any state for this DB.
* Called once per DB instance; there is one DB instance per client thread.
*/
public void init() {
verbose = Boolean.parseBoolean(getProperties().getProperty(VERBOSE, VERBOSE_DEFAULT));
todelay = Integer.parseInt(getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT));
randomizedelay = Boolean.parseBoolean(getProperties().getProperty(RANDOMIZE_DELAY, RANDOMIZE_DELAY_DEFAULT));
count = Boolean.parseBoolean(getProperties().getProperty(COUNT, COUNT_DEFAULT));
if (verbose) {
synchronized (System.out) {
System.out.println("***************** properties *****************");
Properties p = getProperties();
if (p != null) {
for (Enumeration e = p.propertyNames(); e.hasMoreElements();) {
String k = (String) e.nextElement();
System.out.println("\"" + k + "\"=\"" + p.getProperty(k) + "\"");
}
}
System.out.println("**********************************************");
}
}
synchronized (MUTEX) {
if (counter == 0 && count) {
reads = new HashMap<Integer, Integer>();
scans = new HashMap<Integer, Integer>();
updates = new HashMap<Integer, Integer>();
inserts = new HashMap<Integer, Integer>();
deletes = new HashMap<Integer, Integer>();
}
counter++;
}
}
protected static final ThreadLocal<StringBuilder> TL_STRING_BUILDER = new ThreadLocal<StringBuilder>() {
@Override
protected StringBuilder initialValue() {
return new StringBuilder();
}
};
protected static StringBuilder getStringBuilder() {
StringBuilder sb = TL_STRING_BUILDER.get();
sb.setLength(0);
return sb;
}
/**
* Read a record from the database. Each field/value pair from the result will be stored in a HashMap.
*
* @param table The name of the table
* @param key The record key of the record to read.
* @param fields The list of fields to read, or null for all of them
* @param result A HashMap of field/value pairs for the result
* @return Zero on success, a non-zero error code on error
*/
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
delay();
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("READ ").append(table).append(" ").append(key).append(" [a ");
if (fields != null) {
for (String f : fields) {
sb.append(f).append(" ");
}
} else {
sb.append("<all fields>");
}
sb.append("]");
System.out.println(sb);
}
if (count) {
incCounter(reads, hash(table, key, fields));
}
return Status.OK;
}
/**
* Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored
* in a HashMap.
*
* @param table The name of the table
* @param startkey The record key of the first record to read.
* @param recordcount The number of records to read
* @param fields The list of fields to read, or null for all of them
* @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
* @return Zero on success, a non-zero error code on error
*/
public Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
delay();
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("SCAN ").append(table).append(" ").append(startkey).append(" ").append(recordcount).append(" [ ");
if (fields != null) {
for (String f : fields) {
sb.append(f).append(" ");
}
} else {
sb.append("<all fields>");
}
sb.append("]");
System.out.println(sb);
}
if (count) {
incCounter(scans, hash(table, startkey, fields));
}
return Status.OK;
}
/**
* Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key, overwriting any existing values with the same field name.
*
* @param table The name of the table
* @param key The record key of the record to write.
* @param values A HashMap of field/value pairs to update in the record
* @return Zero on success, a non-zero error code on error
*/
public Status update(String table, String key, Map<String, ByteIterator> values) {
delay();
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ ");
if (values != null) {
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" ");
}
}
sb.append("]");
System.out.println(sb);
}
if (count) {
incCounter(updates, hash(table, key, values));
}
return Status.OK;
}
/**
* Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key.
*
* @param table The name of the table
* @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record
* @return Zero on success, a non-zero error code on error
*/
public Status insert(String table, String key, Map<String, ByteIterator> values) {
delay();
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("INSERT ").append(table).append(" ").append(key).append(" [ ");
if (values != null) {
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" ");
}
}
sb.append("]");
System.out.println(sb);
}
if (count) {
incCounter(inserts, hash(table, key, values));
}
return Status.OK;
}
/**
* Delete a record from the database.
*
* @param table The name of the table
* @param key The record key of the record to delete.
* @return Zero on success, a non-zero error code on error
*/
public Status delete(String table, String key) {
delay();
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("DELETE ").append(table).append(" ").append(key);
System.out.println(sb);
}
if (count) {
incCounter(deletes, (table + key).hashCode());
}
return Status.OK;
}
@Override
public void cleanup() {
synchronized (MUTEX) {
int countDown = --counter;
if (count && countDown < 1) {
// TODO - would be nice to call something like:
// Measurements.getMeasurements().oneOffMeasurement("READS", "Uniques", reads.size());
System.out.println("[READS], Uniques, " + reads.size());
System.out.println("[SCANS], Uniques, " + scans.size());
System.out.println("[UPDATES], Uniques, " + updates.size());
System.out.println("[INSERTS], Uniques, " + inserts.size());
System.out.println("[DELETES], Uniques, " + deletes.size());
}
}
}
/**
* Increments the count on the hash in the map.
* @param map A non-null map to sync and use for incrementing.
* @param hash A hash code to increment.
*/
protected void incCounter(final Map<Integer, Integer> map, final int hash) {
synchronized (map) {
Integer ctr = map.get(hash);
if (ctr == null) {
map.put(hash, 1);
} else {
map.put(hash, ctr + 1);
}
}
}
/**
* Hashes the table, key and fields, sorting the fields first for a consistent
* hash.
* Note that this is expensive as we generate a copy of the fields and a string
* buffer to hash on. Hashing on the objects is problematic.
* @param table The user table.
* @param key The key read or scanned.
* @param fields The fields read or scanned.
* @return The hash code.
*/
protected int hash(final String table, final String key, final Set<String> fields) {
if (fields == null) {
return (table + key).hashCode();
}
StringBuilder buf = getStringBuilder().append(table).append(key);
List<String> sorted = new ArrayList<String>(fields);
Collections.sort(sorted);
for (final String field : sorted) {
buf.append(field);
}
return buf.toString().hashCode();
}
/**
* Hashes the table, key and fields, sorting the fields first for a consistent
* hash.
* Note that this is expensive as we generate a copy of the fields and a string
* buffer to hash on. Hashing on the objects is problematic.
* @param table The user table.
* @param key The key read or scanned.
* @param values The values to hash on.
* @return The hash code.
*/
protected int hash(final String table, final String key, final Map<String, ByteIterator> values) {
if (values == null) {
return (table + key).hashCode();
}
final TreeMap<String, ByteIterator> sorted =
new TreeMap<String, ByteIterator>(values);
StringBuilder buf = getStringBuilder().append(table).append(key);
for (final Entry<String, ByteIterator> entry : sorted.entrySet()) {
entry.getValue().reset();
buf.append(entry.getKey())
.append(entry.getValue().toString());
}
return buf.toString().hashCode();
}
/**
* Short test of BasicDB
*/
/*
public static void main(String[] args) {
BasicDB bdb = new BasicDB();
Properties p = new Properties();
p.setProperty("Sky", "Blue");
p.setProperty("Ocean", "Wet");
bdb.setProperties(p);
bdb.init();
HashMap<String, String> fields = new HashMap<String, ByteIterator>();
fields.put("A", new StringByteIterator("X"));
fields.put("B", new StringByteIterator("Y"));
bdb.read("table", "key", null, null);
bdb.insert("table", "key", fields);
fields = new HashMap<String, ByteIterator>();
fields.put("C", new StringByteIterator("Z"));
bdb.update("table", "key", fields);
bdb.delete("table", "key");
}
*/
}
| 12,884 | 31.455919 | 116 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/InputStreamByteIterator.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.io.IOException;
import java.io.InputStream;
/**
* A ByteIterator that iterates through an inputstream of bytes.
*/
public class InputStreamByteIterator extends ByteIterator {
private final long len;
private final InputStream ins;
private long off;
private final boolean resetable;
public InputStreamByteIterator(InputStream ins, long len) {
this.len = len;
this.ins = ins;
off = 0;
resetable = ins.markSupported();
if (resetable) {
ins.mark((int) len);
}
}
@Override
public boolean hasNext() {
return off < len;
}
@Override
public byte nextByte() {
int ret;
try {
ret = ins.read();
} catch (Exception e) {
throw new IllegalStateException(e);
}
if (ret == -1) {
throw new IllegalStateException("Past EOF!");
}
off++;
return (byte) ret;
}
@Override
public long bytesLeft() {
return len - off;
}
@Override
public byte[] toArray() {
int size = (int) bytesLeft();
byte[] bytes = new byte[size];
try {
if (ins.read(bytes) < size) {
throw new IllegalStateException("Past EOF!");
}
} catch (IOException e) {
throw new IllegalStateException(e);
}
off = len;
return bytes;
}
@Override
public void reset() {
if (resetable) {
try {
ins.reset();
ins.mark((int) len);
off = 0;
} catch (IOException e) {
throw new IllegalStateException("Failed to reset the input stream", e);
}
} else {
throw new UnsupportedOperationException();
}
}
}
| 2,312 | 22.845361 | 83 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/DB.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
/**
* A layer for accessing a database to be benchmarked. Each thread in the client
* will be given its own instance of whatever DB class is to be used in the test.
* This class should be constructed using a no-argument constructor, so we can
* load it dynamically. Any argument-based initialization should be
* done by init().
*
* Note that YCSB does not make any use of the return codes returned by this class.
* Instead, it keeps a count of the return values and presents them to the user.
*
* The semantics of methods such as insert, update and delete vary from database
* to database. In particular, operations may or may not be durable once these
* methods commit, and some systems may return 'success' regardless of whether
* or not a tuple with a matching key existed before the call. Rather than dictate
* the exact semantics of these methods, we recommend you either implement them
* to match the database's default semantics, or the semantics of your
* target application. For the sake of comparison between experiments we also
* recommend you explain the semantics you chose when presenting performance results.
*/
public abstract class DB {
/**
* Properties for configuring this DB.
*/
private Properties properties = new Properties();
/**
* Set the properties for this DB.
*/
public void setProperties(Properties p) {
properties = p;
}
/**
* Get the set of properties for this DB.
*/
public Properties getProperties() {
return properties;
}
/**
* Initialize any state for this DB.
* Called once per DB instance; there is one DB instance per client thread.
*/
public void init() throws DBException {
}
/**
* Cleanup any state for this DB.
* Called once per DB instance; there is one DB instance per client thread.
*/
public void cleanup() throws DBException {
}
/**
* Read a record from the database. Each field/value pair from the result will be stored in a HashMap.
*
* @param table The name of the table
* @param key The record key of the record to read.
* @param fields The list of fields to read, or null for all of them
* @param result A HashMap of field/value pairs for the result
* @return The result of the operation.
*/
public abstract Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result);
/**
* Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored
* in a HashMap.
*
* @param table The name of the table
* @param startkey The record key of the first record to read.
* @param recordcount The number of records to read
* @param fields The list of fields to read, or null for all of them
* @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
* @return The result of the operation.
*/
public abstract Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result);
/**
* Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key, overwriting any existing values with the same field name.
*
* @param table The name of the table
* @param key The record key of the record to write.
* @param values A HashMap of field/value pairs to update in the record
* @return The result of the operation.
*/
public abstract Status update(String table, String key, Map<String, ByteIterator> values);
/**
* Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key.
*
* @param table The name of the table
* @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record
* @return The result of the operation.
*/
public abstract Status insert(String table, String key, Map<String, ByteIterator> values);
/**
* Delete a record from the database.
*
* @param table The name of the table
* @param key The record key of the record to delete.
* @return The result of the operation.
*/
public abstract Status delete(String table, String key);
}
| 5,202 | 37.257353 | 116 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/TimeseriesDB.java | /*
* Copyright (c) 2018 YCSB Contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import site.ycsb.generator.Generator;
import site.ycsb.generator.IncrementingPrintableStringGenerator;
import site.ycsb.workloads.TimeSeriesWorkload;
import java.util.*;
import java.util.concurrent.TimeUnit;
/**
* Abstract class to adapt the default ycsb DB interface to Timeseries databases.
* This class is mostly here to be extended by Timeseries dataabases
* originally developed by Andreas Bader in <a href="https://github.com/TSDBBench/YCSB-TS">YCSB-TS</a>.
* <p>
* This class is mostly parsing the workload information passed through the default ycsb interface
* according to the information outlined in {@link TimeSeriesWorkload}.
* It also contains some minor utility methods relevant to Timeseries databases.
* </p>
*
* @implSpec It's vital to call <tt>super.init()</tt> when overwriting the init method
* to correctly initialize the workload-parsing.
*/
public abstract class TimeseriesDB extends DB {
// defaults for downsampling. Basically we ignore it
private static final String DOWNSAMPLING_FUNCTION_PROPERTY_DEFAULT = "NONE";
private static final String DOWNSAMPLING_INTERVAL_PROPERTY_DEFAULT = "0";
// debug property loading
private static final String DEBUG_PROPERTY = "debug";
private static final String DEBUG_PROPERTY_DEFAULT = "false";
// test property loading
private static final String TEST_PROPERTY = "test";
private static final String TEST_PROPERTY_DEFAULT = "false";
// Workload parameters that we need to parse this
protected String timestampKey;
protected String valueKey;
protected String tagPairDelimiter;
protected String queryTimeSpanDelimiter;
protected String deleteDelimiter;
protected TimeUnit timestampUnit;
protected String groupByKey;
protected String downsamplingKey;
protected Integer downsamplingInterval;
protected AggregationOperation downsamplingFunction;
// YCSB-parameters
protected boolean debug;
protected boolean test;
/**
* Initialize any state for this DB.
* Called once per DB instance; there is one DB instance per client thread.
*/
@Override
public void init() throws DBException {
// taken from BasicTSDB
timestampKey = getProperties().getProperty(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY,
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT);
valueKey = getProperties().getProperty(
TimeSeriesWorkload.VALUE_KEY_PROPERTY,
TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT);
tagPairDelimiter = getProperties().getProperty(
TimeSeriesWorkload.PAIR_DELIMITER_PROPERTY,
TimeSeriesWorkload.PAIR_DELIMITER_PROPERTY_DEFAULT);
queryTimeSpanDelimiter = getProperties().getProperty(
TimeSeriesWorkload.QUERY_TIMESPAN_DELIMITER_PROPERTY,
TimeSeriesWorkload.QUERY_TIMESPAN_DELIMITER_PROPERTY_DEFAULT);
deleteDelimiter = getProperties().getProperty(
TimeSeriesWorkload.DELETE_DELIMITER_PROPERTY,
TimeSeriesWorkload.DELETE_DELIMITER_PROPERTY_DEFAULT);
timestampUnit = TimeUnit.valueOf(getProperties().getProperty(
TimeSeriesWorkload.TIMESTAMP_UNITS_PROPERTY,
TimeSeriesWorkload.TIMESTAMP_UNITS_PROPERTY_DEFAULT));
groupByKey = getProperties().getProperty(
TimeSeriesWorkload.GROUPBY_KEY_PROPERTY,
TimeSeriesWorkload.GROUPBY_KEY_PROPERTY_DEFAULT);
downsamplingKey = getProperties().getProperty(
TimeSeriesWorkload.DOWNSAMPLING_KEY_PROPERTY,
TimeSeriesWorkload.DOWNSAMPLING_KEY_PROPERTY_DEFAULT);
downsamplingFunction = TimeseriesDB.AggregationOperation.valueOf(getProperties()
.getProperty(TimeSeriesWorkload.DOWNSAMPLING_FUNCTION_PROPERTY, DOWNSAMPLING_FUNCTION_PROPERTY_DEFAULT));
downsamplingInterval = Integer.valueOf(getProperties()
.getProperty(TimeSeriesWorkload.DOWNSAMPLING_INTERVAL_PROPERTY, DOWNSAMPLING_INTERVAL_PROPERTY_DEFAULT));
test = Boolean.parseBoolean(getProperties().getProperty(TEST_PROPERTY, TEST_PROPERTY_DEFAULT));
debug = Boolean.parseBoolean(getProperties().getProperty(DEBUG_PROPERTY, DEBUG_PROPERTY_DEFAULT));
}
@Override
public final Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
Map<String, List<String>> tagQueries = new HashMap<>();
Long timestamp = null;
for (String field : fields) {
if (field.startsWith(timestampKey)) {
String[] timestampParts = field.split(tagPairDelimiter);
if (timestampParts[1].contains(queryTimeSpanDelimiter)) {
// Since we're looking for a single datapoint, a range of timestamps makes no sense.
// As we cannot throw an exception to bail out here, we return `BAD_REQUEST` instead.
return Status.BAD_REQUEST;
}
timestamp = Long.valueOf(timestampParts[1]);
} else {
String[] queryParts = field.split(tagPairDelimiter);
tagQueries.computeIfAbsent(queryParts[0], k -> new ArrayList<>()).add(queryParts[1]);
}
}
if (timestamp == null) {
return Status.BAD_REQUEST;
}
return read(table, timestamp, tagQueries);
}
/**
* Read a record from the database. Each value from the result will be stored in a HashMap
*
* @param metric The name of the metric
* @param timestamp The timestamp of the record to read.
* @param tags actual tags that were want to receive (can be empty)
* @return Zero on success, a non-zero error code on error or "not found".
*/
protected abstract Status read(String metric, long timestamp, Map<String, List<String>> tags);
/**
* @inheritDoc
* @implNote this method parses the information passed to it and subsequently passes it to the modified
* interface at {@link #scan(String, long, long, Map, AggregationOperation, int, TimeUnit)}
*/
@Override
public final Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
Map<String, List<String>> tagQueries = new HashMap<>();
TimeseriesDB.AggregationOperation aggregationOperation = TimeseriesDB.AggregationOperation.NONE;
Set<String> groupByFields = new HashSet<>();
boolean rangeSet = false;
long start = 0;
long end = 0;
for (String field : fields) {
if (field.startsWith(timestampKey)) {
String[] timestampParts = field.split(tagPairDelimiter);
if (!timestampParts[1].contains(queryTimeSpanDelimiter)) {
// seems like this should be a more elaborate query.
// for now we don't support scanning single timestamps
// TODO: Support Timestamp range queries
return Status.NOT_IMPLEMENTED;
}
String[] rangeParts = timestampParts[1].split(queryTimeSpanDelimiter);
rangeSet = true;
start = Long.valueOf(rangeParts[0]);
end = Long.valueOf(rangeParts[1]);
} else if (field.startsWith(groupByKey)) {
String groupBySpecifier = field.split(tagPairDelimiter)[1];
aggregationOperation = TimeseriesDB.AggregationOperation.valueOf(groupBySpecifier);
} else if (field.startsWith(downsamplingKey)) {
String downsamplingSpec = field.split(tagPairDelimiter)[1];
// apparently that needs to always hold true:
if (!downsamplingSpec.equals(downsamplingFunction.toString() + downsamplingInterval.toString())) {
System.err.print("Downsampling specification for Scan did not match configured downsampling");
return Status.BAD_REQUEST;
}
} else {
String[] queryParts = field.split(tagPairDelimiter);
if (queryParts.length == 1) {
// we should probably warn about this being ignored...
System.err.println("Grouping by arbitrary series is currently not supported");
groupByFields.add(field);
} else {
tagQueries.computeIfAbsent(queryParts[0], k -> new ArrayList<>()).add(queryParts[1]);
}
}
}
if (!rangeSet) {
return Status.BAD_REQUEST;
}
return scan(table, start, end, tagQueries, downsamplingFunction, downsamplingInterval, timestampUnit);
}
/**
* Perform a range scan for a set of records in the database. Each value from the result will be stored in a
* HashMap.
*
* @param metric The name of the metric
* @param startTs The timestamp of the first record to read.
* @param endTs The timestamp of the last record to read.
* @param tags actual tags that were want to receive (can be empty).
* @param aggreg The aggregation operation to perform.
* @param timeValue value for timeUnit for aggregation
* @param timeUnit timeUnit for aggregation
* @return A {@link Status} detailing the outcome of the scan operation.
*/
protected abstract Status scan(String metric, long startTs, long endTs, Map<String, List<String>> tags,
AggregationOperation aggreg, int timeValue, TimeUnit timeUnit);
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
return Status.NOT_IMPLEMENTED;
// not supportable for general TSDBs
// can be explicitly overwritten in inheriting classes
}
@Override
public final Status insert(String table, String key, Map<String, ByteIterator> values) {
NumericByteIterator tsContainer = (NumericByteIterator) values.remove(timestampKey);
NumericByteIterator valueContainer = (NumericByteIterator) values.remove(valueKey);
if (valueContainer.isFloatingPoint()) {
return insert(table, tsContainer.getLong(), valueContainer.getDouble(), values);
} else {
return insert(table, tsContainer.getLong(), valueContainer.getLong(), values);
}
}
/**
* Insert a record into the database. Any tags/tagvalue pairs in the specified tagmap and the given value will be
* written into the record with the specified timestamp.
*
* @param metric The name of the metric
* @param timestamp The timestamp of the record to insert.
* @param value The actual value to insert.
* @param tags A Map of tag/tagvalue pairs to insert as tags
* @return A {@link Status} detailing the outcome of the insert
*/
protected abstract Status insert(String metric, long timestamp, long value, Map<String, ByteIterator> tags);
/**
* Insert a record in the database. Any tags/tagvalue pairs in the specified tagmap and the given value will be
* written into the record with the specified timestamp.
*
* @param metric The name of the metric
* @param timestamp The timestamp of the record to insert.
* @param value actual value to insert
* @param tags A HashMap of tag/tagvalue pairs to insert as tags
* @return A {@link Status} detailing the outcome of the insert
*/
protected abstract Status insert(String metric, long timestamp, double value, Map<String, ByteIterator> tags);
/**
* NOTE: This operation is usually <b>not</b> supported for Time-Series databases.
* Deletion of data is often instead regulated through automatic cleanup and "retention policies" or similar.
*
* @return Status.NOT_IMPLEMENTED or a {@link Status} specifying the outcome of deletion
* in case the operation is supported.
*/
public Status delete(String table, String key) {
return Status.NOT_IMPLEMENTED;
}
/**
* Examines the given {@link Properties} and returns an array containing the Tag Keys
* (basically matching column names for traditional Relational DBs) that are detailed in the workload specification.
* See {@link TimeSeriesWorkload} for how these are generated.
* <p>
* This method is intended to be called during the initialization phase to create a table schema
* for DBMS that require such a schema before values can be inserted (or queried)
*
* @param properties The properties detailing the workload configuration.
* @return An array of strings specifying all allowed TagKeys (or column names)
* except for the "value" and the "timestamp" column name.
* @implSpec WARNING this method must exactly match how tagKeys are generated by the {@link TimeSeriesWorkload},
* otherwise databases requiring this information will most likely break!
*/
protected static String[] getPossibleTagKeys(Properties properties) {
final int tagCount = Integer.parseInt(properties.getProperty(TimeSeriesWorkload.TAG_COUNT_PROPERTY,
TimeSeriesWorkload.TAG_COUNT_PROPERTY_DEFAULT));
final int tagKeylength = Integer.parseInt(properties.getProperty(TimeSeriesWorkload.TAG_KEY_LENGTH_PROPERTY,
TimeSeriesWorkload.TAG_KEY_LENGTH_PROPERTY_DEFAULT));
Generator<String> tagKeyGenerator = new IncrementingPrintableStringGenerator(tagKeylength);
String[] tagNames = new String[tagCount];
for (int i = 0; i < tagCount; i++) {
tagNames[i] = tagKeyGenerator.nextValue();
}
return tagNames;
}
/**
* An enum containing the possible aggregation operations.
* Not all of these operations are required to be supported by implementing classes.
* <p>
* Aggregations are applied when using the <tt>SCAN</tt> operation on a range of timestamps.
* That way the result set is reduced from multiple records into
* a single one or one record for each group specified through <tt>GROUP BY</tt> clauses.
*/
public enum AggregationOperation {
/**
* No aggregation whatsoever. Return the results as a full table
*/
NONE,
/**
* Sum the values of the matching records when calculating the value.
* GroupBy criteria apply where relevant for sub-summing.
*/
SUM,
/**
* Calculate the arithmetic mean over the value across matching records when calculating the value.
* GroupBy criteria apply where relevant for group-targeted averages
*/
AVERAGE,
/**
* Count the number of matching records and return that as value.
* GroupBy criteria apply where relevant.
*/
COUNT,
/**
* Return only the maximum of the matching record values.
* GroupBy criteria apply and result in group-based maxima.
*/
MAX,
/**
* Return only the minimum of the matching record values.
* GroupBy criteria apply and result in group-based minima.
*/
MIN;
}
}
| 14,962 | 43.400593 | 118 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/DBException.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
/**
* Something bad happened while interacting with the database.
*/
public class DBException extends Exception {
/**
*
*/
private static final long serialVersionUID = 6646883591588721475L;
public DBException(String message) {
super(message);
}
public DBException() {
super();
}
public DBException(String message, Throwable cause) {
super(message, cause);
}
public DBException(Throwable cause) {
super(cause);
}
}
| 1,176 | 24.586957 | 83 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/StringByteIterator.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.HashMap;
import java.util.Map;
/**
* A ByteIterator that iterates through a string.
*/
public class StringByteIterator extends ByteIterator {
private String str;
private int off;
/**
* Put all of the entries of one map into the other, converting
* String values into ByteIterators.
*/
public static void putAllAsByteIterators(Map<String, ByteIterator> out, Map<String, String> in) {
for (Map.Entry<String, String> entry : in.entrySet()) {
out.put(entry.getKey(), new StringByteIterator(entry.getValue()));
}
}
/**
* Put all of the entries of one map into the other, converting
* ByteIterator values into Strings.
*/
public static void putAllAsStrings(Map<String, String> out, Map<String, ByteIterator> in) {
for (Map.Entry<String, ByteIterator> entry : in.entrySet()) {
out.put(entry.getKey(), entry.getValue().toString());
}
}
/**
* Create a copy of a map, converting the values from Strings to
* StringByteIterators.
*/
public static Map<String, ByteIterator> getByteIteratorMap(Map<String, String> m) {
HashMap<String, ByteIterator> ret =
new HashMap<String, ByteIterator>();
for (Map.Entry<String, String> entry : m.entrySet()) {
ret.put(entry.getKey(), new StringByteIterator(entry.getValue()));
}
return ret;
}
/**
* Create a copy of a map, converting the values from
* StringByteIterators to Strings.
*/
public static Map<String, String> getStringMap(Map<String, ByteIterator> m) {
HashMap<String, String> ret = new HashMap<String, String>();
for (Map.Entry<String, ByteIterator> entry : m.entrySet()) {
ret.put(entry.getKey(), entry.getValue().toString());
}
return ret;
}
public StringByteIterator(String s) {
this.str = s;
this.off = 0;
}
@Override
public boolean hasNext() {
return off < str.length();
}
@Override
public byte nextByte() {
byte ret = (byte) str.charAt(off);
off++;
return ret;
}
@Override
public long bytesLeft() {
return str.length() - off;
}
@Override
public void reset() {
off = 0;
}
@Override
public byte[] toArray() {
byte[] bytes = new byte[(int) bytesLeft()];
for (int i = 0; i < bytes.length; i++) {
bytes[i] = (byte) str.charAt(off + i);
}
off = str.length();
return bytes;
}
/**
* Specialization of general purpose toString() to avoid unnecessary
* copies.
* <p>
* Creating a new StringByteIterator, then calling toString()
* yields the original String object, and does not perform any copies
* or String conversion operations.
* </p>
*/
@Override
public String toString() {
if (off > 0) {
return super.toString();
} else {
return str;
}
}
}
| 3,517 | 25.651515 | 99 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/measurements/OneMeasurementHistogram.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import java.io.IOException;
import java.text.DecimalFormat;
import java.util.Properties;
/**
* Take measurements and maintain a histogram of a given metric, such as READ LATENCY.
*
*/
public class OneMeasurementHistogram extends OneMeasurement {
public static final String BUCKETS = "histogram.buckets";
public static final String BUCKETS_DEFAULT = "1000";
public static final String VERBOSE_PROPERTY = "measurement.histogram.verbose";
/**
* Specify the range of latencies to track in the histogram.
*/
private final int buckets;
/**
* Groups operations in discrete blocks of 1ms width.
*/
private long[] histogram;
/**
* Counts all operations outside the histogram's range.
*/
private long histogramoverflow;
/**
* The total number of reported operations.
*/
private long operations;
/**
* The sum of each latency measurement over all operations.
* Calculated in ms.
*/
private long totallatency;
/**
* The sum of each latency measurement squared over all operations.
* Used to calculate variance of latency.
* Calculated in ms.
*/
private double totalsquaredlatency;
/**
* Whether or not to emit the histogram buckets.
*/
private final boolean verbose;
//keep a windowed version of these stats for printing status
private long windowoperations;
private long windowtotallatency;
private int min;
private int max;
public OneMeasurementHistogram(String name, Properties props) {
super(name);
buckets = Integer.parseInt(props.getProperty(BUCKETS, BUCKETS_DEFAULT));
verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false)));
histogram = new long[buckets];
histogramoverflow = 0;
operations = 0;
totallatency = 0;
totalsquaredlatency = 0;
windowoperations = 0;
windowtotallatency = 0;
min = -1;
max = -1;
}
/* (non-Javadoc)
* @see site.ycsb.OneMeasurement#measure(int)
*/
public synchronized void measure(int latency) {
//latency reported in us and collected in bucket by ms.
if (latency / 1000 >= buckets) {
histogramoverflow++;
} else {
histogram[latency / 1000]++;
}
operations++;
totallatency += latency;
totalsquaredlatency += ((double) latency) * ((double) latency);
windowoperations++;
windowtotallatency += latency;
if ((min < 0) || (latency < min)) {
min = latency;
}
if ((max < 0) || (latency > max)) {
max = latency;
}
}
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
double mean = totallatency / ((double) operations);
double variance = totalsquaredlatency / ((double) operations) - (mean * mean);
exporter.write(getName(), "Operations", operations);
exporter.write(getName(), "AverageLatency(us)", mean);
exporter.write(getName(), "LatencyVariance(us)", variance);
exporter.write(getName(), "MinLatency(us)", min);
exporter.write(getName(), "MaxLatency(us)", max);
long opcounter=0;
boolean done95th = false;
for (int i = 0; i < buckets; i++) {
opcounter += histogram[i];
if ((!done95th) && (((double) opcounter) / ((double) operations) >= 0.95)) {
exporter.write(getName(), "95thPercentileLatency(us)", i * 1000);
done95th = true;
}
if (((double) opcounter) / ((double) operations) >= 0.99) {
exporter.write(getName(), "99thPercentileLatency(us)", i * 1000);
break;
}
}
exportStatusCounts(exporter);
if (verbose) {
for (int i = 0; i < buckets; i++) {
exporter.write(getName(), Integer.toString(i), histogram[i]);
}
exporter.write(getName(), ">" + buckets, histogramoverflow);
}
}
@Override
public String getSummary() {
if (windowoperations == 0) {
return "";
}
DecimalFormat d = new DecimalFormat("#.##");
double report = ((double) windowtotallatency) / ((double) windowoperations);
windowtotallatency = 0;
windowoperations = 0;
return "[" + getName() + " AverageLatency(us)=" + d.format(report) + "]";
}
}
| 4,938 | 28.57485 | 90 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/measurements/package-info.java | /*
* Copyright (c) 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB measurements package.
*/
package site.ycsb.measurements;
| 733 | 30.913043 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/measurements/OneMeasurementTimeSeries.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import java.io.IOException;
import java.text.DecimalFormat;
import java.util.Properties;
import java.util.Vector;
class SeriesUnit {
/**
* @param time
* @param average
*/
public SeriesUnit(long time, double average) {
this.time = time;
this.average = average;
}
protected final long time;
protected final double average;
}
/**
* A time series measurement of a metric, such as READ LATENCY.
*/
public class OneMeasurementTimeSeries extends OneMeasurement {
/**
* Granularity for time series; measurements will be averaged in chunks of this granularity. Units are milliseconds.
*/
public static final String GRANULARITY = "timeseries.granularity";
public static final String GRANULARITY_DEFAULT = "1000";
private final int granularity;
private final Vector<SeriesUnit> measurements;
private long start = -1;
private long currentunit = -1;
private long count = 0;
private long sum = 0;
private long operations = 0;
private long totallatency = 0;
//keep a windowed version of these stats for printing status
private int windowoperations = 0;
private long windowtotallatency = 0;
private int min = -1;
private int max = -1;
public OneMeasurementTimeSeries(String name, Properties props) {
super(name);
granularity = Integer.parseInt(props.getProperty(GRANULARITY, GRANULARITY_DEFAULT));
measurements = new Vector<>();
}
private synchronized void checkEndOfUnit(boolean forceend) {
long now = System.currentTimeMillis();
if (start < 0) {
currentunit = 0;
start = now;
}
long unit = ((now - start) / granularity) * granularity;
if ((unit > currentunit) || (forceend)) {
double avg = ((double) sum) / ((double) count);
measurements.add(new SeriesUnit(currentunit, avg));
currentunit = unit;
count = 0;
sum = 0;
}
}
@Override
public void measure(int latency) {
checkEndOfUnit(false);
count++;
sum += latency;
totallatency += latency;
operations++;
windowoperations++;
windowtotallatency += latency;
if (latency > max) {
max = latency;
}
if ((latency < min) || (min < 0)) {
min = latency;
}
}
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
checkEndOfUnit(true);
exporter.write(getName(), "Operations", operations);
exporter.write(getName(), "AverageLatency(us)", (((double) totallatency) / ((double) operations)));
exporter.write(getName(), "MinLatency(us)", min);
exporter.write(getName(), "MaxLatency(us)", max);
// TODO: 95th and 99th percentile latency
exportStatusCounts(exporter);
for (SeriesUnit unit : measurements) {
exporter.write(getName(), Long.toString(unit.time), unit.average);
}
}
@Override
public String getSummary() {
if (windowoperations == 0) {
return "";
}
DecimalFormat d = new DecimalFormat("#.##");
double report = ((double) windowtotallatency) / ((double) windowoperations);
windowtotallatency = 0;
windowoperations = 0;
return "[" + getName() + " AverageLatency(us)=" + d.format(report) + "]";
}
}
| 3,977 | 26.061224 | 118 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/measurements/TwoInOneMeasurement.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements;
import site.ycsb.Status;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import java.io.IOException;
/**
* delegates to 2 measurement instances.
*/
public class TwoInOneMeasurement extends OneMeasurement {
private final OneMeasurement thing1, thing2;
public TwoInOneMeasurement(String name, OneMeasurement thing1, OneMeasurement thing2) {
super(name);
this.thing1 = thing1;
this.thing2 = thing2;
}
/**
* No need for synchronization, using CHM to deal with that.
*/
@Override
public void reportStatus(final Status status) {
thing1.reportStatus(status);
}
/**
* It appears latency is reported in micros.
* Using {@link org.HdrHistogram.Recorder} to support concurrent updates to histogram.
*/
@Override
public void measure(int latencyInMicros) {
thing1.measure(latencyInMicros);
thing2.measure(latencyInMicros);
}
/**
* This is called from a main thread, on orderly termination.
*/
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
thing1.exportMeasurements(exporter);
thing2.exportMeasurements(exporter);
}
/**
* This is called periodically from the StatusThread. There's a single StatusThread per Client process.
* We optionally serialize the interval to log on this opportunity.
*
* @see site.ycsb.measurements.OneMeasurement#getSummary()
*/
@Override
public String getSummary() {
return thing1.getSummary() + "\n" + thing2.getSummary();
}
}
| 2,246 | 28.181818 | 105 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/measurements/Measurements.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2020 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements;
import site.ycsb.Status;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import java.io.IOException;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
/**
* Collects latency measurements, and reports them when requested.
*/
public class Measurements {
/**
* All supported measurement types are defined in this enum.
*/
public enum MeasurementType {
HISTOGRAM,
HDRHISTOGRAM,
HDRHISTOGRAM_AND_HISTOGRAM,
HDRHISTOGRAM_AND_RAW,
TIMESERIES,
RAW
}
public static final String MEASUREMENT_TYPE_PROPERTY = "measurementtype";
private static final String MEASUREMENT_TYPE_PROPERTY_DEFAULT = "hdrhistogram";
public static final String MEASUREMENT_INTERVAL = "measurement.interval";
private static final String MEASUREMENT_INTERVAL_DEFAULT = "op";
public static final String MEASUREMENT_TRACK_JVM_PROPERTY = "measurement.trackjvm";
public static final String MEASUREMENT_TRACK_JVM_PROPERTY_DEFAULT = "false";
private static Measurements singleton = null;
private static Properties measurementproperties = null;
public static void setProperties(Properties props) {
measurementproperties = props;
}
/**
* Return the singleton Measurements object.
*/
public static synchronized Measurements getMeasurements() {
if (singleton == null) {
singleton = new Measurements(measurementproperties);
}
return singleton;
}
private final ConcurrentHashMap<String, OneMeasurement> opToMesurementMap;
private final ConcurrentHashMap<String, OneMeasurement> opToIntendedMesurementMap;
private final MeasurementType measurementType;
private final int measurementInterval;
private final Properties props;
/**
* Create a new object with the specified properties.
*/
public Measurements(Properties props) {
opToMesurementMap = new ConcurrentHashMap<>();
opToIntendedMesurementMap = new ConcurrentHashMap<>();
this.props = props;
String mTypeString = this.props.getProperty(MEASUREMENT_TYPE_PROPERTY, MEASUREMENT_TYPE_PROPERTY_DEFAULT);
switch (mTypeString) {
case "histogram":
measurementType = MeasurementType.HISTOGRAM;
break;
case "hdrhistogram":
measurementType = MeasurementType.HDRHISTOGRAM;
break;
case "hdrhistogram+histogram":
measurementType = MeasurementType.HDRHISTOGRAM_AND_HISTOGRAM;
break;
case "hdrhistogram+raw":
measurementType = MeasurementType.HDRHISTOGRAM_AND_RAW;
break;
case "timeseries":
measurementType = MeasurementType.TIMESERIES;
break;
case "raw":
measurementType = MeasurementType.RAW;
break;
default:
throw new IllegalArgumentException("unknown " + MEASUREMENT_TYPE_PROPERTY + "=" + mTypeString);
}
String mIntervalString = this.props.getProperty(MEASUREMENT_INTERVAL, MEASUREMENT_INTERVAL_DEFAULT);
switch (mIntervalString) {
case "op":
measurementInterval = 0;
break;
case "intended":
measurementInterval = 1;
break;
case "both":
measurementInterval = 2;
break;
default:
throw new IllegalArgumentException("unknown " + MEASUREMENT_INTERVAL + "=" + mIntervalString);
}
}
private OneMeasurement constructOneMeasurement(String name) {
switch (measurementType) {
case HISTOGRAM:
return new OneMeasurementHistogram(name, props);
case HDRHISTOGRAM:
return new OneMeasurementHdrHistogram(name, props);
case HDRHISTOGRAM_AND_HISTOGRAM:
return new TwoInOneMeasurement(name,
new OneMeasurementHdrHistogram("Hdr" + name, props),
new OneMeasurementHistogram("Bucket" + name, props));
case HDRHISTOGRAM_AND_RAW:
return new TwoInOneMeasurement(name,
new OneMeasurementHdrHistogram("Hdr" + name, props),
new OneMeasurementRaw("Raw" + name, props));
case TIMESERIES:
return new OneMeasurementTimeSeries(name, props);
case RAW:
return new OneMeasurementRaw(name, props);
default:
throw new AssertionError("Impossible to be here. Dead code reached. Bugs?");
}
}
static class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {
return new StartTimeHolder();
}
};
public void setIntendedStartTimeNs(long time) {
if (measurementInterval == 0) {
return;
}
tlIntendedStartTime.get().time = time;
}
public long getIntendedStartTimeNs() {
if (measurementInterval == 0) {
return 0L;
}
return tlIntendedStartTime.get().startTime();
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measure(String operation, int latency) {
if (measurementInterval == 1) {
return;
}
try {
OneMeasurement m = getOpMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measureIntended(String operation, int latency) {
if (measurementInterval == 0) {
return;
}
try {
OneMeasurement m = getOpIntendedMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
private OneMeasurement getOpMeasurement(String operation) {
OneMeasurement m = opToMesurementMap.get(operation);
if (m == null) {
m = constructOneMeasurement(operation);
OneMeasurement oldM = opToMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
private OneMeasurement getOpIntendedMeasurement(String operation) {
OneMeasurement m = opToIntendedMesurementMap.get(operation);
if (m == null) {
final String name = measurementInterval == 1 ? operation : "Intended-" + operation;
m = constructOneMeasurement(name);
OneMeasurement oldM = opToIntendedMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
/**
* Report a return code for a single DB operation.
*/
public void reportStatus(final String operation, final Status status) {
OneMeasurement m = measurementInterval == 1 ?
getOpIntendedMeasurement(operation) :
getOpMeasurement(operation);
m.reportStatus(status);
}
/**
* Export the current measurements to a suitable format.
*
* @param exporter Exporter representing the type of format to write to.
* @throws IOException Thrown if the export failed.
*/
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
for (OneMeasurement measurement : opToMesurementMap.values()) {
measurement.exportMeasurements(exporter);
}
for (OneMeasurement measurement : opToIntendedMesurementMap.values()) {
measurement.exportMeasurements(exporter);
}
}
/**
* Return a one line summary of the measurements.
*/
public synchronized String getSummary() {
String ret = "";
for (OneMeasurement m : opToMesurementMap.values()) {
ret += m.getSummary() + " ";
}
for (OneMeasurement m : opToIntendedMesurementMap.values()) {
ret += m.getSummary() + " ";
}
return ret;
}
}
| 8,917 | 30.624113 | 116 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/measurements/OneMeasurementRaw.java | /**
* Copyright (c) 2015-2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedList;
import java.util.Properties;
/**
* Record a series of measurements as raw data points without down sampling,
* optionally write to an output file when configured.
*
*/
public class OneMeasurementRaw extends OneMeasurement {
/**
* One raw data point, two fields: timestamp (ms) when the datapoint is
* inserted, and the value.
*/
class RawDataPoint {
private final long timestamp;
private final int value;
public RawDataPoint(int value) {
this.timestamp = System.currentTimeMillis();
this.value = value;
}
public long timeStamp() {
return timestamp;
}
public int value() {
return value;
}
}
class RawDataPointComparator implements Comparator<RawDataPoint> {
@Override
public int compare(RawDataPoint p1, RawDataPoint p2) {
if (p1.value() < p2.value()) {
return -1;
} else if (p1.value() == p2.value()) {
return 0;
} else {
return 1;
}
}
}
/**
* Optionally, user can configure an output file to save the raw data points.
* Default is none, raw results will be written to stdout.
*
*/
public static final String OUTPUT_FILE_PATH = "measurement.raw.output_file";
public static final String OUTPUT_FILE_PATH_DEFAULT = "";
/**
* Optionally, user can request to not output summary stats. This is useful
* if the user chains the raw measurement type behind the HdrHistogram type
* which already outputs summary stats. But even in that case, the user may
* still want this class to compute summary stats for them, especially if
* they want accurate computation of percentiles (because percentils computed
* by histogram classes are still approximations).
*/
public static final String NO_SUMMARY_STATS = "measurement.raw.no_summary";
public static final String NO_SUMMARY_STATS_DEFAULT = "false";
private final PrintStream outputStream;
private boolean noSummaryStats = false;
private LinkedList<RawDataPoint> measurements;
private long totalLatency = 0;
// A window of stats to print summary for at the next getSummary() call.
// It's supposed to be a one line summary, so we will just print count and
// average.
private int windowOperations = 0;
private long windowTotalLatency = 0;
public OneMeasurementRaw(String name, Properties props) {
super(name);
String outputFilePath = props.getProperty(OUTPUT_FILE_PATH, OUTPUT_FILE_PATH_DEFAULT);
if (!outputFilePath.isEmpty()) {
System.out.println("Raw data measurement: will output to result file: " +
outputFilePath);
try {
outputStream = new PrintStream(
new FileOutputStream(outputFilePath, true),
true);
} catch (FileNotFoundException e) {
throw new RuntimeException("Failed to open raw data output file", e);
}
} else {
System.out.println("Raw data measurement: will output to stdout.");
outputStream = System.out;
}
noSummaryStats = Boolean.parseBoolean(props.getProperty(NO_SUMMARY_STATS,
NO_SUMMARY_STATS_DEFAULT));
measurements = new LinkedList<>();
}
@Override
public synchronized void measure(int latency) {
totalLatency += latency;
windowTotalLatency += latency;
windowOperations++;
measurements.add(new RawDataPoint(latency));
}
@Override
public void exportMeasurements(MeasurementsExporter exporter)
throws IOException {
// Output raw data points first then print out a summary of percentiles to
// stdout.
outputStream.println(getName() +
" latency raw data: op, timestamp(ms), latency(us)");
for (RawDataPoint point : measurements) {
outputStream.println(
String.format("%s,%d,%d", getName(), point.timeStamp(),
point.value()));
}
if (outputStream != System.out) {
outputStream.close();
}
int totalOps = measurements.size();
exporter.write(getName(), "Total Operations", totalOps);
if (totalOps > 0 && !noSummaryStats) {
exporter.write(getName(),
"Below is a summary of latency in microseconds:", -1);
exporter.write(getName(), "Average",
(double) totalLatency / (double) totalOps);
Collections.sort(measurements, new RawDataPointComparator());
exporter.write(getName(), "Min", measurements.get(0).value());
exporter.write(
getName(), "Max", measurements.get(totalOps - 1).value());
exporter.write(
getName(), "p1", measurements.get((int) (totalOps * 0.01)).value());
exporter.write(
getName(), "p5", measurements.get((int) (totalOps * 0.05)).value());
exporter.write(
getName(), "p50", measurements.get((int) (totalOps * 0.5)).value());
exporter.write(
getName(), "p90", measurements.get((int) (totalOps * 0.9)).value());
exporter.write(
getName(), "p95", measurements.get((int) (totalOps * 0.95)).value());
exporter.write(
getName(), "p99", measurements.get((int) (totalOps * 0.99)).value());
exporter.write(getName(), "p99.9",
measurements.get((int) (totalOps * 0.999)).value());
exporter.write(getName(), "p99.99",
measurements.get((int) (totalOps * 0.9999)).value());
}
exportStatusCounts(exporter);
}
@Override
public synchronized String getSummary() {
if (windowOperations == 0) {
return "";
}
String toReturn = String.format("%s count: %d, average latency(us): %.2f",
getName(), windowOperations,
(double) windowTotalLatency / (double) windowOperations);
windowTotalLatency = 0;
windowOperations = 0;
return toReturn;
}
}
| 6,695 | 31.192308 | 90 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/measurements/OneMeasurementHdrHistogram.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import org.HdrHistogram.Histogram;
import org.HdrHistogram.HistogramIterationValue;
import org.HdrHistogram.HistogramLogWriter;
import org.HdrHistogram.Recorder;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
/**
* Take measurements and maintain a HdrHistogram of a given metric, such as READ LATENCY.
*
*/
public class OneMeasurementHdrHistogram extends OneMeasurement {
// we need one log per measurement histogram
private final PrintStream log;
private final HistogramLogWriter histogramLogWriter;
private final Recorder histogram;
private Histogram totalHistogram;
/**
* The name of the property for deciding what percentile values to output.
*/
public static final String PERCENTILES_PROPERTY = "hdrhistogram.percentiles";
/**
* The default value for the hdrhistogram.percentiles property.
*/
public static final String PERCENTILES_PROPERTY_DEFAULT = "95,99";
/**
* The name of the property for determining if we should print out the buckets.
*/
public static final String VERBOSE_PROPERTY = "measurement.histogram.verbose";
/**
* Whether or not to emit the histogram buckets.
*/
private final boolean verbose;
private final List<Double> percentiles;
public OneMeasurementHdrHistogram(String name, Properties props) {
super(name);
percentiles = getPercentileValues(props.getProperty(PERCENTILES_PROPERTY, PERCENTILES_PROPERTY_DEFAULT));
verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false)));
boolean shouldLog = Boolean.parseBoolean(props.getProperty("hdrhistogram.fileoutput", "false"));
if (!shouldLog) {
log = null;
histogramLogWriter = null;
} else {
try {
final String hdrOutputFilename = props.getProperty("hdrhistogram.output.path", "") + name + ".hdr";
log = new PrintStream(new FileOutputStream(hdrOutputFilename), false);
} catch (FileNotFoundException e) {
throw new RuntimeException("Failed to open hdr histogram output file", e);
}
histogramLogWriter = new HistogramLogWriter(log);
histogramLogWriter.outputComment("[Logging for: " + name + "]");
histogramLogWriter.outputLogFormatVersion();
long now = System.currentTimeMillis();
histogramLogWriter.outputStartTime(now);
histogramLogWriter.setBaseTime(now);
histogramLogWriter.outputLegend();
}
histogram = new Recorder(3);
}
/**
* It appears latency is reported in micros.
* Using {@link Recorder} to support concurrent updates to histogram.
*/
public void measure(int latencyInMicros) {
histogram.recordValue(latencyInMicros);
}
/**
* This is called from a main thread, on orderly termination.
*/
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
// accumulate the last interval which was not caught by status thread
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
// we can close now
log.close();
}
exporter.write(getName(), "Operations", totalHistogram.getTotalCount());
exporter.write(getName(), "AverageLatency(us)", totalHistogram.getMean());
exporter.write(getName(), "MinLatency(us)", totalHistogram.getMinValue());
exporter.write(getName(), "MaxLatency(us)", totalHistogram.getMaxValue());
for (Double percentile : percentiles) {
exporter.write(getName(), ordinal(percentile) + "PercentileLatency(us)",
totalHistogram.getValueAtPercentile(percentile));
}
exportStatusCounts(exporter);
// also export totalHistogram
if (verbose) {
for (HistogramIterationValue v : totalHistogram.recordedValues()) {
int value;
if (v.getValueIteratedTo() > (long)Integer.MAX_VALUE) {
value = Integer.MAX_VALUE;
} else {
value = (int)v.getValueIteratedTo();
}
exporter.write(getName(), Integer.toString(value), (double)v.getCountAtValueIteratedTo());
}
}
}
/**
* This is called periodically from the StatusThread. There's a single
* StatusThread per Client process. We optionally serialize the interval to
* log on this opportunity.
*
* @see site.ycsb.measurements.OneMeasurement#getSummary()
*/
@Override
public String getSummary() {
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
// we use the summary interval as the histogram file interval.
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
}
DecimalFormat d = new DecimalFormat("#.##");
return "[" + getName() + ": Count=" + intervalHistogram.getTotalCount() + ", Max="
+ intervalHistogram.getMaxValue() + ", Min=" + intervalHistogram.getMinValue() + ", Avg="
+ d.format(intervalHistogram.getMean()) + ", 90=" + d.format(intervalHistogram.getValueAtPercentile(90))
+ ", 99=" + d.format(intervalHistogram.getValueAtPercentile(99)) + ", 99.9="
+ d.format(intervalHistogram.getValueAtPercentile(99.9)) + ", 99.99="
+ d.format(intervalHistogram.getValueAtPercentile(99.99)) + "]";
}
private Histogram getIntervalHistogramAndAccumulate() {
Histogram intervalHistogram = histogram.getIntervalHistogram();
// add this to the total time histogram.
if (totalHistogram == null) {
totalHistogram = intervalHistogram;
} else {
totalHistogram.add(intervalHistogram);
}
return intervalHistogram;
}
/**
* Helper method to parse the given percentile value string.
*
* @param percentileString - comma delimited string of Integer values
* @return An Integer List of percentile values
*/
private List<Double> getPercentileValues(String percentileString) {
List<Double> percentileValues = new ArrayList<>();
try {
for (String rawPercentile : percentileString.split(",")) {
percentileValues.add(Double.parseDouble(rawPercentile));
}
} catch (Exception e) {
// If the given hdrhistogram.percentiles value is unreadable for whatever reason,
// then calculate and return the default set.
System.err.println("[WARN] Couldn't read " + PERCENTILES_PROPERTY + " value: '" + percentileString +
"', the default of '" + PERCENTILES_PROPERTY_DEFAULT + "' will be used.");
e.printStackTrace();
return getPercentileValues(PERCENTILES_PROPERTY_DEFAULT);
}
return percentileValues;
}
/**
* Helper method to find the ordinal of any number. eg 1 -> 1st
* @param i number
* @return ordinal string
*/
private String ordinal(Double i) {
String[] suffixes = new String[]{"th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th"};
Integer j = i.intValue();
if (i % 1 == 0) {
switch (j % 100) {
case 11:
case 12:
case 13:
return j + "th";
default:
return j + suffixes[j % 10];
}
} else {
return i.toString();
}
}
}
| 8,068 | 34.862222 | 112 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/measurements/OneMeasurement.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements;
import site.ycsb.Status;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
/**
* A single measured metric (such as READ LATENCY).
*/
public abstract class OneMeasurement {
private final String name;
private final ConcurrentHashMap<Status, AtomicInteger> returncodes;
public String getName() {
return name;
}
/**
* @param name measurement name
*/
public OneMeasurement(String name) {
this.name = name;
this.returncodes = new ConcurrentHashMap<>();
}
public abstract void measure(int latency);
public abstract String getSummary();
/**
* No need for synchronization, using CHM to deal with that.
*/
public void reportStatus(Status status) {
AtomicInteger counter = returncodes.get(status);
if (counter == null) {
counter = new AtomicInteger();
AtomicInteger other = returncodes.putIfAbsent(status, counter);
if (other != null) {
counter = other;
}
}
counter.incrementAndGet();
}
/**
* Export the current measurements to a suitable format.
*
* @param exporter Exporter representing the type of format to write to.
* @throws IOException Thrown if the export failed.
*/
public abstract void exportMeasurements(MeasurementsExporter exporter) throws IOException;
protected final void exportStatusCounts(MeasurementsExporter exporter) throws IOException {
for (Map.Entry<Status, AtomicInteger> entry : returncodes.entrySet()) {
exporter.write(getName(), "Return=" + entry.getKey().getName(), entry.getValue().get());
}
}
}
| 2,445 | 28.46988 | 94 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/measurements/exporter/package-info.java | /*
* Copyright (c) 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB measurements.exporter package.
*/
package site.ycsb.measurements.exporter;
| 751 | 31.695652 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/measurements/exporter/JSONArrayMeasurementsExporter.java | /**
* Copyright (c) 2015-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements.exporter;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.util.DefaultPrettyPrinter;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
/**
* Export measurements into a machine readable JSON Array of measurement objects.
*/
public class JSONArrayMeasurementsExporter implements MeasurementsExporter {
private final JsonFactory factory = new JsonFactory();
private JsonGenerator g;
public JSONArrayMeasurementsExporter(OutputStream os) throws IOException {
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(os));
g = factory.createJsonGenerator(bw);
g.setPrettyPrinter(new DefaultPrettyPrinter());
g.writeStartArray();
}
public void write(String metric, String measurement, int i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, long i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, double d) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", d);
g.writeEndObject();
}
public void close() throws IOException {
if (g != null) {
g.writeEndArray();
g.close();
}
}
}
| 2,448 | 32.547945 | 85 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/measurements/exporter/TextMeasurementsExporter.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements.exporter;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
/**
* Write human readable text. Tries to emulate the previous print report method.
*/
public class TextMeasurementsExporter implements MeasurementsExporter {
private final BufferedWriter bw;
public TextMeasurementsExporter(OutputStream os) {
this.bw = new BufferedWriter(new OutputStreamWriter(os));
}
public void write(String metric, String measurement, int i) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + i);
bw.newLine();
}
public void write(String metric, String measurement, long i) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + i);
bw.newLine();
}
public void write(String metric, String measurement, double d) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + d);
bw.newLine();
}
public void close() throws IOException {
this.bw.close();
}
}
| 1,750 | 32.037736 | 85 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/measurements/exporter/JSONMeasurementsExporter.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements.exporter;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.util.DefaultPrettyPrinter;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
/**
* Export measurements into a machine readable JSON file.
*/
public class JSONMeasurementsExporter implements MeasurementsExporter {
private final JsonFactory factory = new JsonFactory();
private JsonGenerator g;
public JSONMeasurementsExporter(OutputStream os) throws IOException {
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(os));
g = factory.createJsonGenerator(bw);
g.setPrettyPrinter(new DefaultPrettyPrinter());
}
public void write(String metric, String measurement, int i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, long i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, double d) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", d);
g.writeEndObject();
}
public void close() throws IOException {
if (g != null) {
g.close();
}
}
}
| 2,367 | 31 | 85 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/measurements/exporter/MeasurementsExporter.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements.exporter;
import java.io.Closeable;
import java.io.IOException;
/**
* Used to export the collected measurements into a useful format, for example
* human readable text or machine readable JSON.
*/
public interface MeasurementsExporter extends Closeable {
/**
* Write a measurement to the exported format.
*
* @param metric Metric name, for example "READ LATENCY".
* @param measurement Measurement name, for example "Average latency".
* @param i Measurement to write.
* @throws IOException if writing failed
*/
void write(String metric, String measurement, int i) throws IOException;
/**
* Write a measurement to the exported format.
*
* @param metric Metric name, for example "READ LATENCY".
* @param measurement Measurement name, for example "Average latency".
* @param i Measurement to write.
* @throws IOException if writing failed
*/
void write(String metric, String measurement, long i) throws IOException;
/**
* Write a measurement to the exported format.
*
* @param metric Metric name, for example "READ LATENCY".
* @param measurement Measurement name, for example "Average latency".
* @param d Measurement to write.
* @throws IOException if writing failed
*/
void write(String metric, String measurement, double d) throws IOException;
}
| 2,051 | 35 | 83 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/workloads/TimeSeriesWorkload.java | /**
* Copyright (c) 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.workloads;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.TreeMap;
import java.util.Vector;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import site.ycsb.ByteIterator;
import site.ycsb.Client;
import site.ycsb.DB;
import site.ycsb.NumericByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.Utils;
import site.ycsb.Workload;
import site.ycsb.WorkloadException;
import site.ycsb.generator.DiscreteGenerator;
import site.ycsb.generator.Generator;
import site.ycsb.generator.HotspotIntegerGenerator;
import site.ycsb.generator.IncrementingPrintableStringGenerator;
import site.ycsb.generator.NumberGenerator;
import site.ycsb.generator.RandomDiscreteTimestampGenerator;
import site.ycsb.generator.ScrambledZipfianGenerator;
import site.ycsb.generator.SequentialGenerator;
import site.ycsb.generator.UniformLongGenerator;
import site.ycsb.generator.UnixEpochTimestampGenerator;
import site.ycsb.generator.ZipfianGenerator;
import site.ycsb.measurements.Measurements;
/**
* A specialized workload dealing with time series data, i.e. series of discreet
* events associated with timestamps and identifiers. For this workload, identities
* consist of a {@link String} <b>key</b> and a set of {@link String} <b>tag key/value</b>
* pairs.
* <p>
* For example:
* <table border="1">
* <tr><th>Time Series Key</th><th>Tag Keys/Values</th><th>1483228800</th><th>1483228860</th><th>1483228920</th></tr>
* <tr><td>AA</td><td>AA=AA, AB=AA</td><td>42.5</td><td>1.0</td><td>85.9</td></tr>
* <tr><td>AA</td><td>AA=AA, AB=AB</td><td>-9.4</td><td>76.9</td><td>0.18</td></tr>
* <tr><td>AB</td><td>AA=AA, AB=AA</td><td>-93.0</td><td>57.1</td><td>-63.8</td></tr>
* <tr><td>AB</td><td>AA=AA, AB=AB</td><td>7.6</td><td>56.1</td><td>-0.3</td></tr>
* </table>
* <p>
* This table shows four time series with 3 measurements at three different timestamps.
* Keys, tags, timestamps and values (numeric only at this time) are generated by
* this workload. For details on properties and behavior, see the
* {@code workloads/tsworkload_template} file. The Javadocs will focus on implementation
* and how {@link DB} clients can parse the workload.
* <p>
* In order to avoid having existing DB implementations implement a brand new interface
* this workload uses the existing APIs to encode a few special values that can be parsed
* by the client. The special values include the timestamp, numeric value and some
* query (read or scan) parameters. As an example on how to parse the fields, see
* {@link BasicTSDB}.
* <p>
* <b>Timestamps</b>
* <p>
* Timestamps are presented as Unix Epoch values in units of {@link TimeUnit#SECONDS},
* {@link TimeUnit#MILLISECONDS} or {@link TimeUnit#NANOSECONDS} based on the
* {@code timestampunits} property. For calls to {@link DB#insert(String, String, java.util.Map)}
* and {@link DB#update(String, String, java.util.Map)}, the timestamp is added to the
* {@code values} map encoded in a {@link NumericByteIterator} with the key defined
* in the {@code timestampkey} property (defaulting to "YCSBTS"). To pull out the timestamp
* when iterating over the values map, cast the {@link ByteIterator} to a
* {@link NumericByteIterator} and call {@link NumericByteIterator#getLong()}.
* <p>
* Note that for calls to {@link DB#update(String, String, java.util.Map)}, timestamps
* earlier than the timestamp generator's timestamp will be choosen at random to
* mimic a lambda architecture or old job re-reporting some data.
* <p>
* For calls to {@link DB#read(String, String, java.util.Set, java.util.Map)} and
* {@link DB#scan(String, String, int, java.util.Set, Vector)}, timestamps
* are encoded in a {@link StringByteIterator} in a key/value format with the
* {@code tagpairdelimiter} separator. E.g {@code YCSBTS=1483228800}. If {@code querytimespan}
* has been set to a positive value then the value will include a range with the
* starting (oldest) timestamp followed by the {@code querytimespandelimiter} separator
* and the ending (most recent) timestamp. E.g. {@code YCSBTS=1483228800-1483228920}.
* <p>
* For calls to {@link DB#delete(String, String)}, encoding is the same as reads and
* scans but key/value pairs are separated by the {@code deletedelimiter} property value.
* <p>
* By default, the starting timestamp is the current system time without any rounding.
* All timestamps are then offsets from that starting value.
* <p>
* <b>Values</b>
* <p>
* Similar to timestamps, values are encoded in {@link NumericByteIterator}s and stored
* in the values map with the key defined in {@code valuekey} (defaulting to "YCSBV").
* Values can either be 64 bit signed {@link long}s or double precision {@link double}s
* depending on the {@code valuetype} or {@code dataintegrity} properties. When parsing
* out the value, always call {@link NumericByteIterator#isFloatingPoint()} to determine
* whether or not to call {@link NumericByteIterator#getDouble()} (true) or
* {@link NumericByteIterator#getLong()} (false).
* <p>
* When {@code dataintegrity} is set to true, then the value is always set to a
* 64 bit signed integer which is the Java hash code of the concatenation of the
* key and map of values (sorted on the map keys and skipping the timestamp and value
* entries) OR'd with the timestamp of the data point. See
* {@link #validationFunction(String, long, TreeMap)} for the implementation.
* <p>
* <b>Keys and Tags</b>
* <p>
* As mentioned, the workload generates strings for the keys and tags. On initialization
* three string generators are created using the {@link IncrementingPrintableStringGenerator}
* implementation. Then the generators fill three arrays with values based on the
* number of keys, the number of tags and the cardinality of each tag key/value pair.
* This implementation gives us time series like the example table where every string
* starts at something like "AA" (depending on the length of keys, tag keys and tag values)
* and continuing to "ZZ" wherein they rollover back to "AA".
* <p>
* Each time series must have a unique set of tag keys, i.e. the key "AA" cannot appear
* more than once per time series. If the workload is configured for four tags with a
* tag key length of 2, the keys would be "AA", "AB", "AC" and "AD".
* <p>
* Each tag key is then associated with a tag value. Tag values may appear more than once
* in each time series. E.g. time series will usually start with the tags "AA=AA",
* "AB=AA", "AC=AA" and "AD=AA". The {@code tagcardinality} property determines how many
* unique values will be generated per tag key. In the example table above, the
* {@code tagcardinality} property would have been set to {@code 1,2} meaning tag
* key "AA" would always have the tag value "AA" given a cardinality of 1. However
* tag key "AB" would have values "AA" and "AB" due to a cardinality of 2. This
* cardinality map, along with the number of unique time series keys determines how
* many unique time series are generated for the workload. Tag values share a common
* array of generated strings to save on memory.
* <p>
* <b>Operation Order</b>
* <p>
* The default behavior of the workload (for inserts and updates) is to generate a
* value for each time series for a given timestamp before incrementing to the next
* timestamp and writing values. This is an ideal workload and some time series
* databases are designed for this behavior. However in the real-world events will
* arrive grouped close to the current system time with a number of events being
* delayed, hence their timestamps are further in the past. The {@code delayedseries}
* property determines the percentage of time series that are delayed by up to
* {@code delayedintervals} intervals. E.g. setting this value to 0.05 means that
* 5% of the time series will be written with timestamps earlier than the timestamp
* generator's current time.
* </p>
* <b>Reads and Scans</b>
* <p>
* For benchmarking queries, some common tasks implemented by almost every time series
* data base are available and are passed in the fields {@link Set}:
* <p>
* <b>GroupBy</b> - A common operation is to aggregate multiple time series into a
* single time series via common parameters. For example, a user may want to see the
* total network traffic in a data center so they'll issue a SQL query like:
* <code>SELECT value FROM timeseriesdb GROUP BY datacenter ORDER BY SUM(value);</code>
* If the {@code groupbyfunction} has been set to a group by function, then the fields
* will contain a key/value pair with the key set in {@code groupbykey}. E.g.
* {@code YCSBGB=SUM}.
* <p>
* Additionally with grouping enabled, fields on tag keys where group bys should
* occur will only have the key defined and will not have a value or delimiter. E.g.
* if grouping on tag key "AA", the field will contain {@code AA} instead of {@code AA=AB}.
* <p>
* <b>Downsampling</b> - Another common operation is to reduce the resolution of the
* queried time series when fetching a wide time range of data so fewer data points
* are returned. For example, a user may fetch a week of data but if the data is
* recorded on a 1 second interval, that would be over 600k data points so they
* may ask for a 1 hour downsampling (also called bucketing) wherein every hour, all
* of the data points for a "bucket" are aggregated into a single value.
* <p>
* To enable downsampling, the {@code downsamplingfunction} property must be set to
* a supported function such as "SUM" and the {@code downsamplinginterval} must be
* set to a valid time interval with the same units as {@code timestampunits}, e.g.
* "3600" which would create 1 hour buckets if the time units were set to seconds.
* With downsampling, query fields will include a key/value pair with
* {@code downsamplingkey} as the key (defaulting to "YCSBDS") and the value being
* a concatenation of {@code downsamplingfunction} and {@code downsamplinginterval},
* for example {@code YCSBDS=SUM60}.
* <p>
* <b>Timestamps</b> - For every read, a random timestamp is selected from the interval
* set. If {@code querytimespan} has been set to a positive value, then the configured
* query time interval is added to the selected timestamp so the read passes the DB
* a range of times. Note that during the run phase, if no data was previously loaded,
* or if there are more {@code recordcount}s set for the run phase, reads may be sent
* to the DB with timestamps that are beyond the written data time range (or even the
* system clock of the DB).
* <p>
* <b>Deletes</b>
* <p>
* Because the delete API only accepts a single key, a full key and tag key/value
* pair map is flattened into a single string for parsing by the database. Common
* workloads include deleting a single time series (wherein all tag key and values are
* defined), deleting all series containing a tag key and value or deleting all of the
* time series sharing a common time series key.
* <p>
* Right now the workload supports deletes with a key and for time series tag key/value
* pairs or a key with tags and a group by on one or more tags (meaning, delete all of
* the series with any value for the given tag key). The parameters are collapsed into
* a single string delimited with the character in the {@code deletedelimiter} property.
* For example, a delete request may look like: {@code AA:AA=AA:AA=AB} to delete the
* first time series in the table above.
* <p>
* <b>Threads</b>
* <p>
* For a multi-threaded execution, the number of time series keys set via the
* {@code fieldcount} property, must be greater than or equal to the number of
* threads set via {@code threads}. This is due to each thread choosing a subset
* of the total number of time series keys and being responsible for writing values
* for each time series containing those keys at each timestamp. Thus each thread
* will have it's own timestamp generator, incrementing each time every time series
* it is responsible for has had a value written.
* <p>
* Each thread may, however, issue reads and scans for any time series in the
* complete set.
* <p>
* <b>Sparsity</b>
* <p>
* By default, during loads, every time series will have a data point written at every
* time stamp in the interval set. This is common in workloads where a sensor writes
* a value at regular intervals. However some time series are only reported under
* certain conditions.
* <p>
* For example, a counter may track the number of errors over a
* time period for a web service and only report when the value is greater than 1.
* Or a time series may include tags such as a user ID and IP address when a request
* arrives at the web service and only report values when that combination is seen.
* This means the timeseries will <i>not</i> have a value at every timestamp and in
* some cases there may be only a single value!
* <p>
* This workload has a {@code sparsity} parameter that can choose how often a
* time series should record a value. The default value of 0.0 means every series
* will get a value at every timestamp. A value of 0.95 will mean that for each
* series, only 5% of the timestamps in the interval will have a value. The distribution
* of values is random.
* <p>
* <b>Notes/Warnings</b>
* <p>
* <ul>
* <li>Because time series keys and tag key/values are generated and stored in memory,
* be careful of setting the cardinality too high for the JVM's heap.</li>
* <li>When running for data integrity, a number of settings are incompatible and will
* throw errors. Check the error messages for details.</li>
* <li>Databases that support keys only and can't store tags should order and then
* collapse the tag values using a delimiter. For example the series in the example
* table at the top could be written as:
* <ul>
* <li>{@code AA.AA.AA}</li>
* <li>{@code AA.AA.AB}</li>
* <li>{@code AB.AA.AA}</li>
* <li>{@code AB.AA.AB}</li>
* </ul></li>
* </ul>
* <p>
* <b>TODOs</b>
* <p>
* <ul>
* <li>Support random time intervals. E.g. some series write every second, others every
* 60 seconds.</li>
* <li>Support random time series cardinality. Right now every series has the same
* cardinality.</li>
* <li>Truly random timetamps per time series. We could use bitmaps to determine if
* a series has had a value written for a given timestamp. Right now all of the series
* are in sync time-wise.</li>
* <li>Possibly a real-time load where values are written with the current system time.
* It's more of a bulk-loading operation now.</li>
* </ul>
*/
public class TimeSeriesWorkload extends Workload {
/**
* The types of values written to the timeseries store.
*/
public enum ValueType {
INTEGERS("integers"),
FLOATS("floats"),
MIXED("mixednumbers");
protected final String name;
ValueType(final String name) {
this.name = name;
}
public static ValueType fromString(final String name) {
for (final ValueType type : ValueType.values()) {
if (type.name.equalsIgnoreCase(name)) {
return type;
}
}
throw new IllegalArgumentException("Unrecognized type: " + name);
}
}
/** Name and default value for the timestamp key property. */
public static final String TIMESTAMP_KEY_PROPERTY = "timestampkey";
public static final String TIMESTAMP_KEY_PROPERTY_DEFAULT = "YCSBTS";
/** Name and default value for the value key property. */
public static final String VALUE_KEY_PROPERTY = "valuekey";
public static final String VALUE_KEY_PROPERTY_DEFAULT = "YCSBV";
/** Name and default value for the timestamp interval property. */
public static final String TIMESTAMP_INTERVAL_PROPERTY = "timestampinterval";
public static final String TIMESTAMP_INTERVAL_PROPERTY_DEFAULT = "60";
/** Name and default value for the timestamp units property. */
public static final String TIMESTAMP_UNITS_PROPERTY = "timestampunits";
public static final String TIMESTAMP_UNITS_PROPERTY_DEFAULT = "SECONDS";
/** Name and default value for the number of tags property. */
public static final String TAG_COUNT_PROPERTY = "tagcount";
public static final String TAG_COUNT_PROPERTY_DEFAULT = "4";
/** Name and default value for the tag value cardinality map property. */
public static final String TAG_CARDINALITY_PROPERTY = "tagcardinality";
public static final String TAG_CARDINALITY_PROPERTY_DEFAULT = "1, 2, 4, 8";
/** Name and default value for the tag key length property. */
public static final String TAG_KEY_LENGTH_PROPERTY = "tagkeylength";
public static final String TAG_KEY_LENGTH_PROPERTY_DEFAULT = "8";
/** Name and default value for the tag value length property. */
public static final String TAG_VALUE_LENGTH_PROPERTY = "tagvaluelength";
public static final String TAG_VALUE_LENGTH_PROPERTY_DEFAULT = "8";
/** Name and default value for the tag pair delimiter property. */
public static final String PAIR_DELIMITER_PROPERTY = "tagpairdelimiter";
public static final String PAIR_DELIMITER_PROPERTY_DEFAULT = "=";
/** Name and default value for the delete string delimiter property. */
public static final String DELETE_DELIMITER_PROPERTY = "deletedelimiter";
public static final String DELETE_DELIMITER_PROPERTY_DEFAULT = ":";
/** Name and default value for the random timestamp write order property. */
public static final String RANDOMIZE_TIMESTAMP_ORDER_PROPERTY = "randomwritetimestamporder";
public static final String RANDOMIZE_TIMESTAMP_ORDER_PROPERTY_DEFAULT = "false";
/** Name and default value for the random time series write order property. */
public static final String RANDOMIZE_TIMESERIES_ORDER_PROPERTY = "randomtimeseriesorder";
public static final String RANDOMIZE_TIMESERIES_ORDER_PROPERTY_DEFAULT = "true";
/** Name and default value for the value types property. */
public static final String VALUE_TYPE_PROPERTY = "valuetype";
public static final String VALUE_TYPE_PROPERTY_DEFAULT = "floats";
/** Name and default value for the sparsity property. */
public static final String SPARSITY_PROPERTY = "sparsity";
public static final String SPARSITY_PROPERTY_DEFAULT = "0.00";
/** Name and default value for the delayed series percentage property. */
public static final String DELAYED_SERIES_PROPERTY = "delayedseries";
public static final String DELAYED_SERIES_PROPERTY_DEFAULT = "0.10";
/** Name and default value for the delayed series intervals property. */
public static final String DELAYED_INTERVALS_PROPERTY = "delayedintervals";
public static final String DELAYED_INTERVALS_PROPERTY_DEFAULT = "5";
/** Name and default value for the query time span property. */
public static final String QUERY_TIMESPAN_PROPERTY = "querytimespan";
public static final String QUERY_TIMESPAN_PROPERTY_DEFAULT = "0";
/** Name and default value for the randomized query time span property. */
public static final String QUERY_RANDOM_TIMESPAN_PROPERTY = "queryrandomtimespan";
public static final String QUERY_RANDOM_TIMESPAN_PROPERTY_DEFAULT = "false";
/** Name and default value for the query time stamp delimiter property. */
public static final String QUERY_TIMESPAN_DELIMITER_PROPERTY = "querytimespandelimiter";
public static final String QUERY_TIMESPAN_DELIMITER_PROPERTY_DEFAULT = ",";
/** Name and default value for the group-by key property. */
public static final String GROUPBY_KEY_PROPERTY = "groupbykey";
public static final String GROUPBY_KEY_PROPERTY_DEFAULT = "YCSBGB";
/** Name and default value for the group-by function property. */
public static final String GROUPBY_PROPERTY = "groupbyfunction";
/** Name and default value for the group-by key map property. */
public static final String GROUPBY_KEYS_PROPERTY = "groupbykeys";
/** Name and default value for the downsampling key property. */
public static final String DOWNSAMPLING_KEY_PROPERTY = "downsamplingkey";
public static final String DOWNSAMPLING_KEY_PROPERTY_DEFAULT = "YCSBDS";
/** Name and default value for the downsampling function property. */
public static final String DOWNSAMPLING_FUNCTION_PROPERTY = "downsamplingfunction";
/** Name and default value for the downsampling interval property. */
public static final String DOWNSAMPLING_INTERVAL_PROPERTY = "downsamplinginterval";
/** The properties to pull settings from. */
protected Properties properties;
/** Generators for keys, tag keys and tag values. */
protected Generator<String> keyGenerator;
protected Generator<String> tagKeyGenerator;
protected Generator<String> tagValueGenerator;
/** The timestamp key, defaults to "YCSBTS". */
protected String timestampKey;
/** The value key, defaults to "YCSBDS". */
protected String valueKey;
/** The number of time units in between timestamps. */
protected int timestampInterval;
/** The units of time the timestamp and various intervals represent. */
protected TimeUnit timeUnits;
/** Whether or not to randomize the timestamp order when writing. */
protected boolean randomizeTimestampOrder;
/** Whether or not to randomize (shuffle) the time series order. NOT compatible
* with data integrity. */
protected boolean randomizeTimeseriesOrder;
/** The type of values to generate when writing data. */
protected ValueType valueType;
/** Used to calculate an offset for each time series. */
protected int[] cumulativeCardinality;
/** The calculated total cardinality based on the config. */
protected int totalCardinality;
/** The calculated per-time-series-key cardinality. I.e. the number of unique
* tag key and value combinations. */
protected int perKeyCardinality;
/** How much data to scan for in each call. */
protected NumberGenerator scanlength;
/** A generator used to select a random time series key per read/scan. */
protected NumberGenerator keychooser;
/** A generator to select what operation to perform during the run phase. */
protected DiscreteGenerator operationchooser;
/** The maximum number of interval offsets from the starting timestamp. Calculated
* based on the number of records configured for the run. */
protected int maxOffsets;
/** The number of records or operations to perform for this run. */
protected int recordcount;
/** The number of tag pairs per time series. */
protected int tagPairs;
/** The table we'll write to. */
protected String table;
/** How many time series keys will be generated. */
protected int numKeys;
/** The generated list of possible time series key values. */
protected String[] keys;
/** The generated list of possible tag key values. */
protected String[] tagKeys;
/** The generated list of possible tag value values. */
protected String[] tagValues;
/** The cardinality for each tag key. */
protected int[] tagCardinality;
/** A helper to skip non-incrementing tag values. */
protected int firstIncrementableCardinality;
/** How sparse the data written should be. */
protected double sparsity;
/** The percentage of time series that should be delayed in writes. */
protected double delayedSeries;
/** The maximum number of intervals to delay a series. */
protected int delayedIntervals;
/** Optional query time interval during reads/scans. */
protected int queryTimeSpan;
/** Whether or not the actual interval should be randomly chosen, using
* queryTimeSpan as the maximum value. */
protected boolean queryRandomTimeSpan;
/** The delimiter for tag pairs in fields. */
protected String tagPairDelimiter;
/** The delimiter between parameters for the delete key. */
protected String deleteDelimiter;
/** The delimiter between timestamps for query time spans. */
protected String queryTimeSpanDelimiter;
/** Whether or not to issue group-by queries. */
protected boolean groupBy;
/** The key used for group-by tag keys. */
protected String groupByKey;
/** The function used for group-by's. */
protected String groupByFunction;
/** The tag keys to group on. */
protected boolean[] groupBys;
/** Whether or not to issue downsampling queries. */
protected boolean downsample;
/** The key used for downsampling tag keys. */
protected String downsampleKey;
/** The downsampling function. */
protected String downsampleFunction;
/** The downsampling interval. */
protected int downsampleInterval;
/**
* Set to true if want to check correctness of reads. Must also
* be set to true during loading phase to function.
*/
protected boolean dataintegrity;
/** Measurements to write data integrity results to. */
protected Measurements measurements = Measurements.getMeasurements();
@Override
public void init(final Properties p) throws WorkloadException {
properties = p;
recordcount =
Integer.parseInt(p.getProperty(Client.RECORD_COUNT_PROPERTY,
Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
timestampKey = p.getProperty(TIMESTAMP_KEY_PROPERTY, TIMESTAMP_KEY_PROPERTY_DEFAULT);
valueKey = p.getProperty(VALUE_KEY_PROPERTY, VALUE_KEY_PROPERTY_DEFAULT);
operationchooser = CoreWorkload.createOperationGenerator(properties);
final int maxscanlength =
Integer.parseInt(p.getProperty(CoreWorkload.MAX_SCAN_LENGTH_PROPERTY,
CoreWorkload.MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(CoreWorkload.SCAN_LENGTH_DISTRIBUTION_PROPERTY,
CoreWorkload.SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(1, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(1, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
randomizeTimestampOrder = Boolean.parseBoolean(p.getProperty(
RANDOMIZE_TIMESTAMP_ORDER_PROPERTY,
RANDOMIZE_TIMESTAMP_ORDER_PROPERTY_DEFAULT));
randomizeTimeseriesOrder = Boolean.parseBoolean(p.getProperty(
RANDOMIZE_TIMESERIES_ORDER_PROPERTY,
RANDOMIZE_TIMESERIES_ORDER_PROPERTY_DEFAULT));
// setup the cardinality
numKeys = Integer.parseInt(p.getProperty(CoreWorkload.FIELD_COUNT_PROPERTY,
CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT));
tagPairs = Integer.parseInt(p.getProperty(TAG_COUNT_PROPERTY,
TAG_COUNT_PROPERTY_DEFAULT));
sparsity = Double.parseDouble(p.getProperty(SPARSITY_PROPERTY, SPARSITY_PROPERTY_DEFAULT));
tagCardinality = new int[tagPairs];
final String requestdistrib =
p.getProperty(CoreWorkload.REQUEST_DISTRIBUTION_PROPERTY,
CoreWorkload.REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(0, numKeys - 1);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(0, numKeys - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
keychooser = new ScrambledZipfianGenerator(0, numKeys - 1);
//} else if (requestdistrib.compareTo("latest") == 0) {
// keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(CoreWorkload.HOTSPOT_DATA_FRACTION,
CoreWorkload.HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(CoreWorkload.HOTSPOT_OPN_FRACTION,
CoreWorkload.HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, numKeys - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
// figure out the start timestamp based on the units, cardinality and interval
try {
timestampInterval = Integer.parseInt(p.getProperty(
TIMESTAMP_INTERVAL_PROPERTY, TIMESTAMP_INTERVAL_PROPERTY_DEFAULT));
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse the " +
TIMESTAMP_INTERVAL_PROPERTY, nfe);
}
try {
timeUnits = TimeUnit.valueOf(p.getProperty(TIMESTAMP_UNITS_PROPERTY,
TIMESTAMP_UNITS_PROPERTY_DEFAULT).toUpperCase());
} catch (IllegalArgumentException e) {
throw new WorkloadException("Unknown time unit type", e);
}
if (timeUnits == TimeUnit.NANOSECONDS || timeUnits == TimeUnit.MICROSECONDS) {
throw new WorkloadException("YCSB doesn't support " + timeUnits +
" at this time.");
}
tagPairDelimiter = p.getProperty(PAIR_DELIMITER_PROPERTY, PAIR_DELIMITER_PROPERTY_DEFAULT);
deleteDelimiter = p.getProperty(DELETE_DELIMITER_PROPERTY, DELETE_DELIMITER_PROPERTY_DEFAULT);
dataintegrity = Boolean.parseBoolean(
p.getProperty(CoreWorkload.DATA_INTEGRITY_PROPERTY,
CoreWorkload.DATA_INTEGRITY_PROPERTY_DEFAULT));
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
queryTimeSpan = Integer.parseInt(p.getProperty(QUERY_TIMESPAN_PROPERTY,
QUERY_TIMESPAN_PROPERTY_DEFAULT));
queryRandomTimeSpan = Boolean.parseBoolean(p.getProperty(QUERY_RANDOM_TIMESPAN_PROPERTY,
QUERY_RANDOM_TIMESPAN_PROPERTY_DEFAULT));
queryTimeSpanDelimiter = p.getProperty(QUERY_TIMESPAN_DELIMITER_PROPERTY,
QUERY_TIMESPAN_DELIMITER_PROPERTY_DEFAULT);
groupByKey = p.getProperty(GROUPBY_KEY_PROPERTY, GROUPBY_KEY_PROPERTY_DEFAULT);
groupByFunction = p.getProperty(GROUPBY_PROPERTY);
if (groupByFunction != null && !groupByFunction.isEmpty()) {
final String groupByKeys = p.getProperty(GROUPBY_KEYS_PROPERTY);
if (groupByKeys == null || groupByKeys.isEmpty()) {
throw new WorkloadException("Group by was enabled but no keys were specified.");
}
final String[] gbKeys = groupByKeys.split(",");
if (gbKeys.length != tagKeys.length) {
throw new WorkloadException("Only " + gbKeys.length + " group by keys "
+ "were specified but there were " + tagKeys.length + " tag keys given.");
}
groupBys = new boolean[gbKeys.length];
for (int i = 0; i < gbKeys.length; i++) {
groupBys[i] = Integer.parseInt(gbKeys[i].trim()) == 0 ? false : true;
}
groupBy = true;
}
downsampleKey = p.getProperty(DOWNSAMPLING_KEY_PROPERTY, DOWNSAMPLING_KEY_PROPERTY_DEFAULT);
downsampleFunction = p.getProperty(DOWNSAMPLING_FUNCTION_PROPERTY);
if (downsampleFunction != null && !downsampleFunction.isEmpty()) {
final String interval = p.getProperty(DOWNSAMPLING_INTERVAL_PROPERTY);
if (interval == null || interval.isEmpty()) {
throw new WorkloadException("'" + DOWNSAMPLING_INTERVAL_PROPERTY + "' was missing despite '"
+ DOWNSAMPLING_FUNCTION_PROPERTY + "' being set.");
}
downsampleInterval = Integer.parseInt(interval);
downsample = true;
}
delayedSeries = Double.parseDouble(p.getProperty(DELAYED_SERIES_PROPERTY, DELAYED_SERIES_PROPERTY_DEFAULT));
delayedIntervals = Integer.parseInt(p.getProperty(DELAYED_INTERVALS_PROPERTY, DELAYED_INTERVALS_PROPERTY_DEFAULT));
valueType = ValueType.fromString(p.getProperty(VALUE_TYPE_PROPERTY, VALUE_TYPE_PROPERTY_DEFAULT));
table = p.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT);
initKeysAndTags();
validateSettings();
}
@Override
public Object initThread(Properties p, int mythreadid, int threadcount) throws WorkloadException {
if (properties == null) {
throw new WorkloadException("Workload has not been initialized.");
}
return new ThreadState(mythreadid, threadcount);
}
@Override
public boolean doInsert(DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
final Map<String, ByteIterator> tags = new TreeMap<String, ByteIterator>();
final String key = ((ThreadState)threadstate).nextDataPoint(tags, true);
if (db.insert(table, key, tags) == Status.OK) {
return true;
}
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
switch (operationchooser.nextString()) {
case "READ":
doTransactionRead(db, threadstate);
break;
case "UPDATE":
doTransactionUpdate(db, threadstate);
break;
case "INSERT":
doTransactionInsert(db, threadstate);
break;
case "SCAN":
doTransactionScan(db, threadstate);
break;
case "DELETE":
doTransactionDelete(db, threadstate);
break;
default:
return false;
}
return true;
}
protected void doTransactionRead(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final String keyname = keys[keychooser.nextValue().intValue()];
final Random random = ThreadLocalRandom.current();
int offsets = state.queryOffsetGenerator.nextValue().intValue();
//int offsets = random.nextInt(maxOffsets - 1);
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
// rando tags
Set<String> fields = new HashSet<String>();
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
fields.add(tagKeys[i]);
} else {
fields.add(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
fields.add(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
fields.add(timestampKey + tagPairDelimiter + startTimestamp);
}
if (groupBy) {
fields.add(groupByKey + tagPairDelimiter + groupByFunction);
}
if (downsample) {
fields.add(downsampleKey + tagPairDelimiter + downsampleFunction + downsampleInterval);
}
final Map<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
final Status status = db.read(table, keyname, fields, cells);
if (dataintegrity && status == Status.OK) {
verifyRow(keyname, cells);
}
}
protected void doTransactionUpdate(final DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
final Map<String, ByteIterator> tags = new TreeMap<String, ByteIterator>();
final String key = ((ThreadState)threadstate).nextDataPoint(tags, false);
db.update(table, key, tags);
}
protected void doTransactionInsert(final DB db, Object threadstate) {
doInsert(db, threadstate);
}
protected void doTransactionScan(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final Random random = ThreadLocalRandom.current();
final String keyname = keys[random.nextInt(keys.length)];
// choose a random scan length
int len = scanlength.nextValue().intValue();
int offsets = random.nextInt(maxOffsets - 1);
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
// rando tags
Set<String> fields = new HashSet<String>();
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
fields.add(tagKeys[i]);
} else {
fields.add(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
fields.add(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
fields.add(timestampKey + tagPairDelimiter + startTimestamp);
}
if (groupBy) {
fields.add(groupByKey + tagPairDelimiter + groupByFunction);
}
if (downsample) {
fields.add(downsampleKey + tagPairDelimiter + downsampleFunction + tagPairDelimiter + downsampleInterval);
}
final Vector<HashMap<String, ByteIterator>> results = new Vector<HashMap<String, ByteIterator>>();
db.scan(table, keyname, len, fields, results);
}
protected void doTransactionDelete(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final Random random = ThreadLocalRandom.current();
final StringBuilder buf = new StringBuilder().append(keys[random.nextInt(keys.length)]);
int offsets = random.nextInt(maxOffsets - 1);
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
// rando tags
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
buf.append(deleteDelimiter)
.append(tagKeys[i]);
} else {
buf.append(deleteDelimiter).append(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
buf.append(deleteDelimiter)
.append(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
buf.append(deleteDelimiter)
.append(timestampKey + tagPairDelimiter + startTimestamp);
}
db.delete(table, buf.toString());
}
/**
* Parses the values returned by a read or scan operation and determines whether
* or not the integer value matches the hash and timestamp of the original timestamp.
* Only works for raw data points, will not work for group-by's or downsampled data.
* @param key The time series key.
* @param cells The cells read by the DB.
* @return {@link Status#OK} if the data matched or {@link Status#UNEXPECTED_STATE} if
* the data did not match.
*/
protected Status verifyRow(final String key, final Map<String, ByteIterator> cells) {
Status verifyStatus = Status.UNEXPECTED_STATE;
long startTime = System.nanoTime();
double value = 0;
long timestamp = 0;
final TreeMap<String, String> validationTags = new TreeMap<String, String>();
for (final Entry<String, ByteIterator> entry : cells.entrySet()) {
if (entry.getKey().equals(timestampKey)) {
final NumericByteIterator it = (NumericByteIterator) entry.getValue();
timestamp = it.getLong();
} else if (entry.getKey().equals(valueKey)) {
final NumericByteIterator it = (NumericByteIterator) entry.getValue();
value = it.isFloatingPoint() ? it.getDouble() : it.getLong();
} else {
validationTags.put(entry.getKey(), entry.getValue().toString());
}
}
if (validationFunction(key, timestamp, validationTags) == value) {
verifyStatus = Status.OK;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
return verifyStatus;
}
/**
* Function used for generating a deterministic hash based on the combination
* of metric, tags and timestamp.
* @param key A non-null string representing the key.
* @param timestamp A timestamp in the proper units for the workload.
* @param tags A non-null map of tag keys and values NOT including the YCSB
* key or timestamp.
* @return A hash value as an 8 byte integer.
*/
protected long validationFunction(final String key, final long timestamp,
final TreeMap<String, String> tags) {
final StringBuilder validationBuffer = new StringBuilder(keys[0].length() +
(tagPairs * tagKeys[0].length()) + (tagPairs * tagCardinality[1]));
for (final Entry<String, String> pair : tags.entrySet()) {
validationBuffer.append(pair.getKey()).append(pair.getValue());
}
return (long) validationBuffer.toString().hashCode() ^ timestamp;
}
/**
* Breaks out the keys, tags and cardinality initialization in another method
* to keep CheckStyle happy.
* @throws WorkloadException If something goes pear shaped.
*/
protected void initKeysAndTags() throws WorkloadException {
final int keyLength = Integer.parseInt(properties.getProperty(
CoreWorkload.FIELD_LENGTH_PROPERTY,
CoreWorkload.FIELD_LENGTH_PROPERTY_DEFAULT));
final int tagKeyLength = Integer.parseInt(properties.getProperty(
TAG_KEY_LENGTH_PROPERTY, TAG_KEY_LENGTH_PROPERTY_DEFAULT));
final int tagValueLength = Integer.parseInt(properties.getProperty(
TAG_VALUE_LENGTH_PROPERTY, TAG_VALUE_LENGTH_PROPERTY_DEFAULT));
keyGenerator = new IncrementingPrintableStringGenerator(keyLength);
tagKeyGenerator = new IncrementingPrintableStringGenerator(tagKeyLength);
tagValueGenerator = new IncrementingPrintableStringGenerator(tagValueLength);
final int threads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1"));
final String tagCardinalityString = properties.getProperty(
TAG_CARDINALITY_PROPERTY,
TAG_CARDINALITY_PROPERTY_DEFAULT);
final String[] tagCardinalityParts = tagCardinalityString.split(",");
int idx = 0;
totalCardinality = numKeys;
perKeyCardinality = 1;
int maxCardinality = 0;
for (final String card : tagCardinalityParts) {
try {
tagCardinality[idx] = Integer.parseInt(card.trim());
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse cardinality: " +
card, nfe);
}
if (tagCardinality[idx] < 1) {
throw new WorkloadException("Cardinality must be greater than zero: " +
tagCardinality[idx]);
}
totalCardinality *= tagCardinality[idx];
perKeyCardinality *= tagCardinality[idx];
if (tagCardinality[idx] > maxCardinality) {
maxCardinality = tagCardinality[idx];
}
++idx;
if (idx >= tagPairs) {
// we have more cardinalities than tag keys so bail at this point.
break;
}
}
if (numKeys < threads) {
throw new WorkloadException("Field count " + numKeys + " (keys for time "
+ "series workloads) must be greater or equal to the number of "
+ "threads " + threads);
}
// fill tags without explicit cardinality with 1
if (idx < tagPairs) {
tagCardinality[idx++] = 1;
}
for (int i = 0; i < tagCardinality.length; ++i) {
if (tagCardinality[i] > 1) {
firstIncrementableCardinality = i;
break;
}
}
keys = new String[numKeys];
tagKeys = new String[tagPairs];
tagValues = new String[maxCardinality];
for (int i = 0; i < numKeys; ++i) {
keys[i] = keyGenerator.nextString();
}
for (int i = 0; i < tagPairs; ++i) {
tagKeys[i] = tagKeyGenerator.nextString();
}
for (int i = 0; i < maxCardinality; i++) {
tagValues[i] = tagValueGenerator.nextString();
}
if (randomizeTimeseriesOrder) {
Utils.shuffleArray(keys);
Utils.shuffleArray(tagValues);
}
maxOffsets = (recordcount / totalCardinality) + 1;
final int[] keyAndTagCardinality = new int[tagPairs + 1];
keyAndTagCardinality[0] = numKeys;
for (int i = 0; i < tagPairs; i++) {
keyAndTagCardinality[i + 1] = tagCardinality[i];
}
cumulativeCardinality = new int[keyAndTagCardinality.length];
for (int i = 0; i < keyAndTagCardinality.length; i++) {
int cumulation = 1;
for (int x = i; x <= keyAndTagCardinality.length - 1; x++) {
cumulation *= keyAndTagCardinality[x];
}
if (i > 0) {
cumulativeCardinality[i - 1] = cumulation;
}
}
cumulativeCardinality[cumulativeCardinality.length - 1] = 1;
}
/**
* Makes sure the settings as given are compatible.
* @throws WorkloadException If one or more settings were invalid.
*/
protected void validateSettings() throws WorkloadException {
if (dataintegrity) {
if (valueType != ValueType.INTEGERS) {
throw new WorkloadException("Data integrity was enabled. 'valuetype' must "
+ "be set to 'integers'.");
}
if (groupBy) {
throw new WorkloadException("Data integrity was enabled. 'groupbyfunction' must "
+ "be empty or null.");
}
if (downsample) {
throw new WorkloadException("Data integrity was enabled. 'downsamplingfunction' must "
+ "be empty or null.");
}
if (queryTimeSpan > 0) {
throw new WorkloadException("Data integrity was enabled. 'querytimespan' must "
+ "be empty or 0.");
}
if (randomizeTimeseriesOrder) {
throw new WorkloadException("Data integrity was enabled. 'randomizetimeseriesorder' must "
+ "be false.");
}
final String startTimestamp = properties.getProperty(CoreWorkload.INSERT_START_PROPERTY);
if (startTimestamp == null || startTimestamp.isEmpty()) {
throw new WorkloadException("Data integrity was enabled. 'insertstart' must "
+ "be set to a Unix Epoch timestamp.");
}
}
}
/**
* Thread state class holding thread local generators and indices.
*/
protected class ThreadState {
/** The timestamp generator for this thread. */
protected final UnixEpochTimestampGenerator timestampGenerator;
/** An offset generator to select a random offset for queries. */
protected final NumberGenerator queryOffsetGenerator;
/** The current write key index. */
protected int keyIdx;
/** The starting fence for writing keys. */
protected int keyIdxStart;
/** The ending fence for writing keys. */
protected int keyIdxEnd;
/** Indices for each tag value for writes. */
protected int[] tagValueIdxs;
/** Whether or not all time series have written values for the current timestamp. */
protected boolean rollover;
/** The starting timestamp. */
protected long startTimestamp;
/**
* Default ctor.
* @param threadID The zero based thread ID.
* @param threadCount The total number of threads.
* @throws WorkloadException If something went pear shaped.
*/
protected ThreadState(final int threadID, final int threadCount) throws WorkloadException {
int totalThreads = threadCount > 0 ? threadCount : 1;
if (threadID >= totalThreads) {
throw new IllegalStateException("Thread ID " + threadID + " cannot be greater "
+ "than or equal than the thread count " + totalThreads);
}
if (keys.length < threadCount) {
throw new WorkloadException("Thread count " + totalThreads + " must be greater "
+ "than or equal to key count " + keys.length);
}
int keysPerThread = keys.length / totalThreads;
keyIdx = keysPerThread * threadID;
keyIdxStart = keyIdx;
if (totalThreads - 1 == threadID) {
keyIdxEnd = keys.length;
} else {
keyIdxEnd = keyIdxStart + keysPerThread;
}
tagValueIdxs = new int[tagPairs]; // all zeros
final String startingTimestamp =
properties.getProperty(CoreWorkload.INSERT_START_PROPERTY);
if (startingTimestamp == null || startingTimestamp.isEmpty()) {
timestampGenerator = randomizeTimestampOrder ?
new RandomDiscreteTimestampGenerator(timestampInterval, timeUnits, maxOffsets) :
new UnixEpochTimestampGenerator(timestampInterval, timeUnits);
} else {
try {
timestampGenerator = randomizeTimestampOrder ?
new RandomDiscreteTimestampGenerator(timestampInterval, timeUnits,
Long.parseLong(startingTimestamp), maxOffsets) :
new UnixEpochTimestampGenerator(timestampInterval, timeUnits,
Long.parseLong(startingTimestamp));
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse the " +
CoreWorkload.INSERT_START_PROPERTY, nfe);
}
}
// Set the last value properly for the timestamp, otherwise it may start
// one interval ago.
startTimestamp = timestampGenerator.nextValue();
// TODO - pick it
queryOffsetGenerator = new UniformLongGenerator(0, maxOffsets - 2);
}
/**
* Generates the next write value for thread.
* @param map An initialized map to populate with tag keys and values as well
* as the timestamp and actual value.
* @param isInsert Whether or not it's an insert or an update. Updates will pick
* an older timestamp (if random isn't enabled).
* @return The next key to write.
*/
protected String nextDataPoint(final Map<String, ByteIterator> map, final boolean isInsert) {
final Random random = ThreadLocalRandom.current();
int iterations = sparsity <= 0 ? 1 : random.nextInt((int) ((double) perKeyCardinality * sparsity));
if (iterations < 1) {
iterations = 1;
}
while (true) {
iterations--;
if (rollover) {
timestampGenerator.nextValue();
rollover = false;
}
String key = null;
if (iterations <= 0) {
final TreeMap<String, String> validationTags;
if (dataintegrity) {
validationTags = new TreeMap<String, String>();
} else {
validationTags = null;
}
key = keys[keyIdx];
int overallIdx = keyIdx * cumulativeCardinality[0];
for (int i = 0; i < tagPairs; ++i) {
int tvidx = tagValueIdxs[i];
map.put(tagKeys[i], new StringByteIterator(tagValues[tvidx]));
if (dataintegrity) {
validationTags.put(tagKeys[i], tagValues[tvidx]);
}
if (delayedSeries > 0) {
overallIdx += (tvidx * cumulativeCardinality[i + 1]);
}
}
if (!isInsert) {
final long delta = (timestampGenerator.currentValue() - startTimestamp) / timestampInterval;
final int intervals = random.nextInt((int) delta);
map.put(timestampKey, new NumericByteIterator(startTimestamp + (intervals * timestampInterval)));
} else if (delayedSeries > 0) {
// See if the series falls in a delay bucket and calculate an offset earlier
// than the current timestamp value if so.
double pct = (double) overallIdx / (double) totalCardinality;
if (pct < delayedSeries) {
int modulo = overallIdx % delayedIntervals;
if (modulo < 0) {
modulo *= -1;
}
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue() -
timestampInterval * modulo));
} else {
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue()));
}
} else {
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue()));
}
if (dataintegrity) {
map.put(valueKey, new NumericByteIterator(validationFunction(key,
timestampGenerator.currentValue(), validationTags)));
} else {
switch (valueType) {
case INTEGERS:
map.put(valueKey, new NumericByteIterator(random.nextInt()));
break;
case FLOATS:
map.put(valueKey, new NumericByteIterator(random.nextDouble() * (double) 100000));
break;
case MIXED:
if (random.nextBoolean()) {
map.put(valueKey, new NumericByteIterator(random.nextInt()));
} else {
map.put(valueKey, new NumericByteIterator(random.nextDouble() * (double) 100000));
}
break;
default:
throw new IllegalStateException("Somehow we didn't have a value "
+ "type configured that we support: " + valueType);
}
}
}
boolean tagRollover = false;
for (int i = tagCardinality.length - 1; i >= 0; --i) {
if (tagCardinality[i] <= 1) {
tagRollover = true; // Only one tag so needs roll over.
continue;
}
if (tagValueIdxs[i] + 1 >= tagCardinality[i]) {
tagValueIdxs[i] = 0;
if (i == firstIncrementableCardinality) {
tagRollover = true;
}
} else {
++tagValueIdxs[i];
break;
}
}
if (tagRollover) {
if (keyIdx + 1 >= keyIdxEnd) {
keyIdx = keyIdxStart;
rollover = true;
} else {
++keyIdx;
}
}
if (iterations <= 0) {
return key;
}
}
}
}
} | 55,502 | 42.05896 | 119 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/workloads/package-info.java | /*
* Copyright (c) 2015 - 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB workloads.
*/
package site.ycsb.workloads;
| 726 | 30.608696 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/workloads/RestWorkload.java | /**
* Copyright (c) 2016-2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.workloads;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.RandomByteIterator;
import site.ycsb.WorkloadException;
import site.ycsb.generator.*;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import site.ycsb.generator.UniformLongGenerator;
/**
* Typical RESTFul services benchmarking scenario. Represents a set of client
* calling REST operations like HTTP DELETE, GET, POST, PUT on a web service.
* This scenario is completely different from CoreWorkload which is mainly
* designed for databases benchmarking. However due to some reusable
* functionality this class extends {@link CoreWorkload} and overrides necessary
* methods like init, doTransaction etc.
*/
public class RestWorkload extends CoreWorkload {
/**
* The name of the property for the proportion of transactions that are
* delete.
*/
public static final String DELETE_PROPORTION_PROPERTY = "deleteproportion";
/**
* The default proportion of transactions that are delete.
*/
public static final String DELETE_PROPORTION_PROPERTY_DEFAULT = "0.00";
/**
* The name of the property for the file that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY = "fieldlengthdistfile";
/**
* The default file name that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY_DEFAULT = "fieldLengthDistFile.txt";
/**
* In web services even though the CRUD operations follow the same request
* distribution, they have different traces and distribution parameter
* values. Hence configuring the parameters of these operations separately
* makes the benchmark more flexible and capable of generating better
* realistic workloads.
*/
// Read related properties.
private static final String READ_TRACE_FILE = "url.trace.read";
private static final String READ_TRACE_FILE_DEFAULT = "readtrace.txt";
private static final String READ_ZIPFIAN_CONSTANT = "readzipfconstant";
private static final String READ_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String READ_RECORD_COUNT_PROPERTY = "readrecordcount";
// Insert related properties.
private static final String INSERT_TRACE_FILE = "url.trace.insert";
private static final String INSERT_TRACE_FILE_DEFAULT = "inserttrace.txt";
private static final String INSERT_ZIPFIAN_CONSTANT = "insertzipfconstant";
private static final String INSERT_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT = "insertsizezipfconstant";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_RECORD_COUNT_PROPERTY = "insertrecordcount";
// Delete related properties.
private static final String DELETE_TRACE_FILE = "url.trace.delete";
private static final String DELETE_TRACE_FILE_DEFAULT = "deletetrace.txt";
private static final String DELETE_ZIPFIAN_CONSTANT = "deletezipfconstant";
private static final String DELETE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String DELETE_RECORD_COUNT_PROPERTY = "deleterecordcount";
// Delete related properties.
private static final String UPDATE_TRACE_FILE = "url.trace.update";
private static final String UPDATE_TRACE_FILE_DEFAULT = "updatetrace.txt";
private static final String UPDATE_ZIPFIAN_CONSTANT = "updatezipfconstant";
private static final String UPDATE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String UPDATE_RECORD_COUNT_PROPERTY = "updaterecordcount";
private Map<Integer, String> readUrlMap;
private Map<Integer, String> insertUrlMap;
private Map<Integer, String> deleteUrlMap;
private Map<Integer, String> updateUrlMap;
private int readRecordCount;
private int insertRecordCount;
private int deleteRecordCount;
private int updateRecordCount;
private NumberGenerator readKeyChooser;
private NumberGenerator insertKeyChooser;
private NumberGenerator deleteKeyChooser;
private NumberGenerator updateKeyChooser;
private NumberGenerator fieldlengthgenerator;
private DiscreteGenerator operationchooser;
@Override
public void init(Properties p) throws WorkloadException {
readRecordCount = Integer.parseInt(p.getProperty(READ_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
insertRecordCount = Integer
.parseInt(p.getProperty(INSERT_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
deleteRecordCount = Integer
.parseInt(p.getProperty(DELETE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
updateRecordCount = Integer
.parseInt(p.getProperty(UPDATE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
readUrlMap = getTrace(p.getProperty(READ_TRACE_FILE, READ_TRACE_FILE_DEFAULT), readRecordCount);
insertUrlMap = getTrace(p.getProperty(INSERT_TRACE_FILE, INSERT_TRACE_FILE_DEFAULT), insertRecordCount);
deleteUrlMap = getTrace(p.getProperty(DELETE_TRACE_FILE, DELETE_TRACE_FILE_DEFAULT), deleteRecordCount);
updateUrlMap = getTrace(p.getProperty(UPDATE_TRACE_FILE, UPDATE_TRACE_FILE_DEFAULT), updateRecordCount);
operationchooser = createOperationGenerator(p);
// Common distribution for all operations.
String requestDistrib = p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
double readZipfconstant = Double.parseDouble(p.getProperty(READ_ZIPFIAN_CONSTANT, READ_ZIPFIAN_CONSTANT_DEAFULT));
readKeyChooser = getKeyChooser(requestDistrib, readUrlMap.size(), readZipfconstant, p);
double updateZipfconstant = Double
.parseDouble(p.getProperty(UPDATE_ZIPFIAN_CONSTANT, UPDATE_ZIPFIAN_CONSTANT_DEAFULT));
updateKeyChooser = getKeyChooser(requestDistrib, updateUrlMap.size(), updateZipfconstant, p);
double insertZipfconstant = Double
.parseDouble(p.getProperty(INSERT_ZIPFIAN_CONSTANT, INSERT_ZIPFIAN_CONSTANT_DEAFULT));
insertKeyChooser = getKeyChooser(requestDistrib, insertUrlMap.size(), insertZipfconstant, p);
double deleteZipfconstant = Double
.parseDouble(p.getProperty(DELETE_ZIPFIAN_CONSTANT, DELETE_ZIPFIAN_CONSTANT_DEAFULT));
deleteKeyChooser = getKeyChooser(requestDistrib, deleteUrlMap.size(), deleteZipfconstant, p);
fieldlengthgenerator = getFieldLengthGenerator(p);
}
public static DiscreteGenerator createOperationGenerator(final Properties p) {
// Re-using CoreWorkload method.
final DiscreteGenerator operationChooser = CoreWorkload.createOperationGenerator(p);
// Needs special handling for delete operations not supported in CoreWorkload.
double deleteproportion = Double
.parseDouble(p.getProperty(DELETE_PROPORTION_PROPERTY, DELETE_PROPORTION_PROPERTY_DEFAULT));
if (deleteproportion > 0) {
operationChooser.addValue(deleteproportion, "DELETE");
}
return operationChooser;
}
private static NumberGenerator getKeyChooser(String requestDistrib, int recordCount, double zipfContant,
Properties p) throws WorkloadException {
NumberGenerator keychooser;
switch (requestDistrib) {
case "exponential":
double percentile = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordCount * frac);
break;
case "uniform":
keychooser = new UniformLongGenerator(0, recordCount - 1);
break;
case "zipfian":
keychooser = new ZipfianGenerator(recordCount, zipfContant);
break;
case "latest":
throw new WorkloadException("Latest request distribution is not supported for RestWorkload.");
case "hotspot":
double hotsetfraction = Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction = Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, recordCount - 1, hotsetfraction, hotopnfraction);
break;
default:
throw new WorkloadException("Unknown request distribution \"" + requestDistrib + "\"");
}
return keychooser;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
// Re-using CoreWorkload method.
NumberGenerator fieldLengthGenerator = CoreWorkload.getFieldLengthGenerator(p);
String fieldlengthdistribution = p.getProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
// Needs special handling for Zipfian distribution for variable Zipf Constant.
if (fieldlengthdistribution.compareTo("zipfian") == 0) {
int fieldlength = Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
double insertsizezipfconstant = Double
.parseDouble(p.getProperty(INSERT_SIZE_ZIPFIAN_CONSTANT, INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT));
fieldLengthGenerator = new ZipfianGenerator(1, fieldlength, insertsizezipfconstant);
}
return fieldLengthGenerator;
}
/**
* Reads the trace file and returns a URL map.
*/
private static Map<Integer, String> getTrace(String filePath, int recordCount)
throws WorkloadException {
Map<Integer, String> urlMap = new HashMap<Integer, String>();
int count = 0;
String line;
try {
FileReader inputFile = new FileReader(filePath);
BufferedReader bufferReader = new BufferedReader(inputFile);
while ((line = bufferReader.readLine()) != null) {
urlMap.put(count++, line.trim());
if (count >= recordCount) {
break;
}
}
bufferReader.close();
} catch (IOException e) {
throw new WorkloadException(
"Error while reading the trace. Please make sure the trace file path is correct. "
+ e.getLocalizedMessage());
}
return urlMap;
}
/**
* Not required for Rest Clients as data population is service specific.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
/**
* Returns next URL to be called.
*/
private String getNextURL(int opType) {
if (opType == 1) {
return readUrlMap.get(readKeyChooser.nextValue().intValue());
} else if (opType == 2) {
return insertUrlMap.get(insertKeyChooser.nextValue().intValue());
} else if (opType == 3) {
return deleteUrlMap.get(deleteKeyChooser.nextValue().intValue());
} else {
return updateUrlMap.get(updateKeyChooser.nextValue().intValue());
}
}
@Override
public void doTransactionRead(DB db) {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
db.read(null, getNextURL(1), null, result);
}
@Override
public void doTransactionInsert(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of insert data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.insert(null, getNextURL(2), value);
}
public void doTransactionDelete(DB db) {
db.delete(null, getNextURL(3));
}
@Override
public void doTransactionUpdate(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of update data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.update(null, getNextURL(4), value);
}
}
| 13,088 | 41.635179 | 118 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | /**
* Copyright (c) 2010 Yahoo! Inc., Copyright (c) 2016-2020 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.workloads;
import site.ycsb.*;
import site.ycsb.generator.*;
import site.ycsb.generator.UniformLongGenerator;
import site.ycsb.measurements.Measurements;
import java.io.IOException;
import java.util.*;
/**
* The core benchmark scenario. Represents a set of clients doing simple CRUD operations. The
* relative proportion of different kinds of operations, and other properties of the workload,
* are controlled by parameters specified at runtime.
* <p>
* Properties to control the client:
* <UL>
* <LI><b>fieldcount</b>: the number of fields in a record (default: 10)
* <LI><b>fieldlength</b>: the size of each field (default: 100)
* <LI><b>minfieldlength</b>: the minimum size of each field (default: 1)
* <LI><b>readallfields</b>: should reads read all fields (true) or just one (false) (default: true)
* <LI><b>writeallfields</b>: should updates and read/modify/writes update all fields (true) or just
* one (false) (default: false)
* <LI><b>readproportion</b>: what proportion of operations should be reads (default: 0.95)
* <LI><b>updateproportion</b>: what proportion of operations should be updates (default: 0.05)
* <LI><b>insertproportion</b>: what proportion of operations should be inserts (default: 0)
* <LI><b>scanproportion</b>: what proportion of operations should be scans (default: 0)
* <LI><b>readmodifywriteproportion</b>: what proportion of operations should be read a record,
* modify it, write it back (default: 0)
* <LI><b>requestdistribution</b>: what distribution should be used to select the records to operate
* on - uniform, zipfian, hotspot, sequential, exponential or latest (default: uniform)
* <LI><b>minscanlength</b>: for scans, what is the minimum number of records to scan (default: 1)
* <LI><b>maxscanlength</b>: for scans, what is the maximum number of records to scan (default: 1000)
* <LI><b>scanlengthdistribution</b>: for scans, what distribution should be used to choose the
* number of records to scan, for each scan, between 1 and maxscanlength (default: uniform)
* <LI><b>insertstart</b>: for parallel loads and runs, defines the starting record for this
* YCSB instance (default: 0)
* <LI><b>insertcount</b>: for parallel loads and runs, defines the number of records for this
* YCSB instance (default: recordcount)
* <LI><b>zeropadding</b>: for generating a record sequence compatible with string sort order by
* 0 padding the record number. Controls the number of 0s to use for padding. (default: 1)
* For example for row 5, with zeropadding=1 you get 'user5' key and with zeropading=8 you get
* 'user00000005' key. In order to see its impact, zeropadding needs to be bigger than number of
* digits in the record number.
* <LI><b>insertorder</b>: should records be inserted in order by key ("ordered"), or in hashed
* order ("hashed") (default: hashed)
* <LI><b>fieldnameprefix</b>: what should be a prefix for field names, the shorter may decrease the
* required storage size (default: "field")
* </ul>
*/
public class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the formation template to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
* Set to true if want to check correctness of reads. Must also
* be set to true during loading phase to function.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the the distribution of requests across the keyspace. Options are
* "uniform", "zipfian" and "latest"
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
* The default zero padding value. Matches integer sort order
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
* The default max scan length.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
* Percentage data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
* Percentage operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
* Default value of the percentage operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
long insertcount=
Integer.parseInt(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
// it does this by generating a random "next key" in part by taking the modulus over the
// number of keys.
// If the number of keys changes, this would shift the modulus, and we don't want that to
// change which keys are popular so we'll actually construct the scrambled zipfian generator
// with a keyspace that is larger than exists at the beginning of the test. that is, we'll predict
// the number of inserts, and tell the scrambled zipfian generator the number of existing keys
// plus the number of predicted keys as the total keyspace. then, if the generator picks a key
// that hasn't been inserted yet, will just ignore it and pick another key. this way, the size of
// the keyspace doesn't change from the perspective of the scrambled zipfian generator
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
// Sleep for a random number between [0.8, 1.2)*insertionRetryInterval.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if(operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
/**
* Creates a weighted discrete values with database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
}
| 33,849 | 36.652948 | 111 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/workloads/ConstantOccupancyWorkload.java | /**
* Copyright (c) 2010 Yahoo! Inc. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.workloads;
import site.ycsb.Client;
import site.ycsb.WorkloadException;
import site.ycsb.generator.NumberGenerator;
import java.util.Properties;
/**
* A disk-fragmenting workload.
* <p>
* Properties to control the client:
* </p>
* <UL>
* <LI><b>disksize</b>: how many bytes of storage can the disk store? (default 100,000,000)
* <LI><b>occupancy</b>: what fraction of the available storage should be used? (default 0.9)
* <LI><b>requestdistribution</b>: what distribution should be used to select the records to operate on - uniform,
* zipfian or latest (default: histogram)
* </ul>
* <p>
* <p>
* <p> See also:
* Russell Sears, Catharine van Ingen.
* <a href='https://database.cs.wisc.edu/cidr/cidr2007/papers/cidr07p34.pdf'>Fragmentation in Large Object
* Repositories</a>,
* CIDR 2006. [<a href='https://database.cs.wisc.edu/cidr/cidr2007/slides/p34-sears.ppt'>Presentation</a>]
* </p>
*/
public class ConstantOccupancyWorkload extends CoreWorkload {
private long disksize;
private long storageages;
private double occupancy;
private long objectCount;
public static final String STORAGE_AGE_PROPERTY = "storageages";
public static final long STORAGE_AGE_PROPERTY_DEFAULT = 10;
public static final String DISK_SIZE_PROPERTY = "disksize";
public static final long DISK_SIZE_PROPERTY_DEFAULT = 100 * 1000 * 1000;
public static final String OCCUPANCY_PROPERTY = "occupancy";
public static final double OCCUPANCY_PROPERTY_DEFAULT = 0.9;
@Override
public void init(Properties p) throws WorkloadException {
disksize = Long.parseLong(p.getProperty(DISK_SIZE_PROPERTY, String.valueOf(DISK_SIZE_PROPERTY_DEFAULT)));
storageages = Long.parseLong(p.getProperty(STORAGE_AGE_PROPERTY, String.valueOf(STORAGE_AGE_PROPERTY_DEFAULT)));
occupancy = Double.parseDouble(p.getProperty(OCCUPANCY_PROPERTY, String.valueOf(OCCUPANCY_PROPERTY_DEFAULT)));
if (p.getProperty(Client.RECORD_COUNT_PROPERTY) != null ||
p.getProperty(Client.INSERT_COUNT_PROPERTY) != null ||
p.getProperty(Client.OPERATION_COUNT_PROPERTY) != null) {
System.err.println("Warning: record, insert or operation count was set prior to initting " +
"ConstantOccupancyWorkload. Overriding old values.");
}
NumberGenerator g = CoreWorkload.getFieldLengthGenerator(p);
double fieldsize = g.mean();
int fieldcount = Integer.parseInt(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
objectCount = (long) (occupancy * (disksize / (fieldsize * fieldcount)));
if (objectCount == 0) {
throw new IllegalStateException("Object count was zero. Perhaps disksize is too low?");
}
p.setProperty(Client.RECORD_COUNT_PROPERTY, String.valueOf(objectCount));
p.setProperty(Client.OPERATION_COUNT_PROPERTY, String.valueOf(storageages * objectCount));
p.setProperty(Client.INSERT_COUNT_PROPERTY, String.valueOf(objectCount));
super.init(p);
}
}
| 3,629 | 39.786517 | 116 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/SkewedLatestGenerator.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
/**
* Generate a popularity distribution of items, skewed to favor recent items significantly more than older items.
*/
public class SkewedLatestGenerator extends NumberGenerator {
private CounterGenerator basis;
private final ZipfianGenerator zipfian;
public SkewedLatestGenerator(CounterGenerator basis) {
this.basis = basis;
zipfian = new ZipfianGenerator(this.basis.lastValue());
nextValue();
}
/**
* Generate the next string in the distribution, skewed Zipfian favoring the items most recently returned by
* the basis generator.
*/
@Override
public Long nextValue() {
long max = basis.lastValue();
long next = max - zipfian.nextLong(max);
setLastValue(next);
return next;
}
public static void main(String[] args) {
SkewedLatestGenerator gen = new SkewedLatestGenerator(new CounterGenerator(1000));
for (int i = 0; i < Integer.parseInt(args[0]); i++) {
System.out.println(gen.nextString());
}
}
@Override
public double mean() {
throw new UnsupportedOperationException("Can't compute mean of non-stationary distribution!");
}
}
| 1,846 | 31.403509 | 113 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/package-info.java | /*
* Copyright (c) 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB generator package.
*/
package site.ycsb.generator;
| 727 | 30.652174 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/ExponentialGenerator.java | /**
* Copyright (c) 2011-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.ThreadLocalRandom;
/**
* A generator of an exponential distribution. It produces a sequence
* of time intervals according to an exponential
* distribution. Smaller intervals are more frequent than larger
* ones, and there is no bound on the length of an interval. When you
* construct an instance of this class, you specify a parameter gamma,
* which corresponds to the rate at which events occur.
* Alternatively, 1/gamma is the average length of an interval.
*/
public class ExponentialGenerator extends NumberGenerator {
// What percentage of the readings should be within the most recent exponential.frac portion of the dataset?
public static final String EXPONENTIAL_PERCENTILE_PROPERTY = "exponential.percentile";
public static final String EXPONENTIAL_PERCENTILE_DEFAULT = "95";
// What fraction of the dataset should be accessed exponential.percentile of the time?
public static final String EXPONENTIAL_FRAC_PROPERTY = "exponential.frac";
public static final String EXPONENTIAL_FRAC_DEFAULT = "0.8571428571"; // 1/7
/**
* The exponential constant to use.
*/
private double gamma;
/******************************* Constructors **************************************/
/**
* Create an exponential generator with a mean arrival rate of
* gamma. (And half life of 1/gamma).
*/
public ExponentialGenerator(double mean) {
gamma = 1.0 / mean;
}
public ExponentialGenerator(double percentile, double range) {
gamma = -Math.log(1.0 - percentile / 100.0) / range; //1.0/mean;
}
/****************************************************************************************/
/**
* Generate the next item as a long. This distribution will be skewed toward lower values; e.g. 0 will
* be the most popular, 1 the next most popular, etc.
* @return The next item in the sequence.
*/
@Override
public Double nextValue() {
return -Math.log(ThreadLocalRandom.current().nextDouble()) / gamma;
}
@Override
public double mean() {
return 1.0 / gamma;
}
public static void main(String[] args) {
ExponentialGenerator e = new ExponentialGenerator(90, 100);
int j = 0;
for (int i = 0; i < 1000; i++) {
if (e.nextValue() < 100) {
j++;
}
}
System.out.println("Got " + j + " hits. Expect 900");
}
}
| 3,080 | 34.011364 | 110 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/AcknowledgedCounterGenerator.java | /**
* Copyright (c) 2015-2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.locks.ReentrantLock;
/**
* A CounterGenerator that reports generated integers via lastInt()
* only after they have been acknowledged.
*/
public class AcknowledgedCounterGenerator extends CounterGenerator {
/** The size of the window of pending id ack's. 2^20 = {@value} */
static final int WINDOW_SIZE = Integer.rotateLeft(1, 20);
/** The mask to use to turn an id into a slot in {@link #window}. */
private static final int WINDOW_MASK = WINDOW_SIZE - 1;
private final ReentrantLock lock;
private final boolean[] window;
private volatile long limit;
/**
* Create a counter that starts at countstart.
*/
public AcknowledgedCounterGenerator(long countstart) {
super(countstart);
lock = new ReentrantLock();
window = new boolean[WINDOW_SIZE];
limit = countstart - 1;
}
/**
* In this generator, the highest acknowledged counter value
* (as opposed to the highest generated counter value).
*/
@Override
public Long lastValue() {
return limit;
}
/**
* Make a generated counter value available via lastInt().
*/
public void acknowledge(long value) {
final int currentSlot = (int)(value & WINDOW_MASK);
if (window[currentSlot]) {
throw new RuntimeException("Too many unacknowledged insertion keys.");
}
window[currentSlot] = true;
if (lock.tryLock()) {
// move a contiguous sequence from the window
// over to the "limit" variable
try {
// Only loop through the entire window at most once.
long beforeFirstSlot = (limit & WINDOW_MASK);
long index;
for (index = limit + 1; index != beforeFirstSlot; ++index) {
int slot = (int)(index & WINDOW_MASK);
if (!window[slot]) {
break;
}
window[slot] = false;
}
limit = index - 1;
} finally {
lock.unlock();
}
}
}
}
| 2,638 | 28.651685 | 76 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/ScrambledZipfianGenerator.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import site.ycsb.Utils;
/**
* A generator of a zipfian distribution. It produces a sequence of items, such that some items are more popular than
* others, according to a zipfian distribution. When you construct an instance of this class, you specify the number
* of items in the set to draw from, either by specifying an itemcount (so that the sequence is of items from 0 to
* itemcount-1) or by specifying a min and a max (so that the sequence is of items from min to max inclusive). After
* you construct the instance, you can change the number of items by calling nextInt(itemcount) or nextLong(itemcount).
* <p>
* Unlike @ZipfianGenerator, this class scatters the "popular" items across the itemspace. Use this, instead of
* @ZipfianGenerator, if you don't want the head of the distribution (the popular items) clustered together.
*/
public class ScrambledZipfianGenerator extends NumberGenerator {
public static final double ZETAN = 26.46902820178302;
public static final double USED_ZIPFIAN_CONSTANT = 0.99;
public static final long ITEM_COUNT = 10000000000L;
private ZipfianGenerator gen;
private final long min, max, itemcount;
/******************************* Constructors **************************************/
/**
* Create a zipfian generator for the specified number of items.
*
* @param items The number of items in the distribution.
*/
public ScrambledZipfianGenerator(long items) {
this(0, items - 1);
}
/**
* Create a zipfian generator for items between min and max.
*
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
*/
public ScrambledZipfianGenerator(long min, long max) {
this(min, max, ZipfianGenerator.ZIPFIAN_CONSTANT);
}
/**
* Create a zipfian generator for the specified number of items using the specified zipfian constant.
*
* @param _items The number of items in the distribution.
* @param _zipfianconstant The zipfian constant to use.
*/
/*
// not supported, as the value of zeta depends on the zipfian constant, and we have only precomputed zeta for one
zipfian constant
public ScrambledZipfianGenerator(long _items, double _zipfianconstant)
{
this(0,_items-1,_zipfianconstant);
}
*/
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant. If you
* use a zipfian constant other than 0.99, this will take a long time to complete because we need to recompute zeta.
*
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
*/
public ScrambledZipfianGenerator(long min, long max, double zipfianconstant) {
this.min = min;
this.max = max;
itemcount = this.max - this.min + 1;
if (zipfianconstant == USED_ZIPFIAN_CONSTANT) {
gen = new ZipfianGenerator(0, ITEM_COUNT, zipfianconstant, ZETAN);
} else {
gen = new ZipfianGenerator(0, ITEM_COUNT, zipfianconstant);
}
}
/**************************************************************************************************/
/**
* Return the next long in the sequence.
*/
@Override
public Long nextValue() {
long ret = gen.nextValue();
ret = min + Utils.fnvhash64(ret) % itemcount;
setLastValue(ret);
return ret;
}
public static void main(String[] args) {
double newzetan = ZipfianGenerator.zetastatic(ITEM_COUNT, ZipfianGenerator.ZIPFIAN_CONSTANT);
System.out.println("zetan: " + newzetan);
System.exit(0);
ScrambledZipfianGenerator gen = new ScrambledZipfianGenerator(10000);
for (int i = 0; i < 1000000; i++) {
System.out.println("" + gen.nextValue());
}
}
/**
* since the values are scrambled (hopefully uniformly), the mean is simply the middle of the range.
*/
@Override
public double mean() {
return ((min) + max) / 2.0;
}
}
| 4,758 | 36.179688 | 119 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/HistogramGenerator.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.concurrent.ThreadLocalRandom;
/**
* Generate integers according to a histogram distribution. The histogram
* buckets are of width one, but the values are multiplied by a block size.
* Therefore, instead of drawing sizes uniformly at random within each
* bucket, we always draw the largest value in the current bucket, so the value
* drawn is always a multiple of blockSize.
*
* The minimum value this distribution returns is blockSize (not zero).
*
*/
public class HistogramGenerator extends NumberGenerator {
private final long blockSize;
private final long[] buckets;
private long area;
private long weightedArea = 0;
private double meanSize = 0;
public HistogramGenerator(String histogramfile) throws IOException {
try (BufferedReader in = new BufferedReader(new FileReader(histogramfile))) {
String str;
String[] line;
ArrayList<Integer> a = new ArrayList<>();
str = in.readLine();
if (str == null) {
throw new IOException("Empty input file!\n");
}
line = str.split("\t");
if (line[0].compareTo("BlockSize") != 0) {
throw new IOException("First line of histogram is not the BlockSize!\n");
}
blockSize = Integer.parseInt(line[1]);
while ((str = in.readLine()) != null) {
// [0] is the bucket, [1] is the value
line = str.split("\t");
a.add(Integer.parseInt(line[0]), Integer.parseInt(line[1]));
}
buckets = new long[a.size()];
for (int i = 0; i < a.size(); i++) {
buckets[i] = a.get(i);
}
}
init();
}
public HistogramGenerator(long[] buckets, int blockSize) {
this.blockSize = blockSize;
this.buckets = buckets;
init();
}
private void init() {
for (int i = 0; i < buckets.length; i++) {
area += buckets[i];
weightedArea += i * buckets[i];
}
// calculate average file size
meanSize = ((double) blockSize) * ((double) weightedArea) / (area);
}
@Override
public Long nextValue() {
int number = ThreadLocalRandom.current().nextInt((int) area);
int i;
for (i = 0; i < (buckets.length - 1); i++) {
number -= buckets[i];
if (number <= 0) {
return (i + 1) * blockSize;
}
}
return i * blockSize;
}
@Override
public double mean() {
return meanSize;
}
}
| 3,198 | 28.348624 | 84 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/HotspotIntegerGenerator.java | /**
* Copyright (c) 2010 Yahoo! Inc. Copyright (c) 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
/**
* Generate integers resembling a hotspot distribution where x% of operations
* access y% of data items. The parameters specify the bounds for the numbers,
* the percentage of the of the interval which comprises the hot set and
* the percentage of operations that access the hot set. Numbers of the hot set are
* always smaller than any number in the cold set. Elements from the hot set and
* the cold set are chose using a uniform distribution.
*
*/
public class HotspotIntegerGenerator extends NumberGenerator {
private final long lowerBound;
private final long upperBound;
private final long hotInterval;
private final long coldInterval;
private final double hotsetFraction;
private final double hotOpnFraction;
/**
* Create a generator for Hotspot distributions.
*
* @param lowerBound lower bound of the distribution.
* @param upperBound upper bound of the distribution.
* @param hotsetFraction percentage of data item
* @param hotOpnFraction percentage of operations accessing the hot set.
*/
public HotspotIntegerGenerator(long lowerBound, long upperBound,
double hotsetFraction, double hotOpnFraction) {
if (hotsetFraction < 0.0 || hotsetFraction > 1.0) {
System.err.println("Hotset fraction out of range. Setting to 0.0");
hotsetFraction = 0.0;
}
if (hotOpnFraction < 0.0 || hotOpnFraction > 1.0) {
System.err.println("Hot operation fraction out of range. Setting to 0.0");
hotOpnFraction = 0.0;
}
if (lowerBound > upperBound) {
System.err.println("Upper bound of Hotspot generator smaller than the lower bound. " +
"Swapping the values.");
long temp = lowerBound;
lowerBound = upperBound;
upperBound = temp;
}
this.lowerBound = lowerBound;
this.upperBound = upperBound;
this.hotsetFraction = hotsetFraction;
long interval = upperBound - lowerBound + 1;
this.hotInterval = (int) (interval * hotsetFraction);
this.coldInterval = interval - hotInterval;
this.hotOpnFraction = hotOpnFraction;
}
@Override
public Long nextValue() {
long value = 0;
Random random = ThreadLocalRandom.current();
if (random.nextDouble() < hotOpnFraction) {
// Choose a value from the hot set.
value = lowerBound + Math.abs(random.nextLong()) % hotInterval;
} else {
// Choose a value from the cold set.
value = lowerBound + hotInterval + Math.abs(random.nextLong()) % coldInterval;
}
setLastValue(value);
return value;
}
/**
* @return the lowerBound
*/
public long getLowerBound() {
return lowerBound;
}
/**
* @return the upperBound
*/
public long getUpperBound() {
return upperBound;
}
/**
* @return the hotsetFraction
*/
public double getHotsetFraction() {
return hotsetFraction;
}
/**
* @return the hotOpnFraction
*/
public double getHotOpnFraction() {
return hotOpnFraction;
}
@Override
public double mean() {
return hotOpnFraction * (lowerBound + hotInterval / 2.0)
+ (1 - hotOpnFraction) * (lowerBound + hotInterval + coldInterval / 2.0);
}
}
| 3,978 | 31.349593 | 92 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/RandomDiscreteTimestampGenerator.java | /**
* Copyright (c) 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.TimeUnit;
import site.ycsb.Utils;
/**
* A generator that picks from a discrete set of offsets from a base Unix Epoch
* timestamp that returns timestamps in a random order with the guarantee that
* each timestamp is only returned once.
* <p>
* TODO - It would be best to implement some kind of psuedo non-repeating random
* generator for this as it's likely OK that some small percentage of values are
* repeated. For now we just generate all of the offsets in an array, shuffle
* it and then iterate over the array.
* <p>
* Note that {@link #MAX_INTERVALS} defines a hard limit on the size of the
* offset array so that we don't completely blow out the heap.
* <p>
* The constructor parameter {@code intervals} determines how many values will be
* returned by the generator. For example, if the {@code interval} is 60 and the
* {@code timeUnits} are set to {@link TimeUnit#SECONDS} and {@code intervals}
* is set to 60, then the consumer can call {@link #nextValue()} 60 times for
* timestamps within an hour.
*/
public class RandomDiscreteTimestampGenerator extends UnixEpochTimestampGenerator {
/** A hard limit on the size of the offsets array to a void using too much heap. */
public static final int MAX_INTERVALS = 16777216;
/** The total number of intervals for this generator. */
private final int intervals;
// can't be primitives due to the generic params on the sort function :(
/** The array of generated offsets from the base time. */
private final Integer[] offsets;
/** The current index into the offsets array. */
private int offsetIndex;
/**
* Ctor that uses the current system time as current.
* @param interval The interval between timestamps.
* @param timeUnits The time units of the returned Unix Epoch timestamp (as well
* as the units for the interval).
* @param intervals The total number of intervals for the generator.
* @throws IllegalArgumentException if the intervals is larger than {@link #MAX_INTERVALS}
*/
public RandomDiscreteTimestampGenerator(final long interval, final TimeUnit timeUnits,
final int intervals) {
super(interval, timeUnits);
this.intervals = intervals;
offsets = new Integer[intervals];
setup();
}
/**
* Ctor for supplying a starting timestamp.
* The interval between timestamps.
* @param timeUnits The time units of the returned Unix Epoch timestamp (as well
* as the units for the interval).
* @param startTimestamp The start timestamp to use.
* NOTE that this must match the time units used for the interval.
* If the units are in nanoseconds, provide a nanosecond timestamp {@code System.nanoTime()}
* or in microseconds, {@code System.nanoTime() / 1000}
* or in millis, {@code System.currentTimeMillis()}
* @param intervals The total number of intervals for the generator.
* @throws IllegalArgumentException if the intervals is larger than {@link #MAX_INTERVALS}
*/
public RandomDiscreteTimestampGenerator(final long interval, final TimeUnit timeUnits,
final long startTimestamp, final int intervals) {
super(interval, timeUnits, startTimestamp);
this.intervals = intervals;
offsets = new Integer[intervals];
setup();
}
/**
* Generates the offsets and shuffles the array.
*/
private void setup() {
if (intervals > MAX_INTERVALS) {
throw new IllegalArgumentException("Too many intervals for the in-memory "
+ "array. The limit is " + MAX_INTERVALS + ".");
}
offsetIndex = 0;
for (int i = 0; i < intervals; i++) {
offsets[i] = i;
}
Utils.shuffleArray(offsets);
}
@Override
public Long nextValue() {
if (offsetIndex >= offsets.length) {
throw new IllegalStateException("Reached the end of the random timestamp "
+ "intervals: " + offsetIndex);
}
lastTimestamp = currentTimestamp;
currentTimestamp = startTimestamp + (offsets[offsetIndex++] * getOffset(1));
return currentTimestamp;
}
} | 4,816 | 39.478992 | 94 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/FileGenerator.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
/**
* A generator, whose sequence is the lines of a file.
*/
public class FileGenerator extends Generator<String> {
private final String filename;
private String current;
private BufferedReader reader;
/**
* Create a FileGenerator with the given file.
* @param filename The file to read lines from.
*/
public FileGenerator(String filename) {
this.filename = filename;
reloadFile();
}
/**
* Return the next string of the sequence, ie the next line of the file.
*/
@Override
public synchronized String nextValue() {
try {
current = reader.readLine();
return current;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/**
* Return the previous read line.
*/
@Override
public String lastValue() {
return current;
}
/**
* Reopen the file to reuse values.
*/
public synchronized void reloadFile() {
try (Reader r = reader) {
System.err.println("Reload " + filename);
reader = new BufferedReader(new FileReader(filename));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
| 1,963 | 25.186667 | 84 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/UniformGenerator.java | /**
* Copyright (c) 2010 Yahoo! Inc. Copyright (c) 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
* An expression that generates a random value in the specified range.
*/
public class UniformGenerator extends Generator<String> {
private final List<String> values;
private String laststring;
private final UniformLongGenerator gen;
/**
* Creates a generator that will return strings from the specified set uniformly randomly.
*/
public UniformGenerator(Collection<String> values) {
this.values = new ArrayList<>(values);
laststring = null;
gen = new UniformLongGenerator(0, values.size() - 1);
}
/**
* Generate the next string in the distribution.
*/
@Override
public String nextValue() {
laststring = values.get(gen.nextValue().intValue());
return laststring;
}
/**
* Return the previous string generated by the distribution; e.g., returned from the last nextString() call.
* Calling lastString() should not advance the distribution or have any side effects. If nextString() has not yet
* been called, lastString() should return something reasonable.
*/
@Override
public String lastValue() {
if (laststring == null) {
nextValue();
}
return laststring;
}
}
| 1,971 | 29.8125 | 115 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/NumberGenerator.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
/**
* A generator that is capable of generating numeric values.
*
*/
public abstract class NumberGenerator extends Generator<Number> {
private Number lastVal;
/**
* Set the last value generated. NumberGenerator subclasses must use this call
* to properly set the last value, or the {@link #lastValue()} calls won't work.
*/
protected void setLastValue(Number last) {
lastVal = last;
}
@Override
public Number lastValue() {
return lastVal;
}
/**
* Return the expected value (mean) of the values this generator will return.
*/
public abstract double mean();
}
| 1,330 | 27.934783 | 84 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/UniformLongGenerator.java | /**
* Copyright (c) 2010 Yahoo! Inc. Copyright (c) 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.ThreadLocalRandom;
/**
* Generates longs randomly uniform from an interval.
*/
public class UniformLongGenerator extends NumberGenerator {
private final long lb, ub, interval;
/**
* Creates a generator that will return longs uniformly randomly from the
* interval [lb,ub] inclusive (that is, lb and ub are possible values)
* (lb and ub are possible values).
*
* @param lb the lower bound (inclusive) of generated values
* @param ub the upper bound (inclusive) of generated values
*/
public UniformLongGenerator(long lb, long ub) {
this.lb = lb;
this.ub = ub;
interval = this.ub - this.lb + 1;
}
@Override
public Long nextValue() {
long ret = Math.abs(ThreadLocalRandom.current().nextLong()) % interval + lb;
setLastValue(ret);
return ret;
}
@Override
public double mean() {
return ((lb + (long) ub)) / 2.0;
}
}
| 1,649 | 29 | 92 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/CounterGenerator.java | /**
* Copyright (c) 2010 Yahoo! Inc., Copyright (c) 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.atomic.AtomicLong;
/**
* Generates a sequence of integers.
* (0, 1, ...)
*/
public class CounterGenerator extends NumberGenerator {
private final AtomicLong counter;
/**
* Create a counter that starts at countstart.
*/
public CounterGenerator(long countstart) {
counter=new AtomicLong(countstart);
}
@Override
public Long nextValue() {
return counter.getAndIncrement();
}
@Override
public Long lastValue() {
return counter.get() - 1;
}
@Override
public double mean() {
throw new UnsupportedOperationException("Can't compute mean of non-stationary distribution!");
}
}
| 1,385 | 26.176471 | 98 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/ConstantIntegerGenerator.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
/**
* A trivial integer generator that always returns the same value.
*
*/
public class ConstantIntegerGenerator extends NumberGenerator {
private final int i;
/**
* @param i The integer that this generator will always return.
*/
public ConstantIntegerGenerator(int i) {
this.i = i;
}
@Override
public Integer nextValue() {
return i;
}
@Override
public double mean() {
return i;
}
}
| 1,151 | 25.181818 | 84 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/UnixEpochTimestampGenerator.java | /**
* Copyright (c) 2016-2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.TimeUnit;
/**
* A generator that produces Unix epoch timestamps in seconds, milli, micro or
* nanoseconds and increments the stamp a given interval each time
* {@link #nextValue()} is called. The result is emitted as a long in the same
* way calls to {@code System.currentTimeMillis()} and
* {@code System.nanoTime()} behave.
* <p>
* By default, the current system time of the host is used as the starting
* timestamp. Calling {@link #initalizeTimestamp(long)} can adjust the timestamp
* back or forward in time. For example, if a workload will generate an hour of
* data at 1 minute intervals, then to set the start timestamp an hour in the past
* from the current run, use:
* <pre>{@code
* UnixEpochTimestampGenerator generator = new UnixEpochTimestampGenerator();
* generator.initalizeTimestamp(-60);
* }</pre>
* A constructor is also present for setting an explicit start time.
* Negative intervals are supported as well for iterating back in time.
* <p>
* WARNING: This generator is not thread safe and should not called from multiple
* threads.
*/
public class UnixEpochTimestampGenerator extends Generator<Long> {
/** The base timestamp used as a starting reference. */
protected long startTimestamp;
/** The current timestamp that will be incremented. */
protected long currentTimestamp;
/** The last used timestamp. Should always be one interval behind current. */
protected long lastTimestamp;
/** The interval to increment by. Multiplied by {@link #timeUnits}. */
protected long interval;
/** The units of time the interval represents. */
protected TimeUnit timeUnits;
/**
* Default ctor with the current system time and a 60 second interval.
*/
public UnixEpochTimestampGenerator() {
this(60, TimeUnit.SECONDS);
}
/**
* Ctor that uses the current system time as current.
* @param interval The interval for incrementing the timestamp.
* @param timeUnits The units of time the increment represents.
*/
public UnixEpochTimestampGenerator(final long interval, final TimeUnit timeUnits) {
this.interval = interval;
this.timeUnits = timeUnits;
// move the first timestamp by 1 interval so that the first call to nextValue
// returns this timestamp
initalizeTimestamp(-1);
currentTimestamp -= getOffset(1);
lastTimestamp = currentTimestamp;
}
/**
* Ctor for supplying a starting timestamp.
* @param interval The interval for incrementing the timestamp.
* @param timeUnits The units of time the increment represents.
* @param startTimestamp The start timestamp to use.
* NOTE that this must match the time units used for the interval.
* If the units are in nanoseconds, provide a nanosecond timestamp {@code System.nanoTime()}
* or in microseconds, {@code System.nanoTime() / 1000}
* or in millis, {@code System.currentTimeMillis()}
* or seconds and any interval above, {@code System.currentTimeMillis() / 1000}
*/
public UnixEpochTimestampGenerator(final long interval, final TimeUnit timeUnits,
final long startTimestamp) {
this.interval = interval;
this.timeUnits = timeUnits;
// move the first timestamp by 1 interval so that the first call to nextValue
// returns this timestamp
currentTimestamp = startTimestamp - getOffset(1);
this.startTimestamp = currentTimestamp;
lastTimestamp = currentTimestamp - getOffset(1);
}
/**
* Sets the starting timestamp to the current system time plus the interval offset.
* E.g. to set the time an hour in the past, supply a value of {@code -60}.
* @param intervalOffset The interval to increment or decrement by.
*/
public void initalizeTimestamp(final long intervalOffset) {
switch (timeUnits) {
case NANOSECONDS:
currentTimestamp = System.nanoTime() + getOffset(intervalOffset);
break;
case MICROSECONDS:
currentTimestamp = (System.nanoTime() / 1000) + getOffset(intervalOffset);
break;
case MILLISECONDS:
currentTimestamp = System.currentTimeMillis() + getOffset(intervalOffset);
break;
case SECONDS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case MINUTES:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case HOURS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case DAYS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
default:
throw new IllegalArgumentException("Unhandled time unit type: " + timeUnits);
}
startTimestamp = currentTimestamp;
}
@Override
public Long nextValue() {
lastTimestamp = currentTimestamp;
currentTimestamp += getOffset(1);
return currentTimestamp;
}
/**
* Returns the proper increment offset to use given the interval and timeunits.
* @param intervalOffset The amount of offset to multiply by.
* @return An offset value to adjust the timestamp by.
*/
public long getOffset(final long intervalOffset) {
switch (timeUnits) {
case NANOSECONDS:
case MICROSECONDS:
case MILLISECONDS:
case SECONDS:
return intervalOffset * interval;
case MINUTES:
return intervalOffset * interval * (long) 60;
case HOURS:
return intervalOffset * interval * (long) (60 * 60);
case DAYS:
return intervalOffset * interval * (long) (60 * 60 * 24);
default:
throw new IllegalArgumentException("Unhandled time unit type: " + timeUnits);
}
}
@Override
public Long lastValue() {
return lastTimestamp;
}
/** @return The current timestamp as set by the last call to {@link #nextValue()} */
public long currentValue() {
return currentTimestamp;
}
} | 6,651 | 35.349727 | 94 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/IncrementingPrintableStringGenerator.java | /**
* Copyright (c) 2016-2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.*;
/**
* A generator that produces strings of {@link #length} using a set of code points
* from {@link #characterSet}. Each time {@link #nextValue()} is executed, the string
* is incremented by one character. Eventually the string may rollover to the beginning
* and the user may choose to have the generator throw a NoSuchElementException at that
* point or continue incrementing. (By default the generator will continue incrementing).
* <p>
* For example, if we set a length of 2 characters and the character set includes
* [A, B] then the generator output will be:
* <ul>
* <li>AA</li>
* <li>AB</li>
* <li>BA</li>
* <li>BB</li>
* <li>AA <-- rolled over</li>
* </ul>
* <p>
* This class includes some default character sets to choose from including ASCII
* and plane 0 UTF.
*/
public class IncrementingPrintableStringGenerator extends Generator<String> {
/** Default string length for the generator. */
public static final int DEFAULTSTRINGLENGTH = 8;
/**
* Set of all character types that include every symbol other than non-printable
* control characters.
*/
public static final Set<Integer> CHAR_TYPES_ALL_BUT_CONTROL;
static {
CHAR_TYPES_ALL_BUT_CONTROL = new HashSet<Integer>(24);
// numbers
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.DECIMAL_DIGIT_NUMBER);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.LETTER_NUMBER);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.OTHER_NUMBER);
// letters
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.UPPERCASE_LETTER);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.LOWERCASE_LETTER);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.TITLECASE_LETTER);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.OTHER_LETTER);
// marks
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.COMBINING_SPACING_MARK);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.NON_SPACING_MARK);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.ENCLOSING_MARK);
// punctuation
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.CONNECTOR_PUNCTUATION);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.DASH_PUNCTUATION);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.START_PUNCTUATION);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.END_PUNCTUATION);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.INITIAL_QUOTE_PUNCTUATION);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.FINAL_QUOTE_PUNCTUATION);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.OTHER_PUNCTUATION);
// symbols
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.MATH_SYMBOL);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.CURRENCY_SYMBOL);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.MODIFIER_SYMBOL);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.OTHER_SYMBOL);
// separators
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.SPACE_SEPARATOR);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.LINE_SEPARATOR);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.PARAGRAPH_SEPARATOR);
}
/**
* Set of character types including only decimals, upper and lower case letters.
*/
public static final Set<Integer> CHAR_TYPES_BASIC_ALPHA;
static {
CHAR_TYPES_BASIC_ALPHA = new HashSet<Integer>(2);
CHAR_TYPES_BASIC_ALPHA.add((int) Character.UPPERCASE_LETTER);
CHAR_TYPES_BASIC_ALPHA.add((int) Character.LOWERCASE_LETTER);
}
/**
* Set of character types including only decimals, upper and lower case letters.
*/
public static final Set<Integer> CHAR_TYPES_BASIC_ALPHANUMERICS;
static {
CHAR_TYPES_BASIC_ALPHANUMERICS = new HashSet<Integer>(3);
CHAR_TYPES_BASIC_ALPHANUMERICS.add((int) Character.DECIMAL_DIGIT_NUMBER);
CHAR_TYPES_BASIC_ALPHANUMERICS.add((int) Character.UPPERCASE_LETTER);
CHAR_TYPES_BASIC_ALPHANUMERICS.add((int) Character.LOWERCASE_LETTER);
}
/**
* Set of character types including only decimals, letter numbers,
* other numbers, upper, lower, title case as well as letter modifiers
* and other letters.
*/
public static final Set<Integer> CHAR_TYPE_EXTENDED_ALPHANUMERICS;
static {
CHAR_TYPE_EXTENDED_ALPHANUMERICS = new HashSet<Integer>(8);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.DECIMAL_DIGIT_NUMBER);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.LETTER_NUMBER);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.OTHER_NUMBER);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.UPPERCASE_LETTER);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.LOWERCASE_LETTER);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.TITLECASE_LETTER);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.MODIFIER_LETTER);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.OTHER_LETTER);
}
/** The character set to iterate over. */
private final int[] characterSet;
/** An array indices matching a position in the output string. */
private int[] indices;
/** The length of the output string in characters. */
private final int length;
/** The last value returned by the generator. Should be null if {@link #nextValue()}
* has not been called.*/
private String lastValue;
/** Whether or not to throw an exception when the string rolls over. */
private boolean throwExceptionOnRollover;
/** Whether or not the generator has rolled over. */
private boolean hasRolledOver;
/**
* Generates strings of 8 characters using only the upper and lower case alphabetical
* characters from the ASCII set.
*/
public IncrementingPrintableStringGenerator() {
this(DEFAULTSTRINGLENGTH, printableBasicAlphaASCIISet());
}
/**
* Generates strings of {@link #length} characters using only the upper and lower
* case alphabetical characters from the ASCII set.
* @param length The length of string to return from the generator.
* @throws IllegalArgumentException if the length is less than one.
*/
public IncrementingPrintableStringGenerator(final int length) {
this(length, printableBasicAlphaASCIISet());
}
/**
* Generates strings of {@link #length} characters using the code points in
* {@link #characterSet}.
* @param length The length of string to return from the generator.
* @param characterSet A set of code points to choose from. Code points in the
* set can be in any order, not necessarily lexical.
* @throws IllegalArgumentException if the length is less than one or the character
* set has fewer than one code points.
*/
public IncrementingPrintableStringGenerator(final int length, final int[] characterSet) {
if (length < 1) {
throw new IllegalArgumentException("Length must be greater than or equal to 1");
}
if (characterSet == null || characterSet.length < 1) {
throw new IllegalArgumentException("Character set must have at least one character");
}
this.length = length;
this.characterSet = characterSet;
indices = new int[length];
}
@Override
public String nextValue() {
if (hasRolledOver && throwExceptionOnRollover) {
throw new NoSuchElementException("The generator has rolled over to the beginning");
}
final StringBuilder buffer = new StringBuilder(length);
for (int i = 0; i < length; i++) {
buffer.append(Character.toChars(characterSet[indices[i]]));
}
// increment the indices;
for (int i = length - 1; i >= 0; --i) {
if (indices[i] >= characterSet.length - 1) {
indices[i] = 0;
if (i == 0 || characterSet.length == 1 && lastValue != null) {
hasRolledOver = true;
}
} else {
++indices[i];
break;
}
}
lastValue = buffer.toString();
return lastValue;
}
@Override
public String lastValue() {
return lastValue;
}
/** @param exceptionOnRollover Whether or not to throw an exception on rollover. */
public void setThrowExceptionOnRollover(final boolean exceptionOnRollover) {
this.throwExceptionOnRollover = exceptionOnRollover;
}
/** @return Whether or not to throw an exception on rollover. */
public boolean getThrowExceptionOnRollover() {
return throwExceptionOnRollover;
}
/**
* Returns an array of printable code points with only the upper and lower
* case alphabetical characters from the basic ASCII set.
* @return An array of code points
*/
public static int[] printableBasicAlphaASCIISet() {
final List<Integer> validCharacters =
generatePrintableCharacterSet(0, 127, null, false, CHAR_TYPES_BASIC_ALPHA);
final int[] characterSet = new int[validCharacters.size()];
for (int i = 0; i < validCharacters.size(); i++) {
characterSet[i] = validCharacters.get(i);
}
return characterSet;
}
/**
* Returns an array of printable code points with the upper and lower case
* alphabetical characters as well as the numeric values from the basic
* ASCII set.
* @return An array of code points
*/
public static int[] printableBasicAlphaNumericASCIISet() {
final List<Integer> validCharacters =
generatePrintableCharacterSet(0, 127, null, false, CHAR_TYPES_BASIC_ALPHANUMERICS);
final int[] characterSet = new int[validCharacters.size()];
for (int i = 0; i < validCharacters.size(); i++) {
characterSet[i] = validCharacters.get(i);
}
return characterSet;
}
/**
* Returns an array of printable code points with the entire basic ASCII table,
* including spaces. Excludes new lines.
* @return An array of code points
*/
public static int[] fullPrintableBasicASCIISet() {
final List<Integer> validCharacters =
generatePrintableCharacterSet(32, 127, null, false, null);
final int[] characterSet = new int[validCharacters.size()];
for (int i = 0; i < validCharacters.size(); i++) {
characterSet[i] = validCharacters.get(i);
}
return characterSet;
}
/**
* Returns an array of printable code points with the entire basic ASCII table,
* including spaces and new lines.
* @return An array of code points
*/
public static int[] fullPrintableBasicASCIISetWithNewlines() {
final List<Integer> validCharacters = new ArrayList<Integer>();
validCharacters.add(10); // newline
validCharacters.addAll(generatePrintableCharacterSet(32, 127, null, false, null));
final int[] characterSet = new int[validCharacters.size()];
for (int i = 0; i < validCharacters.size(); i++) {
characterSet[i] = validCharacters.get(i);
}
return characterSet;
}
/**
* Returns an array of printable code points the first plane of Unicode characters
* including only the alpha-numeric values.
* @return An array of code points
*/
public static int[] printableAlphaNumericPlaneZeroSet() {
final List<Integer> validCharacters =
generatePrintableCharacterSet(0, 65535, null, false, CHAR_TYPES_BASIC_ALPHANUMERICS);
final int[] characterSet = new int[validCharacters.size()];
for (int i = 0; i < validCharacters.size(); i++) {
characterSet[i] = validCharacters.get(i);
}
return characterSet;
}
/**
* Returns an array of printable code points the first plane of Unicode characters
* including all printable characters.
* @return An array of code points
*/
public static int[] fullPrintablePlaneZeroSet() {
final List<Integer> validCharacters =
generatePrintableCharacterSet(0, 65535, null, false, CHAR_TYPES_ALL_BUT_CONTROL);
final int[] characterSet = new int[validCharacters.size()];
for (int i = 0; i < validCharacters.size(); i++) {
characterSet[i] = validCharacters.get(i);
}
return characterSet;
}
/**
* Generates a list of code points based on a range and filters.
* These can be used for generating strings with various ASCII and/or
* Unicode printable character sets for use with DBs that may have
* character limitations.
* <p>
* Note that control, surrogate, format, private use and unassigned
* code points are skipped.
* @param startCodePoint The starting code point, inclusive.
* @param lastCodePoint The final code point, inclusive.
* @param characterTypesFilter An optional set of allowable character
* types. See {@link Character} for types.
* @param isFilterAllowableList Determines whether the {@code allowableTypes}
* set is inclusive or exclusive. When true, only those code points that
* appear in the list will be included in the resulting set. Otherwise
* matching code points are excluded.
* @param allowableTypes An optional list of code points for inclusion or
* exclusion.
* @return A list of code points matching the given range and filters. The
* list may be empty but is guaranteed not to be null.
*/
public static List<Integer> generatePrintableCharacterSet(
final int startCodePoint,
final int lastCodePoint,
final Set<Integer> characterTypesFilter,
final boolean isFilterAllowableList,
final Set<Integer> allowableTypes) {
// since we don't know the final size of the allowable character list we
// start with a list then we'll flatten it to an array.
final List<Integer> validCharacters = new ArrayList<Integer>(lastCodePoint);
for (int codePoint = startCodePoint; codePoint <= lastCodePoint; ++codePoint) {
if (allowableTypes != null &&
!allowableTypes.contains(Character.getType(codePoint))) {
continue;
} else {
// skip control points, formats, surrogates, etc
final int type = Character.getType(codePoint);
if (type == Character.CONTROL ||
type == Character.SURROGATE ||
type == Character.FORMAT ||
type == Character.PRIVATE_USE ||
type == Character.UNASSIGNED) {
continue;
}
}
if (characterTypesFilter != null) {
// if the filter is enabled then we need to make sure the code point
// is in the allowable list if it's a whitelist or that the code point
// is NOT in the list if it's a blacklist.
if ((isFilterAllowableList && !characterTypesFilter.contains(codePoint)) ||
(characterTypesFilter.contains(codePoint))) {
continue;
}
}
validCharacters.add(codePoint);
}
return validCharacters;
}
}
| 15,068 | 37.638462 | 93 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/Generator.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
/**
* An expression that generates a sequence of values, following some distribution (Uniform, Zipfian, Sequential, etc.).
*/
public abstract class Generator<V> {
/**
* Generate the next value in the distribution.
*/
public abstract V nextValue();
/**
* Return the previous value generated by the distribution; e.g., returned from the last {@link Generator#nextValue()}
* call.
* Calling {@link #lastValue()} should not advance the distribution or have any side effects. If {@link #nextValue()}
* has not yet been called, {@link #lastValue()} should return something reasonable.
*/
public abstract V lastValue();
public final String nextString() {
V ret = nextValue();
return ret == null ? null : ret.toString();
}
public final String lastString() {
V ret = lastValue();
return ret == null ? null : ret.toString();
}
}
| 1,603 | 32.416667 | 120 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/DiscreteGenerator.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.ThreadLocalRandom;
import static java.util.Objects.requireNonNull;
/**
* Generates a distribution by choosing from a discrete set of values.
*/
public class DiscreteGenerator extends Generator<String> {
private static class Pair {
private double weight;
private String value;
Pair(double weight, String value) {
this.weight = weight;
this.value = requireNonNull(value);
}
}
private final Collection<Pair> values = new ArrayList<>();
private String lastvalue;
public DiscreteGenerator() {
lastvalue = null;
}
/**
* Generate the next string in the distribution.
*/
@Override
public String nextValue() {
double sum = 0;
for (Pair p : values) {
sum += p.weight;
}
double val = ThreadLocalRandom.current().nextDouble();
for (Pair p : values) {
double pw = p.weight / sum;
if (val < pw) {
return p.value;
}
val -= pw;
}
throw new AssertionError("oops. should not get here.");
}
/**
* Return the previous string generated by the distribution; e.g., returned from the last nextString() call.
* Calling lastString() should not advance the distribution or have any side effects. If nextString() has not yet
* been called, lastString() should return something reasonable.
*/
@Override
public String lastValue() {
if (lastvalue == null) {
lastvalue = nextValue();
}
return lastvalue;
}
public void addValue(double weight, String value) {
values.add(new Pair(weight, value));
}
}
| 2,363 | 24.978022 | 115 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/SequentialGenerator.java | /**
* Copyright (c) 2016-2017 YCSB Contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.atomic.AtomicLong;
/**
* Generates a sequence of integers 0, 1, ...
*/
public class SequentialGenerator extends NumberGenerator {
private final AtomicLong counter;
private long interval;
private long countstart;
/**
* Create a counter that starts at countstart.
*/
public SequentialGenerator(long countstart, long countend) {
counter = new AtomicLong();
setLastValue(counter.get());
this.countstart = countstart;
interval = countend - countstart + 1;
}
/**
* If the generator returns numeric (long) values, return the next value as an long.
* Default is to return -1, which is appropriate for generators that do not return numeric values.
*/
public long nextLong() {
long ret = countstart + counter.getAndIncrement() % interval;
setLastValue(ret);
return ret;
}
@Override
public Number nextValue() {
long ret = countstart + counter.getAndIncrement() % interval;
setLastValue(ret);
return ret;
}
@Override
public Number lastValue() {
return counter.get() + 1;
}
@Override
public double mean() {
throw new UnsupportedOperationException("Can't compute mean of non-stationary distribution!");
}
}
| 1,933 | 27.865672 | 100 | java |
null | NearPMSW-main/baseline/logging/YCSB2/core/src/main/java/site/ycsb/generator/ZipfianGenerator.java | /**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.ThreadLocalRandom;
/**
* A generator of a zipfian distribution. It produces a sequence of items, such that some items are more popular than
* others, according to a zipfian distribution. When you construct an instance of this class, you specify the number
* of items in the set to draw from, either by specifying an itemcount (so that the sequence is of items from 0 to
* itemcount-1) or by specifying a min and a max (so that the sequence is of items from min to max inclusive). After
* you construct the instance, you can change the number of items by calling nextInt(itemcount) or nextLong(itemcount).
*
* Note that the popular items will be clustered together, e.g. item 0 is the most popular, item 1 the second most
* popular, and so on (or min is the most popular, min+1 the next most popular, etc.) If you don't want this clustering,
* and instead want the popular items scattered throughout the item space, then use ScrambledZipfianGenerator instead.
*
* Be aware: initializing this generator may take a long time if there are lots of items to choose from (e.g. over a
* minute for 100 million objects). This is because certain mathematical values need to be computed to properly
* generate a zipfian skew, and one of those values (zeta) is a sum sequence from 1 to n, where n is the itemcount.
* Note that if you increase the number of items in the set, we can compute a new zeta incrementally, so it should be
* fast unless you have added millions of items. However, if you decrease the number of items, we recompute zeta from
* scratch, so this can take a long time.
*
* The algorithm used here is from "Quickly Generating Billion-Record Synthetic Databases", Jim Gray et al, SIGMOD 1994.
*/
public class ZipfianGenerator extends NumberGenerator {
public static final double ZIPFIAN_CONSTANT = 0.99;
/**
* Number of items.
*/
private final long items;
/**
* Min item to generate.
*/
private final long base;
/**
* The zipfian constant to use.
*/
private final double zipfianconstant;
/**
* Computed parameters for generating the distribution.
*/
private double alpha, zetan, eta, theta, zeta2theta;
/**
* The number of items used to compute zetan the last time.
*/
private long countforzeta;
/**
* Flag to prevent problems. If you increase the number of items the zipfian generator is allowed to choose from,
* this code will incrementally compute a new zeta value for the larger itemcount. However, if you decrease the
* number of items, the code computes zeta from scratch; this is expensive for large itemsets.
* Usually this is not intentional; e.g. one thread thinks the number of items is 1001 and calls "nextLong()" with
* that item count; then another thread who thinks the number of items is 1000 calls nextLong() with itemcount=1000
* triggering the expensive recomputation. (It is expensive for 100 million items, not really for 1000 items.) Why
* did the second thread think there were only 1000 items? maybe it read the item count before the first thread
* incremented it. So this flag allows you to say if you really do want that recomputation. If true, then the code
* will recompute zeta if the itemcount goes down. If false, the code will assume itemcount only goes up, and never
* recompute.
*/
private boolean allowitemcountdecrease = false;
/******************************* Constructors **************************************/
/**
* Create a zipfian generator for the specified number of items.
* @param items The number of items in the distribution.
*/
public ZipfianGenerator(long items) {
this(0, items - 1);
}
/**
* Create a zipfian generator for items between min and max.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
*/
public ZipfianGenerator(long min, long max) {
this(min, max, ZIPFIAN_CONSTANT);
}
/**
* Create a zipfian generator for the specified number of items using the specified zipfian constant.
*
* @param items The number of items in the distribution.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long items, double zipfianconstant) {
this(0, items - 1, zipfianconstant);
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant) {
this(min, max, zipfianconstant, zetastatic(max - min + 1, zipfianconstant));
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant, using
* the precomputed value of zeta.
*
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
* @param zetan The precomputed zeta constant.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant, double zetan) {
items = max - min + 1;
base = min;
this.zipfianconstant = zipfianconstant;
theta = this.zipfianconstant;
zeta2theta = zeta(2, theta);
alpha = 1.0 / (1.0 - theta);
this.zetan = zetan;
countforzeta = items;
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / this.zetan);
nextValue();
}
/**************************************************************************/
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant thetaVal. Remember the value of n, so if we change the itemcount, we can recompute zeta.
*
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
*/
double zeta(long n, double thetaVal) {
countforzeta = n;
return zetastatic(n, thetaVal);
}
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant theta. This is a static version of the function which will not remember n.
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
*/
static double zetastatic(long n, double theta) {
return zetastatic(0, n, theta, 0);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant thetaVal. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
*
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
double zeta(long st, long n, double thetaVal, double initialsum) {
countforzeta = n;
return zetastatic(st, n, thetaVal, initialsum);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant theta. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
static double zetastatic(long st, long n, double theta, double initialsum) {
double sum = initialsum;
for (long i = st; i < n; i++) {
sum += 1 / (Math.pow(i + 1, theta));
}
//System.out.println("countforzeta="+countforzeta);
return sum;
}
/****************************************************************************************/
/**
* Generate the next item as a long.
*
* @param itemcount The number of items in the distribution.
* @return The next item in the sequence.
*/
long nextLong(long itemcount) {
//from "Quickly Generating Billion-Record Synthetic Databases", Jim Gray et al, SIGMOD 1994
if (itemcount != countforzeta) {
//have to recompute zetan and eta, since they depend on itemcount
synchronized (this) {
if (itemcount > countforzeta) {
//System.err.println("WARNING: Incrementally recomputing Zipfian distribtion. (itemcount="+itemcount+"
// countforzeta="+countforzeta+")");
//we have added more items. can compute zetan incrementally, which is cheaper
zetan = zeta(countforzeta, itemcount, theta, zetan);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
} else if ((itemcount < countforzeta) && (allowitemcountdecrease)) {
//have to start over with zetan
//note : for large itemsets, this is very slow. so don't do it!
//TODO: can also have a negative incremental computation, e.g. if you decrease the number of items,
// then just subtract the zeta sequence terms for the items that went away. This would be faster than
// recomputing from scratch when the number of items decreases
System.err.println("WARNING: Recomputing Zipfian distribtion. This is slow and should be avoided. " +
"(itemcount=" + itemcount + " countforzeta=" + countforzeta + ")");
zetan = zeta(itemcount, theta);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
}
}
}
double u = ThreadLocalRandom.current().nextDouble();
double uz = u * zetan;
if (uz < 1.0) {
return base;
}
if (uz < 1.0 + Math.pow(0.5, theta)) {
return base + 1;
}
long ret = base + (long) ((itemcount) * Math.pow(eta * u - eta + 1, alpha));
setLastValue(ret);
return ret;
}
/**
* Return the next value, skewed by the Zipfian distribution. The 0th item will be the most popular, followed by
* the 1st, followed by the 2nd, etc. (Or, if min != 0, the min-th item is the most popular, the min+1th item the
* next most popular, etc.) If you want the popular items scattered throughout the item space, use
* ScrambledZipfianGenerator instead.
*/
@Override
public Long nextValue() {
return nextLong(items);
}
public static void main(String[] args) {
new ZipfianGenerator(ScrambledZipfianGenerator.ITEM_COUNT);
}
/**
* @todo Implement ZipfianGenerator.mean()
*/
@Override
public double mean() {
throw new UnsupportedOperationException("@todo implement ZipfianGenerator.mean()");
}
}
| 11,748 | 39.653979 | 120 | java |
null | NearPMSW-main/baseline/logging/YCSB2/orientdb/src/test/java/site/ycsb/db/OrientDBClientTest.java | /**
* Copyright (c) 2015 - 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import com.orientechnologies.orient.core.db.OPartitionedDatabasePool;
import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx;
import com.orientechnologies.orient.core.dictionary.ODictionary;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import site.ycsb.ByteIterator;
import site.ycsb.DBException;
import site.ycsb.StringByteIterator;
import org.junit.*;
import java.util.*;
import static org.junit.Assert.*;
/**
* Created by kruthar on 12/29/15.
*/
public class OrientDBClientTest {
// TODO: This must be copied because it is private in OrientDBClient, but this should defer to table property.
private static final String CLASS = "usertable";
private static final int FIELD_LENGTH = 32;
private static final String FIELD_PREFIX = "FIELD";
private static final String KEY_PREFIX = "user";
private static final int NUM_FIELDS = 3;
private static final String TEST_DB_URL = "memory:test";
private static OrientDBClient orientDBClient = null;
@Before
public void setup() throws DBException {
orientDBClient = new OrientDBClient();
Properties p = new Properties();
// TODO: Extract the property names into final variables in OrientDBClient
p.setProperty("orientdb.url", TEST_DB_URL);
orientDBClient.setProperties(p);
orientDBClient.init();
}
@After
public void teardown() throws DBException {
if (orientDBClient != null) {
orientDBClient.cleanup();
}
}
/*
This is a copy of buildDeterministicValue() from core:site.ycsb.workloads.CoreWorkload.java.
That method is neither public nor static so we need a copy.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = FIELD_LENGTH;
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
/*
Inserts a row of deterministic values for the given insertKey using the orientDBClient.
*/
private Map<String, ByteIterator> insertRow(String insertKey) {
HashMap<String, ByteIterator> insertMap = new HashMap<>();
for (int i = 0; i < 3; i++) {
insertMap.put(FIELD_PREFIX + i, new StringByteIterator(buildDeterministicValue(insertKey, FIELD_PREFIX + i)));
}
orientDBClient.insert(CLASS, insertKey, insertMap);
return insertMap;
}
@Test
public void insertTest() {
String insertKey = "user0";
Map<String, ByteIterator> insertMap = insertRow(insertKey);
OPartitionedDatabasePool pool = orientDBClient.getDatabasePool();
try(ODatabaseDocumentTx db = pool.acquire()) {
ODictionary<ORecord> dictionary = db.getDictionary();
ODocument result = dictionary.get(insertKey);
assertTrue("Assert a row was inserted.", result != null);
for (int i = 0; i < NUM_FIELDS; i++) {
assertEquals("Assert all inserted columns have correct values.", result.field(FIELD_PREFIX + i),
insertMap.get(FIELD_PREFIX + i).toString());
}
}
}
@Test
public void updateTest() {
String preupdateString = "preupdate";
String user0 = "user0";
String user1 = "user1";
String user2 = "user2";
OPartitionedDatabasePool pool = orientDBClient.getDatabasePool();
try(ODatabaseDocumentTx db = pool.acquire()) {
// Manually insert three documents
for (String key : Arrays.asList(user0, user1, user2)) {
ODocument doc = new ODocument(CLASS);
for (int i = 0; i < NUM_FIELDS; i++) {
doc.field(FIELD_PREFIX + i, preupdateString);
}
doc.save();
ODictionary<ORecord> dictionary = db.getDictionary();
dictionary.put(key, doc);
}
}
HashMap<String, ByteIterator> updateMap = new HashMap<>();
for (int i = 0; i < NUM_FIELDS; i++) {
updateMap.put(FIELD_PREFIX + i, new StringByteIterator(buildDeterministicValue(user1, FIELD_PREFIX + i)));
}
orientDBClient.update(CLASS, user1, updateMap);
try(ODatabaseDocumentTx db = pool.acquire()) {
ODictionary<ORecord> dictionary = db.getDictionary();
// Ensure that user0 record was not changed
ODocument result = dictionary.get(user0);
for (int i = 0; i < NUM_FIELDS; i++) {
assertEquals("Assert first row fields contain preupdateString", result.field(FIELD_PREFIX + i), preupdateString);
}
// Check that all the columns have expected values for user1 record
result = dictionary.get(user1);
for (int i = 0; i < NUM_FIELDS; i++) {
assertEquals("Assert updated row fields are correct", result.field(FIELD_PREFIX + i),
updateMap.get(FIELD_PREFIX + i).toString());
}
// Ensure that user2 record was not changed
result = dictionary.get(user2);
for (int i = 0; i < NUM_FIELDS; i++) {
assertEquals("Assert third row fields contain preupdateString", result.field(FIELD_PREFIX + i), preupdateString);
}
}
}
@Test
public void readTest() {
String insertKey = "user0";
Map<String, ByteIterator> insertMap = insertRow(insertKey);
HashSet<String> readFields = new HashSet<>();
HashMap<String, ByteIterator> readResultMap = new HashMap<>();
// Test reading a single field
readFields.add("FIELD0");
orientDBClient.read(CLASS, insertKey, readFields, readResultMap);
assertEquals("Assert that result has correct number of fields", readFields.size(), readResultMap.size());
for (String field : readFields) {
assertEquals("Assert " + field + " was read correctly", insertMap.get(field).toString(), readResultMap.get(field).toString());
}
readResultMap = new HashMap<>();
// Test reading all fields
readFields.add("FIELD1");
readFields.add("FIELD2");
orientDBClient.read(CLASS, insertKey, readFields, readResultMap);
assertEquals("Assert that result has correct number of fields", readFields.size(), readResultMap.size());
for (String field : readFields) {
assertEquals("Assert " + field + " was read correctly", insertMap.get(field).toString(), readResultMap.get(field).toString());
}
}
@Test
public void deleteTest() {
String user0 = "user0";
String user1 = "user1";
String user2 = "user2";
insertRow(user0);
insertRow(user1);
insertRow(user2);
orientDBClient.delete(CLASS, user1);
OPartitionedDatabasePool pool = orientDBClient.getDatabasePool();
try(ODatabaseDocumentTx db = pool.acquire()) {
ODictionary<ORecord> dictionary = db.getDictionary();
assertNotNull("Assert user0 still exists", dictionary.get(user0));
assertNull("Assert user1 does not exist", dictionary.get(user1));
assertNotNull("Assert user2 still exists", dictionary.get(user2));
}
}
@Test
public void scanTest() {
Map<String, Map<String, ByteIterator>> keyMap = new HashMap<>();
for (int i = 0; i < 5; i++) {
String insertKey = KEY_PREFIX + i;
keyMap.put(insertKey, insertRow(insertKey));
}
Set<String> fieldSet = new HashSet<>();
fieldSet.add("FIELD0");
fieldSet.add("FIELD1");
int startIndex = 0;
int resultRows = 3;
Vector<HashMap<String, ByteIterator>> resultVector = new Vector<>();
orientDBClient.scan(CLASS, KEY_PREFIX + startIndex, resultRows, fieldSet, resultVector);
// Check the resultVector is the correct size
assertEquals("Assert the correct number of results rows were returned", resultRows, resultVector.size());
int testIndex = startIndex;
// Check each vector row to make sure we have the correct fields
for (HashMap<String, ByteIterator> result : resultVector) {
assertEquals("Assert that this row has the correct number of fields", fieldSet.size(), result.size());
for (String field : fieldSet) {
assertEquals("Assert this field is correct in this row", keyMap.get(KEY_PREFIX + testIndex).get(field).toString(),
result.get(field).toString());
}
testIndex++;
}
}
}
| 8,882 | 34.110672 | 132 | java |
null | NearPMSW-main/baseline/logging/YCSB2/orientdb/src/main/java/site/ycsb/db/package-info.java | /*
* Copyright (c) 2015 - 2016, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="http://orientdb.com/orientdb/">OrientDB</a>.
*/
package site.ycsb.db;
| 769 | 32.478261 | 77 | java |
null | NearPMSW-main/baseline/logging/YCSB2/orientdb/src/main/java/site/ycsb/db/OrientDBClient.java | /**
* Copyright (c) 2012 - 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import com.orientechnologies.orient.client.remote.OServerAdmin;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.OPartitionedDatabasePool;
import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx;
import com.orientechnologies.orient.core.dictionary.ODictionary;
import com.orientechnologies.orient.core.exception.OConcurrentModificationException;
import com.orientechnologies.orient.core.index.OIndexCursor;
import com.orientechnologies.orient.core.record.ORecord;
import com.orientechnologies.orient.core.record.impl.ODocument;
import site.ycsb.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
* OrientDB client for YCSB framework.
*/
public class OrientDBClient extends DB {
private static final String URL_PROPERTY = "orientdb.url";
private static final String URL_PROPERTY_DEFAULT =
"plocal:." + File.separator + "target" + File.separator + "databases" + File.separator + "ycsb";
private static final String USER_PROPERTY = "orientdb.user";
private static final String USER_PROPERTY_DEFAULT = "admin";
private static final String PASSWORD_PROPERTY = "orientdb.password";
private static final String PASSWORD_PROPERTY_DEFAULT = "admin";
private static final String NEWDB_PROPERTY = "orientdb.newdb";
private static final String NEWDB_PROPERTY_DEFAULT = "false";
private static final String STORAGE_TYPE_PROPERTY = "orientdb.remote.storagetype";
private static final String ORIENTDB_DOCUMENT_TYPE = "document";
private static final String CLASS = "usertable";
private static final Lock INIT_LOCK = new ReentrantLock();
private static boolean dbChecked = false;
private static volatile OPartitionedDatabasePool databasePool;
private static boolean initialized = false;
private static int clientCounter = 0;
private boolean isRemote = false;
private static final Logger LOG = LoggerFactory.getLogger(OrientDBClient.class);
/**
* Initialize any state for this DB. Called once per DB instance; there is one DB instance per client thread.
*/
public void init() throws DBException {
// initialize OrientDB driver
final Properties props = getProperties();
String url = props.getProperty(URL_PROPERTY, URL_PROPERTY_DEFAULT);
String user = props.getProperty(USER_PROPERTY, USER_PROPERTY_DEFAULT);
String password = props.getProperty(PASSWORD_PROPERTY, PASSWORD_PROPERTY_DEFAULT);
Boolean newdb = Boolean.parseBoolean(props.getProperty(NEWDB_PROPERTY, NEWDB_PROPERTY_DEFAULT));
String remoteStorageType = props.getProperty(STORAGE_TYPE_PROPERTY);
INIT_LOCK.lock();
try {
clientCounter++;
if (!initialized) {
OGlobalConfiguration.dumpConfiguration(System.out);
LOG.info("OrientDB loading database url = " + url);
ODatabaseDocumentTx db = new ODatabaseDocumentTx(url);
if (db.getStorage().isRemote()) {
isRemote = true;
}
if (!dbChecked) {
if (!isRemote) {
if (newdb) {
if (db.exists()) {
db.open(user, password);
LOG.info("OrientDB drop and recreate fresh db");
db.drop();
}
db.create();
} else {
if (!db.exists()) {
LOG.info("OrientDB database not found, creating fresh db");
db.create();
}
}
} else {
OServerAdmin server = new OServerAdmin(url).connect(user, password);
if (remoteStorageType == null) {
throw new DBException(
"When connecting to a remote OrientDB instance, "
+ "specify a database storage type (plocal or memory) with "
+ STORAGE_TYPE_PROPERTY);
}
if (newdb) {
if (server.existsDatabase()) {
LOG.info("OrientDB drop and recreate fresh db");
server.dropDatabase(remoteStorageType);
}
server.createDatabase(db.getName(), ORIENTDB_DOCUMENT_TYPE, remoteStorageType);
} else {
if (!server.existsDatabase()) {
LOG.info("OrientDB database not found, creating fresh db");
server.createDatabase(server.getURL(), ORIENTDB_DOCUMENT_TYPE, remoteStorageType);
}
}
server.close();
}
dbChecked = true;
}
if (db.isClosed()) {
db.open(user, password);
}
if (!db.getMetadata().getSchema().existsClass(CLASS)) {
db.getMetadata().getSchema().createClass(CLASS);
}
db.close();
if (databasePool == null) {
databasePool = new OPartitionedDatabasePool(url, user, password);
}
initialized = true;
}
} catch (Exception e) {
LOG.error("Could not initialize OrientDB connection pool for Loader: " + e.toString());
e.printStackTrace();
} finally {
INIT_LOCK.unlock();
}
}
OPartitionedDatabasePool getDatabasePool() {
return databasePool;
}
@Override
public void cleanup() throws DBException {
INIT_LOCK.lock();
try {
clientCounter--;
if (clientCounter == 0) {
databasePool.close();
databasePool = null;
initialized = false;
}
} finally {
INIT_LOCK.unlock();
}
}
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
try (ODatabaseDocumentTx db = databasePool.acquire()) {
final ODocument document = new ODocument(CLASS);
for (Map.Entry<String, String> entry : StringByteIterator.getStringMap(values).entrySet()) {
document.field(entry.getKey(), entry.getValue());
}
document.save();
final ODictionary<ORecord> dictionary = db.getMetadata().getIndexManager().getDictionary();
dictionary.put(key, document);
return Status.OK;
} catch (Exception e) {
e.printStackTrace();
}
return Status.ERROR;
}
@Override
public Status delete(String table, String key) {
while (true) {
try (ODatabaseDocumentTx db = databasePool.acquire()) {
final ODictionary<ORecord> dictionary = db.getMetadata().getIndexManager().getDictionary();
dictionary.remove(key);
return Status.OK;
} catch (OConcurrentModificationException cme) {
continue;
} catch (Exception e) {
e.printStackTrace();
return Status.ERROR;
}
}
}
@Override
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
try (ODatabaseDocumentTx db = databasePool.acquire()) {
final ODictionary<ORecord> dictionary = db.getMetadata().getIndexManager().getDictionary();
final ODocument document = dictionary.get(key);
if (document != null) {
if (fields != null) {
for (String field : fields) {
result.put(field, new StringByteIterator((String) document.field(field)));
}
} else {
for (String field : document.fieldNames()) {
result.put(field, new StringByteIterator((String) document.field(field)));
}
}
return Status.OK;
}
} catch (Exception e) {
e.printStackTrace();
}
return Status.ERROR;
}
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
while (true) {
try (ODatabaseDocumentTx db = databasePool.acquire()) {
final ODictionary<ORecord> dictionary = db.getMetadata().getIndexManager().getDictionary();
final ODocument document = dictionary.get(key);
if (document != null) {
for (Map.Entry<String, String> entry : StringByteIterator.getStringMap(values).entrySet()) {
document.field(entry.getKey(), entry.getValue());
}
document.save();
return Status.OK;
}
} catch (OConcurrentModificationException cme) {
continue;
} catch (Exception e) {
e.printStackTrace();
return Status.ERROR;
}
}
}
@Override
public Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
if (isRemote) {
// Iterator methods needed for scanning are Unsupported for remote database connections.
LOG.warn("OrientDB scan operation is not implemented for remote database connections.");
return Status.NOT_IMPLEMENTED;
}
try (ODatabaseDocumentTx db = databasePool.acquire()) {
final ODictionary<ORecord> dictionary = db.getMetadata().getIndexManager().getDictionary();
final OIndexCursor entries = dictionary.getIndex().iterateEntriesMajor(startkey, true, true);
int currentCount = 0;
while (entries.hasNext()) {
final ODocument document = entries.next().getRecord();
final HashMap<String, ByteIterator> map = new HashMap<>();
result.add(map);
if (fields != null) {
for (String field : fields) {
map.put(field, new StringByteIterator((String) document.field(field)));
}
} else {
for (String field : document.fieldNames()) {
map.put(field, new StringByteIterator((String) document.field(field)));
}
}
currentCount++;
if (currentCount >= recordcount) {
break;
}
}
return Status.OK;
} catch (Exception e) {
e.printStackTrace();
}
return Status.ERROR;
}
}
| 10,654 | 31.885802 | 111 | java |
null | NearPMSW-main/baseline/logging/YCSB2/foundationdb/src/main/java/site/ycsb/db/foundationdb/package-info.java | /*
* Copyright (c) 2015 - 2016, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="https://www.foundationdb.org">FoundationDB</a>.
*/
package site.ycsb.db.foundationdb;
| 785 | 33.173913 | 80 | java |
null | NearPMSW-main/baseline/logging/YCSB2/foundationdb/src/main/java/site/ycsb/db/foundationdb/FoundationDBClient.java | /**
* Copyright (c) 2012 - 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.foundationdb;
import com.apple.foundationdb.*;
import com.apple.foundationdb.async.AsyncIterable;
import com.apple.foundationdb.tuple.Tuple;
import site.ycsb.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.helpers.MessageFormatter;
import java.util.*;
/**
* FoundationDB client for YCSB framework.
*/
public class FoundationDBClient extends DB {
private FDB fdb;
private Database db;
private String dbName;
private int batchSize;
private int batchCount;
private static final String API_VERSION = "foundationdb.apiversion";
private static final String API_VERSION_DEFAULT = "520";
private static final String CLUSTER_FILE = "foundationdb.clusterfile";
private static final String CLUSTER_FILE_DEFAULT = "./fdb.cluster";
private static final String DB_NAME = "foundationdb.dbname";
private static final String DB_NAME_DEFAULT = "DB";
private static final String DB_BATCH_SIZE_DEFAULT = "0";
private static final String DB_BATCH_SIZE = "foundationdb.batchsize";
private Vector<String> batchKeys;
private Vector<Map<String, ByteIterator>> batchValues;
private static Logger logger = LoggerFactory.getLogger(FoundationDBClient.class);
/**
* Initialize any state for this DB. Called once per DB instance; there is one DB instance per client thread.
*/
@Override
public void init() throws DBException {
// initialize FoundationDB driver
final Properties props = getProperties();
String apiVersion = props.getProperty(API_VERSION, API_VERSION_DEFAULT);
String clusterFile = props.getProperty(CLUSTER_FILE, CLUSTER_FILE_DEFAULT);
String dbBatchSize = props.getProperty(DB_BATCH_SIZE, DB_BATCH_SIZE_DEFAULT);
dbName = props.getProperty(DB_NAME, DB_NAME_DEFAULT);
logger.info("API Version: {}", apiVersion);
logger.info("Cluster File: {}\n", clusterFile);
try {
fdb = FDB.selectAPIVersion(Integer.parseInt(apiVersion.trim()));
db = fdb.open(clusterFile);
batchSize = Integer.parseInt(dbBatchSize);
batchCount = 0;
batchKeys = new Vector<String>(batchSize+1);
batchValues = new Vector<Map<String, ByteIterator>>(batchSize+1);
} catch (FDBException e) {
logger.error(MessageFormatter.format("Error in database operation: {}", "init").getMessage(), e);
throw new DBException(e);
} catch (NumberFormatException e) {
logger.error(MessageFormatter.format("Invalid value for apiversion property: {}", apiVersion).getMessage(), e);
throw new DBException(e);
}
}
@Override
public void cleanup() throws DBException {
if (batchCount > 0) {
batchInsert();
batchCount = 0;
}
try {
db.close();
} catch (FDBException e) {
logger.error(MessageFormatter.format("Error in database operation: {}", "cleanup").getMessage(), e);
throw new DBException(e);
}
}
private static String getRowKey(String db, String table, String key) {
//return key + ":" + table + ":" + db;
return db + ":" + table + ":" + key;
}
private static String getEndRowKey(String table) {
return table + ";";
}
private Status convTupleToMap(Tuple tuple, Set<String> fields, Map<String, ByteIterator> result) {
for (int i = 0; i < tuple.size(); i++) {
Tuple v = tuple.getNestedTuple(i);
String field = v.getString(0);
String value = v.getString(1);
//System.err.println(field + " : " + value);
result.put(field, new StringByteIterator(value));
}
if (fields != null) {
for (String field : fields) {
if (result.get(field) == null) {
logger.debug("field not fount: {}", field);
return Status.NOT_FOUND;
}
}
}
return Status.OK;
}
private void batchInsert() {
try {
db.run(tr -> {
for (int i = 0; i < batchCount; ++i) {
Tuple t = new Tuple();
for (Map.Entry<String, String> entry : StringByteIterator.getStringMap(batchValues.get(i)).entrySet()) {
Tuple v = new Tuple();
v = v.add(entry.getKey());
v = v.add(entry.getValue());
t = t.add(v);
}
tr.set(Tuple.from(batchKeys.get(i)).pack(), t.pack());
}
return null;
});
} catch (FDBException e) {
for (int i = 0; i < batchCount; ++i) {
logger.error(MessageFormatter.format("Error batch inserting key {}", batchKeys.get(i)).getMessage(), e);
}
e.printStackTrace();
} catch (Throwable e) {
for (int i = 0; i < batchCount; ++i) {
logger.error(MessageFormatter.format("Error batch inserting key {}", batchKeys.get(i)).getMessage(), e);
}
e.printStackTrace();
} finally {
batchKeys.clear();
batchValues.clear();
}
}
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
String rowKey = getRowKey(dbName, table, key);
logger.debug("insert key = {}", rowKey);
try {
batchKeys.addElement(rowKey);
batchValues.addElement(new HashMap<String, ByteIterator>(values));
batchCount++;
if (batchSize == 0 || batchSize == batchCount) {
batchInsert();
batchCount = 0;
}
return Status.OK;
} catch (Throwable e) {
logger.error(MessageFormatter.format("Error inserting key: {}", rowKey).getMessage(), e);
e.printStackTrace();
}
return Status.ERROR;
}
@Override
public Status delete(String table, String key) {
String rowKey = getRowKey(dbName, table, key);
logger.debug("delete key = {}", rowKey);
try {
db.run(tr -> {
tr.clear(Tuple.from(rowKey).pack());
return null;
});
return Status.OK;
} catch (FDBException e) {
logger.error(MessageFormatter.format("Error deleting key: {}", rowKey).getMessage(), e);
e.printStackTrace();
} catch (Exception e) {
logger.error(MessageFormatter.format("Error deleting key: {}", rowKey).getMessage(), e);
e.printStackTrace();
}
return Status.ERROR;
}
@Override
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
String rowKey = getRowKey(dbName, table, key);
logger.debug("read key = {}", rowKey);
try {
byte[] row = db.run(tr -> {
byte[] r = tr.get(Tuple.from(rowKey).pack()).join();
return r;
});
Tuple t = Tuple.fromBytes(row);
if (t.size() == 0) {
logger.debug("key not fount: {}", rowKey);
return Status.NOT_FOUND;
}
return convTupleToMap(t, fields, result);
} catch (FDBException e) {
logger.error(MessageFormatter.format("Error reading key: {}", rowKey).getMessage(), e);
e.printStackTrace();
} catch (Exception e) {
logger.error(MessageFormatter.format("Error reading key: {}", rowKey).getMessage(), e);
e.printStackTrace();
}
return Status.ERROR;
}
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
String rowKey = getRowKey(dbName, table, key);
logger.debug("update key = {}", rowKey);
try {
Status s = db.run(tr -> {
byte[] row = tr.get(Tuple.from(rowKey).pack()).join();
Tuple o = Tuple.fromBytes(row);
if (o.size() == 0) {
logger.debug("key not fount: {}", rowKey);
return Status.NOT_FOUND;
}
HashMap<String, ByteIterator> result = new HashMap<>();
if (convTupleToMap(o, null, result) != Status.OK) {
return Status.ERROR;
}
for (String k : values.keySet()) {
if (result.containsKey(k)) {
result.put(k, values.get(k));
} else {
logger.debug("field not fount: {}", k);
return Status.NOT_FOUND;
}
}
Tuple t = new Tuple();
for (Map.Entry<String, String> entry : StringByteIterator.getStringMap(result).entrySet()) {
Tuple v = new Tuple();
v = v.add(entry.getKey());
v = v.add(entry.getValue());
t = t.add(v);
}
tr.set(Tuple.from(rowKey).pack(), t.pack());
return Status.OK;
});
return s;
} catch (FDBException e) {
logger.error(MessageFormatter.format("Error updating key: {}", rowKey).getMessage(), e);
e.printStackTrace();
} catch (Exception e) {
logger.error(MessageFormatter.format("Error updating key: {}", rowKey).getMessage(), e);
e.printStackTrace();
}
return Status.ERROR;
}
@Override
public Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
String startRowKey = getRowKey(dbName, table, startkey);
String endRowKey = getEndRowKey(table);
logger.debug("scan key from {} to {} limit {} ", startkey, endRowKey, recordcount);
try (Transaction tr = db.createTransaction()) {
tr.options().setReadYourWritesDisable();
AsyncIterable<KeyValue> entryList = tr.getRange(Tuple.from(startRowKey).pack(), Tuple.from(endRowKey).pack(),
recordcount > 0 ? recordcount : 0);
List<KeyValue> entries = entryList.asList().join();
for (int i = 0; i < entries.size(); ++i) {
final HashMap<String, ByteIterator> map = new HashMap<>();
Tuple value = Tuple.fromBytes(entries.get(i).getValue());
if (convTupleToMap(value, fields, map) == Status.OK) {
result.add(map);
} else {
logger.error("Error scanning keys: from {} to {} limit {} ", startRowKey, endRowKey, recordcount);
return Status.ERROR;
}
}
return Status.OK;
} catch (FDBException e) {
logger.error(MessageFormatter.format("Error scanning keys: from {} to {} ",
startRowKey, endRowKey).getMessage(), e);
e.printStackTrace();
} catch (Exception e) {
logger.error(MessageFormatter.format("Error scanning keys: from {} to {} ",
startRowKey, endRowKey).getMessage(), e);
e.printStackTrace();
}
return Status.ERROR;
}
}
| 10,929 | 35.072607 | 117 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/test/java/site/ycsb/db/voltdb/test/package-info.java | /**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* YCSB binding for VoltDB.
*/
package site.ycsb.db.voltdb.test;
| 735 | 31 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/test/java/site/ycsb/db/voltdb/test/VoltDBClientTest.java | /**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.voltdb.test;
import static org.junit.Assert.*;
import static org.junit.Assume.assumeNoException;
import site.ycsb.ByteIterator;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.db.voltdb.ConnectionHelper;
import site.ycsb.db.voltdb.VoltClient4;
import org.junit.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
import java.util.Map;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.Vector;
import java.util.Properties;
/**
* Test harness for YCSB / VoltDB. Note that not much happens if VoltDB isn't
* visible.
*
*/
public class VoltDBClientTest {
private static final String TABLE_NAME = "USERTABLE";
private static final int FIELD_LENGTH = 32;
private static final String FIELD_PREFIX = "FIELD";
private static final int NUM_FIELDS = 3;
private static final String INSERT_TEST_KEY = "InsertReadTest";
private static final String INSERT_DELETE_AND_READ_TEST_KEY = "InsertDeleteReadTest";
private static final String UPDATE_TEST_KEY = "UpdateTest";
private static final String NON_EXISTENT_KEY = "NonExistTest";
private static final String SCAN_KEY_PREFIX = "ScanKey_";
private static final int SCAN_RECORD_COUNT = 5000;
private static final String[] TEST_DATA_KEYS = { INSERT_TEST_KEY, INSERT_DELETE_AND_READ_TEST_KEY, UPDATE_TEST_KEY };
private static VoltClient4 voltClient = null;
private static boolean haveDb = false;
@BeforeClass
public static void setup() {
Properties p = new Properties();
String servers = p.getProperty("voltdb.servers", "localhost");
String user = p.getProperty("voltdb.user", "");
String password = p.getProperty("voltdb.password", "");
String strLimit = p.getProperty("voltdb.ratelimit", "70000");
p.setProperty("voltdb.servers", servers);
p.setProperty("voltdb.user", user);
p.setProperty("voltdb.password", password);
p.setProperty("voltdb.ratelimit", strLimit);
try {
voltClient = new VoltClient4();
voltClient.setProperties(p);
if (ConnectionHelper.checkDBServers(servers)) {
voltClient.init();
haveDb = true;
removeExistingData();
}
} catch (Exception e) {
// The call to checkDBServers above looks for activity on
// the ip and port we expect VoltDB to be on. If we get to this
// line it's because 'something' is running on localhost:21212,
// but whatever it is, it isn't a happy copy of VoltDB.
assumeNoException("Something was running on VoltDB's port but it wasn't a usable copy of VoltDB", e);
}
}
private static void removeExistingData() {
try {
for (int i = 0; i < TEST_DATA_KEYS.length; i++) {
voltClient.delete(TABLE_NAME, TEST_DATA_KEYS[i]);
}
for (int i = 0; i < SCAN_RECORD_COUNT; i++) {
voltClient.delete(TABLE_NAME, SCAN_KEY_PREFIX + i);
}
} catch (Exception e) {
Logger logger = LoggerFactory.getLogger(VoltDBClientTest.class);
logger.error("Error while calling 'removeExistingData()'", e);
fail("Failed removeExistingData");
}
}
@AfterClass
public static void teardown() {
try {
if (voltClient != null && haveDb) {
removeExistingData();
voltClient.cleanup();
}
} catch (DBException e) {
e.printStackTrace();
}
}
@Before
public void prepareTest() {
}
private boolean compareContents(HashMap<String, ByteIterator> inMsg, Map<String, ByteIterator> outMsg) {
if (inMsg == null) {
return false;
}
if (outMsg == null) {
return false;
}
if (inMsg.size() != outMsg.size()) {
return false;
}
@SuppressWarnings("rawtypes")
Iterator it = inMsg.entrySet().iterator();
while (it.hasNext()) {
@SuppressWarnings("rawtypes")
Map.Entry pair = (Map.Entry) it.next();
String key = (String) pair.getKey();
ByteIterator inPayload = inMsg.get(key);
inPayload.reset();
ByteIterator outPayload = outMsg.get(key);
outPayload.reset();
if (inPayload.bytesLeft() != outPayload.bytesLeft()) {
return false;
}
while (inPayload.hasNext()) {
byte inByte = inPayload.nextByte();
byte outByte = outPayload.nextByte();
if (inByte != outByte) {
return false;
}
}
it.remove();
}
return true;
}
@Test
public void insertAndReadTest() {
Assume.assumeTrue(haveDb);
try {
// Create some test data
final String insertKey = INSERT_TEST_KEY;
final Set<String> columns = getColumnNameMap();
// Insert row
HashMap<String, ByteIterator> insertMap = new HashMap<String, ByteIterator>();
for (int i = 0; i < NUM_FIELDS; i++) {
insertMap.put(FIELD_PREFIX + i, new StringByteIterator(buildDeterministicValue(insertKey, FIELD_PREFIX + i)));
}
voltClient.insert(TABLE_NAME, insertKey, insertMap);
// Create a object to put retrieved row in...
Map<String, ByteIterator> testResult = new HashMap<String, ByteIterator>();
// Read row...
Status s = voltClient.read(TABLE_NAME, insertKey, columns, testResult);
if (!s.equals(Status.OK)) {
fail("Didn't get OK on read.");
}
if (!compareContents(insertMap, testResult)) {
fail("Returned data not the same as inserted data");
}
} catch (Exception e) {
e.printStackTrace();
fail("Failed insertTest");
}
}
@Test
public void insertDeleteAndReadTest() {
Assume.assumeTrue(haveDb);
try {
// Create some test data
final String insertKey = INSERT_DELETE_AND_READ_TEST_KEY;
final Set<String> columns = getColumnNameMap();
// Insert row
HashMap<String, ByteIterator> insertMap = new HashMap<String, ByteIterator>();
for (int i = 0; i < NUM_FIELDS; i++) {
insertMap.put(FIELD_PREFIX + i, new StringByteIterator(buildDeterministicValue(insertKey, FIELD_PREFIX + i)));
}
voltClient.insert(TABLE_NAME, insertKey, insertMap);
// Create a object to put retrieved row in...
Map<String, ByteIterator> testResult = new HashMap<String, ByteIterator>();
// Read row...
Status s = voltClient.read(TABLE_NAME, insertKey, columns, testResult);
if (!s.equals(Status.OK)) {
fail("Didn't get OK on read.");
}
if (!compareContents(insertMap, testResult)) {
fail("Returned data not the same as inserted data");
}
voltClient.delete(TABLE_NAME, insertKey);
// Create another object to put retrieved row in...
Map<String, ByteIterator> testResultAfterDelete = new HashMap<String, ByteIterator>();
// Read row...
voltClient.read(TABLE_NAME, insertKey, columns, testResultAfterDelete);
if (testResultAfterDelete.size() > 0) {
fail("testResultAfterDelete has value.");
}
} catch (Exception e) {
e.printStackTrace();
fail("Failed insertDeleteAndReadTest");
}
}
@Test
public void deleteNonExistentRecordTest() {
Assume.assumeTrue(haveDb);
try {
// Create some test data
final String insertKey = NON_EXISTENT_KEY;
final Set<String> columns = getColumnNameMap();
// Create a object to put retrieved row in...
Map<String, ByteIterator> testResult = new HashMap<String, ByteIterator>();
// Read row...
voltClient.read(TABLE_NAME, insertKey, columns, testResult);
if (testResult.size() > 0) {
fail("testResult.size() > 0.");
}
} catch (Exception e) {
e.printStackTrace();
fail("Failed deleteNonExistentRecordTest");
}
}
@Test
public void scanReadTest() {
Assume.assumeTrue(haveDb);
try {
for (int z = 0; z < SCAN_RECORD_COUNT; z++) {
// Create some test data
final String insertKey = SCAN_KEY_PREFIX + z;
// Insert row
HashMap<String, ByteIterator> insertMap = new HashMap<String, ByteIterator>();
for (int i = 0; i < NUM_FIELDS; i++) {
insertMap.put(FIELD_PREFIX + i, new StringByteIterator("Data for " + SCAN_KEY_PREFIX + z + " element " + i));
}
voltClient.insert(TABLE_NAME, insertKey, insertMap);
}
final String firstInsertKey = SCAN_KEY_PREFIX + 0;
final String lastInsertKey = SCAN_KEY_PREFIX + (SCAN_RECORD_COUNT - 1);
final String beyondLastInsertKey = SCAN_KEY_PREFIX + (SCAN_RECORD_COUNT + 1);
final String oneHundredFromEndInsertKey = SCAN_KEY_PREFIX + (SCAN_RECORD_COUNT - 101);
final String fiftyFromEndInsertKey = SCAN_KEY_PREFIX + (SCAN_RECORD_COUNT - 101);
// test non existent records
singleScanReadTest(NON_EXISTENT_KEY, 1000, 0, NON_EXISTENT_KEY);
// test single record
singleScanReadTest(firstInsertKey, 1, 1, firstInsertKey);
// test scan of SCAN_RECORD_COUNT records
singleScanReadTest(firstInsertKey, SCAN_RECORD_COUNT, SCAN_RECORD_COUNT, lastInsertKey);
// test single record in middle
singleScanReadTest(oneHundredFromEndInsertKey, 1, 1, oneHundredFromEndInsertKey);
// test request of 100 starting 50 from end.
singleScanReadTest(fiftyFromEndInsertKey, 100, 50, lastInsertKey);
// test request of 100 starting beyond the end
singleScanReadTest(beyondLastInsertKey, 100, 0, lastInsertKey);
} catch (Exception e) {
e.printStackTrace();
fail("Failed scanReadTest");
}
}
private void singleScanReadTest(String startKey, int requestedCount, int expectedCount, String lastKey) {
Assume.assumeTrue(haveDb);
try {
final Set<String> columns = getColumnNameMap();
// Create a object to put retrieved row in...
Vector<HashMap<String, ByteIterator>> testResult = new Vector<HashMap<String, ByteIterator>>();
// Read row...
Status s = voltClient.scan(TABLE_NAME, startKey, expectedCount, columns, testResult);
if (!s.equals(Status.OK)) {
fail("Didn't get OK on read.");
}
if (testResult.size() != expectedCount) {
fail("Failed singleScanReadTest " + startKey + " " + expectedCount + " " + lastKey);
}
} catch (Exception e) {
e.printStackTrace();
fail("Failed singleScanReadTest " + startKey + ". Asked for " + requestedCount + ", expected " + expectedCount
+ " lastkey=" + lastKey);
}
}
@Test
public void updateTest() {
Assume.assumeTrue(haveDb);
try {
// Create some test data
final String insertKey = UPDATE_TEST_KEY;
// Insert row
// Insert row
HashMap<String, ByteIterator> insertThenUpdateMap = new HashMap<String, ByteIterator>();
for (int i = 0; i < NUM_FIELDS; i++) {
insertThenUpdateMap.put(FIELD_PREFIX + i,
new StringByteIterator(buildDeterministicValue(insertKey, FIELD_PREFIX + i)));
}
voltClient.insert(TABLE_NAME, insertKey, insertThenUpdateMap);
// Change the data we inserted...
for (int i = 0; i < NUM_FIELDS; i++) {
insertThenUpdateMap.put(FIELD_PREFIX + i, new StringByteIterator(FIELD_PREFIX + i + " has changed"));
}
// now do an update
voltClient.update(TABLE_NAME, insertKey, insertThenUpdateMap);
// Create a object to put retrieved row in...
final Set<String> columns = getColumnNameMap();
Map<String, ByteIterator> testResult = new HashMap<String, ByteIterator>();
// Read row...
Status s = voltClient.read(TABLE_NAME, insertKey, columns, testResult);
if (!s.equals(Status.OK)) {
fail("Didn't get OK on read.");
}
if (!compareContents(insertThenUpdateMap, testResult)) {
fail("Returned data not the same as inserted data");
}
} catch (Exception e) {
e.printStackTrace();
fail("Failed updateTest");
}
}
/**
* @return
*/
private Set<String> getColumnNameMap() {
Set<String> columns = new HashSet<String>();
for (int i = 0; i < NUM_FIELDS; i++) {
columns.add(FIELD_PREFIX + i);
}
return columns;
}
/*
* This is a copy of buildDeterministicValue() from
* core:site.ycsb.workloads.CoreWorkload.java. That method is neither
* public nor static so we need a copy.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = FIELD_LENGTH;
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
}
| 13,472 | 28.546053 | 119 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/main/java/site/ycsb/db/voltdb/YCSBSchemaBuilder.java | /**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.voltdb;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.util.jar.Attributes;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.voltdb.client.Client;
import org.voltdb.client.ClientResponse;
import org.voltdb.client.ProcCallException;
/**
* Utility class to build the YCSB schema.
*
*/
public final class YCSBSchemaBuilder {
private static final String PROCEDURE_GET_WAS_NOT_FOUND = "Procedure Get was not found";
private static final Charset UTF8 = Charset.forName("UTF-8");
private final String createTableDDL = "CREATE TABLE Store (keyspace VARBINARY(128) NOT NULL\n"
+ ", key VARCHAR(128) NOT NULL, value VARBINARY(2056) NOT NULL\n"
+ ", PRIMARY KEY (key, keyspace));";
private final String partitionTableDDL = "PARTITION TABLE Store ON COLUMN key;\n";
private final String createGetDDL = "CREATE PROCEDURE Get PARTITION ON TABLE Store COLUMN key PARAMETER 1\n"
+ "AS SELECT value FROM Store WHERE keyspace = ? AND key = ?;";
private final String createPutDDL = "CREATE PROCEDURE PARTITION ON TABLE Store COLUMN key PARAMETER 1\n"
+ "FROM CLASS site.ycsb.db.voltdb.procs.Put;";
private final String createScanDDL = "CREATE PROCEDURE PARTITION ON TABLE Store COLUMN key \n"
+ "FROM CLASS site.ycsb.db.voltdb.procs.Scan;";
private final String createScanAllDDL = "CREATE PROCEDURE \n" + "FROM CLASS site.ycsb.db.voltdb.procs.ScanAll;";
private final String[] ddlStatements = {createTableDDL, partitionTableDDL };
private final String[] procStatements = {createGetDDL, createPutDDL, createScanDDL, createScanAllDDL };
private final String[] jarFiles = {"Put.class", "Scan.class", "ScanAll.class", "ByteWrapper.class" };
private final String jarFileName = "ycsb-procs.jar";
private Logger logger = LoggerFactory.getLogger(YCSBSchemaBuilder.class);
/**
* Utility class to build the YCSB schema.
*
* @author srmadscience / VoltDB
*
*/
YCSBSchemaBuilder() {
super();
}
/**
* See if we think YCSB Schema already exists...
*
* @return true if the 'Get' procedure exists and takes one string as a
* parameter.
*/
public boolean schemaExists(Client voltClient) {
final String testString = "Test";
boolean schemaExists = false;
try {
ClientResponse response = voltClient.callProcedure("Get", testString.getBytes(UTF8), testString);
if (response.getStatus() == ClientResponse.SUCCESS) {
// YCSB Database exists...
schemaExists = true;
} else {
// If we'd connected to a copy of VoltDB without the schema and tried to call Get
// we'd have got a ProcCallException
logger.error("Error while calling schemaExists(): " + response.getStatusString());
schemaExists = false;
}
} catch (ProcCallException pce) {
schemaExists = false;
// Sanity check: Make sure we've got the *right* ProcCallException...
if (!pce.getMessage().equals(PROCEDURE_GET_WAS_NOT_FOUND)) {
logger.error("Got unexpected Exception while calling schemaExists()", pce);
}
} catch (Exception e) {
logger.error("Error while creating classes.", e);
schemaExists = false;
}
return schemaExists;
}
/**
* Load classes and DDL required by YCSB.
*
* @throws Exception
*/
public synchronized void loadClassesAndDDLIfNeeded(Client voltClient) throws Exception {
if (schemaExists(voltClient)) {
return;
}
File tempDir = Files.createTempDirectory("voltdbYCSB").toFile();
if (!tempDir.canWrite()) {
throw new Exception("Temp Directory (from Files.createTempDirectory()) '"
+ tempDir.getAbsolutePath() + "' is not writable");
}
ClientResponse cr;
for (int i = 0; i < ddlStatements.length; i++) {
try {
cr = voltClient.callProcedure("@AdHoc", ddlStatements[i]);
if (cr.getStatus() != ClientResponse.SUCCESS) {
throw new Exception("Attempt to execute '" + ddlStatements[i] + "' failed:" + cr.getStatusString());
}
logger.info(ddlStatements[i]);
} catch (Exception e) {
if (e.getMessage().indexOf("object name already exists") > -1) {
// Someone else has done this...
return;
}
throw (e);
}
}
logger.info("Creating JAR file in " + tempDir + File.separator + jarFileName);
Manifest manifest = new Manifest();
manifest.getMainAttributes().put(Attributes.Name.MANIFEST_VERSION, "1.0");
JarOutputStream newJarFile = new JarOutputStream(new FileOutputStream(tempDir + File.separator + jarFileName),
manifest);
for (int i = 0; i < jarFiles.length; i++) {
InputStream is = getClass().getResourceAsStream("/site/ycsb/db/voltdb/procs/" + jarFiles[i]);
add("site/ycsb/db/voltdb/procs/" + jarFiles[i], is, newJarFile);
}
newJarFile.close();
File file = new File(tempDir + File.separator + jarFileName);
byte[] jarFileContents = new byte[(int) file.length()];
FileInputStream fis = new FileInputStream(file);
fis.read(jarFileContents);
fis.close();
logger.info("Calling @UpdateClasses to load JAR file containing procedures");
cr = voltClient.callProcedure("@UpdateClasses", jarFileContents, null);
if (cr.getStatus() != ClientResponse.SUCCESS) {
throw new Exception("Attempt to execute UpdateClasses failed:" + cr.getStatusString());
}
for (int i = 0; i < procStatements.length; i++) {
logger.info(procStatements[i]);
cr = voltClient.callProcedure("@AdHoc", procStatements[i]);
if (cr.getStatus() != ClientResponse.SUCCESS) {
throw new Exception("Attempt to execute '" + procStatements[i] + "' failed:" + cr.getStatusString());
}
}
}
/**
* Add an entry to our JAR file.
*
* @param fileName
* @param source
* @param target
* @throws IOException
*/
private void add(String fileName, InputStream source, JarOutputStream target) throws IOException {
BufferedInputStream in = null;
try {
JarEntry entry = new JarEntry(fileName.replace("\\", "/"));
entry.setTime(System.currentTimeMillis());
target.putNextEntry(entry);
in = new BufferedInputStream(source);
byte[] buffer = new byte[1024];
while (true) {
int count = in.read(buffer);
if (count == -1) {
break;
}
target.write(buffer, 0, count);
}
target.closeEntry();
} finally {
if (in != null) {
in.close();
}
}
}
}
| 7,618 | 31.699571 | 114 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/main/java/site/ycsb/db/voltdb/package-info.java | /**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* VoltDB integration with YCSB.
*
*/
package site.ycsb.db.voltdb;
| 750 | 33.136364 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/main/java/site/ycsb/db/voltdb/VoltClient4.java | /**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/*
* This client provideds a wrapper layer for running the Yahoo Cloud Serving
* Benchmark (YCSB) against VoltDB. This benchmark runs a synchronous client
* with a mix of the operations provided below. YCSB is open-source, and may
* be found at https://github.com/brianfrankcooper/YCSB. The YCSB jar must be
* in your classpath to compile this client.
*/
package site.ycsb.db.voltdb;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.voltdb.VoltTable;
import org.voltdb.client.Client;
import org.voltdb.client.ClientResponse;
import org.voltdb.client.ClientResponseWithPartitionKey;
import org.voltdb.client.NoConnectionsException;
import site.ycsb.ByteArrayByteIterator;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.db.voltdb.sortedvolttable.VoltDBTableSortedMergeWrangler;
/**
* A client that can be used by YCSB to work with VoltDB.
*/
public class VoltClient4 extends DB {
private Client mclient;
private byte[] mworkingData;
private ByteBuffer mwriteBuf;
private boolean useScanAll = false;
private static final Charset UTF8 = Charset.forName("UTF-8");
private Logger logger = LoggerFactory.getLogger(VoltClient4.class);
private YCSBSchemaBuilder ysb = null;
@Override
public void init() throws DBException {
Properties props = getProperties();
String servers = props.getProperty("voltdb.servers", "localhost");
String user = props.getProperty("voltdb.user", "");
String password = props.getProperty("voltdb.password", "");
String strLimit = props.getProperty("voltdb.ratelimit");
String useScanAllParam = props.getProperty("voltdb.scanall", "no");
if (useScanAllParam.equalsIgnoreCase("YES")) {
useScanAll = true;
}
int ratelimit = strLimit != null ? Integer.parseInt(strLimit) : Integer.MAX_VALUE;
try {
mclient = ConnectionHelper.createConnection(servers, user, password, ratelimit);
ysb = StaticHolder.INSTANCE;
ysb.loadClassesAndDDLIfNeeded(mclient);
} catch (Exception e) {
logger.error("Error while creating connection: ", e);
throw new DBException(e.getMessage());
}
mworkingData = new byte[1024 * 1024];
mwriteBuf = ByteBuffer.wrap(mworkingData);
}
/**
* @return true if we have a live DB connection
*/
public boolean hasConnection() {
if (mclient != null && mclient.getConnectedHostList().size() > 0) {
return true;
}
return false;
}
@Override
public void cleanup() throws DBException {
// If VoltDB client exists and has a live connection...
if (mclient != null && mclient.getConnectedHostList().size() > 0) {
try {
mclient.drain();
mclient.close();
} catch (NoConnectionsException e) {
logger.error(e.getMessage(), e);
} catch (InterruptedException e) {
logger.error(e.getMessage(), e);
}
mclient = null;
}
}
@Override
public Status delete(String keyspace, String key) {
try {
ClientResponse response = mclient.callProcedure("STORE.delete", key, keyspace.getBytes(UTF8));
return response.getStatus() == ClientResponse.SUCCESS ? Status.OK : Status.ERROR;
} catch (Exception e) {
logger.error("Error while deleting row", e);
return Status.ERROR;
}
}
@Override
public Status insert(String keyspace, String key, Map<String, ByteIterator> columns) {
return update(keyspace, key, columns);
}
@Override
public Status read(String keyspace, String key, Set<String> columns, Map<String, ByteIterator> result) {
try {
ClientResponse response = mclient.callProcedure("Get", keyspace.getBytes(UTF8), key);
if (response.getStatus() != ClientResponse.SUCCESS) {
return Status.ERROR;
}
VoltTable table = response.getResults()[0];
if (table.advanceRow()) {
unpackRowData(table, columns, result);
}
return Status.OK;
} catch (Exception e) {
logger.error("Error while GETing row", e);
return Status.ERROR;
}
}
@Override
public Status scan(String keyspace, String lowerBound, int recordCount, Set<String> columns,
Vector<HashMap<String, ByteIterator>> result) {
try {
if (useScanAll) {
byte[] ks = keyspace.getBytes(UTF8);
ClientResponse response = mclient.callProcedure("ScanAll", ks, lowerBound.getBytes(UTF8), recordCount);
if (response.getStatus() != ClientResponse.SUCCESS) {
return Status.ERROR;
}
result.ensureCapacity(recordCount);
VoltTable outputTable = response.getResults()[0];
outputTable.resetRowPosition();
while (outputTable.advanceRow()) {
result.add(unpackRowDataHashMap(outputTable, columns));
}
} else {
byte[] ks = keyspace.getBytes(UTF8);
ClientResponseWithPartitionKey[] response = mclient.callAllPartitionProcedure("Scan", ks,
lowerBound.getBytes(UTF8), recordCount);
for (int i = 0; i < response.length; i++) {
if (response[i].response.getStatus() != ClientResponse.SUCCESS) {
return Status.ERROR;
}
}
result.ensureCapacity(recordCount);
VoltDBTableSortedMergeWrangler smw = new VoltDBTableSortedMergeWrangler(response);
VoltTable outputTable = smw.getSortedTable(1, recordCount);
outputTable.resetRowPosition();
while (outputTable.advanceRow()) {
result.add(unpackRowDataHashMap(outputTable, columns));
}
}
return Status.OK;
} catch (Exception e) {
logger.error("Error while calling SCAN", e);
return Status.ERROR;
}
}
@Override
public Status update(String keyspace, String key, Map<String, ByteIterator> columns) {
try {
ClientResponse response = mclient.callProcedure("Put", keyspace.getBytes(UTF8), key, packRowData(columns));
return response.getStatus() == ClientResponse.SUCCESS ? Status.OK : Status.ERROR;
} catch (Exception e) {
logger.error("Error while calling Update", e);
return Status.ERROR;
}
}
private byte[] packRowData(Map<String, ByteIterator> columns) {
mwriteBuf.clear();
mwriteBuf.putInt(columns.size());
for (String key : columns.keySet()) {
byte[] k = key.getBytes(UTF8);
mwriteBuf.putInt(k.length);
mwriteBuf.put(k);
ByteIterator v = columns.get(key);
int len = (int) v.bytesLeft();
mwriteBuf.putInt(len);
v.nextBuf(mworkingData, mwriteBuf.position());
mwriteBuf.position(mwriteBuf.position() + len);
}
byte[] data = new byte[mwriteBuf.position()];
System.arraycopy(mworkingData, 0, data, 0, data.length);
return data;
}
private Map<String, ByteIterator> unpackRowData(VoltTable data, Set<String> fields,
Map<String, ByteIterator> result) {
byte[] rowData = data.getVarbinary(0);
ByteBuffer buf = ByteBuffer.wrap(rowData);
int nFields = buf.getInt();
return unpackRowData(rowData, buf, nFields, fields, result);
}
private Map<String, ByteIterator> unpackRowData(byte[] rowData, ByteBuffer buf, int nFields, Set<String> fields,
Map<String, ByteIterator> result) {
for (int i = 0; i < nFields; i++) {
int len = buf.getInt();
int off = buf.position();
String key = new String(rowData, off, len, UTF8);
buf.position(off + len);
len = buf.getInt();
off = buf.position();
if (fields == null || fields.contains(key)) {
result.put(key, new ByteArrayByteIterator(rowData, off, len));
}
buf.position(off + len);
}
return result;
}
private HashMap<String, ByteIterator> unpackRowDataHashMap(VoltTable data, Set<String> fields) {
byte[] rowData = data.getVarbinary(0);
ByteBuffer buf = ByteBuffer.wrap(rowData);
int nFields = buf.getInt();
int size = fields != null ? Math.min(fields.size(), nFields) : nFields;
HashMap<String, ByteIterator> res = new HashMap<String, ByteIterator>(size, (float) 1.25);
return unpackRowDataHashMap(rowData, buf, nFields, fields, res);
}
private HashMap<String, ByteIterator> unpackRowDataHashMap(byte[] rowData, ByteBuffer buf, int nFields,
Set<String> fields, HashMap<String, ByteIterator> result) {
for (int i = 0; i < nFields; i++) {
int len = buf.getInt();
int off = buf.position();
String key = new String(rowData, off, len, UTF8);
buf.position(off + len);
len = buf.getInt();
off = buf.position();
if (fields == null || fields.contains(key)) {
result.put(key, new ByteArrayByteIterator(rowData, off, len));
}
buf.position(off + len);
}
return result;
}
private static class StaticHolder {
static final YCSBSchemaBuilder INSTANCE = new YCSBSchemaBuilder();
}
}
| 9,781 | 31.072131 | 114 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/main/java/site/ycsb/db/voltdb/ConnectionHelper.java | /**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/*
* VoltDB Connection Utility.
*/
package site.ycsb.db.voltdb;
import java.io.IOException;
import java.net.Socket;
import java.util.concurrent.CountDownLatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.voltdb.client.Client;
import org.voltdb.client.ClientConfig;
import org.voltdb.client.ClientFactory;
/**
* Help class to create VoltDB connections for YCSB benchmark.
*/
public final class ConnectionHelper {
/**
* Default port for VoltDB.
*/
private static final int VOLTDB_DEFAULT_PORT = 21212;
/**
* hidden constructor.
*/
private ConnectionHelper() {
}
/**
* Creates a factory used to connect to a VoltDB instance. (Note that if a
* corresponding connection exists, all parameters other than 'servers' are
* ignored)
*
* @param servers The comma separated list of VoltDB servers in
* hostname[:port] format that the instance will use.
* @param user The username for the connection
* @param password The password for the specified user
* @param ratelimit A limit on the number of transactions per second for the
* VoltDB instance
* @return The existing factory if a corresponding connection has already been
* created; the newly created one otherwise.
* @throws IOException Throws if a connection is already open with a
* different server string.
* @throws InterruptedException
*/
public static Client createConnection(String servers, String user, String password,
int ratelimit) throws IOException, InterruptedException {
ClientConfig config = new ClientConfig(user, password);
config.setMaxTransactionsPerSecond(ratelimit);
Client client = ClientFactory.createClient(config);
// Note that in VoltDB there is a distinction between creating an instance of a client
// and actually connecting to the DB...
connect(client, servers);
return client;
}
/**
* Connect to a single server with retry. Limited exponential backoff. No
* timeout. This will run until the process is killed if it's not able to
* connect.
*
* @param server hostname:port or just hostname (hostname can be ip).
*/
private static void connectToOneServerWithRetry(final Client client, String server) {
Logger logger = LoggerFactory.getLogger(ConnectionHelper.class);
int sleep = 1000;
while (true) {
try {
client.createConnection(server);
break;
} catch (Exception e) {
logger.error("Connection failed - retrying in %d second(s).\n", sleep / 1000);
try {
Thread.sleep(sleep);
} catch (java.lang.InterruptedException e2) {
logger.error(e2.getMessage());
}
if (sleep < 8000) {
sleep += sleep;
}
}
}
logger.info("Connected to VoltDB node at:" + server);
}
/**
* See if DB servers are present on the network.
*
* @return true or false
*/
public static boolean checkDBServers(String servernames) {
String[] serverNamesArray = servernames.split(",");
boolean dbThere = false;
Socket socket = null;
try {
// Connect
socket = new Socket(serverNamesArray[0], VOLTDB_DEFAULT_PORT);
dbThere = true;
} catch (IOException connectFailed) {
dbThere = false;
} finally {
if (socket != null) {
try {
socket.close();
} catch (IOException ignore) {
// Ignore.
}
}
socket = null;
}
return dbThere;
}
/**
* Connect to a set of servers in parallel. Each will retry until connection.
* This call will block until all have connected.
*
* @param servers A comma separated list of servers using the hostname:port
* syntax (where :port is optional).
* @throws InterruptedException if anything bad happens with the threads.
*/
private static void connect(final Client client, String servers) throws InterruptedException {
Logger logger = LoggerFactory.getLogger(ConnectionHelper.class);
logger.info("Connecting to VoltDB...");
String[] serverArray = servers.split(",");
final CountDownLatch connections = new CountDownLatch(serverArray.length);
// use a new thread to connect to each server
for (final String server : serverArray) {
new Thread(new Runnable() {
@Override
public void run() {
connectToOneServerWithRetry(client, server);
connections.countDown();
}
}).start();
}
// block until all have connected
connections.await();
}
}
| 5,378 | 28.883333 | 96 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/main/java/site/ycsb/db/voltdb/sortedvolttable/VoltDBTableSortedMergeWrangler.java | package site.ycsb.db.voltdb.sortedvolttable;
/**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
import org.voltdb.VoltTable;
import org.voltdb.VoltType;
import org.voltdb.client.ClientResponse;
import org.voltdb.client.ClientResponseWithPartitionKey;
/**
* VoltDBTableSortedMergeWrangler allows you to merge an array of VoltTable
* provided by callAllPartitionProcedure.
*
* The intended use case is for when you need to issue a multi partition query
* but would prefer not to, as you don't need perfect read consistency and would
* rather get the individual VoltDB partitions to issue the query independently
* and then somehow merge the results.
*
*/
public class VoltDBTableSortedMergeWrangler {
private ClientResponseWithPartitionKey[] theTables = null;
@SuppressWarnings("rawtypes")
private Comparable whatWeSelectedLastTime = null;
public VoltDBTableSortedMergeWrangler(ClientResponseWithPartitionKey[] response) {
super();
this.theTables = response;
}
/**
* Takes 'theTables' and merges them based on column 'columnId'. We assume that
* column 'columnId' in each element of 'theTables' is correctly sorted within
* itself.
*
* @param columnid
* @param limit How many rows we want
* @return A new VoltTable.
* @throws NeedsToBeComparableException - if column columnId doesn't implement Comparable.
* @throws IncomingVoltTablesNeedToBeSortedException - incoming data isn't already sorted.
* @throws ClientResponseIsBadException - The procedure worked but is complaining.
*/
public VoltTable getSortedTable(int columnid, int limit)
throws NeedsToBeComparableException, IncomingVoltTablesNeedToBeSortedException, ClientResponseIsBadException {
whatWeSelectedLastTime = null;
// Create an empty output table
VoltTable outputTable = new VoltTable(theTables[0].response.getResults()[0].getTableSchema());
// make sure our input tables are usable, and ready to be read from the
// start
for (int i = 0; i < theTables.length; i++) {
VoltTable currentTable = theTables[i].response.getResults()[0];
if (theTables[i].response.getStatus() != ClientResponse.SUCCESS) {
throw new ClientResponseIsBadException(i + " " + theTables[i].response.getStatusString());
}
currentTable.resetRowPosition();
currentTable.advanceRow();
}
// Find table with lowest value for columnId, which is supposed to be
// the sort key.
int lowestId = getLowestId(columnid);
// Loop until we run out of data or get 'limit' rows.
while (lowestId > -1 && outputTable.getRowCount() < limit) {
// having identified the lowest Table pull that row, add it to
// the output table, and then call 'advanceRow' so we can do this
// again...
VoltTable lowestTable = theTables[lowestId].response.getResults()[0];
outputTable.add(lowestTable.cloneRow());
lowestTable.advanceRow();
// Find table with lowest value for columnId
lowestId = getLowestId(columnid);
}
return outputTable;
}
/**
* This routine looks at column 'columnId' in an array of VoltTable and
* identifies which one is lowest. Note that as we call 'advanceRow' elsewhere
* this will change.
*
* @param columnid
* @return the VoltTable with the lowest value for column 'columnId'. or -1 if
* we've exhausted all the VoltTables.
* @throws NeedsToBeComparableException
* @throws IncomingVoltTablesNeedToBeSortedException
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
private int getLowestId(int columnid) throws NeedsToBeComparableException, IncomingVoltTablesNeedToBeSortedException {
int lowestId = -1;
Comparable lowestObservedValue = null;
for (int i = 0; i < theTables.length; i++) {
VoltTable currentTable = theTables[i].response.getResults()[0];
int activeRowIndex = currentTable.getActiveRowIndex();
int rowCount = currentTable.getRowCount();
if (activeRowIndex > -1 && activeRowIndex < rowCount) {
if (lowestObservedValue == null) {
lowestId = i;
lowestObservedValue = getComparable(currentTable, columnid);
} else {
Comparable newObservedValue = getComparable(currentTable, columnid);
if (newObservedValue.compareTo(lowestObservedValue) <= 0) {
lowestId = i;
lowestObservedValue = getComparable(currentTable, columnid);
}
}
}
}
// If we found something make sure that the data in columnid was sorted
// properly when it was retrieved.
if (lowestId > -1) {
Comparable latestItemWeSelected = getComparable(theTables[lowestId].response.getResults()[0], columnid);
if (whatWeSelectedLastTime != null && latestItemWeSelected.compareTo(whatWeSelectedLastTime) < 0) {
throw new IncomingVoltTablesNeedToBeSortedException(
"Latest Item '" + latestItemWeSelected + "' is before last item '" + whatWeSelectedLastTime + "'");
}
whatWeSelectedLastTime = latestItemWeSelected;
}
return lowestId;
}
/**
* Get the value we're working with as a Comparable.
*
* @param theTable
* @param columnId
* @return a Comparable.
* @throws NeedsToBeComparableException
*/
@SuppressWarnings("rawtypes")
private Comparable getComparable(VoltTable theTable, int columnId) throws NeedsToBeComparableException {
Comparable c = null;
VoltType vt = theTable.getColumnType(columnId);
Object theValue = theTable.get(columnId, vt);
if (theValue instanceof Comparable) {
c = (Comparable) theValue;
} else {
throw new NeedsToBeComparableException(
theValue + ": Only Comparables are supported by VoltDBTableSortedMergeWrangler");
}
return c;
}
/**
* Do a comparison of byte arrays. Not used right now, but will be when we added
* support for VARBINARY.
*
* @param left
* @param right
* @return whether 'left' is <, >, or = 'right'
*/
private int compare(byte[] left, byte[] right) {
for (int i = 0, j = 0; i < left.length && j < right.length; i++, j++) {
int a = (left[i] & 0xff);
int b = (right[j] & 0xff);
if (a != b) {
return a - b;
}
}
return left.length - right.length;
}
}
| 6,961 | 32.796117 | 120 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/main/java/site/ycsb/db/voltdb/sortedvolttable/package-info.java | /**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
*
* VoltDBTableSortedMergeWrangler allows you to merge an array of VoltTable
* provided by callAllPartitionProcedure.
*
* The intended use case is for when you need to issue a multi partition query
* but would prefer not to, as you don't need perfect read consistency and would
* rather get the individual VoltDB partitions to issue the query independently
* and then somehow merge the results.
*
*/
package site.ycsb.db.voltdb.sortedvolttable;
| 1,137 | 38.241379 | 80 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/main/java/site/ycsb/db/voltdb/sortedvolttable/ClientResponseIsBadException.java | package site.ycsb.db.voltdb.sortedvolttable;
/**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
*/
@SuppressWarnings("serial")
public class ClientResponseIsBadException extends Exception {
public ClientResponseIsBadException(String string) {
super(string);
}
}
| 903 | 29.133333 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/main/java/site/ycsb/db/voltdb/sortedvolttable/IncomingVoltTablesNeedToBeSortedException.java | package site.ycsb.db.voltdb.sortedvolttable;
/**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
*/
@SuppressWarnings("serial")
public class IncomingVoltTablesNeedToBeSortedException extends Exception {
public IncomingVoltTablesNeedToBeSortedException(String string) {
super(string);
}
}
| 929 | 30 | 74 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/main/java/site/ycsb/db/voltdb/sortedvolttable/NeedsToBeComparableException.java | package site.ycsb.db.voltdb.sortedvolttable;
/**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
*/
@SuppressWarnings("serial")
public class NeedsToBeComparableException extends Exception {
/**
* @param string
*/
public NeedsToBeComparableException(String string) {
super(string);
}
}
| 934 | 27.333333 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/main/java/site/ycsb/db/voltdb/procs/package-info.java | /**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* VoltDB site.ycsb.db.voltdb.procs for Put, Scan and ScanAll.
* Other site.ycsb.db.voltdb.procs are defined using DDL.
*
* ByteWrapper is a utility class, not a procedure.
*/
package site.ycsb.db.voltdb.procs;
| 887 | 33.153846 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/main/java/site/ycsb/db/voltdb/procs/Put.java | /**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.voltdb.procs;
import java.nio.ByteBuffer;
import java.util.HashSet;
import org.voltdb.SQLStmt;
import org.voltdb.VoltProcedure;
import org.voltdb.VoltTable;
/**
*
* Update a value in STORE.
*
*/
public class Put extends VoltProcedure {
private final SQLStmt selectStmt = new SQLStmt("SELECT value FROM Store WHERE keyspace = ? AND key = ?");
private final SQLStmt insertStmt = new SQLStmt("INSERT INTO Store VALUES (?, ?, ?)");
private final SQLStmt updateStmt = new SQLStmt("UPDATE Store SET value = ? WHERE keyspace = ? AND key = ?");
public long run(byte[] keyspace, String key, byte[] data) {
voltQueueSQL(selectStmt, keyspace, key);
VoltTable res = voltExecuteSQL()[0];
if (res.advanceRow()) {
voltQueueSQL(updateStmt, merge(res.getVarbinary(0), data), keyspace, key);
} else {
voltQueueSQL(insertStmt, keyspace, key, data);
}
voltExecuteSQL(true);
return 0L;
}
private byte[] merge(byte[] dest, byte[] src) {
HashSet<ByteWrapper> mergeSet = new HashSet<ByteWrapper>();
ByteBuffer buf = ByteBuffer.wrap(src);
int nSrc = buf.getInt();
for (int i = 0; i < nSrc; i++) {
int len = buf.getInt();
int off = buf.position();
mergeSet.add(new ByteWrapper(src, off, len));
buf.position(off + len);
len = buf.getInt();
buf.position(buf.position() + len);
}
byte[] merged = new byte[src.length + dest.length];
ByteBuffer out = ByteBuffer.wrap(merged);
buf = ByteBuffer.wrap(dest);
int nDest = buf.getInt();
int nFields = nSrc + nDest;
out.putInt(nFields);
int blockStart = 4;
int blockEnd = 4;
for (int i = 0; i < nDest; i++) {
int len = buf.getInt();
int off = buf.position();
boolean flushBlock = mergeSet.contains(new ByteWrapper(dest, off, len));
buf.position(off + len);
len = buf.getInt();
buf.position(buf.position() + len);
if (flushBlock) {
if (blockStart < blockEnd) {
out.put(dest, blockStart, blockEnd - blockStart);
}
nFields--;
blockStart = buf.position();
}
blockEnd = buf.position();
}
if (blockStart < blockEnd) {
out.put(dest, blockStart, blockEnd - blockStart);
}
out.put(src, 4, src.length - 4);
int length = out.position();
if (nFields != nSrc + nDest) {
out.position(0);
out.putInt(nFields);
}
byte[] res = new byte[length];
System.arraycopy(merged, 0, res, 0, length);
return res;
}
}
| 3,215 | 29.923077 | 110 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/main/java/site/ycsb/db/voltdb/procs/ScanAll.java | /**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.voltdb.procs;
import org.voltdb.SQLStmt;
import org.voltdb.VoltProcedure;
import org.voltdb.VoltTable;
/**
* Query STORE using a multi partition query..
*
*/
public class ScanAll extends VoltProcedure {
private final SQLStmt getBddStmt = new SQLStmt(
"SELECT value, key FROM Store WHERE keyspace = ? AND key >= ? ORDER BY key, keyspace LIMIT ?");
private final SQLStmt getUnbddStmt = new SQLStmt(
"SELECT value, key FROM Store WHERE keyspace = ? ORDER BY key, keyspace LIMIT ?");
public VoltTable[] run(byte[] keyspace, byte[] rangeMin, int count) throws Exception {
if (rangeMin != null) {
voltQueueSQL(getBddStmt, keyspace, new String(rangeMin, "UTF-8"), count);
} else {
voltQueueSQL(getUnbddStmt, keyspace, count);
}
return voltExecuteSQL(true);
}
}
| 1,511 | 32.6 | 101 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/main/java/site/ycsb/db/voltdb/procs/ByteWrapper.java | /**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.voltdb.procs;
/**
* Utility class to map data structures used by YCSB to a VoltDB VARBINARY column.
*/
class ByteWrapper {
private byte[] marr;
private int moff;
private int mlen;
ByteWrapper(byte[] arr, int off, int len) {
marr = arr;
moff = off;
mlen = len;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof ByteWrapper)) {
return false;
}
ByteWrapper that = (ByteWrapper) obj;
if (this.mlen != that.mlen) {
return false;
}
for (int i = 0; i < this.mlen; i++) {
if (this.marr[this.moff + i] != that.marr[that.moff + i]) {
return false;
}
}
return true;
}
@Override
public int hashCode() {
if (this.marr == null) {
return 0;
}
int res = 1;
for (int i = 0; i < mlen; i++) {
res = 31 * res + marr[moff + i];
}
return res;
}
} | 1,630 | 23.712121 | 82 | java |
null | NearPMSW-main/baseline/logging/YCSB2/voltdb/src/main/java/site/ycsb/db/voltdb/procs/Scan.java | /**
* Copyright (c) 2015-2019 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.voltdb.procs;
import org.voltdb.SQLStmt;
import org.voltdb.VoltProcedure;
import org.voltdb.VoltTable;
/**
*
* Query STORE using a single partition query.
*
*/
public class Scan extends VoltProcedure {
private final SQLStmt getBddStmt = new SQLStmt(
"SELECT value, key FROM Store WHERE keyspace = ? AND key >= ? ORDER BY key, keyspace LIMIT ?");
private final SQLStmt getUnbddStmt = new SQLStmt(
"SELECT value, key FROM Store WHERE keyspace = ? ORDER BY key, keyspace LIMIT ?");
public VoltTable[] run(String partKey, byte[] keyspace, byte[] rangeMin, int count) throws Exception {
if (rangeMin != null) {
voltQueueSQL(getBddStmt, keyspace, new String(rangeMin, "UTF-8"), count);
} else {
voltQueueSQL(getUnbddStmt, keyspace, count);
}
return voltExecuteSQL(true);
}
}
| 1,525 | 32.911111 | 104 | java |
null | NearPMSW-main/baseline/logging/YCSB2/seaweedfs/src/main/java/site/ycsb/db/seaweed/package-info.java | /**
* Copyright (c) 2015 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*
* SeaweedFS storage client binding for YCSB.
*/
package site.ycsb.db.seaweed;
| 739 | 32.636364 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/seaweedfs/src/main/java/site/ycsb/db/seaweed/SeaweedClient.java | /**
* Copyright (c) 2015 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
* <p>
* SeaweedFS storage client binding for YCSB.
*/
package site.ycsb.db.seaweed;
import seaweedfs.client.FilerProto;
import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.SeaweedRead;
import seaweedfs.client.SeaweedWrite;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.node.ObjectNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* SeaweedFS Storage client for YCSB framework.
*
* The size of the file to upload is determined by two parameters:
* - fieldcount this is the number of fields of a record in YCSB
* - fieldlength this is the size in bytes of a single field in the record
* together these two parameters define the size of the file to upload,
* the size in bytes is given by the fieldlength multiplied by the fieldcount.
* The name of the file is determined by the parameter key.
* This key is automatically generated by YCSB.
*/
public class SeaweedClient extends DB {
private static final Logger LOG = LoggerFactory.getLogger(SeaweedClient.class);
protected static final ObjectMapper MAPPER = new ObjectMapper();
private FilerClient filerClient;
private FilerGrpcClient filerGrpcClient;
private String filerHost;
private int filerPort;
private String folder;
/**
* Cleanup any state for this storage.
* Called once per instance;
*/
@Override
public void cleanup() throws DBException {
}
/**
* Delete a file from SeaweedFS Storage.
*
* @param tableName The name of the table
* @param key The record key of the file to delete.
* @return OK on success, otherwise ERROR. See the
* {@link DB} class's description for a discussion of error codes.
*/
@Override
public Status delete(String tableName, String key) {
if (!filerClient.rm(this.folder + "/" + tableName + "/" + key, true, true)) {
return Status.ERROR;
}
return Status.OK;
}
/**
* Initialize any state for the storage.
* Called once per SeaweedFS instance; If the client is not null it is re-used.
*/
@Override
public void init() throws DBException {
filerHost = getProperties().getProperty("seaweed.filerHost", "localhost");
filerPort = Integer.parseInt(getProperties().getProperty("seaweed.filerPort", "8888"));
folder = getProperties().getProperty("seaweed.folder", "/ycsb");
filerGrpcClient = new FilerGrpcClient(filerHost, filerPort+10000);
filerClient = new FilerClient(filerGrpcClient);
filerClient.mkdirs(this.folder, 0755);
}
/**
* Create a new File in the table. Any field/value pairs in the specified
* values HashMap will be written into the file with the specified record
* key.
*
* @param tableName The name of the table
* @param key The record key of the file to insert.
* @param values A HashMap of field/value pairs to insert in the file.
* Only the content of the first field is written to a byteArray
* multiplied by the number of field. In this way the size
* of the file to upload is determined by the fieldlength
* and fieldcount parameters.
* @return OK on success, ERROR otherwise. See the
* {@link DB} class's description for a discussion of error codes.
*/
@Override
public Status insert(String tableName, String key,
Map<String, ByteIterator> values) {
return writeToStorage(tableName, key, values);
}
/**
* Read a file from the table. Each field/value pair from the result
* will be stored in a HashMap.
*
* @param tableName The name of the table
* @param key The record key of the file to read.
* @param fields The list of fields to read, or null for all of them,
* it is null by default
* @param result A HashMap of field/value pairs for the result
* @return OK on success, ERROR otherwise.
*/
@Override
public Status read(String tableName, String key, Set<String> fields,
Map<String, ByteIterator> result) {
return readFromStorage(tableName, key, fields, result);
}
/**
* Update a file in the table. Any field/value pairs in the specified
* values HashMap will be written into the file with the specified file
* key, overwriting any existing values with the same field name.
*
* @param tableName The name of the table
* @param key The file key of the file to write.
* @param values A HashMap of field/value pairs to update in the record
* @return OK on success, ERORR otherwise.
*/
@Override
public Status update(String tableName, String key,
Map<String, ByteIterator> values) {
Map<String, ByteIterator> existingValues = new HashMap<>();
Status readStatus = readFromStorage(tableName, key, null, existingValues);
if (readStatus != Status.OK) {
return readStatus;
}
existingValues.putAll(values);
return writeToStorage(tableName, key, existingValues);
}
/**
* Perform a range scan for a set of files in the table. Each
* field/value pair from the result will be stored in a HashMap.
*
* @param tableName The name of the table
* @param startkey The file key of the first file to read.
* @param recordcount The number of files to read
* @param fields The list of fields to read, or null for all of them
* @param result A Vector of HashMaps, where each HashMap is a set field/value
* pairs for one file
* @return OK on success, ERROR otherwise.
*/
@Override
public Status scan(String tableName, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
return scanFromStorage(tableName, startkey, recordcount, fields, result);
}
/**
* Write a new object to SeaweedFS.
*
* @param tableName The name of the table
* @param key The file key of the object to upload/update.
* @param values The data to be written on the object
*/
protected Status writeToStorage(String tableName, String key, Map<String, ByteIterator> values) {
try {
byte[] jsonData = toJson(values).getBytes(StandardCharsets.UTF_8);
long now = System.currentTimeMillis() / 1000L;
FilerProto.Entry.Builder entry = FilerProto.Entry.newBuilder()
.setName(key)
.setIsDirectory(false)
.setAttributes(
FilerProto.FuseAttributes.newBuilder()
.setCrtime(now)
.setMtime(now)
.setFileMode(0755)
);
SeaweedWrite.writeData(entry, "000", this.filerGrpcClient, 0, jsonData, 0, jsonData.length);
SeaweedWrite.writeMeta(this.filerGrpcClient, this.folder + "/" + tableName, entry);
} catch (Exception e) {
LOG.error("Not possible to write the object {}", key, e);
return Status.ERROR;
}
return Status.OK;
}
/**
* Download an object from SeaweedFS.
*
* @param tableName The name of the table
* @param key The file key of the object to upload/update.
* @param result The Hash map where data from the object are written
*/
protected Status readFromStorage(String tableName, String key, Set<String> fields, Map<String, ByteIterator> result) {
try {
FilerProto.Entry entry = this.filerClient.lookupEntry(this.folder + "/" + tableName, key);
if (entry!=null) {
readOneEntry(entry, key, fields, result);
}else{
LOG.error("Fail to read the object {}", key);
return Status.NOT_FOUND;
}
} catch (Exception e) {
LOG.error("Not possible to get the object {}", key, e);
return Status.ERROR;
}
return Status.OK;
}
protected void readOneEntry(
FilerProto.Entry entry, String key, Set<String> fields, Map<String, ByteIterator> result) throws IOException {
List<SeaweedRead.VisibleInterval> visibleIntervalList =
SeaweedRead.nonOverlappingVisibleIntervals(filerGrpcClient, entry.getChunksList());
int length = (int) SeaweedRead.totalSize(entry.getChunksList());
byte[] buffer = new byte[length];
SeaweedRead.read(this.filerGrpcClient, visibleIntervalList, 0, buffer, 0, buffer.length);
fromJson(new String(buffer, StandardCharsets.UTF_8), fields, result);
}
/**
* Perform an emulation of a database scan operation on a SeaweedFS table.
*
* @param tableName The name of the table
* @param startkey The file key of the first file to read.
* @param recordcount The number of files to read
* @param fields The list of fields to read, or null for all of them
* @param result A Vector of HashMaps, where each HashMap is a set field/value
* pairs for one file
*/
protected Status scanFromStorage(String tableName, String startkey,
int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
try {
List<FilerProto.Entry> entryList = this.filerClient.listEntries(
this.folder + "/" + tableName, "", startkey, recordcount, true);
for (FilerProto.Entry entry : entryList) {
HashMap<String, ByteIterator> ret = new HashMap<String, ByteIterator>();
readOneEntry(entry, entry.getName(), fields, ret);
result.add(ret);
}
} catch (Exception e) {
LOG.error("Not possible to list the object {} limit {}", startkey, recordcount, e);
return Status.ERROR;
}
return Status.OK;
}
protected static void fromJson(
String value, Set<String> fields,
Map<String, ByteIterator> result) throws IOException {
JsonNode json = MAPPER.readTree(value);
boolean checkFields = fields != null && !fields.isEmpty();
for (Iterator<Map.Entry<String, JsonNode>> jsonFields = json.getFields();
jsonFields.hasNext();
/* increment in loop body */) {
Map.Entry<String, JsonNode> jsonField = jsonFields.next();
String name = jsonField.getKey();
if (checkFields && !fields.contains(name)) {
continue;
}
JsonNode jsonValue = jsonField.getValue();
if (jsonValue != null && !jsonValue.isNull()) {
result.put(name, new StringByteIterator(jsonValue.asText()));
}
}
}
protected static String toJson(Map<String, ByteIterator> values)
throws IOException {
ObjectNode node = MAPPER.createObjectNode();
Map<String, String> stringMap = StringByteIterator.getStringMap(values);
for (Map.Entry<String, String> pair : stringMap.entrySet()) {
node.put(pair.getKey(), pair.getValue());
}
JsonFactory jsonFactory = new JsonFactory();
Writer writer = new StringWriter();
JsonGenerator jsonGenerator = jsonFactory.createJsonGenerator(writer);
MAPPER.writeTree(jsonGenerator, node);
return writer.toString();
}
}
| 12,024 | 36.461059 | 120 | java |
null | NearPMSW-main/baseline/logging/YCSB2/redis/src/main/java/site/ycsb/db/RedisClient.java | /**
* Copyright (c) 2012 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* Redis client binding for YCSB.
*
* All YCSB records are mapped to a Redis *hash field*. For scanning
* operations, all keys are saved (by an arbitrary hash) in a sorted set.
*/
package site.ycsb.db;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import redis.clients.jedis.BasicCommands;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisCluster;
import redis.clients.jedis.JedisCommands;
import redis.clients.jedis.Protocol;
import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
/**
* YCSB binding for <a href="http://redis.io/">Redis</a>.
*
* See {@code redis/README.md} for details.
*/
public class RedisClient extends DB {
private JedisCommands jedis;
public static final String HOST_PROPERTY = "redis.host";
public static final String PORT_PROPERTY = "redis.port";
public static final String PASSWORD_PROPERTY = "redis.password";
public static final String CLUSTER_PROPERTY = "redis.cluster";
public static final String TIMEOUT_PROPERTY = "redis.timeout";
public static final String INDEX_KEY = "_indices";
public void init() throws DBException {
Properties props = getProperties();
int port;
String portString = props.getProperty(PORT_PROPERTY);
if (portString != null) {
port = Integer.parseInt(portString);
} else {
port = Protocol.DEFAULT_PORT;
}
String host = props.getProperty(HOST_PROPERTY);
boolean clusterEnabled = Boolean.parseBoolean(props.getProperty(CLUSTER_PROPERTY));
if (clusterEnabled) {
Set<HostAndPort> jedisClusterNodes = new HashSet<>();
jedisClusterNodes.add(new HostAndPort(host, port));
jedis = new JedisCluster(jedisClusterNodes);
} else {
String redisTimeout = props.getProperty(TIMEOUT_PROPERTY);
if (redisTimeout != null){
jedis = new Jedis(host, port, Integer.parseInt(redisTimeout));
} else {
jedis = new Jedis(host, port);
}
((Jedis) jedis).connect();
}
String password = props.getProperty(PASSWORD_PROPERTY);
if (password != null) {
((BasicCommands) jedis).auth(password);
}
}
public void cleanup() throws DBException {
try {
((Closeable) jedis).close();
} catch (IOException e) {
throw new DBException("Closing connection failed.");
}
}
/*
* Calculate a hash for a key to store it in an index. The actual return value
* of this function is not interesting -- it primarily needs to be fast and
* scattered along the whole space of doubles. In a real world scenario one
* would probably use the ASCII values of the keys.
*/
private double hash(String key) {
return key.hashCode();
}
// XXX jedis.select(int index) to switch to `table`
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
if (fields == null) {
StringByteIterator.putAllAsByteIterators(result, jedis.hgetAll(key));
} else {
String[] fieldArray =
(String[]) fields.toArray(new String[fields.size()]);
List<String> values = jedis.hmget(key, fieldArray);
Iterator<String> fieldIterator = fields.iterator();
Iterator<String> valueIterator = values.iterator();
while (fieldIterator.hasNext() && valueIterator.hasNext()) {
result.put(fieldIterator.next(),
new StringByteIterator(valueIterator.next()));
}
assert !fieldIterator.hasNext() && !valueIterator.hasNext();
}
return result.isEmpty() ? Status.ERROR : Status.OK;
}
@Override
public Status insert(String table, String key,
Map<String, ByteIterator> values) {
if (jedis.hmset(key, StringByteIterator.getStringMap(values))
.equals("OK")) {
jedis.zadd(INDEX_KEY, hash(key), key);
return Status.OK;
}
return Status.ERROR;
}
@Override
public Status delete(String table, String key) {
return jedis.del(key) == 0 && jedis.zrem(INDEX_KEY, key) == 0 ? Status.ERROR
: Status.OK;
}
@Override
public Status update(String table, String key,
Map<String, ByteIterator> values) {
return jedis.hmset(key, StringByteIterator.getStringMap(values))
.equals("OK") ? Status.OK : Status.ERROR;
}
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
Set<String> keys = jedis.zrangeByScore(INDEX_KEY, hash(startkey),
Double.POSITIVE_INFINITY, 0, recordcount);
HashMap<String, ByteIterator> values;
for (String key : keys) {
values = new HashMap<String, ByteIterator>();
read(table, key, fields, values);
result.add(values);
}
return Status.OK;
}
}
| 5,737 | 30.355191 | 87 | java |
null | NearPMSW-main/baseline/logging/YCSB2/redis/src/main/java/site/ycsb/db/package-info.java | /*
* Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="http://redis.io/">Redis</a>.
*/
package site.ycsb.db;
| 746 | 31.478261 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/asynchbase/src/test/java/site/ycsb/db/AsyncHBaseTest.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY;
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
import site.ycsb.ByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.measurements.Measurements;
import site.ycsb.workloads.CoreWorkload;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Properties;
import java.util.Vector;
/**
* Integration tests for the YCSB AsyncHBase client, using an HBase minicluster.
* These are the same as those for the hbase10 client.
*/
public class AsyncHBaseTest {
private final static String COLUMN_FAMILY = "cf";
private static HBaseTestingUtility testingUtil;
private AsyncHBaseClient client;
private Table table = null;
private String tableName;
private static boolean isWindows() {
final String os = System.getProperty("os.name");
return os.startsWith("Windows");
}
/**
* Creates a mini-cluster for use in these tests.
*
* This is a heavy-weight operation, so invoked only once for the test class.
*/
@BeforeClass
public static void setUpClass() throws Exception {
// Minicluster setup fails on Windows with an UnsatisfiedLinkError.
// Skip if windows.
assumeTrue(!isWindows());
testingUtil = HBaseTestingUtility.createLocalHTU();
testingUtil.startMiniCluster();
}
/**
* Tears down mini-cluster.
*/
@AfterClass
public static void tearDownClass() throws Exception {
if (testingUtil != null) {
testingUtil.shutdownMiniCluster();
}
}
/**
* Sets up the mini-cluster for testing.
*
* We re-create the table for each test.
*/
@Before
public void setUp() throws Exception {
Properties p = new Properties();
p.setProperty("columnfamily", COLUMN_FAMILY);
Measurements.setProperties(p);
final CoreWorkload workload = new CoreWorkload();
workload.init(p);
tableName = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
table = testingUtil.createTable(TableName.valueOf(tableName), Bytes.toBytes(COLUMN_FAMILY));
final String zkQuorum = "127.0.0.1:" + testingUtil.getZkCluster().getClientPort();
p.setProperty("hbase.zookeeper.quorum", zkQuorum);
client = new AsyncHBaseClient();
client.setProperties(p);
client.init();
}
@After
public void tearDown() throws Exception {
table.close();
testingUtil.deleteTable(tableName);
}
@Test
public void testRead() throws Exception {
final String rowKey = "row1";
final Put p = new Put(Bytes.toBytes(rowKey));
p.addColumn(Bytes.toBytes(COLUMN_FAMILY),
Bytes.toBytes("column1"), Bytes.toBytes("value1"));
p.addColumn(Bytes.toBytes(COLUMN_FAMILY),
Bytes.toBytes("column2"), Bytes.toBytes("value2"));
table.put(p);
final HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
final Status status = client.read(tableName, rowKey, null, result);
assertEquals(Status.OK, status);
assertEquals(2, result.size());
assertEquals("value1", result.get("column1").toString());
assertEquals("value2", result.get("column2").toString());
}
@Test
public void testReadMissingRow() throws Exception {
final HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
final Status status = client.read(tableName, "Missing row", null, result);
assertEquals(Status.NOT_FOUND, status);
assertEquals(0, result.size());
}
@Test
public void testScan() throws Exception {
// Fill with data
final String colStr = "row_number";
final byte[] col = Bytes.toBytes(colStr);
final int n = 10;
final List<Put> puts = new ArrayList<Put>(n);
for(int i = 0; i < n; i++) {
final byte[] key = Bytes.toBytes(String.format("%05d", i));
final byte[] value = java.nio.ByteBuffer.allocate(4).putInt(i).array();
final Put p = new Put(key);
p.addColumn(Bytes.toBytes(COLUMN_FAMILY), col, value);
puts.add(p);
}
table.put(puts);
// Test
final Vector<HashMap<String, ByteIterator>> result =
new Vector<HashMap<String, ByteIterator>>();
// Scan 5 records, skipping the first
client.scan(tableName, "00001", 5, null, result);
assertEquals(5, result.size());
for(int i = 0; i < 5; i++) {
final HashMap<String, ByteIterator> row = result.get(i);
assertEquals(1, row.size());
assertTrue(row.containsKey(colStr));
final byte[] bytes = row.get(colStr).toArray();
final ByteBuffer buf = ByteBuffer.wrap(bytes);
final int rowNum = buf.getInt();
assertEquals(i + 1, rowNum);
}
}
@Test
public void testUpdate() throws Exception{
final String key = "key";
final HashMap<String, String> input = new HashMap<String, String>();
input.put("column1", "value1");
input.put("column2", "value2");
final Status status = client.insert(tableName, key, StringByteIterator.getByteIteratorMap(input));
assertEquals(Status.OK, status);
// Verify result
final Get get = new Get(Bytes.toBytes(key));
final Result result = this.table.get(get);
assertFalse(result.isEmpty());
assertEquals(2, result.size());
for(final java.util.Map.Entry<String, String> entry : input.entrySet()) {
assertEquals(entry.getValue(),
new String(result.getValue(Bytes.toBytes(COLUMN_FAMILY),
Bytes.toBytes(entry.getKey()))));
}
}
@Test
@Ignore("Not yet implemented")
public void testDelete() {
fail("Not yet implemented");
}
}
| 6,986 | 31.497674 | 102 | java |
null | NearPMSW-main/baseline/logging/YCSB2/asynchbase/src/main/java/site/ycsb/db/package-info.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for HBase using the AsyncHBase client.
*/
package site.ycsb.db;
| 749 | 33.090909 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/asynchbase/src/main/java/site/ycsb/db/AsyncHBaseClient.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.Vector;
import org.hbase.async.Bytes;
import org.hbase.async.Config;
import org.hbase.async.DeleteRequest;
import org.hbase.async.GetRequest;
import org.hbase.async.HBaseClient;
import org.hbase.async.KeyValue;
import org.hbase.async.PutRequest;
import org.hbase.async.Scanner;
import site.ycsb.ByteArrayByteIterator;
import site.ycsb.ByteIterator;
import site.ycsb.DBException;
import site.ycsb.Status;
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY;
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT;
/**
* Alternative Java client for Apache HBase.
*
* This client provides a subset of the main HBase client and uses a completely
* asynchronous pipeline for all calls. It is particularly useful for write heavy
* workloads. It is also compatible with all production versions of HBase.
*/
public class AsyncHBaseClient extends site.ycsb.DB {
public static final Charset UTF8_CHARSET = Charset.forName("UTF8");
private static final String CLIENT_SIDE_BUFFERING_PROPERTY = "clientbuffering";
private static final String DURABILITY_PROPERTY = "durability";
private static final String PREFETCH_META_PROPERTY = "prefetchmeta";
private static final String CONFIG_PROPERTY = "config";
private static final String COLUMN_FAMILY_PROPERTY = "columnfamily";
private static final String JOIN_TIMEOUT_PROPERTY = "jointimeout";
private static final String JOIN_TIMEOUT_PROPERTY_DEFAULT = "30000";
/** Mutex for instantiating a single instance of the client. */
private static final Object MUTEX = new Object();
/** Use for tracking running thread counts so we know when to shutdown the client. */
private static int threadCount = 0;
/** The client that's used for all threads. */
private static HBaseClient client;
/** Print debug information to standard out. */
private boolean debug = false;
/** The column family use for the workload. */
private byte[] columnFamilyBytes;
/** Cache for the last table name/ID to avoid byte conversions. */
private String lastTable = "";
private byte[] lastTableBytes;
private long joinTimeout;
/** Whether or not to bypass the WAL for puts and deletes. */
private boolean durability = true;
/**
* If true, buffer mutations on the client. This is the default behavior for
* AsyncHBase. For measuring insert/update/delete latencies, client side
* buffering should be disabled.
*
* A single instance of this
*/
private boolean clientSideBuffering = false;
@Override
public void init() throws DBException {
if (getProperties().getProperty(CLIENT_SIDE_BUFFERING_PROPERTY, "false")
.toLowerCase().equals("true")) {
clientSideBuffering = true;
}
if (getProperties().getProperty(DURABILITY_PROPERTY, "true")
.toLowerCase().equals("false")) {
durability = false;
}
final String columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY);
if (columnFamily == null || columnFamily.isEmpty()) {
System.err.println("Error, must specify a columnfamily for HBase table");
throw new DBException("No columnfamily specified");
}
columnFamilyBytes = columnFamily.getBytes();
if ((getProperties().getProperty("debug") != null)
&& (getProperties().getProperty("debug").compareTo("true") == 0)) {
debug = true;
}
joinTimeout = Integer.parseInt(getProperties().getProperty(
JOIN_TIMEOUT_PROPERTY, JOIN_TIMEOUT_PROPERTY_DEFAULT));
final boolean prefetchMeta = getProperties()
.getProperty(PREFETCH_META_PROPERTY, "false")
.toLowerCase().equals("true") ? true : false;
try {
synchronized (MUTEX) {
++threadCount;
if (client == null) {
final String configPath = getProperties().getProperty(CONFIG_PROPERTY);
final Config config;
if (configPath == null || configPath.isEmpty()) {
config = new Config();
final Iterator<Entry<Object, Object>> iterator = getProperties()
.entrySet().iterator();
while (iterator.hasNext()) {
final Entry<Object, Object> property = iterator.next();
config.overrideConfig((String)property.getKey(),
(String)property.getValue());
}
} else {
config = new Config(configPath);
}
client = new HBaseClient(config);
// Terminate right now if table does not exist, since the client
// will not propagate this error upstream once the workload
// starts.
String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
try {
client.ensureTableExists(table).join(joinTimeout);
} catch (InterruptedException e1) {
Thread.currentThread().interrupt();
} catch (Exception e) {
throw new DBException(e);
}
if (prefetchMeta) {
try {
if (debug) {
System.out.println("Starting meta prefetch for table " + table);
}
client.prefetchMeta(table).join(joinTimeout);
if (debug) {
System.out.println("Completed meta prefetch for table " + table);
}
} catch (InterruptedException e) {
System.err.println("Interrupted during prefetch");
Thread.currentThread().interrupt();
} catch (Exception e) {
throw new DBException("Failed prefetch", e);
}
}
}
}
} catch (IOException e) {
throw new DBException("Failed instantiation of client", e);
}
}
@Override
public void cleanup() throws DBException {
synchronized (MUTEX) {
--threadCount;
if (client != null && threadCount < 1) {
try {
if (debug) {
System.out.println("Shutting down client");
}
client.shutdown().joinUninterruptibly(joinTimeout);
} catch (Exception e) {
System.err.println("Failed to shutdown the AsyncHBase client "
+ "properly: " + e.getMessage());
}
client = null;
}
}
}
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
setTable(table);
final GetRequest get = new GetRequest(
lastTableBytes, key.getBytes(), columnFamilyBytes);
if (fields != null) {
get.qualifiers(getQualifierList(fields));
}
try {
if (debug) {
System.out.println("Doing read from HBase columnfamily " +
Bytes.pretty(columnFamilyBytes));
System.out.println("Doing read for key: " + key);
}
final ArrayList<KeyValue> row = client.get(get).join(joinTimeout);
if (row == null || row.isEmpty()) {
return Status.NOT_FOUND;
}
// got something so populate the results
for (final KeyValue column : row) {
result.put(new String(column.qualifier()),
// TODO - do we need to clone this array? YCSB may keep it in memory
// for a while which would mean the entire KV would hang out and won't
// be GC'd.
new ByteArrayByteIterator(column.value()));
if (debug) {
System.out.println(
"Result for field: " + Bytes.pretty(column.qualifier())
+ " is: " + Bytes.pretty(column.value()));
}
}
return Status.OK;
} catch (InterruptedException e) {
System.err.println("Thread interrupted");
Thread.currentThread().interrupt();
} catch (Exception e) {
System.err.println("Failure reading from row with key " + key +
": " + e.getMessage());
return Status.ERROR;
}
return Status.ERROR;
}
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
setTable(table);
final Scanner scanner = client.newScanner(lastTableBytes);
scanner.setFamily(columnFamilyBytes);
scanner.setStartKey(startkey.getBytes(UTF8_CHARSET));
// No end key... *sniff*
if (fields != null) {
scanner.setQualifiers(getQualifierList(fields));
}
// no filters? *sniff*
ArrayList<ArrayList<KeyValue>> rows = null;
try {
int numResults = 0;
while ((rows = scanner.nextRows().join(joinTimeout)) != null) {
for (final ArrayList<KeyValue> row : rows) {
final HashMap<String, ByteIterator> rowResult =
new HashMap<String, ByteIterator>(row.size());
for (final KeyValue column : row) {
rowResult.put(new String(column.qualifier()),
// TODO - do we need to clone this array? YCSB may keep it in memory
// for a while which would mean the entire KV would hang out and won't
// be GC'd.
new ByteArrayByteIterator(column.value()));
if (debug) {
System.out.println("Got scan result for key: " +
Bytes.pretty(column.key()));
}
}
result.add(rowResult);
numResults++;
if (numResults >= recordcount) {// if hit recordcount, bail out
break;
}
}
}
scanner.close().join(joinTimeout);
return Status.OK;
} catch (InterruptedException e) {
System.err.println("Thread interrupted");
Thread.currentThread().interrupt();
} catch (Exception e) {
System.err.println("Failure reading from row with key " + startkey +
": " + e.getMessage());
return Status.ERROR;
}
return Status.ERROR;
}
@Override
public Status update(String table, String key,
Map<String, ByteIterator> values) {
setTable(table);
if (debug) {
System.out.println("Setting up put for key: " + key);
}
final byte[][] qualifiers = new byte[values.size()][];
final byte[][] byteValues = new byte[values.size()][];
int idx = 0;
for (final Entry<String, ByteIterator> entry : values.entrySet()) {
qualifiers[idx] = entry.getKey().getBytes();
byteValues[idx++] = entry.getValue().toArray();
if (debug) {
System.out.println("Adding field/value " + entry.getKey() + "/"
+ Bytes.pretty(entry.getValue().toArray()) + " to put request");
}
}
final PutRequest put = new PutRequest(lastTableBytes, key.getBytes(),
columnFamilyBytes, qualifiers, byteValues);
if (!durability) {
put.setDurable(false);
}
if (!clientSideBuffering) {
put.setBufferable(false);
try {
client.put(put).join(joinTimeout);
} catch (InterruptedException e) {
System.err.println("Thread interrupted");
Thread.currentThread().interrupt();
} catch (Exception e) {
System.err.println("Failure reading from row with key " + key +
": " + e.getMessage());
return Status.ERROR;
}
} else {
// hooray! Asynchronous write. But without a callback and an async
// YCSB call we don't know whether it succeeded or not
client.put(put);
}
return Status.OK;
}
@Override
public Status insert(String table, String key,
Map<String, ByteIterator> values) {
return update(table, key, values);
}
@Override
public Status delete(String table, String key) {
setTable(table);
if (debug) {
System.out.println("Doing delete for key: " + key);
}
final DeleteRequest delete = new DeleteRequest(
lastTableBytes, key.getBytes(), columnFamilyBytes);
if (!durability) {
delete.setDurable(false);
}
if (!clientSideBuffering) {
delete.setBufferable(false);
try {
client.delete(delete).join(joinTimeout);
} catch (InterruptedException e) {
System.err.println("Thread interrupted");
Thread.currentThread().interrupt();
} catch (Exception e) {
System.err.println("Failure reading from row with key " + key +
": " + e.getMessage());
return Status.ERROR;
}
} else {
// hooray! Asynchronous write. But without a callback and an async
// YCSB call we don't know whether it succeeded or not
client.delete(delete);
}
return Status.OK;
}
/**
* Little helper to set the table byte array. If it's different than the last
* table we reset the byte array. Otherwise we just use the existing array.
* @param table The table we're operating against
*/
private void setTable(final String table) {
if (!lastTable.equals(table)) {
lastTable = table;
lastTableBytes = table.getBytes();
}
}
/**
* Little helper to build a qualifier byte array from a field set.
* @param fields The fields to fetch.
* @return The column qualifier byte arrays.
*/
private byte[][] getQualifierList(final Set<String> fields) {
final byte[][] qualifiers = new byte[fields.size()][];
int idx = 0;
for (final String field : fields) {
qualifiers[idx++] = field.getBytes();
}
return qualifiers;
}
} | 14,335 | 33.711864 | 101 | java |
null | NearPMSW-main/baseline/logging/YCSB2/googlebigtable/src/main/java/site/ycsb/db/package-info.java | /*
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for Google's <a href="https://cloud.google.com/bigtable/">
* Bigtable</a>.
*/
package site.ycsb.db;
| 784 | 33.130435 | 78 | java |
null | NearPMSW-main/baseline/logging/YCSB2/googlebigtable/src/main/java/site/ycsb/db/GoogleBigtableClient.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.Bytes;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.ExecutionException;
import com.google.bigtable.v2.Column;
import com.google.bigtable.v2.Family;
import com.google.bigtable.v2.MutateRowRequest;
import com.google.bigtable.v2.Mutation;
import com.google.bigtable.v2.ReadRowsRequest;
import com.google.bigtable.v2.Row;
import com.google.bigtable.v2.RowFilter;
import com.google.bigtable.v2.RowRange;
import com.google.bigtable.v2.RowSet;
import com.google.bigtable.v2.Mutation.DeleteFromRow;
import com.google.bigtable.v2.Mutation.SetCell;
import com.google.bigtable.v2.RowFilter.Chain.Builder;
import com.google.cloud.bigtable.config.BigtableOptions;
import com.google.cloud.bigtable.grpc.BigtableDataClient;
import com.google.cloud.bigtable.grpc.BigtableSession;
import com.google.cloud.bigtable.grpc.BigtableTableName;
import com.google.cloud.bigtable.grpc.async.BulkMutation;
import com.google.cloud.bigtable.hbase.BigtableOptionsFactory;
import com.google.cloud.bigtable.util.ByteStringer;
import com.google.protobuf.ByteString;
import site.ycsb.ByteArrayByteIterator;
import site.ycsb.ByteIterator;
import site.ycsb.DBException;
import site.ycsb.Status;
/**
* Google Bigtable Proto client for YCSB framework.
*
* Bigtable offers two APIs. These include a native Protobuf GRPC API as well as
* an HBase API wrapper for the GRPC API. This client implements the Protobuf
* API to test the underlying calls wrapped up in the HBase API. To use the
* HBase API, see the hbase10 client binding.
*/
public class GoogleBigtableClient extends site.ycsb.DB {
public static final Charset UTF8_CHARSET = Charset.forName("UTF8");
/** Property names for the CLI. */
private static final String ASYNC_MUTATOR_MAX_MEMORY = "mutatorMaxMemory";
private static final String ASYNC_MAX_INFLIGHT_RPCS = "mutatorMaxInflightRPCs";
private static final String CLIENT_SIDE_BUFFERING = "clientbuffering";
/** Tracks running thread counts so we know when to close the session. */
private static int threadCount = 0;
/** This will load the hbase-site.xml config file and/or store CLI options. */
private static final Configuration CONFIG = HBaseConfiguration.create();
/** Print debug information to standard out. */
private boolean debug = false;
/** Global Bigtable native API objects. */
private static BigtableOptions options;
private static BigtableSession session;
/** Thread local Bigtable native API objects. */
private BigtableDataClient client;
/** The column family use for the workload. */
private byte[] columnFamilyBytes;
/** Cache for the last table name/ID to avoid byte conversions. */
private String lastTable = "";
private byte[] lastTableBytes;
/**
* If true, buffer mutations on the client. For measuring insert/update/delete
* latencies, client side buffering should be disabled.
*/
private boolean clientSideBuffering = false;
private BulkMutation bulkMutation;
@Override
public void init() throws DBException {
Properties props = getProperties();
// Defaults the user can override if needed
if (getProperties().containsKey(ASYNC_MUTATOR_MAX_MEMORY)) {
CONFIG.set(BigtableOptionsFactory.BIGTABLE_BUFFERED_MUTATOR_MAX_MEMORY_KEY,
getProperties().getProperty(ASYNC_MUTATOR_MAX_MEMORY));
}
if (getProperties().containsKey(ASYNC_MAX_INFLIGHT_RPCS)) {
CONFIG.set(BigtableOptionsFactory.BIGTABLE_BULK_MAX_ROW_KEY_COUNT,
getProperties().getProperty(ASYNC_MAX_INFLIGHT_RPCS));
}
// make it easy on ourselves by copying all CLI properties into the config object.
final Iterator<Entry<Object, Object>> it = props.entrySet().iterator();
while (it.hasNext()) {
Entry<Object, Object> entry = it.next();
CONFIG.set((String)entry.getKey(), (String)entry.getValue());
}
clientSideBuffering = getProperties()
.getProperty(CLIENT_SIDE_BUFFERING, "false").equals("true");
System.err.println("Running Google Bigtable with Proto API" +
(clientSideBuffering ? " and client side buffering." : "."));
synchronized (CONFIG) {
++threadCount;
if (session == null) {
try {
options = BigtableOptionsFactory.fromConfiguration(CONFIG);
session = new BigtableSession(options);
// important to instantiate the first client here, otherwise the
// other threads may receive an NPE from the options when they try
// to read the cluster name.
client = session.getDataClient();
} catch (IOException e) {
throw new DBException("Error loading options from config: ", e);
}
} else {
client = session.getDataClient();
}
}
if ((getProperties().getProperty("debug") != null)
&& (getProperties().getProperty("debug").compareTo("true") == 0)) {
debug = true;
}
final String columnFamily = getProperties().getProperty("columnfamily");
if (columnFamily == null) {
System.err.println("Error, must specify a columnfamily for Bigtable table");
throw new DBException("No columnfamily specified");
}
columnFamilyBytes = Bytes.toBytes(columnFamily);
}
@Override
public void cleanup() throws DBException {
if (bulkMutation != null) {
try {
bulkMutation.flush();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new DBException(e);
} catch(RuntimeException e){
throw new DBException(e);
}
}
synchronized (CONFIG) {
--threadCount;
if (threadCount <= 0) {
try {
session.close();
} catch (IOException e) {
throw new DBException(e);
}
}
}
}
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
if (debug) {
System.out.println("Doing read from Bigtable columnfamily "
+ new String(columnFamilyBytes));
System.out.println("Doing read for key: " + key);
}
setTable(table);
RowFilter filter = RowFilter.newBuilder()
.setFamilyNameRegexFilterBytes(ByteStringer.wrap(columnFamilyBytes))
.build();
if (fields != null && fields.size() > 0) {
Builder filterChain = RowFilter.Chain.newBuilder();
filterChain.addFilters(filter);
filterChain.addFilters(RowFilter.newBuilder()
.setCellsPerColumnLimitFilter(1)
.build());
int count = 0;
// usually "field#" so pre-alloc
final StringBuilder regex = new StringBuilder(fields.size() * 6);
for (final String field : fields) {
if (count++ > 0) {
regex.append("|");
}
regex.append(field);
}
filterChain.addFilters(RowFilter.newBuilder()
.setColumnQualifierRegexFilter(
ByteStringer.wrap(regex.toString().getBytes()))).build();
filter = RowFilter.newBuilder().setChain(filterChain.build()).build();
}
final ReadRowsRequest.Builder rrr = ReadRowsRequest.newBuilder()
.setTableNameBytes(ByteStringer.wrap(lastTableBytes))
.setFilter(filter)
.setRows(RowSet.newBuilder()
.addRowKeys(ByteStringer.wrap(key.getBytes())));
List<Row> rows;
try {
rows = client.readRowsAsync(rrr.build()).get();
if (rows == null || rows.isEmpty()) {
return Status.NOT_FOUND;
}
for (final Row row : rows) {
for (final Family family : row.getFamiliesList()) {
if (Arrays.equals(family.getNameBytes().toByteArray(), columnFamilyBytes)) {
for (final Column column : family.getColumnsList()) {
// we should only have a single cell per column
result.put(column.getQualifier().toString(UTF8_CHARSET),
new ByteArrayByteIterator(column.getCells(0).getValue().toByteArray()));
if (debug) {
System.out.println(
"Result for field: " + column.getQualifier().toString(UTF8_CHARSET)
+ " is: " + column.getCells(0).getValue().toString(UTF8_CHARSET));
}
}
}
}
}
return Status.OK;
} catch (InterruptedException e) {
System.err.println("Interrupted during get: " + e);
Thread.currentThread().interrupt();
return Status.ERROR;
} catch (ExecutionException e) {
System.err.println("Exception during get: " + e);
return Status.ERROR;
}
}
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
setTable(table);
RowFilter filter = RowFilter.newBuilder()
.setFamilyNameRegexFilterBytes(ByteStringer.wrap(columnFamilyBytes))
.build();
if (fields != null && fields.size() > 0) {
Builder filterChain = RowFilter.Chain.newBuilder();
filterChain.addFilters(filter);
filterChain.addFilters(RowFilter.newBuilder()
.setCellsPerColumnLimitFilter(1)
.build());
int count = 0;
// usually "field#" so pre-alloc
final StringBuilder regex = new StringBuilder(fields.size() * 6);
for (final String field : fields) {
if (count++ > 0) {
regex.append("|");
}
regex.append(field);
}
filterChain.addFilters(RowFilter.newBuilder()
.setColumnQualifierRegexFilter(
ByteStringer.wrap(regex.toString().getBytes()))).build();
filter = RowFilter.newBuilder().setChain(filterChain.build()).build();
}
final RowRange range = RowRange.newBuilder()
.setStartKeyClosed(ByteStringer.wrap(startkey.getBytes()))
.build();
final RowSet rowSet = RowSet.newBuilder()
.addRowRanges(range)
.build();
final ReadRowsRequest.Builder rrr = ReadRowsRequest.newBuilder()
.setTableNameBytes(ByteStringer.wrap(lastTableBytes))
.setFilter(filter)
.setRows(rowSet);
List<Row> rows;
try {
rows = client.readRowsAsync(rrr.build()).get();
if (rows == null || rows.isEmpty()) {
return Status.NOT_FOUND;
}
int numResults = 0;
for (final Row row : rows) {
final HashMap<String, ByteIterator> rowResult =
new HashMap<String, ByteIterator>(fields != null ? fields.size() : 10);
for (final Family family : row.getFamiliesList()) {
if (Arrays.equals(family.getNameBytes().toByteArray(), columnFamilyBytes)) {
for (final Column column : family.getColumnsList()) {
// we should only have a single cell per column
rowResult.put(column.getQualifier().toString(UTF8_CHARSET),
new ByteArrayByteIterator(column.getCells(0).getValue().toByteArray()));
if (debug) {
System.out.println(
"Result for field: " + column.getQualifier().toString(UTF8_CHARSET)
+ " is: " + column.getCells(0).getValue().toString(UTF8_CHARSET));
}
}
}
}
result.add(rowResult);
numResults++;
if (numResults >= recordcount) {// if hit recordcount, bail out
break;
}
}
return Status.OK;
} catch (InterruptedException e) {
System.err.println("Interrupted during scan: " + e);
Thread.currentThread().interrupt();
return Status.ERROR;
} catch (ExecutionException e) {
System.err.println("Exception during scan: " + e);
return Status.ERROR;
}
}
@Override
public Status update(String table, String key,
Map<String, ByteIterator> values) {
if (debug) {
System.out.println("Setting up put for key: " + key);
}
setTable(table);
final MutateRowRequest.Builder rowMutation = MutateRowRequest.newBuilder();
rowMutation.setRowKey(ByteString.copyFromUtf8(key));
rowMutation.setTableNameBytes(ByteStringer.wrap(lastTableBytes));
for (final Entry<String, ByteIterator> entry : values.entrySet()) {
final Mutation.Builder mutationBuilder = rowMutation.addMutationsBuilder();
final SetCell.Builder setCellBuilder = mutationBuilder.getSetCellBuilder();
setCellBuilder.setFamilyNameBytes(ByteStringer.wrap(columnFamilyBytes));
setCellBuilder.setColumnQualifier(ByteStringer.wrap(entry.getKey().getBytes()));
setCellBuilder.setValue(ByteStringer.wrap(entry.getValue().toArray()));
// Bigtable uses a 1ms granularity
setCellBuilder.setTimestampMicros(System.currentTimeMillis() * 1000);
}
try {
if (clientSideBuffering) {
bulkMutation.add(rowMutation.build());
} else {
client.mutateRow(rowMutation.build());
}
return Status.OK;
} catch (RuntimeException e) {
System.err.println("Failed to insert key: " + key + " " + e.getMessage());
return Status.ERROR;
}
}
@Override
public Status insert(String table, String key,
Map<String, ByteIterator> values) {
return update(table, key, values);
}
@Override
public Status delete(String table, String key) {
if (debug) {
System.out.println("Doing delete for key: " + key);
}
setTable(table);
final MutateRowRequest.Builder rowMutation = MutateRowRequest.newBuilder()
.setRowKey(ByteString.copyFromUtf8(key))
.setTableNameBytes(ByteStringer.wrap(lastTableBytes));
rowMutation.addMutationsBuilder().setDeleteFromRow(
DeleteFromRow.getDefaultInstance());
try {
if (clientSideBuffering) {
bulkMutation.add(rowMutation.build());
} else {
client.mutateRow(rowMutation.build());
}
return Status.OK;
} catch (RuntimeException e) {
System.err.println("Failed to delete key: " + key + " " + e.getMessage());
return Status.ERROR;
}
}
/**
* Little helper to set the table byte array. If it's different than the last
* table we reset the byte array. Otherwise we just use the existing array.
* @param table The table we're operating against
*/
private void setTable(final String table) {
if (!lastTable.equals(table)) {
lastTable = table;
BigtableTableName tableName = options
.getInstanceName()
.toTableName(table);
lastTableBytes = tableName
.toString()
.getBytes();
synchronized(this) {
if (bulkMutation != null) {
try {
bulkMutation.flush();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
bulkMutation = session.createBulkMutation(tableName);
}
}
}
} | 16,081 | 34.579646 | 90 | java |
null | NearPMSW-main/baseline/logging/YCSB2/mongodb/src/test/java/site/ycsb/db/AbstractDBTestCases.java | /*
* Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeNoException;
import site.ycsb.ByteArrayByteIterator;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.Status;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
/**
* MongoDbClientTest provides runs the basic DB test cases.
* <p>
* The tests will be skipped if MongoDB is not running on port 27017 on the
* local machine. See the README.md for how to get MongoDB running.
* </p>
*/
@SuppressWarnings("boxing")
public abstract class AbstractDBTestCases {
/** The default port for MongoDB. */
private static final int MONGODB_DEFAULT_PORT = 27017;
/**
* Verifies the mongod process (or some process) is running on port 27017, if
* not the tests are skipped.
*/
@BeforeClass
public static void setUpBeforeClass() {
// Test if we can connect.
Socket socket = null;
try {
// Connect
socket = new Socket(InetAddress.getLocalHost(), MONGODB_DEFAULT_PORT);
assertThat("Socket is not bound.", socket.getLocalPort(), not(-1));
} catch (IOException connectFailed) {
assumeNoException("MongoDB is not running. Skipping tests.",
connectFailed);
} finally {
if (socket != null) {
try {
socket.close();
} catch (IOException ignore) {
// Ignore.
}
}
socket = null;
}
}
/**
* Test method for {@link DB#insert}, {@link DB#read}, and {@link DB#delete} .
*/
@Test
public void testInsertReadDelete() {
final DB client = getDB();
final String table = getClass().getSimpleName();
final String id = "delete";
HashMap<String, ByteIterator> inserted =
new HashMap<String, ByteIterator>();
inserted.put("a", new ByteArrayByteIterator(new byte[] { 1, 2, 3, 4 }));
Status result = client.insert(table, id, inserted);
assertThat("Insert did not return success (0).", result, is(Status.OK));
HashMap<String, ByteIterator> read = new HashMap<String, ByteIterator>();
Set<String> keys = Collections.singleton("a");
result = client.read(table, id, keys, read);
assertThat("Read did not return success (0).", result, is(Status.OK));
for (String key : keys) {
ByteIterator iter = read.get(key);
assertThat("Did not read the inserted field: " + key, iter,
notNullValue());
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 1)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 2)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 3)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 4)));
assertFalse(iter.hasNext());
}
result = client.delete(table, id);
assertThat("Delete did not return success (0).", result, is(Status.OK));
read.clear();
result = client.read(table, id, null, read);
assertThat("Read, after delete, did not return not found (1).", result,
is(Status.NOT_FOUND));
assertThat("Found the deleted fields.", read.size(), is(0));
result = client.delete(table, id);
assertThat("Delete did not return not found (1).", result, is(Status.NOT_FOUND));
}
/**
* Test method for {@link DB#insert}, {@link DB#read}, and {@link DB#update} .
*/
@Test
public void testInsertReadUpdate() {
DB client = getDB();
final String table = getClass().getSimpleName();
final String id = "update";
HashMap<String, ByteIterator> inserted =
new HashMap<String, ByteIterator>();
inserted.put("a", new ByteArrayByteIterator(new byte[] { 1, 2, 3, 4 }));
Status result = client.insert(table, id, inserted);
assertThat("Insert did not return success (0).", result, is(Status.OK));
HashMap<String, ByteIterator> read = new HashMap<String, ByteIterator>();
Set<String> keys = Collections.singleton("a");
result = client.read(table, id, keys, read);
assertThat("Read did not return success (0).", result, is(Status.OK));
for (String key : keys) {
ByteIterator iter = read.get(key);
assertThat("Did not read the inserted field: " + key, iter,
notNullValue());
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 1)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 2)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 3)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 4)));
assertFalse(iter.hasNext());
}
HashMap<String, ByteIterator> updated = new HashMap<String, ByteIterator>();
updated.put("a", new ByteArrayByteIterator(new byte[] { 5, 6, 7, 8 }));
result = client.update(table, id, updated);
assertThat("Update did not return success (0).", result, is(Status.OK));
read.clear();
result = client.read(table, id, null, read);
assertThat("Read, after update, did not return success (0).", result, is(Status.OK));
for (String key : keys) {
ByteIterator iter = read.get(key);
assertThat("Did not read the inserted field: " + key, iter,
notNullValue());
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 5)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 6)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 7)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 8)));
assertFalse(iter.hasNext());
}
}
/**
* Test method for {@link DB#insert}, {@link DB#read}, and {@link DB#update} .
*/
@Test
public void testInsertReadUpdateWithUpsert() {
Properties props = new Properties();
props.setProperty("mongodb.upsert", "true");
DB client = getDB(props);
final String table = getClass().getSimpleName();
final String id = "updateWithUpsert";
HashMap<String, ByteIterator> inserted =
new HashMap<String, ByteIterator>();
inserted.put("a", new ByteArrayByteIterator(new byte[] { 1, 2, 3, 4 }));
Status result = client.insert(table, id, inserted);
assertThat("Insert did not return success (0).", result, is(Status.OK));
HashMap<String, ByteIterator> read = new HashMap<String, ByteIterator>();
Set<String> keys = Collections.singleton("a");
result = client.read(table, id, keys, read);
assertThat("Read did not return success (0).", result, is(Status.OK));
for (String key : keys) {
ByteIterator iter = read.get(key);
assertThat("Did not read the inserted field: " + key, iter,
notNullValue());
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 1)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 2)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 3)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 4)));
assertFalse(iter.hasNext());
}
HashMap<String, ByteIterator> updated = new HashMap<String, ByteIterator>();
updated.put("a", new ByteArrayByteIterator(new byte[] { 5, 6, 7, 8 }));
result = client.update(table, id, updated);
assertThat("Update did not return success (0).", result, is(Status.OK));
read.clear();
result = client.read(table, id, null, read);
assertThat("Read, after update, did not return success (0).", result, is(Status.OK));
for (String key : keys) {
ByteIterator iter = read.get(key);
assertThat("Did not read the inserted field: " + key, iter,
notNullValue());
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 5)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 6)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 7)));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) 8)));
assertFalse(iter.hasNext());
}
}
/**
* Test method for {@link DB#scan}.
*/
@Test
public void testScan() {
final DB client = getDB();
final String table = getClass().getSimpleName();
// Insert a bunch of documents.
for (int i = 0; i < 100; ++i) {
HashMap<String, ByteIterator> inserted =
new HashMap<String, ByteIterator>();
inserted.put("a", new ByteArrayByteIterator(new byte[] {
(byte) (i & 0xFF), (byte) (i >> 8 & 0xFF), (byte) (i >> 16 & 0xFF),
(byte) (i >> 24 & 0xFF) }));
Status result = client.insert(table, padded(i), inserted);
assertThat("Insert did not return success (0).", result, is(Status.OK));
}
Set<String> keys = Collections.singleton("a");
Vector<HashMap<String, ByteIterator>> results =
new Vector<HashMap<String, ByteIterator>>();
Status result = client.scan(table, "00050", 5, null, results);
assertThat("Read did not return success (0).", result, is(Status.OK));
assertThat(results.size(), is(5));
for (int i = 0; i < 5; ++i) {
Map<String, ByteIterator> read = results.get(i);
for (String key : keys) {
ByteIterator iter = read.get(key);
assertThat("Did not read the inserted field: " + key, iter,
notNullValue());
assertTrue(iter.hasNext());
assertThat(iter.nextByte(), is(Byte.valueOf((byte) ((i + 50) & 0xFF))));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(),
is(Byte.valueOf((byte) ((i + 50) >> 8 & 0xFF))));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(),
is(Byte.valueOf((byte) ((i + 50) >> 16 & 0xFF))));
assertTrue(iter.hasNext());
assertThat(iter.nextByte(),
is(Byte.valueOf((byte) ((i + 50) >> 24 & 0xFF))));
assertFalse(iter.hasNext());
}
}
}
/**
* Gets the test DB.
*
* @return The test DB.
*/
protected DB getDB() {
return getDB(new Properties());
}
/**
* Gets the test DB.
*
* @param props
* Properties to pass to the client.
* @return The test DB.
*/
protected abstract DB getDB(Properties props);
/**
* Creates a zero padded integer.
*
* @param i
* The integer to padd.
* @return The padded integer.
*/
private String padded(int i) {
String result = String.valueOf(i);
while (result.length() < 5) {
result = "0" + result;
}
return result;
}
} | 11,895 | 33.988235 | 89 | java |
null | NearPMSW-main/baseline/logging/YCSB2/mongodb/src/test/java/site/ycsb/db/OptionsSupportTest.java | /*
* Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import static site.ycsb.db.OptionsSupport.updateUrl;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import java.util.Properties;
import org.junit.Test;
/**
* OptionsSupportTest provides tests for the OptionsSupport class.
*
* @author rjm
*/
public class OptionsSupportTest {
/**
* Test method for {@link OptionsSupport#updateUrl(String, Properties)} for
* {@code mongodb.maxconnections}.
*/
@Test
public void testUpdateUrlMaxConnections() {
assertThat(
updateUrl("mongodb://locahost:27017/",
props("mongodb.maxconnections", "1234")),
is("mongodb://locahost:27017/?maxPoolSize=1234"));
assertThat(
updateUrl("mongodb://locahost:27017/?foo=bar",
props("mongodb.maxconnections", "1234")),
is("mongodb://locahost:27017/?foo=bar&maxPoolSize=1234"));
assertThat(
updateUrl("mongodb://locahost:27017/?maxPoolSize=1",
props("mongodb.maxconnections", "1234")),
is("mongodb://locahost:27017/?maxPoolSize=1"));
assertThat(
updateUrl("mongodb://locahost:27017/?foo=bar", props("foo", "1234")),
is("mongodb://locahost:27017/?foo=bar"));
}
/**
* Test method for {@link OptionsSupport#updateUrl(String, Properties)} for
* {@code mongodb.threadsAllowedToBlockForConnectionMultiplier}.
*/
@Test
public void testUpdateUrlWaitQueueMultiple() {
assertThat(
updateUrl(
"mongodb://locahost:27017/",
props("mongodb.threadsAllowedToBlockForConnectionMultiplier",
"1234")),
is("mongodb://locahost:27017/?waitQueueMultiple=1234"));
assertThat(
updateUrl(
"mongodb://locahost:27017/?foo=bar",
props("mongodb.threadsAllowedToBlockForConnectionMultiplier",
"1234")),
is("mongodb://locahost:27017/?foo=bar&waitQueueMultiple=1234"));
assertThat(
updateUrl(
"mongodb://locahost:27017/?waitQueueMultiple=1",
props("mongodb.threadsAllowedToBlockForConnectionMultiplier",
"1234")), is("mongodb://locahost:27017/?waitQueueMultiple=1"));
assertThat(
updateUrl("mongodb://locahost:27017/?foo=bar", props("foo", "1234")),
is("mongodb://locahost:27017/?foo=bar"));
}
/**
* Test method for {@link OptionsSupport#updateUrl(String, Properties)} for
* {@code mongodb.threadsAllowedToBlockForConnectionMultiplier}.
*/
@Test
public void testUpdateUrlWriteConcern() {
assertThat(
updateUrl("mongodb://locahost:27017/",
props("mongodb.writeConcern", "errors_ignored")),
is("mongodb://locahost:27017/?w=0"));
assertThat(
updateUrl("mongodb://locahost:27017/?foo=bar",
props("mongodb.writeConcern", "unacknowledged")),
is("mongodb://locahost:27017/?foo=bar&w=0"));
assertThat(
updateUrl("mongodb://locahost:27017/?foo=bar",
props("mongodb.writeConcern", "acknowledged")),
is("mongodb://locahost:27017/?foo=bar&w=1"));
assertThat(
updateUrl("mongodb://locahost:27017/?foo=bar",
props("mongodb.writeConcern", "journaled")),
is("mongodb://locahost:27017/?foo=bar&journal=true&j=true"));
assertThat(
updateUrl("mongodb://locahost:27017/?foo=bar",
props("mongodb.writeConcern", "replica_acknowledged")),
is("mongodb://locahost:27017/?foo=bar&w=2"));
assertThat(
updateUrl("mongodb://locahost:27017/?foo=bar",
props("mongodb.writeConcern", "majority")),
is("mongodb://locahost:27017/?foo=bar&w=majority"));
// w already exists.
assertThat(
updateUrl("mongodb://locahost:27017/?w=1",
props("mongodb.writeConcern", "acknowledged")),
is("mongodb://locahost:27017/?w=1"));
// Unknown options
assertThat(
updateUrl("mongodb://locahost:27017/?foo=bar", props("foo", "1234")),
is("mongodb://locahost:27017/?foo=bar"));
}
/**
* Test method for {@link OptionsSupport#updateUrl(String, Properties)} for
* {@code mongodb.threadsAllowedToBlockForConnectionMultiplier}.
*/
@Test
public void testUpdateUrlReadPreference() {
assertThat(
updateUrl("mongodb://locahost:27017/",
props("mongodb.readPreference", "primary")),
is("mongodb://locahost:27017/?readPreference=primary"));
assertThat(
updateUrl("mongodb://locahost:27017/?foo=bar",
props("mongodb.readPreference", "primary_preferred")),
is("mongodb://locahost:27017/?foo=bar&readPreference=primaryPreferred"));
assertThat(
updateUrl("mongodb://locahost:27017/?foo=bar",
props("mongodb.readPreference", "secondary")),
is("mongodb://locahost:27017/?foo=bar&readPreference=secondary"));
assertThat(
updateUrl("mongodb://locahost:27017/?foo=bar",
props("mongodb.readPreference", "secondary_preferred")),
is("mongodb://locahost:27017/?foo=bar&readPreference=secondaryPreferred"));
assertThat(
updateUrl("mongodb://locahost:27017/?foo=bar",
props("mongodb.readPreference", "nearest")),
is("mongodb://locahost:27017/?foo=bar&readPreference=nearest"));
// readPreference already exists.
assertThat(
updateUrl("mongodb://locahost:27017/?readPreference=primary",
props("mongodb.readPreference", "secondary")),
is("mongodb://locahost:27017/?readPreference=primary"));
// Unknown options
assertThat(
updateUrl("mongodb://locahost:27017/?foo=bar", props("foo", "1234")),
is("mongodb://locahost:27017/?foo=bar"));
}
/**
* Factory method for a {@link Properties} object.
*
* @param key
* The key for the property to set.
* @param value
* The value for the property to set.
* @return The {@link Properties} with the property added.
*/
private Properties props(String key, String value) {
Properties props = new Properties();
props.setProperty(key, value);
return props;
}
}
| 6,785 | 35.681081 | 83 | java |
null | NearPMSW-main/baseline/logging/YCSB2/mongodb/src/test/java/site/ycsb/db/MongoDbClientTest.java | /*
* Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import static org.junit.Assume.assumeNoException;
import java.util.Properties;
import org.junit.After;
import site.ycsb.DB;
/**
* MongoDbClientTest provides runs the basic workload operations.
*/
public class MongoDbClientTest extends AbstractDBTestCases {
/** The client to use. */
private DB myClient = null;
protected DB instantiateClient() {
return new MongoDbClient();
}
/**
* Stops the test client.
*/
@After
public void tearDown() {
try {
myClient.cleanup();
} catch (Exception error) {
// Ignore.
} finally {
myClient = null;
}
}
/**
* {@inheritDoc}
* <p>
* Overridden to return the {@link MongoDbClient}.
* </p>
*/
@Override
protected DB getDB(Properties props) {
if( myClient == null ) {
myClient = instantiateClient();
myClient.setProperties(props);
try {
myClient.init();
} catch (Exception error) {
assumeNoException(error);
}
}
return myClient;
}
}
| 1,689 | 22.150685 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/mongodb/src/test/java/site/ycsb/db/AsyncMongoDbClientTest.java | /*
* Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import site.ycsb.DB;
/**
* AsyncMongoDbClientTest provides runs the basic workload operations.
*/
public class AsyncMongoDbClientTest extends MongoDbClientTest {
@Override
protected DB instantiateClient() {
return new AsyncMongoDbClient();
}
}
| 933 | 29.129032 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/mongodb/src/main/java/site/ycsb/db/package-info.java | /*
* Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="https://www.mongodb.org/">MongoDB</a>.
* For additional details on using and configuring the binding see the
* accompanying <a
* href="https://github.com/brianfrankcooper/YCSB/blob/master/mongodb/README.md"
* >README.md</a>.
* <p>
* A YCSB binding is provided for both the the
* <a href="http://www.allanbank.com/mongodb-async-driver/">Asynchronous
* Java Driver</a> and the MongoDB Inc.
* <a href="http://docs.mongodb.org/ecosystem/drivers/java/">driver</a>.
* </p>
*/
package site.ycsb.db;
| 1,196 | 35.272727 | 80 | java |
null | NearPMSW-main/baseline/logging/YCSB2/mongodb/src/main/java/site/ycsb/db/OptionsSupport.java | /*
* Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import java.util.Properties;
/**
* OptionsSupport provides methods for handling legacy options.
*
* @author rjm
*/
public final class OptionsSupport {
/** Value for an unavailable property. */
private static final String UNAVAILABLE = "n/a";
/**
* Updates the URL with the appropriate attributes if legacy properties are
* set and the URL does not have the property already set.
*
* @param url
* The URL to update.
* @param props
* The legacy properties.
* @return The updated URL.
*/
public static String updateUrl(String url, Properties props) {
String result = url;
// max connections.
final String maxConnections =
props.getProperty("mongodb.maxconnections", UNAVAILABLE).toLowerCase();
if (!UNAVAILABLE.equals(maxConnections)) {
result = addUrlOption(result, "maxPoolSize", maxConnections);
}
// Blocked thread multiplier.
final String threadsAllowedToBlockForConnectionMultiplier =
props
.getProperty(
"mongodb.threadsAllowedToBlockForConnectionMultiplier",
UNAVAILABLE).toLowerCase();
if (!UNAVAILABLE.equals(threadsAllowedToBlockForConnectionMultiplier)) {
result =
addUrlOption(result, "waitQueueMultiple",
threadsAllowedToBlockForConnectionMultiplier);
}
// write concern
String writeConcernType =
props.getProperty("mongodb.writeConcern", UNAVAILABLE).toLowerCase();
if (!UNAVAILABLE.equals(writeConcernType)) {
if ("errors_ignored".equals(writeConcernType)) {
result = addUrlOption(result, "w", "0");
} else if ("unacknowledged".equals(writeConcernType)) {
result = addUrlOption(result, "w", "0");
} else if ("acknowledged".equals(writeConcernType)) {
result = addUrlOption(result, "w", "1");
} else if ("journaled".equals(writeConcernType)) {
result = addUrlOption(result, "journal", "true"); // this is the
// documented option
// name
result = addUrlOption(result, "j", "true"); // but keep this until
// MongoDB Java driver
// supports "journal" option
} else if ("replica_acknowledged".equals(writeConcernType)) {
result = addUrlOption(result, "w", "2");
} else if ("majority".equals(writeConcernType)) {
result = addUrlOption(result, "w", "majority");
} else {
System.err.println("WARNING: Invalid writeConcern: '"
+ writeConcernType + "' will be ignored. "
+ "Must be one of [ unacknowledged | acknowledged | "
+ "journaled | replica_acknowledged | majority ]");
}
}
// read preference
String readPreferenceType =
props.getProperty("mongodb.readPreference", UNAVAILABLE).toLowerCase();
if (!UNAVAILABLE.equals(readPreferenceType)) {
if ("primary".equals(readPreferenceType)) {
result = addUrlOption(result, "readPreference", "primary");
} else if ("primary_preferred".equals(readPreferenceType)) {
result = addUrlOption(result, "readPreference", "primaryPreferred");
} else if ("secondary".equals(readPreferenceType)) {
result = addUrlOption(result, "readPreference", "secondary");
} else if ("secondary_preferred".equals(readPreferenceType)) {
result = addUrlOption(result, "readPreference", "secondaryPreferred");
} else if ("nearest".equals(readPreferenceType)) {
result = addUrlOption(result, "readPreference", "nearest");
} else {
System.err.println("WARNING: Invalid readPreference: '"
+ readPreferenceType + "' will be ignored. "
+ "Must be one of [ primary | primary_preferred | "
+ "secondary | secondary_preferred | nearest ]");
}
}
return result;
}
/**
* Adds an option to the url if it does not already contain the option.
*
* @param url
* The URL to append the options to.
* @param name
* The name of the option.
* @param value
* The value for the option.
* @return The updated URL.
*/
private static String addUrlOption(String url, String name, String value) {
String fullName = name + "=";
if (!url.contains(fullName)) {
if (url.contains("?")) {
return url + "&" + fullName + value;
}
return url + "?" + fullName + value;
}
return url;
}
/**
* Hidden Constructor.
*/
private OptionsSupport() {
// Nothing.
}
}
| 5,199 | 34.616438 | 79 | java |
null | NearPMSW-main/baseline/logging/YCSB2/mongodb/src/main/java/site/ycsb/db/AsyncMongoDbClient.java | /*
* Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import static com.allanbank.mongodb.builder.QueryBuilder.where;
import com.allanbank.mongodb.Durability;
import com.allanbank.mongodb.LockType;
import com.allanbank.mongodb.MongoClient;
import com.allanbank.mongodb.MongoClientConfiguration;
import com.allanbank.mongodb.MongoCollection;
import com.allanbank.mongodb.MongoDatabase;
import com.allanbank.mongodb.MongoDbUri;
import com.allanbank.mongodb.MongoFactory;
import com.allanbank.mongodb.MongoIterator;
import com.allanbank.mongodb.ReadPreference;
import com.allanbank.mongodb.bson.Document;
import com.allanbank.mongodb.bson.Element;
import com.allanbank.mongodb.bson.ElementType;
import com.allanbank.mongodb.bson.builder.BuilderFactory;
import com.allanbank.mongodb.bson.builder.DocumentBuilder;
import com.allanbank.mongodb.bson.element.BinaryElement;
import com.allanbank.mongodb.builder.BatchedWrite;
import com.allanbank.mongodb.builder.BatchedWriteMode;
import com.allanbank.mongodb.builder.Find;
import com.allanbank.mongodb.builder.Sort;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.atomic.AtomicInteger;
/**
* MongoDB asynchronous client for YCSB framework using the <a
* href="http://www.allanbank.com/mongodb-async-driver/">Asynchronous Java
* Driver</a>
* <p>
* See the <code>README.md</code> for configuration information.
* </p>
*
* @author rjm
* @see <a href="http://www.allanbank.com/mongodb-async-driver/">Asynchronous
* Java Driver</a>
*/
public class AsyncMongoDbClient extends DB {
/** Used to include a field in a response. */
protected static final int INCLUDE = 1;
/** The database to use. */
private static String databaseName;
/** Thread local document builder. */
private static final ThreadLocal<DocumentBuilder> DOCUMENT_BUILDER =
new ThreadLocal<DocumentBuilder>() {
@Override
protected DocumentBuilder initialValue() {
return BuilderFactory.start();
}
};
/** The write concern for the requests. */
private static final AtomicInteger INIT_COUNT = new AtomicInteger(0);
/** The connection to MongoDB. */
private static MongoClient mongoClient;
/** The write concern for the requests. */
private static Durability writeConcern;
/** Which servers to use for reads. */
private static ReadPreference readPreference;
/** The database to MongoDB. */
private MongoDatabase database;
/** The batch size to use for inserts. */
private static int batchSize;
/** If true then use updates with the upsert option for inserts. */
private static boolean useUpsert;
/** The bulk inserts pending for the thread. */
private final BatchedWrite.Builder batchedWrite = BatchedWrite.builder()
.mode(BatchedWriteMode.REORDERED);
/** The number of writes in the batchedWrite. */
private int batchedWriteCount = 0;
/**
* Cleanup any state for this DB. Called once per DB instance; there is one DB
* instance per client thread.
*/
@Override
public final void cleanup() throws DBException {
if (INIT_COUNT.decrementAndGet() == 0) {
try {
mongoClient.close();
} catch (final Exception e1) {
System.err.println("Could not close MongoDB connection pool: "
+ e1.toString());
e1.printStackTrace();
return;
} finally {
mongoClient = null;
database = null;
}
}
}
/**
* Delete a record from the database.
*
* @param table
* The name of the table
* @param key
* The record key of the record to delete.
* @return Zero on success, a non-zero error code on error. See this class's
* description for a discussion of error codes.
*/
@Override
public final Status delete(final String table, final String key) {
try {
final MongoCollection collection = database.getCollection(table);
final Document q = BuilderFactory.start().add("_id", key).build();
final long res = collection.delete(q, writeConcern);
if (res == 0) {
System.err.println("Nothing deleted for key " + key);
return Status.NOT_FOUND;
}
return Status.OK;
} catch (final Exception e) {
System.err.println(e.toString());
return Status.ERROR;
}
}
/**
* Initialize any state for this DB. Called once per DB instance; there is one
* DB instance per client thread.
*/
@Override
public final void init() throws DBException {
final int count = INIT_COUNT.incrementAndGet();
synchronized (AsyncMongoDbClient.class) {
final Properties props = getProperties();
if (mongoClient != null) {
database = mongoClient.getDatabase(databaseName);
// If there are more threads (count) than connections then the
// Low latency spin lock is not really needed as we will keep
// the connections occupied.
if (count > mongoClient.getConfig().getMaxConnectionCount()) {
mongoClient.getConfig().setLockType(LockType.MUTEX);
}
return;
}
// Set insert batchsize, default 1 - to be YCSB-original equivalent
batchSize = Integer.parseInt(props.getProperty("mongodb.batchsize", "1"));
// Set is inserts are done as upserts. Defaults to false.
useUpsert = Boolean.parseBoolean(
props.getProperty("mongodb.upsert", "false"));
// Just use the standard connection format URL
// http://docs.mongodb.org/manual/reference/connection-string/
// to configure the client.
String url =
props
.getProperty("mongodb.url", "mongodb://localhost:27017/ycsb?w=1");
if (!url.startsWith("mongodb://")) {
System.err.println("ERROR: Invalid URL: '" + url
+ "'. Must be of the form "
+ "'mongodb://<host1>:<port1>,<host2>:<port2>/database?"
+ "options'. See "
+ "http://docs.mongodb.org/manual/reference/connection-string/.");
System.exit(1);
}
MongoDbUri uri = new MongoDbUri(url);
try {
databaseName = uri.getDatabase();
if ((databaseName == null) || databaseName.isEmpty()) {
// Default database is "ycsb" if database is not
// specified in URL
databaseName = "ycsb";
}
mongoClient = MongoFactory.createClient(uri);
MongoClientConfiguration config = mongoClient.getConfig();
if (!url.toLowerCase().contains("locktype=")) {
config.setLockType(LockType.LOW_LATENCY_SPIN); // assumed...
}
readPreference = config.getDefaultReadPreference();
writeConcern = config.getDefaultDurability();
database = mongoClient.getDatabase(databaseName);
System.out.println("mongo connection created with " + url);
} catch (final Exception e1) {
System.err
.println("Could not initialize MongoDB connection pool for Loader: "
+ e1.toString());
e1.printStackTrace();
return;
}
}
}
/**
* Insert a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key.
*
* @param table
* The name of the table
* @param key
* The record key of the record to insert.
* @param values
* A HashMap of field/value pairs to insert in the record
* @return Zero on success, a non-zero error code on error. See the {@link DB}
* class's description for a discussion of error codes.
*/
@Override
public final Status insert(final String table, final String key,
final Map<String, ByteIterator> values) {
try {
final MongoCollection collection = database.getCollection(table);
final DocumentBuilder toInsert =
DOCUMENT_BUILDER.get().reset().add("_id", key);
final Document query = toInsert.build();
for (final Map.Entry<String, ByteIterator> entry : values.entrySet()) {
toInsert.add(entry.getKey(), entry.getValue().toArray());
}
// Do an upsert.
if (batchSize <= 1) {
long result;
if (useUpsert) {
result = collection.update(query, toInsert,
/* multi= */false, /* upsert= */true, writeConcern);
} else {
// Return is not stable pre-SERVER-4381. No exception is success.
collection.insert(writeConcern, toInsert);
result = 1;
}
return result == 1 ? Status.OK : Status.NOT_FOUND;
}
// Use a bulk insert.
try {
if (useUpsert) {
batchedWrite.update(query, toInsert, /* multi= */false,
/* upsert= */true);
} else {
batchedWrite.insert(toInsert);
}
batchedWriteCount += 1;
if (batchedWriteCount < batchSize) {
return Status.BATCHED_OK;
}
long count = collection.write(batchedWrite);
if (count == batchedWriteCount) {
batchedWrite.reset().mode(BatchedWriteMode.REORDERED);
batchedWriteCount = 0;
return Status.OK;
}
System.err.println("Number of inserted documents doesn't match the "
+ "number sent, " + count + " inserted, sent " + batchedWriteCount);
batchedWrite.reset().mode(BatchedWriteMode.REORDERED);
batchedWriteCount = 0;
return Status.ERROR;
} catch (Exception e) {
System.err.println("Exception while trying bulk insert with "
+ batchedWriteCount);
e.printStackTrace();
return Status.ERROR;
}
} catch (final Exception e) {
e.printStackTrace();
return Status.ERROR;
}
}
/**
* Read a record from the database. Each field/value pair from the result will
* be stored in a HashMap.
*
* @param table
* The name of the table
* @param key
* The record key of the record to read.
* @param fields
* The list of fields to read, or null for all of them
* @param result
* A HashMap of field/value pairs for the result
* @return Zero on success, a non-zero error code on error or "not found".
*/
@Override
public final Status read(final String table, final String key,
final Set<String> fields, final Map<String, ByteIterator> result) {
try {
final MongoCollection collection = database.getCollection(table);
final DocumentBuilder query =
DOCUMENT_BUILDER.get().reset().add("_id", key);
Document queryResult = null;
if (fields != null) {
final DocumentBuilder fieldsToReturn = BuilderFactory.start();
final Iterator<String> iter = fields.iterator();
while (iter.hasNext()) {
fieldsToReturn.add(iter.next(), 1);
}
final Find.Builder fb = new Find.Builder(query);
fb.projection(fieldsToReturn);
fb.setLimit(1);
fb.setBatchSize(1);
fb.readPreference(readPreference);
final MongoIterator<Document> ci = collection.find(fb.build());
if (ci.hasNext()) {
queryResult = ci.next();
ci.close();
}
} else {
queryResult = collection.findOne(query);
}
if (queryResult != null) {
fillMap(result, queryResult);
}
return queryResult != null ? Status.OK : Status.NOT_FOUND;
} catch (final Exception e) {
System.err.println(e.toString());
return Status.ERROR;
}
}
/**
* Perform a range scan for a set of records in the database. Each field/value
* pair from the result will be stored in a HashMap.
*
* @param table
* The name of the table
* @param startkey
* The record key of the first record to read.
* @param recordcount
* The number of records to read
* @param fields
* The list of fields to read, or null for all of them
* @param result
* A Vector of HashMaps, where each HashMap is a set field/value
* pairs for one record
* @return Zero on success, a non-zero error code on error. See the {@link DB}
* class's description for a discussion of error codes.
*/
@Override
public final Status scan(final String table, final String startkey,
final int recordcount, final Set<String> fields,
final Vector<HashMap<String, ByteIterator>> result) {
try {
final MongoCollection collection = database.getCollection(table);
final Find.Builder find =
Find.builder().query(where("_id").greaterThanOrEqualTo(startkey))
.limit(recordcount).batchSize(recordcount).sort(Sort.asc("_id"))
.readPreference(readPreference);
if (fields != null) {
final DocumentBuilder fieldsDoc = BuilderFactory.start();
for (final String field : fields) {
fieldsDoc.add(field, INCLUDE);
}
find.projection(fieldsDoc);
}
result.ensureCapacity(recordcount);
final MongoIterator<Document> cursor = collection.find(find);
if (!cursor.hasNext()) {
System.err.println("Nothing found in scan for key " + startkey);
return Status.NOT_FOUND;
}
while (cursor.hasNext()) {
// toMap() returns a Map but result.add() expects a
// Map<String,String>. Hence, the suppress warnings.
final Document doc = cursor.next();
final HashMap<String, ByteIterator> docAsMap =
new HashMap<String, ByteIterator>();
fillMap(docAsMap, doc);
result.add(docAsMap);
}
return Status.OK;
} catch (final Exception e) {
System.err.println(e.toString());
return Status.ERROR;
}
}
/**
* Update a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key, overwriting any existing values with the same field name.
*
* @param table
* The name of the table
* @param key
* The record key of the record to write.
* @param values
* A HashMap of field/value pairs to update in the record
* @return Zero on success, a non-zero error code on error. See the {@link DB}
* class's description for a discussion of error codes.
*/
@Override
public final Status update(final String table, final String key,
final Map<String, ByteIterator> values) {
try {
final MongoCollection collection = database.getCollection(table);
final DocumentBuilder query = BuilderFactory.start().add("_id", key);
final DocumentBuilder update = BuilderFactory.start();
final DocumentBuilder fieldsToSet = update.push("$set");
for (final Map.Entry<String, ByteIterator> entry : values.entrySet()) {
fieldsToSet.add(entry.getKey(), entry.getValue().toArray());
}
final long res =
collection.update(query, update, false, false, writeConcern);
return writeConcern == Durability.NONE || res == 1 ? Status.OK : Status.NOT_FOUND;
} catch (final Exception e) {
System.err.println(e.toString());
return Status.ERROR;
}
}
/**
* Fills the map with the ByteIterators from the document.
*
* @param result
* The map to fill.
* @param queryResult
* The document to fill from.
*/
protected final void fillMap(final Map<String, ByteIterator> result,
final Document queryResult) {
for (final Element be : queryResult) {
if (be.getType() == ElementType.BINARY) {
result.put(be.getName(),
new BinaryByteArrayIterator((BinaryElement) be));
}
}
}
/**
* BinaryByteArrayIterator provides an adapter from a {@link BinaryElement} to
* a {@link ByteIterator}.
*/
private static final class BinaryByteArrayIterator extends ByteIterator {
/** The binary data. */
private final BinaryElement binaryElement;
/** The current offset into the binary element. */
private int offset;
/**
* Creates a new BinaryByteArrayIterator.
*
* @param element
* The {@link BinaryElement} to iterate over.
*/
public BinaryByteArrayIterator(final BinaryElement element) {
this.binaryElement = element;
this.offset = 0;
}
/**
* {@inheritDoc}
* <p>
* Overridden to return the number of bytes remaining in the iterator.
* </p>
*/
@Override
public long bytesLeft() {
return Math.max(0, binaryElement.length() - offset);
}
/**
* {@inheritDoc}
* <p>
* Overridden to return true if there is more data in the
* {@link BinaryElement}.
* </p>
*/
@Override
public boolean hasNext() {
return (offset < binaryElement.length());
}
/**
* {@inheritDoc}
* <p>
* Overridden to return the next value and advance the iterator.
* </p>
*/
@Override
public byte nextByte() {
final byte value = binaryElement.get(offset);
offset += 1;
return value;
}
}
}
| 17,998 | 31.666062 | 88 | java |
null | NearPMSW-main/baseline/logging/YCSB2/mongodb/src/main/java/site/ycsb/db/MongoDbClient.java | /**
* Copyright (c) 2012 - 2015 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/*
* MongoDB client binding for YCSB.
*
* Submitted by Yen Pai on 5/11/2010.
*
* https://gist.github.com/000a66b8db2caf42467b#file_mongo_database.java
*/
package site.ycsb.db;
import com.mongodb.MongoClient;
import com.mongodb.MongoClientURI;
import com.mongodb.ReadPreference;
import com.mongodb.WriteConcern;
import com.mongodb.client.FindIterable;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoCursor;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.model.InsertManyOptions;
import com.mongodb.client.model.UpdateOneModel;
import com.mongodb.client.model.UpdateOptions;
import com.mongodb.client.result.DeleteResult;
import com.mongodb.client.result.UpdateResult;
import site.ycsb.ByteArrayByteIterator;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import org.bson.Document;
import org.bson.types.Binary;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.atomic.AtomicInteger;
/**
* MongoDB binding for YCSB framework using the MongoDB Inc. <a
* href="http://docs.mongodb.org/ecosystem/drivers/java/">driver</a>
* <p>
* See the <code>README.md</code> for configuration information.
* </p>
*
* @author ypai
* @see <a href="http://docs.mongodb.org/ecosystem/drivers/java/">MongoDB Inc.
* driver</a>
*/
public class MongoDbClient extends DB {
/** Used to include a field in a response. */
private static final Integer INCLUDE = Integer.valueOf(1);
/** The options to use for inserting many documents. */
private static final InsertManyOptions INSERT_UNORDERED =
new InsertManyOptions().ordered(false);
/** The options to use for inserting a single document. */
private static final UpdateOptions UPDATE_WITH_UPSERT = new UpdateOptions()
.upsert(true);
/**
* The database name to access.
*/
private static String databaseName;
/** The database name to access. */
private static MongoDatabase database;
/**
* Count the number of times initialized to teardown on the last
* {@link #cleanup()}.
*/
private static final AtomicInteger INIT_COUNT = new AtomicInteger(0);
/** A singleton Mongo instance. */
private static MongoClient mongoClient;
/** The default read preference for the test. */
private static ReadPreference readPreference;
/** The default write concern for the test. */
private static WriteConcern writeConcern;
/** The batch size to use for inserts. */
private static int batchSize;
/** If true then use updates with the upsert option for inserts. */
private static boolean useUpsert;
/** The bulk inserts pending for the thread. */
private final List<Document> bulkInserts = new ArrayList<Document>();
/**
* Cleanup any state for this DB. Called once per DB instance; there is one DB
* instance per client thread.
*/
@Override
public void cleanup() throws DBException {
if (INIT_COUNT.decrementAndGet() == 0) {
try {
mongoClient.close();
} catch (Exception e1) {
System.err.println("Could not close MongoDB connection pool: "
+ e1.toString());
e1.printStackTrace();
return;
} finally {
database = null;
mongoClient = null;
}
}
}
/**
* Delete a record from the database.
*
* @param table
* The name of the table
* @param key
* The record key of the record to delete.
* @return Zero on success, a non-zero error code on error. See the {@link DB}
* class's description for a discussion of error codes.
*/
@Override
public Status delete(String table, String key) {
try {
MongoCollection<Document> collection = database.getCollection(table);
Document query = new Document("_id", key);
DeleteResult result =
collection.withWriteConcern(writeConcern).deleteOne(query);
if (result.wasAcknowledged() && result.getDeletedCount() == 0) {
System.err.println("Nothing deleted for key " + key);
return Status.NOT_FOUND;
}
return Status.OK;
} catch (Exception e) {
System.err.println(e.toString());
return Status.ERROR;
}
}
/**
* Initialize any state for this DB. Called once per DB instance; there is one
* DB instance per client thread.
*/
@Override
public void init() throws DBException {
INIT_COUNT.incrementAndGet();
synchronized (INCLUDE) {
if (mongoClient != null) {
return;
}
Properties props = getProperties();
// Set insert batchsize, default 1 - to be YCSB-original equivalent
batchSize = Integer.parseInt(props.getProperty("batchsize", "1"));
// Set is inserts are done as upserts. Defaults to false.
useUpsert = Boolean.parseBoolean(
props.getProperty("mongodb.upsert", "false"));
// Just use the standard connection format URL
// http://docs.mongodb.org/manual/reference/connection-string/
// to configure the client.
String url = props.getProperty("mongodb.url", null);
boolean defaultedUrl = false;
if (url == null) {
defaultedUrl = true;
url = "mongodb://localhost:27017/ycsb?w=1";
}
url = OptionsSupport.updateUrl(url, props);
if (!url.startsWith("mongodb://") && !url.startsWith("mongodb+srv://")) {
System.err.println("ERROR: Invalid URL: '" + url
+ "'. Must be of the form "
+ "'mongodb://<host1>:<port1>,<host2>:<port2>/database?options' "
+ "or 'mongodb+srv://<host>/database?options'. "
+ "http://docs.mongodb.org/manual/reference/connection-string/");
System.exit(1);
}
try {
MongoClientURI uri = new MongoClientURI(url);
String uriDb = uri.getDatabase();
if (!defaultedUrl && (uriDb != null) && !uriDb.isEmpty()
&& !"admin".equals(uriDb)) {
databaseName = uriDb;
} else {
// If no database is specified in URI, use "ycsb"
databaseName = "ycsb";
}
readPreference = uri.getOptions().getReadPreference();
writeConcern = uri.getOptions().getWriteConcern();
mongoClient = new MongoClient(uri);
database =
mongoClient.getDatabase(databaseName)
.withReadPreference(readPreference)
.withWriteConcern(writeConcern);
System.out.println("mongo client connection created with " + url);
} catch (Exception e1) {
System.err
.println("Could not initialize MongoDB connection pool for Loader: "
+ e1.toString());
e1.printStackTrace();
return;
}
}
}
/**
* Insert a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key.
*
* @param table
* The name of the table
* @param key
* The record key of the record to insert.
* @param values
* A HashMap of field/value pairs to insert in the record
* @return Zero on success, a non-zero error code on error. See the {@link DB}
* class's description for a discussion of error codes.
*/
@Override
public Status insert(String table, String key,
Map<String, ByteIterator> values) {
try {
MongoCollection<Document> collection = database.getCollection(table);
Document toInsert = new Document("_id", key);
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
toInsert.put(entry.getKey(), entry.getValue().toArray());
}
if (batchSize == 1) {
if (useUpsert) {
// this is effectively an insert, but using an upsert instead due
// to current inability of the framework to clean up after itself
// between test runs.
collection.replaceOne(new Document("_id", toInsert.get("_id")),
toInsert, UPDATE_WITH_UPSERT);
} else {
collection.insertOne(toInsert);
}
} else {
bulkInserts.add(toInsert);
if (bulkInserts.size() == batchSize) {
if (useUpsert) {
List<UpdateOneModel<Document>> updates =
new ArrayList<UpdateOneModel<Document>>(bulkInserts.size());
for (Document doc : bulkInserts) {
updates.add(new UpdateOneModel<Document>(
new Document("_id", doc.get("_id")),
doc, UPDATE_WITH_UPSERT));
}
collection.bulkWrite(updates);
} else {
collection.insertMany(bulkInserts, INSERT_UNORDERED);
}
bulkInserts.clear();
} else {
return Status.BATCHED_OK;
}
}
return Status.OK;
} catch (Exception e) {
System.err.println("Exception while trying bulk insert with "
+ bulkInserts.size());
e.printStackTrace();
return Status.ERROR;
}
}
/**
* Read a record from the database. Each field/value pair from the result will
* be stored in a HashMap.
*
* @param table
* The name of the table
* @param key
* The record key of the record to read.
* @param fields
* The list of fields to read, or null for all of them
* @param result
* A HashMap of field/value pairs for the result
* @return Zero on success, a non-zero error code on error or "not found".
*/
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
try {
MongoCollection<Document> collection = database.getCollection(table);
Document query = new Document("_id", key);
FindIterable<Document> findIterable = collection.find(query);
if (fields != null) {
Document projection = new Document();
for (String field : fields) {
projection.put(field, INCLUDE);
}
findIterable.projection(projection);
}
Document queryResult = findIterable.first();
if (queryResult != null) {
fillMap(result, queryResult);
}
return queryResult != null ? Status.OK : Status.NOT_FOUND;
} catch (Exception e) {
System.err.println(e.toString());
return Status.ERROR;
}
}
/**
* Perform a range scan for a set of records in the database. Each field/value
* pair from the result will be stored in a HashMap.
*
* @param table
* The name of the table
* @param startkey
* The record key of the first record to read.
* @param recordcount
* The number of records to read
* @param fields
* The list of fields to read, or null for all of them
* @param result
* A Vector of HashMaps, where each HashMap is a set field/value
* pairs for one record
* @return Zero on success, a non-zero error code on error. See the {@link DB}
* class's description for a discussion of error codes.
*/
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
MongoCursor<Document> cursor = null;
try {
MongoCollection<Document> collection = database.getCollection(table);
Document scanRange = new Document("$gte", startkey);
Document query = new Document("_id", scanRange);
Document sort = new Document("_id", INCLUDE);
FindIterable<Document> findIterable =
collection.find(query).sort(sort).limit(recordcount);
if (fields != null) {
Document projection = new Document();
for (String fieldName : fields) {
projection.put(fieldName, INCLUDE);
}
findIterable.projection(projection);
}
cursor = findIterable.iterator();
if (!cursor.hasNext()) {
System.err.println("Nothing found in scan for key " + startkey);
return Status.ERROR;
}
result.ensureCapacity(recordcount);
while (cursor.hasNext()) {
HashMap<String, ByteIterator> resultMap =
new HashMap<String, ByteIterator>();
Document obj = cursor.next();
fillMap(resultMap, obj);
result.add(resultMap);
}
return Status.OK;
} catch (Exception e) {
System.err.println(e.toString());
return Status.ERROR;
} finally {
if (cursor != null) {
cursor.close();
}
}
}
/**
* Update a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key, overwriting any existing values with the same field name.
*
* @param table
* The name of the table
* @param key
* The record key of the record to write.
* @param values
* A HashMap of field/value pairs to update in the record
* @return Zero on success, a non-zero error code on error. See this class's
* description for a discussion of error codes.
*/
@Override
public Status update(String table, String key,
Map<String, ByteIterator> values) {
try {
MongoCollection<Document> collection = database.getCollection(table);
Document query = new Document("_id", key);
Document fieldsToSet = new Document();
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
fieldsToSet.put(entry.getKey(), entry.getValue().toArray());
}
Document update = new Document("$set", fieldsToSet);
UpdateResult result = collection.updateOne(query, update);
if (result.wasAcknowledged() && result.getMatchedCount() == 0) {
System.err.println("Nothing updated for key " + key);
return Status.NOT_FOUND;
}
return Status.OK;
} catch (Exception e) {
System.err.println(e.toString());
return Status.ERROR;
}
}
/**
* Fills the map with the values from the DBObject.
*
* @param resultMap
* The map to fill/
* @param obj
* The object to copy values from.
*/
protected void fillMap(Map<String, ByteIterator> resultMap, Document obj) {
for (Map.Entry<String, Object> entry : obj.entrySet()) {
if (entry.getValue() instanceof Binary) {
resultMap.put(entry.getKey(),
new ByteArrayByteIterator(((Binary) entry.getValue()).getData()));
}
}
}
}
| 15,303 | 31.423729 | 80 | java |
null | NearPMSW-main/baseline/logging/YCSB2/rest/src/test/java/site/ycsb/webservice/rest/ResourceLoader.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.webservice.rest;
import java.util.HashSet;
import java.util.Set;
import javax.ws.rs.core.Application;
/**
* Class responsible for loading mock rest resource class like
* {@link RestTestResource}.
*/
public class ResourceLoader extends Application {
@Override
public Set<Class<?>> getClasses() {
final Set<Class<?>> classes = new HashSet<Class<?>>();
classes.add(RestTestResource.class);
return classes;
}
} | 1,109 | 28.210526 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/rest/src/test/java/site/ycsb/webservice/rest/RestClientTest.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.webservice.rest;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.Properties;
import javax.servlet.ServletException;
import org.apache.catalina.Context;
import org.apache.catalina.LifecycleException;
import org.apache.catalina.startup.Tomcat;
import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.servlet.ServletContainer;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import site.ycsb.ByteIterator;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
/**
* Test cases to verify the {@link RestClient} of the rest-binding
* module. It performs these steps in order. 1. Runs an embedded Tomcat
* server with a mock RESTFul web service. 2. Invokes the {@link RestClient}
* class for all the various methods which make HTTP calls to the mock REST
* service. 3. Compares the response from such calls to the mock REST
* service with the response expected. 4. Stops the embedded Tomcat server.
* Cases for verifying the handling of different HTTP status like 2xx, 4xx &
* 5xx have been included in success and failure test cases.
*/
public class RestClientTest {
private static Integer port = 8080;
private static Tomcat tomcat;
private static RestClient rc = new RestClient();
private static final String RESPONSE_TAG = "response";
private static final String DATA_TAG = "data";
private static final String VALID_RESOURCE = "resource_valid";
private static final String INVALID_RESOURCE = "resource_invalid";
private static final String ABSENT_RESOURCE = "resource_absent";
private static final String UNAUTHORIZED_RESOURCE = "resource_unauthorized";
private static final String INPUT_DATA = "<field1>one</field1><field2>two</field2>";
@BeforeClass
public static void init() throws IOException, DBException, ServletException, LifecycleException, InterruptedException {
String webappDirLocation = IntegrationTest.class.getClassLoader().getResource("WebContent").getPath();
while (!Utils.available(port)) {
port++;
}
tomcat = new Tomcat();
tomcat.setPort(Integer.valueOf(port));
Context context = tomcat.addWebapp("/webService", new File(webappDirLocation).getAbsolutePath());
Tomcat.addServlet(context, "jersey-container-servlet", resourceConfig());
context.addServletMapping("/rest/*", "jersey-container-servlet");
tomcat.start();
// Allow time for proper startup.
Thread.sleep(1000);
Properties props = new Properties();
props.load(new FileReader(RestClientTest.class.getClassLoader().getResource("workload_rest").getPath()));
// Update the port value in the url.prefix property.
props.setProperty("url.prefix", props.getProperty("url.prefix").replaceAll("PORT", port.toString()));
rc.setProperties(props);
rc.init();
}
@AfterClass
public static void cleanUp() throws DBException {
rc.cleanup();
}
// Read success.
@Test
public void read_200() {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
Status status = rc.read(null, VALID_RESOURCE, null, result);
assertEquals(Status.OK, status);
assertEquals(result.get(RESPONSE_TAG).toString(), "HTTP GET response to: "+ VALID_RESOURCE);
}
// Unauthorized request error.
@Test
public void read_403() {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
Status status = rc.read(null, UNAUTHORIZED_RESOURCE, null, result);
assertEquals(Status.FORBIDDEN, status);
}
//Not found error.
@Test
public void read_404() {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
Status status = rc.read(null, ABSENT_RESOURCE, null, result);
assertEquals(Status.NOT_FOUND, status);
}
// Server error.
@Test
public void read_500() {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
Status status = rc.read(null, INVALID_RESOURCE, null, result);
assertEquals(Status.ERROR, status);
}
// Insert success.
@Test
public void insert_200() {
HashMap<String, ByteIterator> data = new HashMap<String, ByteIterator>();
data.put(DATA_TAG, new StringByteIterator(INPUT_DATA));
Status status = rc.insert(null, VALID_RESOURCE, data);
assertEquals(Status.OK, status);
}
@Test
public void insert_403() {
HashMap<String, ByteIterator> data = new HashMap<String, ByteIterator>();
data.put(DATA_TAG, new StringByteIterator(INPUT_DATA));
Status status = rc.insert(null, UNAUTHORIZED_RESOURCE, data);
assertEquals(Status.FORBIDDEN, status);
}
@Test
public void insert_404() {
HashMap<String, ByteIterator> data = new HashMap<String, ByteIterator>();
data.put(DATA_TAG, new StringByteIterator(INPUT_DATA));
Status status = rc.insert(null, ABSENT_RESOURCE, data);
assertEquals(Status.NOT_FOUND, status);
}
@Test
public void insert_500() {
HashMap<String, ByteIterator> data = new HashMap<String, ByteIterator>();
data.put(DATA_TAG, new StringByteIterator(INPUT_DATA));
Status status = rc.insert(null, INVALID_RESOURCE, data);
assertEquals(Status.ERROR, status);
}
// Delete success.
@Test
public void delete_200() {
Status status = rc.delete(null, VALID_RESOURCE);
assertEquals(Status.OK, status);
}
@Test
public void delete_403() {
Status status = rc.delete(null, UNAUTHORIZED_RESOURCE);
assertEquals(Status.FORBIDDEN, status);
}
@Test
public void delete_404() {
Status status = rc.delete(null, ABSENT_RESOURCE);
assertEquals(Status.NOT_FOUND, status);
}
@Test
public void delete_500() {
Status status = rc.delete(null, INVALID_RESOURCE);
assertEquals(Status.ERROR, status);
}
@Test
public void update_200() {
HashMap<String, ByteIterator> data = new HashMap<String, ByteIterator>();
data.put(DATA_TAG, new StringByteIterator(INPUT_DATA));
Status status = rc.update(null, VALID_RESOURCE, data);
assertEquals(Status.OK, status);
}
@Test
public void update_403() {
HashMap<String, ByteIterator> data = new HashMap<String, ByteIterator>();
data.put(DATA_TAG, new StringByteIterator(INPUT_DATA));
Status status = rc.update(null, UNAUTHORIZED_RESOURCE, data);
assertEquals(Status.FORBIDDEN, status);
}
@Test
public void update_404() {
HashMap<String, ByteIterator> data = new HashMap<String, ByteIterator>();
data.put(DATA_TAG, new StringByteIterator(INPUT_DATA));
Status status = rc.update(null, ABSENT_RESOURCE, data);
assertEquals(Status.NOT_FOUND, status);
}
@Test
public void update_500() {
HashMap<String, ByteIterator> data = new HashMap<String, ByteIterator>();
data.put(DATA_TAG, new StringByteIterator(INPUT_DATA));
Status status = rc.update(null, INVALID_RESOURCE, data);
assertEquals(Status.ERROR, status);
}
@Test
public void scan() {
assertEquals(Status.NOT_IMPLEMENTED, rc.scan(null, null, 0, null, null));
}
private static ServletContainer resourceConfig() {
return new ServletContainer(new ResourceConfig(new ResourceLoader().getClasses()));
}
}
| 7,973 | 34.127753 | 121 | java |
null | NearPMSW-main/baseline/logging/YCSB2/rest/src/test/java/site/ycsb/webservice/rest/IntegrationTest.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.webservice.rest;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import javax.servlet.ServletException;
import org.apache.catalina.Context;
import org.apache.catalina.LifecycleException;
import org.apache.catalina.startup.Tomcat;
import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.servlet.ServletContainer;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.FixMethodOrder;
import org.junit.Rule;
import org.junit.Test;
import org.junit.contrib.java.lang.system.Assertion;
import org.junit.contrib.java.lang.system.ExpectedSystemExit;
import org.junit.runners.MethodSorters;
import site.ycsb.Client;
import site.ycsb.DBException;
import site.ycsb.webservice.rest.Utils;
/**
* Integration test cases to verify the end to end working of the rest-binding
* module. It performs these steps in order. 1. Runs an embedded Tomcat
* server with a mock RESTFul web service. 2. Invokes the {@link Client}
* class with the required parameters to start benchmarking the mock REST
* service. 3. Compares the response stored in the output file by {@link Client}
* class with the response expected. 4. Stops the embedded Tomcat server.
* Cases for verifying the handling of different HTTP status like 2xx & 5xx have
* been included in success and failure test cases.
*/
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class IntegrationTest {
@Rule
public final ExpectedSystemExit exit = ExpectedSystemExit.none();
private static int port = 8080;
private static Tomcat tomcat;
private static final String WORKLOAD_FILEPATH = IntegrationTest.class.getClassLoader().getResource("workload_rest").getPath();
private static final String TRACE_FILEPATH = IntegrationTest.class.getClassLoader().getResource("trace.txt").getPath();
private static final String ERROR_TRACE_FILEPATH = IntegrationTest.class.getClassLoader().getResource("error_trace.txt").getPath();
private static final String RESULTS_FILEPATH = IntegrationTest.class.getClassLoader().getResource(".").getPath() + "results.txt";
@BeforeClass
public static void init() throws ServletException, LifecycleException, FileNotFoundException, IOException,
DBException, InterruptedException {
String webappDirLocation = IntegrationTest.class.getClassLoader().getResource("WebContent").getPath();
while (!Utils.available(port)) {
port++;
}
tomcat = new Tomcat();
tomcat.setPort(Integer.valueOf(port));
Context context = tomcat.addWebapp("/webService", new File(webappDirLocation).getAbsolutePath());
Tomcat.addServlet(context, "jersey-container-servlet", resourceConfig());
context.addServletMapping("/rest/*", "jersey-container-servlet");
tomcat.start();
// Allow time for proper startup.
Thread.sleep(1000);
}
@AfterClass
public static void cleanUp() throws LifecycleException {
tomcat.stop();
}
// All read operations during benchmark are executed successfully with an HTTP OK status.
@Test
public void testReadOpsBenchmarkSuccess() throws InterruptedException {
exit.expectSystemExit();
exit.checkAssertionAfterwards(new Assertion() {
@Override
public void checkAssertion() throws Exception {
List<String> results = Utils.read(RESULTS_FILEPATH);
assertEquals(true, results.contains("[READ], Return=OK, 1"));
Utils.delete(RESULTS_FILEPATH);
}
});
Client.main(getArgs(TRACE_FILEPATH, 1, 0, 0, 0));
}
//All read operations during benchmark are executed with an HTTP 500 error.
@Test
public void testReadOpsBenchmarkFailure() throws InterruptedException {
exit.expectSystemExit();
exit.checkAssertionAfterwards(new Assertion() {
@Override
public void checkAssertion() throws Exception {
List<String> results = Utils.read(RESULTS_FILEPATH);
assertEquals(true, results.contains("[READ], Return=ERROR, 1"));
Utils.delete(RESULTS_FILEPATH);
}
});
Client.main(getArgs(ERROR_TRACE_FILEPATH, 1, 0, 0, 0));
}
//All insert operations during benchmark are executed successfully with an HTTP OK status.
@Test
public void testInsertOpsBenchmarkSuccess() throws InterruptedException {
exit.expectSystemExit();
exit.checkAssertionAfterwards(new Assertion() {
@Override
public void checkAssertion() throws Exception {
List<String> results = Utils.read(RESULTS_FILEPATH);
assertEquals(true, results.contains("[INSERT], Return=OK, 1"));
Utils.delete(RESULTS_FILEPATH);
}
});
Client.main(getArgs(TRACE_FILEPATH, 0, 1, 0, 0));
}
//All read operations during benchmark are executed with an HTTP 500 error.
@Test
public void testInsertOpsBenchmarkFailure() throws InterruptedException {
exit.expectSystemExit();
exit.checkAssertionAfterwards(new Assertion() {
@Override
public void checkAssertion() throws Exception {
List<String> results = Utils.read(RESULTS_FILEPATH);
assertEquals(true, results.contains("[INSERT], Return=ERROR, 1"));
Utils.delete(RESULTS_FILEPATH);
}
});
Client.main(getArgs(ERROR_TRACE_FILEPATH, 0, 1, 0, 0));
}
//All update operations during benchmark are executed successfully with an HTTP OK status.
@Test
public void testUpdateOpsBenchmarkSuccess() throws InterruptedException {
exit.expectSystemExit();
exit.checkAssertionAfterwards(new Assertion() {
@Override
public void checkAssertion() throws Exception {
List<String> results = Utils.read(RESULTS_FILEPATH);
assertEquals(true, results.contains("[UPDATE], Return=OK, 1"));
Utils.delete(RESULTS_FILEPATH);
}
});
Client.main(getArgs(TRACE_FILEPATH, 0, 0, 1, 0));
}
//All read operations during benchmark are executed with an HTTP 500 error.
@Test
public void testUpdateOpsBenchmarkFailure() throws InterruptedException {
exit.expectSystemExit();
exit.checkAssertionAfterwards(new Assertion() {
@Override
public void checkAssertion() throws Exception {
List<String> results = Utils.read(RESULTS_FILEPATH);
assertEquals(true, results.contains("[UPDATE], Return=ERROR, 1"));
Utils.delete(RESULTS_FILEPATH);
}
});
Client.main(getArgs(ERROR_TRACE_FILEPATH, 0, 0, 1, 0));
}
//All delete operations during benchmark are executed successfully with an HTTP OK status.
@Test
public void testDeleteOpsBenchmarkSuccess() throws InterruptedException {
exit.expectSystemExit();
exit.checkAssertionAfterwards(new Assertion() {
@Override
public void checkAssertion() throws Exception {
List<String> results = Utils.read(RESULTS_FILEPATH);
assertEquals(true, results.contains("[DELETE], Return=OK, 1"));
Utils.delete(RESULTS_FILEPATH);
}
});
Client.main(getArgs(TRACE_FILEPATH, 0, 0, 0, 1));
}
//All read operations during benchmark are executed with an HTTP 500 error.
@Test
public void testDeleteOpsBenchmarkFailure() throws InterruptedException {
exit.expectSystemExit();
exit.checkAssertionAfterwards(new Assertion() {
@Override
public void checkAssertion() throws Exception {
List<String> results = Utils.read(RESULTS_FILEPATH);
assertEquals(true, results.contains("[DELETE], Return=ERROR, 1"));
Utils.delete(RESULTS_FILEPATH);
}
});
Client.main(getArgs(ERROR_TRACE_FILEPATH, 0, 0, 0, 1));
}
private String[] getArgs(String traceFilePath, float rp, float ip, float up, float dp) {
String[] args = new String[25];
args[0] = "-target";
args[1] = "1";
args[2] = "-t";
args[3] = "-P";
args[4] = WORKLOAD_FILEPATH;
args[5] = "-p";
args[6] = "url.prefix=http://127.0.0.1:"+port+"/webService/rest/resource/";
args[7] = "-p";
args[8] = "url.trace.read=" + traceFilePath;
args[9] = "-p";
args[10] = "url.trace.insert=" + traceFilePath;
args[11] = "-p";
args[12] = "url.trace.update=" + traceFilePath;
args[13] = "-p";
args[14] = "url.trace.delete=" + traceFilePath;
args[15] = "-p";
args[16] = "exportfile=" + RESULTS_FILEPATH;
args[17] = "-p";
args[18] = "readproportion=" + rp;
args[19] = "-p";
args[20] = "updateproportion=" + up;
args[21] = "-p";
args[22] = "deleteproportion=" + dp;
args[23] = "-p";
args[24] = "insertproportion=" + ip;
return args;
}
private static ServletContainer resourceConfig() {
return new ServletContainer(new ResourceConfig(new ResourceLoader().getClasses()));
}
} | 9,384 | 37.150407 | 133 | java |
null | NearPMSW-main/baseline/logging/YCSB2/rest/src/test/java/site/ycsb/webservice/rest/Utils.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.webservice.rest;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.net.DatagramSocket;
import java.net.ServerSocket;
import java.util.ArrayList;
import java.util.List;
/**
* Holds the common utility methods.
*/
public class Utils {
/**
* Returns true if the port is available.
*
* @param port
* @return isAvailable
*/
public static boolean available(int port) {
ServerSocket ss = null;
DatagramSocket ds = null;
try {
ss = new ServerSocket(port);
ss.setReuseAddress(true);
ds = new DatagramSocket(port);
ds.setReuseAddress(true);
return true;
} catch (IOException e) {
} finally {
if (ds != null) {
ds.close();
}
if (ss != null) {
try {
ss.close();
} catch (IOException e) {
/* should not be thrown */
}
}
}
return false;
}
public static List<String> read(String filepath) {
List<String> list = new ArrayList<String>();
try {
BufferedReader file = new BufferedReader(new FileReader(filepath));
String line = null;
while ((line = file.readLine()) != null) {
list.add(line.trim());
}
file.close();
} catch (IOException e) {
e.printStackTrace();
}
return list;
}
public static void delete(String filepath) {
try {
new File(filepath).delete();
} catch (Exception e) {
e.printStackTrace();
}
}
}
| 2,197 | 23.696629 | 73 | java |
null | NearPMSW-main/baseline/logging/YCSB2/rest/src/test/java/site/ycsb/webservice/rest/RestTestResource.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.webservice.rest;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.HttpMethod;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
/**
* Class that implements a mock RESTFul web service to be used for integration
* testing.
*/
@Path("/resource/{id}")
public class RestTestResource {
@GET
@Produces(MediaType.TEXT_PLAIN)
public Response respondToGET(@PathParam("id") String id) {
return processRequests(id, HttpMethod.GET);
}
@POST
@Produces(MediaType.TEXT_PLAIN)
public Response respondToPOST(@PathParam("id") String id) {
return processRequests(id, HttpMethod.POST);
}
@DELETE
@Produces(MediaType.TEXT_PLAIN)
public Response respondToDELETE(@PathParam("id") String id) {
return processRequests(id, HttpMethod.DELETE);
}
@PUT
@Produces(MediaType.TEXT_PLAIN)
public Response respondToPUT(@PathParam("id") String id) {
return processRequests(id, HttpMethod.PUT);
}
private static Response processRequests(String id, String method) {
if (id.equals("resource_invalid"))
return Response.serverError().build();
else if (id.equals("resource_absent"))
return Response.status(Response.Status.NOT_FOUND).build();
else if (id.equals("resource_unauthorized"))
return Response.status(Response.Status.FORBIDDEN).build();
return Response.ok("HTTP " + method + " response to: " + id).build();
}
} | 2,227 | 30.380282 | 78 | java |
null | NearPMSW-main/baseline/logging/YCSB2/rest/src/main/java/site/ycsb/webservice/rest/package-info.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* YCSB binding for RESTFul Web Services.
*/
package site.ycsb.webservice.rest;
| 745 | 31.434783 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/rest/src/main/java/site/ycsb/webservice/rest/RestClient.java | /**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.webservice.rest;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
import java.util.zip.GZIPInputStream;
import javax.ws.rs.HttpMethod;
import org.apache.http.HttpEntity;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
/**
* Class responsible for making web service requests for benchmarking purpose.
* Using Apache HttpClient over standard Java HTTP API as this is more flexible
* and provides better functionality. For example HttpClient can automatically
* handle redirects and proxy authentication which the standard Java API can't.
*/
public class RestClient extends DB {
private static final String URL_PREFIX = "url.prefix";
private static final String CON_TIMEOUT = "timeout.con";
private static final String READ_TIMEOUT = "timeout.read";
private static final String EXEC_TIMEOUT = "timeout.exec";
private static final String LOG_ENABLED = "log.enable";
private static final String HEADERS = "headers";
private static final String COMPRESSED_RESPONSE = "response.compression";
private boolean compressedResponse;
private boolean logEnabled;
private String urlPrefix;
private Properties props;
private String[] headers;
private CloseableHttpClient client;
private int conTimeout = 10000;
private int readTimeout = 10000;
private int execTimeout = 10000;
private volatile Criteria requestTimedout = new Criteria(false);
@Override
public void init() throws DBException {
props = getProperties();
urlPrefix = props.getProperty(URL_PREFIX, "http://127.0.0.1:8080");
conTimeout = Integer.valueOf(props.getProperty(CON_TIMEOUT, "10")) * 1000;
readTimeout = Integer.valueOf(props.getProperty(READ_TIMEOUT, "10")) * 1000;
execTimeout = Integer.valueOf(props.getProperty(EXEC_TIMEOUT, "10")) * 1000;
logEnabled = Boolean.valueOf(props.getProperty(LOG_ENABLED, "false").trim());
compressedResponse = Boolean.valueOf(props.getProperty(COMPRESSED_RESPONSE, "false").trim());
headers = props.getProperty(HEADERS, "Accept */* Content-Type application/xml user-agent Mozilla/5.0 ").trim()
.split(" ");
setupClient();
}
private void setupClient() {
RequestConfig.Builder requestBuilder = RequestConfig.custom();
requestBuilder = requestBuilder.setConnectTimeout(conTimeout);
requestBuilder = requestBuilder.setConnectionRequestTimeout(readTimeout);
requestBuilder = requestBuilder.setSocketTimeout(readTimeout);
HttpClientBuilder clientBuilder = HttpClientBuilder.create().setDefaultRequestConfig(requestBuilder.build());
this.client = clientBuilder.setConnectionManagerShared(true).build();
}
@Override
public Status read(String table, String endpoint, Set<String> fields, Map<String, ByteIterator> result) {
int responseCode;
try {
responseCode = httpGet(urlPrefix + endpoint, result);
} catch (Exception e) {
responseCode = handleExceptions(e, urlPrefix + endpoint, HttpMethod.GET);
}
if (logEnabled) {
System.err.println(new StringBuilder("GET Request: ").append(urlPrefix).append(endpoint)
.append(" | Response Code: ").append(responseCode).toString());
}
return getStatus(responseCode);
}
@Override
public Status insert(String table, String endpoint, Map<String, ByteIterator> values) {
int responseCode;
try {
responseCode = httpExecute(new HttpPost(urlPrefix + endpoint), values.get("data").toString());
} catch (Exception e) {
responseCode = handleExceptions(e, urlPrefix + endpoint, HttpMethod.POST);
}
if (logEnabled) {
System.err.println(new StringBuilder("POST Request: ").append(urlPrefix).append(endpoint)
.append(" | Response Code: ").append(responseCode).toString());
}
return getStatus(responseCode);
}
@Override
public Status delete(String table, String endpoint) {
int responseCode;
try {
responseCode = httpDelete(urlPrefix + endpoint);
} catch (Exception e) {
responseCode = handleExceptions(e, urlPrefix + endpoint, HttpMethod.DELETE);
}
if (logEnabled) {
System.err.println(new StringBuilder("DELETE Request: ").append(urlPrefix).append(endpoint)
.append(" | Response Code: ").append(responseCode).toString());
}
return getStatus(responseCode);
}
@Override
public Status update(String table, String endpoint, Map<String, ByteIterator> values) {
int responseCode;
try {
responseCode = httpExecute(new HttpPut(urlPrefix + endpoint), values.get("data").toString());
} catch (Exception e) {
responseCode = handleExceptions(e, urlPrefix + endpoint, HttpMethod.PUT);
}
if (logEnabled) {
System.err.println(new StringBuilder("PUT Request: ").append(urlPrefix).append(endpoint)
.append(" | Response Code: ").append(responseCode).toString());
}
return getStatus(responseCode);
}
@Override
public Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
return Status.NOT_IMPLEMENTED;
}
// Maps HTTP status codes to YCSB status codes.
private Status getStatus(int responseCode) {
int rc = responseCode / 100;
if (responseCode == 400) {
return Status.BAD_REQUEST;
} else if (responseCode == 403) {
return Status.FORBIDDEN;
} else if (responseCode == 404) {
return Status.NOT_FOUND;
} else if (responseCode == 501) {
return Status.NOT_IMPLEMENTED;
} else if (responseCode == 503) {
return Status.SERVICE_UNAVAILABLE;
} else if (rc == 5) {
return Status.ERROR;
}
return Status.OK;
}
private int handleExceptions(Exception e, String url, String method) {
if (logEnabled) {
System.err.println(new StringBuilder(method).append(" Request: ").append(url).append(" | ")
.append(e.getClass().getName()).append(" occured | Error message: ")
.append(e.getMessage()).toString());
}
if (e instanceof ClientProtocolException) {
return 400;
}
return 500;
}
// Connection is automatically released back in case of an exception.
private int httpGet(String endpoint, Map<String, ByteIterator> result) throws IOException {
requestTimedout.setIsSatisfied(false);
Thread timer = new Thread(new Timer(execTimeout, requestTimedout));
timer.start();
int responseCode = 200;
HttpGet request = new HttpGet(endpoint);
for (int i = 0; i < headers.length; i = i + 2) {
request.setHeader(headers[i], headers[i + 1]);
}
CloseableHttpResponse response = client.execute(request);
responseCode = response.getStatusLine().getStatusCode();
HttpEntity responseEntity = response.getEntity();
// If null entity don't bother about connection release.
if (responseEntity != null) {
InputStream stream = responseEntity.getContent();
/*
* TODO: Gzip Compression must be supported in the future. Header[]
* header = response.getAllHeaders();
* if(response.getHeaders("Content-Encoding")[0].getValue().contains
* ("gzip")) stream = new GZIPInputStream(stream);
*/
BufferedReader reader = new BufferedReader(new InputStreamReader(stream, "UTF-8"));
StringBuffer responseContent = new StringBuffer();
String line = "";
while ((line = reader.readLine()) != null) {
if (requestTimedout.isSatisfied()) {
// Must avoid memory leak.
reader.close();
stream.close();
EntityUtils.consumeQuietly(responseEntity);
response.close();
client.close();
throw new TimeoutException();
}
responseContent.append(line);
}
timer.interrupt();
result.put("response", new StringByteIterator(responseContent.toString()));
// Closing the input stream will trigger connection release.
stream.close();
}
EntityUtils.consumeQuietly(responseEntity);
response.close();
client.close();
return responseCode;
}
private int httpExecute(HttpEntityEnclosingRequestBase request, String data) throws IOException {
requestTimedout.setIsSatisfied(false);
Thread timer = new Thread(new Timer(execTimeout, requestTimedout));
timer.start();
int responseCode = 200;
for (int i = 0; i < headers.length; i = i + 2) {
request.setHeader(headers[i], headers[i + 1]);
}
InputStreamEntity reqEntity = new InputStreamEntity(new ByteArrayInputStream(data.getBytes()),
ContentType.APPLICATION_FORM_URLENCODED);
reqEntity.setChunked(true);
request.setEntity(reqEntity);
CloseableHttpResponse response = client.execute(request);
responseCode = response.getStatusLine().getStatusCode();
HttpEntity responseEntity = response.getEntity();
// If null entity don't bother about connection release.
if (responseEntity != null) {
InputStream stream = responseEntity.getContent();
if (compressedResponse) {
stream = new GZIPInputStream(stream);
}
BufferedReader reader = new BufferedReader(new InputStreamReader(stream, "UTF-8"));
StringBuffer responseContent = new StringBuffer();
String line = "";
while ((line = reader.readLine()) != null) {
if (requestTimedout.isSatisfied()) {
// Must avoid memory leak.
reader.close();
stream.close();
EntityUtils.consumeQuietly(responseEntity);
response.close();
client.close();
throw new TimeoutException();
}
responseContent.append(line);
}
timer.interrupt();
// Closing the input stream will trigger connection release.
stream.close();
}
EntityUtils.consumeQuietly(responseEntity);
response.close();
client.close();
return responseCode;
}
private int httpDelete(String endpoint) throws IOException {
requestTimedout.setIsSatisfied(false);
Thread timer = new Thread(new Timer(execTimeout, requestTimedout));
timer.start();
int responseCode = 200;
HttpDelete request = new HttpDelete(endpoint);
for (int i = 0; i < headers.length; i = i + 2) {
request.setHeader(headers[i], headers[i + 1]);
}
CloseableHttpResponse response = client.execute(request);
responseCode = response.getStatusLine().getStatusCode();
response.close();
client.close();
return responseCode;
}
/**
* Marks the input {@link Criteria} as satisfied when the input time has elapsed.
*/
class Timer implements Runnable {
private long timeout;
private Criteria timedout;
public Timer(long timeout, Criteria timedout) {
this.timedout = timedout;
this.timeout = timeout;
}
@Override
public void run() {
try {
Thread.sleep(timeout);
this.timedout.setIsSatisfied(true);
} catch (InterruptedException e) {
// Do nothing.
}
}
}
/**
* Sets the flag when a criteria is fulfilled.
*/
class Criteria {
private boolean isSatisfied;
public Criteria(boolean isSatisfied) {
this.isSatisfied = isSatisfied;
}
public boolean isSatisfied() {
return isSatisfied;
}
public void setIsSatisfied(boolean satisfied) {
this.isSatisfied = satisfied;
}
}
/**
* Private exception class for execution timeout.
*/
class TimeoutException extends RuntimeException {
private static final long serialVersionUID = 1L;
public TimeoutException() {
super("HTTP Request exceeded execution time limit.");
}
}
}
| 13,272 | 34.680108 | 114 | java |
null | NearPMSW-main/baseline/logging/YCSB2/googledatastore/src/main/java/site/ycsb/db/package-info.java | /**
* Copyright (c) 2015 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* YCSB binding for
<a href="https://cloud.google.com/datastore/">Google Cloud Datastore</a>.
*/
package site.ycsb.db;
| 783 | 33.086957 | 73 | java |
null | NearPMSW-main/baseline/logging/YCSB2/googledatastore/src/main/java/site/ycsb/db/GoogleDatastoreClient.java | /*
* Copyright 2015 YCSB contributors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import com.google.api.client.auth.oauth2.Credential;
import com.google.api.client.googleapis.auth.oauth2.GoogleCredential;
import com.google.datastore.v1.*;
import com.google.datastore.v1.CommitRequest.Mode;
import com.google.datastore.v1.ReadOptions.ReadConsistency;
import com.google.datastore.v1.client.Datastore;
import com.google.datastore.v1.client.DatastoreException;
import com.google.datastore.v1.client.DatastoreFactory;
import com.google.datastore.v1.client.DatastoreHelper;
import com.google.datastore.v1.client.DatastoreOptions;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import java.io.IOException;
import java.security.GeneralSecurityException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.Vector;
import javax.annotation.Nullable;
/**
* Google Cloud Datastore Client for YCSB.
*/
public class GoogleDatastoreClient extends DB {
/**
* Defines a MutationType used in this class.
*/
private enum MutationType {
UPSERT,
UPDATE,
DELETE
}
/**
* Defines a EntityGroupingMode enum used in this class.
*/
private enum EntityGroupingMode {
ONE_ENTITY_PER_GROUP,
MULTI_ENTITY_PER_GROUP
}
private static Logger logger =
Logger.getLogger(GoogleDatastoreClient.class);
// Read consistency defaults to "STRONG" per YCSB guidance.
// User can override this via configure.
private ReadConsistency readConsistency = ReadConsistency.STRONG;
private EntityGroupingMode entityGroupingMode =
EntityGroupingMode.ONE_ENTITY_PER_GROUP;
private String rootEntityName;
private Datastore datastore = null;
private static boolean skipIndex = true;
/**
* Initialize any state for this DB. Called once per DB instance; there is
* one DB instance per client thread.
*/
@Override
public void init() throws DBException {
String debug = getProperties().getProperty("googledatastore.debug", null);
if (null != debug && "true".equalsIgnoreCase(debug)) {
logger.setLevel(Level.DEBUG);
}
String skipIndexString = getProperties().getProperty(
"googledatastore.skipIndex", null);
if (null != skipIndexString && "false".equalsIgnoreCase(skipIndexString)) {
skipIndex = false;
}
// We need the following 3 essential properties to initialize datastore:
//
// - DatasetId,
// - Path to private key file,
// - Service account email address.
String datasetId = getProperties().getProperty(
"googledatastore.datasetId", null);
if (datasetId == null) {
throw new DBException(
"Required property \"datasetId\" missing.");
}
String privateKeyFile = getProperties().getProperty(
"googledatastore.privateKeyFile", null);
String serviceAccountEmail = getProperties().getProperty(
"googledatastore.serviceAccountEmail", null);
// Below are properties related to benchmarking.
String readConsistencyConfig = getProperties().getProperty(
"googledatastore.readConsistency", null);
if (readConsistencyConfig != null) {
try {
this.readConsistency = ReadConsistency.valueOf(
readConsistencyConfig.trim().toUpperCase());
} catch (IllegalArgumentException e) {
throw new DBException("Invalid read consistency specified: " +
readConsistencyConfig + ". Expecting STRONG or EVENTUAL.");
}
}
//
// Entity Grouping Mode (googledatastore.entitygroupingmode), see
// documentation in conf/googledatastore.properties.
//
String entityGroupingConfig = getProperties().getProperty(
"googledatastore.entityGroupingMode", null);
if (entityGroupingConfig != null) {
try {
this.entityGroupingMode = EntityGroupingMode.valueOf(
entityGroupingConfig.trim().toUpperCase());
} catch (IllegalArgumentException e) {
throw new DBException("Invalid entity grouping mode specified: " +
entityGroupingConfig + ". Expecting ONE_ENTITY_PER_GROUP or " +
"MULTI_ENTITY_PER_GROUP.");
}
}
this.rootEntityName = getProperties().getProperty(
"googledatastore.rootEntityName", "YCSB_ROOT_ENTITY");
try {
// Setup the connection to Google Cloud Datastore with the credentials
// obtained from the configure.
DatastoreOptions.Builder options = new DatastoreOptions.Builder();
Credential credential = GoogleCredential.getApplicationDefault();
if (serviceAccountEmail != null && privateKeyFile != null) {
credential = DatastoreHelper.getServiceAccountCredential(
serviceAccountEmail, privateKeyFile);
logger.info("Using JWT Service Account credential.");
logger.info("DatasetID: " + datasetId + ", Service Account Email: " +
serviceAccountEmail + ", Private Key File Path: " + privateKeyFile);
} else {
logger.info("Using default gcloud credential.");
logger.info("DatasetID: " + datasetId
+ ", Service Account Email: " + ((GoogleCredential) credential).getServiceAccountId());
}
datastore = DatastoreFactory.get().create(
options.credential(credential).projectId(datasetId).build());
} catch (GeneralSecurityException exception) {
throw new DBException("Security error connecting to the datastore: " +
exception.getMessage(), exception);
} catch (IOException exception) {
throw new DBException("I/O error connecting to the datastore: " +
exception.getMessage(), exception);
}
logger.info("Datastore client instance created: " +
datastore.toString());
}
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
LookupRequest.Builder lookupRequest = LookupRequest.newBuilder();
lookupRequest.addKeys(buildPrimaryKey(table, key));
lookupRequest.getReadOptionsBuilder().setReadConsistency(
this.readConsistency);
// Note above, datastore lookupRequest always reads the entire entity, it
// does not support reading a subset of "fields" (properties) of an entity.
logger.debug("Built lookup request as: " + lookupRequest.toString());
LookupResponse response = null;
try {
response = datastore.lookup(lookupRequest.build());
} catch (DatastoreException exception) {
logger.error(
String.format("Datastore Exception when reading (%s): %s %s",
exception.getMessage(),
exception.getMethodName(),
exception.getCode()));
// DatastoreException.getCode() returns an HTTP response code which we
// will bubble up to the user as part of the YCSB Status "name".
return new Status("ERROR-" + exception.getCode(), exception.getMessage());
}
if (response.getFoundCount() == 0) {
return new Status("ERROR-404", "Not Found, key is: " + key);
} else if (response.getFoundCount() > 1) {
// We only asked to lookup for one key, shouldn't have got more than one
// entity back. Unexpected State.
return Status.UNEXPECTED_STATE;
}
Entity entity = response.getFound(0).getEntity();
logger.debug("Read entity: " + entity.toString());
Map<String, Value> properties = entity.getProperties();
Set<String> propertiesToReturn =
(fields == null ? properties.keySet() : fields);
for (String name : propertiesToReturn) {
if (properties.containsKey(name)) {
result.put(name, new StringByteIterator(properties.get(name)
.getStringValue()));
}
}
return Status.OK;
}
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
// TODO: Implement Scan as query on primary key.
return Status.NOT_IMPLEMENTED;
}
@Override
public Status update(String table, String key,
Map<String, ByteIterator> values) {
return doSingleItemMutation(table, key, values, MutationType.UPDATE);
}
@Override
public Status insert(String table, String key,
Map<String, ByteIterator> values) {
// Use Upsert to allow overwrite of existing key instead of failing the
// load (or run) just because the DB already has the key.
// This is the same behavior as what other DB does here (such as
// the DynamoDB client).
return doSingleItemMutation(table, key, values, MutationType.UPSERT);
}
@Override
public Status delete(String table, String key) {
return doSingleItemMutation(table, key, null, MutationType.DELETE);
}
private Key.Builder buildPrimaryKey(String table, String key) {
Key.Builder result = Key.newBuilder();
if (this.entityGroupingMode == EntityGroupingMode.MULTI_ENTITY_PER_GROUP) {
// All entities are in side the same group when we are in this mode.
result.addPath(Key.PathElement.newBuilder().setKind(table).
setName(rootEntityName));
}
return result.addPath(Key.PathElement.newBuilder().setKind(table)
.setName(key));
}
private Status doSingleItemMutation(String table, String key,
@Nullable Map<String, ByteIterator> values,
MutationType mutationType) {
// First build the key.
Key.Builder datastoreKey = buildPrimaryKey(table, key);
// Build a commit request in non-transactional mode.
// Single item mutation to google datastore
// is always atomic and strongly consistent. Transaction is only necessary
// for multi-item mutation, or Read-modify-write operation.
CommitRequest.Builder commitRequest = CommitRequest.newBuilder();
commitRequest.setMode(Mode.NON_TRANSACTIONAL);
if (mutationType == MutationType.DELETE) {
commitRequest.addMutationsBuilder().setDelete(datastoreKey);
} else {
// If this is not for delete, build the entity.
Entity.Builder entityBuilder = Entity.newBuilder();
entityBuilder.setKey(datastoreKey);
for (Entry<String, ByteIterator> val : values.entrySet()) {
entityBuilder.getMutableProperties()
.put(val.getKey(),
Value.newBuilder()
.setStringValue(val.getValue().toString())
.setExcludeFromIndexes(skipIndex).build());
}
Entity entity = entityBuilder.build();
logger.debug("entity built as: " + entity.toString());
if (mutationType == MutationType.UPSERT) {
commitRequest.addMutationsBuilder().setUpsert(entity);
} else if (mutationType == MutationType.UPDATE){
commitRequest.addMutationsBuilder().setUpdate(entity);
} else {
throw new RuntimeException("Impossible MutationType, code bug.");
}
}
try {
datastore.commit(commitRequest.build());
logger.debug("successfully committed.");
} catch (DatastoreException exception) {
// Catch all Datastore rpc errors.
// Log the exception, the name of the method called and the error code.
logger.error(
String.format("Datastore Exception when committing (%s): %s %s",
exception.getMessage(),
exception.getMethodName(),
exception.getCode()));
// DatastoreException.getCode() returns an HTTP response code which we
// will bubble up to the user as part of the YCSB Status "name".
return new Status("ERROR-" + exception.getCode(), exception.getMessage());
}
return Status.OK;
}
}
| 12,381 | 34.994186 | 99 | java |
null | NearPMSW-main/baseline/logging/YCSB2/crail/src/main/java/site/ycsb/db/crail/CrailClient.java | /**
* Copyright (c) 2015 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.crail;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.Vector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.crail.CrailBufferedInputStream;
import org.apache.crail.CrailBufferedOutputStream;
import org.apache.crail.CrailStore;
import org.apache.crail.CrailKeyValue;
import org.apache.crail.CrailLocationClass;
import org.apache.crail.CrailNodeType;
import org.apache.crail.CrailStorageClass;
import org.apache.crail.conf.CrailConfiguration;
import site.ycsb.ByteArrayByteIterator;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
/**
* Crail binding for <a href="http://crail.apache.org/">Crail</a>.
*/
public class CrailClient extends DB {
private static final Logger LOG = LoggerFactory.getLogger(CrailClient.class);
private CrailStore client;
private long startTime;
private long endTime;
private String usertable;
private boolean enumerateKeys;
@Override
public void init() throws DBException {
super.init();
try {
CrailConfiguration crailConf = new CrailConfiguration();
this.client = CrailStore.newInstance(crailConf);
usertable = getProperties().getProperty("table", "usertable");
enumerateKeys = Boolean.parseBoolean(getProperties().getProperty("crail.enumeratekeys", "false"));
if (client.lookup(usertable).get() == null) {
client.create(usertable, CrailNodeType.TABLE, CrailStorageClass.DEFAULT,
CrailLocationClass.DEFAULT, true).get().syncDir();
}
this.startTime = System.nanoTime();
} catch(Exception e){
throw new DBException(e);
}
}
@Override
public void cleanup() throws DBException {
try {
this.endTime = System.nanoTime();
long runTime = (endTime - startTime) / 1000000;
client.close();
} catch(Exception e){
throw new DBException(e);
}
}
@Override
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
try {
String path = table + "/" + key;
CrailKeyValue file = client.lookup(path).get().asKeyValue();
CrailBufferedInputStream stream = file.getBufferedInputStream(1024);
while(stream.available() < Integer.BYTES){
assert true;
}
int fieldKeyLength = stream.readInt();
while(stream.available() < fieldKeyLength){
assert true;
}
byte[] fieldKey = new byte[fieldKeyLength];
int res = stream.read(fieldKey);
if (res != fieldKey.length){
stream.close();
return Status.ERROR;
}
while(stream.available() < Integer.BYTES){
assert true;
}
int fieldValueLength = stream.readInt();
while(stream.available() < fieldValueLength){
assert true;
}
byte[] fieldValue = new byte[fieldValueLength];
res = stream.read(fieldValue);
if (res != fieldValue.length){
stream.close();
return Status.ERROR;
}
result.put(new String(fieldKey), new ByteArrayByteIterator(fieldValue));
stream.close();
return Status.OK;
} catch(Exception e){
LOG.error("Error during read, table " + table + ", key " + key + ", exception " + e.getMessage());
return new Status("read error", "reading exception");
}
}
@Override
public Status scan(String table, String startKey, int recordCount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
return Status.NOT_IMPLEMENTED;
}
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
return insert(table, key, values);
}
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
try {
String path = table + "/" + key;
CrailKeyValue file = client.create(path, CrailNodeType.KEYVALUE, CrailStorageClass.DEFAULT,
CrailLocationClass.DEFAULT, enumerateKeys).get().asKeyValue();
CrailBufferedOutputStream stream = file.getBufferedOutputStream(1024);
for (Entry<String, ByteIterator> entry : values.entrySet()){
byte[] fieldKey = entry.getKey().getBytes();
int fieldKeyLength = fieldKey.length;
byte[] fieldValue = entry.getValue().toArray();
int fieldValueLength = fieldValue.length;
stream.writeInt(fieldKeyLength);
stream.write(fieldKey);
stream.writeInt(fieldValueLength);
stream.write(fieldValue);
}
file.syncDir();
stream.close();
} catch(Exception e){
LOG.error("Error during insert, table " + table + ", key " + key + ", exception " + e.getMessage());
return Status.ERROR;
}
return Status.OK;
}
@Override
public Status delete(String table, String key) {
try {
String path = table + "/" + key;
client.delete(path, false).get().syncDir();
} catch(Exception e){
LOG.error("Error during delete, table " + table + ", key " + key + ", exception " + e.getMessage());
return Status.ERROR;
}
return Status.OK;
}
}
| 5,816 | 32.051136 | 106 | java |
null | NearPMSW-main/baseline/logging/YCSB2/crail/src/main/java/site/ycsb/db/crail/package-info.java | /**
* Copyright (c) 2015 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* YCSB binding for <a href="http://www.crail.io/">Crail</a>.
*/
package site.ycsb.db.crail;
| 757 | 33.454545 | 70 | java |
null | NearPMSW-main/baseline/logging/YCSB2/tarantool/src/main/java/site/ycsb/db/TarantoolClient.java | /**
* Copyright (c) 2014 - 2016 YCSB Contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import site.ycsb.*;
import org.tarantool.TarantoolConnection16;
import org.tarantool.TarantoolConnection16Impl;
import org.tarantool.TarantoolException;
import java.util.*;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* YCSB binding for <a href="http://tarantool.org/">Tarantool</a>.
*/
public class TarantoolClient extends DB {
private static final Logger LOGGER = Logger.getLogger(TarantoolClient.class.getName());
private static final String HOST_PROPERTY = "tarantool.host";
private static final String PORT_PROPERTY = "tarantool.port";
private static final String SPACE_PROPERTY = "tarantool.space";
private static final String DEFAULT_HOST = "localhost";
private static final String DEFAULT_PORT = "3301";
private static final String DEFAULT_SPACE = "1024";
private TarantoolConnection16 connection;
private int spaceNo;
public void init() throws DBException {
Properties props = getProperties();
int port = Integer.parseInt(props.getProperty(PORT_PROPERTY, DEFAULT_PORT));
String host = props.getProperty(HOST_PROPERTY, DEFAULT_HOST);
spaceNo = Integer.parseInt(props.getProperty(SPACE_PROPERTY, DEFAULT_SPACE));
try {
this.connection = new TarantoolConnection16Impl(host, port);
} catch (Exception exc) {
throw new DBException("Can't initialize Tarantool connection", exc);
}
}
public void cleanup() throws DBException {
this.connection.close();
}
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
return replace(key, values, "Can't insert element");
}
private HashMap<String, ByteIterator> tupleConvertFilter(List<String> input, Set<String> fields) {
HashMap<String, ByteIterator> result = new HashMap<>();
if (input == null) {
return result;
}
for (int i = 1; i < input.toArray().length; i += 2) {
if (fields == null || fields.contains(input.get(i))) {
result.put(input.get(i), new StringByteIterator(input.get(i + 1)));
}
}
return result;
}
@Override
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
try {
List<String> response = this.connection.select(this.spaceNo, 0, Arrays.asList(key), 0, 1, 0);
result = tupleConvertFilter(response, fields);
return Status.OK;
} catch (TarantoolException exc) {
LOGGER.log(Level.SEVERE, "Can't select element", exc);
return Status.ERROR;
} catch (NullPointerException exc) {
return Status.ERROR;
}
}
@Override
public Status scan(String table, String startkey,
int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
List<List<String>> response;
try {
response = this.connection.select(this.spaceNo, 0, Arrays.asList(startkey), 0, recordcount, 6);
} catch (TarantoolException exc) {
LOGGER.log(Level.SEVERE, "Can't select range elements", exc);
return Status.ERROR;
} catch (NullPointerException exc) {
return Status.ERROR;
}
for (List<String> i : response) {
HashMap<String, ByteIterator> temp = tupleConvertFilter(i, fields);
if (!temp.isEmpty()) {
result.add((HashMap<String, ByteIterator>) temp.clone());
}
}
return Status.OK;
}
@Override
public Status delete(String table, String key) {
try {
this.connection.delete(this.spaceNo, Collections.singletonList(key));
} catch (TarantoolException exc) {
LOGGER.log(Level.SEVERE, "Can't delete element", exc);
return Status.ERROR;
} catch (NullPointerException e) {
return Status.ERROR;
}
return Status.OK;
}
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
return replace(key, values, "Can't replace element");
}
private Status replace(String key, Map<String, ByteIterator> values, String exceptionDescription) {
int j = 0;
String[] tuple = new String[1 + 2 * values.size()];
tuple[0] = key;
for (Map.Entry<String, ByteIterator> i : values.entrySet()) {
tuple[j + 1] = i.getKey();
tuple[j + 2] = i.getValue().toString();
j += 2;
}
try {
this.connection.replace(this.spaceNo, tuple);
} catch (TarantoolException exc) {
LOGGER.log(Level.SEVERE, exceptionDescription, exc);
return Status.ERROR;
}
return Status.OK;
}
}
| 5,181 | 32.869281 | 102 | java |
null | NearPMSW-main/baseline/logging/YCSB2/tarantool/src/main/java/site/ycsb/db/package-info.java | /*
* Copyright (c) 2014 - 2016 YCSB Contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* YCSB binding for <a href="http://tarantool.org/">Tarantool</a>.
*/
package site.ycsb.db;
| 763 | 32.217391 | 70 | java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.