Dataset columns:
repo: string (value lengths 1 to 191)
file: string (value lengths 23 to 351)
code: string (value lengths 0 to 5.32M)
file_length: int64 (values 0 to 5.32M)
avg_line_length: float64 (values 0 to 2.9k)
max_line_length: int64 (values 0 to 288k)
extension_type: string (1 distinct value)
repo: null
file: NearPMSW-main/baseline/logging/YCSB/solr7/src/test/java/site/ycsb/db/solr7/SolrClientBaseTest.java
/** * Copyright (c) 2020 YCSB contributors. All rights reserved. * <p/> * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * <p/> * http://www.apache.org/licenses/LICENSE-2.0 * <p/> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.solr7; import site.ycsb.ByteIterator; import site.ycsb.DB; import site.ycsb.Status; import site.ycsb.StringByteIterator; import site.ycsb.workloads.CoreWorkload; import org.apache.solr.client.solrj.embedded.JettyConfig; import org.apache.solr.client.solrj.request.CollectionAdminRequest; import org.apache.solr.cloud.MiniSolrCloudCluster; import org.apache.solr.common.util.NamedList; import org.junit.*; import java.io.File; import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.HashMap; import java.util.Properties; import java.util.Set; import java.util.Vector; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; public abstract class SolrClientBaseTest { protected static MiniSolrCloudCluster miniSolrCloudCluster; private DB instance; private final static HashMap<String, ByteIterator> MOCK_DATA; protected final static String MOCK_TABLE = "ycsb"; private final static String MOCK_KEY0 = "0"; private final static String MOCK_KEY1 = "1"; private final static int NUM_RECORDS = 10; private final static String FIELD_PREFIX = CoreWorkload.FIELD_NAME_PREFIX_DEFAULT; static { MOCK_DATA = new HashMap<>(NUM_RECORDS); for (int i = 0; i < NUM_RECORDS; i++) { MOCK_DATA.put(FIELD_PREFIX + i, new StringByteIterator("value" + i)); } } @BeforeClass public static void onlyOnce() throws Exception { Path miniSolrCloudClusterTempDirectory = Files.createTempDirectory("miniSolrCloudCluster"); miniSolrCloudClusterTempDirectory.toFile().deleteOnExit(); miniSolrCloudCluster = new MiniSolrCloudCluster(1, miniSolrCloudClusterTempDirectory, JettyConfig.builder().build()); // Upload Solr configuration URL configDir = SolrClientBaseTest.class.getClassLoader().getResource("solr_config"); assertNotNull(configDir); miniSolrCloudCluster.uploadConfigSet(Paths.get(configDir.toURI()), MOCK_TABLE); } @AfterClass public static void destroy() throws Exception { if(miniSolrCloudCluster != null) { miniSolrCloudCluster.shutdown(); } } @Before public void setup() throws Exception { CollectionAdminRequest.createCollection(MOCK_TABLE, MOCK_TABLE, 1, 1) .withProperty("solr.directoryFactory", "solr.StandardDirectoryFactory") .process(miniSolrCloudCluster.getSolrClient()); miniSolrCloudCluster.waitForActiveCollection(MOCK_TABLE, 1, 1); Thread.sleep(1000); instance = getDB(); } @After public void tearDown() throws Exception { if(miniSolrCloudCluster != null) { CollectionAdminRequest.deleteCollection(MOCK_TABLE) .processAndWait(miniSolrCloudCluster.getSolrClient(), 60); Thread.sleep(1000); } } @Test public void testInsert() throws Exception { Status result = instance.insert(MOCK_TABLE, MOCK_KEY0, MOCK_DATA); assertEquals(Status.OK, result); } @Test public void testDelete() throws Exception { Status result = instance.delete(MOCK_TABLE, MOCK_KEY1); assertEquals(Status.OK, 
result); } @Test public void testRead() throws Exception { Set<String> fields = MOCK_DATA.keySet(); HashMap<String, ByteIterator> resultParam = new HashMap<>(NUM_RECORDS); Status result = instance.read(MOCK_TABLE, MOCK_KEY1, fields, resultParam); assertEquals(Status.OK, result); } @Test public void testUpdate() throws Exception { HashMap<String, ByteIterator> newValues = new HashMap<>(NUM_RECORDS); for (int i = 0; i < NUM_RECORDS; i++) { newValues.put(FIELD_PREFIX + i, new StringByteIterator("newvalue" + i)); } Status result = instance.update(MOCK_TABLE, MOCK_KEY1, newValues); assertEquals(Status.OK, result); //validate that the values changed HashMap<String, ByteIterator> resultParam = new HashMap<>(NUM_RECORDS); instance.read(MOCK_TABLE, MOCK_KEY1, MOCK_DATA.keySet(), resultParam); for (int i = 0; i < NUM_RECORDS; i++) { assertEquals("newvalue" + i, resultParam.get(FIELD_PREFIX + i).toString()); } } @Test public void testScan() throws Exception { Set<String> fields = MOCK_DATA.keySet(); Vector<HashMap<String, ByteIterator>> resultParam = new Vector<>(NUM_RECORDS); Status result = instance.scan(MOCK_TABLE, MOCK_KEY1, NUM_RECORDS, fields, resultParam); assertEquals(Status.OK, result); } /** * Gets the test DB. * * @return The test DB. */ protected DB getDB() { return getDB(new Properties()); } /** * Gets the test DB. * * @param props * Properties to pass to the client. * @return The test DB. */ protected abstract DB getDB(Properties props); }
file_length: 5,391, avg_line_length: 31.287425, max_line_length: 121, extension_type: java
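SolrClientBaseTest leaves getDB(Properties) abstract so the same test suite can be reused against differently configured clients. A minimal concrete subclass might look like the sketch below; the class name is illustrative and the ZooKeeper address lookup is an assumption about the MiniSolrCloudCluster test API, not code taken from the repository.

package site.ycsb.db.solr7;

import site.ycsb.DB;
import site.ycsb.DBException;

import java.util.Properties;

/** Hypothetical concrete test: runs the shared suite against the embedded cluster in cloud mode. */
public class SolrClientCloudTest extends SolrClientBaseTest {
  @Override
  protected DB getDB(Properties props) {
    // Point the binding at the MiniSolrCloudCluster started by the base class.
    props.setProperty("solr.cloud", "true");
    props.setProperty("solr.zookeeper.hosts", miniSolrCloudCluster.getZkServer().getZkAddress());
    SolrClient client = new SolrClient();
    client.setProperties(props);
    try {
      client.init();
    } catch (DBException e) {
      throw new RuntimeException(e);
    }
    return client;
  }
}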
repo: null
file: NearPMSW-main/baseline/logging/YCSB/solr7/src/main/java/site/ycsb/db/solr7/package-info.java
/*
 * Copyright (c) 2020 YCSB contributors. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License. See accompanying
 * LICENSE file.
 */

/**
 * The YCSB binding for
 * <a href="http://lucene.apache.org/solr/">Solr</a>.
 */
package site.ycsb.db.solr7;
file_length: 774, avg_line_length: 31.291667, max_line_length: 70, extension_type: java
repo: null
file: NearPMSW-main/baseline/logging/YCSB/solr7/src/main/java/site/ycsb/db/solr7/SolrClient.java
/** * Copyright (c) 2020 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.solr7; import site.ycsb.ByteIterator; import site.ycsb.DB; import site.ycsb.DBException; import site.ycsb.Status; import site.ycsb.StringByteIterator; import org.apache.solr.client.solrj.SolrQuery; import org.apache.solr.client.solrj.impl.CloudSolrClient; import org.apache.solr.client.solrj.impl.HttpClientUtil; import org.apache.solr.client.solrj.impl.HttpSolrClient; import org.apache.solr.client.solrj.impl.Krb5HttpClientBuilder; import org.apache.solr.client.solrj.response.QueryResponse; import org.apache.solr.client.solrj.response.UpdateResponse; import org.apache.solr.client.solrj.SolrServerException; import org.apache.solr.common.SolrDocument; import org.apache.solr.common.SolrDocumentList; import org.apache.solr.common.SolrInputDocument; import java.io.IOException; import java.util.*; import java.util.Map.Entry; /** * Solr client for YCSB framework. * * <p> * Default properties to set: * </p> * <ul> * See README.md * </ul> * */ public class SolrClient extends DB { public static final String DEFAULT_CLOUD_MODE = "false"; public static final String DEFAULT_BATCH_MODE = "false"; public static final String DEFAULT_ZOOKEEPER_HOSTS = "localhost:2181"; public static final String DEFAULT_SOLR_BASE_URL = "http://localhost:8983/solr"; public static final String DEFAULT_COMMIT_WITHIN_TIME = "1000"; private org.apache.solr.client.solrj.SolrClient client; private Integer commitTime; private Boolean batchMode; /** * Initialize any state for this DB. Called once per DB instance; there is one DB instance per * client thread. 
*/ @Override public void init() throws DBException { Properties props = getProperties(); commitTime = Integer .parseInt(props.getProperty("solr.commit.within.time", DEFAULT_COMMIT_WITHIN_TIME)); batchMode = Boolean.parseBoolean(props.getProperty("solr.batch.mode", DEFAULT_BATCH_MODE)); String jaasConfPath = props.getProperty("solr.jaas.conf.path"); if(jaasConfPath != null) { System.setProperty("java.security.auth.login.config", jaasConfPath); HttpClientUtil.setHttpClientBuilder(new Krb5HttpClientBuilder().getHttpClientBuilder(Optional.empty())); } // Check if Solr cluster is running in SolrCloud or Stand-alone mode Boolean cloudMode = Boolean.parseBoolean(props.getProperty("solr.cloud", DEFAULT_CLOUD_MODE)); System.err.println("Solr Cloud Mode = " + cloudMode); if (cloudMode) { System.err.println("Solr Zookeeper Remote Hosts = " + props.getProperty("solr.zookeeper.hosts", DEFAULT_ZOOKEEPER_HOSTS)); client = new CloudSolrClient.Builder().withZkHost( Arrays.asList(props.getProperty("solr.zookeeper.hosts", DEFAULT_ZOOKEEPER_HOSTS).split(","))).build(); } else { client = new HttpSolrClient.Builder(props.getProperty("solr.base.url", DEFAULT_SOLR_BASE_URL)).build(); } } @Override public void cleanup() throws DBException { try { client.close(); } catch (IOException e) { throw new DBException(e); } } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be * written into the record with the specified record key. * * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See this class's description for a * discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { SolrInputDocument doc = new SolrInputDocument(); doc.addField("id", key); for (Entry<String, String> entry : StringByteIterator.getStringMap(values).entrySet()) { doc.addField(entry.getKey(), entry.getValue()); } UpdateResponse response; if (batchMode) { response = client.add(table, doc, commitTime); } else { response = client.add(table, doc); client.commit(table); } return checkStatus(response.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See this class's description for a * discussion of error codes. */ @Override public Status delete(String table, String key) { try { UpdateResponse response; if (batchMode) { response = client.deleteById(table, key, commitTime); } else { response = client.deleteById(table, key); client.commit(table); } return checkStatus(response.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result will be stored in a * HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". 
*/ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { Boolean returnFields = false; String[] fieldList = null; if (fields != null) { returnFields = true; fieldList = fields.toArray(new String[fields.size()]); } SolrQuery query = new SolrQuery(); query.setQuery("id:" + key); if (returnFields) { query.setFields(fieldList); } final QueryResponse response = client.query(table, query); SolrDocumentList results = response.getResults(); if ((results != null) && (results.getNumFound() > 0)) { for (String field : results.get(0).getFieldNames()) { result.put(field, new StringByteIterator(String.valueOf(results.get(0).getFirstValue(field)))); } } return checkStatus(response.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be * written into the record with the specified record key, overwriting any existing values with the * same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's description for a * discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { SolrInputDocument updatedDoc = new SolrInputDocument(); updatedDoc.addField("id", key); for (Entry<String, String> entry : StringByteIterator.getStringMap(values).entrySet()) { updatedDoc.addField(entry.getKey(), Collections.singletonMap("set", entry.getValue())); } UpdateResponse writeResponse; if (batchMode) { writeResponse = client.add(table, updatedDoc, commitTime); } else { writeResponse = client.add(table, updatedDoc); client.commit(table); } return checkStatus(writeResponse.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the * result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error. See this class's description for a * discussion of error codes. 
*/ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { Boolean returnFields = false; String[] fieldList = null; if (fields != null) { returnFields = true; fieldList = fields.toArray(new String[fields.size()]); } SolrQuery query = new SolrQuery(); query.setQuery("*:*"); query.setParam("fq", "id:[ " + startkey + " TO * ]"); if (returnFields) { query.setFields(fieldList); } query.setRows(recordcount); final QueryResponse response = client.query(table, query); SolrDocumentList results = response.getResults(); HashMap<String, ByteIterator> entry; for (SolrDocument hit : results) { entry = new HashMap<>((int) results.getNumFound()); for (String field : hit.getFieldNames()) { entry.put(field, new StringByteIterator(String.valueOf(hit.getFirstValue(field)))); } result.add(entry); } return checkStatus(response.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } private Status checkStatus(int status) { Status responseStatus; switch (status) { case 0: responseStatus = Status.OK; break; case 400: responseStatus = Status.BAD_REQUEST; break; case 403: responseStatus = Status.FORBIDDEN; break; case 404: responseStatus = Status.NOT_FOUND; break; case 500: responseStatus = Status.ERROR; break; case 503: responseStatus = Status.SERVICE_UNAVAILABLE; break; default: responseStatus = Status.UNEXPECTED_STATE; break; } return responseStatus; } }
file_length: 11,172, avg_line_length: 32.653614, max_line_length: 110, extension_type: java
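SolrClient is configured entirely through YCSB properties read in init(): solr.cloud selects CloudSolrClient versus HttpSolrClient, solr.zookeeper.hosts and solr.base.url give the endpoints, and solr.batch.mode together with solr.commit.within.time decides whether each write commits immediately or is committed within the given window. The following is a minimal stand-alone usage sketch, assuming a Solr core or collection named ycsb is already running at the default URL; the wrapper class name is illustrative.

import site.ycsb.ByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.db.solr7.SolrClient;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public final class SolrClientExample {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.setProperty("solr.cloud", "false");                    // stand-alone mode -> HttpSolrClient
    props.setProperty("solr.base.url", "http://localhost:8983/solr");
    props.setProperty("solr.batch.mode", "true");                // rely on commitWithin instead of an explicit commit
    props.setProperty("solr.commit.within.time", "1000");        // milliseconds

    SolrClient db = new SolrClient();
    db.setProperties(props);
    db.init();

    Map<String, ByteIterator> values = new HashMap<>();
    values.put("field0", new StringByteIterator("value0"));
    Status status = db.insert("ycsb", "user1", values);          // "ycsb" is the collection/core name
    System.out.println("insert -> " + status);

    db.cleanup();
  }
}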
repo: null
file: NearPMSW-main/baseline/logging/YCSB/postgrenosql/src/test/java/site/ycsb/postgrenosql/PostgreNoSQLDBClientTest.java
/* * Copyright 2017 YCSB Contributors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.postgrenosql; import site.ycsb.*; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import org.postgresql.Driver; import java.io.IOException; import java.net.InetAddress; import java.net.Socket; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.SQLException; import java.util.*; import org.postgresql.util.PSQLException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertThat; import static org.junit.Assume.assumeNoException; /** * PostgreNoSQL test client for YCSB framework. */ public class PostgreNoSQLDBClientTest { private static final Logger LOG = LoggerFactory.getLogger(PostgreNoSQLDBClientTest.class); /** The default port for PostgreSQL. */ private static final int DEFAULT_PORT = 5432; private static final String DATABASE_NAME = "test"; private static final String DEFAULT_USER = "postgres"; private static final String DEFAULT_PWD = "postgres"; /** The properties settings */ private static final String HOST_NAME = "localhost"; private static final String TEST_DB_URL = "jdbc:postgresql://" + HOST_NAME + ":" + DEFAULT_PORT + "/" + DATABASE_NAME; private static final String TABLE_NAME = "usertable"; private static final int FIELD_LENGTH = 32; private static final String FIELD_PREFIX = "FIELD"; private static final int NUM_FIELDS = 3; private static Connection postgreSQLConnection = null; private static PostgreNoSQLDBClient postgreNoSQLClient = null; @BeforeClass public static void setUp() { // Check whether postgres is available try (Socket socket = new Socket(HOST_NAME, DEFAULT_PORT)){ assertThat("Socket is not bound.", socket.getLocalPort(), not(-1)); } catch (IOException connectFailed) { assumeNoException("PostgreSQL is not running. Skipping tests.", connectFailed); } Properties props = new Properties(); props.setProperty(PostgreNoSQLDBClient.CONNECTION_URL, TEST_DB_URL); props.setProperty(PostgreNoSQLDBClient.CONNECTION_USER, DEFAULT_USER); props.setProperty(PostgreNoSQLDBClient.CONNECTION_PASSWD, DEFAULT_PWD); props.setProperty("user", DEFAULT_USER); props.setProperty("password", DEFAULT_PWD); props.setProperty(PostgreNoSQLDBClient.JDBC_AUTO_COMMIT, "true"); try{ postgreSQLConnection = new Driver().connect(TEST_DB_URL, props); boolean tableExists = postgreSQLConnection.getMetaData().getTables(null, null, TABLE_NAME, null).next(); assertThat("Table does not exist.", tableExists, not(false)); postgreNoSQLClient = new PostgreNoSQLDBClient(); postgreNoSQLClient.setProperties(props); postgreNoSQLClient.init(); } catch (PSQLException e){ if (e.getSQLState().equals("3D000")){ assumeNoException("Database does not exist. 
Skipping tests.", e); } } catch (SQLException | DBException e){ LOG.info(e.toString()); } } @AfterClass public static void tearDown(){ } @Test public void insertRead() { String insertKey = "user0"; try{ HashMap<String, ByteIterator> insertMap = new HashMap<>(); HashMap<String, ByteIterator> copiedInsertMap = new HashMap<>(); Set<String> fields = createFieldSet(); for (int i = 0; i < NUM_FIELDS; i++) { byte[] value = new byte[FIELD_LENGTH]; for (int j = 0;j < value.length;j++){ value[j] = (byte)((i+1)*(j+1)); } insertMap.put(FIELD_PREFIX + i, new ByteArrayByteIterator(value)); copiedInsertMap.put(FIELD_PREFIX + i, new ByteArrayByteIterator(value)); } Status result = postgreNoSQLClient.insert(TABLE_NAME, insertKey, insertMap); assertThat("Insert did not return success (0).", result, is(Status.OK)); HashMap<String, ByteIterator> readResults = new HashMap<>(); result = postgreNoSQLClient.read(TABLE_NAME, insertKey, fields, readResults); assertThat("Read did not return success (0).", result, is(Status.OK)); for (Map.Entry<String, ByteIterator> entry : readResults.entrySet()) { assertArrayEquals("Read result does not match wrote entries.", entry.getValue().toArray(), copiedInsertMap.get(entry.getKey()).toArray()); } } catch (Exception e){ LOG.info(e.toString()); } } @Test public void insertReadDelete() { String insertKey = "user1"; try{ HashMap<String, ByteIterator> insertMap = new HashMap<>(); HashMap<String, ByteIterator> copiedInsertMap = new HashMap<>(); Set<String> fields = createFieldSet(); for (int i = 0; i < NUM_FIELDS; i++) { byte[] value = new byte[FIELD_LENGTH]; for (int j = 0;j < value.length;j++){ value[j] = (byte)((i+1)*(j+1)); } insertMap.put(FIELD_PREFIX + i, new ByteArrayByteIterator(value)); copiedInsertMap.put(FIELD_PREFIX + i, new ByteArrayByteIterator(value)); } Status result = postgreNoSQLClient.insert(TABLE_NAME, insertKey, insertMap); assertThat("Insert did not return success (0).", result, is(Status.OK)); HashMap<String, ByteIterator> readResults = new HashMap<>(); result = postgreNoSQLClient.read(TABLE_NAME, insertKey, fields, readResults); assertThat("Read did not return success (0).", result, is(Status.OK)); for (Map.Entry<String, ByteIterator> entry : readResults.entrySet()) { assertArrayEquals("Read result does not match wrote entries.", entry.getValue().toArray(), copiedInsertMap.get(entry.getKey()).toArray()); } result = postgreNoSQLClient.delete(TABLE_NAME, insertKey); assertThat("Delete did not return success (0).", result, is(Status.OK)); result = postgreNoSQLClient.read(TABLE_NAME, insertKey, fields, readResults); assertThat("Read did not return not found (0).", result, is(Status.NOT_FOUND)); } catch (Exception e){ LOG.info(e.toString()); } } @Test public void insertScan() { int numberOfValuesToInsert = 100; int recordcount = 5; String startKey = "00050"; try{ // create set of fields to scan Set<String> fields = createFieldSet(); // create values to insert for (int i = 0;i < numberOfValuesToInsert;i++){ HashMap<String, ByteIterator> insertMap = new HashMap<>(); for (int j = 0; j < NUM_FIELDS; j++) { byte[] value = new byte[FIELD_LENGTH]; for (int k = 0; k < value.length; k++) { value[k] = (byte) ((j + 1) * (k + 1)); } insertMap.put(FIELD_PREFIX + j, new ByteArrayByteIterator(value)); } postgreNoSQLClient.insert(TABLE_NAME, padded(i, 5), insertMap); } Vector<HashMap<String, ByteIterator>> results = new Vector<HashMap<String, ByteIterator>>(); Status result = postgreNoSQLClient.scan(TABLE_NAME, startKey,recordcount, fields, results); assertThat("Scan did not 
return success (0).", result, is(Status.OK)); assertThat("Number of results does not match.", results.size(), is(recordcount)); } catch (Exception e){ LOG.info(e.toString()); } } @Test public void insertUpdate(){ String insertKey = "user2"; try{ HashMap<String, ByteIterator> insertMap = new HashMap<>(); HashMap<String, ByteIterator> copiedInsertMap = new HashMap<>(); Set<String> fields = createFieldSet(); for (int i = 0; i < NUM_FIELDS; i++) { byte[] value = new byte[FIELD_LENGTH]; for (int j = 0;j < value.length;j++){ value[j] = (byte)((i+1)*(j+1)); } insertMap.put(FIELD_PREFIX + i, new ByteArrayByteIterator(value)); copiedInsertMap.put(FIELD_PREFIX + i, new ByteArrayByteIterator(value)); } Status result = postgreNoSQLClient.insert(TABLE_NAME, insertKey, insertMap); assertThat("Insert did not return success (0).", result, is(Status.OK)); HashMap<String, ByteIterator> updateMap = new HashMap<>(); updateMap.put("FIELD0", new ByteArrayByteIterator(new byte[]{99, 99, 99, 99})); result = postgreNoSQLClient.update(TABLE_NAME, insertKey, updateMap); assertThat("Update did not return success (0).", result, is(Status.OK)); HashMap<String, ByteIterator> readResults = new HashMap<>(); result = postgreNoSQLClient.read(TABLE_NAME, insertKey, fields, readResults); assertThat("Read did not return success (0).", result, is(Status.OK)); assertThat("Value was not updated correctly.", readResults.get("FIELD0").toArray(), is(new byte[]{99, 99, 99, 99})); } catch (Exception e){ LOG.info(e.toString()); } } private String padded(int i, int padding) { String result = String.valueOf(i); while (result.length() < padding) { result = "0" + result; } return result; } private Set<String> createFieldSet() { Set<String> fields = new HashSet<>(); for (int j = 0; j < NUM_FIELDS; j++) { fields.add(FIELD_PREFIX + j); } return fields; } }
file_length: 9,832, avg_line_length: 35.553903, max_line_length: 146, extension_type: java
repo: null
file: NearPMSW-main/baseline/logging/YCSB/postgrenosql/src/main/java/site/ycsb/postgrenosql/package-info.java
/*
 * Copyright 2017 YCSB Contributors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License. See accompanying
 * LICENSE file.
 */

/**
 * The YCSB binding for PostgreNoSQL client.
 */
package site.ycsb.postgrenosql;
file_length: 739, avg_line_length: 32.636364, max_line_length: 70, extension_type: java
repo: null
file: NearPMSW-main/baseline/logging/YCSB/postgrenosql/src/main/java/site/ycsb/postgrenosql/PostgreNoSQLDBClient.java
/* * Copyright 2017 YCSB Contributors. All Rights Reserved. * * CODE IS BASED ON the jdbc-binding JdbcDBClient class. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.postgrenosql; import site.ycsb.*; import org.json.simple.JSONObject; import org.postgresql.Driver; import org.postgresql.util.PGobject; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.*; /** * PostgreNoSQL client for YCSB framework. */ public class PostgreNoSQLDBClient extends DB { private static final Logger LOG = LoggerFactory.getLogger(PostgreNoSQLDBClient.class); /** Count the number of times initialized to teardown on the last. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** Cache for already prepared statements. */ private static ConcurrentMap<StatementType, PreparedStatement> cachedStatements; /** The driver to get the connection to postgresql. */ private static Driver postgrenosqlDriver; /** The connection to the database. */ private static Connection connection; /** The class to use as the jdbc driver. */ public static final String DRIVER_CLASS = "db.driver"; /** The URL to connect to the database. */ public static final String CONNECTION_URL = "postgrenosql.url"; /** The user name to use to connect to the database. */ public static final String CONNECTION_USER = "postgrenosql.user"; /** The password to use for establishing the connection. */ public static final String CONNECTION_PASSWD = "postgrenosql.passwd"; /** The JDBC connection auto-commit property for the driver. */ public static final String JDBC_AUTO_COMMIT = "postgrenosql.autocommit"; /** The primary key in the user table. */ public static final String PRIMARY_KEY = "YCSB_KEY"; /** The field name prefix in the table. */ public static final String COLUMN_NAME = "YCSB_VALUE"; private static final String DEFAULT_PROP = ""; /** Returns parsed boolean value from the properties if set, otherwise returns defaultVal. 
*/ private static boolean getBoolProperty(Properties props, String key, boolean defaultVal) { String valueStr = props.getProperty(key); if (valueStr != null) { return Boolean.parseBoolean(valueStr); } return defaultVal; } @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (PostgreNoSQLDBClient.class) { if (postgrenosqlDriver != null) { return; } Properties props = getProperties(); String urls = props.getProperty(CONNECTION_URL, DEFAULT_PROP); String user = props.getProperty(CONNECTION_USER, DEFAULT_PROP); String passwd = props.getProperty(CONNECTION_PASSWD, DEFAULT_PROP); boolean autoCommit = getBoolProperty(props, JDBC_AUTO_COMMIT, true); try { Properties tmpProps = new Properties(); tmpProps.setProperty("user", user); tmpProps.setProperty("password", passwd); cachedStatements = new ConcurrentHashMap<>(); postgrenosqlDriver = new Driver(); connection = postgrenosqlDriver.connect(urls, tmpProps); connection.setAutoCommit(autoCommit); } catch (Exception e) { LOG.error("Error during initialization: " + e); } } } @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { try { cachedStatements.clear(); if (!connection.getAutoCommit()){ connection.commit(); } connection.close(); } catch (SQLException e) { System.err.println("Error in cleanup execution. " + e); } postgrenosqlDriver = null; } } @Override public Status read(String tableName, String key, Set<String> fields, Map<String, ByteIterator> result) { try { StatementType type = new StatementType(StatementType.Type.READ, tableName, fields); PreparedStatement readStatement = cachedStatements.get(type); if (readStatement == null) { readStatement = createAndCacheReadStatement(type); } readStatement.setString(1, key); ResultSet resultSet = readStatement.executeQuery(); if (!resultSet.next()) { resultSet.close(); return Status.NOT_FOUND; } if (result != null) { if (fields == null){ do{ String field = resultSet.getString(2); String value = resultSet.getString(3); result.put(field, new StringByteIterator(value)); }while (resultSet.next()); } else { for (String field : fields) { String value = resultSet.getString(field); result.put(field, new StringByteIterator(value)); } } } resultSet.close(); return Status.OK; } catch (SQLException e) { LOG.error("Error in processing read of table " + tableName + ": " + e); return Status.ERROR; } } @Override public Status scan(String tableName, String startKey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { StatementType type = new StatementType(StatementType.Type.SCAN, tableName, fields); PreparedStatement scanStatement = cachedStatements.get(type); if (scanStatement == null) { scanStatement = createAndCacheScanStatement(type); } scanStatement.setString(1, startKey); scanStatement.setInt(2, recordcount); ResultSet resultSet = scanStatement.executeQuery(); for (int i = 0; i < recordcount && resultSet.next(); i++) { if (result != null && fields != null) { HashMap<String, ByteIterator> values = new HashMap<String, ByteIterator>(); for (String field : fields) { String value = resultSet.getString(field); values.put(field, new StringByteIterator(value)); } result.add(values); } } resultSet.close(); return Status.OK; } catch (SQLException e) { LOG.error("Error in processing scan of table: " + tableName + ": " + e); return Status.ERROR; } } @Override public Status update(String tableName, String key, Map<String, ByteIterator> values) { try{ StatementType type = new 
StatementType(StatementType.Type.UPDATE, tableName, null); PreparedStatement updateStatement = cachedStatements.get(type); if (updateStatement == null) { updateStatement = createAndCacheUpdateStatement(type); } JSONObject jsonObject = new JSONObject(); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { jsonObject.put(entry.getKey(), entry.getValue().toString()); } PGobject object = new PGobject(); object.setType("jsonb"); object.setValue(jsonObject.toJSONString()); updateStatement.setObject(1, object); updateStatement.setString(2, key); int result = updateStatement.executeUpdate(); if (result == 1) { return Status.OK; } return Status.UNEXPECTED_STATE; } catch (SQLException e) { LOG.error("Error in processing update to table: " + tableName + e); return Status.ERROR; } } @Override public Status insert(String tableName, String key, Map<String, ByteIterator> values) { try{ StatementType type = new StatementType(StatementType.Type.INSERT, tableName, null); PreparedStatement insertStatement = cachedStatements.get(type); if (insertStatement == null) { insertStatement = createAndCacheInsertStatement(type); } JSONObject jsonObject = new JSONObject(); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { jsonObject.put(entry.getKey(), entry.getValue().toString()); } PGobject object = new PGobject(); object.setType("jsonb"); object.setValue(jsonObject.toJSONString()); insertStatement.setObject(2, object); insertStatement.setString(1, key); int result = insertStatement.executeUpdate(); if (result == 1) { return Status.OK; } return Status.UNEXPECTED_STATE; } catch (SQLException e) { LOG.error("Error in processing insert to table: " + tableName + ": " + e); return Status.ERROR; } } @Override public Status delete(String tableName, String key) { try{ StatementType type = new StatementType(StatementType.Type.DELETE, tableName, null); PreparedStatement deleteStatement = cachedStatements.get(type); if (deleteStatement == null) { deleteStatement = createAndCacheDeleteStatement(type); } deleteStatement.setString(1, key); int result = deleteStatement.executeUpdate(); if (result == 1){ return Status.OK; } return Status.UNEXPECTED_STATE; } catch (SQLException e) { LOG.error("Error in processing delete to table: " + tableName + e); return Status.ERROR; } } private PreparedStatement createAndCacheReadStatement(StatementType readType) throws SQLException{ PreparedStatement readStatement = connection.prepareStatement(createReadStatement(readType)); PreparedStatement statement = cachedStatements.putIfAbsent(readType, readStatement); if (statement == null) { return readStatement; } return statement; } private String createReadStatement(StatementType readType){ StringBuilder read = new StringBuilder("SELECT " + PRIMARY_KEY + " AS " + PRIMARY_KEY); if (readType.getFields() == null) { read.append(", (jsonb_each_text(" + COLUMN_NAME + ")).*"); } else { for (String field:readType.getFields()){ read.append(", " + COLUMN_NAME + "->>'" + field + "' AS " + field); } } read.append(" FROM " + readType.getTableName()); read.append(" WHERE "); read.append(PRIMARY_KEY); read.append(" = "); read.append("?"); return read.toString(); } private PreparedStatement createAndCacheScanStatement(StatementType scanType) throws SQLException{ PreparedStatement scanStatement = connection.prepareStatement(createScanStatement(scanType)); PreparedStatement statement = cachedStatements.putIfAbsent(scanType, scanStatement); if (statement == null) { return scanStatement; } return statement; } private String 
createScanStatement(StatementType scanType){ StringBuilder scan = new StringBuilder("SELECT " + PRIMARY_KEY + " AS " + PRIMARY_KEY); if (scanType.getFields() != null){ for (String field:scanType.getFields()){ scan.append(", " + COLUMN_NAME + "->>'" + field + "' AS " + field); } } scan.append(" FROM " + scanType.getTableName()); scan.append(" WHERE "); scan.append(PRIMARY_KEY); scan.append(" >= ?"); scan.append(" ORDER BY "); scan.append(PRIMARY_KEY); scan.append(" LIMIT ?"); return scan.toString(); } public PreparedStatement createAndCacheUpdateStatement(StatementType updateType) throws SQLException{ PreparedStatement updateStatement = connection.prepareStatement(createUpdateStatement(updateType)); PreparedStatement statement = cachedStatements.putIfAbsent(updateType, updateStatement); if (statement == null) { return updateStatement; } return statement; } private String createUpdateStatement(StatementType updateType){ StringBuilder update = new StringBuilder("UPDATE "); update.append(updateType.getTableName()); update.append(" SET "); update.append(COLUMN_NAME + " = " + COLUMN_NAME); update.append(" || ? "); update.append(" WHERE "); update.append(PRIMARY_KEY); update.append(" = ?"); return update.toString(); } private PreparedStatement createAndCacheInsertStatement(StatementType insertType) throws SQLException{ PreparedStatement insertStatement = connection.prepareStatement(createInsertStatement(insertType)); PreparedStatement statement = cachedStatements.putIfAbsent(insertType, insertStatement); if (statement == null) { return insertStatement; } return statement; } private String createInsertStatement(StatementType insertType){ StringBuilder insert = new StringBuilder("INSERT INTO "); insert.append(insertType.getTableName()); insert.append(" (" + PRIMARY_KEY + "," + COLUMN_NAME + ")"); insert.append(" VALUES(?,?)"); return insert.toString(); } private PreparedStatement createAndCacheDeleteStatement(StatementType deleteType) throws SQLException{ PreparedStatement deleteStatement = connection.prepareStatement(createDeleteStatement(deleteType)); PreparedStatement statement = cachedStatements.putIfAbsent(deleteType, deleteStatement); if (statement == null) { return deleteStatement; } return statement; } private String createDeleteStatement(StatementType deleteType){ StringBuilder delete = new StringBuilder("DELETE FROM "); delete.append(deleteType.getTableName()); delete.append(" WHERE "); delete.append(PRIMARY_KEY); delete.append(" = ?"); return delete.toString(); } }
file_length: 13,961, avg_line_length: 33.053659, max_line_length: 106, extension_type: java
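PostgreNoSQLDBClient stores every YCSB record as a single row: the key goes into a YCSB_KEY column and all fields are folded into one YCSB_VALUE jsonb column, which updates then merge into with the || operator. The binding never creates the table itself (the test class only checks that usertable exists), so the table has to be set up beforehand. The sketch below shows one way to do that over plain JDBC; the column types are an assumption consistent with the SQL the client generates, not a schema taken from the repository, and the connection URL and credentials match the defaults in the test class.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

/** Illustrative setup: create the usertable expected by PostgreNoSQLDBClient. */
public final class CreateUsertable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection(
            "jdbc:postgresql://localhost:5432/test", "postgres", "postgres");
         Statement stmt = conn.createStatement()) {
      // YCSB_KEY matches PRIMARY_KEY and YCSB_VALUE matches COLUMN_NAME in the client.
      stmt.executeUpdate(
          "CREATE TABLE IF NOT EXISTS usertable ("
          + "YCSB_KEY VARCHAR(255) PRIMARY KEY, "
          + "YCSB_VALUE JSONB NOT NULL)");
    }
  }
}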
repo: null
file: NearPMSW-main/baseline/logging/YCSB/postgrenosql/src/main/java/site/ycsb/postgrenosql/StatementType.java
/*
 * Copyright 2017 YCSB Contributors. All Rights Reserved.
 *
 * CODE IS BASED ON the jdbc-binding StatementType class.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License. See accompanying
 * LICENSE file.
 */
package site.ycsb.postgrenosql;

import java.util.Set;

/**
 * The statement type for the prepared statements.
 */
public class StatementType {

  enum Type {
    INSERT(1), DELETE(2), READ(3), UPDATE(4), SCAN(5);

    private final int internalType;

    Type(int type) {
      internalType = type;
    }

    int getHashCode() {
      final int prime = 31;
      int result = 1;
      result = prime * result + internalType;
      return result;
    }
  }

  private Type type;
  private String tableName;
  private Set<String> fields;

  public StatementType(Type type, String tableName, Set<String> fields) {
    this.type = type;
    this.tableName = tableName;
    this.fields = fields;
  }

  public String getTableName() {
    return tableName;
  }

  public Set<String> getFields() {
    return fields;
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + ((type == null) ? 0 : type.getHashCode());
    result = prime * result + ((tableName == null) ? 0 : tableName.hashCode());
    result = prime * result + ((fields == null) ? 0 : fields.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    StatementType other = (StatementType) obj;
    if (type != other.type) {
      return false;
    }
    if (tableName == null) {
      if (other.tableName != null) {
        return false;
      }
    } else if (!tableName.equals(other.tableName)) {
      return false;
    }
    if (fields == null) {
      if (other.fields != null) {
        return false;
      }
    } else if (!fields.equals(other.fields)) {
      return false;
    }
    return true;
  }
}
file_length: 2,546, avg_line_length: 22.366972, max_line_length: 79, extension_type: java
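StatementType exists purely as a key for the prepared-statement cache in PostgreNoSQLDBClient: equals() and hashCode() compare the operation type, table name, and field set, so two operations with the same shape reuse the same PreparedStatement. A small illustration of those key semantics (the Type enum is package-private, so this hypothetical demo class would live in site.ycsb.postgrenosql):

package site.ycsb.postgrenosql;

import java.util.HashSet;
import java.util.Set;

public final class StatementTypeDemo {
  public static void main(String[] args) {
    Set<String> fields = new HashSet<>();
    fields.add("FIELD0");

    StatementType a = new StatementType(StatementType.Type.READ, "usertable", fields);
    StatementType b = new StatementType(StatementType.Type.READ, "usertable", new HashSet<>(fields));
    StatementType c = new StatementType(StatementType.Type.SCAN, "usertable", fields);

    System.out.println(a.equals(b)); // true  -> the same cached READ statement is reused
    System.out.println(a.equals(c)); // false -> a separate SCAN statement is prepared and cached
  }
}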
repo: null
file: NearPMSW-main/baseline/logging/YCSB/hbase1/src/test/java/site/ycsb/db/hbase1/HBaseClient1Test.java
/** * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.hbase1; import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY; import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.junit.Assume.assumeTrue; import site.ycsb.ByteIterator; import site.ycsb.Status; import site.ycsb.StringByteIterator; import site.ycsb.measurements.Measurements; import site.ycsb.workloads.CoreWorkload; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.util.Bytes; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Properties; import java.util.Vector; /** * Integration tests for the YCSB HBase 1 client, using an HBase minicluster. */ public class HBaseClient1Test { private final static String COLUMN_FAMILY = "cf"; private static HBaseTestingUtility testingUtil; private HBaseClient1 client; private Table table = null; private String tableName; private static boolean isWindows() { final String os = System.getProperty("os.name"); return os.startsWith("Windows"); } /** * Creates a mini-cluster for use in these tests. * * This is a heavy-weight operation, so invoked only once for the test class. */ @BeforeClass public static void setUpClass() throws Exception { // Minicluster setup fails on Windows with an UnsatisfiedLinkError. // Skip if windows. assumeTrue(!isWindows()); testingUtil = HBaseTestingUtility.createLocalHTU(); testingUtil.startMiniCluster(); } /** * Tears down mini-cluster. */ @AfterClass public static void tearDownClass() throws Exception { if (testingUtil != null) { testingUtil.shutdownMiniCluster(); } } /** * Sets up the mini-cluster for testing. * * We re-create the table for each test. 
*/ @Before public void setUp() throws Exception { client = new HBaseClient1(); client.setConfiguration(new Configuration(testingUtil.getConfiguration())); Properties p = new Properties(); p.setProperty("columnfamily", COLUMN_FAMILY); Measurements.setProperties(p); final CoreWorkload workload = new CoreWorkload(); workload.init(p); tableName = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); table = testingUtil.createTable(TableName.valueOf(tableName), Bytes.toBytes(COLUMN_FAMILY)); client.setProperties(p); client.init(); } @After public void tearDown() throws Exception { table.close(); testingUtil.deleteTable(tableName); } @Test public void testRead() throws Exception { final String rowKey = "row1"; final Put p = new Put(Bytes.toBytes(rowKey)); p.addColumn(Bytes.toBytes(COLUMN_FAMILY), Bytes.toBytes("column1"), Bytes.toBytes("value1")); p.addColumn(Bytes.toBytes(COLUMN_FAMILY), Bytes.toBytes("column2"), Bytes.toBytes("value2")); table.put(p); final HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>(); final Status status = client.read(tableName, rowKey, null, result); assertEquals(Status.OK, status); assertEquals(2, result.size()); assertEquals("value1", result.get("column1").toString()); assertEquals("value2", result.get("column2").toString()); } @Test public void testReadMissingRow() throws Exception { final HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>(); final Status status = client.read(tableName, "Missing row", null, result); assertEquals(Status.NOT_FOUND, status); assertEquals(0, result.size()); } @Test public void testScan() throws Exception { // Fill with data final String colStr = "row_number"; final byte[] col = Bytes.toBytes(colStr); final int n = 10; final List<Put> puts = new ArrayList<Put>(n); for(int i = 0; i < n; i++) { final byte[] key = Bytes.toBytes(String.format("%05d", i)); final byte[] value = java.nio.ByteBuffer.allocate(4).putInt(i).array(); final Put p = new Put(key); p.addColumn(Bytes.toBytes(COLUMN_FAMILY), col, value); puts.add(p); } table.put(puts); // Test final Vector<HashMap<String, ByteIterator>> result = new Vector<HashMap<String, ByteIterator>>(); // Scan 5 records, skipping the first client.scan(tableName, "00001", 5, null, result); assertEquals(5, result.size()); for(int i = 0; i < 5; i++) { final HashMap<String, ByteIterator> row = result.get(i); assertEquals(1, row.size()); assertTrue(row.containsKey(colStr)); final byte[] bytes = row.get(colStr).toArray(); final ByteBuffer buf = ByteBuffer.wrap(bytes); final int rowNum = buf.getInt(); assertEquals(i + 1, rowNum); } } @Test public void testUpdate() throws Exception{ final String key = "key"; final HashMap<String, String> input = new HashMap<String, String>(); input.put("column1", "value1"); input.put("column2", "value2"); final Status status = client.insert(tableName, key, StringByteIterator.getByteIteratorMap(input)); assertEquals(Status.OK, status); // Verify result final Get get = new Get(Bytes.toBytes(key)); final Result result = this.table.get(get); assertFalse(result.isEmpty()); assertEquals(2, result.size()); for(final java.util.Map.Entry<String, String> entry : input.entrySet()) { assertEquals(entry.getValue(), new String(result.getValue(Bytes.toBytes(COLUMN_FAMILY), Bytes.toBytes(entry.getKey())))); } } @Test @Ignore("Not yet implemented") public void testDelete() { fail("Not yet implemented"); } }
file_length: 6,848, avg_line_length: 31.004673, max_line_length: 102, extension_type: java
repo: null
file: NearPMSW-main/baseline/logging/YCSB/hbase1/src/main/java/site/ycsb/db/hbase1/package-info.java
/*
 * Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License. See accompanying
 * LICENSE file.
 */

/**
 * The YCSB binding for <a href="https://hbase.apache.org/">HBase</a>
 * using the HBase 1 shaded API.
 */
package site.ycsb.db.hbase1;
file_length: 795, avg_line_length: 32.166667, max_line_length: 70, extension_type: java
repo: null
file: NearPMSW-main/baseline/logging/YCSB/hbase1/src/main/java/site/ycsb/db/hbase1/HBaseClient1.java
/** * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.hbase1; import site.ycsb.ByteArrayByteIterator; import site.ycsb.ByteIterator; import site.ycsb.DBException; import site.ycsb.Status; import site.ycsb.measurements.Measurements; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.BufferedMutatorParams; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.PageFilter; import org.apache.hadoop.hbase.util.Bytes; import java.io.IOException; import java.util.ConcurrentModificationException; import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.Vector; import java.util.concurrent.atomic.AtomicInteger; import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY; import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; /** * HBase 1 client for YCSB framework. * * Intended for use with HBase's shaded client. */ public class HBaseClient1 extends site.ycsb.DB { private static final AtomicInteger THREAD_COUNT = new AtomicInteger(0); private Configuration config = HBaseConfiguration.create(); private boolean debug = false; private String tableName = ""; /** * A Cluster Connection instance that is shared by all running ycsb threads. * Needs to be initialized late so we pick up command-line configs if any. * To ensure one instance only in a multi-threaded context, guard access * with a 'lock' object. * @See #CONNECTION_LOCK. */ private static Connection connection = null; // Depending on the value of clientSideBuffering, either bufferedMutator // (clientSideBuffering) or currentTable (!clientSideBuffering) will be used. private Table currentTable = null; private BufferedMutator bufferedMutator = null; private String columnFamily = ""; private byte[] columnFamilyBytes; /** * Durability to use for puts and deletes. */ private Durability durability = Durability.USE_DEFAULT; /** Whether or not a page filter should be used to limit scan length. */ private boolean usePageFilter = true; /** * If true, buffer mutations on the client. This is the default behavior for * HBaseClient. For measuring insert/update/delete latencies, client side * buffering should be disabled. 
*/ private boolean clientSideBuffering = false; private long writeBufferSize = 1024 * 1024 * 12; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. */ @Override public void init() throws DBException { if ("true" .equals(getProperties().getProperty("clientbuffering", "false"))) { this.clientSideBuffering = true; } if (getProperties().containsKey("writebuffersize")) { writeBufferSize = Long.parseLong(getProperties().getProperty("writebuffersize")); } if (getProperties().getProperty("durability") != null) { this.durability = Durability.valueOf(getProperties().getProperty("durability")); } if ("kerberos".equalsIgnoreCase(config.get("hbase.security.authentication"))) { config.set("hadoop.security.authentication", "Kerberos"); UserGroupInformation.setConfiguration(config); } if ((getProperties().getProperty("principal")!=null) && (getProperties().getProperty("keytab")!=null)) { try { UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"), getProperties().getProperty("keytab")); } catch (IOException e) { System.err.println("Keytab file is not readable or not found"); throw new DBException(e); } } String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); try { THREAD_COUNT.getAndIncrement(); synchronized (THREAD_COUNT) { if (connection == null) { // Initialize if not set up already. connection = ConnectionFactory.createConnection(config); // Terminate right now if table does not exist, since the client // will not propagate this error upstream once the workload // starts. final TableName tName = TableName.valueOf(table); try (Admin admin = connection.getAdmin()) { if (!admin.tableExists(tName)) { throw new DBException("Table " + tName + " does not exists"); } } } } } catch (java.io.IOException e) { throw new DBException(e); } if ((getProperties().getProperty("debug") != null) && (getProperties().getProperty("debug").compareTo("true") == 0)) { debug = true; } if ("false" .equals(getProperties().getProperty("hbase.usepagefilter", "true"))) { usePageFilter = false; } columnFamily = getProperties().getProperty("columnfamily"); if (columnFamily == null) { System.err.println("Error, must specify a columnfamily for HBase table"); throw new DBException("No columnfamily specified"); } columnFamilyBytes = Bytes.toBytes(columnFamily); } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { // Get the measurements instance as this is the only client that should // count clean up time like an update if client-side buffering is // enabled. Measurements measurements = Measurements.getMeasurements(); try { long st = System.nanoTime(); if (bufferedMutator != null) { bufferedMutator.close(); } if (currentTable != null) { currentTable.close(); } long en = System.nanoTime(); final String type = clientSideBuffering ? "UPDATE" : "CLEANUP"; measurements.measure(type, (int) ((en - st) / 1000)); int threadCount = THREAD_COUNT.decrementAndGet(); if (threadCount <= 0) { // Means we are done so ok to shut down the Connection. 
synchronized (THREAD_COUNT) { if (connection != null) { connection.close(); connection = null; } } } } catch (IOException e) { throw new DBException(e); } } public void getHTable(String table) throws IOException { final TableName tName = TableName.valueOf(table); this.currentTable = connection.getTable(tName); if (clientSideBuffering) { final BufferedMutatorParams p = new BufferedMutatorParams(tName); p.writeBufferSize(writeBufferSize); this.bufferedMutator = connection.getBufferedMutator(p); } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { // if this is a "new" table, init HTable object. Else, use existing one if (!tableName.equals(table)) { currentTable = null; try { getHTable(table); tableName = table; } catch (IOException e) { System.err.println("Error accessing HBase table: " + e); return Status.ERROR; } } Result r = null; try { if (debug) { System.out .println("Doing read from HBase columnfamily " + columnFamily); System.out.println("Doing read for key: " + key); } Get g = new Get(Bytes.toBytes(key)); if (fields == null) { g.addFamily(columnFamilyBytes); } else { for (String field : fields) { g.addColumn(columnFamilyBytes, Bytes.toBytes(field)); } } r = currentTable.get(g); } catch (IOException e) { if (debug) { System.err.println("Error doing get: " + e); } return Status.ERROR; } catch (ConcurrentModificationException e) { // do nothing for now...need to understand HBase concurrency model better return Status.ERROR; } if (r.isEmpty()) { return Status.NOT_FOUND; } while (r.advance()) { final Cell c = r.current(); result.put(Bytes.toString(CellUtil.cloneQualifier(c)), new ByteArrayByteIterator(CellUtil.cloneValue(c))); if (debug) { System.out.println( "Result for field: " + Bytes.toString(CellUtil.cloneQualifier(c)) + " is: " + Bytes.toString(CellUtil.cloneValue(c))); } } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // if this is a "new" table, init HTable object. Else, use existing one if (!tableName.equals(table)) { currentTable = null; try { getHTable(table); tableName = table; } catch (IOException e) { System.err.println("Error accessing HBase table: " + e); return Status.ERROR; } } Scan s = new Scan(Bytes.toBytes(startkey)); // HBase has no record limit. Here, assume recordcount is small enough to // bring back in one call. 
// We get back recordcount records s.setCaching(recordcount); if (this.usePageFilter) { s.setFilter(new PageFilter(recordcount)); } // add specified fields or else all fields if (fields == null) { s.addFamily(columnFamilyBytes); } else { for (String field : fields) { s.addColumn(columnFamilyBytes, Bytes.toBytes(field)); } } // get results ResultScanner scanner = null; try { scanner = currentTable.getScanner(s); int numResults = 0; for (Result rr = scanner.next(); rr != null; rr = scanner.next()) { // get row key String key = Bytes.toString(rr.getRow()); if (debug) { System.out.println("Got scan result for key: " + key); } HashMap<String, ByteIterator> rowResult = new HashMap<String, ByteIterator>(); while (rr.advance()) { final Cell cell = rr.current(); rowResult.put(Bytes.toString(CellUtil.cloneQualifier(cell)), new ByteArrayByteIterator(CellUtil.cloneValue(cell))); } // add rowResult to result vector result.add(rowResult); numResults++; // PageFilter does not guarantee that the number of results is <= // pageSize, so this // break is required. if (numResults >= recordcount) {// if hit recordcount, bail out break; } } // done with row } catch (IOException e) { if (debug) { System.out.println("Error in getting/parsing scan result: " + e); } return Status.ERROR; } finally { if (scanner != null) { scanner.close(); } } return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { // if this is a "new" table, init HTable object. Else, use existing one if (!tableName.equals(table)) { currentTable = null; try { getHTable(table); tableName = table; } catch (IOException e) { System.err.println("Error accessing HBase table: " + e); return Status.ERROR; } } if (debug) { System.out.println("Setting up put for key: " + key); } Put p = new Put(Bytes.toBytes(key)); p.setDurability(durability); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { byte[] value = entry.getValue().toArray(); if (debug) { System.out.println("Adding field/value " + entry.getKey() + "/" + Bytes.toStringBinary(value) + " to put request"); } p.addColumn(columnFamilyBytes, Bytes.toBytes(entry.getKey()), value); } try { if (clientSideBuffering) { // removed Preconditions.checkNotNull, which throws NPE, in favor of NPE on next line bufferedMutator.mutate(p); } else { currentTable.put(p); } } catch (IOException e) { if (debug) { System.err.println("Error doing put: " + e); } return Status.ERROR; } catch (ConcurrentModificationException e) { // do nothing for now...hope this is rare return Status.ERROR; } return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. 
* @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return update(table, key, values); } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ @Override public Status delete(String table, String key) { // if this is a "new" table, init HTable object. Else, use existing one if (!tableName.equals(table)) { currentTable = null; try { getHTable(table); tableName = table; } catch (IOException e) { System.err.println("Error accessing HBase table: " + e); return Status.ERROR; } } if (debug) { System.out.println("Doing delete for key: " + key); } final Delete d = new Delete(Bytes.toBytes(key)); d.setDurability(durability); try { if (clientSideBuffering) { // removed Preconditions.checkNotNull, which throws NPE, in favor of NPE on next line bufferedMutator.mutate(d); } else { currentTable.delete(d); } } catch (IOException e) { if (debug) { System.err.println("Error doing delete: " + e); } return Status.ERROR; } return Status.OK; } // Only non-private for testing. void setConfiguration(final Configuration newConfig) { this.config = newConfig; } } /* * For customized vim control set autoindent set si set shiftwidth=4 */
17,246
31.480226
95
java
null
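The HBase binding above shares one Connection across all client threads and, when clientbuffering is enabled, routes writes through a BufferedMutator instead of Table.put(). A minimal standalone sketch of that buffered-write path, assuming a reachable HBase instance and a pre-created table named usertable with column family cf (both placeholders, not part of the binding):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public final class BufferedWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // One shared Connection per process, mirroring the THREAD_COUNT guard above.
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf("usertable"))
          .writeBufferSize(12L * 1024 * 1024); // same 12 MB default as the binding
      try (BufferedMutator mutator = connection.getBufferedMutator(params)) {
        Put put = new Put(Bytes.toBytes("user0"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("field0"), Bytes.toBytes("value0"));
        mutator.mutate(put); // buffered client-side
        mutator.flush();     // actually sent here (or on close)
      }
    }
  }
}

Flushing or closing the mutator is what pushes the buffered mutations to the region servers, which is why the binding's cleanup() counts the close as an UPDATE when buffering is on.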
NearPMSW-main/baseline/logging/YCSB/elasticsearch5/src/test/java/site/ycsb/db/elasticsearch5/ElasticsearchIntegTestBase.java
/* * Copyright (c) 2017 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.elasticsearch5; import site.ycsb.ByteIterator; import site.ycsb.Client; import site.ycsb.DB; import site.ycsb.DBException; import site.ycsb.Status; import site.ycsb.StringByteIterator; import site.ycsb.workloads.CoreWorkload; import org.junit.After; import org.junit.Before; import org.junit.Test; import java.util.HashMap; import java.util.Properties; import java.util.Set; import java.util.Vector; import static org.junit.Assert.assertEquals; public abstract class ElasticsearchIntegTestBase { private DB db; abstract DB newDB(); private final static HashMap<String, ByteIterator> MOCK_DATA; private final static String MOCK_TABLE = "MOCK_TABLE"; private final static String FIELD_PREFIX = CoreWorkload.FIELD_NAME_PREFIX_DEFAULT; static { MOCK_DATA = new HashMap<>(10); for (int i = 1; i <= 10; i++) { MOCK_DATA.put(FIELD_PREFIX + i, new StringByteIterator("value" + i)); } } @Before public void setUp() throws DBException { final Properties props = new Properties(); props.put("es.new_index", "true"); props.put("es.setting.cluster.name", "test"); db = newDB(); db.setProperties(props); db.init(); for (int i = 0; i < 16; i++) { db.insert(MOCK_TABLE, Integer.toString(i), MOCK_DATA); } } @After public void tearDown() throws DBException { db.cleanup(); } @Test public void testInsert() { final Status result = db.insert(MOCK_TABLE, "0", MOCK_DATA); assertEquals(Status.OK, result); } /** * Test of delete method, of class ElasticsearchClient. */ @Test public void testDelete() { final Status result = db.delete(MOCK_TABLE, "1"); assertEquals(Status.OK, result); } /** * Test of read method, of class ElasticsearchClient. */ @Test public void testRead() { final Set<String> fields = MOCK_DATA.keySet(); final HashMap<String, ByteIterator> resultParam = new HashMap<>(10); final Status result = db.read(MOCK_TABLE, "1", fields, resultParam); assertEquals(Status.OK, result); } /** * Test of update method, of class ElasticsearchClient. */ @Test public void testUpdate() { final HashMap<String, ByteIterator> newValues = new HashMap<>(10); for (int i = 1; i <= 10; i++) { newValues.put(FIELD_PREFIX + i, new StringByteIterator("newvalue" + i)); } final Status updateResult = db.update(MOCK_TABLE, "1", newValues); assertEquals(Status.OK, updateResult); // validate that the values changed final HashMap<String, ByteIterator> resultParam = new HashMap<>(10); final Status readResult = db.read(MOCK_TABLE, "1", MOCK_DATA.keySet(), resultParam); assertEquals(Status.OK, readResult); for (int i = 1; i <= 10; i++) { assertEquals("newvalue" + i, resultParam.get(FIELD_PREFIX + i).toString()); } } /** * Test of scan method, of class ElasticsearchClient. 
*/ @Test public void testScan() { final int recordcount = 10; final Set<String> fields = MOCK_DATA.keySet(); final Vector<HashMap<String, ByteIterator>> resultParam = new Vector<>(10); final Status result = db.scan(MOCK_TABLE, "1", recordcount, fields, resultParam); assertEquals(Status.OK, result); assertEquals(10, resultParam.size()); } }
4,203
29.463768
92
java
null
NearPMSW-main/baseline/logging/YCSB/elasticsearch5/src/test/java/site/ycsb/db/elasticsearch5/ElasticsearchClientIT.java
/** * Copyright (c) 2017 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.elasticsearch5; import site.ycsb.DB; public class ElasticsearchClientIT extends ElasticsearchIntegTestBase { @Override DB newDB() { return new ElasticsearchClient(); } }
874
28.166667
71
java
null
NearPMSW-main/baseline/logging/YCSB/elasticsearch5/src/test/java/site/ycsb/db/elasticsearch5/ElasticsearchRestClientIT.java
/** * Copyright (c) 2017 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.elasticsearch5; import site.ycsb.DB; public class ElasticsearchRestClientIT extends ElasticsearchIntegTestBase { @Override DB newDB() { return new ElasticsearchRestClient(); } }
872
28.1
75
java
null
NearPMSW-main/baseline/logging/YCSB/elasticsearch5/src/main/java/site/ycsb/db/elasticsearch5/package-info.java
/* * Copyright (c) 2017 YCSB Contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * The YCSB binding for * <a href="https://www.elastic.co/products/elasticsearch">Elasticsearch</a>. */ package site.ycsb.db.elasticsearch5;
807
32.666667
77
java
null
NearPMSW-main/baseline/logging/YCSB/elasticsearch5/src/main/java/site/ycsb/db/elasticsearch5/ElasticsearchRestClient.java
/* * Copyright (c) 2017 YCSB contributors. All rights reserved. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.elasticsearch5; import site.ycsb.ByteIterator; import site.ycsb.DB; import site.ycsb.DBException; import site.ycsb.Status; import site.ycsb.StringByteIterator; import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.HttpStatus; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHeader; import org.apache.http.nio.entity.NStringEntity; import org.codehaus.jackson.map.ObjectMapper; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; import static site.ycsb.db.elasticsearch5.Elasticsearch5.KEY; import static site.ycsb.db.elasticsearch5.Elasticsearch5.parseIntegerProperty; import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; /** * Elasticsearch REST client for YCSB framework. */ public class ElasticsearchRestClient extends DB { private static final String DEFAULT_INDEX_KEY = "es.ycsb"; private static final String DEFAULT_REMOTE_HOST = "localhost:9200"; private static final int NUMBER_OF_SHARDS = 1; private static final int NUMBER_OF_REPLICAS = 0; private RestClient restClient; private String indexKey; /** * * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. 
*/ @Override public void init() throws DBException { final Properties props = getProperties(); this.indexKey = props.getProperty("es.index.key", DEFAULT_INDEX_KEY); final int numberOfShards = parseIntegerProperty(props, "es.number_of_shards", NUMBER_OF_SHARDS); final int numberOfReplicas = parseIntegerProperty(props, "es.number_of_replicas", NUMBER_OF_REPLICAS); final Boolean newIndex = Boolean.parseBoolean(props.getProperty("es.new_index", "false")); final String[] nodeList = props.getProperty("es.hosts.list", DEFAULT_REMOTE_HOST).split(","); final List<HttpHost> esHttpHosts = new ArrayList<>(nodeList.length); for (String h : nodeList) { String[] nodes = h.split(":"); esHttpHosts.add(new HttpHost(nodes[0], Integer.valueOf(nodes[1]), "http")); } restClient = RestClient.builder(esHttpHosts.toArray(new HttpHost[esHttpHosts.size()])).build(); final Response existsResponse = performRequest(restClient, "HEAD", "/" + indexKey); final boolean exists = existsResponse.getStatusLine().getStatusCode() == HttpStatus.SC_OK; if (exists && newIndex) { final Response deleteResponse = performRequest(restClient, "DELETE", "/" + indexKey); final int statusCode = deleteResponse.getStatusLine().getStatusCode(); if (statusCode != HttpStatus.SC_OK) { throw new DBException("delete [" + indexKey + "] failed with status [" + statusCode + "]"); } } if (!exists || newIndex) { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); builder.startObject("settings"); builder.field("index.number_of_shards", numberOfShards); builder.field("index.number_of_replicas", numberOfReplicas); builder.endObject(); builder.endObject(); final Map<String, String> params = emptyMap(); final StringEntity entity = new StringEntity(builder.string()); final Response createResponse = performRequest(restClient, "PUT", "/" + indexKey, params, entity); final int statusCode = createResponse.getStatusLine().getStatusCode(); if (statusCode != HttpStatus.SC_OK) { throw new DBException("create [" + indexKey + "] failed with status [" + statusCode + "]"); } } catch (final IOException e) { throw new DBException(e); } } final Map<String, String> params = Collections.singletonMap("wait_for_status", "green"); final Response healthResponse = performRequest(restClient, "GET", "/_cluster/health/" + indexKey, params); final int healthStatusCode = healthResponse.getStatusLine().getStatusCode(); if (healthStatusCode != HttpStatus.SC_OK) { throw new DBException("cluster health [" + indexKey + "] failed with status [" + healthStatusCode + "]"); } } private static Response performRequest( final RestClient restClient, final String method, final String endpoint) throws DBException { final Map<String, String> params = emptyMap(); return performRequest(restClient, method, endpoint, params); } private static Response performRequest( final RestClient restClient, final String method, final String endpoint, final Map<String, String> params) throws DBException { return performRequest(restClient, method, endpoint, params, null); } private static final Header[] EMPTY_HEADERS = new Header[0]; private static Response performRequest( final RestClient restClient, final String method, final String endpoint, final Map<String, String> params, final HttpEntity entity) throws DBException { try { final Header[] headers; if (entity != null) { headers = new Header[]{new BasicHeader("content-type", ContentType.APPLICATION_JSON.toString())}; } else { headers = EMPTY_HEADERS; } return restClient.performRequest( method, endpoint, params, entity, headers); } catch (final 
IOException e) { e.printStackTrace(); throw new DBException(e); } } @Override public void cleanup() throws DBException { if (restClient != null) { try { restClient.close(); restClient = null; } catch (final IOException e) { throw new DBException(e); } } } private volatile boolean isRefreshNeeded = false; @Override public Status insert(final String table, final String key, final Map<String, ByteIterator> values) { try { final Map<String, String> data = StringByteIterator.getStringMap(values); data.put(KEY, key); final Response response = restClient.performRequest( "POST", "/" + indexKey + "/" + table + "/", Collections.<String, String>emptyMap(), new NStringEntity(new ObjectMapper().writeValueAsString(data), ContentType.APPLICATION_JSON)); if (response.getStatusLine().getStatusCode() != HttpStatus.SC_CREATED) { return Status.ERROR; } if (!isRefreshNeeded) { synchronized (this) { isRefreshNeeded = true; } } return Status.OK; } catch (final Exception e) { e.printStackTrace(); return Status.ERROR; } } @Override public Status delete(final String table, final String key) { try { final Response searchResponse = search(table, key); final int statusCode = searchResponse.getStatusLine().getStatusCode(); if (statusCode == HttpStatus.SC_NOT_FOUND) { return Status.NOT_FOUND; } else if (statusCode != HttpStatus.SC_OK) { return Status.ERROR; } final Map<String, Object> map = map(searchResponse); @SuppressWarnings("unchecked") final Map<String, Object> hits = (Map<String, Object>)map.get("hits"); final int total = (int)hits.get("total"); if (total == 0) { return Status.NOT_FOUND; } @SuppressWarnings("unchecked") final Map<String, Object> hit = (Map<String, Object>)((List<Object>)hits.get("hits")).get(0); final Response deleteResponse = restClient.performRequest("DELETE", "/" + indexKey + "/" + table + "/" + hit.get("_id")); if (deleteResponse.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { return Status.ERROR; } if (!isRefreshNeeded) { synchronized (this) { isRefreshNeeded = true; } } return Status.OK; } catch (final Exception e) { e.printStackTrace(); return Status.ERROR; } } @Override public Status read( final String table, final String key, final Set<String> fields, final Map<String, ByteIterator> result) { try { final Response searchResponse = search(table, key); final int statusCode = searchResponse.getStatusLine().getStatusCode(); if (statusCode == 404) { return Status.NOT_FOUND; } else if (statusCode != HttpStatus.SC_OK) { return Status.ERROR; } final Map<String, Object> map = map(searchResponse); @SuppressWarnings("unchecked") final Map<String, Object> hits = (Map<String, Object>)map.get("hits"); final int total = (int)hits.get("total"); if (total == 0) { return Status.NOT_FOUND; } @SuppressWarnings("unchecked") final Map<String, Object> hit = (Map<String, Object>)((List<Object>)hits.get("hits")).get(0); @SuppressWarnings("unchecked") final Map<String, Object> source = (Map<String, Object>)hit.get("_source"); if (fields != null) { for (final String field : fields) { result.put(field, new StringByteIterator((String) source.get(field))); } } else { for (final Map.Entry<String, Object> e : source.entrySet()) { if (KEY.equals(e.getKey())) { continue; } result.put(e.getKey(), new StringByteIterator((String) e.getValue())); } } return Status.OK; } catch (final Exception e) { e.printStackTrace(); return Status.ERROR; } } @Override public Status update(final String table, final String key, final Map<String, ByteIterator> values) { try { final Response searchResponse = search(table, key); final int 
statusCode = searchResponse.getStatusLine().getStatusCode(); if (statusCode == 404) { return Status.NOT_FOUND; } else if (statusCode != HttpStatus.SC_OK) { return Status.ERROR; } final Map<String, Object> map = map(searchResponse); @SuppressWarnings("unchecked") final Map<String, Object> hits = (Map<String, Object>) map.get("hits"); final int total = (int) hits.get("total"); if (total == 0) { return Status.NOT_FOUND; } @SuppressWarnings("unchecked") final Map<String, Object> hit = (Map<String, Object>) ((List<Object>) hits.get("hits")).get(0); @SuppressWarnings("unchecked") final Map<String, Object> source = (Map<String, Object>) hit.get("_source"); for (final Map.Entry<String, String> entry : StringByteIterator.getStringMap(values).entrySet()) { source.put(entry.getKey(), entry.getValue()); } final Map<String, String> params = emptyMap(); final Response response = restClient.performRequest( "PUT", "/" + indexKey + "/" + table + "/" + hit.get("_id"), params, new NStringEntity(new ObjectMapper().writeValueAsString(source), ContentType.APPLICATION_JSON)); if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { return Status.ERROR; } if (!isRefreshNeeded) { synchronized (this) { isRefreshNeeded = true; } } return Status.OK; } catch (final Exception e) { e.printStackTrace(); return Status.ERROR; } } @Override public Status scan( final String table, final String startkey, final int recordcount, final Set<String> fields, final Vector<HashMap<String, ByteIterator>> result) { try { final Response response; try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); builder.startObject("query"); builder.startObject("range"); builder.startObject(KEY); builder.field("gte", startkey); builder.endObject(); builder.endObject(); builder.endObject(); builder.field("size", recordcount); builder.endObject(); response = search(table, builder); @SuppressWarnings("unchecked") final Map<String, Object> map = map(response); @SuppressWarnings("unchecked") final Map<String, Object> hits = (Map<String, Object>)map.get("hits"); @SuppressWarnings("unchecked") final List<Map<String, Object>> list = (List<Map<String, Object>>) hits.get("hits"); for (final Map<String, Object> hit : list) { @SuppressWarnings("unchecked") final Map<String, Object> source = (Map<String, Object>)hit.get("_source"); final HashMap<String, ByteIterator> entry; if (fields != null) { entry = new HashMap<>(fields.size()); for (final String field : fields) { entry.put(field, new StringByteIterator((String) source.get(field))); } } else { entry = new HashMap<>(hit.size()); for (final Map.Entry<String, Object> field : source.entrySet()) { if (KEY.equals(field.getKey())) { continue; } entry.put(field.getKey(), new StringByteIterator((String) field.getValue())); } } result.add(entry); } } return Status.OK; } catch (final Exception e) { e.printStackTrace(); return Status.ERROR; } } private void refreshIfNeeded() throws IOException { if (isRefreshNeeded) { final boolean refresh; synchronized (this) { if (isRefreshNeeded) { refresh = true; isRefreshNeeded = false; } else { refresh = false; } } if (refresh) { restClient.performRequest("POST", "/" + indexKey + "/_refresh"); } } } private Response search(final String table, final String key) throws IOException { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); builder.startObject("query"); builder.startObject("term"); builder.field(KEY, key); builder.endObject(); builder.endObject(); builder.endObject(); return search(table, builder); } } private Response 
search(final String table, final XContentBuilder builder) throws IOException { refreshIfNeeded(); final Map<String, String> params = emptyMap(); final StringEntity entity = new StringEntity(builder.string()); final Header header = new BasicHeader("content-type", ContentType.APPLICATION_JSON.toString()); return restClient.performRequest("GET", "/" + indexKey + "/" + table + "/_search", params, entity, header); } private Map<String, Object> map(final Response response) throws IOException { try (InputStream is = response.getEntity().getContent()) { final ObjectMapper mapper = new ObjectMapper(); @SuppressWarnings("unchecked") final Map<String, Object> map = mapper.readValue(is, Map.class); return map; } } }
16,038
35.042697
116
java
null
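The REST binding above drives Elasticsearch 5.x through the low-level RestClient, sending hand-built JSON bodies via performRequest(). A small sketch of that call pattern, assuming a local node on port 9200; the index, type, and key values are placeholders matching the binding's defaults rather than anything mandated by the client:

import java.util.Collections;
import java.util.Map;
import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.message.BasicHeader;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public final class RestClientSketch {
  public static void main(String[] args) throws Exception {
    try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
      // Same term-query-by-key shape the binding's search(table, key) helper builds.
      String body = "{\"query\":{\"term\":{\"key\":\"user0\"}}}";
      Map<String, String> params = Collections.emptyMap();
      Response response = client.performRequest(
          "GET", "/es.ycsb/usertable/_search", params,
          new NStringEntity(body, ContentType.APPLICATION_JSON),
          new BasicHeader("content-type", ContentType.APPLICATION_JSON.toString()));
      System.out.println(response.getStatusLine());
    }
  }
}

The explicit content-type header mirrors what the binding does whenever it attaches an entity, since the 5.x low-level client does not set one for you.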
NearPMSW-main/baseline/logging/YCSB/elasticsearch5/src/main/java/site/ycsb/db/elasticsearch5/ElasticsearchClient.java
/* * Copyright (c) 2017 YCSB contributors. All rights reserved. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.elasticsearch5; import site.ycsb.ByteIterator; import site.ycsb.DB; import site.ycsb.DBException; import site.ycsb.Status; import site.ycsb.StringByteIterator; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Requests; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.search.SearchHit; import org.elasticsearch.transport.client.PreBuiltTransportClient; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; import java.util.Vector; import static site.ycsb.db.elasticsearch5.Elasticsearch5.KEY; import static site.ycsb.db.elasticsearch5.Elasticsearch5.parseIntegerProperty; import static org.elasticsearch.common.settings.Settings.Builder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; /** * Elasticsearch client for YCSB framework. */ public class ElasticsearchClient extends DB { private static final String DEFAULT_CLUSTER_NAME = "es.ycsb.cluster"; private static final String DEFAULT_INDEX_KEY = "es.ycsb"; private static final String DEFAULT_REMOTE_HOST = "localhost:9300"; private static final int NUMBER_OF_SHARDS = 1; private static final int NUMBER_OF_REPLICAS = 0; private TransportClient client; private String indexKey; /** * * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. */ @Override public void init() throws DBException { final Properties props = getProperties(); this.indexKey = props.getProperty("es.index.key", DEFAULT_INDEX_KEY); final int numberOfShards = parseIntegerProperty(props, "es.number_of_shards", NUMBER_OF_SHARDS); final int numberOfReplicas = parseIntegerProperty(props, "es.number_of_replicas", NUMBER_OF_REPLICAS); final Boolean newIndex = Boolean.parseBoolean(props.getProperty("es.new_index", "false")); final Builder settings = Settings.builder().put("cluster.name", DEFAULT_CLUSTER_NAME); // if properties file contains elasticsearch user defined properties // add it to the settings file (will overwrite the defaults). 
for (final Entry<Object, Object> e : props.entrySet()) { if (e.getKey() instanceof String) { final String key = (String) e.getKey(); if (key.startsWith("es.setting.")) { settings.put(key.substring("es.setting.".length()), e.getValue()); } } } settings.put("client.transport.sniff", true) .put("client.transport.ignore_cluster_name", false) .put("client.transport.ping_timeout", "30s") .put("client.transport.nodes_sampler_interval", "30s"); // Default it to localhost:9300 final String[] nodeList = props.getProperty("es.hosts.list", DEFAULT_REMOTE_HOST).split(","); client = new PreBuiltTransportClient(settings.build()); for (String h : nodeList) { String[] nodes = h.split(":"); final InetAddress address; try { address = InetAddress.getByName(nodes[0]); } catch (UnknownHostException e) { throw new IllegalArgumentException("unable to identity host [" + nodes[0]+ "]", e); } final int port; try { port = Integer.parseInt(nodes[1]); } catch (final NumberFormatException e) { throw new IllegalArgumentException("unable to parse port [" + nodes[1] + "]", e); } client.addTransportAddress(new InetSocketTransportAddress(address, port)); } final boolean exists = client.admin().indices() .exists(Requests.indicesExistsRequest(indexKey)).actionGet() .isExists(); if (exists && newIndex) { client.admin().indices().prepareDelete(indexKey).get(); } if (!exists || newIndex) { client.admin().indices().create( new CreateIndexRequest(indexKey) .settings( Settings.builder() .put("index.number_of_shards", numberOfShards) .put("index.number_of_replicas", numberOfReplicas) )).actionGet(); } client.admin().cluster().health(new ClusterHealthRequest().waitForGreenStatus()).actionGet(); } @Override public void cleanup() throws DBException { if (client != null) { client.close(); client = null; } } private volatile boolean isRefreshNeeded = false; @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try (XContentBuilder doc = jsonBuilder()) { doc.startObject(); for (final Entry<String, String> entry : StringByteIterator.getStringMap(values).entrySet()) { doc.field(entry.getKey(), entry.getValue()); } doc.field(KEY, key); doc.endObject(); final IndexResponse indexResponse = client.prepareIndex(indexKey, table).setSource(doc).get(); if (indexResponse.getResult() != DocWriteResponse.Result.CREATED) { return Status.ERROR; } if (!isRefreshNeeded) { synchronized (this) { isRefreshNeeded = true; } } return Status.OK; } catch (final Exception e) { e.printStackTrace(); return Status.ERROR; } } @Override public Status delete(final String table, final String key) { try { final SearchResponse searchResponse = search(table, key); if (searchResponse.getHits().totalHits == 0) { return Status.NOT_FOUND; } final String id = searchResponse.getHits().getAt(0).getId(); final DeleteResponse deleteResponse = client.prepareDelete(indexKey, table, id).get(); if (deleteResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) { return Status.NOT_FOUND; } if (!isRefreshNeeded) { synchronized (this) { isRefreshNeeded = true; } } return Status.OK; } catch (final Exception e) { e.printStackTrace(); return Status.ERROR; } } @Override public Status read( final String table, final String key, final Set<String> fields, final Map<String, ByteIterator> result) { try { final SearchResponse searchResponse = search(table, key); if (searchResponse.getHits().totalHits == 0) { return Status.NOT_FOUND; } final SearchHit hit = searchResponse.getHits().getAt(0); if (fields != null) { for (final String field : fields) { 
result.put(field, new StringByteIterator((String) hit.getSource().get(field))); } } else { for (final Map.Entry<String, Object> e : hit.getSource().entrySet()) { if (KEY.equals(e.getKey())) { continue; } result.put(e.getKey(), new StringByteIterator((String) e.getValue())); } } return Status.OK; } catch (final Exception e) { e.printStackTrace(); return Status.ERROR; } } @Override public Status update(final String table, final String key, final Map<String, ByteIterator> values) { try { final SearchResponse response = search(table, key); if (response.getHits().totalHits == 0) { return Status.NOT_FOUND; } final SearchHit hit = response.getHits().getAt(0); for (final Entry<String, String> entry : StringByteIterator.getStringMap(values).entrySet()) { hit.getSource().put(entry.getKey(), entry.getValue()); } final IndexResponse indexResponse = client.prepareIndex(indexKey, table, hit.getId()).setSource(hit.getSource()).get(); if (indexResponse.getResult() != DocWriteResponse.Result.UPDATED) { return Status.ERROR; } if (!isRefreshNeeded) { synchronized (this) { isRefreshNeeded = true; } } return Status.OK; } catch (final Exception e) { e.printStackTrace(); return Status.ERROR; } } @Override public Status scan( final String table, final String startkey, final int recordcount, final Set<String> fields, final Vector<HashMap<String, ByteIterator>> result) { try { refreshIfNeeded(); final RangeQueryBuilder query = new RangeQueryBuilder(KEY).gte(startkey); final SearchResponse response = client.prepareSearch(indexKey).setQuery(query).setSize(recordcount).get(); for (final SearchHit hit : response.getHits()) { final HashMap<String, ByteIterator> entry; if (fields != null) { entry = new HashMap<>(fields.size()); for (final String field : fields) { entry.put(field, new StringByteIterator((String) hit.getSource().get(field))); } } else { entry = new HashMap<>(hit.getSource().size()); for (final Map.Entry<String, Object> field : hit.getSource().entrySet()) { if (KEY.equals(field.getKey())) { continue; } entry.put(field.getKey(), new StringByteIterator((String) field.getValue())); } } result.add(entry); } return Status.OK; } catch (final Exception e) { e.printStackTrace(); return Status.ERROR; } } private void refreshIfNeeded() { if (isRefreshNeeded) { final boolean refresh; synchronized (this) { if (isRefreshNeeded) { refresh = true; isRefreshNeeded = false; } else { refresh = false; } } if (refresh) { client.admin().indices().refresh(new RefreshRequest()).actionGet(); } } } private SearchResponse search(final String table, final String key) { refreshIfNeeded(); return client.prepareSearch(indexKey).setTypes(table).setQuery(new TermQueryBuilder(KEY, key)).get(); } }
11,290
32.805389
112
java
null
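Both Elasticsearch bindings use the same write-then-refresh trick: every write flips a volatile flag, and the next read refreshes the index only if the flag is set, with a synchronized double check so concurrent readers trigger at most one refresh. The pattern in isolation, with the actual refresh call stubbed out:

public final class RefreshOnceSketch {
  private volatile boolean refreshNeeded = false;

  void markDirty() {
    if (!refreshNeeded) {
      synchronized (this) {
        refreshNeeded = true;
      }
    }
  }

  void refreshIfNeeded() {
    if (refreshNeeded) {            // cheap volatile read on the fast path
      final boolean doRefresh;
      synchronized (this) {         // only one thread wins the flag
        if (refreshNeeded) {
          refreshNeeded = false;
          doRefresh = true;
        } else {
          doRefresh = false;
        }
      }
      if (doRefresh) {
        refresh();                  // e.g. a POST to /<index>/_refresh in the REST binding
      }
    }
  }

  private void refresh() {
    // placeholder for the real index refresh
  }
}

This keeps reads and scans consistent with earlier inserts without paying for a refresh on every single write.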
NearPMSW-main/baseline/logging/YCSB/elasticsearch5/src/main/java/site/ycsb/db/elasticsearch5/Elasticsearch5.java
/* * Copyright (c) 2017 YCSB contributors. All rights reserved. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.elasticsearch5; import java.util.Properties; final class Elasticsearch5 { private Elasticsearch5() { } static final String KEY = "key"; static int parseIntegerProperty(final Properties properties, final String key, final int defaultValue) { final String value = properties.getProperty(key); return value == null ? defaultValue : Integer.parseInt(value); } }
1,074
28.861111
106
java
null
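parseIntegerProperty() above is a small convenience that falls back to a default when the property is absent. A usage sketch from within the same package (the property values here are made up):

import java.util.Properties;

final class ParseIntegerPropertySketch {
  static void demo() {
    Properties props = new Properties();
    props.setProperty("es.number_of_shards", "3");
    // Present -> parsed value; absent -> the supplied default.
    int shards = Elasticsearch5.parseIntegerProperty(props, "es.number_of_shards", 1);     // 3
    int replicas = Elasticsearch5.parseIntegerProperty(props, "es.number_of_replicas", 0); // 0
    System.out.println(shards + " " + replicas);
  }
}

Note that a non-numeric value still throws NumberFormatException; only a missing key falls back to the default.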
NearPMSW-main/baseline/logging/YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/package-info.java
/* * Copyright (c) 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * The YCSB binding for <a href="https://azure.microsoft.com/en-us/services/storage/">Azure table Storage</a>. */ package site.ycsb.db.azuretablestorage;
818
34.608696
110
java
null
NearPMSW-main/baseline/logging/YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java
/** * Copyright (c) 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.azuretablestorage; import com.microsoft.azure.storage.CloudStorageAccount; import com.microsoft.azure.storage.table.CloudTable; import com.microsoft.azure.storage.table.CloudTableClient; import com.microsoft.azure.storage.table.DynamicTableEntity; import com.microsoft.azure.storage.table.EntityProperty; import com.microsoft.azure.storage.table.EntityResolver; import com.microsoft.azure.storage.table.TableBatchOperation; import com.microsoft.azure.storage.table.TableOperation; import com.microsoft.azure.storage.table.TableQuery; import com.microsoft.azure.storage.table.TableServiceEntity; import site.ycsb.ByteArrayByteIterator; import site.ycsb.ByteIterator; import site.ycsb.DB; import site.ycsb.DBException; import site.ycsb.Status; import java.util.Date; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; import java.util.Vector; /** * YCSB binding for <a href="https://azure.microsoft.com/en-us/services/storage/">Azure</a>. * See {@code azure/README.md} for details. */ public class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException { Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (protocol != "https" && protocol != "http") { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = 
getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); } } @Override public void cleanup() { } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (fields != null) { return readSubset(key, fields, result); } else { return readEntity(key, result); } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey); TableQuery<DynamicTableEntity> scanQuery = new TableQuery<DynamicTableEntity>(DynamicTableEntity.class) .where(whereStr).take(recordcount); int cnt = 0; for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) { HashMap<String, EntityProperty> properties = entity.getProperties(); HashMap<String, ByteIterator> cur = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); if (fields == null || fields.contains(fieldName)) { cur.put(fieldName, fieldVal); } } result.add(cur); if (++cnt == recordcount) { break; } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return insertOrUpdate(key, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { return insertBatch(key, values); } } @Override public Status delete(String table, String key) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, TableServiceEntity.class); TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); // secondly, delete the entity TableOperation deleteOp = TableOperation.delete(entity); cloudTable.execute(deleteOp); return Status.OK; } catch (Exception e) { return Status.ERROR; } } private String getStorageConnectionString(String protocol, String account, String key, String tableEndPoint) { String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s", protocol, account, key); if (tableEndPoint != null) { res = String.format("%s;TableEndpoint=%s", res, tableEndPoint); } return res; } /* * Read subset of properties instead of full fields with projection. 
*/ public Status readSubset(String key, Set<String> fields, Map<String, ByteIterator> result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery<TableServiceEntity> projectionQuery = TableQuery.from( TableServiceEntity.class).where(whereStr).select(fields.toArray(new String[0])); EntityResolver<HashMap<String, ByteIterator>> resolver = new EntityResolver<HashMap<String, ByteIterator>>() { public HashMap<String, ByteIterator> resolve(String partitionkey, String rowKey, Date timeStamp, HashMap<String, EntityProperty> properties, String etag) { HashMap<String, ByteIterator> tmp = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String key = entry.getKey(); ByteIterator val = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); tmp.put(key, val); } return tmp; } }; try { for (HashMap<String, ByteIterator> tmp : cloudTable.execute(projectionQuery, resolver)) { for (Entry<String, ByteIterator> entry : tmp.entrySet()){ String fieldName = entry.getKey(); ByteIterator fieldVal = entry.getValue(); result.put(fieldName, fieldVal); } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status readEntity(String key, Map<String, ByteIterator> result) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap<String, EntityProperty> properties = entity.getProperties(); for (Entry<String, EntityProperty> entry: properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status insertBatch(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); BATCH_OPERATION.insertOrReplace(entity); if (++curIdx == batchSize) { try { cloudTable.execute(BATCH_OPERATION); BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } private Status insertOrUpdate(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; } } }
10,830
37.544484
112
java
null
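The Azure binding above coalesces inserts into a TableBatchOperation when azure.batchsize is greater than one, all under a single partition key. A minimal sketch of that batch path against the same SDK; the connection string, table, and field names are placeholders:

import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.table.CloudTable;
import com.microsoft.azure.storage.table.DynamicTableEntity;
import com.microsoft.azure.storage.table.EntityProperty;
import com.microsoft.azure.storage.table.TableBatchOperation;
import java.util.HashMap;

public final class AzureBatchSketch {
  public static void main(String[] args) throws Exception {
    CloudStorageAccount account = CloudStorageAccount.parse(
        "DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<key>"); // placeholder credentials
    CloudTable table = account.createCloudTableClient().getTableReference("usertable");
    table.createIfNotExists();

    TableBatchOperation batch = new TableBatchOperation();
    for (int i = 0; i < 10; i++) {
      HashMap<String, EntityProperty> props = new HashMap<>();
      props.put("field0", new EntityProperty(("value" + i).getBytes()));
      // All entities in one batch must share a partition key, as in the binding.
      batch.insertOrReplace(new DynamicTableEntity("Test", "user" + i, props));
    }
    table.execute(batch); // one round trip for the whole batch
  }
}

Batching amortizes the per-request latency, which is exactly why the binding only flushes once curIdx reaches the configured batch size.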
NearPMSW-main/baseline/logging/YCSB/jdbc/src/test/java/site/ycsb/db/JdbcDBClientTest.java
/** * Copyright (c) 2015 - 2016 Yahoo! Inc., 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db; import static org.junit.Assert.*; import site.ycsb.ByteIterator; import site.ycsb.DBException; import site.ycsb.StringByteIterator; import org.junit.*; import java.sql.*; import java.util.HashMap; import java.util.Map; import java.util.HashSet; import java.util.Set; import java.util.Properties; import java.util.Vector; public class JdbcDBClientTest { private static final String TEST_DB_DRIVER = "org.hsqldb.jdbc.JDBCDriver"; private static final String TEST_DB_URL = "jdbc:hsqldb:mem:ycsb"; private static final String TEST_DB_USER = "sa"; private static final String TABLE_NAME = "USERTABLE"; private static final int FIELD_LENGTH = 32; private static final String FIELD_PREFIX = "FIELD"; private static final String KEY_PREFIX = "user"; private static final String KEY_FIELD = "YCSB_KEY"; private static final int NUM_FIELDS = 3; private static Connection jdbcConnection = null; private static JdbcDBClient jdbcDBClient = null; @BeforeClass public static void setup() { setupWithBatch(1, true); } public static void setupWithBatch(int batchSize, boolean autoCommit) { try { jdbcConnection = DriverManager.getConnection(TEST_DB_URL); jdbcDBClient = new JdbcDBClient(); Properties p = new Properties(); p.setProperty(JdbcDBClient.CONNECTION_URL, TEST_DB_URL); p.setProperty(JdbcDBClient.DRIVER_CLASS, TEST_DB_DRIVER); p.setProperty(JdbcDBClient.CONNECTION_USER, TEST_DB_USER); p.setProperty(JdbcDBClient.DB_BATCH_SIZE, Integer.toString(batchSize)); p.setProperty(JdbcDBClient.JDBC_BATCH_UPDATES, "true"); p.setProperty(JdbcDBClient.JDBC_AUTO_COMMIT, Boolean.toString(autoCommit)); jdbcDBClient.setProperties(p); jdbcDBClient.init(); } catch (SQLException e) { e.printStackTrace(); fail("Could not create local Database"); } catch (DBException e) { e.printStackTrace(); fail("Could not create JdbcDBClient instance"); } } @AfterClass public static void teardown() { try { if (jdbcConnection != null) { jdbcConnection.close(); } } catch (SQLException e) { e.printStackTrace(); } try { if (jdbcDBClient != null) { jdbcDBClient.cleanup(); } } catch (DBException e) { e.printStackTrace(); } } @Before public void prepareTest() { try { DatabaseMetaData metaData = jdbcConnection.getMetaData(); ResultSet tableResults = metaData.getTables(null, null, TABLE_NAME, null); if (tableResults.next()) { // If the table already exists, just truncate it jdbcConnection.prepareStatement( String.format("TRUNCATE TABLE %s", TABLE_NAME) ).execute(); } else { // If the table does not exist then create it StringBuilder createString = new StringBuilder( String.format("CREATE TABLE %s (%s VARCHAR(100) PRIMARY KEY", TABLE_NAME, KEY_FIELD) ); for (int i = 0; i < NUM_FIELDS; i++) { createString.append( String.format(", %s%d VARCHAR(100)", FIELD_PREFIX, i) ); } createString.append(")"); jdbcConnection.prepareStatement(createString.toString()).execute(); } } catch (SQLException 
e) { e.printStackTrace(); fail("Failed to prepare test"); } } /* This is a copy of buildDeterministicValue() from core:site.ycsb.workloads.CoreWorkload.java. That method is neither public nor static so we need a copy. */ private String buildDeterministicValue(String key, String fieldkey) { int size = FIELD_LENGTH; StringBuilder sb = new StringBuilder(size); sb.append(key); sb.append(':'); sb.append(fieldkey); while (sb.length() < size) { sb.append(':'); sb.append(sb.toString().hashCode()); } sb.setLength(size); return sb.toString(); } /* Inserts a row of deterministic values for the given insertKey using the jdbcDBClient. */ private HashMap<String, ByteIterator> insertRow(String insertKey) { HashMap<String, ByteIterator> insertMap = new HashMap<String, ByteIterator>(); for (int i = 0; i < 3; i++) { insertMap.put(FIELD_PREFIX + i, new StringByteIterator(buildDeterministicValue(insertKey, FIELD_PREFIX + i))); } jdbcDBClient.insert(TABLE_NAME, insertKey, insertMap); return insertMap; } @Test public void insertTest() { try { String insertKey = "user0"; HashMap<String, ByteIterator> insertMap = insertRow(insertKey); ResultSet resultSet = jdbcConnection.prepareStatement( String.format("SELECT * FROM %s", TABLE_NAME) ).executeQuery(); // Check we have a result Row assertTrue(resultSet.next()); // Check that all the columns have expected values assertEquals(resultSet.getString(KEY_FIELD), insertKey); for (int i = 0; i < 3; i++) { assertEquals(resultSet.getString(FIELD_PREFIX + i), insertMap.get(FIELD_PREFIX + i).toString()); } // Check that we do not have any more rows assertFalse(resultSet.next()); resultSet.close(); } catch (SQLException e) { e.printStackTrace(); fail("Failed insertTest"); } } @Test public void updateTest() { try { String preupdateString = "preupdate"; StringBuilder fauxInsertString = new StringBuilder( String.format("INSERT INTO %s VALUES(?", TABLE_NAME) ); for (int i = 0; i < NUM_FIELDS; i++) { fauxInsertString.append(",?"); } fauxInsertString.append(")"); PreparedStatement fauxInsertStatement = jdbcConnection.prepareStatement(fauxInsertString.toString()); for (int i = 2; i < NUM_FIELDS + 2; i++) { fauxInsertStatement.setString(i, preupdateString); } fauxInsertStatement.setString(1, "user0"); fauxInsertStatement.execute(); fauxInsertStatement.setString(1, "user1"); fauxInsertStatement.execute(); fauxInsertStatement.setString(1, "user2"); fauxInsertStatement.execute(); HashMap<String, ByteIterator> updateMap = new HashMap<String, ByteIterator>(); for (int i = 0; i < 3; i++) { updateMap.put(FIELD_PREFIX + i, new StringByteIterator(buildDeterministicValue("user1", FIELD_PREFIX + i))); } jdbcDBClient.update(TABLE_NAME, "user1", updateMap); ResultSet resultSet = jdbcConnection.prepareStatement( String.format("SELECT * FROM %s ORDER BY %s", TABLE_NAME, KEY_FIELD) ).executeQuery(); // Ensure that user0 record was not changed resultSet.next(); assertEquals("Assert first row key is user0", resultSet.getString(KEY_FIELD), "user0"); for (int i = 0; i < 3; i++) { assertEquals("Assert first row fields contain preupdateString", resultSet.getString(FIELD_PREFIX + i), preupdateString); } // Check that all the columns have expected values for user1 record resultSet.next(); assertEquals(resultSet.getString(KEY_FIELD), "user1"); for (int i = 0; i < 3; i++) { assertEquals(resultSet.getString(FIELD_PREFIX + i), updateMap.get(FIELD_PREFIX + i).toString()); } // Ensure that user2 record was not changed resultSet.next(); assertEquals("Assert third row key is user2", resultSet.getString(KEY_FIELD), 
"user2"); for (int i = 0; i < 3; i++) { assertEquals("Assert third row fields contain preupdateString", resultSet.getString(FIELD_PREFIX + i), preupdateString); } resultSet.close(); } catch (SQLException e) { e.printStackTrace(); fail("Failed updateTest"); } } @Test public void readTest() { String insertKey = "user0"; HashMap<String, ByteIterator> insertMap = insertRow(insertKey); Set<String> readFields = new HashSet<String>(); HashMap<String, ByteIterator> readResultMap = new HashMap<String, ByteIterator>(); // Test reading a single field readFields.add("FIELD0"); jdbcDBClient.read(TABLE_NAME, insertKey, readFields, readResultMap); assertEquals("Assert that result has correct number of fields", readFields.size(), readResultMap.size()); for (String field: readFields) { assertEquals("Assert " + field + " was read correctly", insertMap.get(field).toString(), readResultMap.get(field).toString()); } readResultMap = new HashMap<String, ByteIterator>(); // Test reading all fields readFields.add("FIELD1"); readFields.add("FIELD2"); jdbcDBClient.read(TABLE_NAME, insertKey, readFields, readResultMap); assertEquals("Assert that result has correct number of fields", readFields.size(), readResultMap.size()); for (String field: readFields) { assertEquals("Assert " + field + " was read correctly", insertMap.get(field).toString(), readResultMap.get(field).toString()); } } @Test public void deleteTest() { try { insertRow("user0"); String deleteKey = "user1"; insertRow(deleteKey); insertRow("user2"); jdbcDBClient.delete(TABLE_NAME, deleteKey); ResultSet resultSet = jdbcConnection.prepareStatement( String.format("SELECT * FROM %s", TABLE_NAME) ).executeQuery(); int totalRows = 0; while (resultSet.next()) { assertNotEquals("Assert this is not the deleted row key", deleteKey, resultSet.getString(KEY_FIELD)); totalRows++; } // Check we do not have a result Row assertEquals("Assert we ended with the correct number of rows", totalRows, 2); resultSet.close(); } catch (SQLException e) { e.printStackTrace(); fail("Failed deleteTest"); } } @Test public void scanTest() throws SQLException { Map<String, HashMap<String, ByteIterator>> keyMap = new HashMap<String, HashMap<String, ByteIterator>>(); for (int i = 0; i < 5; i++) { String insertKey = KEY_PREFIX + i; keyMap.put(insertKey, insertRow(insertKey)); } Set<String> fieldSet = new HashSet<String>(); fieldSet.add("FIELD0"); fieldSet.add("FIELD1"); int startIndex = 1; int resultRows = 3; Vector<HashMap<String, ByteIterator>> resultVector = new Vector<HashMap<String, ByteIterator>>(); jdbcDBClient.scan(TABLE_NAME, KEY_PREFIX + startIndex, resultRows, fieldSet, resultVector); // Check the resultVector is the correct size assertEquals("Assert the correct number of results rows were returned", resultRows, resultVector.size()); // Check each vector row to make sure we have the correct fields int testIndex = startIndex; for (Map<String, ByteIterator> result: resultVector) { assertEquals("Assert that this row has the correct number of fields", fieldSet.size(), result.size()); for (String field: fieldSet) { assertEquals("Assert this field is correct in this row", keyMap.get(KEY_PREFIX + testIndex).get(field).toString(), result.get(field).toString()); } testIndex++; } } @Test public void insertBatchTest() throws DBException { insertBatchTest(20); } @Test public void insertPartialBatchTest() throws DBException { insertBatchTest(19); } public void insertBatchTest(int numRows) throws DBException { teardown(); setupWithBatch(10, false); try { String insertKey = "user0"; 
HashMap<String, ByteIterator> insertMap = insertRow(insertKey); assertEquals(3, insertMap.size()); ResultSet resultSet = jdbcConnection.prepareStatement( String.format("SELECT * FROM %s", TABLE_NAME) ).executeQuery(); // Check we do not have a result Row (because batch is not full yet) assertFalse(resultSet.next()); // insert more rows, completing 1 batch (still results are partial). for (int i = 1; i < numRows; i++) { insertMap = insertRow("user" + i); } // assertNumRows(10 * (numRows / 10)); // call cleanup, which should insert the partial batch jdbcDBClient.cleanup(); // Prevent a teardown() from printing an error jdbcDBClient = null; // Check that we have all rows assertNumRows(numRows); } catch (SQLException e) { e.printStackTrace(); fail("Failed insertBatchTest"); } finally { teardown(); // for next tests setup(); } } private void assertNumRows(long numRows) throws SQLException { ResultSet resultSet = jdbcConnection.prepareStatement( String.format("SELECT * FROM %s", TABLE_NAME) ).executeQuery(); for (int i = 0; i < numRows; i++) { assertTrue("expecting " + numRows + " results, received only " + i, resultSet.next()); } assertFalse("expecting " + numRows + " results, received more", resultSet.next()); resultSet.close(); } }
14,830
36.642132
161
java
null
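The batching behaviour exercised by insertBatchTest above is driven entirely by properties. Below is a minimal sketch of how a caller might enable it; the class name, the H2 driver and the in-memory URL are placeholders for illustration and not part of this repo, while the property names come from JdbcDBClient further down.

import java.util.Properties;
import site.ycsb.DBException;
import site.ycsb.db.JdbcDBClient;

public class BatchConfigSketch {
  public static void main(String[] args) throws DBException {
    Properties props = new Properties();
    props.setProperty("db.driver", "org.h2.Driver");      // placeholder driver class
    props.setProperty("db.url", "jdbc:h2:mem:ycsb");      // placeholder in-memory URL
    props.setProperty("db.user", "sa");
    props.setProperty("db.batchsize", "10");              // flush every 10 inserts
    props.setProperty("jdbc.batchupdateapi", "true");     // use JDBC addBatch/executeBatch

    JdbcDBClient client = new JdbcDBClient();
    client.setProperties(props);
    client.init();
    // inserts return Status.BATCHED_OK until a batch of 10 is flushed
    client.cleanup();                                     // flushes any partial batch
  }
}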
NearPMSW-main/baseline/logging/YCSB/jdbc/src/main/java/site/ycsb/db/package-info.java
/* * Copyright (c) 2014 - 2016, Yahoo!, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * The YCSB binding for stores that can be accessed via JDBC. */ package site.ycsb.db;
753
31.782609
70
java
null
NearPMSW-main/baseline/logging/YCSB/jdbc/src/main/java/site/ycsb/db/JdbcDBCli.java
/** * Copyright (c) 2010 - 2016 Yahoo! Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db; import java.io.FileInputStream; import java.io.IOException; import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; import java.util.Enumeration; import java.util.Properties; /** * Execute a JDBC command line. * * @author sudipto */ public final class JdbcDBCli { private static void usageMessage() { System.out.println("JdbcCli. Options:"); System.out.println(" -p key=value properties defined."); System.out.println(" -P location of the properties file to load."); System.out.println(" -c SQL command to execute."); } private static void executeCommand(Properties props, String sql) throws SQLException { String driver = props.getProperty(JdbcDBClient.DRIVER_CLASS); String username = props.getProperty(JdbcDBClient.CONNECTION_USER); String password = props.getProperty(JdbcDBClient.CONNECTION_PASSWD, ""); String url = props.getProperty(JdbcDBClient.CONNECTION_URL); if (driver == null || username == null || url == null) { throw new SQLException("Missing connection information."); } Connection conn = null; try { Class.forName(driver); conn = DriverManager.getConnection(url, username, password); Statement stmt = conn.createStatement(); stmt.execute(sql); System.out.println("Command \"" + sql + "\" successfully executed."); } catch (ClassNotFoundException e) { throw new SQLException("JDBC Driver class not found."); } finally { if (conn != null) { System.out.println("Closing database connection."); conn.close(); } } } /** * @param args */ public static void main(String[] args) { if (args.length == 0) { usageMessage(); System.exit(0); } Properties props = new Properties(); Properties fileprops = new Properties(); String sql = null; // parse arguments int argindex = 0; while (args[argindex].startsWith("-")) { if (args[argindex].compareTo("-P") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } String propfile = args[argindex]; argindex++; Properties myfileprops = new Properties(); try { myfileprops.load(new FileInputStream(propfile)); } catch (IOException e) { System.out.println(e.getMessage()); System.exit(0); } // Issue #5 - remove call to stringPropertyNames to make compilable // under Java 1.5 for (Enumeration<?> e = myfileprops.propertyNames(); e.hasMoreElements();) { String prop = (String) e.nextElement(); fileprops.setProperty(prop, myfileprops.getProperty(prop)); } } else if (args[argindex].compareTo("-p") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } int eq = args[argindex].indexOf('='); if (eq < 0) { usageMessage(); System.exit(0); } String name = args[argindex].substring(0, eq); String value = args[argindex].substring(eq + 1); props.put(name, value); argindex++; } else if (args[argindex].compareTo("-c") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } sql = args[argindex++]; 
} else { System.out.println("Unknown option " + args[argindex]); usageMessage(); System.exit(0); } if (argindex >= args.length) { break; } } if (argindex != args.length) { usageMessage(); System.exit(0); } // overwrite file properties with properties from the command line // Issue #5 - remove call to stringPropertyNames to make compilable under // Java 1.5 for (Enumeration<?> e = props.propertyNames(); e.hasMoreElements();) { String prop = (String) e.nextElement(); fileprops.setProperty(prop, props.getProperty(prop)); } if (sql == null) { System.err.println("Missing command."); usageMessage(); System.exit(1); } try { executeCommand(fileprops, sql); } catch (SQLException e) { System.err.println("Error in executing command. " + e); System.exit(1); } } /** * Hidden constructor. */ private JdbcDBCli() { super(); } }
5,184
27.489011
88
java
null
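JdbcDBCli is meant to be launched from the command line with -p/-P for connection properties and -c for the SQL to run. As a hedged illustration, the same invocation can be driven programmatically; the driver class, URL and credentials below are placeholders and the snippet is a sketch, not something intended to run against a real server as written.

public class JdbcDBCliExample {
  public static void main(String[] args) {
    // Equivalent to: java site.ycsb.db.JdbcDBCli -p db.driver=... -p db.url=... -p db.user=... -c "SELECT 1"
    site.ycsb.db.JdbcDBCli.main(new String[] {
        "-p", "db.driver=org.postgresql.Driver",                  // placeholder driver class
        "-p", "db.url=jdbc:postgresql://localhost:5432/ycsb",     // placeholder URL
        "-p", "db.user=ycsb",
        "-p", "db.passwd=ycsb",
        "-c", "SELECT 1"
    });
  }
}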
NearPMSW-main/baseline/logging/YCSB/jdbc/src/main/java/site/ycsb/db/JdbcDBCreateTable.java
/** * Copyright (c) 2010 - 2016 Yahoo! Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db; import java.io.FileInputStream; import java.io.IOException; import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; import java.util.Enumeration; import java.util.Properties; /** * Utility class to create the table to be used by the benchmark. * * @author sudipto */ public final class JdbcDBCreateTable { private static void usageMessage() { System.out.println("Create Table Client. Options:"); System.out.println(" -p key=value properties defined."); System.out.println(" -P location of the properties file to load."); System.out.println(" -n name of the table."); System.out.println(" -f number of fields (default 10)."); } private static void createTable(Properties props, String tablename) throws SQLException { String driver = props.getProperty(JdbcDBClient.DRIVER_CLASS); String username = props.getProperty(JdbcDBClient.CONNECTION_USER); String password = props.getProperty(JdbcDBClient.CONNECTION_PASSWD, ""); String url = props.getProperty(JdbcDBClient.CONNECTION_URL); int fieldcount = Integer.parseInt(props.getProperty(JdbcDBClient.FIELD_COUNT_PROPERTY, JdbcDBClient.FIELD_COUNT_PROPERTY_DEFAULT)); if (driver == null || username == null || url == null) { throw new SQLException("Missing connection information."); } Connection conn = null; try { Class.forName(driver); conn = DriverManager.getConnection(url, username, password); Statement stmt = conn.createStatement(); StringBuilder sql = new StringBuilder("DROP TABLE IF EXISTS "); sql.append(tablename); sql.append(";"); stmt.execute(sql.toString()); sql = new StringBuilder("CREATE TABLE "); sql.append(tablename); sql.append(" (YCSB_KEY VARCHAR PRIMARY KEY"); for (int idx = 0; idx < fieldcount; idx++) { sql.append(", FIELD"); sql.append(idx); sql.append(" TEXT"); } sql.append(");"); stmt.execute(sql.toString()); System.out.println("Table " + tablename + " created.."); } catch (ClassNotFoundException e) { throw new SQLException("JDBC Driver class not found."); } finally { if (conn != null) { System.out.println("Closing database connection."); conn.close(); } } } /** * @param args */ public static void main(String[] args) { if (args.length == 0) { usageMessage(); System.exit(0); } String tablename = null; int fieldcount = -1; Properties props = new Properties(); Properties fileprops = new Properties(); // parse arguments int argindex = 0; while (args[argindex].startsWith("-")) { if (args[argindex].compareTo("-P") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } String propfile = args[argindex]; argindex++; Properties myfileprops = new Properties(); try { myfileprops.load(new FileInputStream(propfile)); } catch (IOException e) { System.out.println(e.getMessage()); System.exit(0); } // Issue #5 - remove call to stringPropertyNames to make compilable // under Java 1.5 for (Enumeration<?> e = 
myfileprops.propertyNames(); e.hasMoreElements();) { String prop = (String) e.nextElement(); fileprops.setProperty(prop, myfileprops.getProperty(prop)); } } else if (args[argindex].compareTo("-p") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } int eq = args[argindex].indexOf('='); if (eq < 0) { usageMessage(); System.exit(0); } String name = args[argindex].substring(0, eq); String value = args[argindex].substring(eq + 1); props.put(name, value); argindex++; } else if (args[argindex].compareTo("-n") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } tablename = args[argindex++]; } else if (args[argindex].compareTo("-f") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } try { fieldcount = Integer.parseInt(args[argindex++]); } catch (NumberFormatException e) { System.err.println("Invalid number for field count"); usageMessage(); System.exit(1); } } else { System.out.println("Unknown option " + args[argindex]); usageMessage(); System.exit(0); } if (argindex >= args.length) { break; } } if (argindex != args.length) { usageMessage(); System.exit(0); } // overwrite file properties with properties from the command line // Issue #5 - remove call to stringPropertyNames to make compilable under // Java 1.5 for (Enumeration<?> e = props.propertyNames(); e.hasMoreElements();) { String prop = (String) e.nextElement(); fileprops.setProperty(prop, props.getProperty(prop)); } props = fileprops; if (tablename == null) { System.err.println("table name missing."); usageMessage(); System.exit(1); } if (fieldcount > 0) { props.setProperty(JdbcDBClient.FIELD_COUNT_PROPERTY, String.valueOf(fieldcount)); } try { createTable(props, tablename); } catch (SQLException e) { System.err.println("Error in creating table. " + e); System.exit(1); } } /** * Hidden constructor. */ private JdbcDBCreateTable() { super(); } }
6,519
27.977778
91
java
null
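As a worked example of the DDL this utility builds: invoked with -n usertable -f 3, createTable first issues DROP TABLE IF EXISTS usertable; and then CREATE TABLE usertable (YCSB_KEY VARCHAR PRIMARY KEY, FIELD0 TEXT, FIELD1 TEXT, FIELD2 TEXT); with -f omitted, the field count falls back to the fieldcount default of 10.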
NearPMSW-main/baseline/logging/YCSB/jdbc/src/main/java/site/ycsb/db/StatementType.java
/** * Copyright (c) 2010 Yahoo! Inc., 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db; /** * The statement type for the prepared statements. */ public class StatementType { enum Type { INSERT(1), DELETE(2), READ(3), UPDATE(4), SCAN(5); private final int internalType; private Type(int type) { internalType = type; } int getHashCode() { final int prime = 31; int result = 1; result = prime * result + internalType; return result; } } private Type type; private int shardIndex; private int numFields; private String tableName; private String fieldString; public StatementType(Type type, String tableName, int numFields, String fieldString, int shardIndex) { this.type = type; this.tableName = tableName; this.numFields = numFields; this.fieldString = fieldString; this.shardIndex = shardIndex; } public String getTableName() { return tableName; } public String getFieldString() { return fieldString; } public int getNumFields() { return numFields; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + numFields + 100 * shardIndex; result = prime * result + ((tableName == null) ? 0 : tableName.hashCode()); result = prime * result + ((type == null) ? 0 : type.getHashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } StatementType other = (StatementType) obj; if (numFields != other.numFields) { return false; } if (shardIndex != other.shardIndex) { return false; } if (tableName == null) { if (other.tableName != null) { return false; } } else if (!tableName.equals(other.tableName)) { return false; } if (type != other.type) { return false; } if (!fieldString.equals(other.fieldString)) { return false; } return true; } }
2,733
23.630631
104
java
null
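StatementType exists purely as a composite cache key: two instances built from the same operation, table, field list and shard index are equal, so JdbcDBClient can reuse one PreparedStatement per distinct statement shape. A minimal sketch of that behaviour follows; the class name is hypothetical, and it sits in the same package because the Type enum is package-private.

package site.ycsb.db;

public final class StatementTypeKeySketch {
  public static void main(String[] args) {
    StatementType a = new StatementType(StatementType.Type.READ, "usertable", 1, "", 0);
    StatementType b = new StatementType(StatementType.Type.READ, "usertable", 1, "", 0);
    // equal keys: a PreparedStatement cached under 'a' is found again via 'b'
    System.out.println(a.equals(b) && a.hashCode() == b.hashCode()); // true
  }
}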
NearPMSW-main/baseline/logging/YCSB/jdbc/src/main/java/site/ycsb/db/JdbcDBClient.java
/** * Copyright (c) 2010 - 2016 Yahoo! Inc., 2016, 2019 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db; import site.ycsb.DB; import site.ycsb.DBException; import site.ycsb.ByteIterator; import site.ycsb.Status; import site.ycsb.StringByteIterator; import java.sql.*; import java.util.*; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import site.ycsb.db.flavors.DBFlavor; /** * A class that wraps a JDBC compliant database to allow it to be interfaced * with YCSB. This class extends {@link DB} and implements the database * interface used by YCSB client. * * <br> * Each client will have its own instance of this class. This client is not * thread safe. * * <br> * This interface expects a schema <key> <field1> <field2> <field3> ... All * attributes are of type TEXT. All accesses are through the primary key. * Therefore, only one index on the primary key is needed. */ public class JdbcDBClient extends DB { /** The class to use as the jdbc driver. */ public static final String DRIVER_CLASS = "db.driver"; /** The URL to connect to the database. */ public static final String CONNECTION_URL = "db.url"; /** The user name to use to connect to the database. */ public static final String CONNECTION_USER = "db.user"; /** The password to use for establishing the connection. */ public static final String CONNECTION_PASSWD = "db.passwd"; /** The batch size for batched inserts. Set to >0 to use batching */ public static final String DB_BATCH_SIZE = "db.batchsize"; /** The JDBC fetch size hinted to the driver. */ public static final String JDBC_FETCH_SIZE = "jdbc.fetchsize"; /** The JDBC connection auto-commit property for the driver. */ public static final String JDBC_AUTO_COMMIT = "jdbc.autocommit"; public static final String JDBC_BATCH_UPDATES = "jdbc.batchupdateapi"; /** The name of the property for the number of fields in a record. */ public static final String FIELD_COUNT_PROPERTY = "fieldcount"; /** Default number of fields in a record. */ public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10"; /** Representing a NULL value. */ public static final String NULL_VALUE = "NULL"; /** The primary key in the user table. */ public static final String PRIMARY_KEY = "YCSB_KEY"; /** The field name prefix in the table. */ public static final String COLUMN_PREFIX = "FIELD"; /** SQL:2008 standard: FETCH FIRST n ROWS after the ORDER BY. */ private boolean sqlansiScans = false; /** SQL Server before 2012: TOP n after the SELECT. 
*/ private boolean sqlserverScans = false; private List<Connection> conns; private boolean initialized = false; private Properties props; private int jdbcFetchSize; private int batchSize; private boolean autoCommit; private boolean batchUpdates; private static final String DEFAULT_PROP = ""; private ConcurrentMap<StatementType, PreparedStatement> cachedStatements; private long numRowsInBatch = 0; /** DB flavor defines DB-specific syntax and behavior for the * particular database. Current database flavors are: {default, phoenix} */ private DBFlavor dbFlavor; /** * Ordered field information for insert and update statements. */ private static class OrderedFieldInfo { private String fieldKeys; private List<String> fieldValues; OrderedFieldInfo(String fieldKeys, List<String> fieldValues) { this.fieldKeys = fieldKeys; this.fieldValues = fieldValues; } String getFieldKeys() { return fieldKeys; } List<String> getFieldValues() { return fieldValues; } } /** * For the given key, returns what shard contains data for this key. * * @param key Data key to do operation on * @return Shard index */ private int getShardIndexByKey(String key) { int ret = Math.abs(key.hashCode()) % conns.size(); return ret; } /** * For the given key, returns Connection object that holds connection to the * shard that contains this key. * * @param key Data key to get information for * @return Connection object */ private Connection getShardConnectionByKey(String key) { return conns.get(getShardIndexByKey(key)); } private void cleanupAllConnections() throws SQLException { for (Connection conn : conns) { if (!autoCommit) { conn.commit(); } conn.close(); } } /** Returns parsed int value from the properties if set, otherwise returns -1. */ private static int getIntProperty(Properties props, String key) throws DBException { String valueStr = props.getProperty(key); if (valueStr != null) { try { return Integer.parseInt(valueStr); } catch (NumberFormatException nfe) { System.err.println("Invalid " + key + " specified: " + valueStr); throw new DBException(nfe); } } return -1; } /** Returns parsed boolean value from the properties if set, otherwise returns defaultVal. 
*/ private static boolean getBoolProperty(Properties props, String key, boolean defaultVal) { String valueStr = props.getProperty(key); if (valueStr != null) { return Boolean.parseBoolean(valueStr); } return defaultVal; } @Override public void init() throws DBException { if (initialized) { System.err.println("Client connection already initialized."); return; } props = getProperties(); String urls = props.getProperty(CONNECTION_URL, DEFAULT_PROP); String user = props.getProperty(CONNECTION_USER, DEFAULT_PROP); String passwd = props.getProperty(CONNECTION_PASSWD, DEFAULT_PROP); String driver = props.getProperty(DRIVER_CLASS); this.jdbcFetchSize = getIntProperty(props, JDBC_FETCH_SIZE); this.batchSize = getIntProperty(props, DB_BATCH_SIZE); this.autoCommit = getBoolProperty(props, JDBC_AUTO_COMMIT, true); this.batchUpdates = getBoolProperty(props, JDBC_BATCH_UPDATES, false); try { // The SQL Syntax for Scan depends on the DB engine // - SQL:2008 standard: FETCH FIRST n ROWS after the ORDER BY // - SQL Server before 2012: TOP n after the SELECT // - others (MySQL,MariaDB, PostgreSQL before 8.4) // TODO: check product name and version rather than driver name if (driver != null) { if (driver.contains("sqlserver")) { sqlserverScans = true; sqlansiScans = false; } if (driver.contains("oracle")) { sqlserverScans = false; sqlansiScans = true; } if (driver.contains("postgres")) { sqlserverScans = false; sqlansiScans = true; } Class.forName(driver); } int shardCount = 0; conns = new ArrayList<Connection>(3); // for a longer explanation see the README.md // semicolons aren't present in JDBC urls, so we use them to delimit // multiple JDBC connections to shard across. final String[] urlArr = urls.split(";"); for (String url : urlArr) { System.out.println("Adding shard node URL: " + url); Connection conn = DriverManager.getConnection(url, user, passwd); // Since there is no explicit commit method in the DB interface, all // operations should auto commit, except when explicitly told not to // (this is necessary in cases such as for PostgreSQL when running a // scan workload with fetchSize) conn.setAutoCommit(autoCommit); shardCount++; conns.add(conn); } System.out.println("Using shards: " + shardCount + ", batchSize:" + batchSize + ", fetchSize: " + jdbcFetchSize); cachedStatements = new ConcurrentHashMap<StatementType, PreparedStatement>(); this.dbFlavor = DBFlavor.fromJdbcUrl(urlArr[0]); } catch (ClassNotFoundException e) { System.err.println("Error in initializing the JDBS driver: " + e); throw new DBException(e); } catch (SQLException e) { System.err.println("Error in database operation: " + e); throw new DBException(e); } catch (NumberFormatException e) { System.err.println("Invalid value for fieldcount property. " + e); throw new DBException(e); } initialized = true; } @Override public void cleanup() throws DBException { if (batchSize > 0) { try { // commit un-finished batches for (PreparedStatement st : cachedStatements.values()) { if (!st.getConnection().isClosed() && !st.isClosed() && (numRowsInBatch % batchSize != 0)) { st.executeBatch(); } } } catch (SQLException e) { System.err.println("Error in cleanup execution. " + e); throw new DBException(e); } } try { cleanupAllConnections(); } catch (SQLException e) { System.err.println("Error in closing the connection. 
" + e); throw new DBException(e); } } private PreparedStatement createAndCacheInsertStatement(StatementType insertType, String key) throws SQLException { String insert = dbFlavor.createInsertStatement(insertType, key); PreparedStatement insertStatement = getShardConnectionByKey(key).prepareStatement(insert); PreparedStatement stmt = cachedStatements.putIfAbsent(insertType, insertStatement); if (stmt == null) { return insertStatement; } return stmt; } private PreparedStatement createAndCacheReadStatement(StatementType readType, String key) throws SQLException { String read = dbFlavor.createReadStatement(readType, key); PreparedStatement readStatement = getShardConnectionByKey(key).prepareStatement(read); PreparedStatement stmt = cachedStatements.putIfAbsent(readType, readStatement); if (stmt == null) { return readStatement; } return stmt; } private PreparedStatement createAndCacheDeleteStatement(StatementType deleteType, String key) throws SQLException { String delete = dbFlavor.createDeleteStatement(deleteType, key); PreparedStatement deleteStatement = getShardConnectionByKey(key).prepareStatement(delete); PreparedStatement stmt = cachedStatements.putIfAbsent(deleteType, deleteStatement); if (stmt == null) { return deleteStatement; } return stmt; } private PreparedStatement createAndCacheUpdateStatement(StatementType updateType, String key) throws SQLException { String update = dbFlavor.createUpdateStatement(updateType, key); PreparedStatement insertStatement = getShardConnectionByKey(key).prepareStatement(update); PreparedStatement stmt = cachedStatements.putIfAbsent(updateType, insertStatement); if (stmt == null) { return insertStatement; } return stmt; } private PreparedStatement createAndCacheScanStatement(StatementType scanType, String key) throws SQLException { String select = dbFlavor.createScanStatement(scanType, key, sqlserverScans, sqlansiScans); PreparedStatement scanStatement = getShardConnectionByKey(key).prepareStatement(select); if (this.jdbcFetchSize > 0) { scanStatement.setFetchSize(this.jdbcFetchSize); } PreparedStatement stmt = cachedStatements.putIfAbsent(scanType, scanStatement); if (stmt == null) { return scanStatement; } return stmt; } @Override public Status read(String tableName, String key, Set<String> fields, Map<String, ByteIterator> result) { try { StatementType type = new StatementType(StatementType.Type.READ, tableName, 1, "", getShardIndexByKey(key)); PreparedStatement readStatement = cachedStatements.get(type); if (readStatement == null) { readStatement = createAndCacheReadStatement(type, key); } readStatement.setString(1, key); ResultSet resultSet = readStatement.executeQuery(); if (!resultSet.next()) { resultSet.close(); return Status.NOT_FOUND; } if (result != null && fields != null) { for (String field : fields) { String value = resultSet.getString(field); result.put(field, new StringByteIterator(value)); } } resultSet.close(); return Status.OK; } catch (SQLException e) { System.err.println("Error in processing read of table " + tableName + ": " + e); return Status.ERROR; } } @Override public Status scan(String tableName, String startKey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { StatementType type = new StatementType(StatementType.Type.SCAN, tableName, 1, "", getShardIndexByKey(startKey)); PreparedStatement scanStatement = cachedStatements.get(type); if (scanStatement == null) { scanStatement = createAndCacheScanStatement(type, startKey); } // SQL Server TOP syntax is at first if (sqlserverScans) { 
scanStatement.setInt(1, recordcount); scanStatement.setString(2, startKey); // FETCH FIRST and LIMIT are at the end } else { scanStatement.setString(1, startKey); scanStatement.setInt(2, recordcount); } ResultSet resultSet = scanStatement.executeQuery(); for (int i = 0; i < recordcount && resultSet.next(); i++) { if (result != null && fields != null) { HashMap<String, ByteIterator> values = new HashMap<String, ByteIterator>(); for (String field : fields) { String value = resultSet.getString(field); values.put(field, new StringByteIterator(value)); } result.add(values); } } resultSet.close(); return Status.OK; } catch (SQLException e) { System.err.println("Error in processing scan of table: " + tableName + e); return Status.ERROR; } } @Override public Status update(String tableName, String key, Map<String, ByteIterator> values) { try { int numFields = values.size(); OrderedFieldInfo fieldInfo = getFieldInfo(values); StatementType type = new StatementType(StatementType.Type.UPDATE, tableName, numFields, fieldInfo.getFieldKeys(), getShardIndexByKey(key)); PreparedStatement updateStatement = cachedStatements.get(type); if (updateStatement == null) { updateStatement = createAndCacheUpdateStatement(type, key); } int index = 1; for (String value: fieldInfo.getFieldValues()) { updateStatement.setString(index++, value); } updateStatement.setString(index, key); int result = updateStatement.executeUpdate(); if (result == 1) { return Status.OK; } return Status.UNEXPECTED_STATE; } catch (SQLException e) { System.err.println("Error in processing update to table: " + tableName + e); return Status.ERROR; } } @Override public Status insert(String tableName, String key, Map<String, ByteIterator> values) { try { int numFields = values.size(); OrderedFieldInfo fieldInfo = getFieldInfo(values); StatementType type = new StatementType(StatementType.Type.INSERT, tableName, numFields, fieldInfo.getFieldKeys(), getShardIndexByKey(key)); PreparedStatement insertStatement = cachedStatements.get(type); if (insertStatement == null) { insertStatement = createAndCacheInsertStatement(type, key); } insertStatement.setString(1, key); int index = 2; for (String value: fieldInfo.getFieldValues()) { insertStatement.setString(index++, value); } // Using the batch insert API if (batchUpdates) { insertStatement.addBatch(); // Check for a sane batch size if (batchSize > 0) { // Commit the batch after it grows beyond the configured size if (++numRowsInBatch % batchSize == 0) { int[] results = insertStatement.executeBatch(); for (int r : results) { // Acceptable values are 1 and SUCCESS_NO_INFO (-2) from reWriteBatchedInserts=true if (r != 1 && r != -2) { return Status.ERROR; } } // If autoCommit is off, make sure we commit the batch if (!autoCommit) { getShardConnectionByKey(key).commit(); } return Status.OK; } // else, the default value of -1 or a nonsense. Treat it as an infinitely large batch. } // else, we let the batch accumulate // Added element to the batch, potentially committing the batch too. 
return Status.BATCHED_OK; } else { // Normal update int result = insertStatement.executeUpdate(); // If we are not autoCommit, we might have to commit now if (!autoCommit) { // Let updates be batcher locally if (batchSize > 0) { if (++numRowsInBatch % batchSize == 0) { // Send the batch of updates getShardConnectionByKey(key).commit(); } // uhh return Status.OK; } else { // Commit each update getShardConnectionByKey(key).commit(); } } if (result == 1) { return Status.OK; } } return Status.UNEXPECTED_STATE; } catch (SQLException e) { System.err.println("Error in processing insert to table: " + tableName + e); return Status.ERROR; } } @Override public Status delete(String tableName, String key) { try { StatementType type = new StatementType(StatementType.Type.DELETE, tableName, 1, "", getShardIndexByKey(key)); PreparedStatement deleteStatement = cachedStatements.get(type); if (deleteStatement == null) { deleteStatement = createAndCacheDeleteStatement(type, key); } deleteStatement.setString(1, key); int result = deleteStatement.executeUpdate(); if (result == 1) { return Status.OK; } return Status.UNEXPECTED_STATE; } catch (SQLException e) { System.err.println("Error in processing delete to table: " + tableName + e); return Status.ERROR; } } private OrderedFieldInfo getFieldInfo(Map<String, ByteIterator> values) { String fieldKeys = ""; List<String> fieldValues = new ArrayList<>(); int count = 0; for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { fieldKeys += entry.getKey(); if (count < values.size() - 1) { fieldKeys += ","; } fieldValues.add(count, entry.getValue().toString()); count++; } return new OrderedFieldInfo(fieldKeys, fieldValues); } }
19,288
35.054206
119
java
null
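One detail of JdbcDBClient.init() worth illustrating is the sharding scheme: db.url may carry several JDBC URLs separated by semicolons, one Connection is opened per URL, and each key is routed with Math.abs(key.hashCode()) % number-of-connections. A hedged sketch of a two-shard configuration; the driver class, host names and table contents are placeholders, not values from this repo.

import java.util.HashMap;
import java.util.Properties;
import site.ycsb.ByteIterator;
import site.ycsb.StringByteIterator;
import site.ycsb.db.JdbcDBClient;

public class ShardedJdbcSketch {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.setProperty("db.driver", "org.postgresql.Driver"); // placeholder driver class
    // Two shard URLs, delimited by ';' exactly as init() expects
    props.setProperty("db.url",
        "jdbc:postgresql://shard0:5432/ycsb;jdbc:postgresql://shard1:5432/ycsb");
    props.setProperty("db.user", "ycsb");

    JdbcDBClient client = new JdbcDBClient();
    client.setProperties(props);
    client.init(); // opens one Connection per shard URL

    HashMap<String, ByteIterator> row = new HashMap<>();
    row.put("FIELD0", new StringByteIterator("value0"));
    client.insert("usertable", "user42", row); // routed to shard |"user42".hashCode()| % 2
    client.cleanup();
  }
}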
NearPMSW-main/baseline/logging/YCSB/jdbc/src/main/java/site/ycsb/db/flavors/package-info.java
/** * Copyright (c) 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * This package contains a collection of database-specific overrides. This accounts for the variance * that can be present where JDBC does not explicitly define what a database must do or when a * database has a non-standard SQL implementation. */ package site.ycsb.db.flavors;
943
40.043478
100
java
null
NearPMSW-main/baseline/logging/YCSB/jdbc/src/main/java/site/ycsb/db/flavors/DBFlavor.java
/** * Copyright (c) 2016, 2019 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.flavors; import site.ycsb.db.StatementType; /** * DBFlavor captures minor differences in syntax and behavior among JDBC implementations and SQL * dialects. This class also acts as a factory to instantiate concrete flavors based on the JDBC URL. */ public abstract class DBFlavor { enum DBName { DEFAULT, PHOENIX } private final DBName dbName; public DBFlavor(DBName dbName) { this.dbName = dbName; } public static DBFlavor fromJdbcUrl(String url) { if (url.startsWith("jdbc:phoenix")) { return new PhoenixDBFlavor(); } return new DefaultDBFlavor(); } /** * Create and return a SQL statement for inserting data. */ public abstract String createInsertStatement(StatementType insertType, String key); /** * Create and return a SQL statement for reading data. */ public abstract String createReadStatement(StatementType readType, String key); /** * Create and return a SQL statement for deleting data. */ public abstract String createDeleteStatement(StatementType deleteType, String key); /** * Create and return a SQL statement for updating data. */ public abstract String createUpdateStatement(StatementType updateType, String key); /** * Create and return a SQL statement for scanning data. */ public abstract String createScanStatement(StatementType scanType, String key, boolean sqlserverScans, boolean sqlansiScans); }
2,159
29.422535
101
java
null
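The factory method is the only place the flavor decision is made: any URL starting with jdbc:phoenix yields a PhoenixDBFlavor, everything else falls back to DefaultDBFlavor. A small illustration, with placeholder host names:

import site.ycsb.db.flavors.DBFlavor;

public class FlavorDispatchSketch {
  public static void main(String[] args) {
    DBFlavor phoenix = DBFlavor.fromJdbcUrl("jdbc:phoenix:zookeeper-host"); // placeholder host
    DBFlavor generic = DBFlavor.fromJdbcUrl("jdbc:mysql://localhost/ycsb"); // placeholder URL
    System.out.println(phoenix.getClass().getSimpleName()); // PhoenixDBFlavor
    System.out.println(generic.getClass().getSimpleName()); // DefaultDBFlavor
  }
}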
NearPMSW-main/baseline/logging/YCSB/jdbc/src/main/java/site/ycsb/db/flavors/DefaultDBFlavor.java
/** * Copyright (c) 2016, 2019 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.flavors; import site.ycsb.db.JdbcDBClient; import site.ycsb.db.StatementType; /** * A default flavor for relational databases. */ public class DefaultDBFlavor extends DBFlavor { public DefaultDBFlavor() { super(DBName.DEFAULT); } public DefaultDBFlavor(DBName dbName) { super(dbName); } @Override public String createInsertStatement(StatementType insertType, String key) { StringBuilder insert = new StringBuilder("INSERT INTO "); insert.append(insertType.getTableName()); insert.append(" (" + JdbcDBClient.PRIMARY_KEY + "," + insertType.getFieldString() + ")"); insert.append(" VALUES(?"); for (int i = 0; i < insertType.getNumFields(); i++) { insert.append(",?"); } insert.append(")"); return insert.toString(); } @Override public String createReadStatement(StatementType readType, String key) { StringBuilder read = new StringBuilder("SELECT * FROM "); read.append(readType.getTableName()); read.append(" WHERE "); read.append(JdbcDBClient.PRIMARY_KEY); read.append(" = "); read.append("?"); return read.toString(); } @Override public String createDeleteStatement(StatementType deleteType, String key) { StringBuilder delete = new StringBuilder("DELETE FROM "); delete.append(deleteType.getTableName()); delete.append(" WHERE "); delete.append(JdbcDBClient.PRIMARY_KEY); delete.append(" = ?"); return delete.toString(); } @Override public String createUpdateStatement(StatementType updateType, String key) { String[] fieldKeys = updateType.getFieldString().split(","); StringBuilder update = new StringBuilder("UPDATE "); update.append(updateType.getTableName()); update.append(" SET "); for (int i = 0; i < fieldKeys.length; i++) { update.append(fieldKeys[i]); update.append("=?"); if (i < fieldKeys.length - 1) { update.append(", "); } } update.append(" WHERE "); update.append(JdbcDBClient.PRIMARY_KEY); update.append(" = ?"); return update.toString(); } @Override public String createScanStatement(StatementType scanType, String key, boolean sqlserverScans, boolean sqlansiScans) { StringBuilder select; if (sqlserverScans) { select = new StringBuilder("SELECT TOP (?) * FROM "); } else { select = new StringBuilder("SELECT * FROM "); } select.append(scanType.getTableName()); select.append(" WHERE "); select.append(JdbcDBClient.PRIMARY_KEY); select.append(" >= ?"); select.append(" ORDER BY "); select.append(JdbcDBClient.PRIMARY_KEY); if (!sqlserverScans) { if (sqlansiScans) { select.append(" FETCH FIRST ? ROWS ONLY"); } else { select.append(" LIMIT ?"); } } return select.toString(); } }
3,501
30.836364
119
java
null
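createScanStatement is the only method here that branches on dialect: SQL Server puts TOP (?) right after SELECT, SQL:2008 engines take FETCH FIRST ? ROWS ONLY after the ORDER BY, and everything else gets LIMIT ?. The easiest way to see the three variants is to print them; the sketch below lives in package site.ycsb.db only so it can construct a StatementType, and the class name is hypothetical.

package site.ycsb.db;

import site.ycsb.db.flavors.DefaultDBFlavor;

public final class ScanSqlSketch {
  public static void main(String[] args) {
    StatementType scan = new StatementType(StatementType.Type.SCAN, "usertable", 1, "", 0);
    DefaultDBFlavor flavor = new DefaultDBFlavor();
    // SQL Server style: TOP (?) directly after SELECT
    System.out.println(flavor.createScanStatement(scan, "user1", true, false));
    // SQL:2008 style: FETCH FIRST ? ROWS ONLY after the ORDER BY
    System.out.println(flavor.createScanStatement(scan, "user1", false, true));
    // MySQL / older PostgreSQL style: LIMIT ?
    System.out.println(flavor.createScanStatement(scan, "user1", false, false));
  }
}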
NearPMSW-main/baseline/logging/YCSB/jdbc/src/main/java/site/ycsb/db/flavors/PhoenixDBFlavor.java
/** * Copyright (c) 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.flavors; import site.ycsb.db.JdbcDBClient; import site.ycsb.db.StatementType; /** * Database flavor for Apache Phoenix. Captures syntax differences used by Phoenix. */ public class PhoenixDBFlavor extends DefaultDBFlavor { public PhoenixDBFlavor() { super(DBName.PHOENIX); } @Override public String createInsertStatement(StatementType insertType, String key) { // Phoenix uses UPSERT syntax StringBuilder insert = new StringBuilder("UPSERT INTO "); insert.append(insertType.getTableName()); insert.append(" (" + JdbcDBClient.PRIMARY_KEY + "," + insertType.getFieldString() + ")"); insert.append(" VALUES(?"); for (int i = 0; i < insertType.getNumFields(); i++) { insert.append(",?"); } insert.append(")"); return insert.toString(); } @Override public String createUpdateStatement(StatementType updateType, String key) { // Phoenix doesn't have UPDATE semantics, just re-use UPSERT VALUES on the specific columns String[] fieldKeys = updateType.getFieldString().split(","); StringBuilder update = new StringBuilder("UPSERT INTO "); update.append(updateType.getTableName()); update.append(" ("); // Each column to update for (int i = 0; i < fieldKeys.length; i++) { update.append(fieldKeys[i]).append(","); } // And then set the primary key column update.append(JdbcDBClient.PRIMARY_KEY).append(") VALUES("); // Add an unbound param for each column to update for (int i = 0; i < fieldKeys.length; i++) { update.append("?, "); } // Then the primary key column's value update.append("?)"); return update.toString(); } }
2,339
34.454545
95
java
null
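As a worked example of the Phoenix syntax differences: for a record with fields FIELD0 and FIELD1 in table usertable, createInsertStatement produces UPSERT INTO usertable (YCSB_KEY,FIELD0,FIELD1) VALUES(?,?,?), and createUpdateStatement, since Phoenix has no UPDATE, produces UPSERT INTO usertable (FIELD0,FIELD1,YCSB_KEY) VALUES(?, ?, ?) with the primary key bound last.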
NearPMSW-main/baseline/logging/YCSB/cassandra/src/test/java/site/ycsb/db/CassandraCQLClientTest.java
/** * Copyright (c) 2015 YCSB contributors All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import com.google.common.collect.Sets; import com.datastax.driver.core.ResultSet; import com.datastax.driver.core.Row; import com.datastax.driver.core.Session; import com.datastax.driver.core.Statement; import com.datastax.driver.core.querybuilder.Insert; import com.datastax.driver.core.querybuilder.QueryBuilder; import com.datastax.driver.core.querybuilder.Select; import site.ycsb.ByteIterator; import site.ycsb.Status; import site.ycsb.StringByteIterator; import site.ycsb.measurements.Measurements; import site.ycsb.workloads.CoreWorkload; import org.cassandraunit.CassandraCQLUnit; import org.cassandraunit.dataset.cql.ClassPathCQLDataSet; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.Set; /** * Integration tests for the Cassandra client */ public class CassandraCQLClientTest { // Change the default Cassandra timeout from 10s to 120s for slow CI machines private final static long timeout = 120000L; private final static String TABLE = "usertable"; private final static String HOST = "localhost"; private final static int PORT = 9142; private final static String DEFAULT_ROW_KEY = "user1"; private CassandraCQLClient client; private Session session; @ClassRule public static CassandraCQLUnit cassandraUnit = new CassandraCQLUnit( new ClassPathCQLDataSet("ycsb.cql", "ycsb"), null, timeout); @Before public void setUp() throws Exception { session = cassandraUnit.getSession(); Properties p = new Properties(); p.setProperty("hosts", HOST); p.setProperty("port", Integer.toString(PORT)); p.setProperty("table", TABLE); Measurements.setProperties(p); final CoreWorkload workload = new CoreWorkload(); workload.init(p); client = new CassandraCQLClient(); client.setProperties(p); client.init(); } @After public void tearDownClient() throws Exception { if (client != null) { client.cleanup(); } client = null; } @After public void clearTable() throws Exception { // Clear the table so that each test starts fresh. 
final Statement truncate = QueryBuilder.truncate(TABLE); if (cassandraUnit != null) { cassandraUnit.getSession().execute(truncate); } } @Test public void testReadMissingRow() throws Exception { final HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>(); final Status status = client.read(TABLE, "Missing row", null, result); assertThat(result.size(), is(0)); assertThat(status, is(Status.NOT_FOUND)); } private void insertRow() { final String rowKey = DEFAULT_ROW_KEY; Insert insertStmt = QueryBuilder.insertInto(TABLE); insertStmt.value(CassandraCQLClient.YCSB_KEY, rowKey); insertStmt.value("field0", "value1"); insertStmt.value("field1", "value2"); session.execute(insertStmt); } @Test public void testRead() throws Exception { insertRow(); final HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>(); final Status status = client.read(TABLE, DEFAULT_ROW_KEY, null, result); assertThat(status, is(Status.OK)); assertThat(result.entrySet(), hasSize(11)); assertThat(result, hasEntry("field2", null)); final HashMap<String, String> strResult = new HashMap<String, String>(); for (final Map.Entry<String, ByteIterator> e : result.entrySet()) { if (e.getValue() != null) { strResult.put(e.getKey(), e.getValue().toString()); } } assertThat(strResult, hasEntry(CassandraCQLClient.YCSB_KEY, DEFAULT_ROW_KEY)); assertThat(strResult, hasEntry("field0", "value1")); assertThat(strResult, hasEntry("field1", "value2")); } @Test public void testReadSingleColumn() throws Exception { insertRow(); final HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>(); final Set<String> fields = Sets.newHashSet("field1"); final Status status = client.read(TABLE, DEFAULT_ROW_KEY, fields, result); assertThat(status, is(Status.OK)); assertThat(result.entrySet(), hasSize(1)); final Map<String, String> strResult = StringByteIterator.getStringMap(result); assertThat(strResult, hasEntry("field1", "value2")); } @Test public void testInsert() throws Exception { final String key = "key"; final Map<String, String> input = new HashMap<String, String>(); input.put("field0", "value1"); input.put("field1", "value2"); final Status status = client.insert(TABLE, key, StringByteIterator.getByteIteratorMap(input)); assertThat(status, is(Status.OK)); // Verify result final Select selectStmt = QueryBuilder.select("field0", "field1") .from(TABLE) .where(QueryBuilder.eq(CassandraCQLClient.YCSB_KEY, key)) .limit(1); final ResultSet rs = session.execute(selectStmt); final Row row = rs.one(); assertThat(row, notNullValue()); assertThat(rs.isExhausted(), is(true)); assertThat(row.getString("field0"), is("value1")); assertThat(row.getString("field1"), is("value2")); } @Test public void testUpdate() throws Exception { insertRow(); final Map<String, String> input = new HashMap<String, String>(); input.put("field0", "new-value1"); input.put("field1", "new-value2"); final Status status = client.update(TABLE, DEFAULT_ROW_KEY, StringByteIterator.getByteIteratorMap(input)); assertThat(status, is(Status.OK)); // Verify result final Select selectStmt = QueryBuilder.select("field0", "field1") .from(TABLE) .where(QueryBuilder.eq(CassandraCQLClient.YCSB_KEY, DEFAULT_ROW_KEY)) .limit(1); final ResultSet rs = session.execute(selectStmt); final Row row = rs.one(); assertThat(row, notNullValue()); assertThat(rs.isExhausted(), is(true)); assertThat(row.getString("field0"), is("new-value1")); assertThat(row.getString("field1"), is("new-value2")); } @Test public void testDelete() throws Exception { insertRow(); final 
Status status = client.delete(TABLE, DEFAULT_ROW_KEY); assertThat(status, is(Status.OK)); // Verify result final Select selectStmt = QueryBuilder.select("field0", "field1") .from(TABLE) .where(QueryBuilder.eq(CassandraCQLClient.YCSB_KEY, DEFAULT_ROW_KEY)) .limit(1); final ResultSet rs = session.execute(selectStmt); final Row row = rs.one(); assertThat(row, nullValue()); } @Test public void testPreparedStatements() throws Exception { final int LOOP_COUNT = 3; for (int i = 0; i < LOOP_COUNT; i++) { testInsert(); testUpdate(); testRead(); testReadSingleColumn(); testReadMissingRow(); testDelete(); } } }
7,941
31.818182
98
java
null
NearPMSW-main/baseline/logging/YCSB/cassandra/src/main/java/site/ycsb/db/CassandraCQLClient.java
/** * Copyright (c) 2013-2015 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. See accompanying LICENSE file. * * Submitted by Chrisjan Matser on 10/11/2010. */ package site.ycsb.db; import com.datastax.driver.core.Cluster; import com.datastax.driver.core.ColumnDefinitions; import com.datastax.driver.core.ConsistencyLevel; import com.datastax.driver.core.Host; import com.datastax.driver.core.HostDistance; import com.datastax.driver.core.Metadata; import com.datastax.driver.core.ResultSet; import com.datastax.driver.core.Row; import com.datastax.driver.core.Session; import com.datastax.driver.core.PreparedStatement; import com.datastax.driver.core.BoundStatement; import com.datastax.driver.core.querybuilder.Insert; import com.datastax.driver.core.querybuilder.QueryBuilder; import com.datastax.driver.core.querybuilder.Select; import com.datastax.driver.core.querybuilder.Update; import site.ycsb.ByteArrayByteIterator; import site.ycsb.ByteIterator; import site.ycsb.DB; import site.ycsb.DBException; import site.ycsb.Status; import java.nio.ByteBuffer; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.Vector; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.helpers.MessageFormatter; /** * Cassandra 2.x CQL client. * * See {@code cassandra2/README.md} for details. 
* * @author cmatser */ public class CassandraCQLClient extends DB { private static Logger logger = LoggerFactory.getLogger(CassandraCQLClient.class); private static Cluster cluster = null; private static Session session = null; private static ConcurrentMap<Set<String>, PreparedStatement> readStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> scanStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> insertStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> updateStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static AtomicReference<PreparedStatement> readAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> scanAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> deleteStmt = new AtomicReference<PreparedStatement>(); private static ConsistencyLevel readConsistencyLevel = ConsistencyLevel.QUORUM; private static ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.QUORUM; public static final String YCSB_KEY = "y_id"; public static final String KEYSPACE_PROPERTY = "cassandra.keyspace"; public static final String KEYSPACE_PROPERTY_DEFAULT = "ycsb"; public static final String USERNAME_PROPERTY = "cassandra.username"; public static final String PASSWORD_PROPERTY = "cassandra.password"; public static final String HOSTS_PROPERTY = "hosts"; public static final String PORT_PROPERTY = "port"; public static final String PORT_PROPERTY_DEFAULT = "9042"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY = "cassandra.readconsistencylevel"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = readConsistencyLevel.name(); public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY = "cassandra.writeconsistencylevel"; public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = writeConsistencyLevel.name(); public static final String MAX_CONNECTIONS_PROPERTY = "cassandra.maxconnections"; public static final String CORE_CONNECTIONS_PROPERTY = "cassandra.coreconnections"; public static final String CONNECT_TIMEOUT_MILLIS_PROPERTY = "cassandra.connecttimeoutmillis"; public static final String READ_TIMEOUT_MILLIS_PROPERTY = "cassandra.readtimeoutmillis"; public static final String TRACING_PROPERTY = "cassandra.tracing"; public static final String TRACING_PROPERTY_DEFAULT = "false"; public static final String USE_SSL_CONNECTION = "cassandra.useSSL"; private static final String DEFAULT_USE_SSL_CONNECTION = "false"; /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static boolean debug = false; private static boolean trace = false; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. */ @Override public void init() throws DBException { // Keep track of number of calls to init (for later cleanup) INIT_COUNT.incrementAndGet(); // Synchronized so that we only have a single // cluster/session instance for all the threads. 
synchronized (INIT_COUNT) { // Check if the cluster has already been initialized if (cluster != null) { return; } try { debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false")); trace = Boolean.valueOf(getProperties().getProperty(TRACING_PROPERTY, TRACING_PROPERTY_DEFAULT)); String host = getProperties().getProperty(HOSTS_PROPERTY); if (host == null) { throw new DBException(String.format( "Required property \"%s\" missing for CassandraCQLClient", HOSTS_PROPERTY)); } String[] hosts = host.split(","); String port = getProperties().getProperty(PORT_PROPERTY, PORT_PROPERTY_DEFAULT); String username = getProperties().getProperty(USERNAME_PROPERTY); String password = getProperties().getProperty(PASSWORD_PROPERTY); String keyspace = getProperties().getProperty(KEYSPACE_PROPERTY, KEYSPACE_PROPERTY_DEFAULT); readConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); writeConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); Boolean useSSL = Boolean.parseBoolean(getProperties().getProperty(USE_SSL_CONNECTION, DEFAULT_USE_SSL_CONNECTION)); if ((username != null) && !username.isEmpty()) { Cluster.Builder clusterBuilder = Cluster.builder().withCredentials(username, password) .withPort(Integer.valueOf(port)).addContactPoints(hosts); if (useSSL) { clusterBuilder = clusterBuilder.withSSL(); } cluster = clusterBuilder.build(); } else { cluster = Cluster.builder().withPort(Integer.valueOf(port)) .addContactPoints(hosts).build(); } String maxConnections = getProperties().getProperty( MAX_CONNECTIONS_PROPERTY); if (maxConnections != null) { cluster.getConfiguration().getPoolingOptions() .setMaxConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(maxConnections)); } String coreConnections = getProperties().getProperty( CORE_CONNECTIONS_PROPERTY); if (coreConnections != null) { cluster.getConfiguration().getPoolingOptions() .setCoreConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(coreConnections)); } String connectTimoutMillis = getProperties().getProperty( CONNECT_TIMEOUT_MILLIS_PROPERTY); if (connectTimoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setConnectTimeoutMillis(Integer.valueOf(connectTimoutMillis)); } String readTimoutMillis = getProperties().getProperty( READ_TIMEOUT_MILLIS_PROPERTY); if (readTimoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setReadTimeoutMillis(Integer.valueOf(readTimoutMillis)); } Metadata metadata = cluster.getMetadata(); logger.info("Connected to cluster: {}\n", metadata.getClusterName()); for (Host discoveredHost : metadata.getAllHosts()) { logger.info("Datacenter: {}; Host: {}; Rack: {}\n", discoveredHost.getDatacenter(), discoveredHost.getAddress(), discoveredHost.getRack()); } session = cluster.connect(keyspace); } catch (Exception e) { throw new DBException(e); } } // synchronized } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { final int curInitCount = INIT_COUNT.decrementAndGet(); if (curInitCount <= 0) { readStmts.clear(); scanStmts.clear(); insertStmts.clear(); updateStmts.clear(); readAllStmt.set(null); scanAllStmt.set(null); deleteStmt.set(null); session.close(); cluster.close(); cluster = null; session = null; } if (curInitCount < 0) { // This should never happen. 
throw new DBException( String.format("initCount is negative: %d", curInitCount)); } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { PreparedStatement stmt = (fields == null) ? readAllStmt.get() : readStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } stmt = session.prepare(selectBuilder.from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())) .limit(1)); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? readAllStmt.getAndSet(stmt) : readStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); ResultSet rs = session.execute(stmt.bind(key)); if (rs.isExhausted()) { return Status.NOT_FOUND; } // Should be only 1 row Row row = rs.one(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { result.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { result.put(def.getName(), null); } } return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error reading key: {}", key).getMessage(), e); return Status.ERROR; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * Cassandra CQL uses "token" method for range scan which doesn't always yield * intuitive results. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { PreparedStatement stmt = (fields == null) ? scanAllStmt.get() : scanStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } Select selectStmt = selectBuilder.from(table); // The statement builder is not setup right for tokens. // So, we need to build it manually. 
String initialStmt = selectStmt.toString(); StringBuilder scanStmt = new StringBuilder(); scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1)); scanStmt.append(" WHERE "); scanStmt.append(QueryBuilder.token(YCSB_KEY)); scanStmt.append(" >= "); scanStmt.append("token("); scanStmt.append(QueryBuilder.bindMarker()); scanStmt.append(")"); scanStmt.append(" LIMIT "); scanStmt.append(QueryBuilder.bindMarker()); stmt = session.prepare(scanStmt.toString()); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? scanAllStmt.getAndSet(stmt) : scanStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("startKey = {}, recordcount = {}", startkey, recordcount); ResultSet rs = session.execute(stmt.bind(startkey, Integer.valueOf(recordcount))); HashMap<String, ByteIterator> tuple; while (!rs.isExhausted()) { Row row = rs.one(); tuple = new HashMap<String, ByteIterator>(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { tuple.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { tuple.put(def.getName(), null); } } result.add(tuple); } return Status.OK; } catch (Exception e) { logger.error( MessageFormatter.format("Error scanning with startkey: {}", startkey).getMessage(), e); return Status.ERROR; } } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { Set<String> fields = values.keySet(); PreparedStatement stmt = updateStmts.get(fields); // Prepare statement on demand if (stmt == null) { Update updateStmt = QueryBuilder.update(table); // Add fields for (String field : fields) { updateStmt.with(QueryBuilder.set(field, QueryBuilder.bindMarker())); } // Add key updateStmt.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())); stmt = session.prepare(updateStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = updateStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add fields ColumnDefinitions vars = stmt.getVariables(); BoundStatement boundStmt = stmt.bind(); for (int i = 0; i < vars.size() - 1; i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } // Add key boundStmt.setString(vars.size() - 1, key); session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error updating key: {}", key).getMessage(), e); } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { Set<String> fields = values.keySet(); PreparedStatement stmt = insertStmts.get(fields); // Prepare statement on demand if (stmt == null) { Insert insertStmt = QueryBuilder.insertInto(table); // Add key insertStmt.value(YCSB_KEY, QueryBuilder.bindMarker()); // Add fields for (String field : fields) { insertStmt.value(field, QueryBuilder.bindMarker()); } stmt = session.prepare(insertStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = insertStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add key BoundStatement boundStmt = stmt.bind().setString(0, key); // Add fields ColumnDefinitions vars = stmt.getVariables(); for (int i = 1; i < vars.size(); i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error inserting key: {}", key).getMessage(), e); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ @Override public Status delete(String table, String key) { try { PreparedStatement stmt = deleteStmt.get(); // Prepare statement on demand if (stmt == null) { stmt = session.prepare(QueryBuilder.delete().from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker()))); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = deleteStmt.getAndSet(stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); session.execute(stmt.bind(key)); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error deleting key: {}", key).getMessage(), e); } return Status.ERROR; } }
21,717
32.934375
105
java
null
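A minimal, self-contained sketch of the prepare-on-demand pattern the Cassandra binding above uses for reads: prepared statements are cached per requested field set, and putIfAbsent resolves the race when two threads prepare the same statement at once. The class name and the statement string are illustrative stand-ins, not the binding's actual driver calls.

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

final class StatementCacheSketch {
  private final ConcurrentHashMap<Set<String>, String> readStmts = new ConcurrentHashMap<>();

  String prepareOnDemand(Set<String> fields) {
    String stmt = readStmts.get(fields);
    if (stmt == null) {
      // "Prepare" lazily; a plain String stands in for a driver PreparedStatement.
      stmt = "SELECT " + String.join(", ", fields) + " FROM usertable WHERE y_id = ? LIMIT 1";
      String prev = readStmts.putIfAbsent(new HashSet<>(fields), stmt);
      if (prev != null) {
        stmt = prev; // another thread prepared it first; reuse its statement
      }
    }
    return stmt;
  }
}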
NearPMSW-main/baseline/logging/YCSB/cassandra/src/main/java/site/ycsb/db/package-info.java
/* * Copyright (c) 2014, Yahoo!, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * The YCSB binding for <a href="http://cassandra.apache.org/">Cassandra</a> * 2.1+ via CQL. */ package site.ycsb.db;
779
31.5
77
java
null
NearPMSW-main/baseline/logging/YCSB/geode/src/main/java/site/ycsb/db/package-info.java
/* * Copyright (c) 2014-2016, Yahoo!, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * YCSB binding for <a href="https://geode.apache.org/">Apache Geode</a>. */ package site.ycsb.db;
762
33.681818
73
java
null
NearPMSW-main/baseline/logging/YCSB/geode/src/main/java/site/ycsb/db/GeodeClient.java
/** * Copyright (c) 2013 - 2016 YCSB Contributors. All rights reserved. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db; import java.net.InetSocketAddress; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; import java.util.regex.Matcher; import java.util.regex.Pattern; import site.ycsb.ByteArrayByteIterator; import site.ycsb.ByteIterator; import site.ycsb.DB; import site.ycsb.DBException; import site.ycsb.Status; import org.apache.geode.cache.Cache; import org.apache.geode.cache.CacheFactory; import org.apache.geode.cache.GemFireCache; import org.apache.geode.cache.Region; import org.apache.geode.cache.RegionExistsException; import org.apache.geode.cache.RegionFactory; import org.apache.geode.cache.RegionShortcut; import org.apache.geode.cache.client.ClientCache; import org.apache.geode.cache.client.ClientCacheFactory; import org.apache.geode.cache.client.ClientRegionFactory; import org.apache.geode.cache.client.ClientRegionShortcut; import org.apache.geode.pdx.JSONFormatter; import org.apache.geode.pdx.PdxInstance; import org.apache.geode.pdx.PdxInstanceFactory; /** * Apache Geode client for the YCSB benchmark.<br /> * <p>By default acts as a Geode client and tries to connect * to Geode cache server running on localhost with default * cache server port. Hostname and port of a Geode cacheServer * can be provided using <code>geode.serverport=port</code> and <code> * geode.serverhost=host</code> properties on YCSB command line. * A locator may also be used for discovering a cacheServer * by using the property <code>geode.locator=host[port]</code></p> * <p> * <p>To run this client in a peer-to-peer topology with other Geode * nodes, use the property <code>geode.topology=p2p</code>. Running * in p2p mode will enable embedded caching in this client.</p> * <p> * <p>YCSB by default does its operations against "usertable". When running * as a client this is a <code>ClientRegionShortcut.PROXY</code> region, * when running in p2p mode it is a <code>RegionShortcut.PARTITION</code> * region. A cache.xml defining "usertable" region can be placed in the * working directory to override these region definitions.</p> */ public class GeodeClient extends DB { /** * property name of the port where Geode server is listening for connections. */ private static final String SERVERPORT_PROPERTY_NAME = "geode.serverport"; /** * property name of the host where Geode server is running. */ private static final String SERVERHOST_PROPERTY_NAME = "geode.serverhost"; /** * default value of {@link #SERVERHOST_PROPERTY_NAME}. */ private static final String SERVERHOST_PROPERTY_DEFAULT = "localhost"; /** * property name to specify a Geode locator. This property can be used in both * client server and p2p topology */ private static final String LOCATOR_PROPERTY_NAME = "geode.locator"; /** * property name to specify Geode topology. 
*/ private static final String TOPOLOGY_PROPERTY_NAME = "geode.topology"; /** * value of {@value #TOPOLOGY_PROPERTY_NAME} when peer to peer topology should be used. * (client-server topology is default) */ private static final String TOPOLOGY_P2P_VALUE = "p2p"; /** * Pattern to split up a locator string in the form host[port]. */ private static final Pattern LOCATOR_PATTERN = Pattern.compile("(.+)\\[(\\d+)\\]");; private GemFireCache cache; /** * true if ycsb client runs as a client to a Geode cache server. */ private boolean isClient; @Override public void init() throws DBException { Properties props = getProperties(); // hostName where Geode cacheServer is running String serverHost = null; // port of Geode cacheServer int serverPort = 0; String locatorStr = null; if (props != null && !props.isEmpty()) { String serverPortStr = props.getProperty(SERVERPORT_PROPERTY_NAME); if (serverPortStr != null) { serverPort = Integer.parseInt(serverPortStr); } serverHost = props.getProperty(SERVERHOST_PROPERTY_NAME, SERVERHOST_PROPERTY_DEFAULT); locatorStr = props.getProperty(LOCATOR_PROPERTY_NAME); String topology = props.getProperty(TOPOLOGY_PROPERTY_NAME); if (topology != null && topology.equals(TOPOLOGY_P2P_VALUE)) { CacheFactory cf = new CacheFactory(); if (locatorStr != null) { cf.set("locators", locatorStr); } cache = cf.create(); isClient = false; return; } } isClient = true; ClientCacheFactory ccf = new ClientCacheFactory(); ccf.setPdxReadSerialized(true); if (serverPort != 0) { ccf.addPoolServer(serverHost, serverPort); } else { InetSocketAddress locatorAddress = getLocatorAddress(locatorStr); ccf.addPoolLocator(locatorAddress.getHostName(), locatorAddress.getPort()); } cache = ccf.create(); } static InetSocketAddress getLocatorAddress(String locatorStr) { Matcher matcher = LOCATOR_PATTERN.matcher(locatorStr); if(!matcher.matches()) { throw new IllegalStateException("Unable to parse locator: " + locatorStr); } return new InetSocketAddress(matcher.group(1), Integer.parseInt(matcher.group(2))); } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Region<String, PdxInstance> r = getRegion(table); PdxInstance val = r.get(key); if (val != null) { if (fields == null) { for (String fieldName : val.getFieldNames()) { result.put(fieldName, new ByteArrayByteIterator((byte[]) val.getField(fieldName))); } } else { for (String field : fields) { result.put(field, new ByteArrayByteIterator((byte[]) val.getField(field))); } } return Status.OK; } return Status.ERROR; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // Geode does not support scan return Status.ERROR; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { getRegion(table).put(key, convertToBytearrayMap(values)); return Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { getRegion(table).put(key, convertToBytearrayMap(values)); return Status.OK; } @Override public Status delete(String table, String key) { getRegion(table).destroy(key); return Status.OK; } private PdxInstance convertToBytearrayMap(Map<String, ByteIterator> values) { PdxInstanceFactory pdxInstanceFactory = cache.createPdxInstanceFactory(JSONFormatter.JSON_CLASSNAME); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { pdxInstanceFactory.writeByteArray(entry.getKey(), entry.getValue().toArray()); } return 
pdxInstanceFactory.create(); } private Region<String, PdxInstance> getRegion(String table) { Region<String, PdxInstance> r = cache.getRegion(table); if (r == null) { try { if (isClient) { ClientRegionFactory<String, PdxInstance> crf = ((ClientCache) cache).createClientRegionFactory(ClientRegionShortcut.PROXY); r = crf.create(table); } else { RegionFactory<String, PdxInstance> rf = ((Cache) cache).createRegionFactory(RegionShortcut.PARTITION); r = rf.create(table); } } catch (RegionExistsException e) { // another thread created the region r = cache.getRegion(table); } } return r; } }
8,327
33.991597
112
java
null
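The GeodeClient above accepts a locator in the form host[port]; the standalone sketch below mirrors its getLocatorAddress parsing (the main method and its sample value are illustrative only).

import java.net.InetSocketAddress;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

final class LocatorParseSketch {
  // Same pattern as the binding: "host[port]".
  private static final Pattern LOCATOR_PATTERN = Pattern.compile("(.+)\\[(\\d+)\\]");

  static InetSocketAddress parse(String locatorStr) {
    Matcher m = LOCATOR_PATTERN.matcher(locatorStr);
    if (!m.matches()) {
      throw new IllegalStateException("Unable to parse locator: " + locatorStr);
    }
    return new InetSocketAddress(m.group(1), Integer.parseInt(m.group(2)));
  }

  public static void main(String[] args) {
    InetSocketAddress addr = parse("localhost[10334]");
    System.out.println(addr.getHostName() + ":" + addr.getPort()); // localhost:10334
  }
}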
NearPMSW-main/baseline/logging/YCSB/kudu/src/main/java/site/ycsb/db/package-info.java
/** * Copyright (c) 2015-2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * The YCSB binding for <a href="http://kudu.apache.org/">Apache Kudu</a>. */ package site.ycsb.db;
770
32.521739
74
java
null
NearPMSW-main/baseline/logging/YCSB/kudu/src/main/java/site/ycsb/db/KuduYCSBClient.java
/** * Copyright (c) 2015-2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db; import com.stumbleupon.async.TimeoutException; import site.ycsb.ByteIterator; import site.ycsb.DBException; import site.ycsb.Status; import site.ycsb.StringByteIterator; import site.ycsb.workloads.CoreWorkload; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.kudu.ColumnSchema; import org.apache.kudu.Schema; import org.apache.kudu.client.*; import java.util.ArrayList; import java.util.HashMap; import java.util.Map; import java.util.List; import java.util.Properties; import java.util.Set; import java.util.Vector; import static site.ycsb.Client.DEFAULT_RECORD_COUNT; import static site.ycsb.Client.RECORD_COUNT_PROPERTY; import static site.ycsb.workloads.CoreWorkload.INSERT_ORDER_PROPERTY; import static site.ycsb.workloads.CoreWorkload.INSERT_ORDER_PROPERTY_DEFAULT; import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY; import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; import static site.ycsb.workloads.CoreWorkload.ZERO_PADDING_PROPERTY; import static site.ycsb.workloads.CoreWorkload.ZERO_PADDING_PROPERTY_DEFAULT; import static org.apache.kudu.Type.STRING; import static org.apache.kudu.client.KuduPredicate.ComparisonOp.EQUAL; import static org.apache.kudu.client.KuduPredicate.ComparisonOp.GREATER_EQUAL; /** * Kudu client for YCSB framework. 
Example to load: <blockquote> * * <pre> * <code> * $ ./bin/ycsb load kudu -P workloads/workloada -threads 5 * </code> * </pre> * * </blockquote> Example to run: <blockquote> * * <pre> * <code> * ./bin/ycsb run kudu -P workloads/workloada -p kudu_sync_ops=true -threads 5 * </code> * </pre> * * </blockquote> */ public class KuduYCSBClient extends site.ycsb.DB { private static final Logger LOG = LoggerFactory.getLogger(KuduYCSBClient.class); private static final String KEY = "key"; private static final Status TIMEOUT = new Status("TIMEOUT", "The operation timed out."); private static final int MAX_TABLETS = 9000; private static final long DEFAULT_SLEEP = 60000; private static final int DEFAULT_NUM_CLIENTS = 1; private static final int DEFAULT_NUM_REPLICAS = 3; private static final String DEFAULT_PARTITION_SCHEMA = "hashPartition"; private static final String SYNC_OPS_OPT = "kudu_sync_ops"; private static final String BUFFER_NUM_OPS_OPT = "kudu_buffer_num_ops"; private static final String PRE_SPLIT_NUM_TABLETS_OPT = "kudu_pre_split_num_tablets"; private static final String TABLE_NUM_REPLICAS = "kudu_table_num_replicas"; private static final String BLOCK_SIZE_OPT = "kudu_block_size"; private static final String MASTER_ADDRESSES_OPT = "kudu_master_addresses"; private static final String NUM_CLIENTS_OPT = "kudu_num_clients"; private static final String PARTITION_SCHEMA_OPT = "kudu_partition_schema"; private static final int BLOCK_SIZE_DEFAULT = 4096; private static final int BUFFER_NUM_OPS_DEFAULT = 2000; private static final List<String> COLUMN_NAMES = new ArrayList<>(); private static List<KuduClient> clients = new ArrayList<>(); private static int clientRoundRobin = 0; private static boolean tableSetup = false; private KuduClient client; private Schema schema; private String tableName; private KuduSession session; private KuduTable kuduTable; private String partitionSchema; private int zeropadding; private boolean orderedinserts; @Override public void init() throws DBException { Properties prop = getProperties(); this.tableName = prop.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); this.partitionSchema = prop.getProperty(PARTITION_SCHEMA_OPT, DEFAULT_PARTITION_SCHEMA); this.zeropadding = Integer.parseInt(prop.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT)); if (prop.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) { this.orderedinserts = false; } else { this.orderedinserts = true; } initClient(); this.session = client.newSession(); if (getProperties().getProperty(SYNC_OPS_OPT) != null && getProperties().getProperty(SYNC_OPS_OPT).equals("false")) { this.session.setFlushMode(KuduSession.FlushMode.AUTO_FLUSH_BACKGROUND); this.session.setMutationBufferSpace( getIntFromProp(getProperties(), BUFFER_NUM_OPS_OPT, BUFFER_NUM_OPS_DEFAULT)); } else { this.session.setFlushMode(KuduSession.FlushMode.AUTO_FLUSH_SYNC); } try { this.kuduTable = client.openTable(tableName); this.schema = kuduTable.getSchema(); } catch (Exception e) { throw new DBException("Could not open a table because of:", e); } } /** * Initialize the 'clients' member with the configured number of * clients. 
*/ private void initClients() throws DBException { synchronized (KuduYCSBClient.class) { if (!clients.isEmpty()) { return; } Properties prop = getProperties(); String masterAddresses = prop.getProperty(MASTER_ADDRESSES_OPT, "localhost:7051"); LOG.debug("Connecting to the masters at {}", masterAddresses); int numClients = getIntFromProp(prop, NUM_CLIENTS_OPT, DEFAULT_NUM_CLIENTS); for (int i = 0; i < numClients; i++) { clients.add(new KuduClient.KuduClientBuilder(masterAddresses) .defaultSocketReadTimeoutMs(DEFAULT_SLEEP) .defaultOperationTimeoutMs(DEFAULT_SLEEP) .defaultAdminOperationTimeoutMs(DEFAULT_SLEEP) .build()); } } } private void initClient() throws DBException { initClients(); synchronized (clients) { client = clients.get(clientRoundRobin++ % clients.size()); } setupTable(); } private void setupTable() throws DBException { Properties prop = getProperties(); synchronized (KuduYCSBClient.class) { if (tableSetup) { return; } int numTablets = getIntFromProp(prop, PRE_SPLIT_NUM_TABLETS_OPT, 4); if (numTablets > MAX_TABLETS) { throw new DBException("Specified number of tablets (" + numTablets + ") must be equal " + "or below " + MAX_TABLETS); } int numReplicas = getIntFromProp(prop, TABLE_NUM_REPLICAS, DEFAULT_NUM_REPLICAS); long recordCount = Long.parseLong(prop.getProperty(RECORD_COUNT_PROPERTY, DEFAULT_RECORD_COUNT)); if (recordCount == 0) { recordCount = Integer.MAX_VALUE; } int blockSize = getIntFromProp(prop, BLOCK_SIZE_OPT, BLOCK_SIZE_DEFAULT); int fieldCount = getIntFromProp(prop, CoreWorkload.FIELD_COUNT_PROPERTY, Integer.parseInt(CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT)); final String fieldprefix = prop.getProperty(CoreWorkload.FIELD_NAME_PREFIX, CoreWorkload.FIELD_NAME_PREFIX_DEFAULT); List<ColumnSchema> columns = new ArrayList<ColumnSchema>(fieldCount + 1); ColumnSchema keyColumn = new ColumnSchema.ColumnSchemaBuilder(KEY, STRING) .key(true) .desiredBlockSize(blockSize) .build(); columns.add(keyColumn); COLUMN_NAMES.add(KEY); for (int i = 0; i < fieldCount; i++) { String name = fieldprefix + i; COLUMN_NAMES.add(name); columns.add(new ColumnSchema.ColumnSchemaBuilder(name, STRING) .desiredBlockSize(blockSize) .build()); } schema = new Schema(columns); CreateTableOptions builder = new CreateTableOptions(); if (partitionSchema.equals("hashPartition")) { builder.setRangePartitionColumns(new ArrayList<String>()); List<String> hashPartitionColumns = new ArrayList<>(); hashPartitionColumns.add(KEY); builder.addHashPartitions(hashPartitionColumns, numTablets); } else if (partitionSchema.equals("rangePartition")) { if (!orderedinserts) { // We need to use ordered keys to determine how to split range partitions. 
throw new DBException("Must specify `insertorder=ordered` if using rangePartition schema."); } String maxKeyValue = String.valueOf(recordCount); if (zeropadding < maxKeyValue.length()) { throw new DBException(String.format("Invalid zeropadding value: %d, zeropadding needs to be larger " + "or equal to number of digits in the record number: %d.", zeropadding, maxKeyValue.length())); } List<String> rangePartitionColumns = new ArrayList<>(); rangePartitionColumns.add(KEY); builder.setRangePartitionColumns(rangePartitionColumns); // Add rangePartitions long lowerNum = 0; long upperNum = 0; int remainder = (int) recordCount % numTablets; for (int i = 0; i < numTablets; i++) { lowerNum = upperNum; upperNum = lowerNum + recordCount / numTablets; if (i < remainder) { ++upperNum; } PartialRow lower = schema.newPartialRow(); lower.addString(KEY, CoreWorkload.buildKeyName(lowerNum, zeropadding, orderedinserts)); PartialRow upper = schema.newPartialRow(); upper.addString(KEY, CoreWorkload.buildKeyName(upperNum, zeropadding, orderedinserts)); builder.addRangePartition(lower, upper); } } else { throw new DBException("Invalid partition_schema specified: " + partitionSchema + ", must specify `partition_schema=hashPartition` or `partition_schema=rangePartition`"); } builder.setNumReplicas(numReplicas); try { client.createTable(tableName, schema, builder); } catch (Exception e) { if (!e.getMessage().contains("already exists")) { throw new DBException("Couldn't create the table", e); } } tableSetup = true; } } private static int getIntFromProp(Properties prop, String propName, int defaultValue) throws DBException { String intStr = prop.getProperty(propName); if (intStr == null) { return defaultValue; } else { try { return Integer.valueOf(intStr); } catch (NumberFormatException ex) { throw new DBException("Provided number for " + propName + " isn't a valid integer"); } } } @Override public void cleanup() throws DBException { try { this.session.close(); this.client.close(); } catch (Exception e) { throw new DBException("Couldn't cleanup the session", e); } } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Vector<HashMap<String, ByteIterator>> results = new Vector<>(); final Status status = scan(table, key, 1, fields, results); if (!status.equals(Status.OK)) { return status; } if (results.size() != 1) { return Status.NOT_FOUND; } result.putAll(results.firstElement()); return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { KuduScanner.KuduScannerBuilder scannerBuilder = client.newScannerBuilder(kuduTable); List<String> querySchema; if (fields == null) { querySchema = COLUMN_NAMES; // No need to set the projected columns with the whole schema. } else { querySchema = new ArrayList<>(fields); scannerBuilder.setProjectedColumnNames(querySchema); } ColumnSchema column = schema.getColumnByIndex(0); KuduPredicate.ComparisonOp predicateOp = recordcount == 1 ? 
EQUAL : GREATER_EQUAL; KuduPredicate predicate = KuduPredicate.newComparisonPredicate(column, predicateOp, startkey); scannerBuilder.addPredicate(predicate); scannerBuilder.limit(recordcount); // currently noop KuduScanner scanner = scannerBuilder.build(); while (scanner.hasMoreRows()) { RowResultIterator data = scanner.nextRows(); addAllRowsToResult(data, recordcount, querySchema, result); if (recordcount == result.size()) { break; } } RowResultIterator closer = scanner.close(); addAllRowsToResult(closer, recordcount, querySchema, result); } catch (TimeoutException te) { LOG.info("Waited too long for a scan operation with start key={}", startkey); return TIMEOUT; } catch (Exception e) { LOG.warn("Unexpected exception", e); return Status.ERROR; } return Status.OK; } private void addAllRowsToResult(RowResultIterator it, int recordcount, List<String> querySchema, Vector<HashMap<String, ByteIterator>> result) throws Exception { RowResult row; HashMap<String, ByteIterator> rowResult = new HashMap<>(querySchema.size()); if (it == null) { return; } while (it.hasNext()) { if (result.size() == recordcount) { return; } row = it.next(); int colIdx = 0; for (String col : querySchema) { rowResult.put(col, new StringByteIterator(row.getString(colIdx))); colIdx++; } result.add(rowResult); } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { Update update = this.kuduTable.newUpdate(); PartialRow row = update.getRow(); row.addString(KEY, key); for (int i = 1; i < schema.getColumnCount(); i++) { String columnName = schema.getColumnByIndex(i).getName(); ByteIterator b = values.get(columnName); if (b != null) { row.addStringUtf8(columnName, b.toArray()); } } apply(update); return Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { Insert insert = this.kuduTable.newInsert(); PartialRow row = insert.getRow(); row.addString(KEY, key); for (int i = 1; i < schema.getColumnCount(); i++) { row.addStringUtf8(i, values.get(schema.getColumnByIndex(i).getName()).toArray()); } apply(insert); return Status.OK; } @Override public Status delete(String table, String key) { Delete delete = this.kuduTable.newDelete(); PartialRow row = delete.getRow(); row.addString(KEY, key); apply(delete); return Status.OK; } private void apply(Operation op) { try { OperationResponse response = session.apply(op); if (response != null && response.hasRowError()) { LOG.info("Write operation failed: {}", response.getRowError()); } } catch (KuduException ex) { LOG.warn("Write operation failed", ex); } } }
15,870
36.698337
112
java
null
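When kudu_partition_schema=rangePartition is used, the Kudu binding above pre-splits the key space into numTablets contiguous ranges and spreads the remainder over the first tablets. The self-contained sketch below reproduces only that arithmetic; the record count and tablet count are example values.

final class RangeSplitSketch {
  public static void main(String[] args) {
    long recordCount = 10;
    int numTablets = 4;
    long lower;
    long upper = 0;
    int remainder = (int) (recordCount % numTablets);
    for (int i = 0; i < numTablets; i++) {
      lower = upper;
      upper = lower + recordCount / numTablets;
      if (i < remainder) {
        ++upper; // the first 'remainder' tablets each take one extra key
      }
      System.out.printf("tablet %d covers keys [%d, %d)%n", i, lower, upper);
    }
    // With 10 records over 4 tablets this prints [0,3), [3,6), [6,8), [8,10).
  }
}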
NearPMSW-main/baseline/logging/YCSB/tablestore/src/main/java/site/ycsb/db/tablestore/package-info.java
/* * Copyright 2018 YCSB Contributors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * The YCSB binding for <a href="https://www.alibabacloud.com/product/table-store">TableStore</a>. */ package site.ycsb.db.tablestore;
795
33.608696
98
java
null
NearPMSW-main/baseline/logging/YCSB/tablestore/src/main/java/site/ycsb/db/tablestore/TableStoreClient.java
/* * Copyright 2018 YCSB Contributors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.tablestore; import java.util.*; import java.util.function.*; import site.ycsb.*; import com.alicloud.openservices.tablestore.*; import com.alicloud.openservices.tablestore.model.*; import org.apache.log4j.Logger; /** * TableStore Client for YCSB. */ public class TableStoreClient extends DB { private static SyncClient client; private int maxVersions = 1; private String primaryKeyName; private static final Logger LOGGER = Logger.getLogger(TableStoreClient.class); // nasty here as currently there is no support of JEP218 private void setIntegerProperty( Properties properties, String propertyName, ClientConfiguration clientConfiguration, Function<Integer, Boolean> qualifyFunction, BiConsumer<ClientConfiguration, Integer> setFunction) throws DBException { String propertyString = properties.getProperty(propertyName); if (propertyString != null) { Integer propertyInteger = new Integer(propertyString); if (qualifyFunction.apply(propertyInteger).booleanValue()) { setFunction.accept(clientConfiguration, propertyInteger); } else { String errorMessage = "Illegal argument." + propertyName + ":" + propertyString; LOGGER.error(errorMessage); throw new DBException(errorMessage); } } } @Override public void init() throws DBException { Properties properties = getProperties(); String accessID = properties.getProperty("alibaba.cloud.tablestore.access_id"); String accessKey = properties.getProperty("alibaba.cloud.tablestore.access_key"); String endPoint = properties.getProperty("alibaba.cloud.tablestore.end_point"); String instanceName = properties.getProperty("alibaba.cloud.tablestore.instance_name"); String maxVersion = properties.getProperty("alibaba.cloud.tablestore.max_version", "1"); maxVersions = Integer.parseInt(maxVersion); primaryKeyName = properties.getProperty("alibaba.cloud.tablestore.primary_key", ""); ClientConfiguration clientConfiguration = new ClientConfiguration(); setIntegerProperty( properties, "alibaba.cloud.tablestore.connection_timeout", clientConfiguration, (Integer t) -> t > 0, (ClientConfiguration c, Integer t) -> c.setConnectionTimeoutInMillisecond(t.intValue())); setIntegerProperty( properties, "alibaba.cloud.tablestore.socket_timeout", clientConfiguration, (Integer t) -> t > 0, (ClientConfiguration c, Integer t) -> c.setSocketTimeoutInMillisecond(t.intValue())); setIntegerProperty( properties, "alibaba.cloud.tablestore.max_connections", clientConfiguration, (Integer t) -> t > 0, (ClientConfiguration c, Integer t) -> c.setMaxConnections(t.intValue())); try { synchronized (TableStoreClient.class) { if (client == null) { client = new SyncClient(endPoint, accessID, accessKey, instanceName, clientConfiguration); LOGGER.info("new tablestore sync client\tendpoint:" + endPoint + "\tinstanceName:" + instanceName); } } } catch (IllegalArgumentException e) { throw new DBException("Illegal argument passed in. 
Check the format of your parameters.", e); } } private void setResult(Set<String> fields, Map<String, ByteIterator> result, Row row) { if (row != null) { if (fields != null) { for (String field : fields) { result.put(field, new StringByteIterator((row.getColumn(field).toString()))); } } else { for (Column column : row.getColumns()) { result.put(column.getName(), new StringByteIterator(column.getValue().asString())); } } } } private Status dealWithTableStoreException(TableStoreException e) { if (e.getErrorCode().contains("OTSRowOperationConflict")) { return Status.ERROR; } LOGGER.error(e); return Status.ERROR; } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { // set primary key PrimaryKeyColumn[] primaryKeyColumns = new PrimaryKeyColumn[1]; primaryKeyColumns[0] = new PrimaryKeyColumn(primaryKeyName, PrimaryKeyValue.fromString(key)); PrimaryKey primaryKey = new PrimaryKey(primaryKeyColumns); // set table_name SingleRowQueryCriteria singleRowQueryCriteria = new SingleRowQueryCriteria(table, primaryKey); singleRowQueryCriteria.setMaxVersions(maxVersions); // set columns if (fields != null) { singleRowQueryCriteria.addColumnsToGet(fields.toArray(new String[0])); } // set get_row request GetRowRequest getRowRequest = new GetRowRequest(); getRowRequest.setRowQueryCriteria(singleRowQueryCriteria); // operate GetRowResponse getRowResponse = client.getRow(getRowRequest); // set the result setResult(fields, result, getRowResponse.getRow()); return Status.OK; } catch (TableStoreException e) { return dealWithTableStoreException(e); } catch (Exception e) { LOGGER.error(e); return Status.ERROR; } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { // set primary key PrimaryKeyColumn[] startKey = new PrimaryKeyColumn[1]; startKey[0] = new PrimaryKeyColumn(primaryKeyName, PrimaryKeyValue.fromString(startkey)); PrimaryKeyColumn[] endKey = new PrimaryKeyColumn[1]; endKey[0] = new PrimaryKeyColumn(primaryKeyName, PrimaryKeyValue.INF_MAX); RangeRowQueryCriteria criteria = new RangeRowQueryCriteria(table); criteria.setInclusiveStartPrimaryKey(new PrimaryKey(startKey)); criteria.setExclusiveEndPrimaryKey(new PrimaryKey(endKey)); criteria.setMaxVersions(maxVersions); // set columns if (fields != null) { criteria.addColumnsToGet(fields.toArray(new String[0])); } // set limit criteria.setLimit(recordcount); // set the request GetRangeRequest getRangeRequest = new GetRangeRequest(); getRangeRequest.setRangeRowQueryCriteria(criteria); GetRangeResponse getRangeResponse = client.getRange(getRangeRequest); // set the result List<Row> rows = getRangeResponse.getRows(); for (Row row : rows) { HashMap<String, ByteIterator> values = new HashMap<>(); setResult(fields, values, row); result.add(values); } return Status.OK; } catch (TableStoreException e) { return dealWithTableStoreException(e); } catch (Exception e) { LOGGER.error(e); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { PrimaryKeyColumn[] primaryKeyColumns = new PrimaryKeyColumn[1]; primaryKeyColumns[0] = new PrimaryKeyColumn(primaryKeyName, PrimaryKeyValue.fromString(key)); PrimaryKey primaryKey = new PrimaryKey(primaryKeyColumns); RowUpdateChange rowUpdateChange = new RowUpdateChange(table, primaryKey); for (Map.Entry<String, ByteIterator> entry: values.entrySet()) { rowUpdateChange.put(entry.getKey(), 
ColumnValue.fromString(entry.getValue().toString())); } UpdateRowRequest updateRowRequest = new UpdateRowRequest(); updateRowRequest.setRowChange(rowUpdateChange); client.updateRow(updateRowRequest); return Status.OK; } catch (TableStoreException e) { return dealWithTableStoreException(e); } catch (Exception e) { LOGGER.error(e); return Status.ERROR; } } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { // set the primary key PrimaryKeyColumn[] primaryKeyColumns = new PrimaryKeyColumn[1]; primaryKeyColumns[0] = new PrimaryKeyColumn(primaryKeyName, PrimaryKeyValue.fromString(key)); PrimaryKey primaryKey = new PrimaryKey(primaryKeyColumns); RowPutChange rowPutChange = new RowPutChange(table, primaryKey); // set the columns for (Map.Entry<String, ByteIterator> entry: values.entrySet()) { rowPutChange.addColumn(entry.getKey(), ColumnValue.fromString(entry.getValue().toString())); } // set the putRow request PutRowRequest putRowRequest = new PutRowRequest(); putRowRequest.setRowChange(rowPutChange); // operate client.putRow(putRowRequest); return Status.OK; } catch (TableStoreException e) { return dealWithTableStoreException(e); } catch (Exception e) { LOGGER.error(e); return Status.ERROR; } } @Override public Status delete(String table, String key) { try { PrimaryKeyColumn[] primaryKeyColumns = new PrimaryKeyColumn[1]; primaryKeyColumns[0] = new PrimaryKeyColumn(primaryKeyName, PrimaryKeyValue.fromString(key)); PrimaryKey primaryKey = new PrimaryKey(primaryKeyColumns); RowDeleteChange rowDeleteChange = new RowDeleteChange(table, primaryKey); DeleteRowRequest deleteRowRequest = new DeleteRowRequest(); deleteRowRequest.setRowChange(rowDeleteChange); client.deleteRow(deleteRowRequest); return Status.OK; } catch (TableStoreException e) { return dealWithTableStoreException(e); } catch (Exception e) { LOGGER.error(e); return Status.ERROR; } } }
10,214
36.01087
109
java
null
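setIntegerProperty in the TableStore binding above validates optional integer properties with a predicate before applying them through a consumer, working around the missing optional-int plumbing it mentions. Below is a simplified, self-contained rendering of the same pattern; the property name and the printed message are illustrative.

import java.util.Properties;
import java.util.function.IntConsumer;
import java.util.function.IntPredicate;

final class PropertySetterSketch {
  static void setIntProperty(Properties props, String name,
                             IntPredicate qualifies, IntConsumer apply) {
    String raw = props.getProperty(name);
    if (raw == null) {
      return; // property absent: keep the client's default
    }
    int value = Integer.parseInt(raw);
    if (!qualifies.test(value)) {
      throw new IllegalArgumentException("Illegal argument." + name + ":" + raw);
    }
    apply.accept(value);
  }

  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("alibaba.cloud.tablestore.connection_timeout", "5000");
    setIntProperty(props, "alibaba.cloud.tablestore.connection_timeout", v -> v > 0,
        v -> System.out.println("connection timeout set to " + v + " ms"));
  }
}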
NearPMSW-main/baseline/logging/YCSB/aerospike/src/main/java/site/ycsb/db/package-info.java
/** * Copyright (c) 2015 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * YCSB binding for <a href="http://www.aerospike.com/">Areospike</a>. */ package site.ycsb.db;
760
33.590909
70
java
null
NearPMSW-main/baseline/logging/YCSB/aerospike/src/main/java/site/ycsb/db/AerospikeClient.java
/** * Copyright (c) 2015 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db; import com.aerospike.client.AerospikeException; import com.aerospike.client.Bin; import com.aerospike.client.Key; import com.aerospike.client.Record; import com.aerospike.client.policy.ClientPolicy; import com.aerospike.client.policy.Policy; import com.aerospike.client.policy.RecordExistsAction; import com.aerospike.client.policy.WritePolicy; import site.ycsb.ByteArrayByteIterator; import site.ycsb.ByteIterator; import site.ycsb.DBException; import site.ycsb.Status; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; /** * YCSB binding for <a href="http://www.aerospike.com/">Areospike</a>. */ public class AerospikeClient extends site.ycsb.DB { private static final String DEFAULT_HOST = "localhost"; private static final String DEFAULT_PORT = "3000"; private static final String DEFAULT_TIMEOUT = "10000"; private static final String DEFAULT_NAMESPACE = "ycsb"; private String namespace = null; private com.aerospike.client.AerospikeClient client = null; private Policy readPolicy = new Policy(); private WritePolicy insertPolicy = new WritePolicy(); private WritePolicy updatePolicy = new WritePolicy(); private WritePolicy deletePolicy = new WritePolicy(); @Override public void init() throws DBException { insertPolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; updatePolicy.recordExistsAction = RecordExistsAction.REPLACE_ONLY; Properties props = getProperties(); namespace = props.getProperty("as.namespace", DEFAULT_NAMESPACE); String host = props.getProperty("as.host", DEFAULT_HOST); String user = props.getProperty("as.user"); String password = props.getProperty("as.password"); int port = Integer.parseInt(props.getProperty("as.port", DEFAULT_PORT)); int timeout = Integer.parseInt(props.getProperty("as.timeout", DEFAULT_TIMEOUT)); readPolicy.timeout = timeout; insertPolicy.timeout = timeout; updatePolicy.timeout = timeout; deletePolicy.timeout = timeout; ClientPolicy clientPolicy = new ClientPolicy(); if (user != null && password != null) { clientPolicy.user = user; clientPolicy.password = password; } try { client = new com.aerospike.client.AerospikeClient(clientPolicy, host, port); } catch (AerospikeException e) { throw new DBException(String.format("Error while creating Aerospike " + "client for %s:%d.", host, port), e); } } @Override public void cleanup() throws DBException { client.close(); } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { Record record; if (fields != null) { record = client.get(readPolicy, new Key(namespace, table, key), fields.toArray(new String[fields.size()])); } else { record = client.get(readPolicy, new Key(namespace, table, key)); } if (record == null) { System.err.println("Record key " + key + " not found (read)"); return Status.ERROR; } for (Map.Entry<String, Object> 
entry: record.bins.entrySet()) { result.put(entry.getKey(), new ByteArrayByteIterator((byte[])entry.getValue())); } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while reading key " + key + ": " + e); return Status.ERROR; } } @Override public Status scan(String table, String start, int count, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { System.err.println("Scan not implemented"); return Status.ERROR; } private Status write(String table, String key, WritePolicy writePolicy, Map<String, ByteIterator> values) { Bin[] bins = new Bin[values.size()]; int index = 0; for (Map.Entry<String, ByteIterator> entry: values.entrySet()) { bins[index] = new Bin(entry.getKey(), entry.getValue().toArray()); ++index; } Key keyObj = new Key(namespace, table, key); try { client.put(writePolicy, keyObj, bins); return Status.OK; } catch (AerospikeException e) { System.err.println("Error while writing key " + key + ": " + e); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return write(table, key, updatePolicy, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return write(table, key, insertPolicy, values); } @Override public Status delete(String table, String key) { try { if (!client.delete(deletePolicy, new Key(namespace, table, key))) { System.err.println("Record key " + key + " not found (delete)"); return Status.ERROR; } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while deleting key " + key + ": " + e); return Status.ERROR; } } }
5,790
30.472826
79
java
null
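The Aerospike binding above is configured entirely through YCSB properties; the snippet below only collects the keys and defaults that appear in its init() (assembling a Properties object here is an illustration — on a real run they would be passed with -p on the ycsb command line).

import java.util.Properties;

final class AerospikePropsSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("as.host", "localhost");  // DEFAULT_HOST
    props.setProperty("as.port", "3000");       // DEFAULT_PORT
    props.setProperty("as.namespace", "ycsb");  // DEFAULT_NAMESPACE
    props.setProperty("as.timeout", "10000");   // DEFAULT_TIMEOUT, per-operation, in ms
    // Optional authentication, only applied when both user and password are set:
    // props.setProperty("as.user", "admin");
    // props.setProperty("as.password", "secret");
    props.forEach((k, v) -> System.out.println(k + "=" + v));
  }
}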
NearPMSW-main/baseline/logging/YCSB/azurecosmos/src/main/java/site/ycsb/db/package-info.java
/* * Copyright 2018 YCSB Contributors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * The YCSB binding for <a href="https://azure.microsoft.com/services/cosmos-db/">Azure Cosmos</a>. */ package site.ycsb.db;
785
33.173913
99
java
null
NearPMSW-main/baseline/logging/YCSB/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java
/* * Copyright (c) 2018 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. See accompanying LICENSE file. */ package site.ycsb.db; import java.time.Duration; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.Vector; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.azure.cosmos.ConsistencyLevel; import com.azure.cosmos.CosmosClient; import com.azure.cosmos.CosmosClientBuilder; import com.azure.cosmos.CosmosContainer; import com.azure.cosmos.CosmosDatabase; import com.azure.cosmos.CosmosException; import com.azure.cosmos.DirectConnectionConfig; import com.azure.cosmos.GatewayConnectionConfig; import com.azure.cosmos.ThrottlingRetryOptions; import com.azure.cosmos.models.CosmosItemRequestOptions; import com.azure.cosmos.models.CosmosItemResponse; import com.azure.cosmos.models.CosmosQueryRequestOptions; import com.azure.cosmos.models.FeedResponse; import com.azure.cosmos.models.PartitionKey; import com.azure.cosmos.models.SqlParameter; import com.azure.cosmos.models.SqlQuerySpec; import com.azure.cosmos.util.CosmosPagedIterable; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import site.ycsb.ByteIterator; import site.ycsb.DB; import site.ycsb.DBException; import site.ycsb.Status; import site.ycsb.StringByteIterator; /** * Azure Cosmos DB Java SDK 4.6.0 client for YCSB. */ public class AzureCosmosClient extends DB { protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Default configuration values private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.SESSION; private static final String DEFAULT_DATABASE_NAME = "ycsb"; private static final boolean DEFAULT_USE_GATEWAY = false; private static final boolean DEFAULT_USE_UPSERT = false; private static final int DEFAULT_MAX_DEGREE_OF_PARALLELISM = -1; private static final int DEFAULT_MAX_BUFFERED_ITEM_COUNT = 0; private static final int DEFAULT_PREFERRED_PAGE_SIZE = -1; public static final int NUM_UPDATE_ATTEMPTS = 4; private static final boolean DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG = false; private static final String DEFAULT_USER_AGENT = "azurecosmos-ycsb"; private static final Logger LOGGER = LoggerFactory.getLogger(AzureCosmosClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. 
*/ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static CosmosClient client; private static CosmosDatabase database; private static String databaseName; private static boolean useUpsert; private static int maxDegreeOfParallelism; private static int maxBufferedItemCount; private static int preferredPageSize; private static boolean includeExceptionStackInLog; private static Map<String, CosmosContainer> containerCache; private static String userAgent; @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (INIT_COUNT) { if (client != null) { return; } try { initAzureCosmosClient(); } catch (Exception e) { throw new DBException(e); } } } private void initAzureCosmosClient() throws DBException { // Connection properties String primaryKey = this.getStringProperty("azurecosmos.primaryKey", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing primary key required to connect to the database."); } String uri = this.getStringProperty("azurecosmos.uri", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing uri required to connect to the database."); } AzureCosmosClient.userAgent = this.getStringProperty("azurecosmos.userAgent", DEFAULT_USER_AGENT); AzureCosmosClient.useUpsert = this.getBooleanProperty("azurecosmos.useUpsert", DEFAULT_USE_UPSERT); AzureCosmosClient.databaseName = this.getStringProperty("azurecosmos.databaseName", DEFAULT_DATABASE_NAME); AzureCosmosClient.maxDegreeOfParallelism = this.getIntProperty("azurecosmos.maxDegreeOfParallelism", DEFAULT_MAX_DEGREE_OF_PARALLELISM); AzureCosmosClient.maxBufferedItemCount = this.getIntProperty("azurecosmos.maxBufferedItemCount", DEFAULT_MAX_BUFFERED_ITEM_COUNT); AzureCosmosClient.preferredPageSize = this.getIntProperty("azurecosmos.preferredPageSize", DEFAULT_PREFERRED_PAGE_SIZE); AzureCosmosClient.includeExceptionStackInLog = this.getBooleanProperty("azurecosmos.includeExceptionStackInLog", DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG); ConsistencyLevel consistencyLevel = ConsistencyLevel.valueOf( this.getStringProperty("azurecosmos.consistencyLevel", DEFAULT_CONSISTENCY_LEVEL.toString().toUpperCase())); boolean useGateway = this.getBooleanProperty("azurecosmos.useGateway", DEFAULT_USE_GATEWAY); ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions(); int maxRetryAttemptsOnThrottledRequests = this.getIntProperty("azurecosmos.maxRetryAttemptsOnThrottledRequests", -1); if (maxRetryAttemptsOnThrottledRequests != -1) { retryOptions.setMaxRetryAttemptsOnThrottledRequests(maxRetryAttemptsOnThrottledRequests); } // Direct connection config options. DirectConnectionConfig directConnectionConfig = new DirectConnectionConfig(); int directMaxConnectionsPerEndpoint = this.getIntProperty("azurecosmos.directMaxConnectionsPerEndpoint", -1); if (directMaxConnectionsPerEndpoint != -1) { directConnectionConfig.setMaxConnectionsPerEndpoint(directMaxConnectionsPerEndpoint); } int directIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.directIdleConnectionTimeoutInSeconds", -1); if (directIdleConnectionTimeoutInSeconds != -1) { directConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(directIdleConnectionTimeoutInSeconds)); } // Gateway connection config options. 
GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); int gatewayMaxConnectionPoolSize = this.getIntProperty("azurecosmos.gatewayMaxConnectionPoolSize", -1); if (gatewayMaxConnectionPoolSize != -1) { gatewayConnectionConfig.setMaxConnectionPoolSize(gatewayMaxConnectionPoolSize); } int gatewayIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.gatewayIdleConnectionTimeoutInSeconds", -1); if (gatewayIdleConnectionTimeoutInSeconds != -1) { gatewayConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(gatewayIdleConnectionTimeoutInSeconds)); } try { LOGGER.info( "Creating Cosmos DB client {}, useGateway={}, consistencyLevel={}," + " maxRetryAttemptsOnThrottledRequests={}, maxRetryWaitTimeInSeconds={}" + " useUpsert={}, maxDegreeOfParallelism={}, maxBufferedItemCount={}, preferredPageSize={}", uri, useGateway, consistencyLevel.toString(), retryOptions.getMaxRetryAttemptsOnThrottledRequests(), retryOptions.getMaxRetryWaitTime().toMillis() / 1000, AzureCosmosClient.useUpsert, AzureCosmosClient.maxDegreeOfParallelism, AzureCosmosClient.maxBufferedItemCount, AzureCosmosClient.preferredPageSize); CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(uri).key(primaryKey) .throttlingRetryOptions(retryOptions).consistencyLevel(consistencyLevel).userAgentSuffix(userAgent); if (useGateway) { builder = builder.gatewayMode(gatewayConnectionConfig); } else { builder = builder.directMode(directConnectionConfig); } AzureCosmosClient.client = builder.buildClient(); LOGGER.info("Azure Cosmos DB connection created to {}", uri); } catch (IllegalArgumentException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException("Illegal argument passed in. Check the format of your parameters.", e); } AzureCosmosClient.containerCache = new ConcurrentHashMap<>(); // Verify the database exists try { AzureCosmosClient.database = AzureCosmosClient.client.getDatabase(databaseName); AzureCosmosClient.database.read(); } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException( "Invalid database name (" + AzureCosmosClient.databaseName + ") or failed to read database.", e); } } private String getStringProperty(String propertyName, String defaultValue) { return getProperties().getProperty(propertyName, defaultValue); } private boolean getBooleanProperty(String propertyName, boolean defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } return Boolean.parseBoolean(stringVal); } private int getIntProperty(String propertyName, int defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } try { return Integer.parseInt(stringVal); } catch (NumberFormatException e) { return defaultValue; } } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { if (INIT_COUNT.decrementAndGet() <= 0 && AzureCosmosClient.client != null) { try { AzureCosmosClient.client.close(); } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Could not close DocumentClient", e); } finally { AzureCosmosClient.client = null; } } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. 
* * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); ObjectNode node = response.getItem(); Map<String, String> stringResults = new HashMap<>(node.size()); if (fields == null) { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } StringByteIterator.putAllAsByteIterators(result, stringResults); } else { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); if (fields.contains(pair.getKey())) { stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } } StringByteIterator.putAllAsByteIterators(result, stringResults); } return Status.OK; } catch (CosmosException e) { LOGGER.error("Failed to read key {} in collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); return Status.NOT_FOUND; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * * @param table The name of the table * @param startkey The record key of the first record to read. 
* @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set * field/value pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions(); queryOptions.setMaxDegreeOfParallelism(AzureCosmosClient.maxDegreeOfParallelism); queryOptions.setMaxBufferedItemCount(AzureCosmosClient.maxBufferedItemCount); CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } List<SqlParameter> paramList = new ArrayList<>(); paramList.add(new SqlParameter("@startkey", startkey)); SqlQuerySpec querySpec = new SqlQuerySpec( this.createSelectTop(fields, recordcount) + " FROM root r WHERE r.id >= @startkey", paramList); CosmosPagedIterable<ObjectNode> pagedIterable = container.queryItems(querySpec, queryOptions, ObjectNode.class); Iterator<FeedResponse<ObjectNode>> pageIterator = pagedIterable .iterableByPage(AzureCosmosClient.preferredPageSize).iterator(); while (pageIterator.hasNext()) { List<ObjectNode> pageDocs = pageIterator.next().getResults(); for (ObjectNode doc : pageDocs) { Map<String, String> stringResults = new HashMap<>(doc.size()); Iterator<Map.Entry<String, JsonNode>> nodeIterator = doc.fields(); while (nodeIterator.hasNext()) { Entry<String, JsonNode> pair = nodeIterator.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } HashMap<String, ByteIterator> byteResults = new HashMap<>(doc.size()); StringByteIterator.putAllAsByteIterators(byteResults, stringResults); result.add(byteResults); } } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to query key {} from collection {} in database {}", startkey, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key, * overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { String readEtag = ""; // Azure Cosmos DB does not have patch support. Until then, we need to read // the document, update it, and then write it back. // This could be made more efficient by using a stored procedure // and doing the read/modify write on the server side. Perhaps // that will be a future improvement. 
for (int attempt = 0; attempt < NUM_UPDATE_ATTEMPTS; attempt++) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); readEtag = response.getETag(); ObjectNode node = response.getItem(); for (Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setIfMatchETag(readEtag); PartitionKey pk = new PartitionKey(key); container.replaceItem(node, key, pk, requestOptions); return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to update key {} to collection {} in database {} on attempt {}", key, table, AzureCosmosClient.databaseName, attempt, e); } } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Insert key: {} into table: {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } PartitionKey pk = new PartitionKey(key); ObjectNode node = OBJECT_MAPPER.createObjectNode(); node.put("id", key); for (Map.Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } if (AzureCosmosClient.useUpsert) { container.upsertItem(node, pk, new CosmosItemRequestOptions()); } else { container.createItem(node, pk, new CosmosItemRequestOptions()); } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to insert key {} to collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } @Override public Status delete(String table, String key) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Delete key {} from table {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } container.deleteItem(key, new PartitionKey(key), new CosmosItemRequestOptions()); return Status.OK; } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to delete key {} in collection {}", key, table, e); } return Status.ERROR; } private String createSelectTop(Set<String> fields, int top) { if (fields == null) { return "SELECT TOP " + top + " * "; } else { StringBuilder result = new StringBuilder("SELECT TOP ").append(top).append(" "); int initLength = result.length(); for (String field : fields) { if (result.length() != initLength) { result.append(", "); } 
result.append("r['").append(field).append("'] "); } return result.toString(); } } }
20,780
39.117761
120
java
null
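The AzureCosmosClient code above is configured entirely through the azurecosmos.* properties read in initAzureCosmosClient(). The following is a minimal sketch (not part of the dataset) of driving the binding directly through the YCSB DB interface; the endpoint, key, and database values are placeholders, and only property names actually read by the code are used.

import java.util.Properties;

import site.ycsb.DB;
import site.ycsb.DBException;

// Assumes this example lives alongside AzureCosmosClient (its package declaration is not shown in this excerpt).
public final class AzureCosmosClientExample {
  public static void main(String[] args) throws DBException {
    Properties p = new Properties();
    // Placeholder endpoint/key/database; substitute real Cosmos DB account values.
    p.setProperty("azurecosmos.uri", "https://example-account.documents.azure.com:443/");
    p.setProperty("azurecosmos.primaryKey", "<primary-key>");
    p.setProperty("azurecosmos.databaseName", "ycsb");
    p.setProperty("azurecosmos.useGateway", "false");   // direct mode unless overridden
    p.setProperty("azurecosmos.useUpsert", "true");     // insert() then calls upsertItem()

    DB db = new AzureCosmosClient();
    db.setProperties(p);
    db.init();     // builds the shared CosmosClient and verifies the database exists
    // ... issue read()/insert()/scan() calls against a pre-created container here ...
    db.cleanup();  // closes the client once the last thread finishes
  }
}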
NearPMSW-main/baseline/logging/YCSB/maprdb/src/main/java/site/ycsb/db/mapr/package-info.java
/* * Copyright (c) 2017, Yahoo!, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * The YCSB binding for <a href="http://mapr.com/maprdb/">MapR-DB</a>. */ package site.ycsb.db.mapr;
760
32.086957
70
java
null
NearPMSW-main/baseline/logging/YCSB/maprdb/src/main/java/site/ycsb/db/mapr/MapRDBClient.java
/** * Copyright (c) 2017 Yahoo! Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.mapr; /** * MapR-DB(binary) client for YCSB framework. * */ public class MapRDBClient extends site.ycsb.db.hbase1.HBaseClient1 { }
809
30.153846
70
java
null
NearPMSW-main/baseline/logging/YCSB/griddb/src/test/java/site/ycsb/db/griddb/GridDBClientTest.java
/** * Copyright (c) 2018 TOSHIBA Digital Solutions Corporation. * Copyright (c) 2018 YCSB contributors. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.griddb; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.*; import static org.junit.Assume.assumeNoException; import java.io.IOException; import java.net.InetAddress; import java.net.Socket; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Properties; import java.util.Set; import org.junit.After; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import com.toshiba.mwcloud.gs.ColumnInfo; import com.toshiba.mwcloud.gs.ContainerInfo; import com.toshiba.mwcloud.gs.ContainerType; import com.toshiba.mwcloud.gs.GSException; import com.toshiba.mwcloud.gs.GSType; import com.toshiba.mwcloud.gs.GridStore; import com.toshiba.mwcloud.gs.GridStoreFactory; import site.ycsb.ByteIterator; import site.ycsb.DB; import site.ycsb.DBException; import site.ycsb.Status; import site.ycsb.StringByteIterator; import site.ycsb.measurements.Measurements; import site.ycsb.workloads.CoreWorkload; public class GridDBClientTest { // Default GridbDB port private static final int GRIDDB_DEFAULT_PORT = 10040; //GridDBDatastore configuration private final static String TEST_TABLE = "testtable"; private final static String NOTIFICATION_PORT = "31999";//default is 31999 private final static String NOTIFICATION_ADDR = "239.0.0.1";//default is 239.0.0.1 private final static String CLUSTER_NAME = "ycsbcluster";//Fill your cluster name private final static String USER_NAME = "admin";//Fill your user name private final static String PASS = "admin";//Fill your password private final static int FIELD_COUNT = 10; private final static String FIELD_LENGTH = "100"; private DB myClient = null; private final static String DEFAULT_ROW_KEY = "user1"; public static final String VALUE_COLUMN_NAME_PREFIX= "field"; private ContainerInfo containerInfo = null; private GridStore store; /** * Verifies the GridDB process (or some process) is running on port 10040, if * not the tests are skipped. */ @BeforeClass public static void setUpBeforeClass() { // Test if we can connect. Socket socket = null; try { // Connect socket = new Socket(InetAddress.getLocalHost(), GRIDDB_DEFAULT_PORT); assertThat("Socket is not bound.", socket.getLocalPort(), not(-1)); } catch (IOException connectFailed) { assumeNoException("GridDB is not running. Skipping tests.", connectFailed); } finally { if (socket != null) { try { socket.close(); } catch (IOException ignore) { // Ignore. 
} } socket = null; } } /** * Create properties for configuration to get client * Create data table to test */ @Before public void setUp() throws Exception { Properties p = new Properties(); p.setProperty("notificationAddress", NOTIFICATION_ADDR); p.setProperty("notificationPort", NOTIFICATION_PORT); p.setProperty("clusterName", CLUSTER_NAME); p.setProperty("userName", USER_NAME); p.setProperty("user", USER_NAME); p.setProperty("password", PASS); p.setProperty("fieldcount", String.valueOf(FIELD_COUNT)); p.setProperty("fieldlength", FIELD_LENGTH); Measurements.setProperties(p); final CoreWorkload workload = new CoreWorkload(); workload.init(p); getDB(p); // Create data table to test // List of columns List<ColumnInfo> columnInfoList = new ArrayList<ColumnInfo>(); ColumnInfo keyInfo = new ColumnInfo("key", GSType.STRING); columnInfoList.add(keyInfo); for (int i = 0; i < FIELD_COUNT; i++) { String columnName = String.format(VALUE_COLUMN_NAME_PREFIX + "%d", i); ColumnInfo info = new ColumnInfo(columnName, GSType.STRING); columnInfoList.add(info); } containerInfo = new ContainerInfo(null, ContainerType.COLLECTION, columnInfoList, true); try { GridStoreFactory.getInstance().setProperties(p); store = GridStoreFactory.getInstance().getGridStore(p); store.putContainer(TEST_TABLE, containerInfo, false); } catch (GSException e) { e.printStackTrace(); throw new DBException(); } } /** * Insert data to GridbDB database for testing */ private void insertToDatabase() { HashMap<String, ByteIterator> values = new HashMap<String, ByteIterator>(); // The number of field in container info is 10 for (int i = 0; i < FIELD_COUNT; i++) { values.put(VALUE_COLUMN_NAME_PREFIX + i, new StringByteIterator("value" + i)); } myClient.insert(TEST_TABLE, DEFAULT_ROW_KEY, values); } @Test public void testReadNoExistedRow() { Set<String> fields = Collections.singleton("field0"); HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>(); insertToDatabase(); Status readStatus = myClient.read(TEST_TABLE, "Missing row", fields, result); assertEquals(readStatus, Status.ERROR); assertEquals(result.size(), 0); } @Test public void testReadSingleRow() { Set<String> fields = Collections.singleton("field1"); HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>(); insertToDatabase(); Status readStatus = myClient.read(TEST_TABLE, DEFAULT_ROW_KEY, fields, result); assertEquals(readStatus, Status.OK); assertNotEquals(result.entrySet(), 0); for (String key : fields) { ByteIterator iter = result.get(key); byte[] byteArray1 = iter.toArray(); String value = new String(byteArray1); assertEquals(value, "value1"); } } @Test public void testReadAll() { HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>(); insertToDatabase(); Status readStatus = myClient.read(TEST_TABLE, DEFAULT_ROW_KEY, null, result); assertEquals(readStatus, Status.OK); assertEquals(result.size(), FIELD_COUNT); for (int i = 0; i < FIELD_COUNT; i++) { ByteIterator iter = result.get("field" + i); byte[] byteArray1 = iter.toArray(); String value = new String(byteArray1); assertEquals(value, "value" + i); } } @Test public void testUpdate() { HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>(); HashMap<String, ByteIterator> values = new HashMap<String, ByteIterator>(); insertToDatabase(); String keyForUpdate = "field2"; Set<String> fields = Collections.singleton(keyForUpdate); String strValueToUpdate = "new_value_2"; ByteIterator valForUpdate = new StringByteIterator(strValueToUpdate); 
values.put(keyForUpdate, valForUpdate); Status updateStatus = myClient.update(TEST_TABLE, DEFAULT_ROW_KEY, values); assertEquals(updateStatus, Status.OK); // After update, we read the update row for get new value myClient.read(TEST_TABLE, DEFAULT_ROW_KEY, fields, result); assertNotEquals(result.entrySet(), 0); boolean found = false; for (int i = 0; i < FIELD_COUNT; i++) { ByteIterator iter = result.get("field" + i); byte[] byteArray1 = iter.toArray(); String value = new String(byteArray1); // check result has row value is new update value or not if (value.equals(strValueToUpdate)) { found = true; } } assertEquals(found, true); } @Test public void testInsert() { HashMap<String, ByteIterator> values = new HashMap<String, ByteIterator>(); HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>(); // The number of field in container info is 10 for (int i = 0; i < FIELD_COUNT; i++) { values.put("field" + i, new StringByteIterator("value" + i)); } Status insertStatus = myClient.insert(TEST_TABLE, DEFAULT_ROW_KEY, values); assertEquals(insertStatus, Status.OK); myClient.read(TEST_TABLE, DEFAULT_ROW_KEY, null, result); assertEquals(result.size(), FIELD_COUNT); for (int i = 0; i < FIELD_COUNT; i++) { ByteIterator iter = result.get("field" + i); byte[] byteArray1 = iter.toArray(); String value = new String(byteArray1); assertEquals(value, "value" + i); } } @Test public void testDelete() { HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>(); insertToDatabase(); Status deleteStatus = myClient.delete(TEST_TABLE, DEFAULT_ROW_KEY); assertEquals(deleteStatus, Status.OK); Status readStatus = myClient.read(TEST_TABLE, DEFAULT_ROW_KEY, null, result); assertEquals(readStatus, Status.ERROR); assertEquals(result.size(), 0); } @Test public void testCombination() { final int LOOP_COUNT = 3; for (int i = 0; i < LOOP_COUNT; i++) { testReadNoExistedRow(); testReadSingleRow(); testReadAll(); testInsert(); testUpdate(); testDelete(); } } /** * Stops the test client. */ @After public void tearDown() { try { myClient.cleanup(); store.dropContainer(TEST_TABLE); } catch (Exception error) { // Ignore. } finally { myClient = null; } } /** * Gets the test DB. * * @param props * Properties to pass to the client. * @return The test DB. */ protected DB getDB(Properties props) { if( myClient == null ) { myClient = new GridDBClient(); myClient.setProperties(props); try { myClient.init(); } catch (Exception error) { assumeNoException(error); } } return myClient; } }
11,184
32.791541
96
java
null
NearPMSW-main/baseline/logging/YCSB/griddb/src/main/java/site/ycsb/db/griddb/package-info.java
/* * Copyright (c) 2018 TOSHIBA Digital Solutions Corporation. * Copyright (c) 2018 YCSB contributors. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * The YCSB binding for <a href="https://griddb.net/">GridDB</a>. */ package site.ycsb.db.griddb;
801
33.869565
70
java
null
NearPMSW-main/baseline/logging/YCSB/griddb/src/main/java/site/ycsb/db/griddb/GridDBClient.java
/** * Copyright (c) 2018 TOSHIBA Digital Solutions Corporation. * Copyright (c) 2018 YCSB contributors. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.griddb; import java.util.ArrayList; import java.util.Comparator; import java.util.HashMap; import java.util.Map; import java.util.List; import java.util.Properties; import java.util.Set; import java.util.Vector; import java.util.logging.Logger; import com.toshiba.mwcloud.gs.ColumnInfo; import com.toshiba.mwcloud.gs.Container; import com.toshiba.mwcloud.gs.ContainerInfo; import com.toshiba.mwcloud.gs.ContainerType; import com.toshiba.mwcloud.gs.GSException; import com.toshiba.mwcloud.gs.GSType; import com.toshiba.mwcloud.gs.GridStore; import com.toshiba.mwcloud.gs.GridStoreFactory; import com.toshiba.mwcloud.gs.PartitionController; import com.toshiba.mwcloud.gs.Row; import site.ycsb.ByteArrayByteIterator; import site.ycsb.ByteIterator; import site.ycsb.DBException; import site.ycsb.Status; /** * A class representing GridDBClient. * */ public class GridDBClient extends site.ycsb.DB { //(A)multicast method private String notificationAddress = ""; // "239.0.0.1" private String notificationPort = ""; // "31999" //(B)fixed list method private String notificationMember = ""; // "10.0.0.12:10001,10.0.0.13:10001,10.0.0.14:10001" private String userName = ""; // private String password = ""; // private String clusterName = ""; // "ycsbCluster" public static final String VALUE_COLUMN_NAME_PREFIX= "field"; public static final int ROW_KEY_COLUMN_POS = 0; private String containerPrefix = ""; public static final int DEFAULT_CACHE_CONTAINER_NUM = 1000; public static final int FIELD_NUM = 10; private int numContainer = 0; // Sets PartitionNum public static final GSType SCHEMA_TYPE = GSType.STRING; private GridStore store; private ContainerInfo containerInfo = null; private static final Logger LOGGER = Logger.getLogger(GridDBClient.class.getName()); class RowComparator implements Comparator<Row> { public int compare(Row row1, Row row2) throws NullPointerException { int result = 0; try { Object val1 = row1.getValue(0); Object val2 = row2.getValue(0); result = ((String)val1).compareTo((String)val2); } catch (GSException e) { LOGGER.severe("There is a exception: " + e.getMessage()); throw new NullPointerException(); } return result; } } public void init() throws DBException { LOGGER.info("GridDBClient"); final Properties props = getProperties(); notificationAddress = props.getProperty("notificationAddress"); notificationPort = props.getProperty("notificationPort"); notificationMember = props.getProperty("notificationMember"); clusterName = props.getProperty("clusterName"); userName = props.getProperty("userName"); password = props.getProperty("password"); containerPrefix = props.getProperty("table", "usertable") + "@"; String fieldcount = props.getProperty("fieldcount"); String fieldlength = props.getProperty("fieldlength"); LOGGER.info("notificationAddress=" + notificationAddress + " notificationPort=" + 
notificationPort + " notificationMember=" + notificationMember); LOGGER.info("clusterName=" + clusterName + " userName=" + userName); LOGGER.info("fieldcount=" + fieldcount + " fieldlength=" + fieldlength); final Properties gridstoreProp = new Properties(); if (clusterName == null || userName == null || password == null) { LOGGER.severe("[ERROR] clusterName or userName or password argument not specified"); throw new DBException(); } if (fieldcount == null || fieldlength == null) { LOGGER.severe("[ERROR] fieldcount or fieldlength argument not specified"); throw new DBException(); } else { if (!fieldcount.equals(String.valueOf(FIELD_NUM)) || !fieldlength.equals("100")) { LOGGER.severe("[ERROR] Invalid argment: fieldcount or fieldlength"); throw new DBException(); } } if (notificationAddress != null) { if (notificationPort == null) { LOGGER.severe("[ERROR] notificationPort argument not specified"); throw new DBException(); } //(A)multicast method gridstoreProp.setProperty("notificationAddress", notificationAddress); gridstoreProp.setProperty("notificationPort", notificationPort); } else if (notificationMember != null) { //(B)fixed list method gridstoreProp.setProperty("notificationMember", notificationMember); } else { LOGGER.severe("[ERROR] notificationAddress and notificationMember argument not specified"); throw new DBException(); } gridstoreProp.setProperty("clusterName", clusterName); gridstoreProp.setProperty("user", userName); gridstoreProp.setProperty("password", password); gridstoreProp.setProperty("containerCacheSize", String.valueOf(DEFAULT_CACHE_CONTAINER_NUM)); List<ColumnInfo> columnInfoList = new ArrayList<ColumnInfo>(); ColumnInfo keyInfo = new ColumnInfo("key", SCHEMA_TYPE); columnInfoList.add(keyInfo); for (int i = 0; i < FIELD_NUM; i++) { String columnName = String.format(VALUE_COLUMN_NAME_PREFIX + "%d", i); ColumnInfo info = new ColumnInfo(columnName, SCHEMA_TYPE); columnInfoList.add(info); } containerInfo = new ContainerInfo(null, ContainerType.COLLECTION, columnInfoList, true); try { GridStoreFactory.getInstance().setProperties(gridstoreProp); store = GridStoreFactory.getInstance().getGridStore(gridstoreProp); PartitionController controller = store.getPartitionController(); numContainer = controller.getPartitionCount(); for(int k = 0; k < numContainer; k++) { String name = containerPrefix + k; store.putContainer(name, containerInfo, false); } } catch (GSException e) { LOGGER.severe("Exception: " + e.getMessage()); throw new DBException(); } LOGGER.info("numContainer=" + numContainer + " containerCasheSize=" + String.valueOf(DEFAULT_CACHE_CONTAINER_NUM)); } public void cleanup() throws DBException { try { store.close(); } catch (GSException e) { LOGGER.severe("Exception when close." 
+ e.getMessage()); throw new DBException(); } } public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { Object rowKey = makeRowKey(key); String containerKey = makeContainerKey(key); final Container<Object, Row> container = store.getContainer(containerKey); if(container == null) { LOGGER.severe("[ERROR]getCollection " + containerKey + " in read()"); return Status.ERROR; } Row targetRow = container.get(rowKey); if (targetRow == null) { LOGGER.severe("[ERROR]get(rowKey) in read()"); return Status.ERROR; } for (int i = 1; i < containerInfo.getColumnCount(); i++) { result.put(containerInfo.getColumnInfo(i).getName(), new ByteArrayByteIterator(targetRow.getValue(i).toString().getBytes())); } return Status.OK; } catch (GSException e) { LOGGER.severe("Exception: " + e.getMessage()); return Status.ERROR; } } public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { LOGGER.severe("[ERROR]scan() not supported"); return Status.ERROR; } public Status update(String table, String key, Map<String, ByteIterator> values) { try { Object rowKey = makeRowKey(key); String containerKey = makeContainerKey(key); final Container<Object, Row> container = store.getContainer(containerKey); if(container == null) { LOGGER.severe("[ERROR]getCollection " + containerKey + " in update()"); return Status.ERROR; } Row targetRow = container.get(rowKey); if (targetRow == null) { LOGGER.severe("[ERROR]get(rowKey) in update()"); return Status.ERROR; } int setCount = 0; for (int i = 1; i < containerInfo.getColumnCount() && setCount < values.size(); i++) { containerInfo.getColumnInfo(i).getName(); ByteIterator byteIterator = values.get(containerInfo.getColumnInfo(i).getName()); if (byteIterator != null) { Object value = makeValue(byteIterator); targetRow.setValue(i, value); setCount++; } } if (setCount != values.size()) { LOGGER.severe("Error setCount = " + setCount); return Status.ERROR; } container.put(targetRow); return Status.OK; } catch (GSException e) { LOGGER.severe("Exception: " + e.getMessage()); return Status.ERROR; } } public Status insert(String table, String key, Map<String, ByteIterator> values) { try { Object rowKey = makeRowKey(key); String containerKey = makeContainerKey(key); final Container<Object, Row> container = store.getContainer(containerKey); if(container == null) { LOGGER.severe("[ERROR]getCollection " + containerKey + " in insert()"); } Row row = container.createRow(); row.setValue(ROW_KEY_COLUMN_POS, rowKey); for (int i = 1; i < containerInfo.getColumnCount(); i++) { ByteIterator byteIterator = values.get(containerInfo.getColumnInfo(i).getName()); Object value = makeValue(byteIterator); row.setValue(i, value); } container.put(row); } catch (GSException e) { LOGGER.severe("Exception: " + e.getMessage()); return Status.ERROR; } return Status.OK; } public Status delete(String table, String key) { try { Object rowKey = makeRowKey(key); String containerKey = makeContainerKey(key); final Container<Object, Row> container = store.getContainer(containerKey); if(container == null) { LOGGER.severe("[ERROR]getCollection " + containerKey + " in read()"); return Status.ERROR; } boolean isDelete = container.remove(rowKey); if (!isDelete) { LOGGER.severe("[ERROR]remove(rowKey) in remove()"); return Status.ERROR; } }catch (GSException e) { LOGGER.severe("Exception: " + e.getMessage()); return Status.ERROR; } return Status.OK; } protected String makeContainerKey(String key) { return 
containerPrefix + Math.abs(key.hashCode() % numContainer); } protected Object makeRowKey(String key) { return key; } protected Object makeValue(ByteIterator byteIterator) { return byteIterator.toString(); } protected Object makeQueryLiteral(Object value) { return "'" + value.toString() + "'"; } }
11,403
33.143713
104
java
null
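GridDBClient above connects either by multicast (notificationAddress/notificationPort) or by a fixed node list (notificationMember), and its init() insists on fieldcount=10 and fieldlength=100. Below is a minimal sketch, assuming a running GridDB cluster and placeholder credentials, mirroring the properties that GridDBClientTest sets.

import java.util.Properties;

import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.db.griddb.GridDBClient;

public final class GridDBClientExample {
  public static void main(String[] args) throws DBException {
    Properties p = new Properties();
    // (A) multicast method; alternatively set "notificationMember" for the fixed-list method.
    p.setProperty("notificationAddress", "239.0.0.1");
    p.setProperty("notificationPort", "31999");
    p.setProperty("clusterName", "ycsbcluster");  // placeholder cluster name
    p.setProperty("userName", "admin");           // placeholder credentials
    p.setProperty("password", "admin");
    // init() rejects anything other than fieldcount=10 and fieldlength=100.
    p.setProperty("fieldcount", "10");
    p.setProperty("fieldlength", "100");

    DB db = new GridDBClient();
    db.setProperties(p);
    db.init();      // creates one collection per partition, named <table>@<partition>
    // ... read()/insert()/update()/delete() calls go here; scan() is not supported ...
    db.cleanup();
  }
}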
NearPMSW-main/baseline/logging/YCSB/maprjsondb/src/main/java/site/ycsb/db/mapr/package-info.java
/* * Copyright (c) 2017, Yahoo!, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * The YCSB binding for <a href="http://mapr.com/maprjsondb/">MapR JSON DB</a>. */ package site.ycsb.db.mapr;
769
32.478261
79
java
null
NearPMSW-main/baseline/logging/YCSB/maprjsondb/src/main/java/site/ycsb/db/mapr/ValueByteIterator.java
/* * Copyright (c) 2017, Yahoo!, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.mapr; import org.ojai.Value; import org.ojai.util.Values; import site.ycsb.ByteIterator; /** * OJAI Value byte iterator. * * Used for parsing the document fetched MapR JSON DB */ public class ValueByteIterator extends ByteIterator { private Value value; public ValueByteIterator(Value value) { this.value = value; } @Override public boolean hasNext() { return false; } @Override public byte nextByte() { return 0; } @Override public long bytesLeft() { return 0; } @Override public String toString() { return Values.asJsonString(value); } }
1,279
20.694915
70
java
null
NearPMSW-main/baseline/logging/YCSB/maprjsondb/src/main/java/site/ycsb/db/mapr/MapRJSONDBClient.java
/** * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.mapr; import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.Vector; import org.ojai.Document; import org.ojai.DocumentConstants; import org.ojai.DocumentStream; import org.ojai.Value; import org.ojai.store.Connection; import org.ojai.store.DocumentMutation; import org.ojai.store.DocumentStore; import org.ojai.store.Driver; import org.ojai.store.DriverManager; import org.ojai.store.Query; import org.ojai.store.QueryCondition; import org.ojai.store.QueryCondition.Op; import site.ycsb.ByteIterator; import site.ycsb.Status; /** * MapR-DB(json) client for YCSB framework. * */ public class MapRJSONDBClient extends site.ycsb.DB { private Connection connection = null; private DocumentStore documentStore = null; private Driver driver = null; @Override public void init() { connection = DriverManager.getConnection("ojai:mapr:"); driver = connection.getDriver(); } @Override public void cleanup() { documentStore.close(); connection.close(); } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { DocumentStore docStore = getTable(table); Document doc = docStore.findById(key, getFieldPaths(fields)); buildRowResult(doc, result); return Status.OK; } catch (Exception e) { return Status.ERROR; } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { DocumentStore docStore = getTable(table); QueryCondition condition = driver.newCondition() .is(DocumentConstants.ID_FIELD, Op.GREATER_OR_EQUAL, startkey) .build(); Query query = driver.newQuery() .select(getFieldPaths(fields)) .where(condition) .build(); try (DocumentStream stream = docStore.findQuery(query)) { int numResults = 0; for (Document record : stream) { result.add(buildRowResult(record)); numResults++; if (numResults >= recordcount) { break; } } } return Status.OK; } catch (Exception e) { e.printStackTrace(); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { DocumentStore docStore = getTable(table); docStore.update(key, newMutation(values)); return Status.OK; } catch (Exception e) { return Status.ERROR; } } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { DocumentStore docStore = getTable(table); docStore.insertOrReplace(key, newDocument(values)); return Status.OK; } catch (Exception e) { return Status.ERROR; } } @Override public Status delete(String table, String key) { try { DocumentStore docStore = getTable(table); docStore.delete(key); return Status.OK; } catch (Exception e) { return Status.ERROR; } } /** * Get the OJAI DocumentStore instance for a given table. 
* * @param tableName * @return */ private DocumentStore getTable(String tableName) { if (documentStore == null) { documentStore = connection.getStore(tableName); } return documentStore; } /** * Construct a Document object from the map of OJAI values. * * @param values * @return */ private Document newDocument(Map<String, ByteIterator> values) { Document document = driver.newDocument(); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { document.set(entry.getKey(), entry.getValue().toArray()); } return document; } /** * Build a DocumentMutation object for the values specified. * @param values * @return */ private DocumentMutation newMutation(Map<String, ByteIterator> values) { DocumentMutation mutation = driver.newMutation(); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { mutation.setOrReplace(entry.getKey(), ByteBuffer.wrap(entry.getValue().toArray())); } return mutation; } /** * Get field path array from the set. * * @param fields * @return */ private String[] getFieldPaths(Set<String> fields) { if (fields != null) { return fields.toArray(new String[fields.size()]); } return new String[0]; } /** * Build result the map from the Document passed. * * @param document * @return */ private HashMap<String, ByteIterator> buildRowResult(Document document) { return buildRowResult(document, null); } /** * Build result the map from the Document passed. * * @param document * @param result * @return */ private HashMap<String, ByteIterator> buildRowResult(Document document, Map<String, ByteIterator> result) { if (document != null) { if (result == null) { result = new HashMap<String, ByteIterator>(); } for (Map.Entry<String, Value> kv : document) { result.put(kv.getKey(), new ValueByteIterator(kv.getValue())); } } return (HashMap<String, ByteIterator>)result; } }
5,972
25.546667
75
java
null
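MapRJSONDBClient above needs no binding-specific properties; it opens an OJAI connection with the "ojai:mapr:" URL and resolves the YCSB table name to a MapR-DB JSON table. Below is a minimal sketch, assuming a reachable MapR cluster and a hypothetical pre-created table path.

import java.util.HashMap;
import java.util.Properties;

import site.ycsb.ByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.db.mapr.MapRJSONDBClient;

public final class MapRJSONDBClientExample {
  public static void main(String[] args) throws Exception {
    MapRJSONDBClient client = new MapRJSONDBClient();
    client.setProperties(new Properties()); // this binding reads no custom properties
    client.init();                          // opens the "ojai:mapr:" OJAI connection

    // Hypothetical, pre-created MapR-DB JSON table path.
    String table = "/tables/usertable";
    HashMap<String, ByteIterator> values = new HashMap<>();
    values.put("field0", new StringByteIterator("value0"));

    Status inserted = client.insert(table, "user1", values);
    Status deleted = client.delete(table, "user1");
    System.out.println("insert=" + inserted + " delete=" + deleted);

    client.cleanup();
  }
}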
NearPMSW-main/baseline/logging/YCSB/rados/src/test/java/site/ycsb/db/RadosClientTest.java
/** * Copyright (c) 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db; import static org.junit.Assert.assertEquals; import static org.junit.Assume.assumeNoException; import site.ycsb.ByteIterator; import site.ycsb.DBException; import site.ycsb.Status; import site.ycsb.StringByteIterator; import org.junit.AfterClass; import org.junit.After; import org.junit.BeforeClass; import org.junit.Before; import org.junit.Test; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.UUID; /** * Test for the binding of <a href="http://ceph.org/">RADOS of Ceph</a>. * * See {@code rados/README.md} for details. */ public class RadosClientTest { private static RadosClient radosclient; public static final String POOL_PROPERTY = "rados.pool"; public static final String POOL_TEST = "rbd"; private static final String TABLE_NAME = "table0"; private static final String KEY0 = "key0"; private static final String KEY1 = "key1"; private static final String KEY2 = "key2"; private static final HashMap<String, ByteIterator> DATA; private static final HashMap<String, ByteIterator> DATA_UPDATED; static { DATA = new HashMap<String, ByteIterator>(10); DATA_UPDATED = new HashMap<String, ByteIterator>(10); for (int i = 0; i < 10; i++) { String key = "key" + UUID.randomUUID(); DATA.put(key, new StringByteIterator("data" + UUID.randomUUID())); DATA_UPDATED.put(key, new StringByteIterator("data" + UUID.randomUUID())); } } @BeforeClass public static void setupClass() throws DBException { radosclient = new RadosClient(); Properties p = new Properties(); p.setProperty(POOL_PROPERTY, POOL_TEST); try { radosclient.setProperties(p); radosclient.init(); } catch (DBException|UnsatisfiedLinkError e) { assumeNoException("Ceph cluster is not running. 
Skipping tests.", e); } } @AfterClass public static void teardownClass() throws DBException { if (radosclient != null) { radosclient.cleanup(); } } @Before public void setUp() { radosclient.insert(TABLE_NAME, KEY0, DATA); } @After public void tearDown() { radosclient.delete(TABLE_NAME, KEY0); } @Test public void insertTest() { Status result = radosclient.insert(TABLE_NAME, KEY1, DATA); assertEquals(Status.OK, result); } @Test public void updateTest() { radosclient.insert(TABLE_NAME, KEY2, DATA); Status result = radosclient.update(TABLE_NAME, KEY2, DATA_UPDATED); assertEquals(Status.OK, result); HashMap<String, ByteIterator> ret = new HashMap<String, ByteIterator>(10); radosclient.read(TABLE_NAME, KEY2, DATA.keySet(), ret); compareMap(DATA_UPDATED, ret); radosclient.delete(TABLE_NAME, KEY2); } @Test public void readTest() { HashMap<String, ByteIterator> ret = new HashMap<String, ByteIterator>(10); Status result = radosclient.read(TABLE_NAME, KEY0, DATA.keySet(), ret); assertEquals(Status.OK, result); compareMap(DATA, ret); } private void compareMap(HashMap<String, ByteIterator> src, HashMap<String, ByteIterator> dest) { assertEquals(src.size(), dest.size()); Set setSrc = src.entrySet(); Iterator<Map.Entry> itSrc = setSrc.iterator(); for (int i = 0; i < 10; i++) { Map.Entry<String, ByteIterator> entrySrc = itSrc.next(); assertEquals(entrySrc.getValue().toString(), dest.get(entrySrc.getKey()).toString()); } } @Test public void deleteTest() { Status result = radosclient.delete(TABLE_NAME, KEY0); assertEquals(Status.OK, result); } }
4,287
27.397351
98
java
null
NearPMSW-main/baseline/logging/YCSB/rados/src/main/java/site/ycsb/db/package-info.java
/** * Copyright (c) 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * YCSB binding for RADOS of Ceph. */ package site.ycsb.db;
724
31.954545
70
java
null
NearPMSW-main/baseline/logging/YCSB/rados/src/main/java/site/ycsb/db/RadosClient.java
/** * Copyright (c) 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db; import com.ceph.rados.Rados; import com.ceph.rados.IoCTX; import com.ceph.rados.jna.RadosObjectInfo; import com.ceph.rados.ReadOp; import com.ceph.rados.ReadOp.ReadResult; import com.ceph.rados.exceptions.RadosException; import site.ycsb.ByteIterator; import site.ycsb.DB; import site.ycsb.DBException; import site.ycsb.Status; import site.ycsb.StringByteIterator; import java.io.File; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; import java.util.Vector; import org.json.JSONObject; /** * YCSB binding for <a href="http://ceph.org/">RADOS of Ceph</a>. * * See {@code rados/README.md} for details. */ public class RadosClient extends DB { private Rados rados; private IoCTX ioctx; public static final String CONFIG_FILE_PROPERTY = "rados.configfile"; public static final String CONFIG_FILE_DEFAULT = "/etc/ceph/ceph.conf"; public static final String ID_PROPERTY = "rados.id"; public static final String ID_DEFAULT = "admin"; public static final String POOL_PROPERTY = "rados.pool"; public static final String POOL_DEFAULT = "data"; private boolean isInited = false; public void init() throws DBException { Properties props = getProperties(); String configfile = props.getProperty(CONFIG_FILE_PROPERTY); if (configfile == null) { configfile = CONFIG_FILE_DEFAULT; } String id = props.getProperty(ID_PROPERTY); if (id == null) { id = ID_DEFAULT; } String pool = props.getProperty(POOL_PROPERTY); if (pool == null) { pool = POOL_DEFAULT; } // try { // } catch (UnsatisfiedLinkError e) { // throw new DBException("RADOS library is not loaded."); // } rados = new Rados(id); try { rados.confReadFile(new File(configfile)); rados.connect(); ioctx = rados.ioCtxCreate(pool); } catch (RadosException e) { throw new DBException(e.getMessage() + ": " + e.getReturnValue()); } isInited = true; } public void cleanup() throws DBException { if (isInited) { rados.shutDown(); rados.ioCtxDestroy(ioctx); isInited = false; } } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { byte[] buffer; try { RadosObjectInfo info = ioctx.stat(key); buffer = new byte[(int)info.getSize()]; ReadOp rop = ioctx.readOpCreate(); ReadResult readResult = rop.queueRead(0, info.getSize()); // TODO: more size than byte length possible; // rop.operate(key, Rados.OPERATION_NOFLAG); // for rados-java 0.3.0 rop.operate(key, 0); // readResult.raiseExceptionOnError("Error ReadOP(%d)", readResult.getRVal()); // for rados-java 0.3.0 if (readResult.getRVal() < 0) { throw new RadosException("Error ReadOP", readResult.getRVal()); } if (info.getSize() != readResult.getBytesRead()) { return new Status("ERROR", "Error the object size read"); } readResult.getBuffer().get(buffer); } catch (RadosException e) { return new Status("ERROR-" + e.getReturnValue(), e.getMessage()); } JSONObject 
json = new JSONObject(new String(buffer, java.nio.charset.StandardCharsets.UTF_8)); Set<String> fieldsToReturn = (fields == null ? json.keySet() : fields); for (String name : fieldsToReturn) { result.put(name, new StringByteIterator(json.getString(name))); } return result.isEmpty() ? Status.ERROR : Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { JSONObject json = new JSONObject(); for (final Entry<String, ByteIterator> e : values.entrySet()) { json.put(e.getKey(), e.getValue().toString()); } try { ioctx.write(key, json.toString()); } catch (RadosException e) { return new Status("ERROR-" + e.getReturnValue(), e.getMessage()); } return Status.OK; } @Override public Status delete(String table, String key) { try { ioctx.remove(key); } catch (RadosException e) { return new Status("ERROR-" + e.getReturnValue(), e.getMessage()); } return Status.OK; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { Status rtn = delete(table, key); if (rtn.equals(Status.OK)) { return insert(table, key, values); } return rtn; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { return Status.NOT_IMPLEMENTED; } }
5,352
28.738889
108
java
null
NearPMSW-main/baseline/logging/YCSB/s3/src/main/java/site/ycsb/db/package-info.java
/** * Copyright (c) 2015 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. * * S3 storage client binding for YCSB. */ package site.ycsb.db;
724
31.954545
70
java
null
NearPMSW-main/baseline/logging/YCSB/s3/src/main/java/site/ycsb/db/S3Client.java
/** * Copyright (c) 2015 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. * * S3 storage client binding for YCSB. */ package site.ycsb.db; import java.util.HashMap; import java.util.Properties; import java.util.Set; import java.util.Vector; import java.io.ByteArrayInputStream; import java.io.InputStream; import java.util.*; import java.util.concurrent.atomic.AtomicInteger; import java.net.*; import com.amazonaws.util.IOUtils; import site.ycsb.ByteArrayByteIterator; import site.ycsb.ByteIterator; import site.ycsb.DB; import site.ycsb.DBException; import site.ycsb.Status; import com.amazonaws.services.s3.AmazonS3Client; import com.amazonaws.services.s3.*; import com.amazonaws.auth.*; import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.PutObjectResult; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.ClientConfiguration; import com.amazonaws.regions.Region; import com.amazonaws.regions.Regions; import com.amazonaws.Protocol; import com.amazonaws.services.s3.model.DeleteObjectRequest; import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3ObjectSummary; import com.amazonaws.services.s3.model.SSECustomerKey; import com.amazonaws.services.s3.model.PutObjectRequest; /** * S3 Storage client for YCSB framework. * * Properties to set: * * s3.accessKeyId=access key S3 aws * s3.secretKey=secret key S3 aws * s3.endPoint=s3.amazonaws.com * s3.region=us-east-1 * The parameter table is the name of the Bucket where to upload the files. * This must be created before to start the benchmark * The size of the file to upload is determined by two parameters: * - fieldcount this is the number of fields of a record in YCSB * - fieldlength this is the size in bytes of a single field in the record * together these two parameters define the size of the file to upload, * the size in bytes is given by the fieldlength multiplied by the fieldcount. * The name of the file is determined by the parameter key. *This key is automatically generated by YCSB. * */ public class S3Client extends DB { private static AmazonS3Client s3Client; private static String sse; private static SSECustomerKey ssecKey; private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** * Cleanup any state for this storage. * Called once per S3 instance; */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { try { s3Client.shutdown(); System.out.println("The client is shutdown successfully"); } catch (Exception e){ System.err.println("Could not shutdown the S3Client: "+e.toString()); e.printStackTrace(); } finally { if (s3Client != null){ s3Client = null; } } } } /** * Delete a file from S3 Storage. * * @param bucket * The name of the bucket * @param key * The record key of the file to delete. * @return OK on success, otherwise ERROR. 
See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String bucket, String key) { try { s3Client.deleteObject(new DeleteObjectRequest(bucket, key)); } catch (Exception e){ System.err.println("Not possible to delete the key "+key); e.printStackTrace(); return Status.ERROR; } return Status.OK; } /** * Initialize any state for the storage. * Called once per S3 instance; If the client is not null it is re-used. */ @Override public void init() throws DBException { final int count = INIT_COUNT.incrementAndGet(); synchronized (S3Client.class){ Properties propsCL = getProperties(); int recordcount = Integer.parseInt( propsCL.getProperty("recordcount")); int operationcount = Integer.parseInt( propsCL.getProperty("operationcount")); int numberOfOperations = 0; if (recordcount > 0){ if (recordcount > operationcount){ numberOfOperations = recordcount; } else { numberOfOperations = operationcount; } } else { numberOfOperations = operationcount; } if (count <= numberOfOperations) { String accessKeyId = null; String secretKey = null; String endPoint = null; String region = null; String maxErrorRetry = null; String maxConnections = null; String protocol = null; BasicAWSCredentials s3Credentials; ClientConfiguration clientConfig; if (s3Client != null) { System.out.println("Reusing the same client"); return; } try { InputStream propFile = S3Client.class.getClassLoader() .getResourceAsStream("s3.properties"); Properties props = new Properties(System.getProperties()); props.load(propFile); accessKeyId = props.getProperty("s3.accessKeyId"); if (accessKeyId == null){ accessKeyId = propsCL.getProperty("s3.accessKeyId"); } System.out.println(accessKeyId); secretKey = props.getProperty("s3.secretKey"); if (secretKey == null){ secretKey = propsCL.getProperty("s3.secretKey"); } System.out.println(secretKey); endPoint = props.getProperty("s3.endPoint"); if (endPoint == null){ endPoint = propsCL.getProperty("s3.endPoint", "s3.amazonaws.com"); } System.out.println(endPoint); region = props.getProperty("s3.region"); if (region == null){ region = propsCL.getProperty("s3.region", "us-east-1"); } System.out.println(region); maxErrorRetry = props.getProperty("s3.maxErrorRetry"); if (maxErrorRetry == null){ maxErrorRetry = propsCL.getProperty("s3.maxErrorRetry", "15"); } maxConnections = props.getProperty("s3.maxConnections"); if (maxConnections == null){ maxConnections = propsCL.getProperty("s3.maxConnections"); } protocol = props.getProperty("s3.protocol"); if (protocol == null){ protocol = propsCL.getProperty("s3.protocol", "HTTPS"); } sse = props.getProperty("s3.sse"); if (sse == null){ sse = propsCL.getProperty("s3.sse", "false"); } String ssec = props.getProperty("s3.ssec"); if (ssec == null){ ssec = propsCL.getProperty("s3.ssec", null); } else { ssecKey = new SSECustomerKey(ssec); } } catch (Exception e){ System.err.println("The file properties doesn't exist "+e.toString()); e.printStackTrace(); } try { System.out.println("Inizializing the S3 connection"); s3Credentials = new BasicAWSCredentials(accessKeyId, secretKey); clientConfig = new ClientConfiguration(); clientConfig.setMaxErrorRetry(Integer.parseInt(maxErrorRetry)); if(protocol.equals("HTTP")) { clientConfig.setProtocol(Protocol.HTTP); } else { clientConfig.setProtocol(Protocol.HTTPS); } if(maxConnections != null) { clientConfig.setMaxConnections(Integer.parseInt(maxConnections)); } s3Client = new AmazonS3Client(s3Credentials, clientConfig); s3Client.setRegion(Region.getRegion(Regions.fromName(region))); 
s3Client.setEndpoint(endPoint); System.out.println("Connection successfully initialized"); } catch (Exception e){ System.err.println("Could not connect to S3 storage: "+ e.toString()); e.printStackTrace(); throw new DBException(e); } } else { System.err.println( "The number of threads must be less or equal than the operations"); throw new DBException(new Error( "The number of threads must be less or equal than the operations")); } } } /** * Create a new File in the Bucket. Any field/value pairs in the specified * values HashMap will be written into the file with the specified record * key. * * @param bucket * The name of the bucket * @param key * The record key of the file to insert. * @param values * A HashMap of field/value pairs to insert in the file. * Only the content of the first field is written to a byteArray * multiplied by the number of field. In this way the size * of the file to upload is determined by the fieldlength * and fieldcount parameters. * @return OK on success, ERROR otherwise. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String bucket, String key, Map<String, ByteIterator> values) { return writeToStorage(bucket, key, values, true, sse, ssecKey); } /** * Read a file from the Bucket. Each field/value pair from the result * will be stored in a HashMap. * * @param bucket * The name of the bucket * @param key * The record key of the file to read. * @param fields * The list of fields to read, or null for all of them, * it is null by default * @param result * A HashMap of field/value pairs for the result * @return OK on success, ERROR otherwise. */ @Override public Status read(String bucket, String key, Set<String> fields, Map<String, ByteIterator> result) { return readFromStorage(bucket, key, result, ssecKey); } /** * Update a file in the database. Any field/value pairs in the specified * values HashMap will be written into the file with the specified file * key, overwriting any existing values with the same field name. * * @param bucket * The name of the bucket * @param key * The file key of the file to write. * @param values * A HashMap of field/value pairs to update in the record * @return OK on success, ERORR otherwise. */ @Override public Status update(String bucket, String key, Map<String, ByteIterator> values) { return writeToStorage(bucket, key, values, false, sse, ssecKey); } /** * Perform a range scan for a set of files in the bucket. Each * field/value pair from the result will be stored in a HashMap. * * @param bucket * The name of the bucket * @param startkey * The file key of the first file to read. * @param recordcount * The number of files to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one file * @return OK on success, ERROR otherwise. */ @Override public Status scan(String bucket, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { return scanFromStorage(bucket, startkey, recordcount, result, ssecKey); } /** * Upload a new object to S3 or update an object on S3. * * @param bucket * The name of the bucket * @param key * The file key of the object to upload/update. * @param values * The data to be written on the object * @param updateMarker * A boolean value. If true a new object will be uploaded * to S3. 
If false an existing object will be re-uploaded * */ protected Status writeToStorage(String bucket, String key, Map<String, ByteIterator> values, Boolean updateMarker, String sseLocal, SSECustomerKey ssecLocal) { int totalSize = 0; int fieldCount = values.size(); //number of fields to concatenate // getting the first field in the values Object keyToSearch = values.keySet().toArray()[0]; // getting the content of just one field byte[] sourceArray = values.get(keyToSearch).toArray(); int sizeArray = sourceArray.length; //size of each array if (updateMarker){ totalSize = sizeArray*fieldCount; } else { try { S3Object object = getS3ObjectAndMetadata(bucket, key, ssecLocal); int sizeOfFile = (int)object.getObjectMetadata().getContentLength(); fieldCount = sizeOfFile/sizeArray; totalSize = sizeOfFile; object.close(); } catch (Exception e){ System.err.println("Not possible to get the object :"+key); e.printStackTrace(); return Status.ERROR; } } byte[] destinationArray = new byte[totalSize]; int offset = 0; for (int i = 0; i < fieldCount; i++) { System.arraycopy(sourceArray, 0, destinationArray, offset, sizeArray); offset += sizeArray; } try (InputStream input = new ByteArrayInputStream(destinationArray)) { ObjectMetadata metadata = new ObjectMetadata(); metadata.setContentLength(totalSize); PutObjectRequest putObjectRequest = null; if (sseLocal.equals("true")) { metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); putObjectRequest = new PutObjectRequest(bucket, key, input, metadata); } else if (ssecLocal != null) { putObjectRequest = new PutObjectRequest(bucket, key, input, metadata).withSSECustomerKey(ssecLocal); } else { putObjectRequest = new PutObjectRequest(bucket, key, input, metadata); } try { PutObjectResult res = s3Client.putObject(putObjectRequest); if(res.getETag() == null) { return Status.ERROR; } else { if (sseLocal.equals("true")) { System.out.println("Uploaded object encryption status is " + res.getSSEAlgorithm()); } else if (ssecLocal != null) { System.out.println("Uploaded object encryption status is " + res.getSSEAlgorithm()); } } } catch (Exception e) { System.err.println("Not possible to write object :"+key); e.printStackTrace(); return Status.ERROR; } } catch (Exception e) { System.err.println("Error in the creation of the stream :"+e.toString()); e.printStackTrace(); return Status.ERROR; } return Status.OK; } /** * Download an object from S3. * * @param bucket * The name of the bucket * @param key * The file key of the object to upload/update. 
* @param result * The Hash map where data from the object are written * */ protected Status readFromStorage(String bucket, String key, Map<String, ByteIterator> result, SSECustomerKey ssecLocal) { try { S3Object object = getS3ObjectAndMetadata(bucket, key, ssecLocal); InputStream objectData = object.getObjectContent(); //consuming the stream // writing the stream to bytes and to results result.put(key, new ByteArrayByteIterator(IOUtils.toByteArray(objectData))); objectData.close(); object.close(); } catch (Exception e){ System.err.println("Not possible to get the object "+key); e.printStackTrace(); return Status.ERROR; } return Status.OK; } private S3Object getS3ObjectAndMetadata(String bucket, String key, SSECustomerKey ssecLocal) { GetObjectRequest getObjectRequest; if (ssecLocal != null) { getObjectRequest = new GetObjectRequest(bucket, key).withSSECustomerKey(ssecLocal); } else { getObjectRequest = new GetObjectRequest(bucket, key); } return s3Client.getObject(getObjectRequest); } /** * Perform an emulation of a database scan operation on a S3 bucket. * * @param bucket * The name of the bucket * @param startkey * The file key of the first file to read. * @param recordcount * The number of files to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one file * */ protected Status scanFromStorage(String bucket, String startkey, int recordcount, Vector<HashMap<String, ByteIterator>> result, SSECustomerKey ssecLocal) { int counter = 0; ObjectListing listing = s3Client.listObjects(bucket); List<S3ObjectSummary> summaries = listing.getObjectSummaries(); List<String> keyList = new ArrayList(); int startkeyNumber = 0; int numberOfIteration = 0; // getting the list of files in the bucket while (listing.isTruncated()) { listing = s3Client.listNextBatchOfObjects(listing); summaries.addAll(listing.getObjectSummaries()); } for (S3ObjectSummary summary : summaries) { String summaryKey = summary.getKey(); keyList.add(summaryKey); } // Sorting the list of files in Alphabetical order Collections.sort(keyList); // sorting the list // Getting the position of the startingfile for the scan for (String key : keyList) { if (key.equals(startkey)){ startkeyNumber = counter; } else { counter = counter + 1; } } // Checking if the total number of file is bigger than the file to read, // if not using the total number of Files if (recordcount < keyList.size()) { numberOfIteration = recordcount; } else { numberOfIteration = keyList.size(); } // Reading the Files starting from the startkey File till the end // of the Files or Till the recordcount number for (int i = startkeyNumber; i < numberOfIteration; i++){ HashMap<String, ByteIterator> resultTemp = new HashMap<String, ByteIterator>(); readFromStorage(bucket, keyList.get(i), resultTemp, ssecLocal); result.add(resultTemp); } return Status.OK; } }
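The writeToStorage logic above sizes the uploaded S3 object from YCSB's fieldlength and fieldcount settings by repeating the bytes of a single field. The standalone sketch below (the class and method names are hypothetical, not part of the S3 binding) shows that concatenation step in isolation, under the assumption that every field carries the same number of bytes.

import java.nio.charset.StandardCharsets;

/** Minimal sketch of the payload-building step described for writeToStorage. */
public final class PayloadSketch {
  /** Repeats one field's bytes fieldCount times, so size = fieldlength * fieldcount. */
  static byte[] buildPayload(byte[] singleFieldBytes, int fieldCount) {
    byte[] payload = new byte[singleFieldBytes.length * fieldCount];
    int offset = 0;
    for (int i = 0; i < fieldCount; i++) {
      System.arraycopy(singleFieldBytes, 0, payload, offset, singleFieldBytes.length);
      offset += singleFieldBytes.length;
    }
    return payload;
  }

  public static void main(String[] args) {
    byte[] field = "value0".getBytes(StandardCharsets.UTF_8); // content of one YCSB field
    byte[] payload = buildPayload(field, 10);                 // e.g. fieldcount = 10
    System.out.println("payload size = " + payload.length + " bytes");
  }
}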
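scanFromStorage above emulates a range scan by listing every key in the bucket, sorting the names alphabetically, locating the start key, and reading forward. The pure-JDK sketch below only illustrates that key ordering and slicing idea; the helper name and the in-memory key list are assumptions for illustration, the AWS listing and read calls are omitted, and the real binding's loop bounds may differ.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/** Sketch of the key ordering/slicing idea behind the emulated scan. */
public final class ScanSliceSketch {
  /** Returns up to recordCount keys starting at startKey, in alphabetical order. */
  static List<String> sliceKeys(List<String> allKeys, String startKey, int recordCount) {
    List<String> sorted = new ArrayList<>(allKeys);
    Collections.sort(sorted);
    int start = Math.max(0, sorted.indexOf(startKey));
    int end = Math.min(sorted.size(), start + recordCount);
    return sorted.subList(start, end);
  }

  public static void main(String[] args) {
    List<String> keys = List.of("user3", "user1", "user5", "user2", "user4");
    System.out.println(sliceKeys(keys, "user2", 3)); // [user2, user3, user4]
  }
}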
18,959
35.531792
108
java
null
NearPMSW-main/baseline/logging/YCSB/accumulo1.9/src/test/java/site/ycsb/db/accumulo/AccumuloTest.java
/* * Copyright (c) 2016 YCSB contributors. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.accumulo; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assume.assumeTrue; import java.util.Map.Entry; import java.util.Properties; import site.ycsb.Workload; import site.ycsb.DB; import site.ycsb.measurements.Measurements; import site.ycsb.workloads.CoreWorkload; import org.apache.accumulo.core.client.Connector; import org.apache.accumulo.core.client.Scanner; import org.apache.accumulo.core.client.security.tokens.PasswordToken; import org.apache.accumulo.core.data.Key; import org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.security.Authorizations; import org.apache.accumulo.core.security.TablePermission; import org.apache.accumulo.minicluster.MiniAccumuloCluster; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Use an Accumulo MiniCluster to test out basic workload operations with * the Accumulo binding. */ public class AccumuloTest { private static final Logger LOG = LoggerFactory.getLogger(AccumuloTest.class); private static final int INSERT_COUNT = 2000; private static final int TRANSACTION_COUNT = 2000; @ClassRule public static TemporaryFolder workingDir = new TemporaryFolder(); @Rule public TestName test = new TestName(); private static MiniAccumuloCluster cluster; private static Properties properties; private Workload workload; private DB client; private Properties workloadProps; private static boolean isWindows() { final String os = System.getProperty("os.name"); return os.startsWith("Windows"); } @BeforeClass public static void setup() throws Exception { // Minicluster setup fails on Windows with an UnsatisfiedLinkError. // Skip if windows. 
assumeTrue(!isWindows()); cluster = new MiniAccumuloCluster(workingDir.newFolder("accumulo").getAbsoluteFile(), "protectyaneck"); LOG.debug("starting minicluster"); cluster.start(); LOG.debug("creating connection for admin operations."); // set up the table and user final Connector admin = cluster.getConnector("root", "protectyaneck"); admin.tableOperations().create(CoreWorkload.TABLENAME_PROPERTY_DEFAULT); admin.securityOperations().createLocalUser("ycsb", new PasswordToken("protectyaneck")); admin.securityOperations().grantTablePermission("ycsb", CoreWorkload.TABLENAME_PROPERTY_DEFAULT, TablePermission.READ); admin.securityOperations().grantTablePermission("ycsb", CoreWorkload.TABLENAME_PROPERTY_DEFAULT, TablePermission.WRITE); // set properties the binding will read properties = new Properties(); properties.setProperty("accumulo.zooKeepers", cluster.getZooKeepers()); properties.setProperty("accumulo.instanceName", cluster.getInstanceName()); properties.setProperty("accumulo.columnFamily", "family"); properties.setProperty("accumulo.username", "ycsb"); properties.setProperty("accumulo.password", "protectyaneck"); // cut down the batch writer timeout so that writes will push through. properties.setProperty("accumulo.batchWriterMaxLatency", "4"); // set these explicitly to the defaults at the time we're compiled, since they'll be inlined in our class. properties.setProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); properties.setProperty(CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT); properties.setProperty(CoreWorkload.INSERT_ORDER_PROPERTY, "ordered"); } @AfterClass public static void clusterCleanup() throws Exception { if (cluster != null) { LOG.debug("shutting down minicluster"); cluster.stop(); cluster = null; } } @Before public void client() throws Exception { LOG.debug("Loading workload properties for {}", test.getMethodName()); workloadProps = new Properties(); workloadProps.load(getClass().getResourceAsStream("/workloads/" + test.getMethodName())); for (String prop : properties.stringPropertyNames()) { workloadProps.setProperty(prop, properties.getProperty(prop)); } // TODO we need a better test rig for 'run this ycsb workload' LOG.debug("initializing measurements and workload"); Measurements.setProperties(workloadProps); workload = new CoreWorkload(); workload.init(workloadProps); LOG.debug("initializing client"); client = new AccumuloClient(); client.setProperties(workloadProps); client.init(); } @After public void cleanup() throws Exception { if (client != null) { LOG.debug("cleaning up client"); client.cleanup(); client = null; } if (workload != null) { LOG.debug("cleaning up workload"); workload.cleanup(); } } @After public void truncateTable() throws Exception { if (cluster != null) { LOG.debug("truncating table {}", CoreWorkload.TABLENAME_PROPERTY_DEFAULT); final Connector admin = cluster.getConnector("root", "protectyaneck"); admin.tableOperations().deleteRows(CoreWorkload.TABLENAME_PROPERTY_DEFAULT, null, null); } } @Test public void workloada() throws Exception { runWorkload(); } @Test public void workloadb() throws Exception { runWorkload(); } @Test public void workloadc() throws Exception { runWorkload(); } @Test public void workloadd() throws Exception { runWorkload(); } @Test public void workloade() throws Exception { runWorkload(); } /** * go through a workload cycle. 
* <ol> * <li>initialize thread-specific state * <li>load the workload dataset * <li>run workload transactions * </ol> */ private void runWorkload() throws Exception { final Object state = workload.initThread(workloadProps,0,0); LOG.debug("load"); for (int i = 0; i < INSERT_COUNT; i++) { assertTrue("insert failed.", workload.doInsert(client, state)); } // Ensure we wait long enough for the batch writer to flush // TODO accumulo client should be flushing per insert by default. Thread.sleep(2000); LOG.debug("verify number of cells"); final Scanner scanner = cluster.getConnector("root", "protectyaneck").createScanner(CoreWorkload.TABLENAME_PROPERTY_DEFAULT, Authorizations.EMPTY); int count = 0; for (Entry<Key, Value> entry : scanner) { count++; } assertEquals("Didn't get enough total cells.", (Integer.valueOf(CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT) * INSERT_COUNT), count); LOG.debug("run"); for (int i = 0; i < TRANSACTION_COUNT; i++) { assertTrue("transaction failed.", workload.doTransaction(client, state)); } } }
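The client() setup in the test above loads a per-test workload properties file and then overlays the shared cluster connection properties on top of it, so connection values always win. A small sketch of that overlay using only java.util.Properties follows; the class name and the sample property values are placeholders for illustration.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Properties;

/** Sketch of overlaying connection properties onto a workload properties file. */
public final class WorkloadPropsSketch {
  static Properties merged(InputStream workloadFile, Properties connectionProps) throws IOException {
    Properties workloadProps = new Properties();
    workloadProps.load(workloadFile);                                         // e.g. /workloads/workloada
    for (String name : connectionProps.stringPropertyNames()) {
      workloadProps.setProperty(name, connectionProps.getProperty(name));     // connection values override
    }
    return workloadProps;
  }

  public static void main(String[] args) throws IOException {
    Properties conn = new Properties();
    conn.setProperty("accumulo.zooKeepers", "localhost:2181");                // assumed value
    InputStream workload =
        new ByteArrayInputStream("recordcount=1000\n".getBytes(StandardCharsets.UTF_8));
    System.out.println(merged(workload, conn));
  }
}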
7,583
33.630137
151
java
null
NearPMSW-main/baseline/logging/YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/package-info.java
/** * Copyright (c) 2015 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * YCSB binding for <a href="https://accumulo.apache.org/">Apache Accumulo</a>. */ package site.ycsb.db.accumulo;
779
32.913043
79
java
null
NearPMSW-main/baseline/logging/YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java
/** * Copyright (c) 2011 YCSB++ project, 2014-2016 YCSB contributors. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package site.ycsb.db.accumulo; import static java.nio.charset.StandardCharsets.UTF_8; import java.io.IOException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.SortedMap; import java.util.Vector; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import org.apache.accumulo.core.client.AccumuloException; import org.apache.accumulo.core.client.AccumuloSecurityException; import org.apache.accumulo.core.client.BatchWriter; import org.apache.accumulo.core.client.BatchWriterConfig; import org.apache.accumulo.core.client.ClientConfiguration; import org.apache.accumulo.core.client.Connector; import org.apache.accumulo.core.client.IteratorSetting; import org.apache.accumulo.core.client.MutationsRejectedException; import org.apache.accumulo.core.client.Scanner; import org.apache.accumulo.core.client.TableNotFoundException; import org.apache.accumulo.core.client.ZooKeeperInstance; import org.apache.accumulo.core.client.security.tokens.AuthenticationToken; import org.apache.accumulo.core.client.security.tokens.PasswordToken; import org.apache.accumulo.core.data.Key; import org.apache.accumulo.core.data.Mutation; import org.apache.accumulo.core.data.Range; import org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.iterators.user.WholeRowIterator; import org.apache.accumulo.core.security.Authorizations; import org.apache.accumulo.core.util.CleanUp; import org.apache.hadoop.io.Text; import site.ycsb.ByteArrayByteIterator; import site.ycsb.ByteIterator; import site.ycsb.DB; import site.ycsb.DBException; import site.ycsb.Status; /** * <a href="https://accumulo.apache.org/">Accumulo</a> binding for YCSB. 
*/ public class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap<String, BatchWriter> writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { CleanUp.shutdownNow(); } }); } @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance(new ClientConfiguration() .withInstance(getProperties().getProperty("accumulo.instanceName")) .withZkHosts(getProperties().getProperty("accumulo.zooKeepers"))); try { String principal = getProperties().getProperty("accumulo.username"); AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) { System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work."); } } @Override public void cleanup() throws DBException { try { Iterator<BatchWriter> iterator = writers.values().iterator(); while (iterator.hasNext()) { BatchWriter writer = iterator.next(); writer.close(); iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException { // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. // We know that YCSB is really only ever going to send us data for one table, so using // a concurrent data structure is overkill (especially in such a hot code path). // However, the impact seems to be relatively negligible in trivial local tests and it's // "more correct" WRT to the API. BatchWriter writer = writers.get(table); if (null == writer) { BatchWriter newWriter = createBatchWriter(table); BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); // Someone beat us to creating a BatchWriter for this table, use their BatchWriters if (null != oldWriter) { try { // Make sure to clean up our new batchwriter! newWriter.close(); } catch (MutationsRejectedException e) { throw new RuntimeException(e); } writer = oldWriter; } else { writer = newWriter; } } return writer; } /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. 
int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); } /** * Gets a scanner from Accumulo over one row. * * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException { Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } return scanner; } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Scanner scanner = null; try { scanner = getRow(table, new Text(key), null); // Pick out the results we care about. final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { System.err.println("Error trying to reading Accumulo table " + table + " " + key); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // Just make the end 'infinity' and only read as much as we need. Scanner scanner = null; try { scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(new Text(startkey), null)); // Have Accumulo send us complete rows, serialized in a single Key-Value pair IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); scanner.addScanIterator(cfg); // If no fields are provided, we assume one column/row. if (fields != null) { // And add each of them as fields we want. for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } int count = 0; for (Entry<Key, Value> entry : scanner) { // Deserialize the row SortedMap<Key, Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); HashMap<String, ByteIterator> rowData; if (null != fields) { rowData = new HashMap<>(fields.size()); } else { rowData = new HashMap<>(); } result.add(rowData); // Parse the data in the row, avoid unnecessary Text object creation final Text cq = new Text(); for (Entry<Key, Value> rowEntry : row.entrySet()) { rowEntry.getKey().getColumnQualifier(cq); rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); } if (count++ == recordcount) { // Done reading the last row. 
break; } } } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } catch (IOException e) { System.err.println("Error deserializing data from Accumulo."); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { BatchWriter bw = null; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error opening batch writer to Accumulo table " + table); e.printStackTrace(); return Status.ERROR; } Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { bw.addMutation(mutInsert); } catch (MutationsRejectedException e) { System.err.println("Error performing update."); e.printStackTrace(); return Status.ERROR; } return Status.BATCHED_OK; } @Override public Status insert(String t, String key, Map<String, ByteIterator> values) { return update(t, key, values); } @Override public Status delete(String table, String key) { BatchWriter bw; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } try { deleteRow(table, new Text(key), bw); } catch (TableNotFoundException | MutationsRejectedException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } catch (RuntimeException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } return Status.OK; } // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException { // TODO Use a batchDeleter instead deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
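getWriter above caches one BatchWriter per table in a ConcurrentHashMap and resolves creation races with putIfAbsent, closing the writer that lost the race. The same pattern, reduced to a generic sketch with a stand-in Writer type (all names here are illustrative, not Accumulo APIs), looks like this:

import java.util.concurrent.ConcurrentHashMap;

/** Sketch of the per-key resource cache used for table writers. */
public final class WriterCacheSketch {
  interface Writer extends AutoCloseable {
    @Override void close(); // stand-in for BatchWriter
  }

  private final ConcurrentHashMap<String, Writer> writers = new ConcurrentHashMap<>();

  Writer getWriter(String table) {
    Writer writer = writers.get(table);
    if (writer == null) {
      Writer created = newWriter(table);
      Writer existing = writers.putIfAbsent(table, created);
      if (existing != null) {
        created.close();      // another thread won the race; discard ours and reuse theirs
        writer = existing;
      } else {
        writer = created;
      }
    }
    return writer;
  }

  private Writer newWriter(String table) {
    return () -> System.out.println("closed writer for " + table); // placeholder resource
  }

  public static void main(String[] args) {
    WriterCacheSketch cache = new WriterCacheSketch();
    System.out.println(cache.getWriter("usertable") == cache.getWriter("usertable")); // true
  }
}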
12,939
33.691689
100
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/examples/java/SimpleHistogramExample.java
/** * HistogramPerfTest.java * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ import org.HdrHistogram.*; import java.net.DatagramSocket; import java.net.SocketException; /** * A simple example of using HdrHistogram: run for 20 seconds collecting the * time it takes to perform a simple Datagram Socket create/close operation, * and report a histogram of the times at the end. */ public class SimpleHistogramExample { // A Histogram covering the range from 1 nsec to 1 hour with 3 decimal point resolution: static Histogram histogram = new Histogram(3600000000000L, 3); static public volatile DatagramSocket socket; static long WARMUP_TIME_MSEC = 5000; static long RUN_TIME_MSEC = 20000; static void recordTimeToCreateAndCloseDatagramSocket() { long startTime = System.nanoTime(); try { socket = new DatagramSocket(); } catch (SocketException ex) { } finally { socket.close(); } long endTime = System.nanoTime(); histogram.recordValue(endTime - startTime); } public static void main(final String[] args) { long startTime = System.currentTimeMillis(); long now; do { recordTimeToCreateAndCloseDatagramSocket(); now = System.currentTimeMillis(); } while (now - startTime < WARMUP_TIME_MSEC); histogram.reset(); do { recordTimeToCreateAndCloseDatagramSocket(); now = System.currentTimeMillis(); } while (now - startTime < RUN_TIME_MSEC); System.out.println("Recorded latencies [in usec] for Create+Close of a DatagramSocket:"); histogram.outputPercentileDistribution(System.out, 1000.0); } }
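The example above records raw latencies only. When the measured operation is issued at a known rate, HdrHistogram can also compensate for coordinated omission by synthesizing the samples missed during a stall. A minimal sketch using recordValueWithExpectedInterval, with synthetic values and an assumed 10 msec expected interval, follows:

import org.HdrHistogram.Histogram;

/** Sketch: compensating for coordinated omission with an expected interval. */
public final class ExpectedIntervalSketch {
  public static void main(String[] args) {
    // 1 nsec to 1 hour range, 3 significant decimal digits:
    Histogram histogram = new Histogram(3600000000000L, 3);
    long expectedIntervalNsec = 10_000_000L;                  // assume one sample every 10 msec
    for (int i = 0; i < 1000; i++) {
      histogram.recordValueWithExpectedInterval(1_000_000L, expectedIntervalNsec); // 1 msec samples
    }
    // One 100 msec stall; additional samples are filled in at the expected interval:
    histogram.recordValueWithExpectedInterval(100_000_000L, expectedIntervalNsec);
    histogram.outputPercentileDistribution(System.out, 1_000_000.0); // report values in msec
  }
}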
1,865
28.619048
97
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/perf/java/org/HdrHistogram/HistogramPerfTest.java
/** * HistogramPerfTest.java * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import org.junit.jupiter.api.Test; /** * JUnit test for {@link org.HdrHistogram.Histogram} */ public class HistogramPerfTest { static final long highestTrackableValue = 3600L * 1000 * 1000; // e.g. for 1 hr in usec units static final int numberOfSignificantValueDigits = 3; static final long testValueLevel = 12340; static final long warmupLoopLength = 50000; static final long rawTimingLoopCount = 800000000L; static final long rawDoubleTimingLoopCount = 300000000L; static final long singleWriterIntervalTimingLoopCount = 100000000L; static final long singleWriterDoubleIntervalTimingLoopCount = 100000000L; static final long intervalTimingLoopCount = 40000000L; static final long synchronizedTimingLoopCount = 180000000L; static final long atomicTimingLoopCount = 80000000L; static final long concurrentTimingLoopCount = 50000000L; void recordLoop(AbstractHistogram histogram, long loopCount) { for (long i = 0; i < loopCount; i++) histogram.recordValue(testValueLevel + (i & 0x8000)); } void recordLoopWithExpectedInterval(AbstractHistogram histogram, long loopCount, long expectedInterval) { for (long i = 0; i < loopCount; i++) histogram.recordValueWithExpectedInterval(testValueLevel + (i & 0x8000), expectedInterval); } void recordLoopWithExpectedInterval(Recorder histogram, long loopCount, long expectedInterval) { for (long i = 0; i < loopCount; i++) histogram.recordValueWithExpectedInterval(testValueLevel + (i & 0x8000), expectedInterval); } void recordLoopWithExpectedInterval(SingleWriterRecorder histogram, long loopCount, long expectedInterval) { for (long i = 0; i < loopCount; i++) histogram.recordValueWithExpectedInterval(testValueLevel + (i & 0x8000), expectedInterval); } void recordLoopWithExpectedInterval(DoubleRecorder histogram, long loopCount, long expectedInterval) { for (long i = 0; i < loopCount; i++) histogram.recordValueWithExpectedInterval(testValueLevel + (i & 0x8000), expectedInterval); } void recordLoopWithExpectedInterval(SingleWriterDoubleRecorder histogram, long loopCount, long expectedInterval) { for (long i = 0; i < loopCount; i++) histogram.recordValueWithExpectedInterval(testValueLevel + (i & 0x8000), expectedInterval); } void recordLoopDoubleWithExpectedInterval(DoubleHistogram histogram, long loopCount, double expectedInterval) { for (long i = 0; i < loopCount; i++) histogram.recordValueWithExpectedInterval(testValueLevel + (i & 0x8000), expectedInterval); } long LeadingZerosSpeedLoop(long loopCount) { long sum = 0; for (long i = 0; i < loopCount; i++) { // long val = testValueLevel + (i & 0x8000); long val = testValueLevel; sum += Long.numberOfLeadingZeros(val); sum += Long.numberOfLeadingZeros(val); sum += Long.numberOfLeadingZeros(val); sum += Long.numberOfLeadingZeros(val); sum += Long.numberOfLeadingZeros(val); sum += Long.numberOfLeadingZeros(val); sum += Long.numberOfLeadingZeros(val); sum += Long.numberOfLeadingZeros(val); } return sum; } public void testRawRecordingSpeedSingleValue(String label, AbstractHistogram histogram, long timingLoopCount) throws Exception { System.out.println("\nTiming recording speed with single value per recording:"); // Warm up: long startTime = System.nanoTime(); recordLoop(histogram, warmupLoopLength); long endTime = System.nanoTime(); long deltaUsec = (endTime - startTime) / 1000L; long rate = 1000000 * 
warmupLoopLength / deltaUsec; System.out.println(label + "Warmup: " + warmupLoopLength + " value recordings completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); histogram.reset(); // Wait a bit to make sure compiler had a cache to do it's stuff: try { Thread.sleep(1000); } catch (InterruptedException e) { } startTime = System.nanoTime(); recordLoop(histogram, timingLoopCount); endTime = System.nanoTime(); deltaUsec = (endTime - startTime) / 1000L; rate = 1000000 * timingLoopCount / deltaUsec; System.out.println(label + "Hot code timing:"); System.out.println(label + timingLoopCount + " value recordings completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); rate = 1000000 * histogram.getTotalCount() / deltaUsec; System.out.println(label + histogram.getTotalCount() + " raw recorded entries completed in " + deltaUsec + " usec, rate = " + rate + " recorded values per sec."); } public void testRawRecordingSpeedAtExpectedInterval(String label, AbstractHistogram histogram, long expectedInterval, long timingLoopCount) throws Exception { System.out.println("\nTiming recording speed with expectedInterval = " + expectedInterval + " :"); // Warm up: long startTime = System.nanoTime(); recordLoopWithExpectedInterval(histogram, warmupLoopLength, expectedInterval); long endTime = System.nanoTime(); long deltaUsec = (endTime - startTime) / 1000L; long rate = 1000000 * warmupLoopLength / deltaUsec; System.out.println(label + "Warmup: " + warmupLoopLength + " value recordings completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); histogram.reset(); // Wait a bit to make sure compiler had a cache to do it's stuff: try { Thread.sleep(1000); } catch (InterruptedException e) { } startTime = System.nanoTime(); recordLoopWithExpectedInterval(histogram, timingLoopCount, expectedInterval); endTime = System.nanoTime(); deltaUsec = (endTime - startTime) / 1000L; rate = 1000000 * timingLoopCount / deltaUsec; System.out.println(label + "Hot code timing:"); System.out.println(label + timingLoopCount + " value recordings completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); rate = 1000000 * histogram.getTotalCount() / deltaUsec; System.out.println(label + histogram.getTotalCount() + " raw recorded entries completed in " + deltaUsec + " usec, rate = " + rate + " recorded values per sec."); } public void testRawRecordingSpeedAtExpectedInterval(String label, Recorder intervalHistogram, long expectedInterval, long timingLoopCount) throws Exception { System.out.println("\nTiming recording speed with expectedInterval = " + expectedInterval + " :"); // Warm up: long startTime = System.nanoTime(); recordLoopWithExpectedInterval(intervalHistogram, warmupLoopLength, expectedInterval); long endTime = System.nanoTime(); long deltaUsec = (endTime - startTime) / 1000L; long rate = 1000000 * warmupLoopLength / deltaUsec; System.out.println(label + "Warmup: " + warmupLoopLength + " value recordings completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); intervalHistogram.reset(); // Wait a bit to make sure compiler had a cache to do it's stuff: try { Thread.sleep(1000); } catch (InterruptedException e) { } startTime = System.nanoTime(); recordLoopWithExpectedInterval(intervalHistogram, timingLoopCount, expectedInterval); endTime = System.nanoTime(); deltaUsec = (endTime - startTime) / 1000L; rate = 1000000 * timingLoopCount / deltaUsec; 
System.out.println(label + "Hot code timing:"); System.out.println(label + timingLoopCount + " value recordings completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); Histogram histogram = intervalHistogram.getIntervalHistogram(); rate = 1000000 * histogram.getTotalCount() / deltaUsec; System.out.println(label + histogram.getTotalCount() + " raw recorded entries completed in " + deltaUsec + " usec, rate = " + rate + " recorded values per sec."); } public void testRawRecordingSpeedAtExpectedInterval(String label, SingleWriterRecorder intervalHistogram, long expectedInterval, long timingLoopCount) throws Exception { System.out.println("\nTiming recording speed with expectedInterval = " + expectedInterval + " :"); // Warm up: long startTime = System.nanoTime(); recordLoopWithExpectedInterval(intervalHistogram, warmupLoopLength, expectedInterval); long endTime = System.nanoTime(); long deltaUsec = (endTime - startTime) / 1000L; long rate = 1000000 * warmupLoopLength / deltaUsec; System.out.println(label + "Warmup: " + warmupLoopLength + " value recordings completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); intervalHistogram.reset(); // Wait a bit to make sure compiler had a cache to do it's stuff: try { Thread.sleep(1000); } catch (InterruptedException e) { } startTime = System.nanoTime(); recordLoopWithExpectedInterval(intervalHistogram, timingLoopCount, expectedInterval); endTime = System.nanoTime(); deltaUsec = (endTime - startTime) / 1000L; rate = 1000000 * timingLoopCount / deltaUsec; System.out.println(label + "Hot code timing:"); System.out.println(label + timingLoopCount + " value recordings completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); Histogram histogram = intervalHistogram.getIntervalHistogram(); rate = 1000000 * histogram.getTotalCount() / deltaUsec; System.out.println(label + histogram.getTotalCount() + " raw recorded entries completed in " + deltaUsec + " usec, rate = " + rate + " recorded values per sec."); } public void testRawRecordingSpeedAtExpectedInterval(String label, SingleWriterDoubleRecorder intervalHistogram, long expectedInterval, long timingLoopCount) throws Exception { System.out.println("\nTiming recording speed with expectedInterval = " + expectedInterval + " :"); // Warm up: long startTime = System.nanoTime(); recordLoopWithExpectedInterval(intervalHistogram, warmupLoopLength, expectedInterval); long endTime = System.nanoTime(); long deltaUsec = (endTime - startTime) / 1000L; long rate = 1000000 * warmupLoopLength / deltaUsec; System.out.println(label + "Warmup: " + warmupLoopLength + " value recordings completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); intervalHistogram.reset(); // Wait a bit to make sure compiler had a cache to do it's stuff: try { Thread.sleep(1000); } catch (InterruptedException e) { } startTime = System.nanoTime(); recordLoopWithExpectedInterval(intervalHistogram, timingLoopCount, expectedInterval); endTime = System.nanoTime(); deltaUsec = (endTime - startTime) / 1000L; rate = 1000000 * timingLoopCount / deltaUsec; System.out.println(label + "Hot code timing:"); System.out.println(label + timingLoopCount + " value recordings completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); DoubleHistogram histogram = intervalHistogram.getIntervalHistogram(); rate = 1000000 * histogram.getTotalCount() / deltaUsec; System.out.println(label + 
histogram.getTotalCount() + " raw recorded entries completed in " + deltaUsec + " usec, rate = " + rate + " recorded values per sec."); } public void testRawRecordingSpeedAtExpectedInterval(String label, DoubleRecorder intervalHistogram, long expectedInterval, long timingLoopCount) throws Exception { System.out.println("\nTiming recording speed with expectedInterval = " + expectedInterval + " :"); // Warm up: long startTime = System.nanoTime(); recordLoopWithExpectedInterval(intervalHistogram, warmupLoopLength, expectedInterval); long endTime = System.nanoTime(); long deltaUsec = (endTime - startTime) / 1000L; long rate = 1000000 * warmupLoopLength / deltaUsec; System.out.println(label + "Warmup: " + warmupLoopLength + " value recordings completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); intervalHistogram.reset(); // Wait a bit to make sure compiler had a cache to do it's stuff: try { Thread.sleep(1000); } catch (InterruptedException e) { } startTime = System.nanoTime(); recordLoopWithExpectedInterval(intervalHistogram, timingLoopCount, expectedInterval); endTime = System.nanoTime(); deltaUsec = (endTime - startTime) / 1000L; rate = 1000000 * timingLoopCount / deltaUsec; System.out.println(label + "Hot code timing:"); System.out.println(label + timingLoopCount + " value recordings completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); DoubleHistogram histogram = intervalHistogram.getIntervalHistogram(); rate = 1000000 * histogram.getTotalCount() / deltaUsec; System.out.println(label + histogram.getTotalCount() + " raw recorded entries completed in " + deltaUsec + " usec, rate = " + rate + " recorded values per sec."); } public void testRawDoubleRecordingSpeedAtExpectedInterval(String label, DoubleHistogram histogram, long expectedInterval, long timingLoopCount) throws Exception { System.out.println("\nTiming recording speed with expectedInterval = " + expectedInterval + " :"); // Warm up: long startTime = System.nanoTime(); recordLoopDoubleWithExpectedInterval(histogram, warmupLoopLength, expectedInterval); long endTime = System.nanoTime(); long deltaUsec = (endTime - startTime) / 1000L; long rate = 1000000 * warmupLoopLength / deltaUsec; System.out.println(label + "Warmup: " + warmupLoopLength + " value recordings completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); histogram.reset(); // Wait a bit to make sure compiler had a cache to do it's stuff: try { Thread.sleep(1000); } catch (InterruptedException e) { } startTime = System.nanoTime(); recordLoopDoubleWithExpectedInterval(histogram, timingLoopCount, expectedInterval); endTime = System.nanoTime(); deltaUsec = (endTime - startTime) / 1000L; rate = 1000000 * timingLoopCount / deltaUsec; System.out.println(label + "Hot code timing:"); System.out.println(label + timingLoopCount + " value recordings completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); rate = 1000000 * histogram.getTotalCount() / deltaUsec; System.out.println(label + histogram.getTotalCount() + " raw recorded entries completed in " + deltaUsec + " usec, rate = " + rate + " recorded values per sec."); } @Test public void testRawRecordingSpeedSingleValue() throws Exception { AbstractHistogram histogram; histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); System.out.println("\n\nTiming Histogram:"); testRawRecordingSpeedSingleValue("Histogram: ", histogram, rawTimingLoopCount); } @Test public void 
testRawRecordingSpeed() throws Exception { AbstractHistogram histogram; histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); System.out.println("\n\nTiming Histogram:"); testRawRecordingSpeedAtExpectedInterval("Histogram: ", histogram, 1000000000, rawTimingLoopCount); } @Test public void testRawPackedRecordingSpeedSingleValue() throws Exception { AbstractHistogram histogram; histogram = new PackedHistogram(highestTrackableValue, numberOfSignificantValueDigits); System.out.println("\n\nTiming PackedHistogram:"); testRawRecordingSpeedSingleValue("PackedHistogram: ", histogram, rawTimingLoopCount); } @Test public void testRawPackedRecordingSpeed() throws Exception { AbstractHistogram histogram; histogram = new PackedHistogram(highestTrackableValue, numberOfSignificantValueDigits); System.out.println("\n\nTiming PackedHistogram:"); testRawRecordingSpeedAtExpectedInterval("PackedHistogram: ", histogram, 1000000000, rawTimingLoopCount); } @Test public void testSingleWriterIntervalRecordingSpeed() throws Exception { SingleWriterRecorder histogramRecorder; histogramRecorder = new SingleWriterRecorder(highestTrackableValue, numberOfSignificantValueDigits); System.out.println("\n\nTiming SingleWriterIntervalHistogramRecorder:"); testRawRecordingSpeedAtExpectedInterval("SingleWriterRecorder: ", histogramRecorder, 1000000000, singleWriterIntervalTimingLoopCount); } @Test public void testIntervalRecordingSpeed() throws Exception { Recorder histogramRecorder; histogramRecorder = new Recorder(highestTrackableValue, numberOfSignificantValueDigits); System.out.println("\n\nTiming IntervalHistogramRecorder:"); testRawRecordingSpeedAtExpectedInterval("Recorder: ", histogramRecorder, 1000000000, intervalTimingLoopCount); } @Test public void testRawDoubleRecordingSpeed() throws Exception { DoubleHistogram histogram; histogram = new DoubleHistogram(highestTrackableValue, numberOfSignificantValueDigits); System.out.println("\n\nTiming DoubleHistogram:"); testRawDoubleRecordingSpeedAtExpectedInterval("DoubleHistogram: ", histogram, 1000000000, rawDoubleTimingLoopCount); } @Test public void testDoubleIntervalRecordingSpeed() throws Exception { DoubleRecorder histogramRecorder; histogramRecorder = new DoubleRecorder(highestTrackableValue, numberOfSignificantValueDigits); System.out.println("\n\nTiming IntervalDoubleHistogramRecorder:"); testRawRecordingSpeedAtExpectedInterval("DoubleRecorder: ", histogramRecorder, 1000000000, intervalTimingLoopCount); } @Test public void testSingleWriterDoubleIntervalRecordingSpeed() throws Exception { SingleWriterDoubleRecorder histogramRecorder; histogramRecorder = new SingleWriterDoubleRecorder(highestTrackableValue, numberOfSignificantValueDigits); System.out.println("\n\nTiming SingleWriterIntervalDoubleHistogramRecorder:"); testRawRecordingSpeedAtExpectedInterval("SingleWriterDoubleRecorder: ", histogramRecorder, 1000000000, singleWriterDoubleIntervalTimingLoopCount); } @Test public void testRawSyncronizedRecordingSpeed() throws Exception { AbstractHistogram histogram; histogram = new SynchronizedHistogram(highestTrackableValue, numberOfSignificantValueDigits); System.out.println("\n\nTiming SynchronizedHistogram:"); testRawRecordingSpeedAtExpectedInterval("SynchronizedHistogram: ", histogram, 1000000000, synchronizedTimingLoopCount); } @Test public void testRawAtomicRecordingSpeed() throws Exception { AbstractHistogram histogram; histogram = new AtomicHistogram(highestTrackableValue, numberOfSignificantValueDigits); System.out.println("\n\nTiming 
AtomicHistogram:"); testRawRecordingSpeedAtExpectedInterval("AtomicHistogram: ", histogram, 1000000000, atomicTimingLoopCount); } @Test public void testRawConcurrentRecordingSpeed() throws Exception { AbstractHistogram histogram; histogram = new ConcurrentHistogram(highestTrackableValue, numberOfSignificantValueDigits); System.out.println("\n\nTiming ConcurrentHistogram:"); testRawRecordingSpeedAtExpectedInterval("AtomicHistogram: ", histogram, 1000000000, concurrentTimingLoopCount); } public void testLeadingZerosSpeed() throws Exception { System.out.println("\nTiming LeadingZerosSpeed :"); long startTime = System.nanoTime(); LeadingZerosSpeedLoop(warmupLoopLength); long endTime = System.nanoTime(); long deltaUsec = (endTime - startTime) / 1000L; long rate = 1000000 * warmupLoopLength / deltaUsec; System.out.println("Warmup:\n" + warmupLoopLength + " Leading Zero loops completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); // Wait a bit to make sure compiler had a cache to do it's stuff: try { Thread.sleep(1000); } catch (InterruptedException e) { } startTime = System.nanoTime(); LeadingZerosSpeedLoop(rawTimingLoopCount); endTime = System.nanoTime(); deltaUsec = (endTime - startTime) / 1000L; rate = 1000000 * rawTimingLoopCount / deltaUsec; System.out.println("Hot code timing:"); System.out.println(rawTimingLoopCount + " Leading Zero loops completed in " + deltaUsec + " usec, rate = " + rate + " value recording calls per sec."); } public static void main(String[] args) { try { HistogramPerfTest test = new HistogramPerfTest(); test.testLeadingZerosSpeed(); Thread.sleep(1000000); } catch (Exception e) { System.out.println("Exception: " + e); } } }
22,754
52.290398
154
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/test/java/org/HdrHistogram/HistogramAutosizingTest.java
/** * HistogramTest.java * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import org.junit.Assert; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import static org.HdrHistogram.HistogramTestUtils.constructHistogram; import static org.HdrHistogram.HistogramTestUtils.constructDoubleHistogram; /** * JUnit test for {@link org.HdrHistogram.Histogram} */ public class HistogramAutosizingTest { static final long highestTrackableValue = 3600L * 1000 * 1000; // e.g. for 1 hr in usec units @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testHistogramAutoSizingEdges(Class c) throws Exception { AbstractHistogram histogram = constructHistogram(c,3); histogram.recordValue((1L << 62) - 1); Assert.assertEquals(52, histogram.bucketCount); Assert.assertEquals(54272, histogram.countsArrayLength); histogram.recordValue(Long.MAX_VALUE); Assert.assertEquals(53, histogram.bucketCount); Assert.assertEquals(55296, histogram.countsArrayLength); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testHistogramEqualsAfterResizing(Class c) throws Exception { AbstractHistogram histogram = constructHistogram(c,3); histogram.recordValue((1L << 62) - 1); Assert.assertEquals(52, histogram.bucketCount); Assert.assertEquals(54272, histogram.countsArrayLength); histogram.recordValue(Long.MAX_VALUE); Assert.assertEquals(53, histogram.bucketCount); Assert.assertEquals(55296, histogram.countsArrayLength); histogram.reset(); histogram.recordValue((1L << 62) - 1); Histogram histogram1 = new Histogram(3); histogram1.recordValue((1L << 62) - 1); Assert.assertEquals(histogram, histogram1); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testHistogramAutoSizing(Class c) throws Exception { AbstractHistogram histogram = constructHistogram(c,3); for (int i = 0; i < 63; i++) { long value = 1L << i; histogram.recordValue(value); } Assert.assertEquals(53, histogram.bucketCount); Assert.assertEquals(55296, histogram.countsArrayLength); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testAutoSizingAdd(Class c) throws Exception { AbstractHistogram histogram1 = constructHistogram(c, 2); AbstractHistogram histogram2 = constructHistogram(c, 2); histogram1.recordValue(1000L); histogram1.recordValue(1000000000L); histogram2.add(histogram1); Assert.assertTrue("Max should be equivalent to 1000000000L", histogram2.valuesAreEquivalent(histogram2.getMaxValue(), 1000000000L) ); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, }) public void 
testAutoSizingAcrossContinuousRange(Class c) { AbstractHistogram histogram = constructHistogram(c, 2); for (long i = 0; i < 10000000L; i++) { histogram.recordValue(i); } } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, }) public void testAutoSizingAddDouble(Class c) throws Exception { DoubleHistogram histogram1 = constructDoubleHistogram(c,2); DoubleHistogram histogram2 = constructDoubleHistogram(c,2); histogram1.recordValue(1000L); histogram1.recordValue(1000000000L); histogram2.add(histogram1); Assert.assertTrue("Max should be equivalent to 1000000000L", histogram2.valuesAreEquivalent(histogram2.getMaxValue(), 1000000000L) ); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, }) public void testDoubleHistogramAutoSizingUp(Class c) throws Exception { DoubleHistogram histogram = constructDoubleHistogram(c,2); for (int i = 0; i < 55; i++) { double value = 1L << i; histogram.recordValue(value); } } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, }) public void testDoubleHistogramAutoSizingDown(Class c) throws Exception { DoubleHistogram histogram = constructDoubleHistogram(c,2); for (int i = 0; i < 56; i++) { double value = (1L << 45) * 1.0 / (1L << i); histogram.recordValue(value); } } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, }) public void testDoubleHistogramAutoSizingEdges(Class c) throws Exception { DoubleHistogram histogram = constructDoubleHistogram(c,3);; histogram.recordValue(1); histogram.recordValue(1L << 48); histogram.recordValue((1L << 52) - 1); Assert.assertEquals(52, histogram.integerValuesHistogram.bucketCount); Assert.assertEquals(54272, histogram.integerValuesHistogram.countsArrayLength); histogram.recordValue((1L << 53) - 1); Assert.assertEquals(53, histogram.integerValuesHistogram.bucketCount); Assert.assertEquals(55296, histogram.integerValuesHistogram.countsArrayLength); DoubleHistogram histogram2 = constructDoubleHistogram(c,2);; histogram2.recordValue(1); histogram2.recordValue(1L << 48); histogram2.recordValue((1L << 54) - 1); Assert.assertEquals(55, histogram2.integerValuesHistogram.bucketCount); Assert.assertEquals(7168, histogram2.integerValuesHistogram.countsArrayLength); histogram2.recordValue((1L << 55) - 1); Assert.assertEquals(56, histogram2.integerValuesHistogram.bucketCount); Assert.assertEquals(7296, histogram2.integerValuesHistogram.countsArrayLength); DoubleHistogram histogram3 = constructDoubleHistogram(c,2);; histogram3.recordValue(1E50); histogram3.recordValue((1L << 48) * 1E50); histogram3.recordValue(((1L << 54) - 1) * 1E50); Assert.assertEquals(55, histogram3.integerValuesHistogram.bucketCount); Assert.assertEquals(7168, histogram3.integerValuesHistogram.countsArrayLength); histogram3.recordValue(((1L << 55) - 1) * 1E50); Assert.assertEquals(56, histogram3.integerValuesHistogram.bucketCount); Assert.assertEquals(7296, histogram3.integerValuesHistogram.countsArrayLength); } }
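The tests above exercise the auto-resizing constructors, where no highest trackable value is supplied up front and the histogram grows its bucket count as larger values arrive. A minimal usage sketch (the recorded values are arbitrary) is:

import org.HdrHistogram.Histogram;

/** Sketch: an auto-resizing Histogram grows to cover whatever values are recorded. */
public final class AutoSizingSketch {
  public static void main(String[] args) {
    Histogram histogram = new Histogram(3);  // 3 significant digits, no fixed upper bound
    histogram.recordValue(1000L);            // small value
    histogram.recordValue(1L << 40);         // much larger value; the histogram resizes itself
    System.out.println("max recorded value ~ " + histogram.getMaxValue());
    System.out.println("total count = " + histogram.getTotalCount());
  }
}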
8,264
36.912844
97
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/test/java/org/HdrHistogram/DoubleHistogramDataAccessTest.java
/** * HistogramDataAccessTest.java * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import org.junit.Assert; import org.junit.jupiter.api.Test; /** * JUnit test for {@link Histogram} */ public class DoubleHistogramDataAccessTest { static final long highestTrackableValue = 3600L * 1000 * 1000; // 1 hour in usec units static final int numberOfSignificantValueDigits = 3; // Maintain at least 3 decimal points of accuracy static final DoubleHistogram histogram; static final DoubleHistogram scaledHistogram; static final DoubleHistogram rawHistogram; static final DoubleHistogram scaledRawHistogram; static final DoubleHistogram postCorrectedHistogram; static final DoubleHistogram postCorrectedScaledHistogram; static { histogram = new DoubleHistogram(highestTrackableValue, numberOfSignificantValueDigits); scaledHistogram = new DoubleHistogram(highestTrackableValue / 2 , numberOfSignificantValueDigits); rawHistogram = new DoubleHistogram(highestTrackableValue, numberOfSignificantValueDigits); scaledRawHistogram = new DoubleHistogram(highestTrackableValue / 2, numberOfSignificantValueDigits); // Log hypothetical scenario: 100 seconds of "perfect" 1msec results, sampled // 100 times per second (10,000 results), followed by a 100 second pause with // a single (100 second) recorded result. Recording is done indicating an expected // interval between samples of 10 msec: for (int i = 0; i < 10000; i++) { histogram.recordValueWithExpectedInterval(1000 /* 1 msec */, 10000 /* 10 msec expected interval */); scaledHistogram.recordValueWithExpectedInterval(1000 * 512 /* 1 msec */, 10000 * 512 /* 10 msec expected interval */); rawHistogram.recordValue(1000 /* 1 msec */); scaledRawHistogram.recordValue(1000 * 512/* 1 msec */); } histogram.recordValueWithExpectedInterval(100000000L /* 100 sec */, 10000 /* 10 msec expected interval */); scaledHistogram.recordValueWithExpectedInterval(100000000L * 512 /* 100 sec */, 10000 * 512 /* 10 msec expected interval */); rawHistogram.recordValue(100000000L /* 100 sec */); scaledRawHistogram.recordValue(100000000L * 512 /* 100 sec */); postCorrectedHistogram = rawHistogram.copyCorrectedForCoordinatedOmission(10000 /* 10 msec expected interval */); postCorrectedScaledHistogram = scaledRawHistogram.copyCorrectedForCoordinatedOmission(10000 * 512 /* 10 msec expected interval */); } @Test public void testScalingEquivalence() { Assert.assertEquals("averages should be equivalent", histogram.getMean() * 512, scaledHistogram.getMean(), scaledHistogram.getMean() * 0.000001); Assert.assertEquals("total count should be the same", histogram.getTotalCount(), scaledHistogram.getTotalCount()); Assert.assertEquals("99%'iles should be equivalent", scaledHistogram.highestEquivalentValue(histogram.getValueAtPercentile(99.0) * 512), scaledHistogram.highestEquivalentValue(scaledHistogram.getValueAtPercentile(99.0)), scaledHistogram.highestEquivalentValue(scaledHistogram.getValueAtPercentile(99.0)) * 0.000001); Assert.assertEquals("Max should be equivalent", scaledHistogram.highestEquivalentValue(histogram.getMaxValue() * 512), scaledHistogram.getMaxValue(), scaledHistogram.getMaxValue() * 0.000001); // Same for post-corrected: Assert.assertEquals("averages should be equivalent", histogram.getMean() * 512, scaledHistogram.getMean(), scaledHistogram.getMean() * 0.000001); Assert.assertEquals("total count should be the same", 
postCorrectedHistogram.getTotalCount(), postCorrectedScaledHistogram.getTotalCount()); Assert.assertEquals("99%'iles should be equivalent", postCorrectedHistogram.lowestEquivalentValue(postCorrectedHistogram.getValueAtPercentile(99.0)) * 512, postCorrectedScaledHistogram.lowestEquivalentValue(postCorrectedScaledHistogram.getValueAtPercentile(99.0)), postCorrectedScaledHistogram.lowestEquivalentValue(postCorrectedScaledHistogram.getValueAtPercentile(99.0)) * 0.000001 ); Assert.assertEquals("Max should be equivalent", postCorrectedScaledHistogram.highestEquivalentValue(postCorrectedHistogram.getMaxValue() * 512), postCorrectedScaledHistogram.getMaxValue(), postCorrectedScaledHistogram.getMaxValue() * 0.000001 ); } @Test public void testPreVsPostCorrectionValues() { // Loop both ways (one would be enough, but good practice just for fun: Assert.assertEquals("pre and post corrected count totals ", histogram.getTotalCount(), postCorrectedHistogram.getTotalCount()); // The following comparison loops would have worked in a perfect accuracy world, but since post // correction is done based on the value extracted from the bucket, and the during-recording is done // based on the actual (not pixelized) value, there will be subtle differences due to roundoffs: // for (HistogramIterationValue v : histogram.allValues()) { // long preCorrectedCount = v.getCountAtValueIteratedTo(); // long postCorrectedCount = postCorrectedHistogram.getCountAtValue(v.getValueIteratedTo()); // Assert.assertEquals("pre and post corrected count at value " + v.getValueIteratedTo(), // preCorrectedCount, postCorrectedCount); // } // // for (HistogramIterationValue v : postCorrectedHistogram.allValues()) { // long preCorrectedCount = v.getCountAtValueIteratedTo(); // long postCorrectedCount = histogram.getCountAtValue(v.getValueIteratedTo()); // Assert.assertEquals("pre and post corrected count at value " + v.getValueIteratedTo(), // preCorrectedCount, postCorrectedCount); // } } @Test public void testGetTotalCount() throws Exception { // The overflow value should count in the total count: Assert.assertEquals("Raw total count is 10,001", 10001L, rawHistogram.getTotalCount()); Assert.assertEquals("Total count is 20,000", 20000L, histogram.getTotalCount()); } @Test public void testGetMaxValue() throws Exception { Assert.assertTrue( histogram.valuesAreEquivalent(100L * 1000 * 1000, histogram.getMaxValue())); } @Test public void testGetMinValue() throws Exception { Assert.assertTrue( histogram.valuesAreEquivalent(1000, histogram.getMinValue())); } @Test public void testGetMean() throws Exception { double expectedRawMean = ((10000.0 * 1000) + (1.0 * 100000000))/10001; /* direct avg. of raw results */ double expectedMean = (1000.0 + 50000000.0)/2; /* avg. 1 msec for half the time, and 50 sec for other half */ // We expect to see the mean to be accurate to ~3 decimal points (~0.1%): Assert.assertEquals("Raw mean is " + expectedRawMean + " +/- 0.1%", expectedRawMean, rawHistogram.getMean(), expectedRawMean * 0.001); Assert.assertEquals("Mean is " + expectedMean + " +/- 0.1%", expectedMean, histogram.getMean(), expectedMean * 0.001); } @Test public void testGetStdDeviation() throws Exception { double expectedRawMean = ((10000.0 * 1000) + (1.0 * 100000000))/10001; /* direct avg. of raw results */ double expectedRawStdDev = Math.sqrt( ((10000.0 * Math.pow((1000.0 - expectedRawMean), 2)) + Math.pow((100000000.0 - expectedRawMean), 2)) / 10001); double expectedMean = (1000.0 + 50000000.0)/2; /* avg. 
1 msec for half the time, and 50 sec for other half */ double expectedSquareDeviationSum = 10000 * Math.pow((1000.0 - expectedMean), 2); for (long value = 10000; value <= 100000000; value += 10000) { expectedSquareDeviationSum += Math.pow((value - expectedMean), 2); } double expectedStdDev = Math.sqrt(expectedSquareDeviationSum / 20000); // We expect to see the standard deviations to be accurate to ~3 decimal points (~0.1%): Assert.assertEquals("Raw standard deviation is " + expectedRawStdDev + " +/- 0.1%", expectedRawStdDev, rawHistogram.getStdDeviation(), expectedRawStdDev * 0.001); Assert.assertEquals("Standard deviation is " + expectedStdDev + " +/- 0.1%", expectedStdDev, histogram.getStdDeviation(), expectedStdDev * 0.001); } @Test public void testGetValueAtPercentile() throws Exception { Assert.assertEquals("raw 30%'ile is 1 msec +/- 0.1%", 1000.0, (double) rawHistogram.getValueAtPercentile(30.0), 1000.0 * 0.001); Assert.assertEquals("raw 99%'ile is 1 msec +/- 0.1%", 1000.0, (double) rawHistogram.getValueAtPercentile(99.0), 1000.0 * 0.001); Assert.assertEquals("raw 99.99%'ile is 1 msec +/- 0.1%", 1000.0, (double) rawHistogram.getValueAtPercentile(99.99) , 1000.0 * 0.001); Assert.assertEquals("raw 99.999%'ile is 100 sec +/- 0.1%", 100000000.0, (double) rawHistogram.getValueAtPercentile(99.999), 100000000.0 * 0.001); Assert.assertEquals("raw 100%'ile is 100 sec +/- 0.1%", 100000000.0, (double) rawHistogram.getValueAtPercentile(100.0), 100000000.0 * 0.001); Assert.assertEquals("30%'ile is 1 msec +/- 0.1%", 1000.0, (double) histogram.getValueAtPercentile(30.0), 1000.0 * 0.001); Assert.assertEquals("50%'ile is 1 msec +/- 0.1%", 1000.0, (double) histogram.getValueAtPercentile(50.0), 1000.0 * 0.001); Assert.assertEquals("75%'ile is 50 sec +/- 0.1%", 50000000.0, (double) histogram.getValueAtPercentile(75.0), 50000000.0 * 0.001); Assert.assertEquals("90%'ile is 80 sec +/- 0.1%", 80000000.0, (double) histogram.getValueAtPercentile(90.0), 80000000.0 * 0.001); Assert.assertEquals("99%'ile is 98 sec +/- 0.1%", 98000000.0, (double) histogram.getValueAtPercentile(99.0), 98000000.0 * 0.001); Assert.assertEquals("99.999%'ile is 100 sec +/- 0.1%", 100000000.0, (double) histogram.getValueAtPercentile(99.999), 100000000.0 * 0.001); Assert.assertEquals("100%'ile is 100 sec +/- 0.1%", 100000000.0, (double) histogram.getValueAtPercentile(100.0), 100000000.0 * 0.001); } @Test public void testGetValueAtPercentileForLargeHistogram() { long largestValue = 1000000000000L; Histogram h = new Histogram(largestValue, 5); h.recordValue(largestValue); Assert.assertTrue(h.getValueAtPercentile(100.0) > 0); } @Test public void testGetPercentileAtOrBelowValue() throws Exception { Assert.assertEquals("Raw percentile at or below 5 msec is 99.99% +/- 0.0001", 99.99, rawHistogram.getPercentileAtOrBelowValue(5000), 0.0001); Assert.assertEquals("Percentile at or below 5 msec is 50% +/- 0.0001%", 50.0, histogram.getPercentileAtOrBelowValue(5000), 0.0001); Assert.assertEquals("Percentile at or below 100 sec is 100% +/- 0.0001%", 100.0, histogram.getPercentileAtOrBelowValue(100000000L), 0.0001); } @Test public void testGetCountBetweenValues() throws Exception { Assert.assertEquals("Count of raw values between 1 msec and 1 msec is 1", 10000, rawHistogram.getCountBetweenValues(1000L, 1000L), 10000 * 0.000001); Assert.assertEquals("Count of raw values between 5 msec and 150 sec is 1", 1, rawHistogram.getCountBetweenValues(5000L, 150000000L), 1 * 0.000001); Assert.assertEquals("Count of values between 5 msec and 150 sec is 10,000", 
10000, histogram.getCountBetweenValues(5000L, 150000000L), 10000 * 0.000001); } @Test public void testGetCountAtValue() throws Exception { Assert.assertEquals("Count of raw values at 10 msec is 0", 0, rawHistogram.getCountBetweenValues(10000L, 10010L), 0.000001); Assert.assertEquals("Count of values at 10 msec is 0", 1, histogram.getCountBetweenValues(10000L, 10010L), 0.000001); Assert.assertEquals("Count of raw values at 1 msec is 10,000", 10000, rawHistogram.getCountAtValue(1000L), 10000 * 0.000001); Assert.assertEquals("Count of values at 1 msec is 10,000", 10000, histogram.getCountAtValue(1000L), 10000 * 0.000001); } @Test public void testPercentiles() throws Exception { int i = 0; for (DoubleHistogramIterationValue v : histogram.percentiles(5 /* ticks per half */)) { Assert.assertEquals("i = " + i + ", Value at Iterated-to Percentile is the same as the matching getValueAtPercentile():\n" + "getPercentileLevelIteratedTo = " + v.getPercentileLevelIteratedTo() + "\ngetValueIteratedTo = " + v.getValueIteratedTo() + "\ngetValueIteratedFrom = " + v.getValueIteratedFrom() + "\ngetValueAtPercentile(getPercentileLevelIteratedTo()) = " + histogram.getValueAtPercentile(v.getPercentileLevelIteratedTo()) + "\ngetPercentile = " + v.getPercentile() + "\ngetValueAtPercentile(getPercentile())" + histogram.getValueAtPercentile(v.getPercentile()) + "\nequivalent1 = " + histogram.highestEquivalentValue(histogram.getValueAtPercentile(v.getPercentileLevelIteratedTo())) + "\nequivalent2 = " + histogram.highestEquivalentValue(histogram.getValueAtPercentile(v.getPercentile())) + "\n" , v.getValueIteratedTo(), histogram.highestEquivalentValue(histogram.getValueAtPercentile(v.getPercentile())), v.getValueIteratedTo() * 0.001); } } @Test public void testLinearBucketValues() throws Exception { int index = 0; // Note that using linear buckets should work "as expected" as long as the number of linear buckets // is lower than the resolution level determined by largestValueWithSingleUnitResolution // (2000 in this case). Above that count, some of the linear buckets can end up rounded up in size // (to the nearest local resolution unit level), which can result in a smaller number of buckets that // expected covering the range. // Iterate raw data using linear buckets of 100 msec each. for (DoubleHistogramIterationValue v : rawHistogram.linearBucketValues(100000)) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 0) { Assert.assertEquals("Raw Linear 100 msec bucket # 0 added a count of 10000", 10000, countAddedInThisBucket); } else if (index == 999) { Assert.assertEquals("Raw Linear 100 msec bucket # 999 added a count of 1", 1, countAddedInThisBucket); } else { Assert.assertEquals("Raw Linear 100 msec bucket # " + index + " added a count of 0", 0 , countAddedInThisBucket); } index++; } Assert.assertEquals(1000, index); index = 0; long totalAddedCounts = 0; // Iterate data using linear buckets of 10 msec each. for (DoubleHistogramIterationValue v : histogram.linearBucketValues(10000)) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 0) { Assert.assertEquals("Linear 1 sec bucket # 0 [" + v.getValueIteratedFrom() + ".." 
+ v.getValueIteratedTo() + "] added a count of 10000", 10000, countAddedInThisBucket); } // Because value resolution is low enough (3 digits) that multiple linear buckets will end up // residing in a single value-equivalent range, some linear buckets will have counts of 2 or // more, and some will have 0 (when the first bucket in the equivalent range was the one that // got the total count bump). // However, we can still verify the sum of counts added in all the buckets... totalAddedCounts += v.getCountAddedInThisIterationStep(); index++; } Assert.assertEquals("There should be 10000 linear buckets of size 10000 usec between 0 and 100 sec.", 10000, index); Assert.assertEquals("Total added counts should be 20000", 20000, totalAddedCounts); index = 0; totalAddedCounts = 0; // Iterate data using linear buckets of 1 msec each. for (DoubleHistogramIterationValue v : histogram.linearBucketValues(1000)) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 1) { Assert.assertEquals("Linear 1 sec bucket # 0 [" + v.getValueIteratedFrom() + ".." + v.getValueIteratedTo() + "] added a count of 10000", 10000, countAddedInThisBucket); } // Because value resolution is low enough (3 digits) that multiple linear buckets will end up // residing in a single value-equivalent range, some linear buckets will have counts of 2 or // more, and some will have 0 (when the first bucket in the equivalent range was the one that // got the total count bump). // However, we can still verify the sum of counts added in all the buckets... totalAddedCounts += v.getCountAddedInThisIterationStep(); index++; } // You may ask "why 100007 and not 100000?" for the value below? The answer is that at this fine // a linear stepping resolution, the final populated sub-bucket (at 100 seconds with 3 decimal // point resolution) is larger than our liner stepping, and holds more than one linear 1 msec // step in it. // Since we only know we're done with linear iteration when the next iteration step will step // out of the last populated bucket, there is not way to tell if the iteration should stop at // 100000 or 100007 steps. The proper thing to do is to run to the end of the sub-bucket quanta... Assert.assertEquals("There should be 100007 linear buckets of size 1000 usec between 0 and 100 sec.", 100007, index); Assert.assertEquals("Total added counts should be 20000", 20000, totalAddedCounts); } @Test public void testLogarithmicBucketValues() throws Exception { int index = 0; // Iterate raw data using logarithmic buckets starting at 10 msec. for (DoubleHistogramIterationValue v : rawHistogram.logarithmicBucketValues(10000, 2)) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 0) { Assert.assertEquals("Raw Logarithmic 10 msec bucket # 0 added a count of 10000", 10000, countAddedInThisBucket); } else if (index == 14) { Assert.assertEquals("Raw Logarithmic 10 msec bucket # 14 added a count of 1", 1, countAddedInThisBucket); } else { Assert.assertEquals("Raw Logarithmic 100 msec bucket # " + index + " added a count of 0", 0, countAddedInThisBucket); } index++; } Assert.assertEquals(14, index - 1); index = 0; long totalAddedCounts = 0; // Iterate data using linear buckets of 1 sec each. for (DoubleHistogramIterationValue v : histogram.logarithmicBucketValues(10000, 2)) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 0) { Assert.assertEquals("Logarithmic 10 msec bucket # 0 [" + v.getValueIteratedFrom() + ".." 
+ v.getValueIteratedTo() + "] added a count of 10000", 10000, countAddedInThisBucket); } totalAddedCounts += v.getCountAddedInThisIterationStep(); index++; } Assert.assertEquals("There should be 14 Logarithmic buckets of size 10000 usec between 0 and 100 sec.", 14, index - 1); Assert.assertEquals("Total added counts should be 20000", 20000, totalAddedCounts); } @Test public void testRecordedValues() throws Exception { int index = 0; // Iterate raw data by stepping through every value that has a count recorded: for (DoubleHistogramIterationValue v : rawHistogram.recordedValues()) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 0) { Assert.assertEquals("Raw recorded value bucket # 0 added a count of 10000", 10000, countAddedInThisBucket); } else { Assert.assertEquals("Raw recorded value bucket # " + index + " added a count of 1", 1, countAddedInThisBucket); } index++; } Assert.assertEquals(2, index); index = 0; long totalAddedCounts = 0; // Iterate data using linear buckets of 1 sec each. for (DoubleHistogramIterationValue v : histogram.recordedValues()) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 0) { Assert.assertEquals("Recorded bucket # 0 [" + v.getValueIteratedFrom() + ".." + v.getValueIteratedTo() + "] added a count of 10000", 10000, countAddedInThisBucket); } Assert.assertTrue("The count in recorded bucket #" + index + " is not 0", v.getCountAtValueIteratedTo() != 0); Assert.assertEquals("The count in recorded bucket #" + index + " is exactly the amount added since the last iteration ", v.getCountAtValueIteratedTo(), v.getCountAddedInThisIterationStep()); totalAddedCounts += v.getCountAddedInThisIterationStep(); index++; } Assert.assertEquals("Total added counts should be 20000", 20000, totalAddedCounts); } @Test public void testAllValues() throws Exception { int index = 0; double latestValueAtIndex = 0; double totalCountToThisPoint = 0; double totalValueToThisPoint = 0; // Iterate raw data by stepping through every value that ahs a count recorded: for (DoubleHistogramIterationValue v : rawHistogram.allValues()) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 2000) { Assert.assertEquals("Raw allValues bucket # 0 added a count of 10000", 10000, countAddedInThisBucket); } else if (histogram.valuesAreEquivalent(v.getValueIteratedTo(), 100000000)) { Assert.assertEquals("Raw allValues value bucket # " + index + " added a count of 1", 1, countAddedInThisBucket); } else { Assert.assertEquals("Raw allValues value bucket # " + index + " added a count of 0", 0, countAddedInThisBucket); } latestValueAtIndex = v.getValueIteratedTo(); totalCountToThisPoint += v.getCountAtValueIteratedTo(); Assert.assertEquals("total Count should match", totalCountToThisPoint, v.getTotalCountToThisValue(), 1e-8); totalValueToThisPoint += v.getCountAtValueIteratedTo() * latestValueAtIndex; Assert.assertEquals("total Value should match", totalValueToThisPoint, v.getTotalValueToThisValue(), 1e-8); index++; } Assert.assertEquals("index should be equal to countsArrayLength", histogram.integerValuesHistogram.countsArrayLength, index); index = 0; long totalAddedCounts = 0; // Iterate data using linear buckets of 1 sec each. for (DoubleHistogramIterationValue v : histogram.allValues()) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 2000) { Assert.assertEquals("AllValues bucket # 0 [" + v.getValueIteratedFrom() + ".." 
+ v.getValueIteratedTo() + "] added a count of 10000", 10000, countAddedInThisBucket); } Assert.assertEquals("The count in AllValues bucket #" + index + " is exactly the amount added since the last iteration ", v.getCountAtValueIteratedTo(), v.getCountAddedInThisIterationStep()); totalAddedCounts += v.getCountAddedInThisIterationStep(); index++; } Assert.assertEquals("index should be equal to countsArrayLength", histogram.integerValuesHistogram.countsArrayLength, index); Assert.assertEquals("Total added counts should be 20000", 20000, totalAddedCounts); } }
26583
52.922921
139
java
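The record above captures DoubleHistogramDataAccessTest, whose setup records 10,000 ideal 1 msec samples followed by a single 100 second stall, using a 10 msec expected interval so HdrHistogram back-fills the samples lost to coordinated omission. Below is a minimal, self-contained sketch of that recording pattern; the class name CoordinatedOmissionSketch is hypothetical, but the DoubleHistogram calls are the same ones the test exercises, with values in microseconds as in the test.

import org.HdrHistogram.DoubleHistogram;

public class CoordinatedOmissionSketch {
    public static void main(String[] args) {
        // 1 hour range in usec units, 3 significant decimal digits,
        // mirroring the constants used in the test above.
        DoubleHistogram histogram = new DoubleHistogram(3600L * 1000 * 1000, 3);

        // 10,000 "perfect" 1 msec samples; with a 10 msec expected interval
        // no correction is added for these (value < expected interval).
        for (int i = 0; i < 10000; i++) {
            histogram.recordValueWithExpectedInterval(1000 /* 1 msec */, 10000 /* 10 msec */);
        }

        // One 100 second stall; the expected-interval form back-fills the
        // samples that would have been taken during the pause, bringing the
        // total count to 20,000 as the test asserts.
        histogram.recordValueWithExpectedInterval(100000000L /* 100 sec */, 10000);

        System.out.println("total count = " + histogram.getTotalCount());
        System.out.println("50%'ile     = " + histogram.getValueAtPercentile(50.0));
        System.out.println("99%'ile     = " + histogram.getValueAtPercentile(99.0));
    }
}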
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/test/java/org/HdrHistogram/DoubleHistogramTest.java
/** * HistogramTest.java * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import static org.HdrHistogram.HistogramTestUtils.constructHistogram; import static org.junit.Assert.*; import org.junit.Assert; import org.junit.Test; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.function.Executable; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import java.io.*; import java.util.zip.Deflater; import static org.HdrHistogram.HistogramTestUtils.constructDoubleHistogram; /** * JUnit test for {@link Histogram} */ public class DoubleHistogramTest { static final long trackableValueRangeSize = 3600L * 1000 * 1000; // e.g. for 1 hr in usec units static final int numberOfSignificantValueDigits = 3; // static final long testValueLevel = 12340; static final double testValueLevel = 4.0; @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testTrackableValueRangeMustBeGreaterThanTwo(final Class histoClass) throws Exception { Assertions.assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() throws Throwable { DoubleHistogram histogram = constructDoubleHistogram(histoClass, 1, numberOfSignificantValueDigits); } }); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testNumberOfSignificantValueDigitsMustBeLessThanSix(final Class histoClass) throws Exception { Assertions.assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() throws Throwable { DoubleHistogram histogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, 6); } }); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testNumberOfSignificantValueDigitsMustBePositive(final Class histoClass) throws Exception { Assertions.assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() throws Throwable { DoubleHistogram histogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, -1); } }); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testConstructionArgumentGets(Class histoClass) throws Exception { DoubleHistogram histogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); // Record 1.0, and verify that the range adjust to it: histogram.recordValue(Math.pow(2.0, 20)); histogram.recordValue(1.0); assertEquals(1.0, histogram.getCurrentLowestTrackableNonZeroValue(), 0.001); assertEquals(trackableValueRangeSize, histogram.getHighestToLowestValueRatio()); assertEquals(numberOfSignificantValueDigits, histogram.getNumberOfSignificantValueDigits()); DoubleHistogram histogram2 = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); // Record a larger 
value, and verify that the range adjust to it too: histogram2.recordValue(2048.0 * 1024.0 * 1024.0); assertEquals(2048.0 * 1024.0 * 1024.0, histogram2.getCurrentLowestTrackableNonZeroValue(), 0.001); DoubleHistogram histogram3 = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); // Record a value that is 1000x outside of the initially set range, which should scale us by 1/1024x: histogram3.recordValue(1/1000.0); assertEquals(1.0/1024, histogram3.getCurrentLowestTrackableNonZeroValue(), 0.001); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testDataRange(Class histoClass) { // A trackableValueRangeSize histigram DoubleHistogram histogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); histogram.recordValue(0.0); // Include a zero value to make sure things are handled right. assertEquals(1L, histogram.getCountAtValue(0.0)); double topValue = 1.0; try { while (true) { histogram.recordValue(topValue); topValue *= 2.0; } } catch (ArrayIndexOutOfBoundsException ex) { } assertEquals(1L << 33, topValue, 0.00001); assertEquals(1L, histogram.getCountAtValue(0.0)); histogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); histogram.recordValue(0.0); // Include a zero value to make sure things are handled right. double bottomValue = 1L << 33; try { while (true) { histogram.recordValue(bottomValue); bottomValue /= 2.0; } } catch (ArrayIndexOutOfBoundsException ex) { System.out.println("Bottom value at exception point = " + bottomValue); } assertEquals(1.0, bottomValue, 0.00001); long expectedRange = 1L << (findContainingBinaryOrderOfMagnitude(trackableValueRangeSize) + 1); assertEquals(expectedRange, (topValue / bottomValue), 0.00001); assertEquals(1L, histogram.getCountAtValue(0.0)); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testRecordValue(Class histoClass) throws Exception { DoubleHistogram histogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); assertEquals(1L, histogram.getCountAtValue(testValueLevel)); assertEquals(1L, histogram.getTotalCount()); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testRecordValue_Overflow_ShouldThrowException(final Class histoClass) throws Exception { Assertions.assertThrows(ArrayIndexOutOfBoundsException.class, new Executable() { @Override public void execute() throws Throwable { DoubleHistogram histogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); histogram.recordValue(trackableValueRangeSize * 3); histogram.recordValue(1.0); } }); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testRecordValueWithExpectedInterval(Class histoClass) throws Exception { DoubleHistogram histogram = 
constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); histogram.recordValue(0); histogram.recordValueWithExpectedInterval(testValueLevel, testValueLevel/4); DoubleHistogram rawHistogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); rawHistogram.recordValue(0); rawHistogram.recordValue(testValueLevel); // The raw data will not include corrected samples: assertEquals(1L, rawHistogram.getCountAtValue(0)); assertEquals(0L, rawHistogram.getCountAtValue((testValueLevel * 1 )/4)); assertEquals(0L, rawHistogram.getCountAtValue((testValueLevel * 2 )/4)); assertEquals(0L, rawHistogram.getCountAtValue((testValueLevel * 3 )/4)); assertEquals(1L, rawHistogram.getCountAtValue((testValueLevel * 4 )/4)); assertEquals(2L, rawHistogram.getTotalCount()); // The data will include corrected samples: assertEquals(1L, histogram.getCountAtValue(0)); assertEquals(1L, histogram.getCountAtValue((testValueLevel * 1 )/4)); assertEquals(1L, histogram.getCountAtValue((testValueLevel * 2 )/4)); assertEquals(1L, histogram.getCountAtValue((testValueLevel * 3 )/4)); assertEquals(1L, histogram.getCountAtValue((testValueLevel * 4 )/4)); assertEquals(5L, histogram.getTotalCount()); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testReset(final Class histoClass) throws Exception { DoubleHistogram histogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(10); histogram.recordValue(100); Assert.assertEquals(histogram.getMinValue(), Math.min(10.0, testValueLevel), 1.0); Assert.assertEquals(histogram.getMaxValue(), Math.max(100.0, testValueLevel), 1.0); histogram.reset(); assertEquals(0L, histogram.getCountAtValue(testValueLevel)); assertEquals(0L, histogram.getTotalCount()); histogram.recordValue(20); histogram.recordValue(80); Assert.assertEquals(histogram.getMinValue(), 20.0, 1.0); Assert.assertEquals(histogram.getMaxValue(), 80.0, 1.0); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testAdd(final Class histoClass) throws Exception { DoubleHistogram histogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); DoubleHistogram other = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 1000); other.recordValue(testValueLevel); other.recordValue(testValueLevel * 1000); histogram.add(other); assertEquals(2L, histogram.getCountAtValue(testValueLevel)); assertEquals(2L, histogram.getCountAtValue(testValueLevel * 1000)); assertEquals(4L, histogram.getTotalCount()); DoubleHistogram biggerOther = constructDoubleHistogram(histoClass, trackableValueRangeSize * 2, numberOfSignificantValueDigits); biggerOther.recordValue(testValueLevel); biggerOther.recordValue(testValueLevel * 1000); // Adding the smaller histogram to the bigger one should work: biggerOther.add(histogram); assertEquals(3L, biggerOther.getCountAtValue(testValueLevel)); assertEquals(3L, biggerOther.getCountAtValue(testValueLevel * 1000)); assertEquals(6L, 
biggerOther.getTotalCount()); // Since we are auto-sized, trying to add a larger histogram into a smaller one should work if no // overflowing data is there: try { // This should throw: histogram.add(biggerOther); } catch (ArrayIndexOutOfBoundsException e) { fail("Should not thow with out of bounds error"); } // But trying to add smaller values to a larger histogram that actually uses it's range should throw an AIOOB: histogram.recordValue(1.0); other.recordValue(1.0); biggerOther.recordValue(trackableValueRangeSize * 8); try { // This should throw: biggerOther.add(histogram); fail("Should have thown with out of bounds error"); } catch (ArrayIndexOutOfBoundsException e) { } } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testAddWithAutoResize(final Class histoClass) { DoubleHistogram histo1 = constructDoubleHistogram(histoClass, 3); histo1.setAutoResize(true); histo1.recordValue(6.0); histo1.recordValue(1.0); histo1.recordValue(5.0); histo1.recordValue(8.0); histo1.recordValue(3.0); histo1.recordValue(7.0); DoubleHistogram histo2 = constructDoubleHistogram(histoClass, 3); histo2.setAutoResize(true); histo2.recordValue(9.0); DoubleHistogram histo3 = constructDoubleHistogram(histoClass, 3); histo3.setAutoResize(true); histo3.recordValue(4.0); histo3.recordValue(2.0); histo3.recordValue(10.0); DoubleHistogram merged = constructDoubleHistogram(histoClass, 3); merged.setAutoResize(true); merged.add(histo1); merged.add(histo2); merged.add(histo3); assertEquals(merged.getTotalCount(), histo1.getTotalCount() + histo2.getTotalCount() + histo3.getTotalCount()); assertEquals(1.0, merged.getMinValue(), 0.01); assertEquals(10.0, merged.getMaxValue(), 0.01); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testSizeOfEquivalentValueRange(final Class histoClass) { DoubleHistogram histogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); histogram.recordValue(1.0); assertEquals("Size of equivalent range for value 1 is 1", 1.0/1024.0, histogram.sizeOfEquivalentValueRange(1), 0.001); assertEquals("Size of equivalent range for value 2500 is 2", 2, histogram.sizeOfEquivalentValueRange(2500), 0.001); assertEquals("Size of equivalent range for value 8191 is 4", 4, histogram.sizeOfEquivalentValueRange(8191), 0.001); assertEquals("Size of equivalent range for value 8192 is 8", 8, histogram.sizeOfEquivalentValueRange(8192), 0.001); assertEquals("Size of equivalent range for value 10000 is 8", 8, histogram.sizeOfEquivalentValueRange(10000), 0.001); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testLowestEquivalentValue(final Class histoClass) { DoubleHistogram histogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); histogram.recordValue(1.0); assertEquals("The lowest equivalent value to 10007 is 10000", 10000, histogram.lowestEquivalentValue(10007), 0.001); assertEquals("The lowest equivalent value to 10009 is 10008", 10008, histogram.lowestEquivalentValue(10009), 0.001); } @ParameterizedTest 
@ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testHighestEquivalentValue(final Class histoClass) { DoubleHistogram histogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); histogram.recordValue(1.0); assertEquals("The highest equivalent value to 8180 is 8183", 8183.99999, histogram.highestEquivalentValue(8180), 0.001); assertEquals("The highest equivalent value to 8187 is 8191", 8191.99999, histogram.highestEquivalentValue(8191), 0.001); assertEquals("The highest equivalent value to 8193 is 8199", 8199.99999, histogram.highestEquivalentValue(8193), 0.001); assertEquals("The highest equivalent value to 9995 is 9999", 9999.99999, histogram.highestEquivalentValue(9995), 0.001); assertEquals("The highest equivalent value to 10007 is 10007", 10007.99999, histogram.highestEquivalentValue(10007), 0.001); assertEquals("The highest equivalent value to 10008 is 10015", 10015.99999, histogram.highestEquivalentValue(10008), 0.001); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testMedianEquivalentValue(final Class histoClass) { DoubleHistogram histogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); histogram.recordValue(1.0); assertEquals("The median equivalent value to 4 is 4", 4.002, histogram.medianEquivalentValue(4), 0.001); assertEquals("The median equivalent value to 5 is 5", 5.002, histogram.medianEquivalentValue(5), 0.001); assertEquals("The median equivalent value to 4000 is 4001", 4001, histogram.medianEquivalentValue(4000), 0.001); assertEquals("The median equivalent value to 8000 is 8002", 8002, histogram.medianEquivalentValue(8000), 0.001); assertEquals("The median equivalent value to 10007 is 10004", 10004, histogram.medianEquivalentValue(10007), 0.001); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testNextNonEquivalentValue(final Class histoClass) { DoubleHistogram histogram = constructDoubleHistogram(histoClass, trackableValueRangeSize, numberOfSignificantValueDigits); assertNotSame(null, histogram); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testMaxValue(final Class histoClass) { DoubleHistogram histogram = constructDoubleHistogram(histoClass, 1_000_000_000, 2); Assertions.assertNotSame(null, histogram); histogram.recordValue(2.5362386543); double maxValue = histogram.getMaxValue(); Assertions.assertEquals(maxValue, histogram.highestEquivalentValue(2.5362386543)); } void testDoubleHistogramSerialization(DoubleHistogram histogram) throws Exception { histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 10); histogram.recordValueWithExpectedInterval(histogram.getCurrentHighestTrackableValue() - 1, histogram.getCurrentHighestTrackableValue() / 1000); ByteArrayOutputStream bos = new ByteArrayOutputStream(); ObjectOutput out = null; ByteArrayInputStream bis = null; ObjectInput in = null; 
DoubleHistogram newHistogram = null; try { out = new ObjectOutputStream(bos); out.writeObject(histogram); Deflater compresser = new Deflater(); compresser.setInput(bos.toByteArray()); compresser.finish(); byte [] compressedOutput = new byte[1024*1024]; int compressedDataLength = compresser.deflate(compressedOutput); System.out.println("Serialized form of " + histogram.getClass() + " with internalHighestToLowestValueRatio = " + histogram.getHighestToLowestValueRatio() + "\n and a numberOfSignificantValueDigits = " + histogram.getNumberOfSignificantValueDigits() + " is " + bos.toByteArray().length + " bytes long. Compressed form is " + compressedDataLength + " bytes long."); System.out.println(" (estimated footprint was " + histogram.getEstimatedFootprintInBytes() + " bytes)"); bis = new ByteArrayInputStream(bos.toByteArray()); in = new ObjectInputStream(bis); newHistogram = (DoubleHistogram) in.readObject(); } finally { if (out != null) out.close(); bos.close(); if (in !=null) in.close(); if (bis != null) bis.close(); } assertNotNull(newHistogram); assertEqual(histogram, newHistogram); } private void assertEqual(DoubleHistogram expectedHistogram, DoubleHistogram actualHistogram) { assertEquals(expectedHistogram, actualHistogram); Assert.assertTrue(expectedHistogram.hashCode() == actualHistogram.hashCode()); assertEquals( expectedHistogram.getCountAtValue(testValueLevel), actualHistogram.getCountAtValue(testValueLevel)); assertEquals( expectedHistogram.getCountAtValue(testValueLevel * 10), actualHistogram.getCountAtValue(testValueLevel * 10)); assertEquals( expectedHistogram.getTotalCount(), actualHistogram.getTotalCount()); } @Test public void equalsWillNotThrowClassCastException() { SynchronizedDoubleHistogram synchronizedDoubleHistogram = new SynchronizedDoubleHistogram(1); IntCountsHistogram other = new IntCountsHistogram(1); synchronizedDoubleHistogram.equals(other); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testSerialization(final Class histoClass) throws Exception { DoubleHistogram histogram = constructDoubleHistogram(histoClass,trackableValueRangeSize, 3); testDoubleHistogramSerialization(histogram); histogram = constructDoubleHistogram(histoClass,trackableValueRangeSize, 2); testDoubleHistogramSerialization(histogram); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, }) public void testSerializationWithInternals(final Class histoClass) throws Exception { DoubleHistogram histogram = constructDoubleHistogram(histoClass,trackableValueRangeSize, 3); testDoubleHistogramSerialization(histogram); DoubleHistogram withIntHistogram = constructDoubleHistogram(histoClass,trackableValueRangeSize, 3, IntCountsHistogram.class); testDoubleHistogramSerialization(withIntHistogram); DoubleHistogram withShortHistogram = constructDoubleHistogram(histoClass,trackableValueRangeSize, 3, ShortCountsHistogram.class); testDoubleHistogramSerialization(withShortHistogram); histogram = constructDoubleHistogram(histoClass,trackableValueRangeSize, 2, Histogram.class); testDoubleHistogramSerialization(histogram); withIntHistogram = constructDoubleHistogram(histoClass,trackableValueRangeSize, 2, IntCountsHistogram.class); testDoubleHistogramSerialization(withIntHistogram); withShortHistogram = constructDoubleHistogram(histoClass,trackableValueRangeSize, 2, ShortCountsHistogram.class); 
testDoubleHistogramSerialization(withShortHistogram); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testCopy(final Class histoClass) throws Exception { DoubleHistogram histogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 10); histogram.recordValueWithExpectedInterval(histogram.getCurrentHighestTrackableValue() - 1, 31000); System.out.println("Testing copy of DoubleHistogram:"); assertEqual(histogram, histogram.copy()); DoubleHistogram withIntHistogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits, IntCountsHistogram.class); withIntHistogram.recordValue(testValueLevel); withIntHistogram.recordValue(testValueLevel * 10); withIntHistogram.recordValueWithExpectedInterval(withIntHistogram.getCurrentHighestTrackableValue() - 1, 31000); System.out.println("Testing copy of DoubleHistogram backed by IntHistogram:"); assertEqual(withIntHistogram, withIntHistogram.copy()); DoubleHistogram withShortHistogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits, ShortCountsHistogram.class); withShortHistogram.recordValue(testValueLevel); withShortHistogram.recordValue(testValueLevel * 10); withShortHistogram.recordValueWithExpectedInterval(withShortHistogram.getCurrentHighestTrackableValue() - 1, 31000); System.out.println("Testing copy of DoubleHistogram backed by ShortHistogram:"); assertEqual(withShortHistogram, withShortHistogram.copy()); DoubleHistogram withConcurrentHistogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits, ConcurrentHistogram.class); withConcurrentHistogram.recordValue(testValueLevel); withConcurrentHistogram.recordValue(testValueLevel * 10); withConcurrentHistogram.recordValueWithExpectedInterval(withConcurrentHistogram.getCurrentHighestTrackableValue() - 1, 31000); System.out.println("Testing copy of DoubleHistogram backed by ConcurrentHistogram:"); assertEqual(withConcurrentHistogram, withConcurrentHistogram.copy()); DoubleHistogram withSyncHistogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits, SynchronizedHistogram.class); withSyncHistogram.recordValue(testValueLevel); withSyncHistogram.recordValue(testValueLevel * 10); withSyncHistogram.recordValueWithExpectedInterval(withSyncHistogram.getCurrentHighestTrackableValue() - 1, 31000); System.out.println("Testing copy of DoubleHistogram backed by SynchronizedHistogram:"); assertEqual(withSyncHistogram, withSyncHistogram.copy()); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testCopyInto(final Class histoClass) throws Exception { DoubleHistogram histogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits); DoubleHistogram targetHistogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 10); histogram.recordValueWithExpectedInterval(histogram.getCurrentHighestTrackableValue() - 1, histogram.getCurrentHighestTrackableValue() / 1000); System.out.println("Testing copyInto for DoubleHistogram:"); 
histogram.copyInto(targetHistogram); assertEqual(histogram, targetHistogram); histogram.recordValue(testValueLevel * 20); histogram.copyInto(targetHistogram); assertEqual(histogram, targetHistogram); DoubleHistogram withIntHistogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits, IntCountsHistogram.class); DoubleHistogram targetWithIntHistogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits, IntCountsHistogram.class); withIntHistogram.recordValue(testValueLevel); withIntHistogram.recordValue(testValueLevel * 10); withIntHistogram.recordValueWithExpectedInterval(withIntHistogram.getCurrentHighestTrackableValue() - 1, withIntHistogram.getCurrentHighestTrackableValue() / 1000); System.out.println("Testing copyInto for DoubleHistogram backed by IntHistogram:"); withIntHistogram.copyInto(targetWithIntHistogram); assertEqual(withIntHistogram, targetWithIntHistogram); withIntHistogram.recordValue(testValueLevel * 20); withIntHistogram.copyInto(targetWithIntHistogram); assertEqual(withIntHistogram, targetWithIntHistogram); DoubleHistogram withShortHistogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits, ShortCountsHistogram.class); DoubleHistogram targetWithShortHistogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits, ShortCountsHistogram.class); withShortHistogram.recordValue(testValueLevel); withShortHistogram.recordValue(testValueLevel * 10); withShortHistogram.recordValueWithExpectedInterval(withShortHistogram.getCurrentHighestTrackableValue() - 1, withShortHistogram.getCurrentHighestTrackableValue() / 1000); System.out.println("Testing copyInto for DoubleHistogram backed by a ShortHistogram:"); withShortHistogram.copyInto(targetWithShortHistogram); assertEqual(withShortHistogram, targetWithShortHistogram); withShortHistogram.recordValue(testValueLevel * 20); withShortHistogram.copyInto(targetWithShortHistogram); assertEqual(withShortHistogram, targetWithShortHistogram); DoubleHistogram withConcurrentHistogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits, ConcurrentHistogram.class); DoubleHistogram targetWithConcurrentHistogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits, ConcurrentHistogram.class); withConcurrentHistogram.recordValue(testValueLevel); withConcurrentHistogram.recordValue(testValueLevel * 10); withConcurrentHistogram.recordValueWithExpectedInterval(withConcurrentHistogram.getCurrentHighestTrackableValue() - 1, withConcurrentHistogram.getCurrentHighestTrackableValue() / 1000); System.out.println("Testing copyInto for DoubleHistogram backed by ConcurrentHistogram:"); withConcurrentHistogram.copyInto(targetWithConcurrentHistogram); assertEqual(withConcurrentHistogram, targetWithConcurrentHistogram); withConcurrentHistogram.recordValue(testValueLevel * 20); withConcurrentHistogram.copyInto(targetWithConcurrentHistogram); assertEqual(withConcurrentHistogram, targetWithConcurrentHistogram); ConcurrentDoubleHistogram concurrentHistogram = new ConcurrentDoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits); ConcurrentDoubleHistogram targetConcurrentHistogram = new ConcurrentDoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits); concurrentHistogram.recordValue(testValueLevel); concurrentHistogram.recordValue(testValueLevel * 10); concurrentHistogram.recordValueWithExpectedInterval(concurrentHistogram.getCurrentHighestTrackableValue() - 1, 
concurrentHistogram.getCurrentHighestTrackableValue() / 1000); System.out.println("Testing copyInto for actual ConcurrentHistogram:"); concurrentHistogram.copyInto(targetConcurrentHistogram); assertEqual(concurrentHistogram, targetConcurrentHistogram); concurrentHistogram.recordValue(testValueLevel * 20); concurrentHistogram.copyInto(targetConcurrentHistogram); assertEqual(concurrentHistogram, targetConcurrentHistogram); DoubleHistogram withSyncHistogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits, SynchronizedHistogram.class); DoubleHistogram targetWithSyncHistogram = new DoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits, SynchronizedHistogram.class); withSyncHistogram.recordValue(testValueLevel); withSyncHistogram.recordValue(testValueLevel * 10); withSyncHistogram.recordValueWithExpectedInterval(withSyncHistogram.getCurrentHighestTrackableValue() - 1, withSyncHistogram.getCurrentHighestTrackableValue() / 1000); System.out.println("Testing copyInto for DoubleHistogram backed by SynchronizedHistogram:"); withSyncHistogram.copyInto(targetWithSyncHistogram); assertEqual(withSyncHistogram, targetWithSyncHistogram); withSyncHistogram.recordValue(testValueLevel * 20); withSyncHistogram.copyInto(targetWithSyncHistogram); assertEqual(withSyncHistogram, targetWithSyncHistogram); SynchronizedDoubleHistogram syncHistogram = new SynchronizedDoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits); SynchronizedDoubleHistogram targetSyncHistogram = new SynchronizedDoubleHistogram(trackableValueRangeSize, numberOfSignificantValueDigits); syncHistogram.recordValue(testValueLevel); syncHistogram.recordValue(testValueLevel * 10); syncHistogram.recordValueWithExpectedInterval(syncHistogram.getCurrentHighestTrackableValue() - 1, syncHistogram.getCurrentHighestTrackableValue() / 1000); System.out.println("Testing copyInto for actual SynchronizedDoubleHistogram:"); syncHistogram.copyInto(targetSyncHistogram); assertEqual(syncHistogram, targetSyncHistogram); syncHistogram.recordValue(testValueLevel * 20); syncHistogram.copyInto(targetSyncHistogram); assertEqual(syncHistogram, targetSyncHistogram); } private int findContainingBinaryOrderOfMagnitude(long longNumber) { int pow2ceiling = 64 - Long.numberOfLeadingZeros(longNumber); // smallest power of 2 containing value pow2ceiling = Math.min(pow2ceiling, 62); return pow2ceiling; } private void genericResizeTest(DoubleHistogram h) { h.recordValue(0); h.recordValue(5); h.recordValue(1); h.recordValue(8); h.recordValue(9); Assert.assertEquals(9.0, h.getValueAtPercentile(100), 0.1d); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, ConcurrentDoubleHistogram.class, SynchronizedDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testResize(final Class histoClass) { // Verify resize behvaior for various underlying internal integer histogram implementations: genericResizeTest(constructDoubleHistogram(histoClass, 2)); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, }) public void testResizeInternals(final Class histoClass) { // Verify resize behvaior for various underlying internal integer histogram implementations: genericResizeTest(constructDoubleHistogram(histoClass, 2)); genericResizeTest(constructDoubleHistogram(histoClass,2, IntCountsHistogram.class)); genericResizeTest(constructDoubleHistogram(histoClass,2, ShortCountsHistogram.class)); genericResizeTest(constructDoubleHistogram(histoClass,2, 
ConcurrentHistogram.class)); genericResizeTest(constructDoubleHistogram(histoClass,2, SynchronizedHistogram.class)); genericResizeTest(constructDoubleHistogram(histoClass,2, PackedHistogram.class)); genericResizeTest(constructDoubleHistogram(histoClass,2, PackedConcurrentHistogram.class)); } }
38799
47.019802
151
java
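The record above is DoubleHistogramTest, which covers construction limits, auto-ranging, add/copy/serialization, and resizing across the DoubleHistogram variants. As a companion to its testAddWithAutoResize case, here is a small, illustrative sketch of merging auto-resizing DoubleHistograms; the class name DoubleHistogramMergeSketch is hypothetical, while the constructor, setAutoResize, add, and min/max accessors are the same API the test uses.

import org.HdrHistogram.DoubleHistogram;

public class DoubleHistogramMergeSketch {
    public static void main(String[] args) {
        // 3 significant decimal digits; auto-resize lets the trackable
        // value ratio grow as wider-ranging values arrive.
        DoubleHistogram a = new DoubleHistogram(3);
        a.setAutoResize(true);
        DoubleHistogram b = new DoubleHistogram(3);
        b.setAutoResize(true);

        a.recordValue(1.0);
        a.recordValue(8.0);
        b.recordValue(1000.0); // spans a much larger ratio than a's values

        // add() folds one histogram into another; with auto-resize enabled
        // the target widens its range instead of throwing on out-of-range data.
        DoubleHistogram merged = new DoubleHistogram(3);
        merged.setAutoResize(true);
        merged.add(a);
        merged.add(b);

        System.out.println("count = " + merged.getTotalCount()); // 3
        System.out.println("min   = " + merged.getMinValue());   // ~1.0
        System.out.println("max   = " + merged.getMaxValue());   // ~1000.0
    }
}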
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/test/java/org/HdrHistogram/HistogramTest.java
/** * HistogramTest.java * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import org.junit.Assert; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.function.Executable; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.aggregator.ArgumentsAccessor; import org.junit.jupiter.params.provider.CsvSource; import org.junit.jupiter.params.provider.ValueSource; import java.io.*; import java.util.zip.Deflater; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.HdrHistogram.HistogramTestUtils.constructHistogram; /** * JUnit test for {@link Histogram} */ public class HistogramTest { static final long highestTrackableValue = 3600L * 1000 * 1000; // e.g. for 1 hr in usec units static final int numberOfSignificantValueDigits = 3; static final long testValueLevel = 4; @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testConstructionArgumentRanges(Class histoClass) throws Exception { Boolean thrown = false; AbstractHistogram histogram = null; try { // This should throw: // histogram = new Histogram(1, numberOfSignificantValueDigits); histogram = constructHistogram(histoClass, 1, numberOfSignificantValueDigits); } catch (IllegalArgumentException e) { thrown = true; } Assert.assertTrue(thrown); Assert.assertEquals(histogram, null); thrown = false; try { // This should throw: // histogram = new Histogram(highestTrackableValue, 6); histogram = constructHistogram(histoClass, highestTrackableValue, 6); } catch (IllegalArgumentException e) { thrown = true; } Assert.assertTrue(thrown); Assert.assertEquals(histogram, null); thrown = false; try { // This should throw: // histogram = new Histogram(highestTrackableValue, -1); histogram = constructHistogram(histoClass, highestTrackableValue, -1); } catch (IllegalArgumentException e) { thrown = true; } Assert.assertTrue(thrown); Assert.assertEquals(histogram, null); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testUnitMagnitude0IndexCalculations(Class histoClass) { // Histogram h = new Histogram(1L, 1L << 32, 3); AbstractHistogram h = constructHistogram(histoClass, 1L, 1L << 32, 3); assertEquals(2048, h.subBucketCount); assertEquals(0, h.unitMagnitude); // subBucketCount = 2^11, so 2^11 << 22 is > the max of 2^32 for 23 buckets total assertEquals(23, h.bucketCount); // first half of first bucket assertEquals(0, h.getBucketIndex(3)); assertEquals(3, h.getSubBucketIndex(3, 0)); // second half of first bucket assertEquals(0, h.getBucketIndex(1024 + 3)); assertEquals(1024 + 3, h.getSubBucketIndex(1024 + 3, 0)); // second bucket (top half) assertEquals(1, h.getBucketIndex(2048 + 3 * 2)); // counting by 2s, starting at halfway through the bucket assertEquals(1024 + 3, h.getSubBucketIndex(2048 + 3 * 2, 1)); // third bucket (top half) assertEquals(2, h.getBucketIndex((2048 << 1) + 3 * 4)); // counting by 4s, starting at halfway 
through the bucket assertEquals(1024 + 3, h.getSubBucketIndex((2048 << 1) + 3 * 4, 2)); // past last bucket -- not near Long.MAX_VALUE, so should still calculate ok. assertEquals(23, h.getBucketIndex((2048L << 22) + 3 * (1 << 23))); assertEquals(1024 + 3, h.getSubBucketIndex((2048L << 22) + 3 * (1 << 23), 23)); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testUnitMagnitude4IndexCalculations(Class histoClass) { // Histogram h = new Histogram(1L << 12, 1L << 32, 3); AbstractHistogram h = constructHistogram(histoClass, 1L << 12, 1L << 32, 3); assertEquals(2048, h.subBucketCount); assertEquals(12, h.unitMagnitude); // subBucketCount = 2^11. With unit magnitude shift, it's 2^23. 2^23 << 10 is > the max of 2^32 for 11 buckets // total assertEquals(11, h.bucketCount); long unit = 1L << 12; // below lowest value assertEquals(0, h.getBucketIndex(3)); assertEquals(0, h.getSubBucketIndex(3, 0)); // first half of first bucket assertEquals(0, h.getBucketIndex(3 * unit)); assertEquals(3, h.getSubBucketIndex(3 * unit, 0)); // second half of first bucket // subBucketHalfCount's worth of units, plus 3 more assertEquals(0, h.getBucketIndex(unit * (1024 + 3))); assertEquals(1024 + 3, h.getSubBucketIndex(unit * (1024 + 3), 0)); // second bucket (top half), bucket scale = unit << 1. // Middle of bucket is (subBucketHalfCount = 2^10) of bucket scale, = unit << 11. // Add on 3 of bucket scale. assertEquals(1, h.getBucketIndex((unit << 11) + 3 * (unit << 1))); assertEquals(1024 + 3, h.getSubBucketIndex((unit << 11) + 3 * (unit << 1), 1)); // third bucket (top half), bucket scale = unit << 2. // Middle of bucket is (subBucketHalfCount = 2^10) of bucket scale, = unit << 12. // Add on 3 of bucket scale. assertEquals(2, h.getBucketIndex((unit << 12) + 3 * (unit << 2))); assertEquals(1024 + 3, h.getSubBucketIndex((unit << 12) + 3 * (unit << 2), 2)); // past last bucket -- not near Long.MAX_VALUE, so should still calculate ok. assertEquals(11, h.getBucketIndex((unit << 21) + 3 * (unit << 11))); assertEquals(1024 + 3, h.getSubBucketIndex((unit << 21) + 3 * (unit << 11), 11)); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testUnitMagnitude51SubBucketMagnitude11IndexCalculations(Class histoClass) { // maximum unit magnitude for this precision // Histogram h = new Histogram(1L << 51, Long.MAX_VALUE, 3); AbstractHistogram h = constructHistogram(histoClass, 1L << 51, Long.MAX_VALUE, 3); assertEquals(2048, h.subBucketCount); assertEquals(51, h.unitMagnitude); // subBucketCount = 2^11. With unit magnitude shift, it's 2^62. 1 more bucket to (almost) reach 2^63. 
assertEquals(2, h.bucketCount); assertEquals(2, h.leadingZeroCountBase); long unit = 1L << 51; // below lowest value assertEquals(0, h.getBucketIndex(3)); assertEquals(0, h.getSubBucketIndex(3, 0)); // first half of first bucket assertEquals(0, h.getBucketIndex(3 * unit)); assertEquals(3, h.getSubBucketIndex(3 * unit, 0)); // second half of first bucket // subBucketHalfCount's worth of units, plus 3 more assertEquals(0, h.getBucketIndex(unit * (1024 + 3))); assertEquals(1024 + 3, h.getSubBucketIndex(unit * (1024 + 3), 0)); // end of second half assertEquals(0, h.getBucketIndex(unit * 1024 + 1023 * unit)); assertEquals(1024 + 1023, h.getSubBucketIndex(unit * 1024 + 1023 * unit, 0)); // second bucket (top half), bucket scale = unit << 1. // Middle of bucket is (subBucketHalfCount = 2^10) of bucket scale, = unit << 11. // Add on 3 of bucket scale. assertEquals(1, h.getBucketIndex((unit << 11) + 3 * (unit << 1))); assertEquals(1024 + 3, h.getSubBucketIndex((unit << 11) + 3 * (unit << 1), 1)); // upper half of second bucket, last slot assertEquals(1, h.getBucketIndex(Long.MAX_VALUE)); assertEquals(1024 + 1023, h.getSubBucketIndex(Long.MAX_VALUE, 1)); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testUnitMagnitude52SubBucketMagnitude11Throws(Class histoClass) { try { // new Histogram(1L << 52, 1L << 62, 3); constructHistogram(histoClass, 1L << 52, 1L << 62, 3); fail(); } catch (IllegalArgumentException e) { assertEquals("Cannot represent numberOfSignificantValueDigits worth of values beyond lowestDiscernibleValue", e.getMessage()); } } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testUnitMagnitude54SubBucketMagnitude8Ok(Class histoClass) { // Histogram h = new Histogram(1L << 54, 1L << 62, 2); AbstractHistogram h = constructHistogram(histoClass, 1L << 54, 1L << 62, 2); assertEquals(256, h.subBucketCount); assertEquals(54, h.unitMagnitude); // subBucketCount = 2^8. With unit magnitude shift, it's 2^62. assertEquals(2, h.bucketCount); // below lowest value assertEquals(0, h.getBucketIndex(3)); assertEquals(0, h.getSubBucketIndex(3, 0)); // upper half of second bucket, last slot assertEquals(1, h.getBucketIndex(Long.MAX_VALUE)); assertEquals(128 + 127, h.getSubBucketIndex(Long.MAX_VALUE, 1)); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testUnitMagnitude61SubBucketMagnitude0Ok(Class histoClass) { // Histogram h = new Histogram(1L << 61, 1L << 62, 0); AbstractHistogram h = constructHistogram(histoClass, 1L << 61, 1L << 62, 0); assertEquals(2, h.subBucketCount); assertEquals(61, h.unitMagnitude); // subBucketCount = 2^1. With unit magnitude shift, it's 2^62. 1 more bucket to be > the max of 2^62. 
assertEquals(2, h.bucketCount); // below lowest value assertEquals(0, h.getBucketIndex(3)); assertEquals(0, h.getSubBucketIndex(3, 0)); // upper half of second bucket, last slot assertEquals(1, h.getBucketIndex(Long.MAX_VALUE)); assertEquals(1, h.getSubBucketIndex(Long.MAX_VALUE, 1)); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testEmptyHistogram(Class histoClass) throws Exception { // Histogram histogram = new Histogram(3); AbstractHistogram histogram = constructHistogram(histoClass, 3); long min = histogram.getMinValue(); Assert.assertEquals(0, min); long max = histogram.getMaxValue(); Assert.assertEquals(0, max); double mean = histogram.getMean(); Assert.assertEquals(0, mean, 0.0000000000001D); double stddev = histogram.getStdDeviation(); Assert.assertEquals(0, stddev, 0.0000000000001D); double pcnt = histogram.getPercentileAtOrBelowValue(0); Assert.assertEquals(100.0, pcnt, 0.0000000000001D); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testConstructionArgumentGets(Class histoClass) throws Exception { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); Assert.assertEquals(1, histogram.getLowestDiscernibleValue()); Assert.assertEquals(highestTrackableValue, histogram.getHighestTrackableValue()); Assert.assertEquals(numberOfSignificantValueDigits, histogram.getNumberOfSignificantValueDigits()); // Histogram histogram2 = new Histogram(1000, highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram2 = constructHistogram(histoClass, 1000, highestTrackableValue, numberOfSignificantValueDigits); Assert.assertEquals(1000, histogram2.getLowestDiscernibleValue()); verifyMaxValue(histogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, AtomicHistogram.class, SynchronizedHistogram.class, }) public void testGetEstimatedFootprintInBytes(Class histoClass) throws Exception { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); /* * largestValueWithSingleUnitResolution = 2 * (10 ^ numberOfSignificantValueDigits); * subBucketSize = roundedUpToNearestPowerOf2(largestValueWithSingleUnitResolution); * expectedHistogramFootprintInBytes = 512 + * ({primitive type size} / 2) * * (log2RoundedUp((trackableValueRangeSize) / subBucketSize) + 2) * * subBucketSize */ long largestValueWithSingleUnitResolution = 2 * (long) Math.pow(10, numberOfSignificantValueDigits); int subBucketCountMagnitude = (int) Math.ceil(Math.log(largestValueWithSingleUnitResolution) / Math.log(2)); int subBucketSize = (int) Math.pow(2, (subBucketCountMagnitude)); long expectedSize = 512 + ((8 * ((long) ( Math.ceil( Math.log(highestTrackableValue / subBucketSize) / Math.log(2) ) + 2)) * (1 << (64 - Long.numberOfLeadingZeros(2 * (long) Math.pow(10, numberOfSignificantValueDigits)))) ) / 2); Assert.assertEquals(expectedSize, histogram.getEstimatedFootprintInBytes()); 
verifyMaxValue(histogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testRecordValue(Class histoClass) throws Exception { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); Assert.assertEquals(1L, histogram.getCountAtValue(testValueLevel)); Assert.assertEquals(1L, histogram.getTotalCount()); verifyMaxValue(histogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testRecordValue_Overflow_ShouldThrowException(final Class histoClass) throws Exception { Assertions.assertThrows(ArrayIndexOutOfBoundsException.class, new Executable() { @Override public void execute() throws Throwable { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); histogram.recordValue(highestTrackableValue * 3); } }); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testConstructionWithLargeNumbers(Class histoClass) throws Exception { // Histogram histogram = new Histogram(20000000, 100000000, 5); AbstractHistogram histogram = constructHistogram(histoClass, 20000000, 100000000, 5); histogram.recordValue(100000000); histogram.recordValue(20000000); histogram.recordValue(30000000); Assert.assertTrue(histogram.valuesAreEquivalent(20000000, histogram.getValueAtPercentile(50.0))); Assert.assertTrue(histogram.valuesAreEquivalent(30000000, histogram.getValueAtPercentile(50.0))); Assert.assertTrue(histogram.valuesAreEquivalent(100000000, histogram.getValueAtPercentile(83.33))); Assert.assertTrue(histogram.valuesAreEquivalent(100000000, histogram.getValueAtPercentile(83.34))); Assert.assertTrue(histogram.valuesAreEquivalent(100000000, histogram.getValueAtPercentile(99.0))); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testValueAtPercentileMatchesPercentile(Class histoClass) throws Exception { // Histogram histogram = new Histogram(1, Long.MAX_VALUE, 3); AbstractHistogram histogram = constructHistogram(histoClass, 1, Long.MAX_VALUE, 2); long[] lengths = {1, 5, 10, 50, 100, 500, 1000, 5000, 10000, 50000, 100000}; for (long length : lengths) { histogram.reset(); for (long value = 1; value <= length; value++) { histogram.recordValue(value); } for (long value = 1; value <= length; value = histogram.nextNonEquivalentValue(value)) { Double calculatedPercentile = 100.0 * ((double) value) / length; long lookupValue = histogram.getValueAtPercentile(calculatedPercentile); Assert.assertTrue("length:" + length + " value: 
" + value + " calculatedPercentile:" + calculatedPercentile + " getValueAtPercentile(" + calculatedPercentile + ") = " + lookupValue + " [should be " + value + "]", histogram.valuesAreEquivalent(value, lookupValue)); } } } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testValueAtPercentileMatchesPercentileIter(Class histoClass) throws Exception { // Histogram histogram = new Histogram(1, Long.MAX_VALUE, 3); AbstractHistogram histogram = constructHistogram(histoClass, 1, Long.MAX_VALUE, 2); long[] lengths = {1, 5, 10, 50, 100, 500, 1000, 5000, 10000, 50000, 100000}; for (long length : lengths) { histogram.reset(); for (long value = 1; value <= length; value++) { histogram.recordValue(value); } int percentileTicksPerHalfDistance = 1000; for (HistogramIterationValue v : histogram.percentiles(percentileTicksPerHalfDistance)) { long calculatedValue = histogram.getValueAtPercentile(v.getPercentile()); long iterValue = v.getValueIteratedTo(); Assert.assertTrue("length:" + length + " percentile: " + v.getPercentile() + " calculatedValue:" + calculatedValue + " iterValue:" + iterValue + "[should be " + calculatedValue + "]", histogram.valuesAreEquivalent(calculatedValue, iterValue)); Assert.assertTrue(histogram.valuesAreEquivalent(calculatedValue, iterValue)); } } } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testRecordValueWithExpectedInterval(Class histoClass) throws Exception { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); histogram.recordValueWithExpectedInterval(testValueLevel, testValueLevel / 4); // Histogram rawHistogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram rawHistogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); rawHistogram.recordValue(testValueLevel); // The data will include corrected samples: Assert.assertEquals(1L, histogram.getCountAtValue((testValueLevel * 1) / 4)); Assert.assertEquals(1L, histogram.getCountAtValue((testValueLevel * 2) / 4)); Assert.assertEquals(1L, histogram.getCountAtValue((testValueLevel * 3) / 4)); Assert.assertEquals(1L, histogram.getCountAtValue((testValueLevel * 4) / 4)); Assert.assertEquals(4L, histogram.getTotalCount()); // But the raw data will not: Assert.assertEquals(0L, rawHistogram.getCountAtValue((testValueLevel * 1) / 4)); Assert.assertEquals(0L, rawHistogram.getCountAtValue((testValueLevel * 2) / 4)); Assert.assertEquals(0L, rawHistogram.getCountAtValue((testValueLevel * 3) / 4)); Assert.assertEquals(1L, rawHistogram.getCountAtValue((testValueLevel * 4) / 4)); Assert.assertEquals(1L, rawHistogram.getTotalCount()); verifyMaxValue(histogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testReset(Class histoClass) throws Exception { // 
Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(10); histogram.recordValue(100); Assert.assertEquals(histogram.getMinValue(), Math.min(10, testValueLevel)); Assert.assertEquals(histogram.getMaxValue(), Math.max(100, testValueLevel)); histogram.reset(); Assert.assertEquals(0L, histogram.getCountAtValue(testValueLevel)); Assert.assertEquals(0L, histogram.getTotalCount()); verifyMaxValue(histogram); histogram.recordValue(20); histogram.recordValue(80); Assert.assertEquals(histogram.getMinValue(), 20); Assert.assertEquals(histogram.getMaxValue(), 80); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testAdd(Class histoClass) throws Exception { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); // Histogram other = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram other = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 1000); other.recordValue(testValueLevel); other.recordValue(testValueLevel * 1000); histogram.add(other); Assert.assertEquals(2L, histogram.getCountAtValue(testValueLevel)); Assert.assertEquals(2L, histogram.getCountAtValue(testValueLevel * 1000)); Assert.assertEquals(4L, histogram.getTotalCount()); // Histogram biggerOther = new Histogram(highestTrackableValue * 2, numberOfSignificantValueDigits); AbstractHistogram biggerOther = constructHistogram(histoClass, highestTrackableValue * 2, numberOfSignificantValueDigits); biggerOther.recordValue(testValueLevel); biggerOther.recordValue(testValueLevel * 1000); biggerOther.recordValue(highestTrackableValue * 2); // Adding the smaller histogram to the bigger one should work: biggerOther.add(histogram); Assert.assertEquals(3L, biggerOther.getCountAtValue(testValueLevel)); Assert.assertEquals(3L, biggerOther.getCountAtValue(testValueLevel * 1000)); Assert.assertEquals(1L, biggerOther.getCountAtValue(highestTrackableValue * 2)); // overflow smaller hist... 
Assert.assertEquals(7L, biggerOther.getTotalCount()); // But trying to add a larger histogram into a smaller one should throw an AIOOB: boolean thrown = false; try { // This should throw: histogram.add(biggerOther); } catch (ArrayIndexOutOfBoundsException e) { thrown = true; } Assert.assertTrue(thrown); verifyMaxValue(histogram); verifyMaxValue(other); verifyMaxValue(biggerOther); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testSubtractAfterAdd(Class histoClass) { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); // Histogram other = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram other = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 1000); other.recordValue(testValueLevel); other.recordValue(testValueLevel * 1000); histogram.add(other); Assert.assertEquals(2L, histogram.getCountAtValue(testValueLevel)); Assert.assertEquals(2L, histogram.getCountAtValue(testValueLevel * 1000)); Assert.assertEquals(4L, histogram.getTotalCount()); histogram.add(other); Assert.assertEquals(3L, histogram.getCountAtValue(testValueLevel)); Assert.assertEquals(3L, histogram.getCountAtValue(testValueLevel * 1000)); Assert.assertEquals(6L, histogram.getTotalCount()); histogram.subtract(other); Assert.assertEquals(2L, histogram.getCountAtValue(testValueLevel)); Assert.assertEquals(2L, histogram.getCountAtValue(testValueLevel * 1000)); Assert.assertEquals(4L, histogram.getTotalCount()); verifyMaxValue(histogram); verifyMaxValue(other); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testSubtractToZeroCounts(Class histoClass) { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 1000); Assert.assertEquals(1L, histogram.getCountAtValue(testValueLevel)); Assert.assertEquals(1L, histogram.getCountAtValue(testValueLevel * 1000)); Assert.assertEquals(2L, histogram.getTotalCount()); // Subtracting down to zero counts should work: histogram.subtract(histogram); Assert.assertEquals(0L, histogram.getCountAtValue(testValueLevel)); Assert.assertEquals(0L, histogram.getCountAtValue(testValueLevel * 1000)); Assert.assertEquals(0L, histogram.getTotalCount()); verifyMaxValue(histogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testSubtractToNegativeCountsThrows(Class histoClass) { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = 
constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); // Histogram other = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram other = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 1000); other.recordValueWithCount(testValueLevel, 2); other.recordValueWithCount(testValueLevel * 1000, 2); try { histogram.subtract(other); fail(); } catch (IllegalArgumentException e) { // should throw } verifyMaxValue(histogram); verifyMaxValue(other); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testSubtractSubtrahendValuesOutsideMinuendRangeThrows(Class histoClass) { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 1000); // Histogram biggerOther = new Histogram(highestTrackableValue * 2, numberOfSignificantValueDigits); AbstractHistogram biggerOther = constructHistogram(histoClass, highestTrackableValue * 2, numberOfSignificantValueDigits); biggerOther.recordValue(testValueLevel); biggerOther.recordValue(testValueLevel * 1000); biggerOther.recordValue(highestTrackableValue * 2); // outside smaller histogram's range try { histogram.subtract(biggerOther); fail(); } catch (IllegalArgumentException e) { // should throw } verifyMaxValue(histogram); verifyMaxValue(biggerOther); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testSubtractSubtrahendValuesInsideMinuendRangeWorks(Class histoClass) { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 1000); // Histogram biggerOther = new Histogram(highestTrackableValue * 2, numberOfSignificantValueDigits); AbstractHistogram biggerOther = constructHistogram(histoClass, highestTrackableValue * 2, numberOfSignificantValueDigits); biggerOther.recordValue(testValueLevel); biggerOther.recordValue(testValueLevel * 1000); biggerOther.recordValue(highestTrackableValue * 2); biggerOther.add(biggerOther); biggerOther.add(biggerOther); Assert.assertEquals(4L, biggerOther.getCountAtValue(testValueLevel)); Assert.assertEquals(4L, biggerOther.getCountAtValue(testValueLevel * 1000)); Assert.assertEquals(4L, biggerOther.getCountAtValue(highestTrackableValue * 2)); // overflow smaller hist... 
Assert.assertEquals(12L, biggerOther.getTotalCount()); // Subtracting the smaller histogram from the bigger one should work: biggerOther.subtract(histogram); Assert.assertEquals(3L, biggerOther.getCountAtValue(testValueLevel)); Assert.assertEquals(3L, biggerOther.getCountAtValue(testValueLevel * 1000)); Assert.assertEquals(4L, biggerOther.getCountAtValue(highestTrackableValue * 2)); // overflow smaller hist... Assert.assertEquals(10L, biggerOther.getTotalCount()); verifyMaxValue(histogram); verifyMaxValue(biggerOther); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testSizeOfEquivalentValueRange(Class histoClass) { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); Assert.assertEquals("Size of equivalent range for value 1 is 1", 1, histogram.sizeOfEquivalentValueRange(1)); Assert.assertEquals("Size of equivalent range for value 1025 is 1", 1, histogram.sizeOfEquivalentValueRange(1025)); Assert.assertEquals("Size of equivalent range for value 2047 is 1", 1, histogram.sizeOfEquivalentValueRange(2047)); Assert.assertEquals("Size of equivalent range for value 2048 is 2", 2, histogram.sizeOfEquivalentValueRange(2048)); Assert.assertEquals("Size of equivalent range for value 2500 is 2", 2, histogram.sizeOfEquivalentValueRange(2500)); Assert.assertEquals("Size of equivalent range for value 8191 is 4", 4, histogram.sizeOfEquivalentValueRange(8191)); Assert.assertEquals("Size of equivalent range for value 8192 is 8", 8, histogram.sizeOfEquivalentValueRange(8192)); Assert.assertEquals("Size of equivalent range for value 10000 is 8", 8, histogram.sizeOfEquivalentValueRange(10000)); verifyMaxValue(histogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testScaledSizeOfEquivalentValueRange(Class histoClass) { // Histogram histogram = new Histogram(1024, highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, 1024, highestTrackableValue, numberOfSignificantValueDigits); Assert.assertEquals("Size of equivalent range for value 1 * 1024 is 1 * 1024", 1 * 1024, histogram.sizeOfEquivalentValueRange(1 * 1024)); Assert.assertEquals("Size of equivalent range for value 2500 * 1024 is 2 * 1024", 2 * 1024, histogram.sizeOfEquivalentValueRange(2500 * 1024)); Assert.assertEquals("Size of equivalent range for value 8191 * 1024 is 4 * 1024", 4 * 1024, histogram.sizeOfEquivalentValueRange(8191 * 1024)); Assert.assertEquals("Size of equivalent range for value 8192 * 1024 is 8 * 1024", 8 * 1024, histogram.sizeOfEquivalentValueRange(8192 * 1024)); Assert.assertEquals("Size of equivalent range for value 10000 * 1024 is 8 * 1024", 8 * 1024, histogram.sizeOfEquivalentValueRange(10000 * 1024)); verifyMaxValue(histogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) 
public void testLowestEquivalentValue(Class histoClass) { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); Assert.assertEquals("The lowest equivalent value to 10007 is 10000", 10000, histogram.lowestEquivalentValue(10007)); Assert.assertEquals("The lowest equivalent value to 10009 is 10008", 10008, histogram.lowestEquivalentValue(10009)); verifyMaxValue(histogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testScaledLowestEquivalentValue(Class histoClass) { // Histogram histogram = new Histogram(1024, highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, 1024, highestTrackableValue, numberOfSignificantValueDigits); Assert.assertEquals("The lowest equivalent value to 10007 * 1024 is 10000 * 1024", 10000 * 1024, histogram.lowestEquivalentValue(10007 * 1024)); Assert.assertEquals("The lowest equivalent value to 10009 * 1024 is 10008 * 1024", 10008 * 1024, histogram.lowestEquivalentValue(10009 * 1024)); verifyMaxValue(histogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testHighestEquivalentValue(Class histoClass) { // Histogram histogram = new Histogram(1024, highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, 1024, highestTrackableValue, numberOfSignificantValueDigits); Assert.assertEquals("The highest equivalent value to 8180 * 1024 is 8183 * 1024 + 1023", 8183 * 1024 + 1023, histogram.highestEquivalentValue(8180 * 1024)); Assert.assertEquals("The highest equivalent value to 8187 * 1024 is 8191 * 1024 + 1023", 8191 * 1024 + 1023, histogram.highestEquivalentValue(8191 * 1024)); Assert.assertEquals("The highest equivalent value to 8193 * 1024 is 8199 * 1024 + 1023", 8199 * 1024 + 1023, histogram.highestEquivalentValue(8193 * 1024)); Assert.assertEquals("The highest equivalent value to 9995 * 1024 is 9999 * 1024 + 1023", 9999 * 1024 + 1023, histogram.highestEquivalentValue(9995 * 1024)); Assert.assertEquals("The highest equivalent value to 10007 * 1024 is 10007 * 1024 + 1023", 10007 * 1024 + 1023, histogram.highestEquivalentValue(10007 * 1024)); Assert.assertEquals("The highest equivalent value to 10008 * 1024 is 10015 * 1024 + 1023", 10015 * 1024 + 1023, histogram.highestEquivalentValue(10008 * 1024)); verifyMaxValue(histogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testScaledHighestEquivalentValue(Class histoClass) { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); Assert.assertEquals("The highest equivalent value to 8180 is 8183", 8183, histogram.highestEquivalentValue(8180)); 
Assert.assertEquals("The highest equivalent value to 8187 is 8191", 8191, histogram.highestEquivalentValue(8191)); Assert.assertEquals("The highest equivalent value to 8193 is 8199", 8199, histogram.highestEquivalentValue(8193)); Assert.assertEquals("The highest equivalent value to 9995 is 9999", 9999, histogram.highestEquivalentValue(9995)); Assert.assertEquals("The highest equivalent value to 10007 is 10007", 10007, histogram.highestEquivalentValue(10007)); Assert.assertEquals("The highest equivalent value to 10008 is 10015", 10015, histogram.highestEquivalentValue(10008)); verifyMaxValue(histogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testMedianEquivalentValue(Class histoClass) { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); Assert.assertEquals("The median equivalent value to 4 is 4", 4, histogram.medianEquivalentValue(4)); Assert.assertEquals("The median equivalent value to 5 is 5", 5, histogram.medianEquivalentValue(5)); Assert.assertEquals("The median equivalent value to 4000 is 4001", 4001, histogram.medianEquivalentValue(4000)); Assert.assertEquals("The median equivalent value to 8000 is 8002", 8002, histogram.medianEquivalentValue(8000)); Assert.assertEquals("The median equivalent value to 10007 is 10004", 10004, histogram.medianEquivalentValue(10007)); verifyMaxValue(histogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testScaledMedianEquivalentValue(Class histoClass) { // Histogram histogram = new Histogram(1024, highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, 1024, highestTrackableValue, numberOfSignificantValueDigits); Assert.assertEquals("The median equivalent value to 4 * 1024 is 4 * 1024 + 512", 4 * 1024 + 512, histogram.medianEquivalentValue(4 * 1024)); Assert.assertEquals("The median equivalent value to 5 * 1024 is 5 * 1024 + 512", 5 * 1024 + 512, histogram.medianEquivalentValue(5 * 1024)); Assert.assertEquals("The median equivalent value to 4000 * 1024 is 4001 * 1024", 4001 * 1024, histogram.medianEquivalentValue(4000 * 1024)); Assert.assertEquals("The median equivalent value to 8000 * 1024 is 8002 * 1024", 8002 * 1024, histogram.medianEquivalentValue(8000 * 1024)); Assert.assertEquals("The median equivalent value to 10007 * 1024 is 10004 * 1024", 10004 * 1024, histogram.medianEquivalentValue(10007 * 1024)); verifyMaxValue(histogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testNextNonEquivalentValue(Class histoClass) { // Histogram histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); Assert.assertNotSame(null, histogram); } 
void testAbstractSerialization(AbstractHistogram histogram) throws Exception { histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 10); histogram.recordValueWithExpectedInterval(histogram.getHighestTrackableValue() - 1, 255); if (histogram.supportsAutoResize()) { histogram.setAutoResize(true); assertTrue(histogram.isAutoResize()); } ByteArrayOutputStream bos = new ByteArrayOutputStream(); ObjectOutput out = null; ByteArrayInputStream bis = null; ObjectInput in = null; AbstractHistogram newHistogram = null; try { out = new ObjectOutputStream(bos); out.writeObject(histogram); Deflater compresser = new Deflater(); compresser.setInput(bos.toByteArray()); compresser.finish(); byte [] compressedOutput = new byte[1024*1024]; int compressedDataLength = compresser.deflate(compressedOutput); System.out.println("Serialized form of " + histogram.getClass() + " with trackableValueRangeSize = " + histogram.getHighestTrackableValue() + "\n and a numberOfSignificantValueDigits = " + histogram.getNumberOfSignificantValueDigits() + " is " + bos.toByteArray().length + " bytes long. Compressed form is " + compressedDataLength + " bytes long."); System.out.println(" (estimated footprint was " + histogram.getEstimatedFootprintInBytes() + " bytes)"); bis = new ByteArrayInputStream(bos.toByteArray()); in = new ObjectInputStream(bis); newHistogram = (AbstractHistogram) in.readObject(); } finally { if (out != null) out.close(); bos.close(); if (in !=null) in.close(); if (bis != null) bis.close(); } Assert.assertNotNull(newHistogram); assertEqual(histogram, newHistogram); assertTrue(histogram.equals(newHistogram)); if (histogram.supportsAutoResize()) { assertTrue(histogram.isAutoResize()); } assertEquals(newHistogram.isAutoResize(), histogram.isAutoResize()); Assert.assertTrue(histogram.hashCode() == newHistogram.hashCode()); assertEquals(histogram.getNeededByteBufferCapacity(), newHistogram.copy().getNeededByteBufferCapacity()); assertEquals(histogram.getNeededByteBufferCapacity(), newHistogram.getNeededByteBufferCapacity()); } private void assertEqual(AbstractHistogram expectedHistogram, AbstractHistogram actualHistogram) { Assert.assertEquals(expectedHistogram, actualHistogram); Assert.assertEquals( expectedHistogram.getCountAtValue(testValueLevel), actualHistogram.getCountAtValue(testValueLevel)); Assert.assertEquals( expectedHistogram.getCountAtValue(testValueLevel * 10), actualHistogram.getCountAtValue(testValueLevel * 10)); Assert.assertEquals( expectedHistogram.getTotalCount(), actualHistogram.getTotalCount()); verifyMaxValue(expectedHistogram); verifyMaxValue(actualHistogram); } @Test public void testPackedEquivalence() { Histogram histogram = new Histogram(highestTrackableValue, 3); histogram.recordValueWithExpectedInterval(histogram.getHighestTrackableValue() - 1, 255); // for each value in a non-packed histogram, record the same in a packed one, and for each // step, verify all values up to that point match. 
// start from the top and go down: Histogram packedHistogram = new PackedHistogram(highestTrackableValue, 3); for (int index = histogram.counts.length - 1; index >= 0; index--) { if (histogram.counts[index] != 0) { packedHistogram.addToCountAtIndex(index, histogram.counts[index]); // Now verify every value up to this point: for (int i = histogram.counts.length - 1; i >= index; i--) { long histValue = histogram.counts[i]; long packedHistValue; try { packedHistValue = packedHistogram.getCountAtIndex(i); } catch (ArrayIndexOutOfBoundsException ex) { System.out.println("AIOOB at i = " + i + " : " + ex); throw ex; } if (histValue != packedHistValue) { // Blow up with assert: (easier to breakpoint this way). Assert.assertEquals("at insertion index " + index + ", contents of index " + i + " don't match", histValue, packedHistValue); } } } } } @ParameterizedTest @CsvSource({ "Histogram, 3", "Histogram, 2", "ConcurrentHistogram, 3", "ConcurrentHistogram, 2", "AtomicHistogram, 3", "AtomicHistogram, 2", "SynchronizedHistogram, 3", "SynchronizedHistogram, 2", "PackedHistogram, 3", "PackedHistogram, 2", "PackedConcurrentHistogram, 3", "PackedConcurrentHistogram, 2", "IntCountsHistogram, 3", "IntCountsHistogram, 2", "ShortCountsHistogram, 3", "ShortCountsHistogram, 4", // ShortCountsHistogram would overflow with 2 }) public void testSerialization(ArgumentsAccessor arguments) throws Exception { Class histoClass = Class.forName("org.HdrHistogram." + arguments.getString(0)); int digits = arguments.getInteger(1); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, digits); testAbstractSerialization(histogram); } @Test public void testShortCountsHistogramOverflow() throws Exception { Assertions.assertThrows(IllegalStateException.class, new Executable() { @Override public void execute() throws Throwable { ShortCountsHistogram histogram = new ShortCountsHistogram(highestTrackableValue, 2); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 10); // This should overflow a ShortHistogram: histogram.recordValueWithExpectedInterval( histogram.getHighestTrackableValue() - 1, 500); } }); } @Test public void testIntCountsHistogramOverflow() throws Exception { Assertions.assertThrows(IllegalStateException.class, new Executable() { @Override public void execute() throws Throwable { IntCountsHistogram histogram = new IntCountsHistogram(highestTrackableValue, 2); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 10); // This should overflow a ShortHistogram: histogram.recordValueWithCount(testValueLevel, 10); histogram.recordValueWithCount(testValueLevel, 10); histogram.recordValueWithCount(testValueLevel, Integer.MAX_VALUE - 10); } }); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testCopy(Class histoClass) throws Exception { AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 10); histogram.recordValueWithExpectedInterval(histogram.getHighestTrackableValue() - 1, 31000); assertEqual(histogram, histogram.copy()); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, 
PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testScaledCopy(Class histoClass) throws Exception { AbstractHistogram histogram = constructHistogram(histoClass,1000, highestTrackableValue, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 10); histogram.recordValueWithExpectedInterval(histogram.getHighestTrackableValue() - 1, 31000); System.out.println("Testing copy of scaled Histogram:"); assertEqual(histogram, histogram.copy()); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testCopyInto(Class histoClass) throws Exception { AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram targetHistogram = constructHistogram(histoClass, highestTrackableValue, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 10); histogram.recordValueWithExpectedInterval(histogram.getHighestTrackableValue() - 1, 31000); System.out.println("Testing copyInto for Histogram:"); histogram.copyInto(targetHistogram); assertEqual(histogram, targetHistogram); histogram.recordValue(testValueLevel * 20); histogram.copyInto(targetHistogram); assertEqual(histogram, targetHistogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, AtomicHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testScaledCopyInto(Class histoClass) throws Exception { AbstractHistogram histogram = constructHistogram(histoClass, 1000, highestTrackableValue, numberOfSignificantValueDigits); AbstractHistogram targetHistogram = constructHistogram(histoClass, 1000, highestTrackableValue, numberOfSignificantValueDigits); histogram.recordValue(testValueLevel); histogram.recordValue(testValueLevel * 10); histogram.recordValueWithExpectedInterval(histogram.getHighestTrackableValue() - 1, 31000); System.out.println("Testing copyInto for scaled Histogram:"); histogram.copyInto(targetHistogram); assertEqual(histogram, targetHistogram); histogram.recordValue(testValueLevel * 20); histogram.copyInto(targetHistogram); assertEqual(histogram, targetHistogram); } public void verifyMaxValue(AbstractHistogram histogram) { long computedMaxValue = 0; for (int i = 0; i < histogram.countsArrayLength; i++) { if (histogram.getCountAtIndex(i) > 0) { computedMaxValue = histogram.valueFromIndex(i); } } computedMaxValue = (computedMaxValue == 0) ? 0 : histogram.highestEquivalentValue(computedMaxValue); Assert.assertEquals(computedMaxValue, histogram.getMaxValue()); } }
63,063
46.993912
140
java
null
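A minimal standalone usage sketch, not part of the repository (the class name and recorded values are illustrative assumptions), showing the Histogram recording and percentile-query API that the index-calculation and percentile tests above exercise:

import org.HdrHistogram.Histogram;

public class HistogramUsageSketch {
    public static void main(String[] args) {
        // 3 significant decimal digits over a 1 usec .. 1 hour value range, as in the tests above.
        Histogram histogram = new Histogram(3600L * 1000 * 1000, 3);
        for (long value = 1; value <= 100000; value++) {
            histogram.recordValue(value); // each value lands in a (bucket, sub-bucket) pair
        }
        // Values that share a sub-bucket are "equivalent"; a percentile lookup returns a value in that range.
        System.out.println("median = " + histogram.getValueAtPercentile(50.0));
        System.out.println("p99    = " + histogram.getValueAtPercentile(99.0));
        System.out.println("max    = " + histogram.getMaxValue());
        System.out.println("count  = " + histogram.getTotalCount());
    }
}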
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/test/java/org/HdrHistogram/HistogramShiftTest.java
/** * HistogramTest.java * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import org.junit.Assert; import org.junit.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import static org.HdrHistogram.HistogramTestUtils.constructHistogram; /** * JUnit test for {@link Histogram} */ public class HistogramShiftTest { static final long highestTrackableValue = 3600L * 1000 * 1000; // e.g. for 1 hr in usec units static final Class[] histogramClassesNoAtomic = { Histogram.class, ConcurrentHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class }; @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testHistogramShift(Class histoClass) throws Exception { // Histogram h = new Histogram(1L, 1L << 32, 3); AbstractHistogram histogram = constructHistogram(histoClass, highestTrackableValue, 3); testShiftLowestBucket(histogram); testShiftNonLowestBucket(histogram); } void testShiftLowestBucket(AbstractHistogram histogram) { for (int shiftAmount = 0; shiftAmount < 10; shiftAmount++) { histogram.reset(); histogram.recordValueWithCount(0, 500); histogram.recordValue(2); histogram.recordValue(4); histogram.recordValue(5); histogram.recordValue(511); histogram.recordValue(512); histogram.recordValue(1023); histogram.recordValue(1024); histogram.recordValue(1025); AbstractHistogram histogram2 = histogram.copy(); histogram2.reset(); histogram2.recordValueWithCount(0, 500); histogram2.recordValue(2 << shiftAmount); histogram2.recordValue(4 << shiftAmount); histogram2.recordValue(5 << shiftAmount); histogram2.recordValue(511 << shiftAmount); histogram2.recordValue(512 << shiftAmount); histogram2.recordValue(1023 << shiftAmount); histogram2.recordValue(1024 << shiftAmount); histogram2.recordValue(1025 << shiftAmount); histogram.shiftValuesLeft(shiftAmount); if (!histogram.equals(histogram2)) { System.out.println("Not Equal for shift of " + shiftAmount); } Assert.assertEquals(histogram, histogram2); } } void testShiftNonLowestBucket(AbstractHistogram histogram) { for (int shiftAmount = 0; shiftAmount < 10; shiftAmount++) { histogram.reset(); histogram.recordValueWithCount(0, 500); histogram.recordValue(2 << 10); histogram.recordValue(4 << 10); histogram.recordValue(5 << 10); histogram.recordValue(511 << 10); histogram.recordValue(512 << 10); histogram.recordValue(1023 << 10); histogram.recordValue(1024 << 10); histogram.recordValue(1025 << 10); AbstractHistogram origHistogram = histogram.copy(); AbstractHistogram histogram2 = histogram.copy(); histogram2.reset(); histogram2.recordValueWithCount(0, 500); histogram2.recordValue((2 << 10) << shiftAmount); histogram2.recordValue((4 << 10) << shiftAmount); histogram2.recordValue((5 << 10) << shiftAmount); histogram2.recordValue((511 << 10) << shiftAmount); histogram2.recordValue((512 << 10) << shiftAmount); histogram2.recordValue((1023 << 10) << shiftAmount); histogram2.recordValue((1024 << 10) << shiftAmount); histogram2.recordValue((1025 << 10) << shiftAmount); histogram.shiftValuesLeft(shiftAmount); if (!histogram.equals(histogram2)) { System.out.println("Not Equal for shift of " + shiftAmount); } Assert.assertEquals(histogram, histogram2); 
histogram.shiftValuesRight(shiftAmount); Assert.assertEquals(histogram, origHistogram); } } }
4,575
36.508197
97
java
null
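A minimal sketch (hypothetical class name) of what HistogramShiftTest above verifies: shifting recorded values left by k behaves like having recorded each value multiplied by 2^k, and shifting right by the same amount restores the original distribution:

import org.HdrHistogram.Histogram;

public class ShiftSketch {
    public static void main(String[] args) {
        Histogram shifted = new Histogram(3600L * 1000 * 1000, 3);
        Histogram direct = new Histogram(3600L * 1000 * 1000, 3);
        int shiftAmount = 3;
        shifted.recordValue(512);
        shifted.recordValue(1024);
        direct.recordValue(512 << shiftAmount);
        direct.recordValue(1024 << shiftAmount);
        shifted.shiftValuesLeft(shiftAmount);        // multiply all recorded values by 2^shiftAmount
        System.out.println(shifted.equals(direct));  // true for these values, as the test above asserts
        shifted.shiftValuesRight(shiftAmount);       // undoes the shift
    }
}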
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/test/java/org/HdrHistogram/HistogramEncodingTest.java
/** * HistogramTest.java * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import org.junit.Assert; import org.junit.jupiter.api.Test; import org.junit.experimental.theories.DataPoints; import org.junit.experimental.theories.Theories; import org.junit.experimental.theories.Theory; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import org.junit.runner.RunWith; import java.nio.ByteBuffer; import static org.HdrHistogram.HistogramTestUtils.constructHistogram; import static org.HdrHistogram.HistogramTestUtils.constructDoubleHistogram; import static org.HdrHistogram.HistogramTestUtils.decodeFromCompressedByteBuffer; import static org.HdrHistogram.HistogramTestUtils.decodeDoubleHistogramFromCompressedByteBuffer; /** * JUnit test for {@link org.HdrHistogram.Histogram} */ @RunWith(Theories.class) public class HistogramEncodingTest { static final long highestTrackableValue = 3600L * 1000 * 1000; // e.g. for 1 hr in usec units @Test public void testHistogramEncoding_ByteBufferHasCorrectPositionSetAfterEncoding() throws Exception { Histogram histogram = new Histogram(highestTrackableValue, 3); int size = histogram.getNeededByteBufferCapacity(); ByteBuffer buffer = ByteBuffer.allocate(size); int bytesWritten = histogram.encodeIntoCompressedByteBuffer(buffer); Assert.assertEquals(bytesWritten, buffer.position()); buffer.rewind(); bytesWritten = histogram.encodeIntoByteBuffer(buffer); Assert.assertEquals(bytesWritten, buffer.position()); } public enum BufferAllocator { DIRECT { @Override public ByteBuffer allocate(final int size) { return ByteBuffer.allocateDirect(size); } }, HEAP { @Override public ByteBuffer allocate(final int size) { return ByteBuffer.allocate(size); } }; public abstract ByteBuffer allocate(int size); } @DataPoints public static BufferAllocator[] ALLOCATORS = new BufferAllocator[] { BufferAllocator.DIRECT, BufferAllocator.HEAP }; @Theory public void testHistogramEncoding(BufferAllocator allocator) throws Exception { ShortCountsHistogram shortCountsHistogram = new ShortCountsHistogram(highestTrackableValue, 3); IntCountsHistogram intCountsHistogram = new IntCountsHistogram(highestTrackableValue, 3); Histogram histogram = new Histogram(highestTrackableValue, 3); PackedHistogram packedHistogram = new PackedHistogram(highestTrackableValue, 3); PackedConcurrentHistogram packedConcurrentHistogram = new PackedConcurrentHistogram(highestTrackableValue, 3); AtomicHistogram atomicHistogram = new AtomicHistogram(highestTrackableValue, 3); ConcurrentHistogram concurrentHistogram = new ConcurrentHistogram(highestTrackableValue, 3); SynchronizedHistogram synchronizedHistogram = new SynchronizedHistogram(highestTrackableValue, 3); DoubleHistogram doubleHistogram = new DoubleHistogram(highestTrackableValue * 1000, 3); PackedDoubleHistogram packedDoubleHistogram = new PackedDoubleHistogram(highestTrackableValue * 1000, 3); DoubleHistogram concurrentDoubleHistogram = new ConcurrentDoubleHistogram(highestTrackableValue * 1000, 3); PackedConcurrentDoubleHistogram packedConcurrentDoubleHistogram = new PackedConcurrentDoubleHistogram(highestTrackableValue * 1000, 3); for (int i = 0; i < 10000; i++) { shortCountsHistogram.recordValue(1000 * i); intCountsHistogram.recordValue(2000 * i); histogram.recordValue(3000 * i); packedHistogram.recordValue(3000 * i); packedConcurrentHistogram.recordValue(3000 * i); 
atomicHistogram.recordValue(4000 * i); concurrentHistogram.recordValue(4000 * i); synchronizedHistogram.recordValue(5000 * i); doubleHistogram.recordValue(5000 * i); doubleHistogram.recordValue(0.001); // Makes some internal shifts happen. packedDoubleHistogram.recordValue(5000 * i); packedDoubleHistogram.recordValue(0.001); // Makes some internal shifts happen. concurrentDoubleHistogram.recordValue(5000 * i); concurrentDoubleHistogram.recordValue(0.001); // Makes some internal shifts happen. packedConcurrentDoubleHistogram.recordValue(5000 * i); packedConcurrentDoubleHistogram.recordValue(0.001); // Makes some internal shifts happen. } System.out.println("Testing encoding of a ShortHistogram:"); ByteBuffer targetBuffer = allocator.allocate(shortCountsHistogram.getNeededByteBufferCapacity()); shortCountsHistogram.encodeIntoByteBuffer(targetBuffer); targetBuffer.rewind(); ShortCountsHistogram shortCountsHistogram2 = ShortCountsHistogram.decodeFromByteBuffer(targetBuffer, 0); Assert.assertEquals(shortCountsHistogram, shortCountsHistogram2); ByteBuffer targetCompressedBuffer = allocator.allocate(shortCountsHistogram.getNeededByteBufferCapacity()); shortCountsHistogram.encodeIntoCompressedByteBuffer(targetCompressedBuffer); targetCompressedBuffer.rewind(); ShortCountsHistogram shortCountsHistogram3 = ShortCountsHistogram.decodeFromCompressedByteBuffer(targetCompressedBuffer, 0); Assert.assertEquals(shortCountsHistogram, shortCountsHistogram3); System.out.println("Testing encoding of a IntHistogram:"); targetBuffer = allocator.allocate(intCountsHistogram.getNeededByteBufferCapacity()); intCountsHistogram.encodeIntoByteBuffer(targetBuffer); targetBuffer.rewind(); IntCountsHistogram intCountsHistogram2 = IntCountsHistogram.decodeFromByteBuffer(targetBuffer, 0); Assert.assertEquals(intCountsHistogram, intCountsHistogram2); targetCompressedBuffer = allocator.allocate(intCountsHistogram.getNeededByteBufferCapacity()); intCountsHistogram.encodeIntoCompressedByteBuffer(targetCompressedBuffer); targetCompressedBuffer.rewind(); IntCountsHistogram intCountsHistogram3 = IntCountsHistogram.decodeFromCompressedByteBuffer(targetCompressedBuffer, 0); Assert.assertEquals(intCountsHistogram, intCountsHistogram3); System.out.println("Testing encoding of a Histogram:"); targetBuffer = allocator.allocate(histogram.getNeededByteBufferCapacity()); histogram.encodeIntoByteBuffer(targetBuffer); targetBuffer.rewind(); Histogram histogram2 = Histogram.decodeFromByteBuffer(targetBuffer, 0); Assert.assertEquals(histogram, histogram2); targetCompressedBuffer = allocator.allocate(histogram.getNeededByteBufferCapacity()); histogram.encodeIntoCompressedByteBuffer(targetCompressedBuffer); targetCompressedBuffer.rewind(); Histogram histogram3 = Histogram.decodeFromCompressedByteBuffer(targetCompressedBuffer, 0); Assert.assertEquals(histogram, histogram3); System.out.println("Testing encoding of a PackedHistogram:"); targetBuffer = allocator.allocate(packedHistogram.getNeededByteBufferCapacity()); packedHistogram.encodeIntoByteBuffer(targetBuffer); targetBuffer.rewind(); PackedHistogram packedHistogram2 = PackedHistogram.decodeFromByteBuffer(targetBuffer, 0); Assert.assertEquals(packedHistogram, packedHistogram2); targetCompressedBuffer = allocator.allocate(packedHistogram.getNeededByteBufferCapacity()); packedHistogram.encodeIntoCompressedByteBuffer(targetCompressedBuffer); targetCompressedBuffer.rewind(); PackedHistogram packedHistogram3 = PackedHistogram.decodeFromCompressedByteBuffer(targetCompressedBuffer, 0); 
Assert.assertEquals(packedHistogram, packedHistogram3); System.out.println("Testing encoding of a PackedConcurrentHistogram:"); targetBuffer = allocator.allocate(packedConcurrentHistogram.getNeededByteBufferCapacity()); packedConcurrentHistogram.encodeIntoByteBuffer(targetBuffer); targetBuffer.rewind(); PackedConcurrentHistogram packedConcurrentHistogram2 = PackedConcurrentHistogram.decodeFromByteBuffer(targetBuffer, 0); Assert.assertEquals(packedConcurrentHistogram, packedConcurrentHistogram2); targetCompressedBuffer = allocator.allocate(packedConcurrentHistogram.getNeededByteBufferCapacity()); packedConcurrentHistogram.encodeIntoCompressedByteBuffer(targetCompressedBuffer); targetCompressedBuffer.rewind(); PackedConcurrentHistogram packedConcurrentHistogram3 = PackedConcurrentHistogram.decodeFromCompressedByteBuffer(targetCompressedBuffer, 0); Assert.assertEquals(packedConcurrentHistogram, packedConcurrentHistogram3); System.out.println("Testing encoding of a AtomicHistogram:"); targetBuffer = allocator.allocate(atomicHistogram.getNeededByteBufferCapacity()); atomicHistogram.encodeIntoByteBuffer(targetBuffer); targetBuffer.rewind(); AtomicHistogram atomicHistogram2 = AtomicHistogram.decodeFromByteBuffer(targetBuffer, 0); Assert.assertEquals(atomicHistogram, atomicHistogram2); targetCompressedBuffer = allocator.allocate(atomicHistogram.getNeededByteBufferCapacity()); atomicHistogram.encodeIntoCompressedByteBuffer(targetCompressedBuffer); targetCompressedBuffer.rewind(); AtomicHistogram atomicHistogram3 = AtomicHistogram.decodeFromCompressedByteBuffer(targetCompressedBuffer, 0); Assert.assertEquals(atomicHistogram, atomicHistogram3); System.out.println("Testing encoding of a ConcurrentHistogram:"); targetBuffer = allocator.allocate(concurrentHistogram.getNeededByteBufferCapacity()); concurrentHistogram.encodeIntoByteBuffer(targetBuffer); targetBuffer.rewind(); ConcurrentHistogram concurrentHistogram2 = ConcurrentHistogram.decodeFromByteBuffer(targetBuffer, 0); Assert.assertEquals(concurrentHistogram, concurrentHistogram2); targetCompressedBuffer = allocator.allocate(concurrentHistogram.getNeededByteBufferCapacity()); concurrentHistogram.encodeIntoCompressedByteBuffer(targetCompressedBuffer); targetCompressedBuffer.rewind(); ConcurrentHistogram concurrentHistogram3 = ConcurrentHistogram.decodeFromCompressedByteBuffer(targetCompressedBuffer, 0); Assert.assertEquals(concurrentHistogram, concurrentHistogram3); System.out.println("Testing encoding of a SynchronizedHistogram:"); targetBuffer = allocator.allocate(synchronizedHistogram.getNeededByteBufferCapacity()); synchronizedHistogram.encodeIntoByteBuffer(targetBuffer); targetBuffer.rewind(); SynchronizedHistogram synchronizedHistogram2 = SynchronizedHistogram.decodeFromByteBuffer(targetBuffer, 0); Assert.assertEquals(synchronizedHistogram, synchronizedHistogram2); synchronizedHistogram.setIntegerToDoubleValueConversionRatio(5.0); targetCompressedBuffer = allocator.allocate(synchronizedHistogram.getNeededByteBufferCapacity()); synchronizedHistogram.encodeIntoCompressedByteBuffer(targetCompressedBuffer); targetCompressedBuffer.rewind(); SynchronizedHistogram synchronizedHistogram3 = SynchronizedHistogram.decodeFromCompressedByteBuffer(targetCompressedBuffer, 0); Assert.assertEquals(synchronizedHistogram, synchronizedHistogram3); System.out.println("Testing encoding of a DoubleHistogram:"); targetBuffer = allocator.allocate(doubleHistogram.getNeededByteBufferCapacity()); doubleHistogram.encodeIntoByteBuffer(targetBuffer); targetBuffer.rewind(); 
DoubleHistogram doubleHistogram2 = DoubleHistogram.decodeFromByteBuffer(targetBuffer, 0); Assert.assertEquals(doubleHistogram, doubleHistogram2); targetCompressedBuffer = allocator.allocate(doubleHistogram.getNeededByteBufferCapacity()); doubleHistogram.encodeIntoCompressedByteBuffer(targetCompressedBuffer); targetCompressedBuffer.rewind(); DoubleHistogram doubleHistogram3 = DoubleHistogram.decodeFromCompressedByteBuffer(targetCompressedBuffer, 0); Assert.assertEquals(doubleHistogram, doubleHistogram3); System.out.println("Testing encoding of a PackedDoubleHistogram:"); targetBuffer = allocator.allocate(packedDoubleHistogram.getNeededByteBufferCapacity()); packedDoubleHistogram.encodeIntoByteBuffer(targetBuffer); targetBuffer.rewind(); PackedDoubleHistogram packedDoubleHistogram2 = PackedDoubleHistogram.decodeFromByteBuffer(targetBuffer, 0); Assert.assertEquals(packedDoubleHistogram, packedDoubleHistogram2); targetCompressedBuffer = allocator.allocate(packedDoubleHistogram.getNeededByteBufferCapacity()); packedDoubleHistogram.encodeIntoCompressedByteBuffer(targetCompressedBuffer); targetCompressedBuffer.rewind(); PackedDoubleHistogram packedDoubleHistogram3 = PackedDoubleHistogram.decodeFromCompressedByteBuffer(targetCompressedBuffer, 0); Assert.assertEquals(packedDoubleHistogram, packedDoubleHistogram3); System.out.println("Testing encoding of a ConcurrentDoubleHistogram:"); targetBuffer = allocator.allocate(concurrentDoubleHistogram.getNeededByteBufferCapacity()); concurrentDoubleHistogram.encodeIntoByteBuffer(targetBuffer); targetBuffer.rewind(); ConcurrentDoubleHistogram concurrentDoubleHistogram2 = ConcurrentDoubleHistogram.decodeFromByteBuffer(targetBuffer, 0); Assert.assertEquals(concurrentDoubleHistogram, concurrentDoubleHistogram2); targetCompressedBuffer = allocator.allocate(concurrentDoubleHistogram.getNeededByteBufferCapacity()); concurrentDoubleHistogram.encodeIntoCompressedByteBuffer(targetCompressedBuffer); targetCompressedBuffer.rewind(); ConcurrentDoubleHistogram concurrentDoubleHistogram3 = ConcurrentDoubleHistogram.decodeFromCompressedByteBuffer(targetCompressedBuffer, 0); Assert.assertEquals(concurrentDoubleHistogram, concurrentDoubleHistogram3); System.out.println("Testing encoding of a PackedConcurrentDoubleHistogram:"); targetBuffer = allocator.allocate(packedConcurrentDoubleHistogram.getNeededByteBufferCapacity()); packedConcurrentDoubleHistogram.encodeIntoByteBuffer(targetBuffer); targetBuffer.rewind(); PackedConcurrentDoubleHistogram packedConcurrentDoubleHistogram2 = PackedConcurrentDoubleHistogram.decodeFromByteBuffer(targetBuffer, 0); Assert.assertEquals(packedConcurrentDoubleHistogram, packedConcurrentDoubleHistogram2); targetCompressedBuffer = allocator.allocate(packedConcurrentDoubleHistogram.getNeededByteBufferCapacity()); packedConcurrentDoubleHistogram.encodeIntoCompressedByteBuffer(targetCompressedBuffer); targetCompressedBuffer.rewind(); PackedConcurrentDoubleHistogram packedConcurrentDoubleHistogram3 = PackedConcurrentDoubleHistogram.decodeFromCompressedByteBuffer(targetCompressedBuffer, 0); Assert.assertEquals(packedConcurrentDoubleHistogram, packedConcurrentDoubleHistogram3); } @ParameterizedTest @ValueSource(classes = { Histogram.class, AtomicHistogram.class, ConcurrentHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testSimpleIntegerHistogramEncoding(final Class histoClass) throws Exception { AbstractHistogram histogram = 
constructHistogram(histoClass, 274877906943L, 3); histogram.recordValue(6147); histogram.recordValue(1024); histogram.recordValue(0); ByteBuffer targetBuffer = ByteBuffer.allocate(histogram.getNeededByteBufferCapacity()); histogram.encodeIntoCompressedByteBuffer(targetBuffer); targetBuffer.rewind(); AbstractHistogram decodedHistogram = decodeFromCompressedByteBuffer(histoClass, targetBuffer, 0); Assert.assertEquals(histogram, decodedHistogram); histogram.recordValueWithCount(100, 1L << 4); // Make total count > 2^4 targetBuffer.clear(); histogram.encodeIntoCompressedByteBuffer(targetBuffer); targetBuffer.rewind(); decodedHistogram = decodeFromCompressedByteBuffer(histoClass, targetBuffer, 0); Assert.assertEquals(histogram, decodedHistogram); if (histoClass.equals(ShortCountsHistogram.class)) { return; // Going farther will overflow short counts histogram } histogram.recordValueWithCount(200, 1L << 16); // Make total count > 2^16 targetBuffer.clear(); histogram.encodeIntoCompressedByteBuffer(targetBuffer); targetBuffer.rewind(); decodedHistogram = decodeFromCompressedByteBuffer(histoClass, targetBuffer, 0); Assert.assertEquals(histogram, decodedHistogram); histogram.recordValueWithCount(300, 1L << 20); // Make total count > 2^20 targetBuffer.clear(); histogram.encodeIntoCompressedByteBuffer(targetBuffer); targetBuffer.rewind(); decodedHistogram = decodeFromCompressedByteBuffer(histoClass, targetBuffer, 0); Assert.assertEquals(histogram, decodedHistogram); if (histoClass.equals(IntCountsHistogram.class)) { return; // Going farther will overflow int counts histogram } histogram.recordValueWithCount(400, 1L << 32); // Make total count > 2^32 targetBuffer.clear(); histogram.encodeIntoCompressedByteBuffer(targetBuffer); targetBuffer.rewind(); decodedHistogram = decodeFromCompressedByteBuffer(histoClass, targetBuffer, 0); Assert.assertEquals(histogram, decodedHistogram); histogram.recordValueWithCount(500, 1L << 52); // Make total count > 2^52 targetBuffer.clear(); histogram.encodeIntoCompressedByteBuffer(targetBuffer); targetBuffer.rewind(); decodedHistogram = decodeFromCompressedByteBuffer(histoClass, targetBuffer, 0); Assert.assertEquals(histogram, decodedHistogram); } @ParameterizedTest @ValueSource(classes = { DoubleHistogram.class, SynchronizedDoubleHistogram.class, ConcurrentDoubleHistogram.class, PackedDoubleHistogram.class, PackedConcurrentDoubleHistogram.class, }) public void testSimpleDoubleHistogramEncoding(final Class histoClass) throws Exception { DoubleHistogram histogram = constructDoubleHistogram(histoClass, 100000000L, 3); histogram.recordValue(6.0); histogram.recordValue(1.0); histogram.recordValue(0.0); ByteBuffer targetBuffer = ByteBuffer.allocate(histogram.getNeededByteBufferCapacity()); histogram.encodeIntoCompressedByteBuffer(targetBuffer); targetBuffer.rewind(); DoubleHistogram decodedHistogram = decodeDoubleHistogramFromCompressedByteBuffer(histoClass, targetBuffer, 0); Assert.assertEquals(histogram, decodedHistogram); } @ParameterizedTest @ValueSource(classes = { Histogram.class, ConcurrentHistogram.class, SynchronizedHistogram.class, PackedHistogram.class, PackedConcurrentHistogram.class, IntCountsHistogram.class, ShortCountsHistogram.class, }) public void testResizingHistogramBetweenCompressedEncodings(final Class histoClass) throws Exception { AbstractHistogram histogram = constructHistogram(histoClass, 3); histogram.recordValue(1); ByteBuffer targetCompressedBuffer = ByteBuffer.allocate(histogram.getNeededByteBufferCapacity()); 
histogram.encodeIntoCompressedByteBuffer(targetCompressedBuffer); histogram.recordValue(10000); targetCompressedBuffer = ByteBuffer.allocate(histogram.getNeededByteBufferCapacity()); histogram.encodeIntoCompressedByteBuffer(targetCompressedBuffer); targetCompressedBuffer.rewind(); AbstractHistogram histogram2 = decodeFromCompressedByteBuffer(histoClass, targetCompressedBuffer, 0); Assert.assertEquals(histogram, histogram2); } }
20674
49.67402
165
java
null
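The encoding test code above checks the same round trip for every histogram variant: allocate a buffer sized by getNeededByteBufferCapacity(), encode (plain or compressed), rewind, decode, and assert equality. A minimal sketch of that round trip for the plain Histogram class, assuming an illustrative driver class and sample values (the Histogram methods themselves are the ones exercised above):

import org.HdrHistogram.Histogram;
import java.nio.ByteBuffer;

public class EncodingRoundTripSketch {
    public static void main(String[] args) throws Exception {
        // Track values up to one hour in microseconds, with 3 significant decimal digits.
        Histogram histogram = new Histogram(3600L * 1000 * 1000, 3);
        histogram.recordValue(6147);
        histogram.recordValue(1024);

        // getNeededByteBufferCapacity() is an upper bound; the compressed form always fits.
        ByteBuffer buffer = ByteBuffer.allocate(histogram.getNeededByteBufferCapacity());
        histogram.encodeIntoCompressedByteBuffer(buffer);
        buffer.rewind();

        // Decoding reconstructs a histogram that compares equal to the original.
        Histogram decoded = Histogram.decodeFromCompressedByteBuffer(buffer, 0);
        System.out.println(histogram.equals(decoded)); // expected: true
    }
}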
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/test/java/org/HdrHistogram/HistogramTestUtils.java
package org.HdrHistogram; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.nio.ByteBuffer; public class HistogramTestUtils { static AbstractHistogram constructHistogram(Class c, Object... constructorArgs) { try { Class[] argTypes; if (constructorArgs.length == 1) { if (constructorArgs[0] instanceof AbstractHistogram) { argTypes = new Class[]{AbstractHistogram.class}; } else { argTypes = new Class[]{int.class}; } } else if (constructorArgs.length == 2) { argTypes = new Class[]{long.class, int.class}; } else if (constructorArgs.length == 3) { argTypes = new Class[]{long.class, long.class, int.class}; } else { throw new RuntimeException("Not an expected signature for Histogram constructor"); } return (AbstractHistogram) c.getConstructor(argTypes).newInstance(constructorArgs); } catch (InvocationTargetException ex) { if (ex.getTargetException() instanceof IllegalArgumentException) { throw new IllegalArgumentException(ex.getTargetException().getMessage(), ex); } else { throw new RuntimeException("Re-throwing: ", ex); } } catch (NoSuchMethodException | InstantiationException | IllegalAccessException ex) { throw new RuntimeException("Re-throwing: ", ex); } } static AbstractHistogram decodeFromCompressedByteBuffer(Class c, final ByteBuffer buffer, final long minBarForHighestTrackableValue) { try { Class[] argTypes = {ByteBuffer.class, long.class}; Method m = c.getMethod("decodeFromCompressedByteBuffer", argTypes); return (AbstractHistogram) m.invoke(null, buffer, minBarForHighestTrackableValue); } catch (InvocationTargetException ex) { if (ex.getTargetException() instanceof IllegalArgumentException) { throw new IllegalArgumentException(ex.getTargetException().getMessage(), ex); } else { throw new RuntimeException("Re-throwing: ", ex); } } catch (NoSuchMethodException | IllegalAccessException ex) { throw new RuntimeException("Re-throwing: ", ex); } } static DoubleHistogram constructDoubleHistogram(Class c, Object... 
constructorArgs) { try { Class[] argTypes; if (constructorArgs.length == 1) { if (constructorArgs[0] instanceof DoubleHistogram) { argTypes = new Class[]{DoubleHistogram.class}; } else { argTypes = new Class[]{int.class}; } } else if (constructorArgs.length == 2) { if (constructorArgs[1] instanceof Class) { argTypes = new Class[]{int.class, Class.class}; } else { argTypes = new Class[]{long.class, int.class}; } } else if (constructorArgs.length == 3) { argTypes = new Class[]{long.class, int.class, Class.class}; } else { throw new RuntimeException("Not an expected signature for DoubleHistogram constructor"); } return (DoubleHistogram) c.getDeclaredConstructor(argTypes).newInstance(constructorArgs); } catch (InvocationTargetException ex) { if (ex.getTargetException() instanceof IllegalArgumentException) { throw new IllegalArgumentException(ex.getTargetException().getMessage(), ex); } else { throw new RuntimeException("Re-throwing: ", ex); } } catch (NoSuchMethodException | InstantiationException | IllegalAccessException ex) { throw new RuntimeException("Re-throwing: ", ex); } } static DoubleHistogram decodeDoubleHistogramFromCompressedByteBuffer(Class c, final ByteBuffer buffer, final long minBarForHighestTrackableValue) { try { Class[] argTypes = {ByteBuffer.class, long.class}; Method m = c.getMethod("decodeFromCompressedByteBuffer", argTypes); return (DoubleHistogram) m.invoke(null, buffer, minBarForHighestTrackableValue); } catch (InvocationTargetException ex) { if (ex.getTargetException() instanceof IllegalArgumentException) { throw new IllegalArgumentException(ex.getTargetException().getMessage(), ex); } else { throw new RuntimeException("Re-throwing: ", ex); } } catch (NoSuchMethodException | IllegalAccessException ex) { throw new RuntimeException("Re-throwing: ", ex); } } }
5144
47.537736
104
java
null
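HistogramTestUtils above is what lets the parameterized encoding tests run one body across every histogram class: it selects a constructor by argument count and reflectively invokes the class's static decodeFromCompressedByteBuffer. A minimal sketch of how a caller uses it, assuming an illustrative class name and values (the helper signatures are exactly those defined above):

// The helpers have package-private access, so a caller must live in the same package.
package org.HdrHistogram;

import java.nio.ByteBuffer;

class TestUtilsUsageSketch {
    static void roundTrip(Class<?> histoClass) throws Exception {
        // Two constructor args select the (long highestTrackableValue, int digits) constructor.
        AbstractHistogram histogram =
                HistogramTestUtils.constructHistogram(histoClass, 274877906943L, 3);
        histogram.recordValue(6147);

        ByteBuffer buffer = ByteBuffer.allocate(histogram.getNeededByteBufferCapacity());
        histogram.encodeIntoCompressedByteBuffer(buffer);
        buffer.rewind();

        // Reflectively dispatches to histoClass's static decodeFromCompressedByteBuffer(buffer, 0).
        AbstractHistogram decoded =
                HistogramTestUtils.decodeFromCompressedByteBuffer(histoClass, buffer, 0);
        System.out.println(histogram.equals(decoded)); // expected: true for every supported class
    }
}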
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/test/java/org/HdrHistogram/DumpHistogram.java
package org.HdrHistogram; import java.nio.ByteBuffer; public class DumpHistogram { static String hist1 = "DHISTwAAAAMAAAAAAAAABByEkxQAAABBeNqTaZkszMDAoMwAAcxQmhFC2f+3OwBhHZdgecrONJWDpZuF5zozSzMTUz8bVzcL03VmjkZGlnqWRkY2AGoTC78="; static String hist2 = "DHISTwAAAAMAAAAAAAAAAhyEkxQAAAAieNqTaZkszMDAwMIAAcxQmhFCyf+32wBhMa0VYAIAUp8EHA=="; static void dumpHistogram(String histString) throws Exception { final ByteBuffer buffer = ByteBuffer.wrap(Base64Helper.parseBase64Binary(histString)); DoubleHistogram histogram = DoubleHistogram.decodeFromCompressedByteBuffer(buffer, 0); dumpHistogram(histogram); } static void dumpHistogram(DoubleHistogram histogram) throws Exception { AbstractHistogram iHist = histogram.integerValuesHistogram; System.out.format("digits = %d, min = %12.12g, max = %12.12g\n", histogram.getNumberOfSignificantValueDigits(), histogram.getMinNonZeroValue(), histogram.getMaxValue()); System.out.format("lowest = %12.12g, highest = %12.12g\n", histogram.getCurrentLowestTrackableNonZeroValue(), histogram.getCurrentHighestTrackableValue()); System.out.format("lowest(i) = %d, highest(i) = %d\n", iHist.countsArrayIndex((long)(histogram.getCurrentLowestTrackableNonZeroValue() * histogram.getDoubleToIntegerValueConversionRatio())), iHist.countsArrayIndex((long)(histogram.getCurrentHighestTrackableValue() * histogram.getDoubleToIntegerValueConversionRatio()))); System.out.format("length = %d, imin = %d, imax = %d\n", histogram.integerValuesHistogram.countsArrayLength, histogram.integerValuesHistogram.getMinNonZeroValue(), histogram.integerValuesHistogram.getMaxValue()); System.out.format("index 4644 here translates to %12.12g\n", iHist.valueFromIndex(4644) * histogram.getIntegerToDoubleValueConversionRatio()); for (DoubleHistogramIterationValue val : histogram.recordedValues()) { HistogramIterationValue iVal = val.getIntegerHistogramIterationValue(); int index = iHist.countsArrayIndex(iVal.getValueIteratedTo()); System.out.format("[%d] %12.12g, %12.12g - %12.12g : %d\n",index, val.getValueIteratedTo(), histogram.lowestEquivalentValue(val.getValueIteratedTo()), histogram.highestEquivalentValue(val.getValueIteratedTo()), val.getCountAtValueIteratedTo()); } } public static void main(String[] args) throws Exception { System.out.println("hist1:"); dumpHistogram(hist1); System.out.println("hist2:"); dumpHistogram(hist2); DoubleHistogram h = new DoubleHistogram(3); h.recordValue(0.000999451); System.out.println("h:"); dumpHistogram(h); h.recordValue(h.integerValuesHistogram.valueFromIndex(4644) * h.getIntegerToDoubleValueConversionRatio()); System.out.println("h':"); dumpHistogram(h); h.recordValue(0.0119934); System.out.println("h':"); dumpHistogram(h); } }
3117
53.701754
177
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/test/java/org/HdrHistogram/ConcurrentHistogramTest.java
/** * HistogramTest.java * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import org.junit.Assert; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import java.util.Random; import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicLong; /** * JUnit test for {@link Histogram} */ public class ConcurrentHistogramTest { static final long highestTrackableValue = 3600L * 1000 * 1000 * 1000; // e.g. for 1 hr in usec units volatile boolean doRun = true; volatile boolean waitToGo = true; @Test public void testConcurrentAutoSizedRecording() throws Exception { ConcurrentHistogram histogram = new ConcurrentHistogram(2); ValueRecorder valueRecorders[] = new ValueRecorder[64]; doRun = true; waitToGo = true; for (int i = 0; i < valueRecorders.length; i++) { valueRecorders[i] = new ValueRecorder(histogram); valueRecorders[i].start(); } long sumOfCounts; for (int i = 0; i < 1000; i++) { // Ready: sumOfCounts = 0; for (ValueRecorder v : valueRecorders) { v.readySem.acquire(); sumOfCounts += v.count; } Assert.assertEquals("totalCount must be equal to sum of counts", sumOfCounts, histogram.getTotalCount()); // Set: waitToGo = true; histogram = new ConcurrentHistogram(2); for (ValueRecorder v : valueRecorders) { v.histogram = histogram; v.count = 0; v.setSem.release(); } Thread.sleep(2); // Go! : waitToGo = false; } doRun = false; } static AtomicLong valueRecorderId = new AtomicLong(42); class ValueRecorder extends Thread { ConcurrentHistogram histogram; long count = 0; Semaphore readySem = new Semaphore(0); Semaphore setSem = new Semaphore(0); long id = valueRecorderId.getAndIncrement(); Random random = new Random(id); ValueRecorder(ConcurrentHistogram histogram) { this.histogram = histogram; } public void run() { try { long nextValue = 0; for (int i = 0; i < id; i++) { nextValue = (long) (highestTrackableValue * random.nextDouble()); } while (doRun) { readySem.release(); setSem.acquire(); while (waitToGo) { // wait for doRun to be set. } histogram.resize(nextValue); histogram.recordValue(nextValue); count++; } } catch (InterruptedException e) { throw new RuntimeException(e); } } } }
3207
28.431193
104
java
null
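ConcurrentHistogramTest above drives 64 recording threads against a shared, auto-resizing histogram while repeatedly swapping it out. A much smaller sketch of the basic multi-writer usage it relies on, assuming illustrative thread counts and values (the constructor and methods are those used in the test):

import org.HdrHistogram.ConcurrentHistogram;

public class ConcurrentRecordingSketch {
    public static void main(String[] args) throws InterruptedException {
        // Auto-sized histogram with 2 significant digits; recordValue is safe to call concurrently.
        final ConcurrentHistogram histogram = new ConcurrentHistogram(2);

        Thread[] writers = new Thread[4];
        for (int i = 0; i < writers.length; i++) {
            final long base = (i + 1) * 1000L;
            writers[i] = new Thread(() -> {
                for (int v = 0; v < 10000; v++) {
                    histogram.recordValue(base + v); // storage resizes as larger values arrive
                }
            });
            writers[i].start();
        }
        for (Thread writer : writers) {
            writer.join();
        }

        // The total count reflects every value recorded by every writer thread.
        System.out.println(histogram.getTotalCount()); // expected: 40000
    }
}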
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/test/java/org/HdrHistogram/RecorderTest.java
/** * HistogramTest.java * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import org.HdrHistogram.packedarray.PackedArrayRecorder; import org.HdrHistogram.packedarray.PackedLongArray; import org.junit.Assert; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.function.Executable; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; /** * JUnit test for {@link Histogram} */ public class RecorderTest { static final long highestTrackableValue = 3600L * 1000 * 1000; // e.g. for 1 hr in usec units static final int packedArrayLength = 300 * 10000 * 2; static final int nonPackedPhysicalLength = 128 * 1024; @ParameterizedTest @ValueSource(booleans = {false, true}) public void testIntervalRecording(boolean usePacked) throws Exception { Histogram histogram = new Histogram(highestTrackableValue, 3); DoubleHistogram doubleHistogram = new DoubleHistogram(highestTrackableValue * 1000, 3); Recorder recorder1 = new Recorder(3, usePacked); Recorder recorder2 = new Recorder(3, usePacked); DoubleRecorder doubleRecorder1 = new DoubleRecorder(3, usePacked); DoubleRecorder doubleRecorder2 = new DoubleRecorder(3, usePacked); PackedLongArray array1 = new PackedLongArray(packedArrayLength); PackedArrayRecorder arrayRecorder1 = new PackedArrayRecorder(packedArrayLength); for (int i = 0; i < 10000; i++) { histogram.recordValue(3000 * i); recorder1.recordValue(3000 * i); recorder2.recordValue(3000 * i); doubleHistogram.recordValue(5000 * i); doubleRecorder1.recordValue(5000 * i); doubleRecorder2.recordValue(5000 * i); doubleHistogram.recordValue(0.001); // Makes some internal shifts happen. doubleRecorder1.recordValue(0.001); // Makes some internal shifts happen. doubleRecorder2.recordValue(0.001); // Makes some internal shifts happen. 
array1.increment(300 * i); arrayRecorder1.increment(300 * i); } Histogram histogram2 = recorder1.getIntervalHistogram(); Assert.assertEquals(histogram, histogram2); recorder2.getIntervalHistogramInto(histogram2); Assert.assertEquals(histogram, histogram2); DoubleHistogram doubleHistogram2 = doubleRecorder1.getIntervalHistogram(); Assert.assertEquals(doubleHistogram, doubleHistogram2); doubleRecorder2.getIntervalHistogramInto(doubleHistogram2); Assert.assertEquals(doubleHistogram, doubleHistogram2); PackedLongArray array2 = arrayRecorder1.getIntervalArray(); boolean arraysAreEqual = array1.equals(array2); Assert.assertEquals(arraysAreEqual, true); for (int i = 0; i < 5000; i++) { histogram.recordValue(3000 * i); recorder1.recordValue(3000 * i); recorder2.recordValue(3000 * i); doubleHistogram.recordValue(5000 * i); doubleRecorder1.recordValue(5000 * i); doubleRecorder2.recordValue(5000 * i); doubleHistogram.recordValue(0.001); doubleRecorder1.recordValue(0.001); doubleRecorder2.recordValue(0.001); array1.increment(300 * i); arrayRecorder1.increment(300 * i); } Histogram histogram3 = recorder1.getIntervalHistogram(); Histogram sumHistogram = histogram2.copy(); sumHistogram.add(histogram3); Assert.assertEquals(histogram, sumHistogram); DoubleHistogram doubleHistogram3 = doubleRecorder1.getIntervalHistogram(); DoubleHistogram sumDoubleHistogram = doubleHistogram2.copy(); sumDoubleHistogram.add(doubleHistogram3); Assert.assertEquals(doubleHistogram, sumDoubleHistogram); PackedLongArray array3 = arrayRecorder1.getIntervalArray(); PackedLongArray sumArray = array2.copy(); sumArray.add(array3); arraysAreEqual = array1.equals(sumArray); Assert.assertEquals(arraysAreEqual, true); recorder2.getIntervalHistogram(); doubleRecorder2.getIntervalHistogram(); for (int i = 5000; i < 10000; i++) { histogram.recordValue(3000 * i); recorder1.recordValue(3000 * i); recorder2.recordValue(3000 * i); doubleHistogram.recordValue(5000 * i); doubleRecorder1.recordValue(5000 * i); doubleRecorder2.recordValue(5000 * i); doubleHistogram.recordValue(0.001); doubleRecorder1.recordValue(0.001); doubleRecorder2.recordValue(0.001); array1.increment(300 * i); arrayRecorder1.increment(300 * i); } Histogram histogram4 = recorder1.getIntervalHistogram(); histogram4.add(histogram3); Assert.assertEquals(histogram4, histogram2); recorder2.getIntervalHistogramInto(histogram4); histogram4.add(histogram3); Assert.assertEquals(histogram4, histogram2); DoubleHistogram doubleHistogram4 = doubleRecorder1.getIntervalHistogram(); doubleHistogram4.add(doubleHistogram3); Assert.assertEquals(doubleHistogram4, doubleHistogram2); doubleHistogram4.reset(); doubleRecorder2.getIntervalHistogramInto(doubleHistogram4); doubleHistogram4.add(doubleHistogram3); Assert.assertEquals(doubleHistogram4, doubleHistogram2); PackedLongArray array4 = arrayRecorder1.getIntervalArray(); array4.add(array3); arraysAreEqual = array4.equals(array2); Assert.assertEquals(arraysAreEqual, true); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testSimpleAutosizingRecorder(boolean usePacked) throws Exception { Recorder recorder = new Recorder(3, usePacked); Histogram histogram = recorder.getIntervalHistogram(); } // PackedArrayRecorder recycling tests: @ParameterizedTest @ValueSource(booleans = {false, true}) public void testPARecycling(boolean usePacked) throws Exception { PackedArrayRecorder recorder = new PackedArrayRecorder(packedArrayLength, usePacked ? 
0 : nonPackedPhysicalLength); PackedLongArray arrayA = recorder.getIntervalArray(); PackedLongArray arrayB = recorder.getIntervalArray(arrayA); PackedLongArray arrayC = recorder.getIntervalArray(arrayB, true); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testPARecyclingContainingClassEnforcement(final boolean usePacked) throws Exception { Assertions.assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() throws Throwable { PackedLongArray arrayToRecycle = new PackedLongArray(packedArrayLength, usePacked ? 0 : nonPackedPhysicalLength); PackedArrayRecorder recorder = new PackedArrayRecorder(packedArrayLength, usePacked ? 0 : nonPackedPhysicalLength); PackedLongArray arrayA = recorder.getIntervalArray(arrayToRecycle); } }); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testPARecyclingContainingInstanceEnforcement(final boolean usePacked) throws Exception { Assertions.assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() throws Throwable { PackedArrayRecorder recorder1 = new PackedArrayRecorder(packedArrayLength, usePacked ? 0 : nonPackedPhysicalLength); PackedArrayRecorder recorder2 = new PackedArrayRecorder(packedArrayLength, usePacked ? 0 : nonPackedPhysicalLength); PackedLongArray arrayToRecycle = recorder1.getIntervalArray(); PackedLongArray arrayToRecycle2 = recorder2.getIntervalArray(arrayToRecycle); } }); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testPARecyclingContainingInstanceNonEnforcement(final boolean usePacked) throws Exception { PackedArrayRecorder recorder1 = new PackedArrayRecorder(packedArrayLength, usePacked ? 0 : nonPackedPhysicalLength); PackedArrayRecorder recorder2 = new PackedArrayRecorder(packedArrayLength, usePacked ? 
0 : nonPackedPhysicalLength); PackedLongArray arrayToRecycle = recorder1.getIntervalArray(); PackedLongArray arrayToRecycle2 = recorder2.getIntervalArray(arrayToRecycle, false); } // Recorder Recycling tests: @ParameterizedTest @ValueSource(booleans = {false, true}) public void testRecycling(boolean usePacked) throws Exception { Recorder recorder = new Recorder(3, usePacked); Histogram histogramA = recorder.getIntervalHistogram(); Histogram histogramB = recorder.getIntervalHistogram(histogramA); Histogram histogramC = recorder.getIntervalHistogram(histogramB, true); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testRecyclingContainingClassEnforcement(final boolean usePacked) throws Exception { Assertions.assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() throws Throwable { Histogram histToRecycle = new Histogram(3); Recorder recorder = new Recorder(3, usePacked); Histogram histogramA = recorder.getIntervalHistogram(histToRecycle); } }); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testRecyclingContainingInstanceEnforcement(final boolean usePacked) throws Exception { Assertions.assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() throws Throwable { Recorder recorder1 = new Recorder(3, usePacked); Recorder recorder2 = new Recorder(3, usePacked); Histogram histToRecycle = recorder1.getIntervalHistogram(); Histogram histToRecycle2 = recorder2.getIntervalHistogram(histToRecycle); } }); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testRecyclingContainingInstanceNonEnforcement(final boolean usePacked) throws Exception { Recorder recorder1 = new Recorder(3, usePacked); Recorder recorder2 = new Recorder(3, usePacked); Histogram histToRecycle = recorder1.getIntervalHistogram(); Histogram histToRecycle2 = recorder2.getIntervalHistogram(histToRecycle, false); } // SingleWriterRecorder Recycling tests: @ParameterizedTest @ValueSource(booleans = {false, true}) public void testSWRecycling(final boolean usePacked) throws Exception { SingleWriterRecorder recorder = new SingleWriterRecorder(3, usePacked); Histogram histogramA = recorder.getIntervalHistogram(); Histogram histogramB = recorder.getIntervalHistogram(histogramA); Histogram histogramC = recorder.getIntervalHistogram(histogramB, true); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testSWRecyclingContainingClassEnforcement(final boolean usePacked) throws Exception { Assertions.assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() throws Throwable { Histogram histToRecycle = new Histogram(3); SingleWriterRecorder recorder = new SingleWriterRecorder(3, usePacked); Histogram histogramA = recorder.getIntervalHistogram(histToRecycle); } }); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testSWRecyclingContainingInstanceEnforcement(final boolean usePacked) throws Exception { Assertions.assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() throws Throwable { SingleWriterRecorder recorder1 = new SingleWriterRecorder(3, usePacked); SingleWriterRecorder recorder2 = new SingleWriterRecorder(3, usePacked); Histogram histToRecycle = recorder1.getIntervalHistogram(); Histogram histToRecycle2 = recorder2.getIntervalHistogram(histToRecycle); } }); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void 
testSWRecyclingContainingInstanceNonEnforcement(final boolean usePacked) throws Exception { SingleWriterRecorder recorder1 = new SingleWriterRecorder(3, usePacked); SingleWriterRecorder recorder2 = new SingleWriterRecorder(3, usePacked); Histogram histToRecycle = recorder1.getIntervalHistogram(); Histogram histToRecycle2 = recorder2.getIntervalHistogram(histToRecycle, false); } // DoubleRecorder Recycling tests: @ParameterizedTest @ValueSource(booleans = {false, true}) public void testDRecycling(final boolean usePacked) throws Exception { DoubleRecorder recorder = new DoubleRecorder(3, usePacked); DoubleHistogram histogramA = recorder.getIntervalHistogram(); DoubleHistogram histogramB = recorder.getIntervalHistogram(histogramA); DoubleHistogram histogramC = recorder.getIntervalHistogram(histogramB, true); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testDRecyclingContainingClassEnforcement(final boolean usePacked) throws Exception { Assertions.assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() throws Throwable { DoubleHistogram histToRecycle = new DoubleHistogram(3); DoubleRecorder recorder = new DoubleRecorder(3, usePacked); DoubleHistogram histogramA = recorder.getIntervalHistogram(histToRecycle); } }); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testDRecyclingContainingInstanceEnforcement(final boolean usePacked) throws Exception { Assertions.assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() throws Throwable { DoubleRecorder recorder1 = new DoubleRecorder(3, usePacked); DoubleRecorder recorder2 = new DoubleRecorder(3, usePacked); DoubleHistogram histToRecycle = recorder1.getIntervalHistogram(); DoubleHistogram histToRecycle2 = recorder2.getIntervalHistogram(histToRecycle); } }); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testDRecyclingContainingInstanceNonEnforcement(final boolean usePacked) throws Exception { DoubleRecorder recorder1 = new DoubleRecorder(3, usePacked); DoubleRecorder recorder2 = new DoubleRecorder(3, usePacked); DoubleHistogram histToRecycle = recorder1.getIntervalHistogram(); DoubleHistogram histToRecycle2 = recorder2.getIntervalHistogram(histToRecycle, false); } // SingleWriterDoubleRecorder Recycling tests: @ParameterizedTest @ValueSource(booleans = {false, true}) public void testSWDRecycling(final boolean usePacked) throws Exception { SingleWriterDoubleRecorder recorder = new SingleWriterDoubleRecorder(3, usePacked); DoubleHistogram histogramA = recorder.getIntervalHistogram(); DoubleHistogram histogramB = recorder.getIntervalHistogram(histogramA); DoubleHistogram histogramC = recorder.getIntervalHistogram(histogramB, true); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testSWDRecyclingContainingClassEnforcement(final boolean usePacked) throws Exception { Assertions.assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() throws Throwable { DoubleHistogram histToRecycle = new DoubleHistogram(3); SingleWriterDoubleRecorder recorder = new SingleWriterDoubleRecorder(3, usePacked); DoubleHistogram histogramA = recorder.getIntervalHistogram(histToRecycle); } }); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testSWDRecyclingContainingInstanceEnforcement(final boolean usePacked) throws Exception { Assertions.assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() throws 
Throwable { SingleWriterDoubleRecorder recorder1 = new SingleWriterDoubleRecorder(3, usePacked); SingleWriterDoubleRecorder recorder2 = new SingleWriterDoubleRecorder(3, usePacked); DoubleHistogram histToRecycle = recorder1.getIntervalHistogram(); DoubleHistogram histToRecycle2 = recorder2.getIntervalHistogram(histToRecycle); } }); } @ParameterizedTest @ValueSource(booleans = {false, true}) public void testSWDRecyclingContainingInstanceNonEnforcement(final boolean usePacked) throws Exception { SingleWriterDoubleRecorder recorder1 = new SingleWriterDoubleRecorder(3, usePacked); SingleWriterDoubleRecorder recorder2 = new SingleWriterDoubleRecorder(3, usePacked); DoubleHistogram histToRecycle = recorder1.getIntervalHistogram(); DoubleHistogram histToRecycle2 = recorder2.getIntervalHistogram(histToRecycle, false); } }
18701
45.755
140
java
null
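RecorderTest above verifies that interval histograms taken from a Recorder add back up to a directly recorded histogram, and that a returned interval histogram can be recycled on the next call. A minimal sketch of that interval sampling pattern, assuming an illustrative driver and values (the constructor and methods are the ones used in the test):

import org.HdrHistogram.Histogram;
import org.HdrHistogram.Recorder;

public class IntervalRecordingSketch {
    public static void main(String[] args) {
        // 3 significant digits, non-packed internal storage, as in the usePacked == false test runs.
        Recorder recorder = new Recorder(3, false);

        for (int i = 0; i < 10000; i++) {
            recorder.recordValue(3000L * i);
        }
        // Swaps in a fresh histogram and returns the one holding the interval recorded so far.
        Histogram interval = recorder.getIntervalHistogram();
        System.out.println(interval.getTotalCount()); // expected: 10000

        for (int i = 0; i < 5000; i++) {
            recorder.recordValue(3000L * i);
        }
        // Recycling: the previously returned histogram is reset and reused by the recorder.
        Histogram nextInterval = recorder.getIntervalHistogram(interval);
        System.out.println(nextInterval.getTotalCount()); // expected: 5000
    }
}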
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/test/java/org/HdrHistogram/HistogramDataAccessTest.java
/** * HistogramDataAccessTest.java * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import org.junit.Assert; import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.List; /** * JUnit test for {@link org.HdrHistogram.Histogram} */ public class HistogramDataAccessTest { static final long highestTrackableValue = 3600L * 1000 * 1000; // 1 hour in usec units static final int numberOfSignificantValueDigits = 3; // Maintain at least 3 decimal points of accuracy static final Histogram histogram; static final Histogram scaledHistogram; static final Histogram rawHistogram; static final Histogram scaledRawHistogram; static final Histogram postCorrectedHistogram; static final Histogram postCorrectedScaledHistogram; static { histogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); scaledHistogram = new Histogram(1000, highestTrackableValue * 512, numberOfSignificantValueDigits); rawHistogram = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); scaledRawHistogram = new Histogram(1000, highestTrackableValue * 512, numberOfSignificantValueDigits); // Log hypothetical scenario: 100 seconds of "perfect" 1msec results, sampled // 100 times per second (10,000 results), followed by a 100 second pause with // a single (100 second) recorded result. Recording is done indicating an expected // interval between samples of 10 msec: for (int i = 0; i < 10000; i++) { histogram.recordValueWithExpectedInterval(1000 /* 1 msec */, 10000 /* 10 msec expected interval */); scaledHistogram.recordValueWithExpectedInterval(1000 * 512 /* 1 msec */, 10000 * 512 /* 10 msec expected interval */); rawHistogram.recordValue(1000 /* 1 msec */); scaledRawHistogram.recordValue(1000 * 512/* 1 msec */); } histogram.recordValueWithExpectedInterval(100000000L /* 100 sec */, 10000 /* 10 msec expected interval */); scaledHistogram.recordValueWithExpectedInterval(100000000L * 512 /* 100 sec */, 10000 * 512 /* 10 msec expected interval */); rawHistogram.recordValue(100000000L /* 100 sec */); scaledRawHistogram.recordValue(100000000L * 512 /* 100 sec */); postCorrectedHistogram = rawHistogram.copyCorrectedForCoordinatedOmission(10000 /* 10 msec expected interval */); postCorrectedScaledHistogram = scaledRawHistogram.copyCorrectedForCoordinatedOmission(10000 * 512 /* 10 msec expected interval */); } @Test public void testScalingEquivalence() { Assert.assertEquals("averages should be equivalent", histogram.getMean() * 512, scaledHistogram.getMean(), scaledHistogram.getMean() * 0.000001); Assert.assertEquals("total count should be the same", histogram.getTotalCount(), scaledHistogram.getTotalCount()); Assert.assertEquals("99%'iles should be equivalent", scaledHistogram.highestEquivalentValue(histogram.getValueAtPercentile(99.0) * 512), scaledHistogram.highestEquivalentValue(scaledHistogram.getValueAtPercentile(99.0))); Assert.assertEquals("Max should be equivalent", scaledHistogram.highestEquivalentValue(histogram.getMaxValue() * 512), scaledHistogram.getMaxValue()); // Same for post-corrected: Assert.assertEquals("averages should be equivalent", histogram.getMean() * 512, scaledHistogram.getMean(), scaledHistogram.getMean() * 0.000001); Assert.assertEquals("total count should be the same", postCorrectedHistogram.getTotalCount(), postCorrectedScaledHistogram.getTotalCount()); Assert.assertEquals("99%'iles should be equivalent", 
postCorrectedHistogram.lowestEquivalentValue(postCorrectedHistogram.getValueAtPercentile(99.0)) * 512, postCorrectedScaledHistogram.lowestEquivalentValue(postCorrectedScaledHistogram.getValueAtPercentile(99.0))); Assert.assertEquals("Max should be equivalent", postCorrectedScaledHistogram.highestEquivalentValue(postCorrectedHistogram.getMaxValue() * 512), postCorrectedScaledHistogram.getMaxValue()); } @Test public void testPreVsPostCorrectionValues() { // Loop both ways (one would be enough, but good practice just for fun: Assert.assertEquals("pre and post corrected count totals ", histogram.getTotalCount(), postCorrectedHistogram.getTotalCount()); // The following comparison loops would have worked in a perfect accuracy world, but since post // correction is done based on the value extracted from the bucket, and the during-recording is done // based on the actual (not pixelized) value, there will be subtle differences due to roundoffs: // for (HistogramIterationValue v : histogram.allValues()) { // long preCorrectedCount = v.getCountAtValueIteratedTo(); // long postCorrectedCount = postCorrectedHistogram.getCountAtValue(v.getValueIteratedTo()); // Assert.assertEquals("pre and post corrected count at value " + v.getValueIteratedTo(), // preCorrectedCount, postCorrectedCount); // } // // for (HistogramIterationValue v : postCorrectedHistogram.allValues()) { // long preCorrectedCount = v.getCountAtValueIteratedTo(); // long postCorrectedCount = histogram.getCountAtValue(v.getValueIteratedTo()); // Assert.assertEquals("pre and post corrected count at value " + v.getValueIteratedTo(), // preCorrectedCount, postCorrectedCount); // } } @Test public void testGetTotalCount() throws Exception { // The overflow value should count in the total count: Assert.assertEquals("Raw total count is 10,001", 10001L, rawHistogram.getTotalCount()); Assert.assertEquals("Total count is 20,000", 20000L, histogram.getTotalCount()); } @Test public void testGetMaxValue() throws Exception { Assert.assertTrue( histogram.valuesAreEquivalent(100L * 1000 * 1000, histogram.getMaxValue())); } @Test public void testGetMinValue() throws Exception { Assert.assertTrue( histogram.valuesAreEquivalent(1000, histogram.getMinValue())); } @Test public void testGetMean() throws Exception { double expectedRawMean = ((10000.0 * 1000) + (1.0 * 100000000))/10001; /* direct avg. of raw results */ double expectedMean = (1000.0 + 50000000.0)/2; /* avg. 1 msec for half the time, and 50 sec for other half */ // We expect to see the mean to be accurate to ~3 decimal points (~0.1%): Assert.assertEquals("Raw mean is " + expectedRawMean + " +/- 0.1%", expectedRawMean, rawHistogram.getMean(), expectedRawMean * 0.001); Assert.assertEquals("Mean is " + expectedMean + " +/- 0.1%", expectedMean, histogram.getMean(), expectedMean * 0.001); } @Test public void testGetStdDeviation() throws Exception { double expectedRawMean = ((10000.0 * 1000) + (1.0 * 100000000))/10001; /* direct avg. of raw results */ double expectedRawStdDev = Math.sqrt( ((10000.0 * Math.pow((1000.0 - expectedRawMean), 2)) + Math.pow((100000000.0 - expectedRawMean), 2)) / 10001); double expectedMean = (1000.0 + 50000000.0)/2; /* avg. 
1 msec for half the time, and 50 sec for other half */ double expectedSquareDeviationSum = 10000 * Math.pow((1000.0 - expectedMean), 2); for (long value = 10000; value <= 100000000; value += 10000) { expectedSquareDeviationSum += Math.pow((value - expectedMean), 2); } double expectedStdDev = Math.sqrt(expectedSquareDeviationSum / 20000); // We expect to see the standard deviations to be accurate to ~3 decimal points (~0.1%): Assert.assertEquals("Raw standard deviation is " + expectedRawStdDev + " +/- 0.1%", expectedRawStdDev, rawHistogram.getStdDeviation(), expectedRawStdDev * 0.001); Assert.assertEquals("Standard deviation is " + expectedStdDev + " +/- 0.1%", expectedStdDev, histogram.getStdDeviation(), expectedStdDev * 0.001); } @Test public void testGetValueAtPercentileExamples() throws Exception { Histogram hist = new Histogram(3600000000L, 3); hist.recordValue(1); hist.recordValue(2); Assert.assertEquals("50.0%'ile is 1", 1, hist.getValueAtPercentile(50.0)); Assert.assertEquals("50.00000000000001%'ile is 1", 1, hist.getValueAtPercentile(50.00000000000001)); Assert.assertEquals("50.0000000000001%'ile is 2", 2, hist.getValueAtPercentile(50.0000000000001)); hist.recordValue(2); hist.recordValue(2); hist.recordValue(2); long val = hist.getValueAtPercentile(25); Assert.assertEquals("25%'ile is 2", 2, hist.getValueAtPercentile(25)); Assert.assertEquals("30%'ile is 2", 2, hist.getValueAtPercentile(30)); } @Test public void testGetValueAtPercentile() throws Exception { Assert.assertEquals("raw 30%'ile is 1 msec +/- 0.1%", 1000.0, (double) rawHistogram.getValueAtPercentile(30.0), 1000.0 * 0.001); Assert.assertEquals("raw 99%'ile is 1 msec +/- 0.1%", 1000.0, (double) rawHistogram.getValueAtPercentile(99.0), 1000.0 * 0.001); Assert.assertEquals("raw 99.99%'ile is 1 msec +/- 0.1%", 1000.0, (double) rawHistogram.getValueAtPercentile(99.99) , 1000.0 * 0.001); Assert.assertEquals("raw 99.999%'ile is 100 sec +/- 0.1%", 100000000.0, (double) rawHistogram.getValueAtPercentile(99.999), 100000000.0 * 0.001); Assert.assertEquals("raw 100%'ile is 100 sec +/- 0.1%", 100000000.0, (double) rawHistogram.getValueAtPercentile(100.0), 100000000.0 * 0.001); Assert.assertEquals("30%'ile is 1 msec +/- 0.1%", 1000.0, (double) histogram.getValueAtPercentile(30.0), 1000.0 * 0.001); Assert.assertEquals("50%'ile is 1 msec +/- 0.1%", 1000.0, (double) histogram.getValueAtPercentile(50.0), 1000.0 * 0.001); Assert.assertEquals("75%'ile is 50 sec +/- 0.1%", 50000000.0, (double) histogram.getValueAtPercentile(75.0), 50000000.0 * 0.001); Assert.assertEquals("90%'ile is 80 sec +/- 0.1%", 80000000.0, (double) histogram.getValueAtPercentile(90.0), 80000000.0 * 0.001); Assert.assertEquals("99%'ile is 98 sec +/- 0.1%", 98000000.0, (double) histogram.getValueAtPercentile(99.0), 98000000.0 * 0.001); Assert.assertEquals("99.999%'ile is 100 sec +/- 0.1%", 100000000.0, (double) histogram.getValueAtPercentile(99.999), 100000000.0 * 0.001); Assert.assertEquals("100%'ile is 100 sec +/- 0.1%", 100000000.0, (double) histogram.getValueAtPercentile(100.0), 100000000.0 * 0.001); } @Test public void testGetValueAtPercentileForLargeHistogram() { long largestValue = 1000000000000L; Histogram h = new Histogram(largestValue, 5); h.recordValue(largestValue); Assert.assertTrue(h.getValueAtPercentile(100.0) > 0); } @Test public void testGetPercentileAtOrBelowValue() throws Exception { Assert.assertEquals("Raw percentile at or below 5 msec is 99.99% +/- 0.0001", 99.99, rawHistogram.getPercentileAtOrBelowValue(5000), 0.0001); 
Assert.assertEquals("Percentile at or below 5 msec is 50% +/- 0.0001%", 50.0, histogram.getPercentileAtOrBelowValue(5000), 0.0001); Assert.assertEquals("Percentile at or below 100 sec is 100% +/- 0.0001%", 100.0, histogram.getPercentileAtOrBelowValue(100000000L), 0.0001); } @Test public void testGetCountBetweenValues() throws Exception { Assert.assertEquals("Count of raw values between 1 msec and 1 msec is 1", 10000, rawHistogram.getCountBetweenValues(1000L, 1000L)); Assert.assertEquals("Count of raw values between 5 msec and 150 sec is 1", 1, rawHistogram.getCountBetweenValues(5000L, 150000000L)); Assert.assertEquals("Count of values between 5 msec and 150 sec is 10,000", 10000, histogram.getCountBetweenValues(5000L, 150000000L)); } @Test public void testGetCountAtValue() throws Exception { Assert.assertEquals("Count of raw values at 10 msec is 0", 0, rawHistogram.getCountBetweenValues(10000L, 10010L)); Assert.assertEquals("Count of values at 10 msec is 0", 1, histogram.getCountBetweenValues(10000L, 10010L)); Assert.assertEquals("Count of raw values at 1 msec is 10,000", 10000, rawHistogram.getCountAtValue(1000L)); Assert.assertEquals("Count of values at 1 msec is 10,000", 10000, histogram.getCountAtValue(1000L)); } @Test public void testPercentiles() throws Exception { for (HistogramIterationValue v : histogram.percentiles(5 /* ticks per half */)) { Assert.assertEquals("Value at Iterated-to Percentile is the same as the matching getValueAtPercentile():\n" + "getPercentileLevelIteratedTo = " + v.getPercentileLevelIteratedTo() + "\ngetValueIteratedTo = " + v.getValueIteratedTo() + "\ngetValueIteratedFrom = " + v.getValueIteratedFrom() + "\ngetValueAtPercentile(getPercentileLevelIteratedTo()) = " + histogram.getValueAtPercentile(v.getPercentileLevelIteratedTo()) + "\ngetPercentile = " + v.getPercentile() + "\ngetValueAtPercentile(getPercentile())" + histogram.getValueAtPercentile(v.getPercentile()) + "\nequivalent at getValueAtPercentile(v.getPercentileLevelIteratedTo()) = " + histogram.highestEquivalentValue(histogram.getValueAtPercentile(v.getPercentileLevelIteratedTo())) + "\nequivalent at getValueAtPercentile(v.getPercentile()) = " + histogram.highestEquivalentValue(histogram.getValueAtPercentile(v.getPercentile())) + "\nindex at v.getValueIteratedTo() = " + histogram.countsArrayIndex(v.getValueIteratedTo()) + "\nindex at getValueAtPercentile(v.getPercentileLevelIteratedTo()) = " + histogram.countsArrayIndex(histogram.getValueAtPercentile(v.getPercentileLevelIteratedTo())) + "\nindex at getValueAtPercentile(v.getPercentile()) = " + histogram.countsArrayIndex(histogram.getValueAtPercentile(v.getPercentile())) + "\nindex at getValueAtPercentile(v.getPercentile() - 0.0000000001) = " + histogram.countsArrayIndex(histogram.getValueAtPercentile(v.getPercentile() - 0.0000000001)) + "\ncount for (long)(((v.getPercentile() / 100.0) * histogram.getTotalCount()) + 0.5) = " + (long)(((v.getPercentile() / 100.0) * histogram.getTotalCount()) + 0.5) + "\n math for ((v.getPercentile() / 100.0) * histogram.getTotalCount()) = " + ((v.getPercentile() / 100.0) * histogram.getTotalCount()) + "\n math for (((v.getPercentile() / 100.0) * histogram.getTotalCount()) + 0.5) = " + (((v.getPercentile() / 100.0) * histogram.getTotalCount()) + 0.5) + "\n math for (long)(((v.getPercentile() / 100.0) * histogram.getTotalCount()) + 0.5) = " + (long)(((v.getPercentile() / 100.0) * histogram.getTotalCount()) + 0.5) + "\ncount for (long)(Math.ceil((v.getPercentile() / 100.0) * histogram.getTotalCount())) = " + 
(long)(Math.ceil((v.getPercentile() / 100.0) * histogram.getTotalCount())) + "\n math for Math.ceil((v.getPercentile() / 100.0) * histogram.getTotalCount()) = " + Math.ceil((v.getPercentile() / 100.0) * histogram.getTotalCount()) + "\ntotalCountToThisValue = " + v.getTotalCountToThisValue() + "\ncountAtValueIteratedTo = " + v.getCountAtValueIteratedTo() + "\ncount at index at getValueAtPercentile(v.getPercentileLevelIteratedTo()) = " + histogram.getCountAtIndex(histogram.countsArrayIndex(histogram.getValueAtPercentile(v.getPercentileLevelIteratedTo()))) + "\ncount at index at getValueAtPercentile(v.getPercentile()) = " + histogram.getCountAtIndex(histogram.countsArrayIndex(histogram.getValueAtPercentile(v.getPercentile()))) + "\n" , v.getValueIteratedTo(), histogram.highestEquivalentValue(histogram.getValueAtPercentile(v.getPercentile()))); } } @Test public void testLinearBucketValues() throws Exception { int index = 0; // Note that using linear buckets should work "as expected" as long as the number of linear buckets // is lower than the resolution level determined by largestValueWithSingleUnitResolution // (2000 in this case). Above that count, some of the linear buckets can end up rounded up in size // (to the nearest local resolution unit level), which can result in a smaller number of buckets that // expected covering the range. // Iterate raw data using linear buckets of 100 msec each. for (HistogramIterationValue v : rawHistogram.linearBucketValues(100000)) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 0) { Assert.assertEquals("Raw Linear 100 msec bucket # 0 added a count of 10000", 10000, countAddedInThisBucket); } else if (index == 999) { Assert.assertEquals("Raw Linear 100 msec bucket # 999 added a count of 1", 1, countAddedInThisBucket); } else { Assert.assertEquals("Raw Linear 100 msec bucket # " + index + " added a count of 0", 0 , countAddedInThisBucket); } index++; } Assert.assertEquals(1000, index); index = 0; long totalAddedCounts = 0; // Iterate data using linear buckets of 10 msec each. for (HistogramIterationValue v : histogram.linearBucketValues(10000)) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 0) { Assert.assertEquals("Linear 1 sec bucket # 0 [" + v.getValueIteratedFrom() + ".." + v.getValueIteratedTo() + "] added a count of 10000", 10000, countAddedInThisBucket); } // Because value resolution is low enough (3 digits) that multiple linear buckets will end up // residing in a single value-equivalent range, some linear buckets will have counts of 2 or // more, and some will have 0 (when the first bucket in the equivalent range was the one that // got the total count bump). // However, we can still verify the sum of counts added in all the buckets... totalAddedCounts += v.getCountAddedInThisIterationStep(); index++; } Assert.assertEquals("There should be 10000 linear buckets of size 10000 usec between 0 and 100 sec.", 10000, index); Assert.assertEquals("Total added counts should be 20000", 20000, totalAddedCounts); index = 0; totalAddedCounts = 0; // Iterate data using linear buckets of 1 msec each. for (HistogramIterationValue v : histogram.linearBucketValues(1000)) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 1) { Assert.assertEquals("Linear 1 sec bucket # 0 [" + v.getValueIteratedFrom() + ".." 
+ v.getValueIteratedTo() + "] added a count of 10000", 10000, countAddedInThisBucket); } // Because value resolution is low enough (3 digits) that multiple linear buckets will end up // residing in a single value-equivalent range, some linear buckets will have counts of 2 or // more, and some will have 0 (when the first bucket in the equivalent range was the one that // got the total count bump). // However, we can still verify the sum of counts added in all the buckets... totalAddedCounts += v.getCountAddedInThisIterationStep(); index++; } // You may ask "why 100007 and not 100000?" for the value below? The answer is that at this fine // a linear stepping resolution, the final populated sub-bucket (at 100 seconds with 3 decimal // point resolution) is larger than our liner stepping, and holds more than one linear 1 msec // step in it. // Since we only know we're done with linear iteration when the next iteration step will step // out of the last populated bucket, there is not way to tell if the iteration should stop at // 100000 or 100007 steps. The proper thing to do is to run to the end of the sub-bucket quanta... Assert.assertEquals("There should be 100007 linear buckets of size 1000 usec between 0 and 100 sec.", 100007, index); Assert.assertEquals("Total added counts should be 20000", 20000, totalAddedCounts); } @Test public void testLogarithmicBucketValues() throws Exception { int index = 0; // Iterate raw data using logarithmic buckets starting at 10 msec. for (HistogramIterationValue v : rawHistogram.logarithmicBucketValues(10000, 2)) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 0) { Assert.assertEquals("Raw Logarithmic 10 msec bucket # 0 added a count of 10000", 10000, countAddedInThisBucket); } else if (index == 14) { Assert.assertEquals("Raw Logarithmic 10 msec bucket # 14 added a count of 1", 1, countAddedInThisBucket); } else { Assert.assertEquals("Raw Logarithmic 100 msec bucket # " + index + " added a count of 0", 0, countAddedInThisBucket); } index++; } Assert.assertEquals(14, index - 1); index = 0; long totalAddedCounts = 0; for (HistogramIterationValue v : histogram.logarithmicBucketValues(10000, 2)) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 0) { Assert.assertEquals("Logarithmic 10 msec bucket # 0 [" + v.getValueIteratedFrom() + ".." 
+ v.getValueIteratedTo() + "] added a count of 10000", 10000, countAddedInThisBucket); } totalAddedCounts += v.getCountAddedInThisIterationStep(); index++; } Assert.assertEquals("There should be 14 Logarithmic buckets of size 10000 usec between 0 and 100 sec.", 14, index - 1); Assert.assertEquals("Total added counts should be 20000", 20000, totalAddedCounts); } @Test public void testRecordedValues() throws Exception { int index = 0; // Iterate raw data by stepping through every value that has a count recorded: for (HistogramIterationValue v : rawHistogram.recordedValues()) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 0) { Assert.assertEquals("Raw recorded value bucket # 0 added a count of 10000", 10000, countAddedInThisBucket); } else { Assert.assertEquals("Raw recorded value bucket # " + index + " added a count of 1", 1, countAddedInThisBucket); } index++; } Assert.assertEquals(2, index); index = 0; long totalAddedCounts = 0; for (HistogramIterationValue v : histogram.recordedValues()) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 0) { Assert.assertEquals("Recorded bucket # 0 [" + v.getValueIteratedFrom() + ".." + v.getValueIteratedTo() + "] added a count of 10000", 10000, countAddedInThisBucket); } Assert.assertTrue("The count in recorded bucket #" + index + " is not 0", v.getCountAtValueIteratedTo() != 0); Assert.assertEquals("The count in recorded bucket #" + index + " is exactly the amount added since the last iteration ", v.getCountAtValueIteratedTo(), v.getCountAddedInThisIterationStep()); totalAddedCounts += v.getCountAddedInThisIterationStep(); index++; } Assert.assertEquals("Total added counts should be 20000", 20000, totalAddedCounts); } @Test public void testAllValues() throws Exception { int index = 0; long latestValueAtIndex = 0; long totalCountToThisPoint = 0; long totalValueToThisPoint = 0; // Iterate raw data by stepping through every value that has a count recorded: for (HistogramIterationValue v : rawHistogram.allValues()) { long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 1000) { Assert.assertEquals("Raw allValues bucket # 0 added a count of 10000", 10000, countAddedInThisBucket); } else if (histogram.valuesAreEquivalent(v.getValueIteratedTo(), 100000000)) { Assert.assertEquals("Raw allValues value bucket # " + index + " added a count of 1", 1, countAddedInThisBucket); } else { Assert.assertEquals("Raw allValues value bucket # " + index + " added a count of 0", 0, countAddedInThisBucket); } latestValueAtIndex = v.getValueIteratedTo(); totalCountToThisPoint += v.getCountAtValueIteratedTo(); Assert.assertEquals("total Count should match", totalCountToThisPoint, v.getTotalCountToThisValue()); totalValueToThisPoint += v.getCountAtValueIteratedTo() * latestValueAtIndex; Assert.assertEquals("total Value should match", totalValueToThisPoint, v.getTotalValueToThisValue()); index++; } Assert.assertEquals("index should be equal to countsArrayLength", histogram.countsArrayLength, index); index = 0; long totalAddedCounts = 0; HistogramIterationValue v1 = null; for (HistogramIterationValue v : histogram.allValues()) { v1 = v; long countAddedInThisBucket = v.getCountAddedInThisIterationStep(); if (index == 1000) { Assert.assertEquals("AllValues bucket # 0 [" + v.getValueIteratedFrom() + ".." 
+ v.getValueIteratedTo() + "] added a count of 10000", 10000, countAddedInThisBucket); } Assert.assertEquals("The count in AllValues bucket #" + index + " is exactly the amount added since the last iteration ", v.getCountAtValueIteratedTo(), v.getCountAddedInThisIterationStep()); totalAddedCounts += v.getCountAddedInThisIterationStep(); Assert.assertTrue("valueFromIndex(index) should be equal to getValueIteratedTo()", histogram.valuesAreEquivalent(histogram.valueFromIndex(index), v.getValueIteratedTo())); index++; } Assert.assertEquals("index should be equal to countsArrayLength", histogram.countsArrayLength, index); Assert.assertEquals("Total added counts should be 20000", 20000, totalAddedCounts); } @Test public void testLinearIteratorSteps() { IntCountsHistogram histogram = new IntCountsHistogram(2); histogram.recordValue(193); histogram.recordValue(0); histogram.recordValue(1); histogram.recordValue(64); histogram.recordValue(128); int step = 64; int stepCount = 0; for (HistogramIterationValue itValue : histogram.linearBucketValues(step)) { stepCount++; itValue.getCountAddedInThisIterationStep(); } Assert.assertEquals("should see 4 steps", 4, stepCount); } private static void recordOneValueAndDisplayLinearBuckets(int value, long step) { IntCountsHistogram histogram = new IntCountsHistogram(2); histogram.recordValue(value); for (HistogramIterationValue itValue : histogram.linearBucketValues(step)) { itValue.getCountAddedInThisIterationStep(); } } @Test public void scanLinearIteratorForAIOOB() { List<int[]> broken = new ArrayList<int[]>(); // scan iterators through a range of step sizes and recorded values, looking for AIOOB: for (int step = 1; step < 100; step++) { for (int value = 1; value < 1000; value++) { try { recordOneValueAndDisplayLinearBuckets(value, step); } catch (ArrayIndexOutOfBoundsException e) { broken.add(new int[] { value, step }); } } } Assert.assertEquals("should not have any AIOOB iterations", 0, broken.size()); for (int[] brk : broken) { System.out.println("broken: value=" + brk[0] + " step=" + brk[1]); } } @Test public void testVerifyManualAllValuesDuplication() { Histogram histogram1 = histogram.copy(); AbstractHistogram.AllValues values = histogram1.allValues(); ArrayList<Long> ranges = new ArrayList<Long>(); ArrayList<Long> counts = new ArrayList<Long>(); int index = 0; for (HistogramIterationValue value : values) { if (value.getCountAddedInThisIterationStep() > 0) { ranges.add(value.getValueIteratedTo()); counts.add(value.getCountAddedInThisIterationStep()); } index++; } Assert.assertEquals("index should be equal to countsArrayLength", histogram1.countsArrayLength, index); AbstractHistogram histogram2 = new Histogram(highestTrackableValue, numberOfSignificantValueDigits); for (int i = 0; i < ranges.size(); ++i) { histogram2.recordValueWithCount(ranges.get(i), counts.get(i)); } Assert.assertTrue("Histograms should be equal", histogram1.equals(histogram2)); } @Test public void testLinearIteratorVisitsBucketsWiderThanStepSizeMultipleTimes() { Histogram h = new Histogram(1, Long.MAX_VALUE, 3); h.recordValue(1); h.recordValue(2047); // bucket size 2 h.recordValue(2048); h.recordValue(2049); h.recordValue(4095); // bucket size 4 h.recordValue(4096); h.recordValue(4097); h.recordValue(4098); h.recordValue(4099); // 2nd bucket in size 4 h.recordValue(4100); // sadly verbose helper class to hang on to iteration information for later comparison class IteratorValueSnapshot { private final long value; private final long count; private 
IteratorValueSnapshot(HistogramIterationValue iv) { this.value = iv.getValueIteratedTo(); this.count = iv.getCountAddedInThisIterationStep(); } private IteratorValueSnapshot(long value, long count) { this.value = value; this.count = count; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } IteratorValueSnapshot that = (IteratorValueSnapshot) o; if (value != that.value) { return false; } return count == that.count; } @Override public int hashCode() { int result = (int) (value ^ (value >>> 32)); result = 31 * result + (int) (count ^ (count >>> 32)); return result; } @Override public String toString() { return "IteratorValueSnapshot{" + "value=" + value + ", count=" + count + '}'; } } List<IteratorValueSnapshot> snapshots = new ArrayList<IteratorValueSnapshot>(); for (HistogramIterationValue iv : h.linearBucketValues(1)) { snapshots.add(new IteratorValueSnapshot(iv)); } // bucket size 1 Assert.assertEquals(new IteratorValueSnapshot(0, 0), snapshots.get(0)); Assert.assertEquals(new IteratorValueSnapshot(1, 1), snapshots.get(1)); Assert.assertEquals(new IteratorValueSnapshot(2046, 0), snapshots.get(2046)); Assert.assertEquals(new IteratorValueSnapshot(2047, 1), snapshots.get(2047)); // bucket size 2 Assert.assertEquals(new IteratorValueSnapshot(2048, 2), snapshots.get(2048)); Assert.assertEquals(new IteratorValueSnapshot(2049, 0), snapshots.get(2049)); Assert.assertEquals(new IteratorValueSnapshot(2050, 0), snapshots.get(2050)); Assert.assertEquals(new IteratorValueSnapshot(2051, 0), snapshots.get(2051)); Assert.assertEquals(new IteratorValueSnapshot(4094, 1), snapshots.get(4094)); Assert.assertEquals(new IteratorValueSnapshot(4095, 0), snapshots.get(4095)); // bucket size 4 Assert.assertEquals(new IteratorValueSnapshot(4096, 4), snapshots.get(4096)); Assert.assertEquals(new IteratorValueSnapshot(4097, 0), snapshots.get(4097)); Assert.assertEquals(new IteratorValueSnapshot(4098, 0), snapshots.get(4098)); Assert.assertEquals(new IteratorValueSnapshot(4099, 0), snapshots.get(4099)); // also size 4, last bucket Assert.assertEquals(new IteratorValueSnapshot(4100, 1), snapshots.get(4100)); Assert.assertEquals(new IteratorValueSnapshot(4101, 0), snapshots.get(4101)); Assert.assertEquals(new IteratorValueSnapshot(4102, 0), snapshots.get(4102)); Assert.assertEquals(new IteratorValueSnapshot(4103, 0), snapshots.get(4103)); Assert.assertEquals(4104, snapshots.size()); } }
36,256
50.721826
141
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/test/java/org/HdrHistogram/HistogramLogReaderWriterTest.java
package org.HdrHistogram; import org.junit.Assert; import org.junit.jupiter.api.Test; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.InputStream; public class HistogramLogReaderWriterTest { @Test public void emptyLog() throws Exception { File temp = File.createTempFile("hdrhistogramtesting", "hist"); temp.deleteOnExit(); FileOutputStream writerStream = new FileOutputStream(temp); HistogramLogWriter writer = new HistogramLogWriter(writerStream); writer.outputLogFormatVersion(); long startTimeWritten = 1000; writer.outputStartTime(startTimeWritten); writer.outputLogFormatVersion(); writer.outputLegend(); writerStream.close(); FileInputStream readerStream = new FileInputStream(temp); HistogramLogReader reader = new HistogramLogReader(readerStream); EncodableHistogram histogram = reader.nextIntervalHistogram(); Assert.assertNull(histogram); Assert.assertEquals(1.0, reader.getStartTimeSec(), 0.000001); } @Test public void taggedV2LogTest() throws Exception { InputStream readerStream = HistogramLogReaderWriterTest.class.getResourceAsStream("tagged-Log.logV2.hlog"); HistogramLogReader reader = new HistogramLogReader(readerStream); int histogramCount = 0; long totalCount = 0; EncodableHistogram encodeableHistogram = null; Histogram accumulatedHistogramWithNoTag = new Histogram(3); Histogram accumulatedHistogramWithTagA = new Histogram(3); while ((encodeableHistogram = reader.nextIntervalHistogram()) != null) { histogramCount++; Assert.assertTrue("Expected integer value histograms in log file", encodeableHistogram instanceof Histogram); Histogram histogram = (Histogram) encodeableHistogram; totalCount += histogram.getTotalCount(); if ("A".equals(histogram.getTag())) { accumulatedHistogramWithTagA.add(histogram); } else { accumulatedHistogramWithNoTag.add(histogram); } } Assert.assertEquals(32290, totalCount); Assert.assertEquals(accumulatedHistogramWithTagA, accumulatedHistogramWithNoTag); } @Test public void jHiccupV2Log() throws Exception { InputStream readerStream = HistogramLogReaderWriterTest.class.getResourceAsStream("jHiccup-2.0.7S.logV2.hlog"); HistogramLogReader reader = new HistogramLogReader(readerStream); int histogramCount = 0; long totalCount = 0; EncodableHistogram encodeableHistogram = null; Histogram accumulatedHistogram = new Histogram(3); while ((encodeableHistogram = reader.nextIntervalHistogram()) != null) { histogramCount++; Assert.assertTrue("Expected integer value histograms in log file", encodeableHistogram instanceof Histogram); Histogram histogram = (Histogram) encodeableHistogram; totalCount += histogram.getTotalCount(); accumulatedHistogram.add(histogram); } Assert.assertEquals(62, histogramCount); Assert.assertEquals(48761, totalCount); Assert.assertEquals(1745879039, accumulatedHistogram.getValueAtPercentile(99.9)); Assert.assertEquals(1796210687, accumulatedHistogram.getMaxValue()); Assert.assertEquals(1441812279.474, reader.getStartTimeSec(), 0.000001); readerStream = HistogramLogReaderWriterTest.class.getResourceAsStream("jHiccup-2.0.7S.logV2.hlog"); reader = new HistogramLogReader(readerStream); histogramCount = 0; totalCount = 0; accumulatedHistogram.reset(); while ((encodeableHistogram = reader.nextIntervalHistogram(5, 20)) != null) { histogramCount++; Histogram histogram = (Histogram) encodeableHistogram; totalCount += histogram.getTotalCount(); accumulatedHistogram.add(histogram); } Assert.assertEquals(15, histogramCount); Assert.assertEquals(11664, totalCount); Assert.assertEquals(1536163839, 
accumulatedHistogram.getValueAtPercentile(99.9)); Assert.assertEquals(1544552447, accumulatedHistogram.getMaxValue()); readerStream = HistogramLogReaderWriterTest.class.getResourceAsStream("jHiccup-2.0.7S.logV2.hlog"); reader = new HistogramLogReader(readerStream); histogramCount = 0; totalCount = 0; accumulatedHistogram.reset(); while ((encodeableHistogram = reader.nextIntervalHistogram(40, 60)) != null) { histogramCount++; Histogram histogram = (Histogram) encodeableHistogram; totalCount += histogram.getTotalCount(); accumulatedHistogram.add(histogram); } Assert.assertEquals(20, histogramCount); Assert.assertEquals(15830, totalCount); Assert.assertEquals(1779433471, accumulatedHistogram.getValueAtPercentile(99.9)); Assert.assertEquals(1796210687, accumulatedHistogram.getMaxValue()); } @Test public void jHiccupV1Log() throws Exception { InputStream readerStream = HistogramLogReaderWriterTest.class.getResourceAsStream("jHiccup-2.0.6.logV1.hlog"); HistogramLogReader reader = new HistogramLogReader(readerStream); int histogramCount = 0; long totalCount = 0; EncodableHistogram encodeableHistogram = null; Histogram accumulatedHistogram = new Histogram(3); while ((encodeableHistogram = reader.nextIntervalHistogram()) != null) { histogramCount++; Assert.assertTrue("Expected integer value histograms in log file", encodeableHistogram instanceof Histogram); Histogram histogram = (Histogram) encodeableHistogram; totalCount += histogram.getTotalCount(); accumulatedHistogram.add(histogram); } Assert.assertEquals(88, histogramCount); Assert.assertEquals(65964, totalCount); Assert.assertEquals(1829765119, accumulatedHistogram.getValueAtPercentile(99.9)); Assert.assertEquals(1888485375, accumulatedHistogram.getMaxValue()); Assert.assertEquals(1438867590.285, reader.getStartTimeSec(), 0.000001); readerStream = HistogramLogReaderWriterTest.class.getResourceAsStream("jHiccup-2.0.6.logV1.hlog"); reader = new HistogramLogReader(readerStream); histogramCount = 0; totalCount = 0; accumulatedHistogram.reset(); while ((encodeableHistogram = reader.nextIntervalHistogram(5, 20)) != null) { histogramCount++; Histogram histogram = (Histogram) encodeableHistogram; totalCount += histogram.getTotalCount(); accumulatedHistogram.add(histogram); } Assert.assertEquals(15, histogramCount); Assert.assertEquals(11213, totalCount); Assert.assertEquals(1019740159, accumulatedHistogram.getValueAtPercentile(99.9)); Assert.assertEquals(1032323071, accumulatedHistogram.getMaxValue()); readerStream = HistogramLogReaderWriterTest.class.getResourceAsStream("jHiccup-2.0.6.logV1.hlog"); reader = new HistogramLogReader(readerStream); histogramCount = 0; totalCount = 0; accumulatedHistogram.reset(); while ((encodeableHistogram = reader.nextIntervalHistogram(50, 80)) != null) { histogramCount++; Histogram histogram = (Histogram) encodeableHistogram; totalCount += histogram.getTotalCount(); accumulatedHistogram.add(histogram); } Assert.assertEquals(29, histogramCount); Assert.assertEquals(22630, totalCount); Assert.assertEquals(1871708159, accumulatedHistogram.getValueAtPercentile(99.9)); Assert.assertEquals(1888485375, accumulatedHistogram.getMaxValue()); } @Test public void jHiccupV0Log() throws Exception { InputStream readerStream = HistogramLogReaderWriterTest.class.getResourceAsStream("jHiccup-2.0.1.logV0.hlog"); HistogramLogReader reader = new HistogramLogReader(readerStream); int histogramCount = 0; long totalCount = 0; EncodableHistogram encodeableHistogram = null; Histogram accumulatedHistogram = new Histogram(3); while 
((encodeableHistogram = reader.nextIntervalHistogram()) != null) { histogramCount++; Assert.assertTrue("Expected integer value histograms in log file", encodeableHistogram instanceof Histogram); Histogram histogram = (Histogram) encodeableHistogram; totalCount += histogram.getTotalCount(); accumulatedHistogram.add(histogram); } Assert.assertEquals(81, histogramCount); Assert.assertEquals(61256, totalCount); Assert.assertEquals(1510998015, accumulatedHistogram.getValueAtPercentile(99.9)); Assert.assertEquals(1569718271, accumulatedHistogram.getMaxValue()); Assert.assertEquals(1438869961.225, reader.getStartTimeSec(), 0.000001); readerStream = HistogramLogReaderWriterTest.class.getResourceAsStream("jHiccup-2.0.1.logV0.hlog"); reader = new HistogramLogReader(readerStream); histogramCount = 0; totalCount = 0; accumulatedHistogram.reset(); while ((encodeableHistogram = reader.nextIntervalHistogram(20, 45)) != null) { histogramCount++; Histogram histogram = (Histogram) encodeableHistogram; totalCount += histogram.getTotalCount(); accumulatedHistogram.add(histogram); } Assert.assertEquals(25, histogramCount); Assert.assertEquals(18492, totalCount); Assert.assertEquals(459007, accumulatedHistogram.getValueAtPercentile(99.9)); Assert.assertEquals(623103, accumulatedHistogram.getMaxValue()); readerStream = HistogramLogReaderWriterTest.class.getResourceAsStream("jHiccup-2.0.1.logV0.hlog"); reader = new HistogramLogReader(readerStream); histogramCount = 0; totalCount = 0; accumulatedHistogram.reset(); while ((encodeableHistogram = reader.nextIntervalHistogram(46, 80)) != null) { histogramCount++; Histogram histogram = (Histogram) encodeableHistogram; totalCount += histogram.getTotalCount(); accumulatedHistogram.add(histogram); } Assert.assertEquals(34, histogramCount); Assert.assertEquals(25439, totalCount); Assert.assertEquals(1209008127, accumulatedHistogram.getValueAtPercentile(99.9)); Assert.assertEquals(1234173951, accumulatedHistogram.getMaxValue()); } @Test public void ycsbV1Log() throws Exception { InputStream readerStream = HistogramLogReaderWriterTest.class.getResourceAsStream("ycsb.logV1.hlog"); HistogramLogReader reader = new HistogramLogReader(readerStream); int histogramCount = 0; long totalCount = 0; EncodableHistogram encodeableHistogram = null; Histogram accumulatedHistogram = new Histogram(3); while ((encodeableHistogram = reader.nextIntervalHistogram()) != null) { histogramCount++; Assert.assertTrue("Expected integer value histograms in log file", encodeableHistogram instanceof Histogram); Histogram histogram = (Histogram) encodeableHistogram; totalCount += histogram.getTotalCount(); accumulatedHistogram.add(histogram); } Assert.assertEquals(602, histogramCount); Assert.assertEquals(300056, totalCount); Assert.assertEquals(1214463, accumulatedHistogram.getValueAtPercentile(99.9)); Assert.assertEquals(1546239, accumulatedHistogram.getMaxValue()); Assert.assertEquals(1438613579.295, reader.getStartTimeSec(), 0.000001); readerStream = HistogramLogReaderWriterTest.class.getResourceAsStream("ycsb.logV1.hlog"); reader = new HistogramLogReader(readerStream); histogramCount = 0; totalCount = 0; accumulatedHistogram.reset(); while ((encodeableHistogram = reader.nextIntervalHistogram(0, 180)) != null) { histogramCount++; Histogram histogram = (Histogram) encodeableHistogram; totalCount += histogram.getTotalCount(); accumulatedHistogram.add(histogram); } // note the first histogram in the log is before 0, so we drop it on the // floor Assert.assertEquals(180, histogramCount); 
Assert.assertEquals(90033, totalCount); Assert.assertEquals(1375231, accumulatedHistogram.getValueAtPercentile(99.9)); Assert.assertEquals(1546239, accumulatedHistogram.getMaxValue()); readerStream = HistogramLogReaderWriterTest.class.getResourceAsStream("ycsb.logV1.hlog"); reader = new HistogramLogReader(readerStream); histogramCount = 0; totalCount = 0; accumulatedHistogram.reset(); while ((encodeableHistogram = reader.nextIntervalHistogram(180, 700)) != null) { histogramCount++; Histogram histogram = (Histogram) encodeableHistogram; totalCount += histogram.getTotalCount(); accumulatedHistogram.add(histogram); } Assert.assertEquals(421, histogramCount); Assert.assertEquals(209686, totalCount); Assert.assertEquals(530, accumulatedHistogram.getValueAtPercentile(99.9)); Assert.assertEquals(17775, accumulatedHistogram.getMaxValue()); } @Test public void emptyHistogramsInLog() throws Exception { File temp = File.createTempFile("hdrhistogramtesting", "hlog"); FileOutputStream writerStream = new FileOutputStream(temp); HistogramLogWriter writer = new HistogramLogWriter(writerStream); writer.outputLogFormatVersion(); long startTimeWritten = 11000; writer.outputStartTime(startTimeWritten); writer.outputLogFormatVersion(); writer.outputLegend(); Histogram empty = new Histogram(2); empty.setStartTimeStamp(11100); empty.setEndTimeStamp(12100); writer.outputIntervalHistogram(empty); empty.setStartTimeStamp(12100); empty.setEndTimeStamp(13100); writer.outputIntervalHistogram(empty); writerStream.close(); FileInputStream readerStream = new FileInputStream(temp); HistogramLogReader reader = new HistogramLogReader(readerStream); Histogram histogram = (Histogram) reader.nextIntervalHistogram(); Assert.assertEquals(11.0, reader.getStartTimeSec(), 0.000001); Assert.assertNotNull(histogram); Assert.assertEquals(0, histogram.getTotalCount()); Assert.assertEquals(11100, histogram.getStartTimeStamp()); Assert.assertEquals(12100, histogram.getEndTimeStamp()); histogram = (Histogram) reader.nextIntervalHistogram(); Assert.assertNotNull(histogram); Assert.assertEquals(0, histogram.getTotalCount()); Assert.assertEquals(12100, histogram.getStartTimeStamp()); Assert.assertEquals(13100, histogram.getEndTimeStamp()); Assert.assertNull(reader.nextIntervalHistogram()); readerStream.close(); readerStream = new FileInputStream(temp); reader = new HistogramLogReader(readerStream); // relative read from the file, should include both histograms histogram = (Histogram) reader.nextIntervalHistogram(0.0, 4.0); Assert.assertEquals(11.0, reader.getStartTimeSec(), 0.000001); Assert.assertNotNull(histogram); Assert.assertEquals(0, histogram.getTotalCount()); Assert.assertEquals(11100, histogram.getStartTimeStamp()); Assert.assertEquals(12100, histogram.getEndTimeStamp()); histogram = (Histogram) reader.nextIntervalHistogram(0.0, 4.0); Assert.assertNotNull(histogram); Assert.assertEquals(0, histogram.getTotalCount()); Assert.assertEquals(12100, histogram.getStartTimeStamp()); Assert.assertEquals(13100, histogram.getEndTimeStamp()); Assert.assertNull(reader.nextIntervalHistogram()); readerStream.close(); readerStream = new FileInputStream(temp); reader = new HistogramLogReader(readerStream); // relative read from the file, should skip first histogram histogram = (Histogram) reader.nextIntervalHistogram(1.0, 4.0); Assert.assertEquals(11.0, reader.getStartTimeSec(), 0.000001); Assert.assertNotNull(histogram); Assert.assertEquals(0, histogram.getTotalCount()); Assert.assertEquals(12100, histogram.getStartTimeStamp()); 
Assert.assertEquals(13100, histogram.getEndTimeStamp()); Assert.assertNull(reader.nextIntervalHistogram()); readerStream.close(); } }
16,946
48.408163
121
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/package-info.java
/* * package-info.java * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ */ /** * <h3>A High Dynamic Range (HDR) Histogram Package</h3> * <p> * An HdrHistogram histogram supports the recording and analyzing sampled data value counts across a configurable * integer value range with configurable value precision within the range. Value precision is expressed as the number * of significant digits in the value recording, and provides control over value quantization behavior across the * value range and the subsequent value resolution at any given level. * </p> * <p> * In contrast to traditional histograms that use linear, logarithmic, or arbitrary sized bins or buckets, * HdrHistograms use a fixed storage internal data representation that simultaneously supports an arbitrarily high * dynamic range and arbitrary precision throughout that dynamic range. This capability makes HdrHistograms extremely * useful for tracking and reporting on the distribution of percentile values with high resolution and across a wide * dynamic range -- a common need in latency behavior characterization. * </p> * <p> * The HdrHistogram package was specifically designed with latency and performance sensitive applications in mind. * Experimental u-benchmark measurements show value recording times as low as 3-6 nanoseconds on modern * (circa 2012) Intel CPUs. All Histogram variants can maintain a fixed cost in both space and time. When not * configured to auto-resize, a Histogram's memory footprint is constant, with no allocation operations involved in * recording data values or in iterating through them. The memory footprint is fixed regardless of the number of data * value samples recorded, and depends solely on the dynamic range and precision chosen. The amount of work involved in * recording a sample is constant, and directly computes storage index locations such that no iteration or searching * is ever involved in recording data values. * <p> * NOTE: Histograms can optionally be configured to auto-resize their dynamic range as a convenience feature. * When configured to auto-resize, recording operations that need to expand a histogram will auto-resize its * dynamic range to include recorded values as they are encountered. Note that recording calls that cause * auto-resizing may take longer to execute, and that resizing incurs allocation and copying of internal data * structures. * </p> * <p> * The combination of high dynamic range and precision is useful for collection and accurate post-recording * analysis of sampled value data distribution in various forms. Whether it's calculating or * plotting arbitrary percentiles, iterating through and summarizing values in various ways, or deriving mean and * standard deviation values, the fact that the recorded value count information is kept in high * resolution allows for accurate post-recording analysis with low [and ultimately configurable] loss in * accuracy when compared to performing the same analysis directly on the potentially infinite series of sourced * data values samples. * </p> * <p> * An HdrHistogram histogram is usually configured to maintain value count data with a resolution good enough * to support a desired precision in post-recording analysis and reporting on the collected data. 
Analysis can include * the computation and reporting of distribution by percentiles, linear or logarithmic arbitrary value buckets, mean * and standard deviation, as well as any other computations that can supported using the various iteration techniques * available on the collected value count data. In practice, a precision levels of 2 or 3 decimal points are most * commonly used, as they maintain a value accuracy of +/- ~1% or +/- ~0.1% respectively for derived distribution * statistics. * </p> * <p> * A good example of HdrHistogram use would be tracking of latencies across a wide dynamic range. E.g. from a * microsecond to an hour. A Histogram can be configured to track and later report on the counts of observed integer * usec-unit latency values between 0 and 3,600,000,000 while maintaining a value precision of 3 significant digits * across that range. Such an example Histogram would simply be created with a * <b><code>highestTrackableValue</code></b> of 3,600,000,000, and a * <b><code>numberOfSignificantValueDigits</code></b> of 3, and would occupy a fixed, unchanging memory footprint * of around 185KB (see "Footprint estimation" below). * <br> * Code for this use example would include these basic elements: * <br> * <pre> * <code> * {@link org.HdrHistogram.Histogram} histogram = new {@link org.HdrHistogram.Histogram}(3600000000L, 3); * . * . * . * // Repeatedly record measured latencies: * histogram.{@link org.HdrHistogram.AbstractHistogram#recordValue(long) recordValue}(latency); * . * . * . * // Report histogram percentiles, expressed in msec units: * histogram.{@link org.HdrHistogram.AbstractHistogram#outputPercentileDistribution(java.io.PrintStream, Double) outputPercentileDistribution}(histogramLog, 1000.0)}; * </code> * </pre> * Specifying 3 decimal points of precision in this example guarantees that value quantization within the value range * will be no larger than 1/1,000th (or 0.1%) of any recorded value. This example Histogram can be therefore used to * track, analyze and report the counts of observed latencies ranging between 1 microsecond and 1 hour in magnitude, * while maintaining a value resolution 1 microsecond (or better) up to 1 millisecond, a resolution of 1 millisecond * (or better) up to one second, and a resolution of 1 second (or better) up to 1,000 seconds. At it's maximum tracked * value (1 hour), it would still maintain a resolution of 3.6 seconds (or better). * <h3>Histogram variants and internal representation</h3> * The HdrHistogram package includes multiple implementations of the {@link org.HdrHistogram.AbstractHistogram} class: * <ul> * <li> {@link org.HdrHistogram.Histogram}, which is the commonly used Histogram form and tracks value counts * in <b><code>long</code></b> fields. </li> * <li>{@link org.HdrHistogram.IntCountsHistogram} and {@link org.HdrHistogram.ShortCountsHistogram}, which track value counts * in <b><code>int</code></b> and * <b><code>short</code></b> fields respectively, are provided for use cases where smaller count ranges are practical * and smaller overall storage is beneficial (e.g. 
systems where tens of thousands of in-memory histograms are * being tracked).</li> * <li>{@link org.HdrHistogram.AtomicHistogram}, {@link org.HdrHistogram.ConcurrentHistogram} * and {@link org.HdrHistogram.SynchronizedHistogram}</li> * </ul> * <p> * Internally, data in HdrHistogram variants is maintained using a concept somewhat similar to that of floating * point number representation: Using an exponent and a (non-normalized) mantissa to * support a wide dynamic range at a high but varying (by exponent value) resolution. * AbstractHistogram uses exponentially increasing bucket value ranges (the parallel of * the exponent portion of a floating point number) with each bucket containing * a fixed number (per bucket) set of linear sub-buckets (the parallel of a non-normalized mantissa portion * of a floating point number). * Both dynamic range and resolution are configurable, with <b><code>highestTrackableValue</code></b> * controlling dynamic range, and <b><code>numberOfSignificantValueDigits</code></b> controlling * resolution. * </p> * <h3>Synchronization and concurrent access</h3> * In the interest of keeping value recording cost to a minimum, the commonly used {@link org.HdrHistogram.Histogram} * class and its {@link org.HdrHistogram.IntCountsHistogram} and {@link org.HdrHistogram.ShortCountsHistogram} * variants are NOT internally synchronized, and do NOT use atomic variables. Callers wishing to make potentially * concurrent, multi-threaded updates or queries against Histogram objects should either take care to externally * synchronize and/or order their access, or use the {@link org.HdrHistogram.ConcurrentHistogram}, * {@link org.HdrHistogram.AtomicHistogram}, or {@link org.HdrHistogram.SynchronizedHistogram} variants. * <p> * A common pattern seen in histogram value recording involves recording values in a critical path (multi-threaded * or not), coupled with a non-critical path reading the recorded data for summary/reporting purposes. When such * continuous non-blocking recording operation (concurrent or not) is desired even when sampling, analyzing, or * reporting operations are needed, consider using the {@link org.HdrHistogram.Recorder} and * {@link org.HdrHistogram.SingleWriterRecorder} variants that were specifically designed for that purpose. * Recorders provide a recording API similar to Histogram, and internally maintain and coordinate active/inactive * histograms such that recording remains wait-free in the presence of accurate and stable interval sampling. * </p> * <p> * It is worth mentioning that since Histogram objects are additive, it is common practice to use per-thread * non-synchronized histograms or {@link org.HdrHistogram.SingleWriterRecorder}s, and to use a summary/reporting * thread to perform histogram aggregation math across time and/or threads. * </p> * <h3>Iteration</h3> * Histograms support multiple convenient forms of iterating through the histogram data set, including linear, * logarithmic, and percentile iteration mechanisms, as well as means for iterating through each recorded value or * each possible value level. 
The iteration mechanisms all provide {@link org.HdrHistogram.HistogramIterationValue} * data points along the histogram's iterated data set, and are available via the following methods: * <ul> * <li>{@link org.HdrHistogram.AbstractHistogram#percentiles percentiles} : * An {@link java.lang.Iterable}{@literal <}{@link org.HdrHistogram.HistogramIterationValue}{@literal >} through the * histogram using a {@link org.HdrHistogram.PercentileIterator} </li> * <li>{@link org.HdrHistogram.AbstractHistogram#linearBucketValues linearBucketValues} : * An {@link java.lang.Iterable}{@literal <}{@link org.HdrHistogram.HistogramIterationValue}{@literal >} through * the histogram using a {@link org.HdrHistogram.LinearIterator} </li> * <li>{@link org.HdrHistogram.AbstractHistogram#logarithmicBucketValues logarithmicBucketValues} : * An {@link java.lang.Iterable}{@literal <}{@link org.HdrHistogram.HistogramIterationValue}{@literal >} * through the histogram using a {@link org.HdrHistogram.LogarithmicIterator} </li> * <li>{@link org.HdrHistogram.AbstractHistogram#recordedValues recordedValues} : * An {@link java.lang.Iterable}{@literal <}{@link org.HdrHistogram.HistogramIterationValue}{@literal >} through * the histogram using a {@link org.HdrHistogram.RecordedValuesIterator} </li> * <li>{@link org.HdrHistogram.AbstractHistogram#allValues allValues} : * An {@link java.lang.Iterable}{@literal <}{@link org.HdrHistogram.HistogramIterationValue}{@literal >} through * the histogram using a {@link org.HdrHistogram.AllValuesIterator} </li> * </ul> * <p> * Iteration is typically done with a for-each loop statement. E.g.: * <br><pre><code> * for (HistogramIterationValue v : histogram.percentiles(<i>percentileTicksPerHalfDistance</i>)) { * ... * } * </code></pre> * or * <br><pre><code> * for (HistogramIterationValue v : histogram.linearBucketValues(<i>valueUnitsPerBucket</i>)) { * ... * } * </code> * </pre> * The iterators associated with each iteration method are resettable, such that a caller that would like to avoid * allocating a new iterator object for each iteration loop can re-use an iterator to repeatedly iterate through the * histogram. This iterator re-use usually takes the form of a traditional for loop using the Iterator's * <b><code>hasNext()</code></b> and <b><code>next()</code></b> methods: * * to avoid allocating a new iterator object for each iteration loop: * <br> * <pre> * <code> * PercentileIterator iter = histogram.percentiles().iterator(<i>percentileTicksPerHalfDistance</i>); * ... * iter.reset(<i>percentileTicksPerHalfDistance</i>); * for (iter.hasNext() { * HistogramIterationValue v = iter.next(); * ... * } * </code> * </pre> * <h3>Equivalent Values and value ranges</h3> * <p> * Due to the finite (and configurable) resolution of the histogram, multiple adjacent integer data values can * be "equivalent". Two values are considered "equivalent" if samples recorded for both are always counted in a * common total count due to the histogram's resolution level. Histogram provides methods for determining the * lowest and highest equivalent values for any given value, as we as determining whether two values are equivalent, * and for finding the next non-equivalent value for a given value (useful when looping through values, in order * to avoid double-counting count). * </p> * <h3>Raw vs. corrected recording</h3> * <p> * Regular, raw value data recording into an HdrHistogram is achieved with the * {@link org.HdrHistogram.AbstractHistogram#recordValue(long) recordValue()} method. 
* <p> * Histogram variants also provide an auto-correcting * {@link org.HdrHistogram.AbstractHistogram#recordValueWithExpectedInterval(long, long) recordValueWithExpectedInterval()} * form in support of a common use case found when histogram values are used to track response time * distribution in the presence of Coordinated Omission - an extremely common phenomenon found in latency recording * systems. * This correcting form is useful in [e.g. load generator] scenarios where measured response times may exceed the * expected interval between issuing requests, leading to the "omission" of response time measurements that would * typically correlate with "bad" results. This coordinated (non random) omission of source data, if left uncorrected, * will then dramatically skew any overall latency stats computed on the recorded information, as the recorded data set * itself will be significantly skewed towards good results. * </p> * <p> * When a value recorded in the histogram exceeds the * <b><code>expectedIntervalBetweenValueSamples</code></b> parameter, recorded histogram data will * reflect an appropriate number of additional values, linearly decreasing in steps of * <b><code>expectedIntervalBetweenValueSamples</code></b>, down to the last value * that would still be higher than <b><code>expectedIntervalBetweenValueSamples</code></b>. * </p> * <p> * To illustrate why this corrective behavior is critically needed in order to accurately represent value * distribution when large value measurements may lead to missed samples, imagine a system for which response * time samples are taken once every 10 msec to characterize response time distribution. * The hypothetical system behaves "perfectly" for 100 seconds (10,000 recorded samples), with each sample * showing a 1msec response time value. The hypothetical system then encounters a 100 sec pause during which only a single sample is * recorded (with a 100 second value). * A normally recorded (uncorrected) data histogram collected for such a hypothetical system (over the 200 second * scenario above) would show ~99.99% of results at 1msec or below, which is obviously "not right". In contrast, a * histogram that records the same data using the auto-correcting * {@link org.HdrHistogram.AbstractHistogram#recordValueWithExpectedInterval(long, long) recordValueWithExpectedInterval()} * method with the knowledge of an expectedIntervalBetweenValueSamples of 10msec will correctly represent the * real world response time distribution of this hypothetical system. Only ~50% of results will be at 1msec or below, * with the remaining 50% coming from the auto-generated value records covering the missing increments spread between * 10msec and 100 sec. * </p> * <p> * Data sets recorded with * {@link org.HdrHistogram.AbstractHistogram#recordValue(long) recordValue()} * and with * {@link org.HdrHistogram.AbstractHistogram#recordValueWithExpectedInterval(long, long) recordValueWithExpectedInterval()} * will differ only if at least one value recorded was greater than its * associated <b><code>expectedIntervalBetweenValueSamples</code></b> parameter. 
* Data sets recorded with the * {@link org.HdrHistogram.AbstractHistogram#recordValueWithExpectedInterval(long, long) recordValueWithExpectedInterval()} * form will be identical to ones recorded with * {@link org.HdrHistogram.AbstractHistogram#recordValue(long) recordValue()} * if all values recorded via the <b><code>recordValue</code></b> calls were smaller * than their associated <b><code>expectedIntervalBetweenValueSamples</code></b> parameters. * </p> * <p> * In addition to the at-recording-time correction option, Histogram variants also provide the post-recording correction * methods * {@link org.HdrHistogram.AbstractHistogram#copyCorrectedForCoordinatedOmission(long) copyCorrectedForCoordinatedOmission()} * and * {@link org.HdrHistogram.AbstractHistogram#addWhileCorrectingForCoordinatedOmission(AbstractHistogram, long) addWhileCorrectingForCoordinatedOmission()}. * These methods can be used for post-recording correction, and are useful when the * <b><code>expectedIntervalBetweenValueSamples</code></b> parameter is estimated to be the same for all recorded * values. However, for obvious reasons, it is important to note that only one correction method (during or post * recording) should be used on a given histogram data set. * </p> * <p> * When used for response time characterization, recording with the optional * <code><b>expectedIntervalBetweenValueSamples</b></code> parameter will tend to produce data sets that * much more accurately reflect the response time distribution that a random, uncoordinated request would have * experienced. * </p> * <h3>Floating point values and DoubleHistogram variants</h3> * The above discussion relates to integer value histograms (the various subclasses of * {@link org.HdrHistogram.AbstractHistogram} and their related supporting classes). HdrHistogram supports floating * point value recording and reporting with a similar set of classes, including the * {@link org.HdrHistogram.DoubleHistogram}, {@link org.HdrHistogram.ConcurrentDoubleHistogram} and * {@link org.HdrHistogram.SynchronizedDoubleHistogram} histogram classes. Support for floating point value * iteration is provided with {@link org.HdrHistogram.DoubleHistogramIterationValue} and related iterator classes ( * {@link org.HdrHistogram.DoubleLinearIterator}, {@link org.HdrHistogram.DoubleLogarithmicIterator}, * {@link org.HdrHistogram.DoublePercentileIterator}, {@link org.HdrHistogram.DoubleRecordedValuesIterator}, * {@link org.HdrHistogram.DoubleAllValuesIterator}). Support for interval recording is provided with * {@link org.HdrHistogram.DoubleRecorder} and * {@link org.HdrHistogram.SingleWriterDoubleRecorder}. * <h4>Auto-ranging in floating point histograms</h4> * Unlike integer value based histograms, the specific value range tracked by a {@link * org.HdrHistogram.DoubleHistogram} (and variants) is not specified upfront. Only the dynamic range of values * that the histogram can cover is (optionally) specified. E.g. When a {@link org.HdrHistogram.DoubleHistogram} * is created to track a dynamic range of 3600000000000 (enough to track values from a nanosecond to an hour), * values could be recorded into it in any consistent unit of time as long as the ratio between the highest * and lowest non-zero values stays within the specified dynamic range, so recording in units of nanoseconds * (1.0 thru 3600000000000.0), milliseconds (0.000001 thru 3600000.0), seconds (0.000000001 thru 3600.0), or hours * (1/3.6E12 thru 1.0) will all work just as well. 
* <h3>Footprint estimation</h3> * Due to its dynamic range representation, Histogram is relatively efficient in memory space requirements given * the accuracy and dynamic range it covers. Still, it is useful to be able to estimate the memory footprint involved * for a given <b><code>highestTrackableValue</code></b> and <b><code>numberOfSignificantValueDigits</code></b> * combination. Beyond a relatively small fixed-size footprint used for internal fields and stats (which can be * estimated as "fixed at well less than 1KB"), the bulk of a Histogram's storage is taken up by its data value * recording counts array. The total footprint can be conservatively estimated by: * <pre><code> * largestValueWithSingleUnitResolution = 2 * (10 ^ numberOfSignificantValueDigits); * subBucketSize = roundedUpToNearestPowerOf2(largestValueWithSingleUnitResolution); * expectedHistogramFootprintInBytes = 512 + * ({primitive type size} / 2) * * (log2RoundedUp((highestTrackableValue) / subBucketSize) + 2) * * subBucketSize * * </code></pre> * A conservative (high) estimate of a Histogram's footprint in bytes is available via the * {@link org.HdrHistogram.AbstractHistogram#getEstimatedFootprintInBytes() getEstimatedFootprintInBytes()} method. */ package org.HdrHistogram;
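The footprint formula above can be applied directly in code. Below is a minimal, hedged sketch (the class and helper names are illustrative, not part of HdrHistogram) that computes the documented estimate for the microsecond-to-one-hour example and prints it next to the library's own getEstimatedFootprintInBytes() figure; the two numbers are expected to be close, not necessarily identical.

import org.HdrHistogram.Histogram;

public class FootprintEstimateExample {

    // Applies the package-info estimate, assuming 8-byte (long) counts as used by the plain Histogram class.
    static long estimateFootprintBytes(long highestTrackableValue, int numberOfSignificantValueDigits) {
        long largestValueWithSingleUnitResolution = 2 * (long) Math.pow(10, numberOfSignificantValueDigits);
        long subBucketSize = Long.highestOneBit(largestValueWithSingleUnitResolution);
        if (subBucketSize < largestValueWithSingleUnitResolution) {
            subBucketSize <<= 1; // roundedUpToNearestPowerOf2
        }
        long ratio = highestTrackableValue / subBucketSize;
        int log2RoundedUp = 64 - Long.numberOfLeadingZeros(ratio - 1); // ceil(log2(ratio))
        long primitiveTypeSize = 8; // bytes per count
        return 512 + (primitiveTypeSize / 2) * (log2RoundedUp + 2) * subBucketSize;
    }

    public static void main(String[] args) {
        // The usec-to-one-hour example from the text: roughly 185KB is expected.
        System.out.println("estimated: " + estimateFootprintBytes(3600000000L, 3));
        System.out.println("reported:  " + new Histogram(3600000000L, 3).getEstimatedFootprintInBytes());
    }
}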
21,532
67.795527
166
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/LinearIterator.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.util.Iterator; /** * Used for iterating through histogram values in linear steps. The iteration is * performed in steps of <i>valueUnitsPerBucket</i> in size, terminating when all recorded histogram * values are exhausted. Note that each iteration "bucket" includes values up to and including * the next bucket boundary value. */ public class LinearIterator extends AbstractHistogramIterator implements Iterator<HistogramIterationValue> { private long valueUnitsPerBucket; private long currentStepHighestValueReportingLevel; private long currentStepLowestValueReportingLevel; /** * Reset iterator for re-use in a fresh iteration over the same histogram data set. * @param valueUnitsPerBucket The size (in value units) of each bucket iteration. */ public void reset(final long valueUnitsPerBucket) { reset(histogram, valueUnitsPerBucket); } private void reset(final AbstractHistogram histogram, final long valueUnitsPerBucket) { super.resetIterator(histogram); this.valueUnitsPerBucket = valueUnitsPerBucket; this.currentStepHighestValueReportingLevel = valueUnitsPerBucket - 1; this.currentStepLowestValueReportingLevel = histogram.lowestEquivalentValue(currentStepHighestValueReportingLevel); } /** * @param histogram The histogram this iterator will operate on * @param valueUnitsPerBucket The size (in value units) of each bucket iteration. */ public LinearIterator(final AbstractHistogram histogram, final long valueUnitsPerBucket) { reset(histogram, valueUnitsPerBucket); } @Override public boolean hasNext() { if (super.hasNext()) { return true; } // If the next iteration will not move to the next sub bucket index (which is empty if // if we reached this point), then we are not yet done iterating (we want to iterate // until we are no longer on a value that has a count, rather than util we first reach // the last value that has a count. The difference is subtle but important)... // When this is called, we're about to begin the "next" iteration, so // currentStepHighestValueReportingLevel has already been incremented, and we use it // without incrementing its value. return (currentStepHighestValueReportingLevel < nextValueAtIndex); } @Override void incrementIterationLevel() { currentStepHighestValueReportingLevel += valueUnitsPerBucket; currentStepLowestValueReportingLevel = histogram.lowestEquivalentValue(currentStepHighestValueReportingLevel); } @Override long getValueIteratedTo() { return currentStepHighestValueReportingLevel; } @Override boolean reachedIterationLevel() { return ((currentValueAtIndex >= currentStepLowestValueReportingLevel) || (currentIndex >= histogram.countsArrayLength - 1)) ; } }
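As a hedged usage sketch of the linear iteration described in the class Javadoc (the recorded values and the 1000-unit step below are illustrative, not taken from the original file): each iteration step reports the count added within one valueUnitsPerBucket-wide range, including empty ranges that fall between recorded values.

import org.HdrHistogram.Histogram;
import org.HdrHistogram.HistogramIterationValue;

public class LinearIterationExample {
    public static void main(String[] args) {
        Histogram histogram = new Histogram(3600000000L, 3);
        histogram.recordValue(250);
        histogram.recordValue(900);
        histogram.recordValue(900);
        histogram.recordValue(3100);

        // Iterate in linear steps of 1000 value units; LinearIterator backs this Iterable.
        for (HistogramIterationValue v : histogram.linearBucketValues(1000)) {
            System.out.println("<= " + v.getValueIteratedTo()
                    + " : " + v.getCountAddedInThisIterationStep());
        }
        // Expected shape: the step ending at 999 reports 3, the steps ending at 1999 and 2999
        // report 0, and the step ending at 3999 reports 1, after which iteration terminates.
    }
}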
3,167
39.615385
123
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/WriterReaderPhaser.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ */ package org.HdrHistogram; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.locks.ReentrantLock; /** * {@link WriterReaderPhaser} provides an asymmetric means for * synchronizing the execution of wait-free "writer" critical sections against * a "reader phase flip" that needs to make sure no writer critical sections * that were active at the beginning of the flip are still active after the * flip is done. Multiple writers and multiple readers are supported. * <p> * Using a {@link WriterReaderPhaser} for coordination, writers can continuously * perform wait-free/lock-free updates to common data structures, while readers * can get hold of atomic and inactive snapshots without stalling writers. * <p> * While a {@link WriterReaderPhaser} can be useful in multiple scenarios, a * specific and common use case is that of safely managing "double buffered" * data stream access in which writers can proceed without being blocked, while * readers gain access to stable and unchanging buffer samples. * {@link WriterReaderPhaser} "writers" are wait free (on architectures that support * wait free atomic increment operations), "readers" block for other * "readers", and "readers" are only blocked by "writers" whose critical section * was entered before the reader's * {@link WriterReaderPhaser#flipPhase()} attempt. * <h3>Assumptions and Guarantees</h3> * <p> * When used to protect an actively recording data structure, the assumptions on * how readers and writers act are: * <ol> * <li>There are two sets of data structures ("active" and "inactive")</li> * <li>Writing is done to the perceived active version (as perceived by the * writer), and only within critical sections delineated by * {@link WriterReaderPhaser#writerCriticalSectionEnter} and * {@link WriterReaderPhaser#writerCriticalSectionExit writerCriticalSectionExit()}. * </li> * <li>Only readers switch the perceived roles of the active and inactive data * structures. They do so only while under {@link WriterReaderPhaser#readerLock()} * protection and only before calling {@link WriterReaderPhaser#flipPhase()}.</li> * <li>Writers do not remain in their critical sections indefinitely.</li> * <li>Only writers perform {@link WriterReaderPhaser#writerCriticalSectionEnter} * and * {@link WriterReaderPhaser#writerCriticalSectionExit writerCriticalSectionExit()}. * </li> * <li>Readers do not hold onto readerLock indefinitely.</li> * <li>Only readers perform {@link WriterReaderPhaser#readerLock()} and * {@link WriterReaderPhaser#readerUnlock()}.</li> * <li>Only readers perform {@link WriterReaderPhaser#flipPhase()} operations, * and only while holding the readerLock.</li> * </ol> * <p> * When the above assumptions are met, {@link WriterReaderPhaser} guarantees * that the inactive data structures are not being modified by any writers while * being read while under readerLock() protection after a * {@link WriterReaderPhaser#flipPhase()}() operation. 
* <p> * The following progress guarantees are provided to writers and readers that * adhere to the above stated assumptions: * <ol> * <li>Writers operations * ({@link WriterReaderPhaser#writerCriticalSectionEnter writerCriticalSectionEnter} * and {@link WriterReaderPhaser#writerCriticalSectionExit writerCriticalSectionExit}) * are wait free on architectures that * support wait-free atomic increment operations (they remain lock-free [but not * wait-free] on architectures that do not support wait-free atomic increment * operations)</li> * <li>{@link WriterReaderPhaser#flipPhase()} operations are guaranteed to * make forward progress, and will only be blocked by writers whose critical sections * were entered prior to the start of the reader's flipPhase operation, and have not * yet exited their critical sections.</li> * <li>{@link WriterReaderPhaser#readerLock()} only blocks for other * readers that are holding the readerLock.</li> * </ol> * * <h3>Example use</h3> * Imagine a simple use case where a histogram (which is basically a large set of * rapidly updated counters) is being modified by writers, and a reader needs to gain * access to stable interval samples of the histogram for reporting or other analysis * purposes. * <pre><code> * final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser(); * * volatile Histogram activeHistogram; * Histogram inactiveHistogram; * ... * </code></pre> * A writer may record values the histogram: * <pre><code> * // Wait-free recording: * long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); * try { * activeHistogram.recordValue(value); * } finally { * recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); * } * </code></pre> * A reader gains access to a stable histogram of values recorded during an interval, * and reports on it: * <pre><code> * try { * recordingPhaser.readerLock(); * * inactiveHistogram.reset(); * * // Swap active and inactive histograms: * final Histogram tempHistogram = inactiveHistogram; * inactiveHistogram = activeHistogram; * activeHistogram = tempHistogram; * * recordingPhaser.flipPhase(); * // At this point, inactiveHistogram content is guaranteed to be stable * * logHistogram(inactiveHistogram); * * } finally { * recordingPhaser.readerUnlock(); * } * </code></pre> */ /* * High level design: There are even and odd epochs; the epoch flips for each * reader. Any number of writers can be in the same epoch (odd or even), but * after a completed phase flip no writers will be still in the old epoch * (and therefore are known to not be updating or observing the old, inactive * data structure). Writers can always proceed at full speed in what they * perceive to be the current (odd or even) epoch. The epoch flip is fast (a * single atomic op). 
*/ public class WriterReaderPhaser { private volatile long startEpoch = 0; private volatile long evenEndEpoch = 0; private volatile long oddEndEpoch = Long.MIN_VALUE; private final ReentrantLock readerLock = new ReentrantLock(); private static final AtomicLongFieldUpdater<WriterReaderPhaser> startEpochUpdater = AtomicLongFieldUpdater.newUpdater(WriterReaderPhaser.class, "startEpoch"); private static final AtomicLongFieldUpdater<WriterReaderPhaser> evenEndEpochUpdater = AtomicLongFieldUpdater.newUpdater(WriterReaderPhaser.class, "evenEndEpoch"); private static final AtomicLongFieldUpdater<WriterReaderPhaser> oddEndEpochUpdater = AtomicLongFieldUpdater.newUpdater(WriterReaderPhaser.class, "oddEndEpoch"); /** * Indicate entry to a critical section containing a write operation. * <p> * This call is wait-free on architectures that support wait free atomic increment operations, * and is lock-free on architectures that do not. * <p> * {@code writerCriticalSectionEnter()} must be matched with a subsequent * {@link WriterReaderPhaser#writerCriticalSectionExit(long)} in order for CriticalSectionPhaser * synchronization to function properly. * * @return an (opaque) value associated with the critical section entry, which MUST be provided * to the matching {@link WriterReaderPhaser#writerCriticalSectionExit} call. */ public long writerCriticalSectionEnter() { return startEpochUpdater.getAndIncrement(this); } /** * Indicate exit from a critical section containing a write operation. * <p> * This call is wait-free on architectures that support wait free atomic increment operations, * and is lock-free on architectures that do not. * <p> * {@code writerCriticalSectionExit(long)} must be matched with a preceding * {@link WriterReaderPhaser#writerCriticalSectionEnter()} call, and must be provided with the * matching {@link WriterReaderPhaser#writerCriticalSectionEnter()} call's return value, in * order for CriticalSectionPhaser synchronization to function properly. * * @param criticalValueAtEnter the (opaque) value returned from the matching * {@link WriterReaderPhaser#writerCriticalSectionEnter()} call. */ public void writerCriticalSectionExit(long criticalValueAtEnter) { (criticalValueAtEnter < 0 ? oddEndEpochUpdater : evenEndEpochUpdater).getAndIncrement(this); } /** * Enter to a critical section containing a read operation (reentrant, mutually excludes against * {@link WriterReaderPhaser#readerLock} calls by other threads). * <p> * {@link WriterReaderPhaser#readerLock} DOES NOT provide synchronization * against {@link WriterReaderPhaser#writerCriticalSectionEnter()} calls. Use {@link WriterReaderPhaser#flipPhase()} * to synchronize reads against writers. */ public void readerLock() { readerLock.lock(); } /** * Exit from a critical section containing a read operation (relinquishes mutual exclusion against other * {@link WriterReaderPhaser#readerLock} calls). */ public void readerUnlock() { readerLock.unlock(); } /** * Flip a phase in the {@link WriterReaderPhaser} instance, {@link WriterReaderPhaser#flipPhase()} * can only be called while holding the {@link WriterReaderPhaser#readerLock() readerLock}. * {@link WriterReaderPhaser#flipPhase()} will return only after all writer critical sections (protected by * {@link WriterReaderPhaser#writerCriticalSectionEnter() writerCriticalSectionEnter} and * {@link WriterReaderPhaser#writerCriticalSectionExit writerCriticalSectionEnter}) that may have been * in flight when the {@link WriterReaderPhaser#flipPhase()} call were made had completed. 
* <p> * No actual writer critical section activity is required for {@link WriterReaderPhaser#flipPhase()} to * succeed. * <p> * However, {@link WriterReaderPhaser#flipPhase()} is lock-free with respect to calls to * {@link WriterReaderPhaser#writerCriticalSectionEnter()} and * {@link WriterReaderPhaser#writerCriticalSectionExit writerCriticalSectionExit()}. It may spin-wait * for active writer critical section code to complete. * * @param yieldTimeNsec The amount of time (in nanoseconds) to sleep in each yield if a yield loop is needed. */ public void flipPhase(long yieldTimeNsec) { if (!readerLock.isHeldByCurrentThread()) { throw new IllegalStateException("flipPhase() can only be called while holding the readerLock()"); } // Read the volatile 'startEpoch' exactly once boolean nextPhaseIsEven = (startEpoch < 0); // Current phase is odd... // First, clear currently unused [next] phase end epoch (to proper initial value for phase): long initialStartValue = nextPhaseIsEven ? 0 : Long.MIN_VALUE; (nextPhaseIsEven ? evenEndEpochUpdater : oddEndEpochUpdater).lazySet(this, initialStartValue); // Next, reset start value, indicating new phase, and retain value at flip: long startValueAtFlip = startEpochUpdater.getAndSet(this, initialStartValue); // Now, spin until previous phase end value catches up with start value at flip: while((nextPhaseIsEven ? oddEndEpoch : evenEndEpoch) != startValueAtFlip) { if (yieldTimeNsec == 0) { Thread.yield(); } else { try { TimeUnit.NANOSECONDS.sleep(yieldTimeNsec); } catch (InterruptedException ex) { // nothing to do here, we just woke up earlier than expected. } } } } /** * Flip a phase in the {@link WriterReaderPhaser} instance, {@code flipPhase()} * can only be called while holding the {@link WriterReaderPhaser#readerLock() readerLock}. * {@code flipPhase()} will return only after all writer critical sections (protected by * {@link WriterReaderPhaser#writerCriticalSectionEnter() writerCriticalSectionEnter} and * {@link WriterReaderPhaser#writerCriticalSectionExit writerCriticalSectionExit()}) that may have been * in flight when the {@code flipPhase()} call was made have completed. * <p> * No actual writer critical section activity is required for {@code flipPhase()} to * succeed. * <p> * However, {@code flipPhase()} is lock-free with respect to calls to * {@link WriterReaderPhaser#writerCriticalSectionEnter()} and * {@link WriterReaderPhaser#writerCriticalSectionExit writerCriticalSectionExit()}. It may spin-wait * for active writer critical section code to complete. */ public void flipPhase() { flipPhase(0); } }
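The class Javadoc's double-buffered example can be assembled into a small self-contained sketch. The class and field names below are invented for illustration; this is not the library's Recorder implementation, just the documented writer/reader discipline gathered in one place.

import org.HdrHistogram.Histogram;
import org.HdrHistogram.WriterReaderPhaser;

// Illustrative only: a minimal single-purpose interval recorder built on WriterReaderPhaser.
public class PhasedIntervalRecorder {
    private final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser();
    private volatile Histogram activeHistogram = new Histogram(3600000000L, 3);
    private Histogram inactiveHistogram = new Histogram(3600000000L, 3);

    // Writer side: wait-free on platforms with wait-free atomic increment.
    public void recordValue(long value) {
        long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter();
        try {
            activeHistogram.recordValue(value);
        } finally {
            recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter);
        }
    }

    // Reader side: swap buffers, then flip the phase before touching the inactive copy.
    public synchronized Histogram getIntervalHistogram() {
        try {
            recordingPhaser.readerLock();
            inactiveHistogram.reset();
            Histogram temp = inactiveHistogram;
            inactiveHistogram = activeHistogram;
            activeHistogram = temp;
            recordingPhaser.flipPhase();
            // inactiveHistogram content is now guaranteed stable; hand back a copy.
            return inactiveHistogram.copy();
        } finally {
            recordingPhaser.readerUnlock();
        }
    }
}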
13,219
47.602941
120
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/DoubleLogarithmicIterator.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.util.Iterator; /** * Used for iterating through {@link DoubleHistogram} values values in logarithmically increasing levels. The * iteration is performed in steps that start at <i>valueUnitsInFirstBucket</i> and increase exponentially according to * <i>logBase</i>, terminating when all recorded histogram values are exhausted. Note that each iteration "bucket" * includes values up to and including the next bucket boundary value. */ public class DoubleLogarithmicIterator implements Iterator<DoubleHistogramIterationValue> { private final LogarithmicIterator integerLogarithmicIterator; private final DoubleHistogramIterationValue iterationValue; DoubleHistogram histogram; /** * Reset iterator for re-use in a fresh iteration over the same histogram data set. * @param valueUnitsInFirstBucket the size (in value units) of the first value bucket step * @param logBase the multiplier by which the bucket size is expanded in each iteration step. */ public void reset(final double valueUnitsInFirstBucket, final double logBase) { integerLogarithmicIterator.reset( (long) (valueUnitsInFirstBucket * histogram.getDoubleToIntegerValueConversionRatio()), logBase ); } /** * @param histogram The histogram this iterator will operate on * @param valueUnitsInFirstBucket the size (in value units) of the first value bucket step * @param logBase the multiplier by which the bucket size is expanded in each iteration step. */ public DoubleLogarithmicIterator(final DoubleHistogram histogram, final double valueUnitsInFirstBucket, final double logBase) { this.histogram = histogram; integerLogarithmicIterator = new LogarithmicIterator( histogram.integerValuesHistogram, (long) (valueUnitsInFirstBucket * histogram.getDoubleToIntegerValueConversionRatio()), logBase ); iterationValue = new DoubleHistogramIterationValue(integerLogarithmicIterator.currentIterationValue); } @Override public boolean hasNext() { return integerLogarithmicIterator.hasNext(); } @Override public DoubleHistogramIterationValue next() { integerLogarithmicIterator.next(); return iterationValue; } @Override public void remove() { integerLogarithmicIterator.remove(); } }
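A hedged usage sketch of the logarithmic iteration this class backs (the recorded values, the 1.0-unit first bucket, and the log base of 2.0 are illustrative): each step's upper bound grows by the logBase multiplier until all recorded values are covered.

import org.HdrHistogram.DoubleHistogram;
import org.HdrHistogram.DoubleHistogramIterationValue;

public class DoubleLogarithmicIterationExample {
    public static void main(String[] args) {
        DoubleHistogram histogram = new DoubleHistogram(3600000000000L, 3);
        histogram.recordValue(0.5);
        histogram.recordValue(7.25);
        histogram.recordValue(500.0);

        // Buckets start 1.0 value unit wide and double on each step (logBase = 2.0);
        // this Iterable is backed by DoubleLogarithmicIterator.
        for (DoubleHistogramIterationValue v : histogram.logarithmicBucketValues(1.0, 2.0)) {
            System.out.println("<= " + v.getValueIteratedTo()
                    + " : " + v.getCountAddedInThisIterationStep());
        }
    }
}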
2,670
38.865672
119
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/HistogramLogScanner.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.io.*; import java.nio.ByteBuffer; import java.util.Locale; import java.util.Scanner; import java.util.zip.DataFormatException; public class HistogramLogScanner implements Closeable { // can't use lambdas, and anyway we need to let the handler take the exception public interface EncodableHistogramSupplier { EncodableHistogram read() throws DataFormatException; } /** * Handles log events, return true to stop processing. */ public interface EventHandler { boolean onComment(String comment); boolean onBaseTime(double secondsSinceEpoch); boolean onStartTime(double secondsSinceEpoch); /** * A lazy reader is provided to allow fast skipping of bulk of work where tag or timestamp are to be used as * a basis for filtering the {@link EncodableHistogram} anyway. The reader is to be called only once. * * @param tag histogram tag or null if none exist * @param timestamp logged timestamp * @param length logged interval length * @param lazyReader to be called if the histogram needs to be deserialized, given the tag/timestamp etc. * @return true to stop processing, false to continue. */ boolean onHistogram(String tag, double timestamp, double length, EncodableHistogramSupplier lazyReader); boolean onException(Throwable t); } private static class LazyHistogramReader implements EncodableHistogramSupplier { private final Scanner scanner; private boolean gotIt = true; private LazyHistogramReader(Scanner scanner) { this.scanner = scanner; } private void allowGet() { gotIt = false; } @Override public EncodableHistogram read() throws DataFormatException { // prevent double calls to this method if (gotIt) { throw new IllegalStateException(); } gotIt = true; final String compressedPayloadString = scanner.next(); final ByteBuffer buffer = ByteBuffer.wrap(Base64Helper.parseBase64Binary(compressedPayloadString)); EncodableHistogram histogram = EncodableHistogram.decodeFromCompressedByteBuffer(buffer, 0); return histogram; } } private final LazyHistogramReader lazyReader; protected final Scanner scanner; /** * Constructs a new HistogramLogReader that produces intervals read from the specified file name. * @param inputFileName The name of the file to read from * @throws java.io.FileNotFoundException when unable to find inputFileName */ public HistogramLogScanner(final String inputFileName) throws FileNotFoundException { this(new Scanner(new File(inputFileName))); } /** * Constructs a new HistogramLogReader that produces intervals read from the specified InputStream. Note that * log readers constructed through this constructor do not assume ownership of stream and will not close it on * {@link #close()}. * * @param inputStream The InputStream to read from */ public HistogramLogScanner(final InputStream inputStream) { this(new Scanner(inputStream)); } /** * Constructs a new HistogramLogReader that produces intervals read from the specified file. 
* @param inputFile The File to read from * @throws java.io.FileNotFoundException when unable to find inputFile */ public HistogramLogScanner(final File inputFile) throws FileNotFoundException { this(new Scanner(inputFile)); } private HistogramLogScanner(Scanner scanner) { this.scanner = scanner; this.lazyReader = new LazyHistogramReader(scanner); initScanner(); } private void initScanner() { scanner.useLocale(Locale.US); scanner.useDelimiter("[ ,\\r\\n]"); } /** * Close underlying scanner. */ @Override public void close() { scanner.close(); } public void process(EventHandler handler) { while (scanner.hasNextLine()) { try { if (scanner.hasNext("\\#.*")) { // comment line. // Look for explicit start time or base time notes in comments: if (scanner.hasNext("#\\[StartTime:")) { scanner.next("#\\[StartTime:"); if (scanner.hasNextDouble()) { double startTimeSec = scanner.nextDouble(); // start time represented as seconds since epoch if (handler.onStartTime(startTimeSec)) { return; } } } else if (scanner.hasNext("#\\[BaseTime:")) { scanner.next("#\\[BaseTime:"); if (scanner.hasNextDouble()) { double baseTimeSec = scanner.nextDouble(); // base time represented as seconds since epoch if (handler.onBaseTime(baseTimeSec)) { return; } } } else if (handler.onComment(scanner.next("\\#.*"))) { return; } continue; } if (scanner.hasNext("\"StartTimestamp\".*")) { // Legend line continue; } String tagString = null; if (scanner.hasNext("Tag\\=.*")) { tagString = scanner.next("Tag\\=.*").substring(4); } // Decode: startTimestamp, intervalLength, maxTime, histogramPayload final double logTimeStampInSec = scanner.nextDouble(); // Timestamp is expected to be in seconds final double intervalLengthSec = scanner.nextDouble(); // Timestamp length is expect to be in seconds scanner.nextDouble(); // Skip maxTime field, as max time can be deduced from the histogram. lazyReader.allowGet(); if (handler.onHistogram(tagString, logTimeStampInSec, intervalLengthSec, lazyReader)) { return; } } catch (Throwable ex) { if (handler.onException(ex)) { return; } } finally { scanner.nextLine(); // Move to next line. } } } /** * Indicates whether or not additional intervals may exist in the log * * @return true if additional intervals may exist in the log */ public boolean hasNextLine() { return scanner.hasNextLine(); } }
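A hedged sketch of driving the scanner with an EventHandler (the file name and the tag filter are made up for illustration): the handler skips non-matching histograms cheaply and calls the lazy reader at most once per interval, as the onHistogram Javadoc above requires.

import org.HdrHistogram.EncodableHistogram;
import org.HdrHistogram.HistogramLogScanner;

import java.io.FileNotFoundException;
import java.util.zip.DataFormatException;

public class LogScannerExample {
    public static void main(String[] args) throws FileNotFoundException {
        HistogramLogScanner scanner = new HistogramLogScanner("example.hlog"); // hypothetical log file
        scanner.process(new HistogramLogScanner.EventHandler() {
            public boolean onComment(String comment) { return false; }
            public boolean onBaseTime(double secondsSinceEpoch) { return false; }
            public boolean onStartTime(double secondsSinceEpoch) { return false; }

            public boolean onHistogram(String tag, double timestamp, double length,
                                       HistogramLogScanner.EncodableHistogramSupplier lazyReader) {
                if (!"A".equals(tag)) {
                    return false; // skip cheaply without deserializing the payload
                }
                try {
                    EncodableHistogram histogram = lazyReader.read(); // called exactly once
                    System.out.println(timestamp + ": max = " + histogram.getMaxValueAsDouble());
                } catch (DataFormatException e) {
                    return true; // stop on a corrupt payload
                }
                return false;
            }

            public boolean onException(Throwable t) { return true; }
        });
        scanner.close();
    }
}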
7,180
35.085427
120
java
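The HistogramLogScanner row above defines a push-style API: callers supply an EventHandler that receives comments, start/base times, and lazily-decoded histograms. The following minimal sketch is not part of the dataset row; it uses only members shown in that file plus EncodableHistogram.getMaxValueAsDouble(), which is assumed from the wider HdrHistogram API, and the log file name "my.hlog" is a placeholder.

import org.HdrHistogram.EncodableHistogram;
import org.HdrHistogram.HistogramLogScanner;

import java.io.FileNotFoundException;
import java.util.zip.DataFormatException;

public class ScanExample {
    public static void main(String[] args) throws FileNotFoundException {
        // Open a histogram log file (placeholder name) and walk its entries.
        try (HistogramLogScanner scanner = new HistogramLogScanner("my.hlog")) {
            scanner.process(new HistogramLogScanner.EventHandler() {
                @Override
                public boolean onComment(String comment) {
                    return false; // false == keep processing
                }

                @Override
                public boolean onBaseTime(double secondsSinceEpoch) {
                    return false;
                }

                @Override
                public boolean onStartTime(double secondsSinceEpoch) {
                    return false;
                }

                @Override
                public boolean onHistogram(String tag, double timestamp, double length,
                                           HistogramLogScanner.EncodableHistogramSupplier lazyReader) {
                    try {
                        // Only deserialize the payload when the entry is actually wanted:
                        EncodableHistogram h = lazyReader.read();
                        System.out.println(tag + " @ " + timestamp + " max=" + h.getMaxValueAsDouble());
                    } catch (DataFormatException e) {
                        return true; // stop on a malformed payload
                    }
                    return false;
                }

                @Override
                public boolean onException(Throwable t) {
                    return true; // stop on any parsing error
                }
            });
        }
    }
}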
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/PackedConcurrentDoubleHistogram.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.nio.ByteBuffer; import java.util.zip.DataFormatException; /** * <h3>A floating point values High Dynamic Range (HDR) Histogram that uses a packed internal representation and * supports safe concurrent recording operations.</h3> * <p> * A {@link PackedConcurrentDoubleHistogram} is a variant of {@link DoubleHistogram} that guarantees * lossless recording of values into the histogram even when the histogram is updated by multiple threads, and * supports auto-resize and auto-ranging operations that may occur concurrently as a result of recording operations. * <p> * {@link PackedConcurrentDoubleHistogram} tracks value counts in a packed internal representation optimized * for typical histogram recoded values are sparse in the value range and tend to be incremented in small unit counts. * This packed representation tends to require significantly smaller amounts of storage when compared to unpacked * representations, but can incur additional recording cost due to resizing and repacking operations that may * occur as previously unrecorded values are encountered. * <p> * It is important to note that concurrent recording, auto-sizing, and value shifting are the only thread-safe behaviors * provided by {@link PackedConcurrentDoubleHistogram}, and that it is not otherwise synchronized. Specifically, {@link * PackedConcurrentDoubleHistogram} provides no implicit synchronization that would prevent the contents of the histogram * from changing during queries, iterations, copies, or addition operations on the histogram. Callers wishing to make * potentially concurrent, multi-threaded updates that would safely work in the presence of queries, copies, or * additions of histogram objects should either take care to externally synchronize and/or order their access, * use the {@link DoubleRecorder} or {@link SingleWriterDoubleRecorder} which are intended for this purpose. * <p> * {@link PackedConcurrentDoubleHistogram} supports the recording and analyzing sampled data value counts across a * configurable dynamic range of floating point (double) values, with configurable value precision within the range. * Dynamic range is expressed as a ratio between the highest and lowest non-zero values trackable within the histogram * at any given time. Value precision is expressed as the number of significant [decimal] digits in the value recording, * and provides control over value quantization behavior across the value range and the subsequent value resolution at * any given level. * <p> * Auto-ranging: Unlike integer value based histograms, the specific value range tracked by a {@link * PackedConcurrentDoubleHistogram} is not specified upfront. Only the dynamic range of values that the histogram can cover is * (optionally) specified. E.g. When a {@link PackedConcurrentDoubleHistogram} is created to track a dynamic range of * 3600000000000 (enough to track values from a nanosecond to an hour), values could be recorded into into it in any * consistent unit of time as long as the ratio between the highest and lowest non-zero values stays within the * specified dynamic range, so recording in units of nanoseconds (1.0 thru 3600000000000.0), milliseconds (0.000001 * thru 3600000.0) seconds (0.000000001 thru 3600.0), hours (1/3.6E12 thru 1.0) will all work just as well. 
* <p> * Auto-resizing: When constructed with no specified dynamic range (or when auto-resize is turned on with {@link * PackedConcurrentDoubleHistogram#setAutoResize}) a {@link PackedConcurrentDoubleHistogram} will auto-resize its dynamic range to * include recorded values as they are encountered. Note that recording calls that cause auto-resizing may take * longer to execute, as resizing incurs allocation and copying of internal data structures. * <p> * Attempts to record non-zero values that range outside of the specified dynamic range (or exceed the limits of * of dynamic range when auto-resizing) may results in {@link ArrayIndexOutOfBoundsException} exceptions, either * due to overflow or underflow conditions. These exceptions will only be thrown if recording the value would have * resulted in discarding or losing the required value precision of values already recorded in the histogram. * <p> * See package description for {@link org.HdrHistogram} for details. */ public class PackedConcurrentDoubleHistogram extends ConcurrentDoubleHistogram { /** * Construct a new auto-resizing DoubleHistogram using a precision stated as a number of significant decimal * digits. * * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal * digits to which the histogram will maintain value resolution and * separation. Must be a non-negative integer between 0 and 5. */ public PackedConcurrentDoubleHistogram(final int numberOfSignificantValueDigits) { this(2, numberOfSignificantValueDigits); setAutoResize(true); } /** * Construct a new DoubleHistogram with the specified dynamic range (provided in {@code highestToLowestValueRatio}) * and using a precision stated as a number of significant decimal digits. * * @param highestToLowestValueRatio specifies the dynamic range to use * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal * digits to which the histogram will maintain value resolution and * separation. Must be a non-negative integer between 0 and 5. */ public PackedConcurrentDoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits) { this(highestToLowestValueRatio, numberOfSignificantValueDigits, PackedConcurrentHistogram.class); } /** * Construct a {@link PackedConcurrentDoubleHistogram} with the same range settings as a given source, * duplicating the source's start/end timestamps (but NOT it's contents) * @param source The source histogram to duplicate */ public PackedConcurrentDoubleHistogram(final DoubleHistogram source) { super(source); } PackedConcurrentDoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits, final Class<? extends AbstractHistogram> internalCountsHistogramClass) { super(highestToLowestValueRatio, numberOfSignificantValueDigits, internalCountsHistogramClass); } PackedConcurrentDoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits, final Class<? extends AbstractHistogram> internalCountsHistogramClass, AbstractHistogram internalCountsHistogram) { super( highestToLowestValueRatio, numberOfSignificantValueDigits, internalCountsHistogramClass, internalCountsHistogram ); } /** * Construct a new ConcurrentDoubleHistogram by decoding it from a ByteBuffer. 
* @param buffer The buffer to decode from * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high * @return The newly constructed ConcurrentDoubleHistogram */ public static PackedConcurrentDoubleHistogram decodeFromByteBuffer( final ByteBuffer buffer, final long minBarForHighestToLowestValueRatio) { try { int cookie = buffer.getInt(); if (!isNonCompressedDoubleHistogramCookie(cookie)) { throw new IllegalArgumentException("The buffer does not contain a DoubleHistogram"); } PackedConcurrentDoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer, PackedConcurrentDoubleHistogram.class, PackedConcurrentHistogram.class, minBarForHighestToLowestValueRatio); return histogram; } catch (DataFormatException ex) { throw new RuntimeException(ex); } } /** * Construct a new ConcurrentDoubleHistogram by decoding it from a compressed form in a ByteBuffer. * @param buffer The buffer to decode from * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high * @return The newly constructed ConcurrentDoubleHistogram * @throws DataFormatException on error parsing/decompressing the buffer */ public static PackedConcurrentDoubleHistogram decodeFromCompressedByteBuffer( final ByteBuffer buffer, final long minBarForHighestToLowestValueRatio) throws DataFormatException { int cookie = buffer.getInt(); if (!isCompressedDoubleHistogramCookie(cookie)) { throw new IllegalArgumentException("The buffer does not contain a compressed DoubleHistogram"); } PackedConcurrentDoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer, PackedConcurrentDoubleHistogram.class, PackedConcurrentHistogram.class, minBarForHighestToLowestValueRatio); return histogram; } }
9,626
58.795031
130
java
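A minimal usage sketch for the PackedConcurrentDoubleHistogram row above, not part of the dataset: it constructs the auto-resizing variant, records values, and round-trips the compressed wire format via decodeFromCompressedByteBuffer from that file. recordValue, getValueAtPercentile, getTotalCount, getNeededByteBufferCapacity and encodeIntoCompressedByteBuffer are assumed from the DoubleHistogram API referenced throughout these records.

import org.HdrHistogram.PackedConcurrentDoubleHistogram;

import java.nio.ByteBuffer;
import java.util.zip.DataFormatException;

public class PackedConcurrentExample {
    public static void main(String[] args) throws DataFormatException {
        // Auto-resizing histogram with 3 significant decimal digits of precision.
        PackedConcurrentDoubleHistogram histogram = new PackedConcurrentDoubleHistogram(3);

        // Recording is safe from multiple threads; a single loop stands in for them here.
        for (int i = 1; i <= 10_000; i++) {
            histogram.recordValue(i * 0.1); // e.g. latencies in milliseconds
        }

        System.out.println("p99 = " + histogram.getValueAtPercentile(99.0));

        // Round-trip through the compressed form handled by the decode method above.
        ByteBuffer buffer = ByteBuffer.allocate(histogram.getNeededByteBufferCapacity());
        histogram.encodeIntoCompressedByteBuffer(buffer);
        buffer.flip();
        PackedConcurrentDoubleHistogram decoded =
                PackedConcurrentDoubleHistogram.decodeFromCompressedByteBuffer(buffer, 2);
        System.out.println("decoded total count = " + decoded.getTotalCount());
    }
}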
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/IntCountsHistogram.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.io.IOException; import java.io.ObjectInputStream; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.zip.DataFormatException; /** * <h3>A High Dynamic Range (HDR) Histogram using an <b><code>int</code></b> count type </h3> * <p> * See package description for {@link org.HdrHistogram} for details. */ public class IntCountsHistogram extends AbstractHistogram { long totalCount; int[] counts; int normalizingIndexOffset; @Override long getCountAtIndex(final int index) { return counts[normalizeIndex(index, normalizingIndexOffset, countsArrayLength)]; } @Override long getCountAtNormalizedIndex(final int index) { return counts[index]; } @Override void incrementCountAtIndex(final int index) { int normalizedIndex = normalizeIndex(index, normalizingIndexOffset, countsArrayLength); int currentCount = counts[normalizedIndex]; int newCount = currentCount + 1; if (newCount < 0) { throw new IllegalStateException("would overflow integer count"); } counts[normalizedIndex] = newCount; } @Override void addToCountAtIndex(final int index, final long value) { int normalizedIndex = normalizeIndex(index, normalizingIndexOffset, countsArrayLength); long currentCount = counts[normalizedIndex]; long newCount = (currentCount + value); if ((newCount < Integer.MIN_VALUE) || (newCount > Integer.MAX_VALUE)) { throw new IllegalStateException("would overflow integer count"); } counts[normalizedIndex] = (int) newCount; } @Override void setCountAtIndex(int index, long value) { setCountAtNormalizedIndex(normalizeIndex(index, normalizingIndexOffset, countsArrayLength), value); } @Override void setCountAtNormalizedIndex(int index, long value) { if ((value < 0) || (value > Integer.MAX_VALUE)) { throw new IllegalStateException("would overflow integer count"); } counts[index] = (int) value; } @Override int getNormalizingIndexOffset() { return normalizingIndexOffset; } @Override void setNormalizingIndexOffset(int normalizingIndexOffset) { this.normalizingIndexOffset = normalizingIndexOffset; } @Override void setIntegerToDoubleValueConversionRatio(double integerToDoubleValueConversionRatio) { nonConcurrentSetIntegerToDoubleValueConversionRatio(integerToDoubleValueConversionRatio); } @Override void shiftNormalizingIndexByOffset(int offsetToAdd, boolean lowestHalfBucketPopulated, double newIntegerToDoubleValueConversionRatio) { nonConcurrentNormalizingIndexShift(offsetToAdd, lowestHalfBucketPopulated); } @Override void clearCounts() { java.util.Arrays.fill(counts, 0); totalCount = 0; } @Override public IntCountsHistogram copy() { IntCountsHistogram copy = new IntCountsHistogram(this); copy.add(this); return copy; } @Override public IntCountsHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) { IntCountsHistogram toHistogram = new IntCountsHistogram(this); toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); return toHistogram; } @Override public long getTotalCount() { return totalCount; } @Override void setTotalCount(final long totalCount) { this.totalCount = totalCount; } @Override void incrementTotalCount() { totalCount++; } @Override void addToTotalCount(long value) { totalCount += value; } @Override int _getEstimatedFootprintInBytes() { return (512 + (4 * counts.length)); } @Override void resize(long newHighestTrackableValue) 
{ int oldNormalizedZeroIndex = normalizeIndex(0, normalizingIndexOffset, countsArrayLength); establishSize(newHighestTrackableValue); int countsDelta = countsArrayLength - counts.length; counts = Arrays.copyOf(counts, countsArrayLength); if (oldNormalizedZeroIndex != 0) { // We need to shift the stuff from the zero index and up to the end of the array: int newNormalizedZeroIndex = oldNormalizedZeroIndex + countsDelta; int lengthToCopy = (countsArrayLength - countsDelta) - oldNormalizedZeroIndex; System.arraycopy(counts, oldNormalizedZeroIndex, counts, newNormalizedZeroIndex, lengthToCopy); Arrays.fill(counts, oldNormalizedZeroIndex, newNormalizedZeroIndex, 0); } } /** * Construct an auto-resizing IntCountsHistogram with a lowest discernible value of 1 and an auto-adjusting * highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2). * * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public IntCountsHistogram(final int numberOfSignificantValueDigits) { this(1, 2, numberOfSignificantValueDigits); setAutoResize(true); } /** * Construct a IntCountsHistogram given the Highest value to be tracked and a number of significant decimal digits. The * histogram will be constructed to implicitly track (distinguish from 0) values as low as 1. * * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive * integer that is {@literal >=} 2. * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public IntCountsHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) { this(1, highestTrackableValue, numberOfSignificantValueDigits); } /** * Construct a IntCountsHistogram given the Lowest and Highest values to be tracked and a number of significant * decimal digits. Providing a lowestDiscernibleValue is useful is situations where the units used * for the histogram's values are much smaller that the minimal accuracy required. E.g. when tracking * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the * proper value for lowestDiscernibleValue would be 1000. * * @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram. * Must be a positive integer that is {@literal >=} 1. May be internally rounded * down to nearest power of 2. * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive * integer that is {@literal >=} (2 * lowestDiscernibleValue). * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. 
*/ public IntCountsHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, final int numberOfSignificantValueDigits) { super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits); counts = new int[countsArrayLength]; wordSizeInBytes = 4; } /** * Construct a histogram with the same range settings as a given source histogram, * duplicating the source's start/end timestamps (but NOT it's contents) * @param source The source histogram to duplicate */ public IntCountsHistogram(final AbstractHistogram source) { super(source); counts = new int[countsArrayLength]; wordSizeInBytes = 4; } /** * Construct a new histogram by decoding it from a ByteBuffer. * @param buffer The buffer to decode from * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high * @return The newly constructed histogram */ public static IntCountsHistogram decodeFromByteBuffer(final ByteBuffer buffer, final long minBarForHighestTrackableValue) { return decodeFromByteBuffer(buffer, IntCountsHistogram.class, minBarForHighestTrackableValue); } /** * Construct a new histogram by decoding it from a compressed form in a ByteBuffer. * @param buffer The buffer to decode from * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high * @return The newly constructed histogram * @throws DataFormatException on error parsing/decompressing the buffer */ public static IntCountsHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer, final long minBarForHighestTrackableValue) throws DataFormatException { return decodeFromCompressedByteBuffer(buffer, IntCountsHistogram.class, minBarForHighestTrackableValue); } /** * Construct a new IntCountsHistogram by decoding it from a String containing a base64 encoded * compressed histogram representation. * * @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram * @return A IntCountsHistogram decoded from the string * @throws DataFormatException on error parsing/decompressing the input */ public static IntCountsHistogram fromString(final String base64CompressedHistogramString) throws DataFormatException { return decodeFromCompressedByteBuffer( ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)), 0); } private void readObject(final ObjectInputStream o) throws IOException, ClassNotFoundException { o.defaultReadObject(); } }
10,945
40.462121
123
java
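A short sketch for the IntCountsHistogram row above (not part of the dataset), illustrating the int-per-bucket trade-off its guards enforce. recordValue, recordValueWithCount and getValueAtPercentile are assumed from the AbstractHistogram public API, which is not shown in this record.

import org.HdrHistogram.IntCountsHistogram;

public class IntCountsExample {
    public static void main(String[] args) {
        // Track values from 1 up to one hour in microseconds, 3 significant digits,
        // using 4-byte bucket counters instead of the long counters of Histogram.
        IntCountsHistogram histogram = new IntCountsHistogram(3_600_000_000L, 3);

        histogram.recordValue(42);                   // single observation
        histogram.recordValueWithCount(1_000, 500);  // 500 observations of the same value

        System.out.println("median = " + histogram.getValueAtPercentile(50.0));

        // Per-bucket counts are ints: pushing one bucket past Integer.MAX_VALUE
        // trips the "would overflow integer count" guard seen in the source above.
        try {
            histogram.recordValueWithCount(1_000, Integer.MAX_VALUE);
        } catch (IllegalStateException expected) {
            System.out.println("overflow guard fired: " + expected.getMessage());
        }
    }
}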
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/ShortCountsHistogram.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.io.IOException; import java.io.ObjectInputStream; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.zip.DataFormatException; /** * <h3>A High Dynamic Range (HDR) Histogram using a <b><code>short</code></b> count type </h3> * <p> * See package description for {@link org.HdrHistogram} for details. */ public class ShortCountsHistogram extends AbstractHistogram { long totalCount; short[] counts; int normalizingIndexOffset; @Override long getCountAtIndex(final int index) { return counts[normalizeIndex(index, normalizingIndexOffset, countsArrayLength)]; } @Override long getCountAtNormalizedIndex(final int index) { return counts[index]; } @Override void incrementCountAtIndex(final int index) { int normalizedIndex = normalizeIndex(index, normalizingIndexOffset, countsArrayLength); short currentCount = counts[normalizedIndex]; short newCount = (short) (currentCount + 1); if (newCount < 0) { throw new IllegalStateException("would overflow short integer count"); } counts[normalizedIndex] = newCount; } @Override void addToCountAtIndex(final int index, final long value) { int normalizedIndex = normalizeIndex(index, normalizingIndexOffset, countsArrayLength); long currentCount = counts[normalizedIndex]; long newCount = (currentCount + value); if ((newCount < Short.MIN_VALUE) || (newCount > Short.MAX_VALUE)) { throw new IllegalStateException("would overflow short integer count"); } counts[normalizedIndex] = (short) newCount; } @Override void setCountAtIndex(int index, long value) { setCountAtNormalizedIndex(normalizeIndex(index, normalizingIndexOffset, countsArrayLength), value); } @Override void setCountAtNormalizedIndex(int index, long value) { if ((value < 0) || (value > Short.MAX_VALUE)) { throw new IllegalStateException("would overflow short integer count"); } counts[index] = (short) value; } @Override int getNormalizingIndexOffset() { return normalizingIndexOffset; } @Override void setNormalizingIndexOffset(int normalizingIndexOffset) { this.normalizingIndexOffset = normalizingIndexOffset; } @Override void setIntegerToDoubleValueConversionRatio(double integerToDoubleValueConversionRatio) { nonConcurrentSetIntegerToDoubleValueConversionRatio(integerToDoubleValueConversionRatio); } @Override void shiftNormalizingIndexByOffset(int offsetToAdd, boolean lowestHalfBucketPopulated, double newIntegerToDoubleValueConversionRatio) { nonConcurrentNormalizingIndexShift(offsetToAdd, lowestHalfBucketPopulated); } @Override void clearCounts() { java.util.Arrays.fill(counts, (short) 0); totalCount = 0; } @Override public ShortCountsHistogram copy() { ShortCountsHistogram copy = new ShortCountsHistogram(this); copy.add(this); return copy; } @Override public ShortCountsHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) { ShortCountsHistogram toHistogram = new ShortCountsHistogram(this); toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); return toHistogram; } @Override public long getTotalCount() { return totalCount; } @Override void setTotalCount(final long totalCount) { this.totalCount = totalCount; } @Override void incrementTotalCount() { totalCount++; } @Override void addToTotalCount(long value) { totalCount += value; } @Override int _getEstimatedFootprintInBytes() { return (512 + (2 * counts.length)); 
} @Override void resize(long newHighestTrackableValue) { int oldNormalizedZeroIndex = normalizeIndex(0, normalizingIndexOffset, countsArrayLength); establishSize(newHighestTrackableValue); int countsDelta = countsArrayLength - counts.length; counts = Arrays.copyOf(counts, countsArrayLength); if (oldNormalizedZeroIndex != 0) { // We need to shift the stuff from the zero index and up to the end of the array: int newNormalizedZeroIndex = oldNormalizedZeroIndex + countsDelta; int lengthToCopy = (countsArrayLength - countsDelta) - oldNormalizedZeroIndex; System.arraycopy(counts, oldNormalizedZeroIndex, counts, newNormalizedZeroIndex, lengthToCopy); Arrays.fill(counts, oldNormalizedZeroIndex, newNormalizedZeroIndex, (short) 0); } } /** * Construct an auto-resizing ShortCountsHistogram with a lowest discernible value of 1 and an auto-adjusting * highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2). * * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public ShortCountsHistogram(final int numberOfSignificantValueDigits) { this(1, 2, numberOfSignificantValueDigits); setAutoResize(true); } /** * Construct a ShortCountsHistogram given the Highest value to be tracked and a number of significant decimal * digits. The histogram will be constructed to implicitly track (distinguish from 0) values as low as 1. * * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive * integer that is {@literal >=} 2. * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public ShortCountsHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) { this(1, highestTrackableValue, numberOfSignificantValueDigits); } /** * Construct a ShortCountsHistogram given the Lowest and Highest values to be tracked and a number of significant * decimal digits. Providing a lowestDiscernibleValue is useful is situations where the units used * for the histogram's values are much smaller that the minimal accuracy required. E.g. when tracking * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the * proper value for lowestDiscernibleValue would be 1000. * * @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram. * Must be a positive integer that is {@literal >=} 1. May be internally rounded * down to nearest power of 2. * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive * integer that is {@literal >=} (2 * lowestDiscernibleValue). * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. 
*/ public ShortCountsHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, final int numberOfSignificantValueDigits) { super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits); counts = new short[countsArrayLength]; wordSizeInBytes = 2; } /** * Construct a histogram with the same range settings as a given source histogram, * duplicating the source's start/end timestamps (but NOT it's contents) * @param source The source histogram to duplicate */ public ShortCountsHistogram(final AbstractHistogram source) { super(source); counts = new short[countsArrayLength]; wordSizeInBytes = 2; } /** * Construct a new histogram by decoding it from a ByteBuffer. * @param buffer The buffer to decode from * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high * @return The newly constructed histogram */ public static ShortCountsHistogram decodeFromByteBuffer(final ByteBuffer buffer, final long minBarForHighestTrackableValue) { return decodeFromByteBuffer(buffer, ShortCountsHistogram.class, minBarForHighestTrackableValue); } /** * Construct a new histogram by decoding it from a compressed form in a ByteBuffer. * @param buffer The buffer to decode from * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high * @return The newly constructed histogram * @throws DataFormatException on error parsing/decompressing the buffer */ public static ShortCountsHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer, final long minBarForHighestTrackableValue) throws DataFormatException { return decodeFromCompressedByteBuffer(buffer, ShortCountsHistogram.class, minBarForHighestTrackableValue); } /** * Construct a new ShortCountsHistogram by decoding it from a String containing a base64 encoded * compressed histogram representation. * * @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram * @return A ShortCountsHistogram decoded from the string * @throws DataFormatException on error parsing/decompressing the input */ public static ShortCountsHistogram fromString(final String base64CompressedHistogramString) throws DataFormatException { return decodeFromCompressedByteBuffer( ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)), 0); } private void readObject(final ObjectInputStream o) throws IOException, ClassNotFoundException { o.defaultReadObject(); } }
11,039
41.298851
117
java
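A companion sketch for the ShortCountsHistogram row above (not part of the dataset), contrasting it with the int-count variant from the previous record. getEstimatedFootprintInBytes and recordValueWithCount are assumed from the AbstractHistogram public API.

import org.HdrHistogram.IntCountsHistogram;
import org.HdrHistogram.ShortCountsHistogram;

public class ShortCountsExample {
    public static void main(String[] args) {
        // Same range and precision; only the per-bucket counter width differs
        // (2 bytes for short counts vs 4 bytes for int counts).
        ShortCountsHistogram shortCounts = new ShortCountsHistogram(3_600_000_000L, 2);
        IntCountsHistogram intCounts = new IntCountsHistogram(3_600_000_000L, 2);

        System.out.println("short-count footprint ~ " + shortCounts.getEstimatedFootprintInBytes() + " bytes");
        System.out.println("int-count footprint   ~ " + intCounts.getEstimatedFootprintInBytes() + " bytes");

        // The trade-off: any bucket exceeding Short.MAX_VALUE (32767) occurrences
        // trips the "would overflow short integer count" guard shown above.
        try {
            shortCounts.recordValueWithCount(1_000, 40_000);
        } catch (IllegalStateException expected) {
            System.out.println("overflow guard fired: " + expected.getMessage());
        }
    }
}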
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/SynchronizedDoubleHistogram.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.io.PrintStream; import java.nio.ByteBuffer; /** * <h3>A floating point values High Dynamic Range (HDR) Histogram that is synchronized as a whole</h3> * <p> * A {@link SynchronizedDoubleHistogram} is a variant of {@link org.HdrHistogram.DoubleHistogram} that is * synchronized as a whole, such that queries, copying, and addition operations are atomic with relation to * modification on the {@link SynchronizedDoubleHistogram}, nd such that external accessors (e.g. iterations on the * histogram data) that synchronize on the {@link SynchronizedDoubleHistogram} instance can safely assume that no * modifications to the histogram data occur within their synchronized block. * <p> * It is important to note that synchronization can result in blocking recoding calls. If non-blocking recoding * operations are required, consider using {@link org.HdrHistogram.ConcurrentDoubleHistogram}, or (recommended) * {@link DoubleRecorder} which were intended for concurrent operations. * <p> * {@link SynchronizedDoubleHistogram} supports the recording and analyzing sampled data value counts across a * configurable dynamic range of floating point (double) values, with configurable value precision within the range. * Dynamic range is expressed as a ratio between the highest and lowest non-zero values trackable within the histogram * at any given time. Value precision is expressed as the number of significant [decimal] digits in the value recording, * and provides control over value quantization behavior across the value range and the subsequent value resolution at * any given level. * <p> * Auto-ranging: Unlike integer value based histograms, the specific value range tracked by a {@link * SynchronizedDoubleHistogram} is not specified upfront. Only the dynamic range of values that the histogram can * cover is (optionally) specified. E.g. When a {@link ConcurrentDoubleHistogram} is created to track a dynamic range of * 3600000000000 (enough to track values from a nanosecond to an hour), values could be recorded into into it in any * consistent unit of time as long as the ratio between the highest and lowest non-zero values stays within the * specified dynamic range, so recording in units of nanoseconds (1.0 thru 3600000000000.0), milliseconds (0.000001 * thru 3600000.0) seconds (0.000000001 thru 3600.0), hours (1/3.6E12 thru 1.0) will all work just as well. * <p> * Auto-resizing: When constructed with no specified dynamic range (or when auto-resize is turned on with {@link * SynchronizedDoubleHistogram#setAutoResize}) a {@link SynchronizedDoubleHistogram} will auto-resize its dynamic * range to include recorded values as they are encountered. Note that recording calls that cause auto-resizing may * take longer to execute, as resizing incurs allocation and copying of internal data structures. * <p> * Attempts to record non-zero values that range outside of the specified dynamic range (or exceed the limits of * of dynamic range when auto-resizing) may results in {@link ArrayIndexOutOfBoundsException} exceptions, either * due to overflow or underflow conditions. These exceptions will only be thrown if recording the value would have * resulted in discarding or losing the required value precision of values already recorded in the histogram. * <p> * See package description for {@link org.HdrHistogram} for details. 
*/ public class SynchronizedDoubleHistogram extends DoubleHistogram { /** * Construct a new auto-resizing DoubleHistogram using a precision stated as a number of significant * decimal digits. * * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public SynchronizedDoubleHistogram(final int numberOfSignificantValueDigits) { this(2, numberOfSignificantValueDigits); setAutoResize(true); } /** * Construct a new DoubleHistogram with the specified dynamic range (provided in * {@code highestToLowestValueRatio}) and using a precision stated as a number of significant * decimal digits. * * @param highestToLowestValueRatio specifies the dynamic range to use * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public SynchronizedDoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits) { super(highestToLowestValueRatio, numberOfSignificantValueDigits, SynchronizedHistogram.class); } /** * Construct a {@link SynchronizedDoubleHistogram} with the same range settings as a given source, * duplicating the source's start/end timestamps (but NOT it's contents) * @param source The source histogram to duplicate */ public SynchronizedDoubleHistogram(final ConcurrentDoubleHistogram source) { super(source); } @Override public synchronized boolean isAutoResize() { return super.isAutoResize(); } @Override public synchronized void setAutoResize(boolean autoResize) { super.setAutoResize(autoResize); } @Override public synchronized void recordValue(final double value) throws ArrayIndexOutOfBoundsException { super.recordValue(value); } @Override public synchronized void recordValueWithCount(final double value, final long count) throws ArrayIndexOutOfBoundsException { super.recordValueWithCount(value, count); } @Override public synchronized void recordValueWithExpectedInterval(final double value, final double expectedIntervalBetweenValueSamples) throws ArrayIndexOutOfBoundsException { super.recordValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples); } @Override public synchronized void reset() { super.reset(); } @Override public synchronized DoubleHistogram copy() { final DoubleHistogram targetHistogram = new DoubleHistogram(this); integerValuesHistogram.copyInto(targetHistogram.integerValuesHistogram); return targetHistogram; } @Override public synchronized DoubleHistogram copyCorrectedForCoordinatedOmission(final double expectedIntervalBetweenValueSamples) { final DoubleHistogram targetHistogram = new DoubleHistogram(this); targetHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); return targetHistogram; } @Override public synchronized void copyInto(final DoubleHistogram targetHistogram) { // Synchronize copyInto(). Avoid deadlocks by synchronizing in order of construction identity count. 
if (integerValuesHistogram.identity < targetHistogram.integerValuesHistogram.identity) { synchronized (this) { synchronized (targetHistogram) { super.copyInto(targetHistogram); } } } else { synchronized (targetHistogram) { synchronized (this) { super.copyInto(targetHistogram); } } } } @Override public synchronized void copyIntoCorrectedForCoordinatedOmission(final DoubleHistogram targetHistogram, final double expectedIntervalBetweenValueSamples) { // Synchronize copyIntoCorrectedForCoordinatedOmission(). Avoid deadlocks by synchronizing in order // of construction identity count. if (integerValuesHistogram.identity < targetHistogram.integerValuesHistogram.identity) { synchronized (this) { synchronized (targetHistogram) { super.copyIntoCorrectedForCoordinatedOmission(targetHistogram, expectedIntervalBetweenValueSamples); } } } else { synchronized (targetHistogram) { synchronized (this) { super.copyIntoCorrectedForCoordinatedOmission(targetHistogram, expectedIntervalBetweenValueSamples); } } } } @Override public synchronized void add(final DoubleHistogram fromHistogram) throws ArrayIndexOutOfBoundsException { // Synchronize add(). Avoid deadlocks by synchronizing in order of construction identity count. if (integerValuesHistogram.identity < fromHistogram.integerValuesHistogram.identity) { synchronized (this) { synchronized (fromHistogram) { super.add(fromHistogram); } } } else { synchronized (fromHistogram) { synchronized (this) { super.add(fromHistogram); } } } } @Override public synchronized void subtract(final DoubleHistogram fromHistogram) { // Synchronize subtract(). Avoid deadlocks by synchronizing in order of construction identity count. if (integerValuesHistogram.identity < fromHistogram.integerValuesHistogram.identity) { synchronized (this) { synchronized (fromHistogram) { super.subtract(fromHistogram); } } } else { synchronized (fromHistogram) { synchronized (this) { super.subtract(fromHistogram); } } } } @Override public synchronized void addWhileCorrectingForCoordinatedOmission(final DoubleHistogram fromHistogram, final double expectedIntervalBetweenValueSamples) { // Synchronize addWhileCorrectingForCoordinatedOmission(). Avoid deadlocks by synchronizing in // order of construction identity count. 
if (integerValuesHistogram.identity < fromHistogram.integerValuesHistogram.identity) { synchronized (this) { synchronized (fromHistogram) { super.addWhileCorrectingForCoordinatedOmission(fromHistogram, expectedIntervalBetweenValueSamples); } } } else { synchronized (fromHistogram) { synchronized (this) { super.addWhileCorrectingForCoordinatedOmission(fromHistogram, expectedIntervalBetweenValueSamples); } } } } @Override public synchronized boolean equals(final Object other) { if ( this == other ) { return true; } if (other instanceof DoubleHistogram) { DoubleHistogram otherHistogram = (DoubleHistogram) other; if (integerValuesHistogram.identity < otherHistogram.integerValuesHistogram.identity) { synchronized (this) { synchronized (otherHistogram) { return super.equals(otherHistogram); } } } else { synchronized (otherHistogram) { synchronized (this) { return super.equals(otherHistogram); } } } } else { synchronized (this) { return super.equals(other); } } } @Override public synchronized int hashCode() { return super.hashCode(); } @Override public synchronized long getTotalCount() { return super.getTotalCount(); } @Override public synchronized double getIntegerToDoubleValueConversionRatio() { return super.getIntegerToDoubleValueConversionRatio(); } @Override public synchronized int getNumberOfSignificantValueDigits() { return super.getNumberOfSignificantValueDigits(); } @Override public synchronized long getHighestToLowestValueRatio() { return super.getHighestToLowestValueRatio(); } @Override public synchronized double sizeOfEquivalentValueRange(final double value) { return super.sizeOfEquivalentValueRange(value); } @Override public synchronized double lowestEquivalentValue(final double value) { return super.lowestEquivalentValue(value); } @Override public synchronized double highestEquivalentValue(final double value) { return super.highestEquivalentValue(value); } @Override public synchronized double medianEquivalentValue(final double value) { return super.medianEquivalentValue(value); } @Override public synchronized double nextNonEquivalentValue(final double value) { return super.nextNonEquivalentValue(value); } @Override public synchronized boolean valuesAreEquivalent(final double value1, final double value2) { return super.valuesAreEquivalent(value1, value2); } @Override public synchronized int getEstimatedFootprintInBytes() { return super.getEstimatedFootprintInBytes(); } @Override public synchronized long getStartTimeStamp() { return super.getStartTimeStamp(); } @Override public synchronized void setStartTimeStamp(final long timeStampMsec) { super.setStartTimeStamp(timeStampMsec); } @Override public synchronized long getEndTimeStamp() { return super.getEndTimeStamp(); } @Override public synchronized void setEndTimeStamp(final long timeStampMsec) { super.setEndTimeStamp(timeStampMsec); } @Override public synchronized double getMinValue() { return super.getMinValue(); } @Override public synchronized double getMaxValue() { return super.getMaxValue(); } @Override public synchronized double getMinNonZeroValue() { return super.getMinNonZeroValue(); } @Override public synchronized double getMaxValueAsDouble() { return super.getMaxValueAsDouble(); } @Override public synchronized double getMean() { return super.getMean(); } @Override public synchronized double getStdDeviation() { return super.getStdDeviation(); } @Override public synchronized double getValueAtPercentile(final double percentile) { return super.getValueAtPercentile(percentile); } @Override public synchronized double 
getPercentileAtOrBelowValue(final double value) { return super.getPercentileAtOrBelowValue(value); } @Override public synchronized double getCountBetweenValues(final double lowValue, final double highValue) throws ArrayIndexOutOfBoundsException { return super.getCountBetweenValues(lowValue, highValue); } @Override public synchronized long getCountAtValue(final double value) throws ArrayIndexOutOfBoundsException { return super.getCountAtValue(value); } @Override public synchronized Percentiles percentiles(final int percentileTicksPerHalfDistance) { return super.percentiles(percentileTicksPerHalfDistance); } @Override public synchronized LinearBucketValues linearBucketValues(final double valueUnitsPerBucket) { return super.linearBucketValues(valueUnitsPerBucket); } @Override public synchronized LogarithmicBucketValues logarithmicBucketValues(final double valueUnitsInFirstBucket, final double logBase) { return super.logarithmicBucketValues(valueUnitsInFirstBucket, logBase); } @Override public synchronized RecordedValues recordedValues() { return super.recordedValues(); } @Override public synchronized AllValues allValues() { return super.allValues(); } @Override public synchronized void outputPercentileDistribution(final PrintStream printStream, final Double outputValueUnitScalingRatio) { super.outputPercentileDistribution(printStream, outputValueUnitScalingRatio); } @Override public synchronized void outputPercentileDistribution(final PrintStream printStream, final int percentileTicksPerHalfDistance, final Double outputValueUnitScalingRatio) { super.outputPercentileDistribution(printStream, percentileTicksPerHalfDistance, outputValueUnitScalingRatio); } @Override public synchronized void outputPercentileDistribution(final PrintStream printStream, final int percentileTicksPerHalfDistance, final Double outputValueUnitScalingRatio, final boolean useCsvFormat) { super.outputPercentileDistribution( printStream, percentileTicksPerHalfDistance, outputValueUnitScalingRatio, useCsvFormat); } @Override public synchronized int getNeededByteBufferCapacity() { return super.getNeededByteBufferCapacity(); } @Override public synchronized int encodeIntoByteBuffer(final ByteBuffer buffer) { return super.encodeIntoByteBuffer(buffer); } @Override public synchronized int encodeIntoCompressedByteBuffer( final ByteBuffer targetBuffer, final int compressionLevel) { return super.encodeIntoCompressedByteBuffer(targetBuffer, compressionLevel); } @Override public synchronized int encodeIntoCompressedByteBuffer(final ByteBuffer targetBuffer) { return super.encodeIntoCompressedByteBuffer(targetBuffer); } }
18,453
38.686022
130
java
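A minimal sketch for the SynchronizedDoubleHistogram row above (not part of the dataset): individual calls are atomic because every public method shown is synchronized, and a reader that needs a consistent multi-call snapshot synchronizes on the histogram instance, as the class Javadoc describes. Only methods listed among the overrides above are used.

import org.HdrHistogram.SynchronizedDoubleHistogram;

public class SynchronizedDoubleExample {
    public static void main(String[] args) throws InterruptedException {
        SynchronizedDoubleHistogram histogram = new SynchronizedDoubleHistogram(2);

        // A writer thread records values; each call is internally synchronized.
        Thread writer = new Thread(() -> {
            for (int i = 1; i <= 100_000; i++) {
                histogram.recordValue(Math.random() * 1_000.0);
            }
        });
        writer.start();

        // External synchronization gives a consistent view across several queries.
        synchronized (histogram) {
            double max = histogram.getMaxValue();
            double p99 = histogram.getValueAtPercentile(99.0);
            System.out.println("snapshot: max=" + max + " p99=" + p99);
        }

        writer.join();
        histogram.outputPercentileDistribution(System.out, 1.0);
    }
}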
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/SingleWriterDoubleRecorder.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.util.concurrent.atomic.AtomicLong; /** * Records floating point values, and provides stable interval {@link DoubleHistogram} samples from live recorded data * without interrupting or stalling active recording of values. Each interval histogram provided contains all * value counts accumulated since the previous interval histogram was taken. * <p> * This pattern is commonly used in logging interval histogram information while recording is ongoing. * <p> * {@link SingleWriterDoubleRecorder} expects only a single thread (the "single writer") to * call {@link SingleWriterDoubleRecorder#recordValue} or * {@link SingleWriterDoubleRecorder#recordValueWithExpectedInterval} at any point in time. * It DOES NOT support concurrent recording calls. * Recording calls are wait-free on architectures that support atomic increment operations, and * are lock-free on architectures that do not. * <p> * A common pattern for using a {@link SingleWriterDoubleRecorder} looks like this: * <br><pre><code> * SingleWriterDoubleRecorder recorder = new SingleWriterDoubleRecorder(2); // Two decimal point accuracy * DoubleHistogram intervalHistogram = null; * ... * [start of some loop construct that periodically wants to grab an interval histogram] * ... * // Get interval histogram, recycling previous interval histogram: * intervalHistogram = recorder.getIntervalHistogram(intervalHistogram); * histogramLogWriter.outputIntervalHistogram(intervalHistogram); * ... * [end of loop construct] * </code></pre> */ public class SingleWriterDoubleRecorder implements DoubleValueRecorder, IntervalHistogramProvider<DoubleHistogram> { private static AtomicLong instanceIdSequencer = new AtomicLong(1); private final long instanceId = instanceIdSequencer.getAndIncrement(); private final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser(); private volatile DoubleHistogram activeHistogram; private DoubleHistogram inactiveHistogram; /** * Construct an auto-resizing {@link SingleWriterDoubleRecorder} using a precision stated as a * number of significant decimal digits. * <p> * Depending on the valuer of the <b><code>packed</code></b> parameter {@link SingleWriterDoubleRecorder} can * be configured to track value counts in a packed internal representation optimized for typical histogram * recoded values are sparse in the value range and tend to be incremented in small unit counts. This packed * representation tends to require significantly smaller amounts of storage when compared to unpacked * representations, but can incur additional recording cost due to resizing and repacking operations that may * occur as previously unrecorded values are encountered. * * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. * @param packed Specifies whether the recorder will uses a packed internal representation or not. */ public SingleWriterDoubleRecorder(final int numberOfSignificantValueDigits, final boolean packed) { activeHistogram = packed ? 
new PackedInternalDoubleHistogram(instanceId, numberOfSignificantValueDigits) : new InternalDoubleHistogram(instanceId, numberOfSignificantValueDigits); inactiveHistogram = null; activeHistogram.setStartTimeStamp(System.currentTimeMillis()); } /** * Construct an auto-resizing {@link SingleWriterDoubleRecorder} using a precision stated as a * number of significant decimal digits. * * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public SingleWriterDoubleRecorder(final int numberOfSignificantValueDigits) { this(numberOfSignificantValueDigits, false); } /** * Construct a {@link SingleWriterDoubleRecorder} dynamic range of values to cover and a number * of significant decimal digits. * * @param highestToLowestValueRatio specifies the dynamic range to use (as a ratio) * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public SingleWriterDoubleRecorder(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits) { activeHistogram = new InternalDoubleHistogram( instanceId, highestToLowestValueRatio, numberOfSignificantValueDigits); inactiveHistogram = null; activeHistogram.setStartTimeStamp(System.currentTimeMillis()); } /** * Record a value * @param value the value to record * @throws ArrayIndexOutOfBoundsException (may throw) if value is exceeds highestTrackableValue */ public void recordValue(final double value) { long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); try { activeHistogram.recordValue(value); } finally { recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); } } /** * Record a value in the histogram (adding to the value's current count) * * @param value The value to be recorded * @param count The number of occurrences of this value to record * @throws ArrayIndexOutOfBoundsException (may throw) if value is exceeds highestTrackableValue */ public void recordValueWithCount(final double value, final long count) throws ArrayIndexOutOfBoundsException { long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); try { activeHistogram.recordValueWithCount(value, count); } finally { recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); } } /** * Record a value * <p> * To compensate for the loss of sampled values when a recorded value is larger than the expected * interval between value samples, Histogram will auto-generate an additional series of decreasingly-smaller * (down to the expectedIntervalBetweenValueSamples) value records. * <p> * See related notes {@link org.HdrHistogram.DoubleHistogram#recordValueWithExpectedInterval(double, double)} * for more explanations about coordinated omission and expected interval correction. 
* * * @param value The value to record * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add * auto-generated value records as appropriate if value is larger * than expectedIntervalBetweenValueSamples * @throws ArrayIndexOutOfBoundsException (may throw) if value is exceeds highestTrackableValue */ public void recordValueWithExpectedInterval(final double value, final double expectedIntervalBetweenValueSamples) throws ArrayIndexOutOfBoundsException { long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); try { activeHistogram.recordValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples); } finally { recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); } } @Override public synchronized DoubleHistogram getIntervalHistogram() { return getIntervalHistogram(null); } @Override public synchronized DoubleHistogram getIntervalHistogram(DoubleHistogram histogramToRecycle) { return getIntervalHistogram(histogramToRecycle, true); } @Override public synchronized DoubleHistogram getIntervalHistogram(DoubleHistogram histogramToRecycle, boolean enforceContainingInstance) { // Verify that replacement histogram can validly be used as an inactive histogram replacement: validateFitAsReplacementHistogram(histogramToRecycle, enforceContainingInstance); inactiveHistogram = histogramToRecycle; performIntervalSample(); DoubleHistogram sampledHistogram = inactiveHistogram; inactiveHistogram = null; // Once we expose the sample, we can't reuse it internally until it is recycled return sampledHistogram; } @Override public synchronized void getIntervalHistogramInto(DoubleHistogram targetHistogram) { performIntervalSample(); inactiveHistogram.copyInto(targetHistogram); } /** * Reset any value counts accumulated thus far. */ public synchronized void reset() { // the currently inactive histogram is reset each time we flip. 
So flipping twice resets both: performIntervalSample(); performIntervalSample(); } private void performIntervalSample() { try { recordingPhaser.readerLock(); // Make sure we have an inactive version to flip in: if (inactiveHistogram == null) { if (activeHistogram instanceof InternalDoubleHistogram) { inactiveHistogram = new InternalDoubleHistogram((InternalDoubleHistogram) activeHistogram); } else if (activeHistogram instanceof PackedInternalDoubleHistogram) { inactiveHistogram = new PackedInternalDoubleHistogram( instanceId, activeHistogram.getNumberOfSignificantValueDigits()); } else { throw new IllegalStateException("Unexpected internal histogram type for activeHistogram"); } } inactiveHistogram.reset(); // Swap active and inactive histograms: final DoubleHistogram tempHistogram = inactiveHistogram; inactiveHistogram = activeHistogram; activeHistogram = tempHistogram; // Mark end time of previous interval and start time of new one: long now = System.currentTimeMillis(); activeHistogram.setStartTimeStamp(now); inactiveHistogram.setEndTimeStamp(now); // Make sure we are not in the middle of recording a value on the previously active histogram: // Flip phase to make sure no recordings that were in flight pre-flip are still active: recordingPhaser.flipPhase(500000L /* yield in 0.5 msec units if needed */); } finally { recordingPhaser.readerUnlock(); } } private class InternalDoubleHistogram extends DoubleHistogram { private final long containingInstanceId; private InternalDoubleHistogram(long id, int numberOfSignificantValueDigits) { super(numberOfSignificantValueDigits); this.containingInstanceId = id; } private InternalDoubleHistogram(long id, long highestToLowestValueRatio, int numberOfSignificantValueDigits) { super(highestToLowestValueRatio, numberOfSignificantValueDigits); this.containingInstanceId = id; } private InternalDoubleHistogram(InternalDoubleHistogram source) { super(source); this.containingInstanceId = source.containingInstanceId; } } private class PackedInternalDoubleHistogram extends PackedDoubleHistogram { private final long containingInstanceId; private PackedInternalDoubleHistogram(long id, int numberOfSignificantValueDigits) { super(numberOfSignificantValueDigits); this.containingInstanceId = id; } } private void validateFitAsReplacementHistogram(DoubleHistogram replacementHistogram, boolean enforceContainingInstance) { boolean bad = true; if (replacementHistogram == null) { bad = false; } else if ((replacementHistogram instanceof InternalDoubleHistogram) && ((!enforceContainingInstance) || (((InternalDoubleHistogram) replacementHistogram).containingInstanceId == ((InternalDoubleHistogram)activeHistogram).containingInstanceId) )) { bad = false; } else if ((replacementHistogram instanceof PackedInternalDoubleHistogram) && ((!enforceContainingInstance) || (((PackedInternalDoubleHistogram) replacementHistogram).containingInstanceId == ((PackedInternalDoubleHistogram)activeHistogram).containingInstanceId) )) { bad = false; } if (bad) { throw new IllegalArgumentException("replacement histogram must have been obtained via a previous " + "getIntervalHistogram() call from this " + this.getClass().getName() +" instance"); } } }
13,873
46.841379
118
java
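The sketch below (not part of the dataset) fleshes out the interval-sampling pattern given in the SingleWriterDoubleRecorder Javadoc above: a single writer thread records values while a sampling loop periodically takes and recycles interval histograms. DoubleHistogram.getTotalCount and getValueAtPercentile are assumed from the API referenced in these records.

import org.HdrHistogram.DoubleHistogram;
import org.HdrHistogram.SingleWriterDoubleRecorder;

public class RecorderExample {
    public static void main(String[] args) throws InterruptedException {
        // Two significant decimal digits, as in the class Javadoc above.
        SingleWriterDoubleRecorder recorder = new SingleWriterDoubleRecorder(2);

        // The single writer thread (only ONE thread may record into this recorder):
        Thread writer = new Thread(() -> {
            for (int i = 0; i < 1_000_000; i++) {
                recorder.recordValue(Math.random() * 10.0);
            }
        });
        writer.start();

        // Periodic sampling loop: each call returns everything recorded since the
        // previous call, recycling the previously returned interval histogram.
        DoubleHistogram intervalHistogram = null;
        for (int interval = 0; interval < 5; interval++) {
            Thread.sleep(100);
            intervalHistogram = recorder.getIntervalHistogram(intervalHistogram);
            System.out.println("interval " + interval +
                    ": count=" + intervalHistogram.getTotalCount() +
                    " p99=" + intervalHistogram.getValueAtPercentile(99.0));
        }
        writer.join();
    }
}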
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/DoubleLinearIterator.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.util.Iterator; /** * Used for iterating through {@link DoubleHistogram} values in linear steps. The iteration is * performed in steps of <i>valueUnitsPerBucket</i> in size, terminating when all recorded histogram * values are exhausted. Note that each iteration "bucket" includes values up to and including * the next bucket boundary value. */ public class DoubleLinearIterator implements Iterator<DoubleHistogramIterationValue> { private final LinearIterator integerLinearIterator; private final DoubleHistogramIterationValue iterationValue; DoubleHistogram histogram; /** * Reset iterator for re-use in a fresh iteration over the same histogram data set. * @param valueUnitsPerBucket The size (in value units) of each bucket iteration. */ public void reset(final double valueUnitsPerBucket) { integerLinearIterator.reset((long) (valueUnitsPerBucket * histogram.getDoubleToIntegerValueConversionRatio())); } /** * @param histogram The histogram this iterator will operate on * @param valueUnitsPerBucket The size (in value units) of each bucket iteration. */ public DoubleLinearIterator(final DoubleHistogram histogram, final double valueUnitsPerBucket) { this.histogram = histogram; integerLinearIterator = new LinearIterator( histogram.integerValuesHistogram, (long) (valueUnitsPerBucket * histogram.getDoubleToIntegerValueConversionRatio()) ); iterationValue = new DoubleHistogramIterationValue(integerLinearIterator.currentIterationValue); } @Override public boolean hasNext() { return integerLinearIterator.hasNext(); } @Override public DoubleHistogramIterationValue next() { integerLinearIterator.next(); return iterationValue; } @Override public void remove() { integerLinearIterator.remove(); } }
2,138
34.65
119
java
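A small sketch for the DoubleLinearIterator row above (not part of the dataset), walking a DoubleHistogram in fixed-width buckets through the public constructor and reset method shown in that file. The DoubleHistogramIterationValue accessor names (getValueIteratedTo, getCountAddedInThisIterationStep) are assumed from the wider HdrHistogram API, since the iteration value class itself is not included in this record.

import org.HdrHistogram.DoubleHistogram;
import org.HdrHistogram.DoubleHistogramIterationValue;
import org.HdrHistogram.DoubleLinearIterator;

public class LinearIterationExample {
    public static void main(String[] args) {
        DoubleHistogram histogram = new DoubleHistogram(3);
        for (int i = 0; i < 10_000; i++) {
            histogram.recordValue(Math.random() * 100.0);
        }

        // Walk the recorded range in fixed 10.0-unit buckets.
        DoubleLinearIterator iterator = new DoubleLinearIterator(histogram, 10.0);
        while (iterator.hasNext()) {
            DoubleHistogramIterationValue v = iterator.next();
            System.out.println("<= " + v.getValueIteratedTo() +
                    " : " + v.getCountAddedInThisIterationStep());
        }

        // The same iterator instance can be re-used with a different bucket width:
        iterator.reset(25.0);
    }
}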
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/PercentileIterator.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.util.Iterator; /** * Used for iterating through histogram values according to percentile levels. The iteration is * performed in steps that start at 0% and reduce their distance to 100% according to the * <i>percentileTicksPerHalfDistance</i> parameter, ultimately reaching 100% when all recorded histogram * values are exhausted. */ public class PercentileIterator extends AbstractHistogramIterator implements Iterator<HistogramIterationValue> { int percentileTicksPerHalfDistance; double percentileLevelToIterateTo; double percentileLevelToIterateFrom; boolean reachedLastRecordedValue; /** * Reset iterator for re-use in a fresh iteration over the same histogram data set. * * @param percentileTicksPerHalfDistance The number of iteration steps per half-distance to 100%. */ public void reset(final int percentileTicksPerHalfDistance) { reset(histogram, percentileTicksPerHalfDistance); } private void reset(final AbstractHistogram histogram, final int percentileTicksPerHalfDistance) { super.resetIterator(histogram); this.percentileTicksPerHalfDistance = percentileTicksPerHalfDistance; this.percentileLevelToIterateTo = 0.0; this.percentileLevelToIterateFrom = 0.0; this.reachedLastRecordedValue = false; } /** * @param histogram The histogram this iterator will operate on * @param percentileTicksPerHalfDistance The number of equal-sized iteration steps per half-distance to 100%. */ public PercentileIterator(final AbstractHistogram histogram, final int percentileTicksPerHalfDistance) { reset(histogram, percentileTicksPerHalfDistance); } @Override public boolean hasNext() { if (super.hasNext()) return true; // We want one additional last step to 100% if (!reachedLastRecordedValue && (arrayTotalCount > 0)) { percentileLevelToIterateTo = 100.0; reachedLastRecordedValue = true; return true; } return false; } @Override void incrementIterationLevel() { percentileLevelToIterateFrom = percentileLevelToIterateTo; // The choice to maintain fixed-sized "ticks" in each half-distance to 100% [starting // from 0%], as opposed to a "tick" size that varies with each interval, was made to // make the steps easily comprehensible and readable to humans. The resulting percentile // steps are much easier to browse through in a percentile distribution output, for example. // // We calculate the number of equal-sized "ticks" that the 0-100 range will be divided // by at the current scale. The scale is determined by the percentile level we are // iterating to. The following math determines the tick size for the current scale, // and maintain a fixed tick size for the remaining "half the distance to 100%" // [from either 0% or from the previous half-distance]. When that half-distance is // crossed, the scale changes and the tick size is effectively cut in half. 
long percentileReportingTicks = percentileTicksPerHalfDistance * (long) Math.pow(2, (long) (Math.log(100.0 / (100.0 - (percentileLevelToIterateTo))) / Math.log(2)) + 1); percentileLevelToIterateTo += 100.0 / percentileReportingTicks; } @Override boolean reachedIterationLevel() { if (countAtThisValue == 0) return false; double currentPercentile = (100.0 * (double) totalCountToCurrentIndex) / arrayTotalCount; return (currentPercentile >= percentileLevelToIterateTo); } @Override double getPercentileIteratedTo() { return percentileLevelToIterateTo; } @Override double getPercentileIteratedFrom() { return percentileLevelToIterateFrom; } }
4,167
39.466019
117
java
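A small sketch of driving the PercentileIterator above directly against an integer-valued Histogram from the same package; the range, recorded values, and the tick count of 5 are arbitrary choices for illustration.

import org.HdrHistogram.Histogram;
import org.HdrHistogram.HistogramIterationValue;
import org.HdrHistogram.PercentileIterator;

public class PercentileIteratorSketch {
    public static void main(String[] args) {
        // Track values up to one hour in microseconds, 3 significant digits.
        Histogram histogram = new Histogram(3_600_000_000L, 3);
        for (int i = 1; i <= 10_000; i++) {
            histogram.recordValue(i);
        }

        // 5 ticks per half-distance: 0%, 10%, ..., 50%, then 55%, 60%, ..., with the
        // step size halving each time the remaining distance to 100% is halved.
        PercentileIterator iterator = new PercentileIterator(histogram, 5);
        while (iterator.hasNext()) {
            HistogramIterationValue v = iterator.next();
            System.out.printf("%7.3f%% <= %d (total count %d)%n",
                    v.getPercentileLevelIteratedTo(),
                    v.getValueIteratedTo(),
                    v.getTotalCountToThisValue());
        }
    }
}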
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/DoublePercentileIterator.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.util.Iterator; /** * Used for iterating through {@link DoubleHistogram} values according to percentile levels. The iteration is * performed in steps that start at 0% and reduce their distance to 100% according to the * <i>percentileTicksPerHalfDistance</i> parameter, ultimately reaching 100% when all recorded histogram * values are exhausted. */ public class DoublePercentileIterator implements Iterator<DoubleHistogramIterationValue> { private final PercentileIterator integerPercentileIterator; private final DoubleHistogramIterationValue iterationValue; DoubleHistogram histogram; /** * Reset iterator for re-use in a fresh iteration over the same histogram data set. * * @param percentileTicksPerHalfDistance The number of iteration steps per half-distance to 100%. */ public void reset(final int percentileTicksPerHalfDistance) { integerPercentileIterator.reset(percentileTicksPerHalfDistance); } /** * @param histogram The histogram this iterator will operate on * @param percentileTicksPerHalfDistance The number of iteration steps per half-distance to 100%. */ public DoublePercentileIterator(final DoubleHistogram histogram, final int percentileTicksPerHalfDistance) { this.histogram = histogram; integerPercentileIterator = new PercentileIterator( histogram.integerValuesHistogram, percentileTicksPerHalfDistance ); iterationValue = new DoubleHistogramIterationValue(integerPercentileIterator.currentIterationValue); } @Override public boolean hasNext() { return integerPercentileIterator.hasNext(); } @Override public DoubleHistogramIterationValue next() { integerPercentileIterator.next(); return iterationValue; } @Override public void remove() { integerPercentileIterator.remove(); } }
2,146
34.196721
116
java
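The same pattern applied to the floating-point wrapper above; a sketch assuming DoubleHistogramIterationValue mirrors the integer iteration value's getPercentileLevelIteratedTo() and getValueIteratedTo() accessors.

import org.HdrHistogram.DoubleHistogram;
import org.HdrHistogram.DoubleHistogramIterationValue;
import org.HdrHistogram.DoublePercentileIterator;

public class DoublePercentileIteratorSketch {
    public static void main(String[] args) {
        DoubleHistogram histogram = new DoubleHistogram(3);
        for (int i = 1; i <= 1_000; i++) {
            histogram.recordValue(i * 0.5);   // 0.5, 1.0, ..., 500.0
        }

        DoublePercentileIterator iterator = new DoublePercentileIterator(histogram, 5);
        while (iterator.hasNext()) {
            DoubleHistogramIterationValue v = iterator.next();
            System.out.printf("%7.3f%% <= %.3f%n",
                    v.getPercentileLevelIteratedTo(), v.getValueIteratedTo());
        }
    }
}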
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/AbstractHistogramIterator.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.util.ConcurrentModificationException; import java.util.Iterator; import java.util.NoSuchElementException; /** * Used for iterating through histogram values. */ abstract class AbstractHistogramIterator implements Iterator<HistogramIterationValue> { AbstractHistogram histogram; long arrayTotalCount; int currentIndex; long currentValueAtIndex; long nextValueAtIndex; long prevValueIteratedTo; long totalCountToPrevIndex; long totalCountToCurrentIndex; long totalValueToCurrentIndex; long countAtThisValue; private boolean freshSubBucket; final HistogramIterationValue currentIterationValue = new HistogramIterationValue(); private double integerToDoubleValueConversionRatio; void resetIterator(final AbstractHistogram histogram) { this.histogram = histogram; this.arrayTotalCount = histogram.getTotalCount(); this.integerToDoubleValueConversionRatio = histogram.getIntegerToDoubleValueConversionRatio(); this.currentIndex = 0; this.currentValueAtIndex = 0; this.nextValueAtIndex = 1 << histogram.unitMagnitude; this.prevValueIteratedTo = 0; this.totalCountToPrevIndex = 0; this.totalCountToCurrentIndex = 0; this.totalValueToCurrentIndex = 0; this.countAtThisValue = 0; this.freshSubBucket = true; currentIterationValue.reset(); } /** * Returns true if the iteration has more elements. (In other words, returns true if next would return an * element rather than throwing an exception.) * * @return true if the iterator has more elements. */ @Override public boolean hasNext() { if (histogram.getTotalCount() != arrayTotalCount) { throw new ConcurrentModificationException(); } return (totalCountToCurrentIndex < arrayTotalCount); } /** * Returns the next element in the iteration. * * @return the {@link HistogramIterationValue} associated with the next element in the iteration. */ @Override public HistogramIterationValue next() { // Move through the sub buckets and buckets until we hit the next reporting level: while (!exhaustedSubBuckets()) { countAtThisValue = histogram.getCountAtIndex(currentIndex); if (freshSubBucket) { // Don't add unless we've incremented since last bucket... totalCountToCurrentIndex += countAtThisValue; totalValueToCurrentIndex += countAtThisValue * histogram.highestEquivalentValue(currentValueAtIndex); freshSubBucket = false; } if (reachedIterationLevel()) { long valueIteratedTo = getValueIteratedTo(); currentIterationValue.set(valueIteratedTo, prevValueIteratedTo, countAtThisValue, (totalCountToCurrentIndex - totalCountToPrevIndex), totalCountToCurrentIndex, totalValueToCurrentIndex, ((100.0 * totalCountToCurrentIndex) / arrayTotalCount), getPercentileIteratedTo(), integerToDoubleValueConversionRatio); prevValueIteratedTo = valueIteratedTo; totalCountToPrevIndex = totalCountToCurrentIndex; // move the next iteration level forward: incrementIterationLevel(); if (histogram.getTotalCount() != arrayTotalCount) { throw new ConcurrentModificationException(); } return currentIterationValue; } incrementSubBucket(); } // Should not reach here. But possible for concurrent modification or overflowed histograms // under certain conditions if ((histogram.getTotalCount() != arrayTotalCount) || (totalCountToCurrentIndex > arrayTotalCount)) { throw new ConcurrentModificationException(); } throw new NoSuchElementException(); } /** * Not supported. Will throw an {@link UnsupportedOperationException}. 
*/ @Override public void remove() { throw new UnsupportedOperationException(); } abstract void incrementIterationLevel(); /** * @return true if the current position's data should be emitted by the iterator */ abstract boolean reachedIterationLevel(); double getPercentileIteratedTo() { return (100.0 * (double) totalCountToCurrentIndex) / arrayTotalCount; } double getPercentileIteratedFrom() { return (100.0 * (double) totalCountToPrevIndex) / arrayTotalCount; } long getValueIteratedTo() { return histogram.highestEquivalentValue(currentValueAtIndex); } private boolean exhaustedSubBuckets() { return (currentIndex >= histogram.countsArrayLength); } void incrementSubBucket() { freshSubBucket = true; // Take on the next index: currentIndex++; currentValueAtIndex = histogram.valueFromIndex(currentIndex); // Figure out the value at the next index (used by some iterators): nextValueAtIndex = histogram.valueFromIndex(currentIndex + 1); } }
5,416
35.113333
117
java
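AbstractHistogramIterator is package-private, so it is only exercised through its concrete subclasses; the sketch below illustrates the hasNext() contract shown above, where recording into the histogram while an iteration is in flight makes the next hasNext() or next() call throw ConcurrentModificationException. Class and method names are those visible in this file and its subclasses.

import java.util.ConcurrentModificationException;

import org.HdrHistogram.Histogram;
import org.HdrHistogram.RecordedValuesIterator;

public class IterationContractSketch {
    public static void main(String[] args) {
        Histogram histogram = new Histogram(100_000L, 3);
        histogram.recordValue(10);
        histogram.recordValue(20);

        RecordedValuesIterator iterator = new RecordedValuesIterator(histogram);
        iterator.next();                 // consume the first recorded value
        histogram.recordValue(30);       // mutate the histogram mid-iteration

        try {
            iterator.hasNext();          // total count changed -> fails fast
        } catch (ConcurrentModificationException expected) {
            System.out.println("iteration detected concurrent modification");
        }
    }
}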
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/HistogramIterationValue.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; /** * Represents a value point iterated through in a Histogram, with associated stats. * <ul> * <li><b><code>valueIteratedTo</code></b> :<br> The actual value level that was iterated to by the iterator</li> * <li><b><code>prevValueIteratedTo</code></b> :<br> The actual value level that was iterated from by the iterator</li> * <li><b><code>countAtValueIteratedTo</code></b> :<br> The count of recorded values in the histogram that * exactly match this [lowestEquivalentValue(valueIteratedTo)...highestEquivalentValue(valueIteratedTo)] value * range.</li> * <li><b><code>countAddedInThisIterationStep</code></b> :<br> The count of recorded values in the histogram that * were added to the totalCountToThisValue (below) as a result on this iteration step. Since multiple iteration * steps may occur with overlapping equivalent value ranges, the count may be lower than the count found at * the value (e.g. multiple linear steps or percentile levels can occur within a single equivalent value range)</li> * <li><b><code>totalCountToThisValue</code></b> :<br> The total count of all recorded values in the histogram at * values equal or smaller than valueIteratedTo.</li> * <li><b><code>totalValueToThisValue</code></b> :<br> The sum of all recorded values in the histogram at values * equal or smaller than valueIteratedTo.</li> * <li><b><code>percentile</code></b> :<br> The percentile of recorded values in the histogram at values equal * or smaller than valueIteratedTo.</li> * <li><b><code>percentileLevelIteratedTo</code></b> :<br> The percentile level that the iterator returning this * HistogramIterationValue had iterated to. Generally, percentileLevelIteratedTo will be equal to or smaller than * percentile, but the same value point can contain multiple iteration levels for some iterators. E.g. a * PercentileIterator can stop multiple times in the exact same value point (if the count at that value covers a * range of multiple percentiles in the requested percentile iteration points).</li> * </ul> */ public class HistogramIterationValue { private long valueIteratedTo; private long valueIteratedFrom; private long countAtValueIteratedTo; private long countAddedInThisIterationStep; private long totalCountToThisValue; private long totalValueToThisValue; private double percentile; private double percentileLevelIteratedTo; private double integerToDoubleValueConversionRatio; // Set is all-or-nothing to avoid the potential for accidental omission of some values... 
void set(final long valueIteratedTo, final long valueIteratedFrom, final long countAtValueIteratedTo, final long countInThisIterationStep, final long totalCountToThisValue, final long totalValueToThisValue, final double percentile, final double percentileLevelIteratedTo, double integerToDoubleValueConversionRatio) { this.valueIteratedTo = valueIteratedTo; this.valueIteratedFrom = valueIteratedFrom; this.countAtValueIteratedTo = countAtValueIteratedTo; this.countAddedInThisIterationStep = countInThisIterationStep; this.totalCountToThisValue = totalCountToThisValue; this.totalValueToThisValue = totalValueToThisValue; this.percentile = percentile; this.percentileLevelIteratedTo = percentileLevelIteratedTo; this.integerToDoubleValueConversionRatio = integerToDoubleValueConversionRatio; } void reset() { this.valueIteratedTo = 0; this.valueIteratedFrom = 0; this.countAtValueIteratedTo = 0; this.countAddedInThisIterationStep = 0; this.totalCountToThisValue = 0; this.totalValueToThisValue = 0; this.percentile = 0.0; this.percentileLevelIteratedTo = 0.0; } HistogramIterationValue() { } public String toString() { return "valueIteratedTo:" + valueIteratedTo + ", prevValueIteratedTo:" + valueIteratedFrom + ", countAtValueIteratedTo:" + countAtValueIteratedTo + ", countAddedInThisIterationStep:" + countAddedInThisIterationStep + ", totalCountToThisValue:" + totalCountToThisValue + ", totalValueToThisValue:" + totalValueToThisValue + ", percentile:" + percentile + ", percentileLevelIteratedTo:" + percentileLevelIteratedTo; } public long getValueIteratedTo() { return valueIteratedTo; } public double getDoubleValueIteratedTo() { return valueIteratedTo * integerToDoubleValueConversionRatio; } public long getValueIteratedFrom() { return valueIteratedFrom; } public double getDoubleValueIteratedFrom() { return valueIteratedFrom * integerToDoubleValueConversionRatio; } public long getCountAtValueIteratedTo() { return countAtValueIteratedTo; } public long getCountAddedInThisIterationStep() { return countAddedInThisIterationStep; } public long getTotalCountToThisValue() { return totalCountToThisValue; } public long getTotalValueToThisValue() { return totalValueToThisValue; } public double getPercentile() { return percentile; } public double getPercentileLevelIteratedTo() { return percentileLevelIteratedTo; } public double getIntegerToDoubleValueConversionRatio() { return integerToDoubleValueConversionRatio; } }
5,672
42.976744
123
java
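A brief sketch of consuming HistogramIterationValue instances via the recordedValues() iteration facility referenced elsewhere in this package; the accessors used are the public getters defined above. Note that the iterator reuses a single HistogramIterationValue instance, so values should be read before advancing.

import org.HdrHistogram.Histogram;
import org.HdrHistogram.HistogramIterationValue;

public class IterationValueSketch {
    public static void main(String[] args) {
        Histogram histogram = new Histogram(10_000L, 3);
        histogram.recordValueWithCount(100, 5);
        histogram.recordValueWithCount(250, 2);

        for (HistogramIterationValue v : histogram.recordedValues()) {
            System.out.printf("value<=%d count=%d cumulative=%d percentile=%.2f%n",
                    v.getValueIteratedTo(),
                    v.getCountAtValueIteratedTo(),
                    v.getTotalCountToThisValue(),
                    v.getPercentile());
        }
    }
}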
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/AtomicHistogram.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.io.IOException; import java.io.ObjectInputStream; import java.nio.ByteBuffer; import java.util.concurrent.atomic.*; import java.util.zip.DataFormatException; /** * <h3>A High Dynamic Range (HDR) Histogram using atomic <b><code>long</code></b> count type </h3> * An AtomicHistogram guarantees lossless recording of values into the histogram even when the * histogram is updated by multiple threads. It is important to note though that this lossless * recording capability is the only thread-safe behavior provided by AtomicHistogram, and that it * is not otherwise synchronized. Specifically, AtomicHistogram does not support auto-resizing, * does not support value shift operations, and provides no implicit synchronization * that would prevent the contents of the histogram from changing during iterations, copies, or * addition operations on the histogram. Callers wishing to make potentially concurrent, * multi-threaded updates that would safely work in the presence of queries, copies, or additions * of histogram objects should either take care to externally synchronize and/or order their access, * use the {@link org.HdrHistogram.SynchronizedHistogram} variant, or (recommended) use the * {@link Recorder} class, which is intended for this purpose. * <p> * See package description for {@link org.HdrHistogram} for details. */ public class AtomicHistogram extends Histogram { static final AtomicLongFieldUpdater<AtomicHistogram> totalCountUpdater = AtomicLongFieldUpdater.newUpdater(AtomicHistogram.class, "totalCount"); volatile long totalCount; volatile AtomicLongArray counts; @Override long getCountAtIndex(final int index) { return counts.get(index); } @Override long getCountAtNormalizedIndex(final int index) { return counts.get(index); } @Override void incrementCountAtIndex(final int index) { counts.getAndIncrement(index); } @Override void addToCountAtIndex(final int index, final long value) { counts.getAndAdd(index, value); } @Override void setCountAtIndex(int index, long value) { counts.lazySet(index, value); } @Override void setCountAtNormalizedIndex(int index, long value) { counts.lazySet(index, value); } @Override int getNormalizingIndexOffset() { return 0; } @Override void setNormalizingIndexOffset(int normalizingIndexOffset) { if (normalizingIndexOffset != 0) { throw new IllegalStateException( "AtomicHistogram does not support non-zero normalizing index settings." + " Use ConcurrentHistogram Instead."); } } @Override void shiftNormalizingIndexByOffset(int offsetToAdd, boolean lowestHalfBucketPopulated, double newIntegerToDoubleValueConversionRatio) { throw new IllegalStateException( "AtomicHistogram does not support Shifting operations." + " Use ConcurrentHistogram Instead."); } @Override void resize(long newHighestTrackableValue) { throw new IllegalStateException( "AtomicHistogram does not support resizing operations." + " Use ConcurrentHistogram Instead."); } @Override public void setAutoResize(boolean autoResize) { throw new IllegalStateException( "AtomicHistogram does not support AutoResize operation." 
+ " Use ConcurrentHistogram Instead."); } @Override public boolean supportsAutoResize() { return false; } @Override void clearCounts() { for (int i = 0; i < counts.length(); i++) { counts.lazySet(i, 0); } totalCountUpdater.set(this, 0); } @Override public AtomicHistogram copy() { AtomicHistogram copy = new AtomicHistogram(this); copy.add(this); return copy; } @Override public AtomicHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) { AtomicHistogram toHistogram = new AtomicHistogram(this); toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); return toHistogram; } @Override public long getTotalCount() { return totalCountUpdater.get(this); } @Override void setTotalCount(final long totalCount) { totalCountUpdater.set(this, totalCount); } @Override void incrementTotalCount() { totalCountUpdater.incrementAndGet(this); } @Override void addToTotalCount(final long value) { totalCountUpdater.addAndGet(this, value); } @Override int _getEstimatedFootprintInBytes() { return (512 + (8 * counts.length())); } /** * Construct a AtomicHistogram given the Highest value to be tracked and a number of significant decimal digits. * The histogram will be constructed to implicitly track (distinguish from 0) values as low as 1. * * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive * integer that is {@literal >=} 2. * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public AtomicHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) { this(1, highestTrackableValue, numberOfSignificantValueDigits); } /** * Construct a AtomicHistogram given the Lowest and Highest values to be tracked and a number of significant * decimal digits. Providing a lowestDiscernibleValue is useful is situations where the units used * for the histogram's values are much smaller that the minimal accuracy required. E.g. when tracking * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the * proper value for lowestDiscernibleValue would be 1000. * * @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram. * Must be a positive integer that is {@literal >=} 1. May be internally rounded * down to nearest power of 2. * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive * integer that is {@literal >=} (2 * lowestDiscernibleValue). * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. 
*/ public AtomicHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, final int numberOfSignificantValueDigits) { super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits, false); counts = new AtomicLongArray(countsArrayLength); wordSizeInBytes = 8; } /** * Construct a histogram with the same range settings as a given source histogram, * duplicating the source's start/end timestamps (but NOT it's contents) * @param source The source histogram to duplicate */ public AtomicHistogram(final AbstractHistogram source) { super(source, false); counts = new AtomicLongArray(countsArrayLength); wordSizeInBytes = 8; } /** * Construct a new histogram by decoding it from a ByteBuffer. * @param buffer The buffer to decode from * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high * @return The newly constructed histogram */ public static AtomicHistogram decodeFromByteBuffer(final ByteBuffer buffer, final long minBarForHighestTrackableValue) { return decodeFromByteBuffer(buffer, AtomicHistogram.class, minBarForHighestTrackableValue); } /** * Construct a new histogram by decoding it from a compressed form in a ByteBuffer. * @param buffer The buffer to decode from * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high * @return The newly constructed histogram * @throws DataFormatException on error parsing/decompressing the buffer */ public static AtomicHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer, final long minBarForHighestTrackableValue) throws DataFormatException { return decodeFromCompressedByteBuffer(buffer, AtomicHistogram.class, minBarForHighestTrackableValue); } /** * Construct a new AtomicHistogram by decoding it from a String containing a base64 encoded * compressed histogram representation. * * @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram * @return A AtomicHistogram decoded from the string * @throws DataFormatException on error parsing/decompressing the input */ public static AtomicHistogram fromString(final String base64CompressedHistogramString) throws DataFormatException { return decodeFromCompressedByteBuffer( ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)), 0); } private void readObject(final ObjectInputStream o) throws IOException, ClassNotFoundException { o.defaultReadObject(); } }
10,359
40.606426
116
java
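A sketch of the lossless concurrent-recording guarantee described above, assuming recordValue(long) and getTotalCount() behave as in the parent Histogram class; the thread count and recorded values are arbitrary. Because AtomicHistogram does not auto-resize, the trackable range is fixed at construction.

import org.HdrHistogram.AtomicHistogram;

public class AtomicHistogramSketch {
    public static void main(String[] args) throws InterruptedException {
        // Fixed-range histogram: AtomicHistogram does not support auto-resizing.
        AtomicHistogram histogram = new AtomicHistogram(1_000_000L, 3);

        Runnable recorder = () -> {
            for (int i = 1; i <= 100_000; i++) {
                histogram.recordValue(i % 1000 + 1);
            }
        };
        Thread t1 = new Thread(recorder);
        Thread t2 = new Thread(recorder);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        // Concurrent recording is lossless: all 200,000 recorded values are counted.
        System.out.println("total count = " + histogram.getTotalCount());
    }
}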
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/RecordedValuesIterator.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.util.Iterator; /** * Used for iterating through all recorded histogram values using the finest granularity steps supported by the * underlying representation. The iteration steps through all non-zero recorded value counts, and terminates when * all recorded histogram values are exhausted. */ public class RecordedValuesIterator extends AbstractHistogramIterator implements Iterator<HistogramIterationValue> { int visitedIndex; /** * Reset iterator for re-use in a fresh iteration over the same histogram data set. */ public void reset() { reset(histogram); } private void reset(final AbstractHistogram histogram) { super.resetIterator(histogram); visitedIndex = -1; } /** * @param histogram The histogram this iterator will operate on */ public RecordedValuesIterator(final AbstractHistogram histogram) { reset(histogram); } @Override void incrementIterationLevel() { visitedIndex = currentIndex; } @Override boolean reachedIterationLevel() { long currentCount = histogram.getCountAtIndex(currentIndex); return (currentCount != 0) && (visitedIndex != currentIndex); } }
1,433
27.117647
116
java
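A small sketch of the non-zero-count iteration above against a sparse histogram; only the three distinct recorded values produce iteration steps, and empty ranges between them are skipped.

import org.HdrHistogram.Histogram;
import org.HdrHistogram.HistogramIterationValue;
import org.HdrHistogram.RecordedValuesIterator;

public class RecordedValuesSketch {
    public static void main(String[] args) {
        Histogram histogram = new Histogram(1_000_000L, 2);
        histogram.recordValue(42);
        histogram.recordValue(42);
        histogram.recordValue(7_000);
        histogram.recordValue(900_000);

        // Steps only through indexes with non-zero counts.
        RecordedValuesIterator iterator = new RecordedValuesIterator(histogram);
        while (iterator.hasNext()) {
            HistogramIterationValue v = iterator.next();
            System.out.printf("%d x %d%n", v.getValueIteratedTo(), v.getCountAtValueIteratedTo());
        }
    }
}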
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/EncodableHistogram.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.nio.ByteBuffer; import java.util.zip.DataFormatException; /** * A base class for all encodable (and decodable) histogram classes. Log readers and writers * will generally use this base class to provide common log processing across the integer value * based AbstractHistogram subclasses and the double value based DoubleHistogram class. * */ public abstract class EncodableHistogram { public abstract int getNeededByteBufferCapacity(); public abstract int encodeIntoCompressedByteBuffer(final ByteBuffer targetBuffer, int compressionLevel); public abstract long getStartTimeStamp(); public abstract void setStartTimeStamp(long startTimeStamp); public abstract long getEndTimeStamp(); public abstract void setEndTimeStamp(long endTimestamp); public abstract String getTag(); public abstract void setTag(String tag); public abstract double getMaxValueAsDouble(); /** * Decode an {@link EncodableHistogram} from a compressed byte buffer. Will return either a * {@link org.HdrHistogram.Histogram} or {@link org.HdrHistogram.DoubleHistogram} depending * on the format found in the supplied buffer. * * @param buffer The input buffer to decode from. * @param minBarForHighestTrackableValue A lower bound either on the highestTrackableValue of * the created Histogram, or on the HighestToLowestValueRatio * of the created DoubleHistogram. * @return The decoded {@link org.HdrHistogram.Histogram} or {@link org.HdrHistogram.DoubleHistogram} * @throws DataFormatException on errors in decoding the buffer compression. */ static EncodableHistogram decodeFromCompressedByteBuffer( ByteBuffer buffer, final long minBarForHighestTrackableValue) throws DataFormatException { // Peek in buffer to see the cookie: int cookie = buffer.getInt(buffer.position()); if (DoubleHistogram.isDoubleHistogramCookie(cookie)) { return DoubleHistogram.decodeFromCompressedByteBuffer(buffer, minBarForHighestTrackableValue); } else { return Histogram.decodeFromCompressedByteBuffer(buffer, minBarForHighestTrackableValue); } } }
2,496
38.634921
108
java
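A sketch of the compressed encode/decode round trip that this base class standardizes, using the getNeededByteBufferCapacity and encodeIntoCompressedByteBuffer methods declared above together with the public static decodeFromCompressedByteBuffer factory that concrete classes such as Histogram expose; the compression level constant comes from java.util.zip.Deflater, and the buffer handling assumes encoding advances the buffer position.

import java.nio.ByteBuffer;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;

import org.HdrHistogram.Histogram;

public class EncodeDecodeSketch {
    public static void main(String[] args) throws DataFormatException {
        Histogram histogram = new Histogram(3_600_000_000L, 3);
        histogram.recordValue(1_234);
        histogram.recordValue(56_789);

        // Encode into a compressed buffer sized by the histogram itself.
        ByteBuffer buffer = ByteBuffer.allocate(histogram.getNeededByteBufferCapacity());
        histogram.encodeIntoCompressedByteBuffer(buffer, Deflater.DEFAULT_COMPRESSION);
        buffer.rewind();

        // Decode it back; 0 places no extra lower bound on highestTrackableValue.
        Histogram decoded = Histogram.decodeFromCompressedByteBuffer(buffer, 0);
        System.out.println("round trip equal: " + histogram.equals(decoded));
    }
}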
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/DoubleRecordedValuesIterator.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.util.Iterator; /** * Used for iterating through {@link DoubleHistogram} values using the finest granularity steps supported by * the underlying representation. The iteration steps through all non-zero recorded value counts, and terminates * when all recorded histogram values are exhausted. */ public class DoubleRecordedValuesIterator implements Iterator<DoubleHistogramIterationValue> { private final RecordedValuesIterator integerRecordedValuesIterator; private final DoubleHistogramIterationValue iterationValue; DoubleHistogram histogram; /** * Reset iterator for re-use in a fresh iteration over the same histogram data set. */ public void reset() { integerRecordedValuesIterator.reset(); } /** * @param histogram The histogram this iterator will operate on */ public DoubleRecordedValuesIterator(final DoubleHistogram histogram) { this.histogram = histogram; integerRecordedValuesIterator = new RecordedValuesIterator(histogram.integerValuesHistogram); iterationValue = new DoubleHistogramIterationValue(integerRecordedValuesIterator.currentIterationValue); } @Override public boolean hasNext() { return integerRecordedValuesIterator.hasNext(); } @Override public DoubleHistogramIterationValue next() { integerRecordedValuesIterator.next(); return iterationValue; } @Override public void remove() { integerRecordedValuesIterator.remove(); } }
1,815
32.018182
115
java
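The floating-point counterpart of the recorded-values iteration; a sketch assuming the DoubleHistogramIterationValue accessors mirror the integer iteration value's getValueIteratedTo() and getCountAtValueIteratedTo().

import org.HdrHistogram.DoubleHistogram;
import org.HdrHistogram.DoubleHistogramIterationValue;
import org.HdrHistogram.DoubleRecordedValuesIterator;

public class DoubleRecordedValuesSketch {
    public static void main(String[] args) {
        DoubleHistogram histogram = new DoubleHistogram(3);
        histogram.recordValue(0.042);
        histogram.recordValue(0.042);
        histogram.recordValue(17.5);

        DoubleRecordedValuesIterator iterator = new DoubleRecordedValuesIterator(histogram);
        while (iterator.hasNext()) {
            DoubleHistogramIterationValue v = iterator.next();
            System.out.printf("%.4f x %d%n",
                    v.getValueIteratedTo(), v.getCountAtValueIteratedTo());
        }
    }
}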
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/DoubleHistogram.java
/* * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.io.*; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.nio.ByteBuffer; import java.util.Iterator; import java.util.zip.DataFormatException; import java.util.zip.Deflater; /** * <h3>A floating point values High Dynamic Range (HDR) Histogram</h3> * <p> * It is important to note that {@link DoubleHistogram} is not thread-safe, and does not support safe concurrent * recording by multiple threads. If concurrent operation is required, consider using * {@link ConcurrentDoubleHistogram}, {@link SynchronizedDoubleHistogram}, * or (recommended) {@link DoubleRecorder} or {@link SingleWriterDoubleRecorder} which are intended for this purpose. * <p> * {@link DoubleHistogram} supports the recording and analyzing sampled data value counts across a * configurable dynamic range of floating point (double) values, with configurable value precision within the range. * Dynamic range is expressed as a ratio between the highest and lowest non-zero values trackable within the histogram * at any given time. Value precision is expressed as the number of significant [decimal] digits in the value recording, * and provides control over value quantization behavior across the value range and the subsequent value resolution at * any given level. * <p> * Auto-ranging: Unlike integer value based histograms, the specific value range tracked by a {@link * DoubleHistogram} is not specified upfront. Only the dynamic range of values that the histogram can cover is * (optionally) specified. E.g. When a {@link DoubleHistogram} is created to track a dynamic range of * 3600000000000 (enough to track values from a nanosecond to an hour), values could be recorded into into it in any * consistent unit of time as long as the ratio between the highest and lowest non-zero values stays within the * specified dynamic range, so recording in units of nanoseconds (1.0 thru 3600000000000.0), milliseconds (0.000001 * thru 3600000.0) seconds (0.000000001 thru 3600.0), hours (1/3.6E12 thru 1.0) will all work just as well. * <p> * Auto-resizing: When constructed with no specified dynamic range (or when auto-resize is turned on with {@link * DoubleHistogram#setAutoResize}) a {@link DoubleHistogram} will auto-resize its dynamic range to * include recorded values as they are encountered. Note that recording calls that cause auto-resizing may take * longer to execute, as resizing incurs allocation and copying of internal data structures. * <p> * Attempts to record non-zero values that range outside of the specified dynamic range (or exceed the limits of * of dynamic range when auto-resizing) may results in {@link ArrayIndexOutOfBoundsException} exceptions, either * due to overflow or underflow conditions. These exceptions will only be thrown if recording the value would have * resulted in discarding or losing the required value precision of values already recorded in the histogram. * <p> * See package description for {@link org.HdrHistogram} for details. */ public class DoubleHistogram extends EncodableHistogram implements DoubleValueRecorder, Serializable { private static final double highestAllowedValueEver; // A value that will keep us from multiplying into infinity. 
private long configuredHighestToLowestValueRatio; private volatile double currentLowestValueInAutoRange; private volatile double currentHighestValueLimitInAutoRange; AbstractHistogram integerValuesHistogram; // volatile double doubleToIntegerValueConversionRatio; // volatile double integerToDoubleValueConversionRatio; private boolean autoResize = false; /** * Construct a new auto-resizing DoubleHistogram using a precision stated as a number * of significant decimal digits. * * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public DoubleHistogram(final int numberOfSignificantValueDigits) { this(2, numberOfSignificantValueDigits, Histogram.class, null); setAutoResize(true); } /** * Construct a new auto-resizing DoubleHistogram using a precision stated as a number * of significant decimal digits. * * The {@link org.HdrHistogram.DoubleHistogram} will use the specified AbstractHistogram subclass * for tracking internal counts (e.g. {@link org.HdrHistogram.Histogram}, * {@link org.HdrHistogram.ConcurrentHistogram}, {@link org.HdrHistogram.SynchronizedHistogram}, * {@link org.HdrHistogram.IntCountsHistogram}, {@link org.HdrHistogram.ShortCountsHistogram}). * * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. * @param internalCountsHistogramClass The class to use for internal counts tracking */ public DoubleHistogram(final int numberOfSignificantValueDigits, final Class<? extends AbstractHistogram> internalCountsHistogramClass) { this(2, numberOfSignificantValueDigits, internalCountsHistogramClass, null); setAutoResize(true); } /** * Construct a new DoubleHistogram with the specified dynamic range (provided in * {@code highestToLowestValueRatio}) and using a precision stated as a number of significant * decimal digits. * * @param highestToLowestValueRatio specifies the dynamic range to use * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public DoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits) { this(highestToLowestValueRatio, numberOfSignificantValueDigits, Histogram.class); } /** * Construct a new DoubleHistogram with the specified dynamic range (provided in * {@code highestToLowestValueRatio}) and using a precision stated as a number of significant * decimal digits. * * The {@link org.HdrHistogram.DoubleHistogram} will use the specified AbstractHistogram subclass * for tracking internal counts (e.g. {@link org.HdrHistogram.Histogram}, * {@link org.HdrHistogram.ConcurrentHistogram}, {@link org.HdrHistogram.SynchronizedHistogram}, * {@link org.HdrHistogram.IntCountsHistogram}, {@link org.HdrHistogram.ShortCountsHistogram}). * * @param highestToLowestValueRatio specifies the dynamic range to use. * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. 
* @param internalCountsHistogramClass The class to use for internal counts tracking */ protected DoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits, final Class<? extends AbstractHistogram> internalCountsHistogramClass) { this(highestToLowestValueRatio, numberOfSignificantValueDigits, internalCountsHistogramClass, null); } DoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits, final Class<? extends AbstractHistogram> internalCountsHistogramClass, AbstractHistogram internalCountsHistogram) { this( highestToLowestValueRatio, numberOfSignificantValueDigits, internalCountsHistogramClass, internalCountsHistogram, false ); } private DoubleHistogram(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits, final Class<? extends AbstractHistogram> internalCountsHistogramClass, AbstractHistogram internalCountsHistogram, boolean mimicInternalModel) { try { if (highestToLowestValueRatio < 2) { throw new IllegalArgumentException("highestToLowestValueRatio must be >= 2"); } if ((highestToLowestValueRatio * Math.pow(10.0, numberOfSignificantValueDigits)) >= (1L << 61)) { throw new IllegalArgumentException( "highestToLowestValueRatio * (10^numberOfSignificantValueDigits) must be < (1L << 61)"); } if (internalCountsHistogramClass == AtomicHistogram.class) { throw new IllegalArgumentException( "AtomicHistogram cannot be used as an internal counts histogram (does not support shifting)." + " Use ConcurrentHistogram instead."); } long integerValueRange = deriveIntegerValueRange(highestToLowestValueRatio, numberOfSignificantValueDigits); final AbstractHistogram valuesHistogram; double initialLowestValueInAutoRange; if (internalCountsHistogram == null) { // Create the internal counts histogram: Constructor<? extends AbstractHistogram> histogramConstructor = internalCountsHistogramClass.getConstructor(long.class, long.class, int.class); valuesHistogram = histogramConstructor.newInstance( 1L, (integerValueRange - 1), numberOfSignificantValueDigits ); // We want the auto-ranging to tend towards using a value range that will result in using the // lower tracked value ranges and leave the higher end empty unless the range is actually used. // This is most easily done by making early recordings force-shift the lower value limit to // accommodate them (forcing a force-shift for the higher values would achieve the opposite). // We will therefore start with a very high value range, and let the recordings autoAdjust // downwards from there: initialLowestValueInAutoRange = Math.pow(2.0, 800); } else if (mimicInternalModel) { Constructor<? 
extends AbstractHistogram> histogramConstructor = internalCountsHistogramClass.getConstructor(AbstractHistogram.class); valuesHistogram = histogramConstructor.newInstance(internalCountsHistogram); initialLowestValueInAutoRange = Math.pow(2.0, 800); } else { // Verify that the histogram we got matches: if ((internalCountsHistogram.getLowestDiscernibleValue() != 1) || (internalCountsHistogram.getHighestTrackableValue() != integerValueRange - 1) || internalCountsHistogram.getNumberOfSignificantValueDigits() != numberOfSignificantValueDigits) { throw new IllegalStateException("integer values histogram does not match stated parameters."); } valuesHistogram = internalCountsHistogram; // Derive initialLowestValueInAutoRange from valuesHistogram's integerToDoubleValueConversionRatio: initialLowestValueInAutoRange = internalCountsHistogram.getIntegerToDoubleValueConversionRatio() * internalCountsHistogram.subBucketHalfCount; } // Set our double tracking range and internal histogram: init(highestToLowestValueRatio, initialLowestValueInAutoRange, valuesHistogram); } catch (NoSuchMethodException | IllegalAccessException | InstantiationException | InvocationTargetException ex) { throw new IllegalArgumentException(ex); } } /** * Construct a {@link org.HdrHistogram.DoubleHistogram} with the same range settings as a given source, * duplicating the source's start/end timestamps (but NOT it's contents) * @param source The source histogram to duplicate */ public DoubleHistogram(final DoubleHistogram source) { this(source.configuredHighestToLowestValueRatio, source.getNumberOfSignificantValueDigits(), source.integerValuesHistogram.getClass(), source.integerValuesHistogram, true); this.autoResize = source.autoResize; setTrackableValueRange(source.currentLowestValueInAutoRange, source.currentHighestValueLimitInAutoRange); } private void init(final long configuredHighestToLowestValueRatio, final double lowestTrackableUnitValue, final AbstractHistogram integerValuesHistogram) { this.configuredHighestToLowestValueRatio = configuredHighestToLowestValueRatio; this.integerValuesHistogram = integerValuesHistogram; long internalHighestToLowestValueRatio = deriveInternalHighestToLowestValueRatio(configuredHighestToLowestValueRatio); setTrackableValueRange(lowestTrackableUnitValue, lowestTrackableUnitValue * internalHighestToLowestValueRatio); } private void setTrackableValueRange(final double lowestValueInAutoRange, final double highestValueInAutoRange) { this.currentLowestValueInAutoRange = lowestValueInAutoRange; this.currentHighestValueLimitInAutoRange = highestValueInAutoRange; double integerToDoubleValueConversionRatio = lowestValueInAutoRange / getLowestTrackingIntegerValue(); integerValuesHistogram.setIntegerToDoubleValueConversionRatio(integerToDoubleValueConversionRatio); } double getDoubleToIntegerValueConversionRatio() { return integerValuesHistogram.getDoubleToIntegerValueConversionRatio(); } // // // Auto-resizing control: // // public boolean isAutoResize() { return autoResize; } public void setAutoResize(boolean autoResize) { this.autoResize = autoResize; } // // // // Value recording support: // // // /** * Record a value in the histogram * * @param value The value to be recorded * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range */ @Override public void recordValue(final double value) throws ArrayIndexOutOfBoundsException { recordSingleValue(value); } /** * Record a value in the histogram (adding to the value's current count) * * @param value The 
value to be recorded * @param count The number of occurrences of this value to record * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range */ @Override public void recordValueWithCount(final double value, final long count) throws ArrayIndexOutOfBoundsException { recordCountAtValue(count, value); } /** * Record a value in the histogram. * <p> * To compensate for the loss of sampled values when a recorded value is larger than the expected * interval between value samples, Histogram will auto-generate an additional series of decreasingly-smaller * (down to the expectedIntervalBetweenValueSamples) value records. * <p> * Note: This is a at-recording correction method, as opposed to the post-recording correction method provided * by {@link #copyCorrectedForCoordinatedOmission(double)}. * The use cases for these two methods are mutually exclusive, and only one of the two should be be used on * a given data set to correct for the same coordinated omission issue. * <p> * See notes in the description of the Histogram calls for an illustration of why this corrective behavior is * important. * * @param value The value to record * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add * auto-generated value records as appropriate if value is larger * than expectedIntervalBetweenValueSamples * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range */ @Override public void recordValueWithExpectedInterval(final double value, final double expectedIntervalBetweenValueSamples) throws ArrayIndexOutOfBoundsException { recordValueWithCountAndExpectedInterval(value, 1, expectedIntervalBetweenValueSamples); } private void recordCountAtValue(final long count, final double value) throws ArrayIndexOutOfBoundsException { int throwCount = 0; while (true) { if ((value < currentLowestValueInAutoRange) || (value >= currentHighestValueLimitInAutoRange)) { // Zero is valid and needs no auto-ranging, but also rare enough that we should deal // with it on the slow path... autoAdjustRangeForValue(value); } try { integerValuesHistogram.recordConvertedDoubleValueWithCount(value, count); return; } catch (IndexOutOfBoundsException ex) { // A race that would pass the auto-range check above and would still take an AIOOB // can only occur due to a value that would have been valid becoming invalid due // to a concurrent adjustment operation. Such adjustment operations can happen no // more than 64 times in the entire lifetime of the Histogram, which makes it safe // to retry with no fear of live-locking. if (++throwCount > 64) { // For the retry check to not detect an out of range attempt after 64 retries // should be theoretically impossible, and would indicate a bug. throw new ArrayIndexOutOfBoundsException( "BUG: Unexpected non-transient AIOOB Exception caused by:\n" + ex); } } } } private void recordSingleValue(final double value) throws ArrayIndexOutOfBoundsException { int throwCount = 0; while (true) { if ((value < currentLowestValueInAutoRange) || (value >= currentHighestValueLimitInAutoRange)) { // Zero is valid and needs no auto-ranging, but also rare enough that we should deal // with it on the slow path... 
autoAdjustRangeForValue(value); } try { integerValuesHistogram.recordConvertedDoubleValue(value); return; } catch (IndexOutOfBoundsException ex) { // A race that would pass the auto-range check above and would still take an AIOOB // can only occur due to a value that would have been valid becoming invalid due // to a concurrent adjustment operation. Such adjustment operations can happen no // more than 64 times in the entire lifetime of the Histogram, which makes it safe // to retry with no fear of live-locking. if (++throwCount > 64) { // For the retry check to not detect an out of range attempt after 64 retries // should be theoretically impossible, and would indicate a bug. throw new ArrayIndexOutOfBoundsException( "BUG: Unexpected non-transient AIOOB Exception caused by:\n" + ex); } } } } private void recordValueWithCountAndExpectedInterval(final double value, final long count, final double expectedIntervalBetweenValueSamples) throws ArrayIndexOutOfBoundsException { recordCountAtValue(count, value); if (expectedIntervalBetweenValueSamples <= 0) return; for (double missingValue = value - expectedIntervalBetweenValueSamples; missingValue >= expectedIntervalBetweenValueSamples; missingValue -= expectedIntervalBetweenValueSamples) { recordCountAtValue(count, missingValue); } } // // // // Shift and auto-ranging support: // // // private void autoAdjustRangeForValue(final double value) { // Zero is always valid, and doesn't need auto-range adjustment: if (value == 0.0) { return; } autoAdjustRangeForValueSlowPath(value); } private synchronized void autoAdjustRangeForValueSlowPath(final double value) { try { if (value < currentLowestValueInAutoRange) { if (value < 0.0) { throw new ArrayIndexOutOfBoundsException("Negative values cannot be recorded"); } do { int shiftAmount = findCappedContainingBinaryOrderOfMagnitude( Math.ceil(currentLowestValueInAutoRange / value) - 1.0); shiftCoveredRangeToTheRight(shiftAmount); } while (value < currentLowestValueInAutoRange); } else if (value >= currentHighestValueLimitInAutoRange) { if (value > highestAllowedValueEver) { throw new ArrayIndexOutOfBoundsException( "Values above " + highestAllowedValueEver + " cannot be recorded"); } do { // If value is an exact whole multiple of currentHighestValueLimitInAutoRange, it "belongs" with // the next level up, as it crosses the limit. With floating point values, the simplest way to // make this shift on exact multiple values happen (but not for any just-smaller-than-exact-multiple // values) is to use a value that is 1 ulp bigger in computing the ratio for the shift amount: int shiftAmount = findCappedContainingBinaryOrderOfMagnitude( Math.ceil((value + Math.ulp(value)) / currentHighestValueLimitInAutoRange) - 1.0); shiftCoveredRangeToTheLeft(shiftAmount); } while (value >= currentHighestValueLimitInAutoRange); } } catch (ArrayIndexOutOfBoundsException ex) { throw new ArrayIndexOutOfBoundsException("The value " + value + " is out of bounds for histogram, current covered range [" + currentLowestValueInAutoRange + ", " + currentHighestValueLimitInAutoRange + ") cannot be extended any further.\n"+ "Caused by: " + ex); } } private void shiftCoveredRangeToTheRight(final int numberOfBinaryOrdersOfMagnitude) { // We are going to adjust the tracked range by effectively shifting it to the right // (in the integer shift sense). 
// // To counter the right shift of the value multipliers, we need to left shift the internal // representation such that the newly shifted integer values will continue to return the // same double values. // Initially, new range is the same as current range, to make sure we correctly recover // from a shift failure if one happens: double newLowestValueInAutoRange = currentLowestValueInAutoRange; double newHighestValueLimitInAutoRange = currentHighestValueLimitInAutoRange; try { double shiftMultiplier = 1.0 / (1L << numberOfBinaryOrdersOfMagnitude); // First, temporarily change the highest value in auto-range without changing conversion ratios. // This is done to force new values higher than the new expected highest value to attempt an // adjustment (which is synchronized and will wait behind this one). This ensures that we will // not end up with any concurrently recorded values that would need to be discarded if the shift // fails. If this shift succeeds, the pending adjustment attempt will end up doing nothing. currentHighestValueLimitInAutoRange *= shiftMultiplier; double newIntegerToDoubleValueConversionRatio = getIntegerToDoubleValueConversionRatio() * shiftMultiplier; // First shift the values, to give the shift a chance to fail: // Shift integer histogram left, increasing the recorded integer values for current recordings // by a factor of (1 << numberOfBinaryOrdersOfMagnitude): // (no need to shift any values if all recorded values are at the 0 value level:) if (getTotalCount() > integerValuesHistogram.getCountAtIndex(0)) { // Apply the shift: try { integerValuesHistogram.shiftValuesLeft(numberOfBinaryOrdersOfMagnitude, newIntegerToDoubleValueConversionRatio); } catch (ArrayIndexOutOfBoundsException ex) { // Failed to shift, try to expand size instead: handleShiftValuesException(numberOfBinaryOrdersOfMagnitude, ex); // First expand the highest limit to reflect successful size expansion: newHighestValueLimitInAutoRange /= shiftMultiplier; // Successfully expanded histogram range by numberOfBinaryOrdersOfMagnitude, but not // by shifting (shifting failed because there was not room to shift left into). Instead, // we grew the max value without changing the value mapping. Since we were trying to // shift values left to begin with, trying to shift the left again will work (we now // have room to shift into): integerValuesHistogram.shiftValuesLeft(numberOfBinaryOrdersOfMagnitude, newIntegerToDoubleValueConversionRatio); } } // Shift (or resize) was successful. Adjust new range to reflect: newLowestValueInAutoRange *= shiftMultiplier; newHighestValueLimitInAutoRange *= shiftMultiplier; } finally { // Set the new range to either the successfully changed one, or the original one: setTrackableValueRange(newLowestValueInAutoRange, newHighestValueLimitInAutoRange); } } private void shiftCoveredRangeToTheLeft(final int numberOfBinaryOrdersOfMagnitude) { // We are going to adjust the tracked range by effectively shifting it to the right // (in the integer shift sense). // // To counter the left shift of the value multipliers, we need to right shift the internal // representation such that the newly shifted integer values will continue to return the // same double values. 
// Initially, new range is the same as current range, to make sure we correctly recover // from a shift failure if one happens: double newLowestValueInAutoRange = currentLowestValueInAutoRange; double newHighestValueLimitInAutoRange = currentHighestValueLimitInAutoRange; try { double shiftMultiplier = 1.0 * (1L << numberOfBinaryOrdersOfMagnitude); double newIntegerToDoubleValueConversionRatio = getIntegerToDoubleValueConversionRatio() * shiftMultiplier; // First, temporarily change the lowest value in auto-range without changing conversion ratios. // This is done to force new values lower than the new expected lowest value to attempt an // adjustment (which is synchronized and will wait behind this one). This ensures that we will // not end up with any concurrently recorded values that would need to be discarded if the shift // fails. If this shift succeeds, the pending adjustment attempt will end up doing nothing. currentLowestValueInAutoRange *= shiftMultiplier; // First shift the values, to give the shift a chance to fail: // Shift integer histogram right, decreasing the recorded integer values for current recordings // by a factor of (1 << numberOfBinaryOrdersOfMagnitude): // (no need to shift any values if all recorded values are at the 0 value level:) if (getTotalCount() > integerValuesHistogram.getCountAtIndex(0)) { // Apply the shift: try { integerValuesHistogram.shiftValuesRight(numberOfBinaryOrdersOfMagnitude, newIntegerToDoubleValueConversionRatio); // Shift was successful. Adjust new range to reflect: newLowestValueInAutoRange *= shiftMultiplier; newHighestValueLimitInAutoRange *= shiftMultiplier; } catch (ArrayIndexOutOfBoundsException ex) { // Failed to shift, try to expand size instead: handleShiftValuesException(numberOfBinaryOrdersOfMagnitude, ex); // Successfully expanded histogram range by numberOfBinaryOrdersOfMagnitude, but not // by shifting (shifting failed because there was not room to shift right into). Instead, // we grew the max value without changing the value mapping. Since we were trying to // shift values right to begin with to make room for a larger value than we had had // been able to fit before, no shift is needed, as the value should now fit. So rather // than shifting and adjusting both lowest and highest limits, we'll end up just // expanding newHighestValueLimitInAutoRange to indicate the newly expanded range. // We therefore reverse-scale the newLowestValueInAutoRange before letting the later // code scale both up: newLowestValueInAutoRange /= shiftMultiplier; } } // Shift (or resize) was successful. 
Adjust new range to reflect: newLowestValueInAutoRange *= shiftMultiplier; newHighestValueLimitInAutoRange *= shiftMultiplier; } finally { // Set the new range to either the successfully changed one, or the original one: setTrackableValueRange(newLowestValueInAutoRange, newHighestValueLimitInAutoRange); } } private void handleShiftValuesException(final int numberOfBinaryOrdersOfMagnitude, Exception ex) { if (!autoResize) { throw new ArrayIndexOutOfBoundsException("Value outside of histogram covered range.\nCaused by: " + ex); } long highestTrackableValue = integerValuesHistogram.getHighestTrackableValue(); int currentContainingOrderOfMagnitude = findContainingBinaryOrderOfMagnitude(highestTrackableValue); int newContainingOrderOfMagnitude = numberOfBinaryOrdersOfMagnitude + currentContainingOrderOfMagnitude; if (newContainingOrderOfMagnitude > 63) { throw new ArrayIndexOutOfBoundsException( "Cannot resize histogram covered range beyond (1L << 63) / (1L << " + (integerValuesHistogram.subBucketHalfCountMagnitude) + ") - 1.\n" + "Caused by: " + ex); } long newHighestTrackableValue = (1L << newContainingOrderOfMagnitude) - 1; integerValuesHistogram.resize(newHighestTrackableValue); integerValuesHistogram.highestTrackableValue = newHighestTrackableValue; configuredHighestToLowestValueRatio <<= numberOfBinaryOrdersOfMagnitude; } // // // // Clearing support: // // // /** * Reset the contents and stats of this histogram */ @Override public void reset() { integerValuesHistogram.reset(); double initialLowestValueInAutoRange = Math.pow(2.0, 800); init(configuredHighestToLowestValueRatio, initialLowestValueInAutoRange, integerValuesHistogram); } // // // // Copy support: // // // /** * Create a copy of this histogram, complete with data and everything. * * @return A distinct copy of this histogram. */ public DoubleHistogram copy() { final DoubleHistogram targetHistogram = new DoubleHistogram(configuredHighestToLowestValueRatio, getNumberOfSignificantValueDigits()); targetHistogram.setTrackableValueRange(currentLowestValueInAutoRange, currentHighestValueLimitInAutoRange); integerValuesHistogram.copyInto(targetHistogram.integerValuesHistogram); return targetHistogram; } /** * Get a copy of this histogram, corrected for coordinated omission. * <p> * To compensate for the loss of sampled values when a recorded value is larger than the expected * interval between value samples, the new histogram will include an auto-generated additional series of * decreasingly-smaller (down to the expectedIntervalBetweenValueSamples) value records for each count found * in the current histogram that is larger than the expectedIntervalBetweenValueSamples. * * Note: This is a post-correction method, as opposed to the at-recording correction method provided * by {@link #recordValueWithExpectedInterval(double, double) recordValueWithExpectedInterval}. The two * methods are mutually exclusive, and only one of the two should be be used on a given data set to correct * for the same coordinated omission issue. * by * <p> * See notes in the description of the Histogram calls for an illustration of why this corrective behavior is * important. * * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add * auto-generated value records as appropriate if value is larger * than expectedIntervalBetweenValueSamples * @return a copy of this histogram, corrected for coordinated omission. 
*/ public DoubleHistogram copyCorrectedForCoordinatedOmission(final double expectedIntervalBetweenValueSamples) { final DoubleHistogram targetHistogram = new DoubleHistogram(configuredHighestToLowestValueRatio, getNumberOfSignificantValueDigits()); targetHistogram.setTrackableValueRange(currentLowestValueInAutoRange, currentHighestValueLimitInAutoRange); targetHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); return targetHistogram; } /** * Copy this histogram into the target histogram, overwriting it's contents. * * @param targetHistogram the histogram to copy into */ public void copyInto(final DoubleHistogram targetHistogram) { targetHistogram.reset(); targetHistogram.add(this); targetHistogram.setStartTimeStamp(integerValuesHistogram.startTimeStampMsec); targetHistogram.setEndTimeStamp(integerValuesHistogram.endTimeStampMsec); } /** * Copy this histogram, corrected for coordinated omission, into the target histogram, overwriting it's contents. * (see {@link #copyCorrectedForCoordinatedOmission} for more detailed explanation about how correction is applied) * * @param targetHistogram the histogram to copy into * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add * auto-generated value records as appropriate if value is larger * than expectedIntervalBetweenValueSamples */ public void copyIntoCorrectedForCoordinatedOmission(final DoubleHistogram targetHistogram, final double expectedIntervalBetweenValueSamples) { targetHistogram.reset(); targetHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); targetHistogram.setStartTimeStamp(integerValuesHistogram.startTimeStampMsec); targetHistogram.setEndTimeStamp(integerValuesHistogram.endTimeStampMsec); } // // // // Add support: // // // /** * Add the contents of another histogram to this one. * * @param fromHistogram The other histogram. * @throws ArrayIndexOutOfBoundsException (may throw) if values in fromHistogram's cannot be * covered by this histogram's range */ public void add(final DoubleHistogram fromHistogram) throws ArrayIndexOutOfBoundsException { int arrayLength = fromHistogram.integerValuesHistogram.countsArrayLength; AbstractHistogram fromIntegerHistogram = fromHistogram.integerValuesHistogram; for (int i = 0; i < arrayLength; i++) { long count = fromIntegerHistogram.getCountAtIndex(i); if (count > 0) { recordValueWithCount( fromIntegerHistogram.valueFromIndex(i) * fromHistogram.getIntegerToDoubleValueConversionRatio(), count); } } } /** * Add the contents of another histogram to this one, while correcting the incoming data for coordinated omission. * <p> * To compensate for the loss of sampled values when a recorded value is larger than the expected * interval between value samples, the values added will include an auto-generated additional series of * decreasingly-smaller (down to the expectedIntervalBetweenValueSamples) value records for each count found * in the current histogram that is larger than the expectedIntervalBetweenValueSamples. * * Note: This is a post-recording correction method, as opposed to the at-recording correction method provided * by {@link #recordValueWithExpectedInterval(double, double) recordValueWithExpectedInterval}. The two * methods are mutually exclusive, and only one of the two should be be used on a given data set to correct * for the same coordinated omission issue. 
* <p> * See notes in the description of the Histogram calls for an illustration of why this corrective behavior is * important. * * @param fromHistogram Other histogram. highestToLowestValueRatio and numberOfSignificantValueDigits must match. * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add * auto-generated value records as appropriate if value is larger * than expectedIntervalBetweenValueSamples * @throws ArrayIndexOutOfBoundsException (may throw) if values exceed highestTrackableValue */ public void addWhileCorrectingForCoordinatedOmission(final DoubleHistogram fromHistogram, final double expectedIntervalBetweenValueSamples) { final DoubleHistogram toHistogram = this; for (HistogramIterationValue v : fromHistogram.integerValuesHistogram.recordedValues()) { toHistogram.recordValueWithCountAndExpectedInterval( v.getValueIteratedTo() * getIntegerToDoubleValueConversionRatio(), v.getCountAtValueIteratedTo(), expectedIntervalBetweenValueSamples); } } /** * Subtract the contents of another histogram from this one. * * @param otherHistogram The other histogram. * @throws ArrayIndexOutOfBoundsException (may throw) if values in otherHistogram cannot be * covered by this histogram's range */ public void subtract(final DoubleHistogram otherHistogram) { int arrayLength = otherHistogram.integerValuesHistogram.countsArrayLength; AbstractHistogram otherIntegerHistogram = otherHistogram.integerValuesHistogram; for (int i = 0; i < arrayLength; i++) { long otherCount = otherIntegerHistogram.getCountAtIndex(i); if (otherCount > 0) { double otherValue = otherIntegerHistogram.valueFromIndex(i) * otherHistogram.getIntegerToDoubleValueConversionRatio(); if (getCountAtValue(otherValue) < otherCount) { throw new IllegalArgumentException("otherHistogram count (" + otherCount + ") at value " + otherValue + " is larger than this one's (" + getCountAtValue(otherValue) + ")"); } recordValueWithCount(otherValue, -otherCount); } } } // // // // Comparison support: // // // /** * Determine if this histogram is equivalent to another. * * @param other the other histogram to compare to * @return True if this histogram is equivalent to the other. 
*/ public boolean equals(final Object other){ if ( this == other ) { return true; } if ( !(other instanceof DoubleHistogram) ) { return false; } DoubleHistogram that = (DoubleHistogram) other; return integerValuesHistogram.equals(that.integerValuesHistogram); } @Override public int hashCode() { return integerValuesHistogram.hashCode(); } // // // // Histogram structure querying support: // // // /** * Get the total count of all recorded values in the histogram * @return the total count of all recorded values in the histogram */ public long getTotalCount() { return integerValuesHistogram.getTotalCount(); } /** * get the current lowest (non zero) trackable value the automatically determined range * (keep in mind that this can change because it is auto ranging) * @return current lowest trackable value the automatically determined range */ double getCurrentLowestTrackableNonZeroValue() { return currentLowestValueInAutoRange; } /** * get the current highest trackable value in the automatically determined range * (keep in mind that this can change because it is auto ranging) * @return current highest trackable value in the automatically determined range */ double getCurrentHighestTrackableValue() { return currentHighestValueLimitInAutoRange; } /** * Get the current conversion ratio from interval integer value representation to double units. * (keep in mind that this can change because it is auto ranging). This ratio can be useful * for converting integer values found in iteration, although the preferred form for accessing * iteration values would be to use the * {@link org.HdrHistogram.HistogramIterationValue#getDoubleValueIteratedTo() getDoubleValueIteratedTo()} * and * {@link org.HdrHistogram.HistogramIterationValue#getDoubleValueIteratedFrom() getDoubleValueIteratedFrom()} * accessors to {@link org.HdrHistogram.HistogramIterationValue} iterated values. * * @return the current conversion ratio from interval integer value representation to double units. */ public double getIntegerToDoubleValueConversionRatio() { return integerValuesHistogram.integerToDoubleValueConversionRatio; } /** * get the configured numberOfSignificantValueDigits * @return numberOfSignificantValueDigits */ public int getNumberOfSignificantValueDigits() { return integerValuesHistogram.numberOfSignificantValueDigits; } /** * get the Dynamic range of the histogram: the configured ratio between the highest trackable value and the * lowest trackable non zero value at any given time. * @return the dynamic range of the histogram, expressed as the ratio between the highest trackable value * and the lowest trackable non zero value at any given time. */ public long getHighestToLowestValueRatio() { return configuredHighestToLowestValueRatio; } /** * Get the size (in value units) of the range of values that are equivalent to the given value within the * histogram's resolution. Where "equivalent" means that value samples recorded for any two * equivalent values are counted in a common total count. * * @param value The given value * @return The lowest value that is equivalent to the given value within the histogram's resolution. */ public double sizeOfEquivalentValueRange(final double value) { return integerValuesHistogram.sizeOfEquivalentValueRange((long)(value * getDoubleToIntegerValueConversionRatio())) * getIntegerToDoubleValueConversionRatio(); } /** * Get the lowest value that is equivalent to the given value within the histogram's resolution. 
* Where "equivalent" means that value samples recorded for any two * equivalent values are counted in a common total count. * * @param value The given value * @return The lowest value that is equivalent to the given value within the histogram's resolution. */ public double lowestEquivalentValue(final double value) { return integerValuesHistogram.lowestEquivalentValue((long)(value * getDoubleToIntegerValueConversionRatio())) * getIntegerToDoubleValueConversionRatio(); } /** * Get the highest value that is equivalent to the given value within the histogram's resolution. * Where "equivalent" means that value samples recorded for any two * equivalent values are counted in a common total count. * * @param value The given value * @return The highest value that is equivalent to the given value within the histogram's resolution. */ public double highestEquivalentValue(final double value) { double nextNonEquivalentValue = nextNonEquivalentValue(value); // Theoretically, nextNonEquivalentValue - ulp(nextNonEquivalentValue) == nextNonEquivalentValue // is possible (if the ulp size switches right at nextNonEquivalentValue), so drop by 2 ulps and // increment back up to closest within-ulp value. double highestEquivalentValue = nextNonEquivalentValue - (2 * Math.ulp(nextNonEquivalentValue)); while (highestEquivalentValue + Math.ulp(highestEquivalentValue) < nextNonEquivalentValue) { highestEquivalentValue += Math.ulp(highestEquivalentValue); } return highestEquivalentValue; } /** * Get a value that lies in the middle (rounded up) of the range of values equivalent the given value. * Where "equivalent" means that value samples recorded for any two * equivalent values are counted in a common total count. * * @param value The given value * @return The value lies in the middle (rounded up) of the range of values equivalent the given value. */ public double medianEquivalentValue(final double value) { return integerValuesHistogram.medianEquivalentValue((long)(value * getDoubleToIntegerValueConversionRatio())) * getIntegerToDoubleValueConversionRatio(); } /** * Get the next value that is not equivalent to the given value within the histogram's resolution. * Where "equivalent" means that value samples recorded for any two * equivalent values are counted in a common total count. * * @param value The given value * @return The next value that is not equivalent to the given value within the histogram's resolution. */ public double nextNonEquivalentValue(final double value) { return integerValuesHistogram.nextNonEquivalentValue((long)(value * getDoubleToIntegerValueConversionRatio())) * getIntegerToDoubleValueConversionRatio(); } /** * Determine if two values are equivalent with the histogram's resolution. * Where "equivalent" means that value samples recorded for any two * equivalent values are counted in a common total count. * * @param value1 first value to compare * @param value2 second value to compare * @return True if values are equivalent to within the histogram's resolution. 
*/ public boolean valuesAreEquivalent(final double value1, final double value2) { return (lowestEquivalentValue(value1) == lowestEquivalentValue(value2)); } /** * Provide a (conservatively high) estimate of the Histogram's total footprint in bytes * * @return a (conservatively high) estimate of the Histogram's total footprint in bytes */ public int getEstimatedFootprintInBytes() { return integerValuesHistogram._getEstimatedFootprintInBytes(); } // // // // Timestamp and tag support: // // // /** * get the start time stamp [optionally] stored with this histogram * @return the start time stamp [optionally] stored with this histogram */ public long getStartTimeStamp() { return integerValuesHistogram.getStartTimeStamp(); } /** * Set the start time stamp value associated with this histogram to a given value. * @param timeStampMsec the value to set the time stamp to, [by convention] in msec since the epoch. */ public void setStartTimeStamp(final long timeStampMsec) { integerValuesHistogram.setStartTimeStamp(timeStampMsec); } /** * get the end time stamp [optionally] stored with this histogram * @return the end time stamp [optionally] stored with this histogram */ public long getEndTimeStamp() { return integerValuesHistogram.getEndTimeStamp(); } /** * Set the end time stamp value associated with this histogram to a given value. * @param timeStampMsec the value to set the time stamp to, [by convention] in msec since the epoch. */ public void setEndTimeStamp(final long timeStampMsec) { integerValuesHistogram.setEndTimeStamp(timeStampMsec); } /** * get the tag string [optionally] associated with this histogram * @return tag string [optionally] associated with this histogram */ public String getTag() { return integerValuesHistogram.getTag(); } /** * Set the tag string associated with this histogram * @param tag the tag string to associate with this histogram */ public void setTag(String tag) { integerValuesHistogram.setTag(tag); } // // // // Histogram Data access support: // // // /** * Get the lowest recorded value level in the histogram * * @return the Min value recorded in the histogram */ public double getMinValue() { return integerValuesHistogram.getMinValue() * getIntegerToDoubleValueConversionRatio(); } /** * Get the highest recorded value level in the histogram * * @return the Max value recorded in the histogram */ public double getMaxValue() { return highestEquivalentValue(integerValuesHistogram.getMaxValue() * getIntegerToDoubleValueConversionRatio()); } /** * Get the lowest recorded non-zero value level in the histogram * * @return the lowest recorded non-zero value level in the histogram */ public double getMinNonZeroValue() { return integerValuesHistogram.getMinNonZeroValue() * getIntegerToDoubleValueConversionRatio(); } /** * Get the highest recorded value level in the histogram as a double * * @return the highest recorded value level in the histogram as a double */ @Override public double getMaxValueAsDouble() { return getMaxValue(); } /** * Get the computed mean value of all recorded values in the histogram * * @return the mean value (in value units) of the histogram data */ public double getMean() { return integerValuesHistogram.getMean() * getIntegerToDoubleValueConversionRatio(); } /** * Get the computed standard deviation of all recorded values in the histogram * * @return the standard deviation (in value units) of the histogram data */ public double getStdDeviation() { return integerValuesHistogram.getStdDeviation() * getIntegerToDoubleValueConversionRatio(); } /** * Get the 
value at a given percentile. * When the percentile is &gt; 0.0, the value returned is the value that the given the given * percentage of the overall recorded value entries in the histogram are either smaller than * or equivalent to. When the percentile is 0.0, the value returned is the value that all value * entries in the histogram are either larger than or equivalent to. * <p> * Note that two values are "equivalent" in this statement if * {@link org.HdrHistogram.DoubleHistogram#valuesAreEquivalent} would return true. * * @param percentile The percentile for which to return the associated value * @return The value that the given percentage of the overall recorded value entries in the * histogram are either smaller than or equivalent to. When the percentile is 0.0, returns the * value that all value entries in the histogram are either larger than or equivalent to. */ public double getValueAtPercentile(final double percentile) { return integerValuesHistogram.getValueAtPercentile(percentile) * getIntegerToDoubleValueConversionRatio(); } /** * Get the percentile at a given value. * The percentile returned is the percentile of values recorded in the histogram that are smaller * than or equivalent to the given value. * <p> * Note that two values are "equivalent" in this statement if * {@link org.HdrHistogram.DoubleHistogram#valuesAreEquivalent} would return true. * * @param value The value for which to return the associated percentile * @return The percentile of values recorded in the histogram that are smaller than or equivalent * to the given value. */ public double getPercentileAtOrBelowValue(final double value) { return integerValuesHistogram.getPercentileAtOrBelowValue((long)(value * getDoubleToIntegerValueConversionRatio())); } /** * Get the count of recorded values within a range of value levels (inclusive to within the histogram's resolution). * * @param lowValue The lower value bound on the range for which * to provide the recorded count. Will be rounded down with * {@link DoubleHistogram#lowestEquivalentValue lowestEquivalentValue}. * @param highValue The higher value bound on the range for which to provide the recorded count. * Will be rounded up with {@link DoubleHistogram#highestEquivalentValue highestEquivalentValue}. * @return the total count of values recorded in the histogram within the value range that is * {@literal >=} lowestEquivalentValue(<i>lowValue</i>) and {@literal <=} highestEquivalentValue(<i>highValue</i>) */ public double getCountBetweenValues(final double lowValue, final double highValue) throws ArrayIndexOutOfBoundsException { return integerValuesHistogram.getCountBetweenValues( (long)(lowValue * getDoubleToIntegerValueConversionRatio()), (long)(highValue * getDoubleToIntegerValueConversionRatio()) ); } /** * Get the count of recorded values at a specific value (to within the histogram resolution at the value level). * * @param value The value for which to provide the recorded count * @return The total count of values recorded in the histogram within the value range that is * {@literal >=} lowestEquivalentValue(<i>value</i>) and {@literal <=} highestEquivalentValue(<i>value</i>) */ public long getCountAtValue(final double value) throws ArrayIndexOutOfBoundsException { return integerValuesHistogram.getCountAtValue((long)(value * getDoubleToIntegerValueConversionRatio())); } /** * Provide a means of iterating through histogram values according to percentile levels. 
The iteration is * performed in steps that start at 0% and reduce their distance to 100% according to the * <i>percentileTicksPerHalfDistance</i> parameter, ultimately reaching 100% when all recorded histogram * values are exhausted. * <p> * @param percentileTicksPerHalfDistance The number of iteration steps per half-distance to 100%. * @return An {@link java.lang.Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} * through the histogram using a * {@link DoublePercentileIterator} */ public Percentiles percentiles(final int percentileTicksPerHalfDistance) { return new Percentiles(this, percentileTicksPerHalfDistance); } /** * Provide a means of iterating through histogram values using linear steps. The iteration is * performed in steps of <i>valueUnitsPerBucket</i> in size, terminating when all recorded histogram * values are exhausted. * * @param valueUnitsPerBucket The size (in value units) of the linear buckets to use * @return An {@link java.lang.Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} * through the histogram using a * {@link DoubleLinearIterator} */ public LinearBucketValues linearBucketValues(final double valueUnitsPerBucket) { return new LinearBucketValues(this, valueUnitsPerBucket); } /** * Provide a means of iterating through histogram values at logarithmically increasing levels. The iteration is * performed in steps that start at <i>valueUnitsInFirstBucket</i> and increase exponentially according to * <i>logBase</i>, terminating when all recorded histogram values are exhausted. * * @param valueUnitsInFirstBucket The size (in value units) of the first bucket in the iteration * @param logBase The multiplier by which bucket sizes will grow in each iteration step * @return An {@link java.lang.Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} * through the histogram using * a {@link DoubleLogarithmicIterator} */ public LogarithmicBucketValues logarithmicBucketValues(final double valueUnitsInFirstBucket, final double logBase) { return new LogarithmicBucketValues(this, valueUnitsInFirstBucket, logBase); } /** * Provide a means of iterating through all recorded histogram values using the finest granularity steps * supported by the underlying representation. The iteration steps through all non-zero recorded value counts, * and terminates when all recorded histogram values are exhausted. * * @return An {@link java.lang.Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} * through the histogram using * a {@link DoubleRecordedValuesIterator} */ public RecordedValues recordedValues() { return new RecordedValues(this); } /** * Provide a means of iterating through all histogram values using the finest granularity steps supported by * the underlying representation. The iteration steps through all possible unit value levels, regardless of * whether or not there were recorded values for that value level, and terminates when all recorded histogram * values are exhausted. 
* * @return An {@link java.lang.Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} * through the histogram using a {@link DoubleAllValuesIterator} */ public AllValues allValues() { return new AllValues(this); } // Percentile iterator support: /** * An {@link java.lang.Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} through * the histogram using a {@link DoublePercentileIterator} */ public class Percentiles implements Iterable<DoubleHistogramIterationValue> { final DoubleHistogram histogram; final int percentileTicksPerHalfDistance; private Percentiles(final DoubleHistogram histogram, final int percentileTicksPerHalfDistance) { this.histogram = histogram; this.percentileTicksPerHalfDistance = percentileTicksPerHalfDistance; } /** * @return A {@link DoublePercentileIterator}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} */ public Iterator<DoubleHistogramIterationValue> iterator() { return new DoublePercentileIterator(histogram, percentileTicksPerHalfDistance); } } // Linear iterator support: /** * An {@link java.lang.Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} through * the histogram using a {@link DoubleLinearIterator} */ public class LinearBucketValues implements Iterable<DoubleHistogramIterationValue> { final DoubleHistogram histogram; final double valueUnitsPerBucket; private LinearBucketValues(final DoubleHistogram histogram, final double valueUnitsPerBucket) { this.histogram = histogram; this.valueUnitsPerBucket = valueUnitsPerBucket; } /** * @return A {@link DoubleLinearIterator}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} */ public Iterator<DoubleHistogramIterationValue> iterator() { return new DoubleLinearIterator(histogram, valueUnitsPerBucket); } } // Logarithmic iterator support: /** * An {@link java.lang.Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} through * the histogram using a {@link DoubleLogarithmicIterator} */ public class LogarithmicBucketValues implements Iterable<DoubleHistogramIterationValue> { final DoubleHistogram histogram; final double valueUnitsInFirstBucket; final double logBase; private LogarithmicBucketValues(final DoubleHistogram histogram, final double valueUnitsInFirstBucket, final double logBase) { this.histogram = histogram; this.valueUnitsInFirstBucket = valueUnitsInFirstBucket; this.logBase = logBase; } /** * @return A {@link DoubleLogarithmicIterator}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} */ public Iterator<DoubleHistogramIterationValue> iterator() { return new DoubleLogarithmicIterator(histogram, valueUnitsInFirstBucket, logBase); } } // Recorded value iterator support: /** * An {@link java.lang.Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} through * the histogram using a {@link DoubleRecordedValuesIterator} */ public class RecordedValues implements Iterable<DoubleHistogramIterationValue> { final DoubleHistogram histogram; private RecordedValues(final DoubleHistogram histogram) { this.histogram = histogram; } /** * @return A {@link DoubleRecordedValuesIterator}{@literal <}{@link HistogramIterationValue}{@literal >} */ public Iterator<DoubleHistogramIterationValue> iterator() { return new DoubleRecordedValuesIterator(histogram); } } // AllValues iterator support: /** * An {@link java.lang.Iterable}{@literal <}{@link DoubleHistogramIterationValue}{@literal >} through * the histogram using a {@link DoubleAllValuesIterator} */ public class AllValues implements 
Iterable<DoubleHistogramIterationValue> { final DoubleHistogram histogram; private AllValues(final DoubleHistogram histogram) { this.histogram = histogram; } /** * @return A {@link DoubleAllValuesIterator}{@literal <}{@link HistogramIterationValue}{@literal >} */ public Iterator<DoubleHistogramIterationValue> iterator() { return new DoubleAllValuesIterator(histogram); } } /** * Produce textual representation of the value distribution of histogram data by percentile. The distribution is * output with exponentially increasing resolution, with each exponentially decreasing half-distance containing * five (5) percentile reporting tick points. * * @param printStream Stream into which the distribution will be output * <p> * @param outputValueUnitScalingRatio The scaling factor by which to divide histogram recorded values units in * output */ public void outputPercentileDistribution(final PrintStream printStream, final Double outputValueUnitScalingRatio) { outputPercentileDistribution(printStream, 5, outputValueUnitScalingRatio); } // // // // Textual percentile output support: // // // /** * Produce textual representation of the value distribution of histogram data by percentile. The distribution is * output with exponentially increasing resolution, with each exponentially decreasing half-distance containing * <i>dumpTicksPerHalf</i> percentile reporting tick points. * * @param printStream Stream into which the distribution will be output * <p> * @param percentileTicksPerHalfDistance The number of reporting points per exponentially decreasing half-distance * <p> * @param outputValueUnitScalingRatio The scaling factor by which to divide histogram recorded values units in * output */ public void outputPercentileDistribution(final PrintStream printStream, final int percentileTicksPerHalfDistance, final Double outputValueUnitScalingRatio) { outputPercentileDistribution(printStream, percentileTicksPerHalfDistance, outputValueUnitScalingRatio, false); } /** * Produce textual representation of the value distribution of histogram data by percentile. The distribution is * output with exponentially increasing resolution, with each exponentially decreasing half-distance containing * <i>dumpTicksPerHalf</i> percentile reporting tick points. * * @param printStream Stream into which the distribution will be output * <p> * @param percentileTicksPerHalfDistance The number of reporting points per exponentially decreasing half-distance * <p> * @param outputValueUnitScalingRatio The scaling factor by which to divide histogram recorded values units in * output * @param useCsvFormat Output in CSV format if true. Otherwise use plain text form. 
*/ public void outputPercentileDistribution(final PrintStream printStream, final int percentileTicksPerHalfDistance, final Double outputValueUnitScalingRatio, final boolean useCsvFormat) { integerValuesHistogram.outputPercentileDistribution(printStream, percentileTicksPerHalfDistance, outputValueUnitScalingRatio / getIntegerToDoubleValueConversionRatio(), useCsvFormat); } // // // // Serialization support: // // // private static final long serialVersionUID = 42L; private void writeObject(final ObjectOutputStream o) throws IOException { o.writeLong(configuredHighestToLowestValueRatio); o.writeDouble(currentLowestValueInAutoRange); o.writeObject(integerValuesHistogram); } private void readObject(final ObjectInputStream o) throws IOException, ClassNotFoundException { final long configuredHighestToLowestValueRatio = o.readLong(); final double lowestValueInAutoRange = o.readDouble(); AbstractHistogram integerValuesHistogram = (AbstractHistogram) o.readObject(); init(configuredHighestToLowestValueRatio, lowestValueInAutoRange, integerValuesHistogram); } // // // // Encoding/Decoding support: // // // /** * Get the capacity needed to encode this histogram into a ByteBuffer * @return the capacity needed to encode this histogram into a ByteBuffer */ @Override public int getNeededByteBufferCapacity() { return integerValuesHistogram.getNeededByteBufferCapacity(); } private int getNeededByteBufferCapacity(final int relevantLength) { return integerValuesHistogram.getNeededByteBufferCapacity(relevantLength); } private static final int DHIST_encodingCookie = 0x0c72124e; private static final int DHIST_compressedEncodingCookie = 0x0c72124f; static boolean isDoubleHistogramCookie(int cookie) { return isCompressedDoubleHistogramCookie(cookie) || isNonCompressedDoubleHistogramCookie(cookie); } static boolean isCompressedDoubleHistogramCookie(int cookie) { return (cookie == DHIST_compressedEncodingCookie); } static boolean isNonCompressedDoubleHistogramCookie(int cookie) { return (cookie == DHIST_encodingCookie); } /** * Encode this histogram into a ByteBuffer * @param buffer The buffer to encode into * @return The number of bytes written to the buffer */ synchronized public int encodeIntoByteBuffer(final ByteBuffer buffer) { long maxValue = integerValuesHistogram.getMaxValue(); int relevantLength = integerValuesHistogram.getLengthForNumberOfBuckets( integerValuesHistogram.getBucketsNeededToCoverValue(maxValue)); if (buffer.capacity() < getNeededByteBufferCapacity(relevantLength)) { throw new ArrayIndexOutOfBoundsException("buffer does not have capacity for " + getNeededByteBufferCapacity(relevantLength) + " bytes"); } buffer.putInt(DHIST_encodingCookie); buffer.putInt(getNumberOfSignificantValueDigits()); buffer.putLong(configuredHighestToLowestValueRatio); return integerValuesHistogram.encodeIntoByteBuffer(buffer) + 16; } /** * Encode this histogram in compressed form into a byte array * @param targetBuffer The buffer to encode into * @param compressionLevel Compression level (for java.util.zip.Deflater). 
* @return The number of bytes written to the buffer */ @Override synchronized public int encodeIntoCompressedByteBuffer( final ByteBuffer targetBuffer, final int compressionLevel) { targetBuffer.putInt(DHIST_compressedEncodingCookie); targetBuffer.putInt(getNumberOfSignificantValueDigits()); targetBuffer.putLong(configuredHighestToLowestValueRatio); return integerValuesHistogram.encodeIntoCompressedByteBuffer(targetBuffer, compressionLevel) + 16; } /** * Encode this histogram in compressed form into a byte array * @param targetBuffer The buffer to encode into * @return The number of bytes written to the array */ public int encodeIntoCompressedByteBuffer(final ByteBuffer targetBuffer) { return encodeIntoCompressedByteBuffer(targetBuffer, Deflater.DEFAULT_COMPRESSION); } private static final Class[] constructorArgTypes = {long.class, int.class, Class.class, AbstractHistogram.class}; static <T extends DoubleHistogram> T constructHistogramFromBuffer( int cookie, final ByteBuffer buffer, final Class<T> doubleHistogramClass, final Class<? extends AbstractHistogram> histogramClass, final long minBarForHighestToLowestValueRatio) throws DataFormatException { int numberOfSignificantValueDigits = buffer.getInt(); long configuredHighestToLowestValueRatio = buffer.getLong(); final AbstractHistogram valuesHistogram; if (isNonCompressedDoubleHistogramCookie(cookie)) { valuesHistogram = AbstractHistogram.decodeFromByteBuffer(buffer, histogramClass, minBarForHighestToLowestValueRatio); } else if (isCompressedDoubleHistogramCookie(cookie)) { valuesHistogram = AbstractHistogram.decodeFromCompressedByteBuffer(buffer, histogramClass, minBarForHighestToLowestValueRatio); } else { throw new IllegalArgumentException("The buffer does not contain a DoubleHistogram"); } try { Constructor<T> doubleHistogramConstructor = doubleHistogramClass.getDeclaredConstructor(constructorArgTypes); T histogram = doubleHistogramConstructor.newInstance( configuredHighestToLowestValueRatio, numberOfSignificantValueDigits, histogramClass, valuesHistogram ); histogram.setAutoResize(true); return histogram; } catch (NoSuchMethodException | InstantiationException | IllegalAccessException | InvocationTargetException ex) { throw new IllegalStateException("Unable to construct DoubleHistogram of type " + doubleHistogramClass); } } /** * Construct a new DoubleHistogram by decoding it from a ByteBuffer. * @param buffer The buffer to decode from * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high * @return The newly constructed DoubleHistogram */ public static DoubleHistogram decodeFromByteBuffer( final ByteBuffer buffer, final long minBarForHighestToLowestValueRatio) { return decodeFromByteBuffer(buffer, Histogram.class, minBarForHighestToLowestValueRatio); } /** * Construct a new DoubleHistogram by decoding it from a ByteBuffer, using a * specified AbstractHistogram subclass for tracking internal counts (e.g. {@link org.HdrHistogram.Histogram}, * {@link org.HdrHistogram.ConcurrentHistogram}, {@link org.HdrHistogram.SynchronizedHistogram}, * {@link org.HdrHistogram.IntCountsHistogram}, {@link org.HdrHistogram.ShortCountsHistogram}). 
* * @param buffer The buffer to decode from * @param internalCountsHistogramClass The class to use for internal counts tracking * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high * @return The newly constructed DoubleHistogram */ public static DoubleHistogram decodeFromByteBuffer( final ByteBuffer buffer, final Class<? extends AbstractHistogram> internalCountsHistogramClass, long minBarForHighestToLowestValueRatio) { try { int cookie = buffer.getInt(); if (!isNonCompressedDoubleHistogramCookie(cookie)) { throw new IllegalArgumentException("The buffer does not contain a DoubleHistogram"); } DoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer, DoubleHistogram.class, internalCountsHistogramClass, minBarForHighestToLowestValueRatio); return histogram; } catch (DataFormatException ex) { throw new RuntimeException(ex); } } /** * Construct a new DoubleHistogram by decoding it from a compressed form in a ByteBuffer. * @param buffer The buffer to decode from * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high * @return The newly constructed DoubleHistogram * @throws DataFormatException on error parsing/decompressing the buffer */ public static DoubleHistogram decodeFromCompressedByteBuffer( final ByteBuffer buffer, final long minBarForHighestToLowestValueRatio) throws DataFormatException { return decodeFromCompressedByteBuffer(buffer, Histogram.class, minBarForHighestToLowestValueRatio); } /** * Construct a new DoubleHistogram by decoding it from a compressed form in a ByteBuffer, using a * specified AbstractHistogram subclass for tracking internal counts (e.g. {@link org.HdrHistogram.Histogram}, * {@link org.HdrHistogram.AtomicHistogram}, {@link org.HdrHistogram.SynchronizedHistogram}, * {@link org.HdrHistogram.IntCountsHistogram}, {@link org.HdrHistogram.ShortCountsHistogram}). * * @param buffer The buffer to decode from * @param internalCountsHistogramClass The class to use for internal counts tracking * @param minBarForHighestToLowestValueRatio Force highestTrackableValue to be set at least this high * @return The newly constructed DoubleHistogram * @throws DataFormatException on error parsing/decompressing the buffer */ public static DoubleHistogram decodeFromCompressedByteBuffer( final ByteBuffer buffer, Class<? extends AbstractHistogram> internalCountsHistogramClass, long minBarForHighestToLowestValueRatio) throws DataFormatException { int cookie = buffer.getInt(); if (!isCompressedDoubleHistogramCookie(cookie)) { throw new IllegalArgumentException("The buffer does not contain a compressed DoubleHistogram"); } DoubleHistogram histogram = constructHistogramFromBuffer(cookie, buffer, DoubleHistogram.class, internalCountsHistogramClass, minBarForHighestToLowestValueRatio); return histogram; } /** * Construct a new DoubleHistogram by decoding it from a String containing a base64 encoded * compressed histogram representation. 
* * @param base64CompressedHistogramString A string containing a base64 encoding of a compressed histogram * @return A DoubleHistogram decoded from the string * @throws DataFormatException on error parsing/decompressing the input */ public static DoubleHistogram fromString(final String base64CompressedHistogramString) throws DataFormatException { return decodeFromCompressedByteBuffer( ByteBuffer.wrap(Base64Helper.parseBase64Binary(base64CompressedHistogramString)), 0); } // // // // Internal helper methods: // // // private long deriveInternalHighestToLowestValueRatio(final long externalHighestToLowestValueRatio) { // Internal dynamic range needs to be 1 order of magnitude larger than the containing order of magnitude. // e.g. the dynamic range that covers [0.9, 2.1) is 2.33x, which on it's own would require 4x range to // cover the contained order of magnitude. But (if 1.0 was a bucket boundary, for example, the range // will actually need to cover [0.5..1.0) [1.0..2.0) [2.0..4.0), mapping to an 8x internal dynamic range. long internalHighestToLowestValueRatio = 1L << (findContainingBinaryOrderOfMagnitude(externalHighestToLowestValueRatio) + 1); return internalHighestToLowestValueRatio; } private long deriveIntegerValueRange(final long externalHighestToLowestValueRatio, final int numberOfSignificantValueDigits) { long internalHighestToLowestValueRatio = deriveInternalHighestToLowestValueRatio(externalHighestToLowestValueRatio); // We cannot use the bottom half of bucket 0 in an integer values histogram to represent double // values, because the required precision does not exist there. We therefore need the integer // range to be bigger, such that the entire double value range can fit in the upper halves of // all buckets. Compute the integer value range that will achieve this: long lowestTackingIntegerValue = AbstractHistogram.numberOfSubBuckets(numberOfSignificantValueDigits) / 2; long integerValueRange = lowestTackingIntegerValue * internalHighestToLowestValueRatio; return integerValueRange; } private long getLowestTrackingIntegerValue() { return integerValuesHistogram.subBucketHalfCount; } private static int findContainingBinaryOrderOfMagnitude(final long longNumber) { int pow2ceiling = 64 - Long.numberOfLeadingZeros(longNumber); // smallest power of 2 containing value return pow2ceiling; } private static int findContainingBinaryOrderOfMagnitude(final double doubleNumber) { long longNumber = (long) Math.ceil(doubleNumber); return findContainingBinaryOrderOfMagnitude(longNumber); } private int findCappedContainingBinaryOrderOfMagnitude(final double doubleNumber) { if (doubleNumber > configuredHighestToLowestValueRatio) { return (int) (Math.log(configuredHighestToLowestValueRatio)/Math.log(2)); } if (doubleNumber > Math.pow(2.0, 50)) { return 50; } return findContainingBinaryOrderOfMagnitude(doubleNumber); } static { // We don't want to allow the histogram to shift and expand into value ranges that could equate // to infinity (e.g. 1024.0 * (Double.MAX_VALUE / 1024.0) == Infinity). So lets makes sure the // highestAllowedValueEver cap is a couple of binary orders of magnitude away from MAX_VALUE: // Choose a highestAllowedValueEver that is a nice power of 2 multiple of 1.0 : double value = 1.0; while (value < Double.MAX_VALUE / 4.0) { value *= 2; } highestAllowedValueEver = value; } }
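// A minimal usage sketch for the DoubleHistogram API documented above: auto-ranging
// construction, value recording, percentile queries, and post-recording coordinated-omission
// correction. The class name, sample values, and units below are hypothetical illustration only.
import org.HdrHistogram.DoubleHistogram;

public class DoubleHistogramUsageSketch {
    public static void main(String[] args) {
        // 3 significant decimal digits, auto-ranging (the covered range adjusts as values arrive):
        DoubleHistogram histogram = new DoubleHistogram(3);

        // Record some sampled values (e.g. response times in milliseconds):
        histogram.recordValue(0.42);
        histogram.recordValue(1.7);
        histogram.recordValueWithCount(12.5, 10);

        // Query the recorded distribution:
        System.out.println("count = " + histogram.getTotalCount());
        System.out.println("p99   = " + histogram.getValueAtPercentile(99.0));
        System.out.println("max   = " + histogram.getMaxValue());

        // Post-correct for coordinated omission, assuming a sample was expected every 10 ms:
        DoubleHistogram corrected = histogram.copyCorrectedForCoordinatedOmission(10.0);
        corrected.outputPercentileDistribution(System.out, 1.0);
    }
}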
83,562
47.526713
129
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/DoubleHistogramIterationValue.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; /** * Represents a value point iterated through in a {@link DoubleHistogram}, with associated stats. * <ul> * <li><b><code>valueIteratedTo</code></b> :<br> The actual value level that was iterated to by the iterator</li> * <li><b><code>prevValueIteratedTo</code></b> :<br> The actual value level that was iterated from by the iterator</li> * <li><b><code>countAtValueIteratedTo</code></b> :<br> The count of recorded values in the histogram that * exactly match this [lowestEquivalentValue(valueIteratedTo)...highestEquivalentValue(valueIteratedTo)] value * range.</li> * <li><b><code>countAddedInThisIterationStep</code></b> :<br> The count of recorded values in the histogram that * were added to the totalCountToThisValue (below) as a result on this iteration step. Since multiple iteration * steps may occur with overlapping equivalent value ranges, the count may be lower than the count found at * the value (e.g. multiple linear steps or percentile levels can occur within a single equivalent value range)</li> * <li><b><code>totalCountToThisValue</code></b> :<br> The total count of all recorded values in the histogram at * values equal or smaller than valueIteratedTo.</li> * <li><b><code>totalValueToThisValue</code></b> :<br> The sum of all recorded values in the histogram at values * equal or smaller than valueIteratedTo.</li> * <li><b><code>percentile</code></b> :<br> The percentile of recorded values in the histogram at values equal * or smaller than valueIteratedTo.</li> * <li><b><code>percentileLevelIteratedTo</code></b> :<br> The percentile level that the iterator returning this * HistogramIterationValue had iterated to. Generally, percentileLevelIteratedTo will be equal to or smaller than * percentile, but the same value point can contain multiple iteration levels for some iterators. E.g. 
a * PercentileIterator can stop multiple times in the exact same value point (if the count at that value covers a * range of multiple percentiles in the requested percentile iteration points).</li> * </ul> */ public class DoubleHistogramIterationValue { private final HistogramIterationValue integerHistogramIterationValue; void reset() { integerHistogramIterationValue.reset(); } DoubleHistogramIterationValue(HistogramIterationValue integerHistogramIterationValue) { this.integerHistogramIterationValue = integerHistogramIterationValue; } public String toString() { return "valueIteratedTo:" + getValueIteratedTo() + ", prevValueIteratedTo:" + getValueIteratedFrom() + ", countAtValueIteratedTo:" + getCountAtValueIteratedTo() + ", countAddedInThisIterationStep:" + getCountAddedInThisIterationStep() + ", totalCountToThisValue:" + getTotalCountToThisValue() + ", totalValueToThisValue:" + getTotalValueToThisValue() + ", percentile:" + getPercentile() + ", percentileLevelIteratedTo:" + getPercentileLevelIteratedTo(); } public double getValueIteratedTo() { return integerHistogramIterationValue.getValueIteratedTo() * integerHistogramIterationValue.getIntegerToDoubleValueConversionRatio(); } public double getValueIteratedFrom() { return integerHistogramIterationValue.getValueIteratedFrom() * integerHistogramIterationValue.getIntegerToDoubleValueConversionRatio(); } public long getCountAtValueIteratedTo() { return integerHistogramIterationValue.getCountAtValueIteratedTo(); } public long getCountAddedInThisIterationStep() { return integerHistogramIterationValue.getCountAddedInThisIterationStep(); } public long getTotalCountToThisValue() { return integerHistogramIterationValue.getTotalCountToThisValue(); } public double getTotalValueToThisValue() { return integerHistogramIterationValue.getTotalValueToThisValue() * integerHistogramIterationValue.getIntegerToDoubleValueConversionRatio(); } public double getPercentile() { return integerHistogramIterationValue.getPercentile(); } public double getPercentileLevelIteratedTo() { return integerHistogramIterationValue.getPercentileLevelIteratedTo(); } public HistogramIterationValue getIntegerHistogramIterationValue() { return integerHistogramIterationValue; } }
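// A minimal sketch of how DoubleHistogramIterationValue instances (described above) are
// obtained in practice: by iterating a DoubleHistogram through recordedValues().
// The class name and sample values are hypothetical.
import org.HdrHistogram.DoubleHistogram;
import org.HdrHistogram.DoubleHistogramIterationValue;

public class DoubleIterationValueSketch {
    public static void main(String[] args) {
        DoubleHistogram histogram = new DoubleHistogram(2);
        histogram.recordValue(1.0);
        histogram.recordValueWithCount(2.5, 4);

        // Each step reports the value level reached, its count, and cumulative statistics:
        for (DoubleHistogramIterationValue v : histogram.recordedValues()) {
            System.out.println("value = " + v.getValueIteratedTo()
                    + ", count = " + v.getCountAtValueIteratedTo()
                    + ", cumulative count = " + v.getTotalCountToThisValue()
                    + ", percentile = " + v.getPercentile());
        }
    }
}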
4,639
46.835052
119
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/IntervalHistogramProvider.java
package org.HdrHistogram; public interface IntervalHistogramProvider<T extends EncodableHistogram> { /** * Get a new instance of an interval histogram, which will include a stable, consistent view of all value * counts accumulated since the last interval histogram was taken. * <p> * Calling this will reset the value counts, and start accumulating value counts for the next interval. * * @return a histogram containing the value counts accumulated since the last interval histogram was taken. */ T getIntervalHistogram(); /** * Get an interval histogram, which will include a stable, consistent view of all value counts * accumulated since the last interval histogram was taken. * <p> * {@link #getIntervalHistogram(EncodableHistogram histogramToRecycle) * getIntervalHistogram(histogramToRecycle)} * accepts a previously returned interval histogram that can be recycled internally to avoid allocation * and content copying operations, and is therefore significantly more efficient for repeated use than * {@link #getIntervalHistogram()} and * {@link #getIntervalHistogramInto getIntervalHistogramInto()}. The provided * {@code histogramToRecycle} must * be either be null or an interval histogram returned by a previous call to * {@link #getIntervalHistogram(EncodableHistogram histogramToRecycle) * getIntervalHistogram(histogramToRecycle)} or * {@link #getIntervalHistogram()}. * <p> * NOTE: The caller is responsible for not recycling the same returned interval histogram more than once. If * the same interval histogram instance is recycled more than once, behavior is undefined. * <p> * Calling {@link #getIntervalHistogram(EncodableHistogram histogramToRecycle) * getIntervalHistogram(histogramToRecycle)} will reset the value counts, and start accumulating value * counts for the next interval * * @param histogramToRecycle a previously returned interval histogram that may be recycled to avoid allocation and * copy operations. * @return a histogram containing the value counts accumulated since the last interval histogram was taken. */ T getIntervalHistogram(T histogramToRecycle); /** * Get an interval histogram, which will include a stable, consistent view of all value counts * accumulated since the last interval histogram was taken. * <p> * {@link #getIntervalHistogram(EncodableHistogram histogramToRecycle) * getIntervalHistogram(histogramToRecycle)} * accepts a previously returned interval histogram that can be recycled internally to avoid allocation * and content copying operations, and is therefore significantly more efficient for repeated use than * {@link #getIntervalHistogram()} and * {@link #getIntervalHistogramInto getIntervalHistogramInto()}. The provided * {@code histogramToRecycle} must * be either be null or an interval histogram returned by a previous call to * {@link #getIntervalHistogram(EncodableHistogram histogramToRecycle) * getIntervalHistogram(histogramToRecycle)} or * {@link #getIntervalHistogram()}. * <p> * NOTE: The caller is responsible for not recycling the same returned interval histogram more than once. If * the same interval histogram instance is recycled more than once, behavior is undefined. * <p> * Calling {@link #getIntervalHistogram(EncodableHistogram histogramToRecycle) * getIntervalHistogram(histogramToRecycle)} will reset the value counts, and start accumulating value * counts for the next interval * * @param histogramToRecycle a previously returned interval histogram that may be recycled to avoid allocation and * copy operations. 
* @param enforceContainingInstance if true, will only allow recycling of histograms previously returned from this * instance of {@link IntervalHistogramProvider}. If false, will allow recycling histograms * previously returned by other instances of {@link IntervalHistogramProvider}. * @return a histogram containing the value counts accumulated since the last interval histogram was taken. */ T getIntervalHistogram(T histogramToRecycle, boolean enforceContainingInstance); /** * Place a copy of the value counts accumulated (since the last interval histogram * was taken) into {@code targetHistogram}. * <p> * Calling {@link #getIntervalHistogramInto getIntervalHistogramInto()} will reset * the value counts, and start accumulating value counts for the next interval. * * @param targetHistogram the histogram into which the interval histogram's data should be copied */ void getIntervalHistogramInto(T targetHistogram); }
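// A minimal sketch of the recycling pattern described above, using org.HdrHistogram.Recorder,
// one of the library's IntervalHistogramProvider<Histogram> implementations. The class name,
// loop structure, and sample values are hypothetical.
import org.HdrHistogram.Histogram;
import org.HdrHistogram.Recorder;

public class IntervalHistogramSketch {
    public static void main(String[] args) throws InterruptedException {
        Recorder recorder = new Recorder(3);   // 3 significant decimal digits
        Histogram intervalHistogram = null;    // recycled between intervals to avoid allocation

        for (int interval = 0; interval < 3; interval++) {
            recorder.recordValue(100 + interval);  // recording happens on the "hot" path
            Thread.sleep(10);
            // Take a stable snapshot of this interval and reset counts for the next one:
            intervalHistogram = recorder.getIntervalHistogram(intervalHistogram);
            System.out.println("interval " + interval
                    + ": total count = " + intervalHistogram.getTotalCount());
        }
    }
}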
4,893
55.252874
127
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/PackedHistogram.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import org.HdrHistogram.packedarray.PackedLongArray; import java.io.IOException; import java.io.ObjectInputStream; import java.nio.ByteBuffer; import java.util.zip.DataFormatException; /** * <h3>A High Dynamic Range (HDR) Histogram that uses a packed internal representation</h3> * <p> * {@link PackedHistogram} supports recording and analyzing sampled data value counts across a configurable * integer value range with configurable value precision within the range. Value precision is expressed as the * number of significant digits in the value recording, and provides control over value quantization behavior * across the value range and the subsequent value resolution at any given level. * <p> * {@link PackedHistogram} tracks value counts in a packed internal representation optimized * for the typical case in which recorded histogram values are sparse in the value range and tend to be incremented in small unit counts. * This packed representation tends to require significantly smaller amounts of storage when compared to unpacked * representations, but can incur additional recording cost due to resizing and repacking operations that may * occur as previously unrecorded values are encountered. * <p> * For example, a {@link PackedHistogram} could be configured to track the counts of observed integer values between 0 and * 3,600,000,000,000 while maintaining a value precision of 3 significant digits across that range. Value quantization * within the range will thus be no larger than 1/1,000th (or 0.1%) of any value. This example Histogram could * be used to track and analyze the counts of observed response times ranging between 1 nanosecond and 1 hour * in magnitude, while maintaining a value resolution of 1 microsecond up to 1 millisecond, a resolution of * 1 millisecond (or better) up to one second, and a resolution of 1 second (or better) up to 1,000 seconds. At its * maximum tracked value (1 hour), it would still maintain a resolution of 3.6 seconds (or better). * <p> * Auto-resizing: When constructed with no specified value range (or when auto-resize is turned on with {@link * Histogram#setAutoResize}) a {@link PackedHistogram} will auto-resize its dynamic range to include recorded values as * they are encountered. Note that recording calls that cause auto-resizing may take longer to execute, as resizing * incurs allocation and copying of internal data structures. * <p> * See package description for {@link org.HdrHistogram} for details. 
*/ public class PackedHistogram extends Histogram { private PackedLongArray packedCounts; @Override long getCountAtIndex(final int index) { return getCountAtNormalizedIndex(normalizeIndex(index, normalizingIndexOffset, countsArrayLength)); } @Override long getCountAtNormalizedIndex(final int index) { long count = packedCounts.get(index); return count; } @Override void incrementCountAtIndex(final int index) { packedCounts.increment(normalizeIndex(index, normalizingIndexOffset, countsArrayLength)); } @Override void addToCountAtIndex(final int index, final long value) { packedCounts.add(normalizeIndex(index, normalizingIndexOffset, countsArrayLength), value); } @Override void setCountAtIndex(int index, long value) { setCountAtNormalizedIndex(normalizeIndex(index, normalizingIndexOffset, countsArrayLength), value); } @Override void setCountAtNormalizedIndex(int index, long value) { packedCounts.set(index, value); } @Override void clearCounts() { packedCounts.clear(); packedCounts.setVirtualLength(countsArrayLength); totalCount = 0; } @Override public PackedHistogram copy() { PackedHistogram copy = new PackedHistogram(this); copy.add(this); return copy; } @Override public PackedHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) { PackedHistogram toHistogram = new PackedHistogram(this); toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); return toHistogram; } @Override void resize(long newHighestTrackableValue) { int oldNormalizedZeroIndex = normalizeIndex(0, normalizingIndexOffset, countsArrayLength); int oldCountsArrayLength = countsArrayLength; establishSize(newHighestTrackableValue); if (oldNormalizedZeroIndex != 0) { // We need to shift the stuff from the zero index and up to the end of the array: // When things are shifted in a packed array its not simple to identify the region shifted, // so re-record everything from the old normalized indexes to the new normalized indexes: PackedLongArray newPackedCounts = new PackedLongArray(countsArrayLength, packedCounts.getPhysicalLength()); // Copy everything up to the oldNormalizedZeroIndex in place: for (int fromIndex = 0; fromIndex < oldNormalizedZeroIndex; fromIndex++) { long value = packedCounts.get(fromIndex); if (value != 0) { newPackedCounts.set(fromIndex, value); } } // Copy everything from the oldNormalizedZeroIndex to the end with an index delta shift: int countsDelta = countsArrayLength - oldCountsArrayLength; for (int fromIndex = oldNormalizedZeroIndex; fromIndex < oldCountsArrayLength; fromIndex++) { long value = packedCounts.get(fromIndex); if (value != 0) { int toIndex = fromIndex + countsDelta; newPackedCounts.set(toIndex, value); } } // All unrecorded values are implicitly zero in the packed array packedCounts = newPackedCounts; } else { packedCounts.setVirtualLength(countsArrayLength); } } @Override int _getEstimatedFootprintInBytes() { return 192 + (8 * packedCounts.getPhysicalLength()); } /** * Construct an auto-resizing PackedHistogram with a lowest discernible value of 1 and an auto-adjusting * highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2). * * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. 
*/ public PackedHistogram(final int numberOfSignificantValueDigits) { this(1, 2, numberOfSignificantValueDigits); setAutoResize(true); } /** * Construct a PackedHistogram given the Highest value to be tracked and a number of significant decimal digits. The * histogram will be constructed to implicitly track (distinguish from 0) values as low as 1. * * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive * integer that is {@literal >=} 2. * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public PackedHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) { this(1, highestTrackableValue, numberOfSignificantValueDigits); } /** * Construct a PackedHistogram given the Lowest and Highest values to be tracked and a number of significant * decimal digits. Providing a lowestDiscernibleValue is useful is situations where the units used * for the histogram's values are much smaller that the minimal accuracy required. E.g. when tracking * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the * proper value for lowestDiscernibleValue would be 1000. * * @param lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram. * Must be a positive integer that is {@literal >=} 1. May be internally rounded * down to nearest power of 2. * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive * integer that is {@literal >=} (2 * lowestDiscernibleValue). * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public PackedHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, final int numberOfSignificantValueDigits) { super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits, false); packedCounts = new PackedLongArray(countsArrayLength); wordSizeInBytes = 8; } /** * Construct a PackedHistogram with the same range settings as a given source histogram, * duplicating the source's start/end timestamps (but NOT it's contents) * @param source The source histogram to duplicate */ public PackedHistogram(final AbstractHistogram source) { super(source, false); packedCounts = new PackedLongArray(countsArrayLength); wordSizeInBytes = 8; } /** * Construct a new histogram by decoding it from a ByteBuffer. * @param buffer The buffer to decode from * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high * @return The newly constructed histogram */ public static PackedHistogram decodeFromByteBuffer(final ByteBuffer buffer, final long minBarForHighestTrackableValue) { return (PackedHistogram) decodeFromByteBuffer(buffer, PackedHistogram.class, minBarForHighestTrackableValue); } /** * Construct a new histogram by decoding it from a compressed form in a ByteBuffer. 
* @param buffer The buffer to decode from * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high * @return The newly constructed histogram * @throws DataFormatException on error parsing/decompressing the buffer */ public static PackedHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer, final long minBarForHighestTrackableValue) throws DataFormatException { return decodeFromCompressedByteBuffer(buffer, PackedHistogram.class, minBarForHighestTrackableValue); } private void readObject(final ObjectInputStream o) throws IOException, ClassNotFoundException { o.defaultReadObject(); } }
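// A minimal sketch of PackedHistogram use: the recording and query API is the same as
// Histogram's; only the internal counts representation is packed. The class name and
// sample values are hypothetical.
import org.HdrHistogram.PackedHistogram;

public class PackedHistogramSketch {
    public static void main(String[] args) {
        // Track values between 1 and one hour expressed in microseconds, at 3 significant digits:
        PackedHistogram histogram = new PackedHistogram(3_600_000_000L, 3);

        histogram.recordValue(1_500);                 // 1.5 ms
        histogram.recordValueWithCount(42_000, 25);   // 25 samples at 42 ms

        System.out.println("p50 = " + histogram.getValueAtPercentile(50.0));
        System.out.println("estimated footprint (bytes) = "
                + histogram.getEstimatedFootprintInBytes());
    }
}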
11,593
48.127119
139
java
null
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/ValueRecorder.java
package org.HdrHistogram; public interface ValueRecorder { /** * Record a value * * @param value The value to be recorded * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range */ void recordValue(long value) throws ArrayIndexOutOfBoundsException; /** * Record a value (adding to the value's current count) * * @param value The value to be recorded * @param count The number of occurrences of this value to record * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range */ void recordValueWithCount(long value, long count) throws ArrayIndexOutOfBoundsException; /** * Record a value. * <p> * To compensate for the loss of sampled values when a recorded value is larger than the expected * interval between value samples, will auto-generate an additional series of decreasingly-smaller * (down to the expectedIntervalBetweenValueSamples) value records. * <p> * Note: This is an at-recording correction method, as opposed to the post-recording correction method provided * by {@link AbstractHistogram#copyCorrectedForCoordinatedOmission(long)}. * The two methods are mutually exclusive, and only one of the two should be used on a given data set to correct * for the same coordinated omission issue. * * @param value The value to record * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add * auto-generated value records as appropriate if value is larger * than expectedIntervalBetweenValueSamples * @throws ArrayIndexOutOfBoundsException (may throw) if value cannot be covered by the histogram's range */ void recordValueWithExpectedInterval(long value, long expectedIntervalBetweenValueSamples) throws ArrayIndexOutOfBoundsException; /** * Reset the contents and collected stats */ void reset(); }
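// A minimal sketch of programming against the ValueRecorder interface above. It assumes a
// concrete histogram type such as org.HdrHistogram.Histogram implementing ValueRecorder, so
// recording code can stay agnostic of the implementation. Method names, sample values, and the
// 1 ms expected interval are hypothetical.
import org.HdrHistogram.Histogram;
import org.HdrHistogram.ValueRecorder;

public class ValueRecorderSketch {
    static void recordLatency(ValueRecorder recorder, long latencyNanos) {
        // At-recording coordinated-omission correction, assuming one sample per millisecond:
        recorder.recordValueWithExpectedInterval(latencyNanos, 1_000_000L);
    }

    public static void main(String[] args) {
        Histogram histogram = new Histogram(3);   // auto-resizing, 3 significant digits
        recordLatency(histogram, 250_000L);
        recordLatency(histogram, 5_000_000L);     // larger than the expected interval
        System.out.println("total count = " + histogram.getTotalCount());
    }
}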
2,167
44.166667
119
java
null
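Since AbstractHistogram (next entry) implements ValueRecorder, code can be written against the interface so any concrete histogram type can be substituted. The sketch below (not part of the dataset) uses PackedHistogram, whose constructors appear earlier in this dump; the 10 ms expected interval and the helper/class names are illustrative assumptions.

import org.HdrHistogram.PackedHistogram;
import org.HdrHistogram.ValueRecorder;

public class ValueRecorderSketch {
    // Record through the interface so any AbstractHistogram subclass can be swapped in.
    static void recordLatency(ValueRecorder recorder, long latencyNanos) {
        // At-recording coordinated-omission correction, assuming one sample was expected
        // every 10,000,000 ns (10 ms); the interval here is a made-up example value.
        recorder.recordValueWithExpectedInterval(latencyNanos, 10_000_000L);
    }

    public static void main(String[] args) {
        ValueRecorder recorder = new PackedHistogram(3_600_000_000_000L, 3);
        recordLatency(recorder, 25_000_000L); // one 25 ms observation (made-up value)
        recorder.reset();                     // discard collected data, e.g. at an interval boundary
    }
}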
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/AbstractHistogram.java
/* * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.io.*; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.nio.ByteBuffer; import java.util.Iterator; import java.util.Locale; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.zip.DataFormatException; import java.util.zip.Deflater; import java.util.zip.Inflater; import static java.nio.ByteOrder.BIG_ENDIAN; /** * This non-public AbstractHistogramBase super-class separation is meant to bunch "cold" fields * separately from "hot" fields, in an attempt to force the JVM to place the (hot) fields * commonly used in the value recording code paths close together. * Subclass boundaries tend to be strongly control memory layout decisions in most practical * JVM implementations, making this an effective method for control filed grouping layout. */ abstract class AbstractHistogramBase extends EncodableHistogram { static AtomicLong constructionIdentityCount = new AtomicLong(0); // "Cold" accessed fields. Not used in the recording code path: long identity; volatile boolean autoResize = false; long highestTrackableValue; long lowestDiscernibleValue; int numberOfSignificantValueDigits; int bucketCount; /** * Power-of-two length of linearly scaled array slots in the counts array. Long enough to hold the first sequence of * entries that must be distinguished by a single unit (determined by configured precision). */ int subBucketCount; int countsArrayLength; int wordSizeInBytes; long startTimeStampMsec = Long.MAX_VALUE; long endTimeStampMsec = 0; String tag = null; double integerToDoubleValueConversionRatio = 1.0; double doubleToIntegerValueConversionRatio = 1.0; PercentileIterator percentileIterator; RecordedValuesIterator recordedValuesIterator; ByteBuffer intermediateUncompressedByteBuffer = null; byte[] intermediateUncompressedByteArray = null; double getIntegerToDoubleValueConversionRatio() { return integerToDoubleValueConversionRatio; } double getDoubleToIntegerValueConversionRatio() { return doubleToIntegerValueConversionRatio; } void nonConcurrentSetIntegerToDoubleValueConversionRatio(double integerToDoubleValueConversionRatio) { this.integerToDoubleValueConversionRatio = integerToDoubleValueConversionRatio; this.doubleToIntegerValueConversionRatio = 1.0/integerToDoubleValueConversionRatio; } abstract void setIntegerToDoubleValueConversionRatio(double integerToDoubleValueConversionRatio); } /** * <h3>An abstract base class for integer values High Dynamic Range (HDR) Histograms</h3> * <p> * AbstractHistogram supports the recording and analyzing sampled data value counts across a configurable integer value * range with configurable value precision within the range. Value precision is expressed as the number of significant * digits in the value recording, and provides control over value quantization behavior across the value range and the * subsequent value resolution at any given level. * <p> * For example, a Histogram could be configured to track the counts of observed integer values between 0 and * 3,600,000,000 while maintaining a value precision of 3 significant digits across that range. Value quantization * within the range will thus be no larger than 1/1,000th (or 0.1%) of any value. 
This example Histogram could * be used to track and analyze the counts of observed response times ranging between 1 microsecond and 1 hour * in magnitude, while maintaining a value resolution of 1 microsecond up to 1 millisecond, a resolution of * 1 millisecond (or better) up to one second, and a resolution of 1 second (or better) up to 1,000 seconds. At it's * maximum tracked value (1 hour), it would still maintain a resolution of 3.6 seconds (or better). * <p> * See package description for {@link org.HdrHistogram} for details. * */ public abstract class AbstractHistogram extends AbstractHistogramBase implements ValueRecorder, Serializable { // "Hot" accessed fields (used in the the value recording code path) are bunched here, such // that they will have a good chance of ending up in the same cache line as the totalCounts and // counts array reference fields that subclass implementations will typically add. /** * Number of leading zeros in the largest value that can fit in bucket 0. */ int leadingZeroCountBase; int subBucketHalfCountMagnitude; /** * Largest k such that 2^k &lt;= lowestDiscernibleValue */ int unitMagnitude; int subBucketHalfCount; /** * Biggest value that can fit in bucket 0 */ long subBucketMask; /** * Lowest unitMagnitude bits are set */ long unitMagnitudeMask; volatile long maxValue = 0; volatile long minNonZeroValue = Long.MAX_VALUE; private static final AtomicLongFieldUpdater<AbstractHistogram> maxValueUpdater = AtomicLongFieldUpdater.newUpdater(AbstractHistogram.class, "maxValue"); private static final AtomicLongFieldUpdater<AbstractHistogram> minNonZeroValueUpdater = AtomicLongFieldUpdater.newUpdater(AbstractHistogram.class, "minNonZeroValue"); // Sub-classes will typically add a totalCount field and a counts array field, which will likely be laid out // right around here due to the subclass layout rules in most practical JVM implementations. 
// ######## ### ###### ## ## ### ###### ######## // ## ## ## ## ## ## ## ## ## ## ## ## ## // ## ## ## ## ## ## ## ## ## ## ## // ######## ## ## ## ##### ## ## ## #### ###### // ## ######### ## ## ## ######### ## ## ## // ## ## ## ## ## ## ## ## ## ## ## ## // ## ## ## ###### ## ## ## ## ###### ######## // // ### ######## ###### ######## ######## ### ###### ######## // ## ## ## ## ## ## ## ## ## ## ## ## ## ## // ## ## ## ## ## ## ## ## ## ## ## ## // ## ## ######## ###### ## ######## ## ## ## ## // ######### ## ## ## ## ## ## ######### ## ## // ## ## ## ## ## ## ## ## ## ## ## ## ## ## // ## ## ######## ###### ## ## ## ## ## ###### ## // // Abstract, counts-type dependent methods to be provided by subclass implementations: // abstract long getCountAtIndex(int index); abstract long getCountAtNormalizedIndex(int index); abstract void incrementCountAtIndex(int index); abstract void addToCountAtIndex(int index, long value); abstract void setCountAtIndex(int index, long value); abstract void setCountAtNormalizedIndex(int index, long value); abstract int getNormalizingIndexOffset(); abstract void setNormalizingIndexOffset(int normalizingIndexOffset); abstract void shiftNormalizingIndexByOffset(int offsetToAdd, boolean lowestHalfBucketPopulated, double newIntegerToDoubleValueConversionRatio); abstract void setTotalCount(long totalCount); abstract void incrementTotalCount(); abstract void addToTotalCount(long value); abstract void clearCounts(); abstract int _getEstimatedFootprintInBytes(); abstract void resize(long newHighestTrackableValue); /** * Get the total count of all recorded values in the histogram * @return the total count of all recorded values in the histogram */ abstract public long getTotalCount(); /** * Set internally tracked maxValue to new value if new value is greater than current one. * May be overridden by subclasses for synchronization or atomicity purposes. * @param value new maxValue to set */ private void updatedMaxValue(final long value) { final long internalValue = value | unitMagnitudeMask; // Max unit-equivalent value long sampledMaxValue; while (internalValue > (sampledMaxValue = maxValue)) { maxValueUpdater.compareAndSet(this, sampledMaxValue, internalValue); } } private void resetMaxValue(final long maxValue) { this.maxValue = maxValue | unitMagnitudeMask; // Max unit-equivalent value } /** * Set internally tracked minNonZeroValue to new value if new value is smaller than current one. * May be overridden by subclasses for synchronization or atomicity purposes. * @param value new minNonZeroValue to set */ private void updateMinNonZeroValue(final long value) { if (value <= unitMagnitudeMask) { return; // Unit-equivalent to 0. } final long internalValue = value & ~unitMagnitudeMask; // Min unit-equivalent value long sampledMinNonZeroValue; while (internalValue < (sampledMinNonZeroValue = minNonZeroValue)) { minNonZeroValueUpdater.compareAndSet(this, sampledMinNonZeroValue, internalValue); } } private void resetMinNonZeroValue(final long minNonZeroValue) { final long internalValue = minNonZeroValue & ~unitMagnitudeMask; // Min unit-equivalent value this.minNonZeroValue = (minNonZeroValue == Long.MAX_VALUE) ? 
minNonZeroValue : internalValue; } // ###### ####### ## ## ###### ######## ######## ## ## ###### ######## #### ####### ## ## // ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## // ## ## ## #### ## ## ## ## ## ## ## ## ## ## ## ## #### ## // ## ## ## ## ## ## ###### ## ######## ## ## ## ## ## ## ## ## ## ## // ## ## ## ## #### ## ## ## ## ## ## ## ## ## ## ## ## #### // ## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### // ###### ####### ## ## ###### ## ## ## ####### ###### ## #### ####### ## ## // // Construction: // /** * Construct an auto-resizing histogram with a lowest discernible value of 1 and an auto-adjusting * highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2). * * @param numberOfSignificantValueDigits The number of significant decimal digits to which the histogram will * maintain value resolution and separation. Must be a non-negative * integer between 0 and 5. */ protected AbstractHistogram(final int numberOfSignificantValueDigits) { this(1, 2, numberOfSignificantValueDigits); autoResize = true; } /** * Construct a histogram given the Lowest and Highest values to be tracked and a number of significant * decimal digits. Providing a lowestDiscernibleValue is useful is situations where the units used * for the histogram's values are much smaller that the minimal accuracy required. E.g. when tracking * time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the * proper value for lowestDiscernibleValue would be 1000. * * @param lowestDiscernibleValue The lowest value that can be discerned (distinguished from 0) by the histogram. * Must be a positive integer that is {@literal >=} 1. May be internally rounded * down to nearest power of 2. * @param highestTrackableValue The highest value to be tracked by the histogram. Must be a positive * integer that is {@literal >=} (2 * lowestDiscernibleValue). * @param numberOfSignificantValueDigits The number of significant decimal digits to which the histogram will * maintain value resolution and separation. Must be a non-negative * integer between 0 and 5. 
*/ protected AbstractHistogram(final long lowestDiscernibleValue, final long highestTrackableValue, final int numberOfSignificantValueDigits) { // Verify argument validity if (lowestDiscernibleValue < 1) { throw new IllegalArgumentException("lowestDiscernibleValue must be >= 1"); } if (lowestDiscernibleValue > Long.MAX_VALUE / 2) { // prevent subsequent multiplication by 2 for highestTrackableValue check from overflowing throw new IllegalArgumentException("lowestDiscernibleValue must be <= Long.MAX_VALUE / 2"); } if (highestTrackableValue < 2L * lowestDiscernibleValue) { throw new IllegalArgumentException("highestTrackableValue must be >= 2 * lowestDiscernibleValue"); } if ((numberOfSignificantValueDigits < 0) || (numberOfSignificantValueDigits > 5)) { throw new IllegalArgumentException("numberOfSignificantValueDigits must be between 0 and 5"); } identity = constructionIdentityCount.getAndIncrement(); init(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits, 1.0, 0); } /** * Construct a histogram with the same range settings as a given source histogram, * duplicating the source's start/end timestamps (but NOT it's contents) * @param source The source histogram to duplicate */ protected AbstractHistogram(final AbstractHistogram source) { this(source.getLowestDiscernibleValue(), source.getHighestTrackableValue(), source.getNumberOfSignificantValueDigits()); this.setStartTimeStamp(source.getStartTimeStamp()); this.setEndTimeStamp(source.getEndTimeStamp()); this.autoResize = source.autoResize; } private void init(final long lowestDiscernibleValue, final long highestTrackableValue, final int numberOfSignificantValueDigits, final double integerToDoubleValueConversionRatio, final int normalizingIndexOffset) { this.lowestDiscernibleValue = lowestDiscernibleValue; this.highestTrackableValue = highestTrackableValue; this.numberOfSignificantValueDigits = numberOfSignificantValueDigits; this.integerToDoubleValueConversionRatio = integerToDoubleValueConversionRatio; if (normalizingIndexOffset != 0) { setNormalizingIndexOffset(normalizingIndexOffset); } /* * Given a 3 decimal point accuracy, the expectation is obviously for "+/- 1 unit at 1000". It also means that * it's "ok to be +/- 2 units at 2000". The "tricky" thing is that it is NOT ok to be +/- 2 units at 1999. Only * starting at 2000. So internally, we need to maintain single unit resolution to 2x 10^decimalPoints. */ final long largestValueWithSingleUnitResolution = 2 * (long) Math.pow(10, numberOfSignificantValueDigits); unitMagnitude = (int) (Math.log(lowestDiscernibleValue)/Math.log(2)); unitMagnitudeMask = (1 << unitMagnitude) - 1; // We need to maintain power-of-two subBucketCount (for clean direct indexing) that is large enough to // provide unit resolution to at least largestValueWithSingleUnitResolution. So figure out // largestValueWithSingleUnitResolution's nearest power-of-two (rounded up), and use that: int subBucketCountMagnitude = (int) Math.ceil(Math.log(largestValueWithSingleUnitResolution)/Math.log(2)); subBucketHalfCountMagnitude = subBucketCountMagnitude - 1; subBucketCount = 1 << subBucketCountMagnitude; subBucketHalfCount = subBucketCount / 2; subBucketMask = ((long)subBucketCount - 1) << unitMagnitude; if (subBucketCountMagnitude + unitMagnitude > 62) { // subBucketCount entries can't be represented, with unitMagnitude applied, in a positive long. // Technically it still sort of works if their sum is 63: you can represent all but the last number // in the shifted subBucketCount. 
However, the utility of such a histogram vs ones whose magnitude here // fits in 62 bits is debatable, and it makes it harder to work through the logic. // Sums larger than 64 are totally broken as leadingZeroCountBase would go negative. throw new IllegalArgumentException("Cannot represent numberOfSignificantValueDigits worth of values " + "beyond lowestDiscernibleValue"); } // determine exponent range needed to support the trackable value with no overflow: establishSize(highestTrackableValue); // Establish leadingZeroCountBase, used in getBucketIndex() fast path: // subtract the bits that would be used by the largest value in bucket 0. leadingZeroCountBase = 64 - unitMagnitude - subBucketCountMagnitude; percentileIterator = new PercentileIterator(this, 1); recordedValuesIterator = new RecordedValuesIterator(this); } /** * The buckets (each of which has subBucketCount sub-buckets, here assumed to be 2048 as an example) overlap: * * <pre> * The 0'th bucket covers from 0...2047 in multiples of 1, using all 2048 sub-buckets * The 1'th bucket covers from 2048..4097 in multiples of 2, using only the top 1024 sub-buckets * The 2'th bucket covers from 4096..8191 in multiple of 4, using only the top 1024 sub-buckets * ... * </pre> * * Bucket 0 is "special" here. It is the only one that has 2048 entries. All the rest have 1024 entries (because * their bottom half overlaps with and is already covered by the all of the previous buckets put together). In other * words, the k'th bucket could represent 0 * 2^k to 2048 * 2^k in 2048 buckets with 2^k precision, but the midpoint * of 1024 * 2^k = 2048 * 2^(k-1) = the k-1'th bucket's end, so we would use the previous bucket for those lower * values as it has better precision. */ final void establishSize(long newHighestTrackableValue) { // establish counts array length: countsArrayLength = determineArrayLengthNeeded(newHighestTrackableValue); // establish exponent range needed to support the trackable value with no overflow: bucketCount = getBucketsNeededToCoverValue(newHighestTrackableValue); // establish the new highest trackable value: highestTrackableValue = newHighestTrackableValue; } final int determineArrayLengthNeeded(long highestTrackableValue) { if (highestTrackableValue < 2L * lowestDiscernibleValue) { throw new IllegalArgumentException("highestTrackableValue (" + highestTrackableValue + ") cannot be < (2 * lowestDiscernibleValue)"); } //determine counts array length needed: int countsArrayLength = getLengthForNumberOfBuckets(getBucketsNeededToCoverValue(highestTrackableValue)); return countsArrayLength; } // ### ## ## ######## ####### // ## ## ## ## ## ## ## // ## ## ## ## ## ## ## // ## ## ## ## ## ## ## // ######### ## ## ## ## ## // ## ## ## ## ## ## ## // ## ## ####### ## ####### // // ######## ######## ###### #### ######## #### ## ## ###### // ## ## ## ## ## ## ## ## ### ## ## ## // ## ## ## ## ## ## ## #### ## ## // ######## ###### ###### ## ## ## ## ## ## ## #### // ## ## ## ## ## ## ## ## #### ## ## // ## ## ## ## ## ## ## ## ## ### ## ## // ## ## ######## ###### #### ######## #### ## ## ###### // // Auto-resizing control: // /** * Indicate whether or not the histogram is set to auto-resize and auto-adjust it's * highestTrackableValue * @return autoResize setting */ public boolean isAutoResize() { return autoResize; } /** * Indicate whether or not the histogram is capable of supporting auto-resize functionality. 
* Note that this is an indication that enabling auto-resize by calling setAutoResize() is allowed, * and NOT that the histogram will actually auto-resize. Use isAutoResize() to determine if * the histogram is in auto-resize mode. * @return autoResize setting */ public boolean supportsAutoResize() { return true; } /** * Control whether or not the histogram can auto-resize and auto-adjust it's * highestTrackableValue * @param autoResize autoResize setting */ public void setAutoResize(boolean autoResize) { this.autoResize = autoResize; } // ## ## ### ## ## ## ######## // ## ## ## ## ## ## ## ## // ## ## ## ## ## ## ## ## // ## ## ## ## ## ## ## ###### // ## ## ######### ## ## ## ## // ## ## ## ## ## ## ## ## // ### ## ## ######## ####### ######## // // ######## ######## ###### ####### ######## ######## #### ## ## ###### // ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## // ## ## ## ## ## ## ## ## ## ## ## #### ## ## // ######## ###### ## ## ## ######## ## ## ## ## ## ## ## #### // ## ## ## ## ## ## ## ## ## ## ## ## #### ## ## // ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## // ## ## ######## ###### ####### ## ## ######## #### ## ## ###### // // Value recording support: // /** * Record a value in the histogram * * @param value The value to be recorded * @throws ArrayIndexOutOfBoundsException (may throw) if value is exceeds highestTrackableValue */ @Override public void recordValue(final long value) throws ArrayIndexOutOfBoundsException { recordSingleValue(value); } /** * Record a value in the histogram (adding to the value's current count) * * @param value The value to be recorded * @param count The number of occurrences of this value to record * @throws ArrayIndexOutOfBoundsException (may throw) if value is exceeds highestTrackableValue */ @Override public void recordValueWithCount(final long value, final long count) throws ArrayIndexOutOfBoundsException { recordCountAtValue(count, value); } /** * Record a value in the histogram. * <p> * To compensate for the loss of sampled values when a recorded value is larger than the expected * interval between value samples, Histogram will auto-generate an additional series of decreasingly-smaller * (down to the expectedIntervalBetweenValueSamples) value records. * <p> * Note: This is a at-recording correction method, as opposed to the post-recording correction method provided * by {@link #copyCorrectedForCoordinatedOmission(long)}. * The two methods are mutually exclusive, and only one of the two should be be used on a given data set to correct * for the same coordinated omission issue. * <p> * See notes in the description of the Histogram calls for an illustration of why this corrective behavior is * important. 
* * @param value The value to record * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add * auto-generated value records as appropriate if value is larger * than expectedIntervalBetweenValueSamples * @throws ArrayIndexOutOfBoundsException (may throw) if value is exceeds highestTrackableValue */ @Override public void recordValueWithExpectedInterval(final long value, final long expectedIntervalBetweenValueSamples) throws ArrayIndexOutOfBoundsException { recordSingleValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples); } // Package-internal support for converting and recording double values into integer histograms: void recordConvertedDoubleValue(final double value) { long integerValue = (long) (value * doubleToIntegerValueConversionRatio); recordValue(integerValue); } public void recordConvertedDoubleValueWithCount(final double value, final long count) throws ArrayIndexOutOfBoundsException { long integerValue = (long) (value * doubleToIntegerValueConversionRatio); recordCountAtValue(count, integerValue); } /** * @deprecated * * Record a value in the histogram. This deprecated method has identical behavior to * <b><code>recordValueWithExpectedInterval()</code></b>. It was renamed to avoid ambiguity. * * @param value The value to record * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add * auto-generated value records as appropriate if value is larger * than expectedIntervalBetweenValueSamples * @throws ArrayIndexOutOfBoundsException (may throw) if value is exceeds highestTrackableValue */ public void recordValue(final long value, final long expectedIntervalBetweenValueSamples) throws ArrayIndexOutOfBoundsException { recordValueWithExpectedInterval(value, expectedIntervalBetweenValueSamples); } void updateMinAndMax(final long value) { if (value > maxValue) { updatedMaxValue(value); } if ((value < minNonZeroValue) && (value != 0)) { updateMinNonZeroValue(value); } } private void recordCountAtValue(final long count, final long value) throws ArrayIndexOutOfBoundsException { int countsIndex = countsArrayIndex(value); try { addToCountAtIndex(countsIndex, count); } catch (IndexOutOfBoundsException ex) { handleRecordException(count, value, ex); } updateMinAndMax(value); addToTotalCount(count); } private void recordSingleValue(final long value) throws ArrayIndexOutOfBoundsException { int countsIndex = countsArrayIndex(value); try { incrementCountAtIndex(countsIndex); } catch (IndexOutOfBoundsException ex) { handleRecordException(1, value, ex); } updateMinAndMax(value); incrementTotalCount(); } private void handleRecordException(final long count, final long value, Exception ex) { if (!autoResize) { throw new ArrayIndexOutOfBoundsException("value " + value + " outside of histogram covered range. 
Caused by: " + ex); } resize(value); int countsIndex = countsArrayIndex(value); addToCountAtIndex(countsIndex, count); this.highestTrackableValue = highestEquivalentValue(valueFromIndex(countsArrayLength - 1)); } private void recordValueWithCountAndExpectedInterval(final long value, final long count, final long expectedIntervalBetweenValueSamples) throws ArrayIndexOutOfBoundsException { recordCountAtValue(count, value); if (expectedIntervalBetweenValueSamples <= 0) return; for (long missingValue = value - expectedIntervalBetweenValueSamples; missingValue >= expectedIntervalBetweenValueSamples; missingValue -= expectedIntervalBetweenValueSamples) { recordCountAtValue(count, missingValue); } } private void recordSingleValueWithExpectedInterval(final long value, final long expectedIntervalBetweenValueSamples) throws ArrayIndexOutOfBoundsException { recordSingleValue(value); if (expectedIntervalBetweenValueSamples <= 0) return; for (long missingValue = value - expectedIntervalBetweenValueSamples; missingValue >= expectedIntervalBetweenValueSamples; missingValue -= expectedIntervalBetweenValueSamples) { recordSingleValue(missingValue); } } // ###### ## ######## ### ######## #### ## ## ###### // ## ## ## ## ## ## ## ## ## ### ## ## ## // ## ## ## ## ## ## ## ## #### ## ## // ## ## ###### ## ## ######## ## ## ## ## ## #### // ## ## ## ######### ## ## ## ## #### ## ## // ## ## ## ## ## ## ## ## ## ## ### ## ## // ###### ######## ######## ## ## ## ## #### ## ## ###### // // Clearing support: // /** * Reset the contents and stats of this histogram */ @Override public void reset() { clearCounts(); resetMaxValue(0); resetMinNonZeroValue(Long.MAX_VALUE); setNormalizingIndexOffset(0); startTimeStampMsec = Long.MAX_VALUE; endTimeStampMsec = 0; tag = null; } // ###### ####### ######## ## ## // ## ## ## ## ## ## ## ## // ## ## ## ## ## #### // ## ## ## ######## ## // ## ## ## ## ## // ## ## ## ## ## ## // ###### ####### ## ## // // Copy support: // /** * Create a copy of this histogram, complete with data and everything. * * @return A distinct copy of this histogram. */ abstract public AbstractHistogram copy(); /** * Get a copy of this histogram, corrected for coordinated omission. * <p> * To compensate for the loss of sampled values when a recorded value is larger than the expected * interval between value samples, the new histogram will include an auto-generated additional series of * decreasingly-smaller (down to the expectedIntervalBetweenValueSamples) value records for each count found * in the current histogram that is larger than the expectedIntervalBetweenValueSamples. * * Note: This is a post-correction method, as opposed to the at-recording correction method provided * by {@link #recordValueWithExpectedInterval(long, long) recordValueWithExpectedInterval}. The two * methods are mutually exclusive, and only one of the two should be be used on a given data set to correct * for the same coordinated omission issue. * by * <p> * See notes in the description of the Histogram calls for an illustration of why this corrective behavior is * important. * * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add * auto-generated value records as appropriate if value is larger * than expectedIntervalBetweenValueSamples * @return a copy of this histogram, corrected for coordinated omission. 
*/ abstract public AbstractHistogram copyCorrectedForCoordinatedOmission(long expectedIntervalBetweenValueSamples); /** * Copy this histogram into the target histogram, overwriting it's contents. * * @param targetHistogram the histogram to copy into */ public void copyInto(final AbstractHistogram targetHistogram) { targetHistogram.reset(); targetHistogram.add(this); targetHistogram.setStartTimeStamp(this.startTimeStampMsec); targetHistogram.setEndTimeStamp(this.endTimeStampMsec); } /** * Copy this histogram, corrected for coordinated omission, into the target histogram, overwriting it's contents. * (see {@link #copyCorrectedForCoordinatedOmission} for more detailed explanation about how correction is applied) * * @param targetHistogram the histogram to copy into * @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add * auto-generated value records as appropriate if value is larger * than expectedIntervalBetweenValueSamples */ public void copyIntoCorrectedForCoordinatedOmission(final AbstractHistogram targetHistogram, final long expectedIntervalBetweenValueSamples) { targetHistogram.reset(); targetHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples); targetHistogram.setStartTimeStamp(this.startTimeStampMsec); targetHistogram.setEndTimeStamp(this.endTimeStampMsec); } // ### ######## ######## // ## ## ## ## ## ## // ## ## ## ## ## ## // ## ## ## ## ## ## // ######### ## ## ## ## // ## ## ## ## ## ## // ## ## ######## ######## // // Add support: // /** * Add the contents of another histogram to this one. * <p> * As part of adding the contents, the start/end timestamp range of this histogram will be * extended to include the start/end timestamp range of the other histogram. * * @param otherHistogram The other histogram. * @throws ArrayIndexOutOfBoundsException (may throw) if values in fromHistogram's are * higher than highestTrackableValue. */ public void add(final AbstractHistogram otherHistogram) throws ArrayIndexOutOfBoundsException { long highestRecordableValue = highestEquivalentValue(valueFromIndex(countsArrayLength - 1)); if (highestRecordableValue < otherHistogram.getMaxValue()) { if (!isAutoResize()) { throw new ArrayIndexOutOfBoundsException( "The other histogram includes values that do not fit in this histogram's range."); } resize(otherHistogram.getMaxValue()); } if ((bucketCount == otherHistogram.bucketCount) && (subBucketCount == otherHistogram.subBucketCount) && (unitMagnitude == otherHistogram.unitMagnitude) && (getNormalizingIndexOffset() == otherHistogram.getNormalizingIndexOffset()) && !(otherHistogram instanceof ConcurrentHistogram) ) { // Counts arrays are of the same length and meaning, so we can just iterate and add directly: long observedOtherTotalCount = 0; for (int i = 0; i < otherHistogram.countsArrayLength; i++) { long otherCount = otherHistogram.getCountAtIndex(i); if (otherCount > 0) { addToCountAtIndex(i, otherCount); observedOtherTotalCount += otherCount; } } setTotalCount(getTotalCount() + observedOtherTotalCount); updatedMaxValue(Math.max(getMaxValue(), otherHistogram.getMaxValue())); updateMinNonZeroValue(Math.min(getMinNonZeroValue(), otherHistogram.getMinNonZeroValue())); } else { // Arrays are not a direct match (or the other could change on the fly in some valid way), // so we can't just stream through and add them. 
Instead, go through the array and add each // non-zero value found at it's proper value: // Do max value first, to avoid max value updates on each iteration: int otherMaxIndex = otherHistogram.countsArrayIndex(otherHistogram.getMaxValue()); long otherCount = otherHistogram.getCountAtIndex(otherMaxIndex); recordValueWithCount(otherHistogram.valueFromIndex(otherMaxIndex), otherCount); // Record the remaining values, up to but not including the max value: for (int i = 0; i < otherMaxIndex; i++) { otherCount = otherHistogram.getCountAtIndex(i); if (otherCount > 0) { recordValueWithCount(otherHistogram.valueFromIndex(i), otherCount); } } } setStartTimeStamp(Math.min(startTimeStampMsec, otherHistogram.startTimeStampMsec)); setEndTimeStamp(Math.max(endTimeStampMsec, otherHistogram.endTimeStampMsec)); } /** * Subtract the contents of another histogram from this one. * <p> * The start/end timestamps of this histogram will remain unchanged. * * @param otherHistogram The other histogram. * @throws ArrayIndexOutOfBoundsException (may throw) if values in otherHistogram's are higher than highestTrackableValue. * */ public void subtract(final AbstractHistogram otherHistogram) throws ArrayIndexOutOfBoundsException, IllegalArgumentException { if (highestEquivalentValue(otherHistogram.getMaxValue()) > highestEquivalentValue(valueFromIndex(this.countsArrayLength - 1))) { throw new IllegalArgumentException( "The other histogram includes values that do not fit in this histogram's range."); } for (int i = 0; i < otherHistogram.countsArrayLength; i++) { long otherCount = otherHistogram.getCountAtIndex(i); if (otherCount > 0) { long otherValue = otherHistogram.valueFromIndex(i); if (getCountAtValue(otherValue) < otherCount) { throw new IllegalArgumentException("otherHistogram count (" + otherCount + ") at value " + otherValue + " is larger than this one's (" + getCountAtValue(otherValue) + ")"); } recordValueWithCount(otherValue, -otherCount); } } // With subtraction, the max and minNonZero values could have changed: if ((getCountAtValue(getMaxValue()) <= 0) || getCountAtValue(getMinNonZeroValue()) <= 0) { establishInternalTackingValues(); } } /** * Add the contents of another histogram to this one, while correcting the incoming data for coordinated omission. * <p> * To compensate for the loss of sampled values when a recorded value is larger than the expected * interval between value samples, the values added will include an auto-generated additional series of * decreasingly-smaller (down to the expectedIntervalBetweenValueSamples) value records for each count found * in the current histogram that is larger than the expectedIntervalBetweenValueSamples. * * Note: This is a post-recording correction method, as opposed to the at-recording correction method provided * by {@link #recordValueWithExpectedInterval(long, long) recordValueWithExpectedInterval}. The two * methods are mutually exclusive, and only one of the two should be be used on a given data set to correct * for the same coordinated omission issue. * by * <p> * See notes in the description of the Histogram calls for an illustration of why this corrective behavior is * important. * * @param otherHistogram The other histogram. highestTrackableValue and largestValueWithSingleUnitResolution must match. 
* @param expectedIntervalBetweenValueSamples If expectedIntervalBetweenValueSamples is larger than 0, add * auto-generated value records as appropriate if value is larger * than expectedIntervalBetweenValueSamples * @throws ArrayIndexOutOfBoundsException (may throw) if values exceed highestTrackableValue */ public void addWhileCorrectingForCoordinatedOmission(final AbstractHistogram otherHistogram, final long expectedIntervalBetweenValueSamples) { final AbstractHistogram toHistogram = this; for (HistogramIterationValue v : otherHistogram.recordedValues()) { toHistogram.recordValueWithCountAndExpectedInterval(v.getValueIteratedTo(), v.getCountAtValueIteratedTo(), expectedIntervalBetweenValueSamples); } } // ###### ## ## #### ######## ######## #### ## ## ###### // ## ## ## ## ## ## ## ## ### ## ## ## // ## ## ## ## ## ## ## #### ## ## // ###### ######### ## ###### ## ## ## ## ## ## #### // ## ## ## ## ## ## ## ## #### ## ## // ## ## ## ## ## ## ## ## ## ### ## ## // ###### ## ## #### ## ## #### ## ## ###### // // // Shifting support: // /** * Shift recorded values to the left (the equivalent of a &lt;&lt; shift operation on all recorded values). The * configured integer value range limits and value precision setting will remain unchanged. * * An {@link ArrayIndexOutOfBoundsException} will be thrown if any recorded values may be lost * as a result of the attempted operation, reflecting an "overflow" conditions. Expect such an overflow * exception if the operation would cause the current maxValue to be scaled to a value that is outside * of the covered value range. * * @param numberOfBinaryOrdersOfMagnitude The number of binary orders of magnitude to shift by */ public void shiftValuesLeft(final int numberOfBinaryOrdersOfMagnitude) { shiftValuesLeft(numberOfBinaryOrdersOfMagnitude, integerToDoubleValueConversionRatio); } void shiftValuesLeft(final int numberOfBinaryOrdersOfMagnitude, final double newIntegerToDoubleValueConversionRatio) { if (numberOfBinaryOrdersOfMagnitude < 0) { throw new IllegalArgumentException("Cannot shift by a negative number of magnitudes"); } if (numberOfBinaryOrdersOfMagnitude == 0) { return; } if (getTotalCount() == getCountAtIndex(0)) { // (no need to shift any values if all recorded values are at the 0 value level:) return; } final int shiftAmount = numberOfBinaryOrdersOfMagnitude << subBucketHalfCountMagnitude; int maxValueIndex = countsArrayIndex(getMaxValue()); // indicate overflow if maxValue is in the range being wrapped: if (maxValueIndex >= (countsArrayLength - shiftAmount)) { throw new ArrayIndexOutOfBoundsException( "Operation would overflow, would discard recorded value counts"); } long maxValueBeforeShift = maxValueUpdater.getAndSet(this, 0); long minNonZeroValueBeforeShift = minNonZeroValueUpdater.getAndSet(this, Long.MAX_VALUE); boolean lowestHalfBucketPopulated = (minNonZeroValueBeforeShift < (subBucketHalfCount << unitMagnitude)); // Perform the shift: shiftNormalizingIndexByOffset(shiftAmount, lowestHalfBucketPopulated, newIntegerToDoubleValueConversionRatio); // adjust min, max: updateMinAndMax(maxValueBeforeShift << numberOfBinaryOrdersOfMagnitude); if (minNonZeroValueBeforeShift < Long.MAX_VALUE) { updateMinAndMax(minNonZeroValueBeforeShift << numberOfBinaryOrdersOfMagnitude); } } void nonConcurrentNormalizingIndexShift(int shiftAmount, boolean lowestHalfBucketPopulated) { // Save and clear the 0 value count: long zeroValueCount = getCountAtIndex(0); setCountAtIndex(0, 0); int preShiftZeroIndex = normalizeIndex(0, getNormalizingIndexOffset(), 
countsArrayLength); setNormalizingIndexOffset(getNormalizingIndexOffset() + shiftAmount); // Deal with lower half bucket if needed: if (lowestHalfBucketPopulated) { if (shiftAmount <= 0) { // Shifts with lowest half bucket populated can only be to the left. // Any right shift logic calling this should have already verified that // the lowest half bucket is not populated. throw new ArrayIndexOutOfBoundsException( "Attempt to right-shift with already-recorded value counts that would underflow and lose precision"); } shiftLowestHalfBucketContentsLeft(shiftAmount, preShiftZeroIndex); } // Restore the 0 value count: setCountAtIndex(0, zeroValueCount); } private void shiftLowestHalfBucketContentsLeft(int shiftAmount, int preShiftZeroIndex) { final int numberOfBinaryOrdersOfMagnitude = shiftAmount >> subBucketHalfCountMagnitude; // The lowest half-bucket (not including the 0 value) is special: unlike all other half // buckets, the lowest half bucket values cannot be scaled by simply changing the // normalizing offset. Instead, they must be individually re-recorded at the new // scale, and cleared from the current one. // // We know that all half buckets "below" the current lowest one are full of 0s, because // we would have overflowed otherwise. So we need to shift the values in the current // lowest half bucket into that range (including the current lowest half bucket itself). // Iterating up from the lowermost non-zero "from slot" and copying values to the newly // scaled "to slot" (and then zeroing the "from slot"), will work in a single pass, // because the scale "to slot" index will always be a lower index than its or any // preceding non-scaled "from slot" index: // // (Note that we specifically avoid slot 0, as it is directly handled in the outer case) for (int fromIndex = 1; fromIndex < subBucketHalfCount; fromIndex++) { long toValue = valueFromIndex(fromIndex) << numberOfBinaryOrdersOfMagnitude; int toIndex = countsArrayIndex(toValue); long countAtFromIndex = getCountAtNormalizedIndex(fromIndex + preShiftZeroIndex); setCountAtIndex(toIndex, countAtFromIndex); setCountAtNormalizedIndex(fromIndex + preShiftZeroIndex, 0); } // Note that the above loop only creates O(N) work for histograms that have values in // the lowest half-bucket (excluding the 0 value). Histograms that never have values // there (e.g. all integer value histograms used as internal storage in DoubleHistograms) // will never loop, and their shifts will remain O(1). } /** * Shift recorded values to the right (the equivalent of a &gt;&gt; shift operation on all recorded values). The * configured integer value range limits and value precision setting will remain unchanged. * <p> * Shift right operations that do not underflow are reversible with a shift left operation with no loss of * information. An {@link ArrayIndexOutOfBoundsException} reflecting an "underflow" conditions will be thrown * if any recorded values may lose representation accuracy as a result of the attempted shift operation. * <p> * For a shift of a single order of magnitude, expect such an underflow exception if any recorded non-zero * values up to [numberOfSignificantValueDigits (rounded up to nearest power of 2) multiplied by * (2 ^ numberOfBinaryOrdersOfMagnitude) currently exist in the histogram. 
* * @param numberOfBinaryOrdersOfMagnitude The number of binary orders of magnitude to shift by */ public void shiftValuesRight(final int numberOfBinaryOrdersOfMagnitude) { shiftValuesRight(numberOfBinaryOrdersOfMagnitude, integerToDoubleValueConversionRatio); } void shiftValuesRight(final int numberOfBinaryOrdersOfMagnitude, final double newIntegerToDoubleValueConversionRatio) { if (numberOfBinaryOrdersOfMagnitude < 0) { throw new IllegalArgumentException("Cannot shift by a negative number of magnitudes"); } if (numberOfBinaryOrdersOfMagnitude == 0) { return; } if (getTotalCount() == getCountAtIndex(0)) { // (no need to shift any values if all recorded values are at the 0 value level:) return; } final int shiftAmount = subBucketHalfCount * numberOfBinaryOrdersOfMagnitude; // indicate underflow if minValue is in the range being shifted from: int minNonZeroValueIndex = countsArrayIndex(getMinNonZeroValue()); // Any shifting into the bottom-most half bucket would represents a loss of accuracy, // and a non-reversible operation. Therefore any non-0 value that falls in an // index below (shiftAmount + subBucketHalfCount) would represent an underflow: // <DetailedExplanation:> // The fact that the top and bottom halves of the first bucket use the same scale // means any shift into the bottom half is invalid. The shift requires that each // successive subBucketCount be encoded with a scale 2x the previous one, as that // is how the powers of 2 are applied. // In particular, if the shift amount is such that it would shift something from // the top half of the first bucket to the bottom half, that's all stored with the // same unit, so half of a larger odd value couldn't be restored to its proper // value by a subsequent left shift because we would need the bottom half to be // encoded in half-units. // Furthermore, shifts from anywhere (in the top half of the first bucket or // beyond) will be incorrectly encoded if they end up in the bottom half. If // subBucketHalfCount is, say, 1024, and the shift is by 1, the value for 1600 // would become 576, which is certainly not 1600/2. With a shift of 2 and a // value of 2112 (index 2048 + 32), the resulting value is 32, not 525. For // comparison, with shift 2 and value 4096 (index 2048 + 1024 = 3072), 3072 - 2048 = 1024. // That's the first entry in the top half of bucket 0, which encodes simply // 1024 = 4096 / 4. Thus, any non-0 value that falls in an index below // (shiftAmount + subBucketHalfCount) would represent an underflow. 
// </DetailedExplanation:> if (minNonZeroValueIndex < shiftAmount + subBucketHalfCount) { throw new ArrayIndexOutOfBoundsException( "Operation would underflow and lose precision of already recorded value counts"); } // perform shift: long maxValueBeforeShift = maxValueUpdater.getAndSet(this, 0); long minNonZeroValueBeforeShift = minNonZeroValueUpdater.getAndSet(this, Long.MAX_VALUE); // move normalizingIndexOffset shiftNormalizingIndexByOffset(-shiftAmount, false, newIntegerToDoubleValueConversionRatio); // adjust min, max: updateMinAndMax(maxValueBeforeShift >> numberOfBinaryOrdersOfMagnitude); if (minNonZeroValueBeforeShift < Long.MAX_VALUE) { updateMinAndMax(minNonZeroValueBeforeShift >> numberOfBinaryOrdersOfMagnitude); } } // ###### ####### ## ## ######## ### ######## #### ###### ####### ## ## // ## ## ## ## ### ### ## ## ## ## ## ## ## ## ## ## ## ### ## // ## ## ## #### #### ## ## ## ## ## ## ## ## ## ## #### ## // ## ## ## ## ### ## ######## ## ## ######## ## ###### ## ## ## ## ## // ## ## ## ## ## ## ######### ## ## ## ## ## ## ## #### // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### // ###### ####### ## ## ## ## ## ## ## #### ###### ####### ## ## // // Comparison support: // /** * Determine if this histogram is equivalent to another. * * @param other the other histogram to compare to * @return True if this histogram are equivalent with the other. */ public boolean equals(final Object other){ if ( this == other ) { return true; } if ( !(other instanceof AbstractHistogram) ) { return false; } AbstractHistogram that = (AbstractHistogram)other; if ((lowestDiscernibleValue != that.lowestDiscernibleValue) || (numberOfSignificantValueDigits != that.numberOfSignificantValueDigits) || (integerToDoubleValueConversionRatio != that.integerToDoubleValueConversionRatio)) { return false; } if (getTotalCount() != that.getTotalCount()) { return false; } if (getMaxValue() != that.getMaxValue()) { return false; } if (getMinNonZeroValue() != that.getMinNonZeroValue()) { return false; } // 2 histograms may be equal but have different underlying array sizes. This can happen for instance due to // resizing. if (countsArrayLength == that.countsArrayLength) { for (int i = 0; i < countsArrayLength; i++) { if (getCountAtIndex(i) != that.getCountAtIndex(i)) { return false; } } } else { // Comparing the values is valid here because we have already confirmed the histograms have the same total // count. It would not be correct otherwise. 
for (HistogramIterationValue value : this.recordedValues()) { long countAtValueIteratedTo = value.getCountAtValueIteratedTo(); long valueIteratedTo = value.getValueIteratedTo(); if (that.getCountAtValue(valueIteratedTo) != countAtValueIteratedTo) { return false; } } } return true; } @Override public int hashCode() { int h = 0; h = oneAtATimeHashStep(h, unitMagnitude); h = oneAtATimeHashStep(h, numberOfSignificantValueDigits); h = oneAtATimeHashStep(h, (int) getTotalCount()); h = oneAtATimeHashStep(h, (int) getMaxValue()); h = oneAtATimeHashStep(h, (int) getMinNonZeroValue()); h += (h << 3); h ^= (h >> 11); h += (h << 15); return h; } private int oneAtATimeHashStep(int h, final int v) { h += v; h += (h << 10); h ^= (h >> 6); return h; } // ###### ######## ######## ## ## ###### ######## ## ## ######## ######## // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## // ## ## ## ## ## ## ## ## ## ## ## ## ## // ###### ## ######## ## ## ## ## ## ## ######## ###### // ## ## ## ## ## ## ## ## ## ## ## ## ## // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## // ###### ## ## ## ####### ###### ## ####### ## ## ######## // // ####### ## ## ######## ######## ## ## #### ## ## ###### // ## ## ## ## ## ## ## ## ## ## ### ## ## ## // ## ## ## ## ## ## ## #### ## #### ## ## // ## ## ## ## ###### ######## ## ## ## ## ## ## #### // ## ## ## ## ## ## ## ## ## ## ## #### ## ## // ## ## ## ## ## ## ## ## ## ## ### ## ## // ##### ## ####### ######## ## ## ## #### ## ## ###### // // Histogram structure querying support: // /** * get the configured lowestDiscernibleValue * @return lowestDiscernibleValue */ public long getLowestDiscernibleValue() { return lowestDiscernibleValue; } /** * get the configured highestTrackableValue * @return highestTrackableValue */ public long getHighestTrackableValue() { return highestTrackableValue; } /** * get the configured numberOfSignificantValueDigits * @return numberOfSignificantValueDigits */ public int getNumberOfSignificantValueDigits() { return numberOfSignificantValueDigits; } /** * Get the size (in value units) of the range of values that are equivalent to the given value within the * histogram's resolution. Where "equivalent" means that value samples recorded for any two * equivalent values are counted in a common total count. * * @param value The given value * @return The size of the range of values equivalent to the given value. */ public long sizeOfEquivalentValueRange(final long value) { final int bucketIndex = getBucketIndex(value); long distanceToNextValue = 1L << (unitMagnitude + bucketIndex); return distanceToNextValue; } /** * Get the lowest value that is equivalent to the given value within the histogram's resolution. * Where "equivalent" means that value samples recorded for any two * equivalent values are counted in a common total count. * * @param value The given value * @return The lowest value that is equivalent to the given value within the histogram's resolution. */ public long lowestEquivalentValue(final long value) { final int bucketIndex = getBucketIndex(value); final int subBucketIndex = getSubBucketIndex(value, bucketIndex); long thisValueBaseLevel = valueFromIndex(bucketIndex, subBucketIndex); return thisValueBaseLevel; } /** * Get the highest value that is equivalent to the given value within the histogram's resolution. * Where "equivalent" means that value samples recorded for any two * equivalent values are counted in a common total count. 
* * @param value The given value * @return The highest value that is equivalent to the given value within the histogram's resolution. */ public long highestEquivalentValue(final long value) { return nextNonEquivalentValue(value) - 1; } /** * Get a value that lies in the middle (rounded up) of the range of values equivalent the given value. * Where "equivalent" means that value samples recorded for any two * equivalent values are counted in a common total count. * * @param value The given value * @return The value lies in the middle (rounded up) of the range of values equivalent the given value. */ public long medianEquivalentValue(final long value) { return (lowestEquivalentValue(value) + (sizeOfEquivalentValueRange(value) >> 1)); } /** * Get the next value that is not equivalent to the given value within the histogram's resolution. * Where "equivalent" means that value samples recorded for any two * equivalent values are counted in a common total count. * * @param value The given value * @return The next value that is not equivalent to the given value within the histogram's resolution. */ public long nextNonEquivalentValue(final long value) { return lowestEquivalentValue(value) + sizeOfEquivalentValueRange(value); } /** * Determine if two values are equivalent with the histogram's resolution. * Where "equivalent" means that value samples recorded for any two * equivalent values are counted in a common total count. * * @param value1 first value to compare * @param value2 second value to compare * @return True if values are equivalent with the histogram's resolution. */ public boolean valuesAreEquivalent(final long value1, final long value2) { return (lowestEquivalentValue(value1) == lowestEquivalentValue(value2)); } /** * Provide a (conservatively high) estimate of the Histogram's total footprint in bytes * * @return a (conservatively high) estimate of the Histogram's total footprint in bytes */ public int getEstimatedFootprintInBytes() { return _getEstimatedFootprintInBytes(); } // ######## #### ## ## ######## ###### ######## ### ## ## ######## // ## ## ### ### ## ## ## ## ## ## ### ### ## ## // ## ## #### #### ## ## ## ## ## #### #### ## ## // ## ## ## ### ## ###### ###### ## ## ## ## ### ## ######## // ## ## ## ## ## ## ## ######### ## ## ## // ## ## ## ## ## ## ## ## ## ## ## ## ## // ## #### ## ## ######## ###### ## ## ## ## ## ## // // #### ######## ### ###### // ## ## ## ## ## ## ## // #### ## ## ## ## // #### ## ## ## ## #### // ## ## ## ## ######### ## ## // ## ## ## ## ## ## ## // #### ## ## ## ## ###### // // Timestamp and tag support: // /** * get the start time stamp [optionally] stored with this histogram * @return the start time stamp [optionally] stored with this histogram */ @Override public long getStartTimeStamp() { return startTimeStampMsec; } /** * Set the start time stamp value associated with this histogram to a given value. * @param timeStampMsec the value to set the time stamp to, [by convention] in msec since the epoch. */ @Override public void setStartTimeStamp(final long timeStampMsec) { this.startTimeStampMsec = timeStampMsec; } /** * get the end time stamp [optionally] stored with this histogram * @return the end time stamp [optionally] stored with this histogram */ @Override public long getEndTimeStamp() { return endTimeStampMsec; } /** * Set the end time stamp value associated with this histogram to a given value. * @param timeStampMsec the value to set the time stamp to, [by convention] in msec since the epoch. 
*/ @Override public void setEndTimeStamp(final long timeStampMsec) { this.endTimeStampMsec = timeStampMsec; } /** * get the tag string [optionally] associated with this histogram * @return tag string [optionally] associated with this histogram */ public String getTag() { return tag; } /** * Set the tag string associated with this histogram * @param tag the tag string to associate with this histogram */ public void setTag(String tag) { this.tag = tag; } // ######## ### ######## ### ### ###### ###### ######## ###### ###### // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## // ## ## ## ## ## ## ## ## ## ## ## ## ## ## // ## ## ## ## ## ## ## ## ## ## ## ###### ###### ###### // ## ## ######### ## ######### ######### ## ## ## ## ## // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## // ######## ## ## ## ## ## ## ## ###### ###### ######## ###### ###### // // Histogram Data access support: // /** * Get the lowest recorded value level in the histogram. If the histogram has no recorded values, * the value returned is undefined. * * @return the Min value recorded in the histogram */ public long getMinValue() { if ((getCountAtIndex(0) > 0) || (getTotalCount() == 0)) { return 0; } return getMinNonZeroValue(); } /** * Get the highest recorded value level in the histogram. If the histogram has no recorded values, * the value returned is undefined. * * @return the Max value recorded in the histogram */ public long getMaxValue() { return (maxValue == 0) ? 0 : highestEquivalentValue(maxValue); } /** * Get the lowest recorded non-zero value level in the histogram. If the histogram has no recorded values, * the value returned is undefined. * * @return the lowest recorded non-zero value level in the histogram */ public long getMinNonZeroValue() { return (minNonZeroValue == Long.MAX_VALUE) ? Long.MAX_VALUE : lowestEquivalentValue(minNonZeroValue); } /** * Get the highest recorded value level in the histogram as a double * * @return the Max value recorded in the histogram */ @Override public double getMaxValueAsDouble() { return getMaxValue(); } /** * Get the computed mean value of all recorded values in the histogram * * @return the mean value (in value units) of the histogram data */ public double getMean() { if (getTotalCount() == 0) { return 0.0; } recordedValuesIterator.reset(); double totalValue = 0; while (recordedValuesIterator.hasNext()) { HistogramIterationValue iterationValue = recordedValuesIterator.next(); totalValue += medianEquivalentValue(iterationValue.getValueIteratedTo()) * (double) iterationValue.getCountAtValueIteratedTo(); } return (totalValue * 1.0) / getTotalCount(); } /** * Get the computed standard deviation of all recorded values in the histogram * * @return the standard deviation (in value units) of the histogram data */ public double getStdDeviation() { if (getTotalCount() == 0) { return 0.0; } final double mean = getMean(); double geometric_deviation_total = 0.0; recordedValuesIterator.reset(); while (recordedValuesIterator.hasNext()) { HistogramIterationValue iterationValue = recordedValuesIterator.next(); double deviation = (medianEquivalentValue(iterationValue.getValueIteratedTo()) * 1.0) - mean; geometric_deviation_total += (deviation * deviation) * iterationValue.getCountAddedInThisIterationStep(); } double std_deviation = Math.sqrt(geometric_deviation_total / getTotalCount()); return std_deviation; } /** * Get the value at a given percentile. 
* Returns the largest value that (100% - percentile) [+/- 1 ulp] of the overall recorded value entries * in the histogram are either larger than or equivalent to. Returns 0 if no recorded values exist. * <p> * Note that two values are "equivalent" in this statement if * {@link org.HdrHistogram.AbstractHistogram#valuesAreEquivalent} would return true. * * @param percentile The percentile for which to return the associated value * @return The largest value that (100% - percentile) [+/- 1 ulp] of the overall recorded value entries * in the histogram are either larger than or equivalent to. Returns 0 if no recorded values exist. */ public long getValueAtPercentile(final double percentile) { // Truncate to 0..100%, and remove 1 ulp to avoid roundoff overruns into next bucket when we // subsequently round up to the nearest integer: double requestedPercentile = Math.min(Math.max(Math.nextAfter(percentile, Double.NEGATIVE_INFINITY), 0.0D), 100.0D); // derive the count at the requested percentile. We round up to nearest integer to ensure that the // largest value that the requested percentile of overall recorded values is <= is actually included. double fpCountAtPercentile = (requestedPercentile * getTotalCount()) / 100.0D; long countAtPercentile = (long)(Math.ceil(fpCountAtPercentile)); // round up countAtPercentile = Math.max(countAtPercentile, 1); // Make sure we at least reach the first recorded entry long totalToCurrentIndex = 0; for (int i = 0; i < countsArrayLength; i++) { totalToCurrentIndex += getCountAtIndex(i); if (totalToCurrentIndex >= countAtPercentile) { long valueAtIndex = valueFromIndex(i); return (percentile == 0.0) ? lowestEquivalentValue(valueAtIndex) : highestEquivalentValue(valueAtIndex); } } return 0; } /** * Get the percentile at a given value. * The percentile returned is the percentile of values recorded in the histogram that are smaller * than or equivalent to the given value. * <p> * Note that two values are "equivalent" in this statement if * {@link org.HdrHistogram.AbstractHistogram#valuesAreEquivalent} would return true. * * @param value The value for which to return the associated percentile * @return The percentile of values recorded in the histogram that are smaller than or equivalent * to the given value. */ public double getPercentileAtOrBelowValue(final long value) { if (getTotalCount() == 0) { return 100.0; } final int targetIndex = Math.min(countsArrayIndex(value), (countsArrayLength - 1)); long totalToCurrentIndex = 0; for (int i = 0; i <= targetIndex; i++) { totalToCurrentIndex += getCountAtIndex(i); } return (100.0 * totalToCurrentIndex) / getTotalCount(); } /** * Get the count of recorded values within a range of value levels (inclusive to within the histogram's resolution). * * @param lowValue The lower value bound on the range for which * to provide the recorded count. Will be rounded down with * {@link Histogram#lowestEquivalentValue lowestEquivalentValue}. * @param highValue The higher value bound on the range for which to provide the recorded count. * Will be rounded up with {@link Histogram#highestEquivalentValue highestEquivalentValue}. 
* @return the total count of values recorded in the histogram within the value range that is * {@literal >=} lowestEquivalentValue(<i>lowValue</i>) and {@literal <=} highestEquivalentValue(<i>highValue</i>) */ public long getCountBetweenValues(final long lowValue, final long highValue) throws ArrayIndexOutOfBoundsException { final int lowIndex = Math.max(0, countsArrayIndex(lowValue)); final int highIndex = Math.min(countsArrayIndex(highValue), (countsArrayLength - 1)); long count = 0; for (int i = lowIndex ; i <= highIndex; i++) { count += getCountAtIndex(i); } return count; } /** * Get the count of recorded values at a specific value (to within the histogram resolution at the value level). * * @param value The value for which to provide the recorded count * @return The total count of values recorded in the histogram within the value range that is * {@literal >=} lowestEquivalentValue(<i>value</i>) and {@literal <=} highestEquivalentValue(<i>value</i>) */ public long getCountAtValue(final long value) throws ArrayIndexOutOfBoundsException { final int index = Math.min(Math.max(0, countsArrayIndex(value)), (countsArrayLength - 1)); return getCountAtIndex(index); } // #### ######## ######## ######## ### ######## #### ####### ## ## // ## ## ## ## ## ## ## ## ## ## ## ### ## // ## ## ## ## ## ## ## ## ## ## ## #### ## // ## ## ###### ######## ## ## ## ## ## ## ## ## ## // ## ## ## ## ## ######### ## ## ## ## ## #### // ## ## ## ## ## ## ## ## ## ## ## ## ### // #### ## ######## ## ## ## ## ## #### ####### ## ## // // Iteration Support: // /** * Provide a means of iterating through histogram values according to percentile levels. The iteration is * performed in steps that start at 0% and reduce their distance to 100% according to the * <i>percentileTicksPerHalfDistance</i> parameter, ultimately reaching 100% when all recorded histogram * values are exhausted. * <p> * @param percentileTicksPerHalfDistance The number of iteration steps per half-distance to 100%. * @return An {@link java.lang.Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} * through the histogram using a * {@link PercentileIterator} */ public Percentiles percentiles(final int percentileTicksPerHalfDistance) { return new Percentiles(this, percentileTicksPerHalfDistance); } /** * Provide a means of iterating through histogram values using linear steps. The iteration is * performed in steps of <i>valueUnitsPerBucket</i> in size, terminating when all recorded histogram * values are exhausted. * * @param valueUnitsPerBucket The size (in value units) of the linear buckets to use * @return An {@link java.lang.Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} * through the histogram using a * {@link LinearIterator} */ public LinearBucketValues linearBucketValues(final long valueUnitsPerBucket) { return new LinearBucketValues(this, valueUnitsPerBucket); } /** * Provide a means of iterating through histogram values at logarithmically increasing levels. The iteration is * performed in steps that start at <i>valueUnitsInFirstBucket</i> and increase exponentially according to * <i>logBase</i>, terminating when all recorded histogram values are exhausted. 
* * @param valueUnitsInFirstBucket The size (in value units) of the first bucket in the iteration * @param logBase The multiplier by which bucket sizes will grow in each iteration step * @return An {@link java.lang.Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} * through the histogram using * a {@link LogarithmicIterator} */ public LogarithmicBucketValues logarithmicBucketValues(final long valueUnitsInFirstBucket, final double logBase) { return new LogarithmicBucketValues(this, valueUnitsInFirstBucket, logBase); } /** * Provide a means of iterating through all recorded histogram values using the finest granularity steps * supported by the underlying representation. The iteration steps through all non-zero recorded value counts, * and terminates when all recorded histogram values are exhausted. * * @return An {@link java.lang.Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} * through the histogram using * a {@link RecordedValuesIterator} */ public RecordedValues recordedValues() { return new RecordedValues(this); } /** * Provide a means of iterating through all histogram values using the finest granularity steps supported by * the underlying representation. The iteration steps through all possible unit value levels, regardless of * whether or not there were recorded values for that value level, and terminates when all recorded histogram * values are exhausted. * * @return An {@link java.lang.Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} * through the histogram using * a {@link AllValuesIterator} */ public AllValues allValues() { return new AllValues(this); } // Percentile iterator support: /** * An {@link java.lang.Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} through * the histogram using a {@link PercentileIterator} */ public class Percentiles implements Iterable<HistogramIterationValue> { final AbstractHistogram histogram; final int percentileTicksPerHalfDistance; private Percentiles(final AbstractHistogram histogram, final int percentileTicksPerHalfDistance) { this.histogram = histogram; this.percentileTicksPerHalfDistance = percentileTicksPerHalfDistance; } /** * @return A {@link PercentileIterator}{@literal <}{@link HistogramIterationValue}{@literal >} */ @Override public Iterator<HistogramIterationValue> iterator() { return new PercentileIterator(histogram, percentileTicksPerHalfDistance); } } // Linear iterator support: /** * An {@link java.lang.Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} through * the histogram using a {@link LinearIterator} */ public class LinearBucketValues implements Iterable<HistogramIterationValue> { final AbstractHistogram histogram; final long valueUnitsPerBucket; private LinearBucketValues(final AbstractHistogram histogram, final long valueUnitsPerBucket) { this.histogram = histogram; this.valueUnitsPerBucket = valueUnitsPerBucket; } /** * @return A {@link LinearIterator}{@literal <}{@link HistogramIterationValue}{@literal >} */ public Iterator<HistogramIterationValue> iterator() { return new LinearIterator(histogram, valueUnitsPerBucket); } } // Logarithmic iterator support: /** * An {@link java.lang.Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} through * the histogram using a {@link LogarithmicIterator} */ public class LogarithmicBucketValues implements Iterable<HistogramIterationValue> { final AbstractHistogram histogram; final long valueUnitsInFirstBucket; final double logBase; private LogarithmicBucketValues(final AbstractHistogram histogram, 
final long valueUnitsInFirstBucket, final double logBase) { this.histogram = histogram; this.valueUnitsInFirstBucket = valueUnitsInFirstBucket; this.logBase = logBase; } /** * @return A {@link LogarithmicIterator}{@literal <}{@link HistogramIterationValue}{@literal >} */ public Iterator<HistogramIterationValue> iterator() { return new LogarithmicIterator(histogram, valueUnitsInFirstBucket, logBase); } } // Recorded value iterator support: /** * An {@link java.lang.Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} through * the histogram using a {@link RecordedValuesIterator} */ public class RecordedValues implements Iterable<HistogramIterationValue> { final AbstractHistogram histogram; private RecordedValues(final AbstractHistogram histogram) { this.histogram = histogram; } /** * @return A {@link RecordedValuesIterator}{@literal <}{@link HistogramIterationValue}{@literal >} */ public Iterator<HistogramIterationValue> iterator() { return new RecordedValuesIterator(histogram); } } // AllValues iterator support: /** * An {@link java.lang.Iterable}{@literal <}{@link HistogramIterationValue}{@literal >} through * the histogram using a {@link AllValuesIterator} */ public class AllValues implements Iterable<HistogramIterationValue> { final AbstractHistogram histogram; private AllValues(final AbstractHistogram histogram) { this.histogram = histogram; } /** * @return A {@link AllValuesIterator}{@literal <}{@link HistogramIterationValue}{@literal >} */ public Iterator<HistogramIterationValue> iterator() { return new AllValuesIterator(histogram); } } // ######## ######## ######## ###### ######## ## ## ######## #### ## ######## // ## ## ## ## ## ## ## ## ### ## ## ## ## ## // ## ## ## ## ## ## ## #### ## ## ## ## ## // ######## ###### ######## ## ###### ## ## ## ## ## ## ###### // ## ## ## ## ## ## ## #### ## ## ## ## // ## ## ## ## ## ## ## ## ### ## ## ## ## // ## ######## ## ## ###### ######## ## ## ## #### ######## ######## // // ####### ## ## ######## ######## ## ## ######## // ## ## ## ## ## ## ## ## ## ## // ## ## ## ## ## ## ## ## ## ## // ## ## ## ## ## ######## ## ## ## // ## ## ## ## ## ## ## ## ## // ## ## ## ## ## ## ## ## ## // ####### ####### ## ## ####### ## // // Textual percentile output support: // /** * Produce textual representation of the value distribution of histogram data by percentile. The distribution is * output with exponentially increasing resolution, with each exponentially decreasing half-distance containing * five (5) percentile reporting tick points. * * @param printStream Stream into which the distribution will be output * <p> * @param outputValueUnitScalingRatio The scaling factor by which to divide histogram recorded values units in * output */ public void outputPercentileDistribution(final PrintStream printStream, final Double outputValueUnitScalingRatio) { outputPercentileDistribution(printStream, 5, outputValueUnitScalingRatio); } /** * Produce textual representation of the value distribution of histogram data by percentile. The distribution is * output with exponentially increasing resolution, with each exponentially decreasing half-distance containing * <i>dumpTicksPerHalf</i> percentile reporting tick points. 
* * @param printStream Stream into which the distribution will be output * <p> * @param percentileTicksPerHalfDistance The number of reporting points per exponentially decreasing half-distance * <p> * @param outputValueUnitScalingRatio The scaling factor by which to divide histogram recorded values units in * output */ public void outputPercentileDistribution(final PrintStream printStream, final int percentileTicksPerHalfDistance, final Double outputValueUnitScalingRatio) { outputPercentileDistribution(printStream, percentileTicksPerHalfDistance, outputValueUnitScalingRatio, false); } /** * Produce textual representation of the value distribution of histogram data by percentile. The distribution is * output with exponentially increasing resolution, with each exponentially decreasing half-distance containing * <i>dumpTicksPerHalf</i> percentile reporting tick points. * * @param printStream Stream into which the distribution will be output * <p> * @param percentileTicksPerHalfDistance The number of reporting points per exponentially decreasing half-distance * <p> * @param outputValueUnitScalingRatio The scaling factor by which to divide histogram recorded values units in * output * @param useCsvFormat Output in CSV format if true. Otherwise use plain text form. */ public void outputPercentileDistribution(final PrintStream printStream, final int percentileTicksPerHalfDistance, final Double outputValueUnitScalingRatio, final boolean useCsvFormat) { if (useCsvFormat) { printStream.format("\"Value\",\"Percentile\",\"TotalCount\",\"1/(1-Percentile)\"\n"); } else { printStream.format("%12s %14s %10s %14s\n\n", "Value", "Percentile", "TotalCount", "1/(1-Percentile)"); } PercentileIterator iterator = percentileIterator; iterator.reset(percentileTicksPerHalfDistance); String percentileFormatString; String lastLinePercentileFormatString; if (useCsvFormat) { percentileFormatString = "%." + numberOfSignificantValueDigits + "f,%.12f,%d,%.2f\n"; lastLinePercentileFormatString = "%." + numberOfSignificantValueDigits + "f,%.12f,%d,Infinity\n"; } else { percentileFormatString = "%12." + numberOfSignificantValueDigits + "f %2.12f %10d %14.2f\n"; lastLinePercentileFormatString = "%12." + numberOfSignificantValueDigits + "f %2.12f %10d\n"; } while (iterator.hasNext()) { HistogramIterationValue iterationValue = iterator.next(); if (iterationValue.getPercentileLevelIteratedTo() != 100.0D) { printStream.format(Locale.US, percentileFormatString, iterationValue.getValueIteratedTo() / outputValueUnitScalingRatio, iterationValue.getPercentileLevelIteratedTo()/100.0D, iterationValue.getTotalCountToThisValue(), 1/(1.0D - (iterationValue.getPercentileLevelIteratedTo()/100.0D)) ); } else { printStream.format(Locale.US, lastLinePercentileFormatString, iterationValue.getValueIteratedTo() / outputValueUnitScalingRatio, iterationValue.getPercentileLevelIteratedTo()/100.0D, iterationValue.getTotalCountToThisValue()); } } if (!useCsvFormat) { // Calculate and output mean and std. deviation. // Note: mean/std. deviation numbers are very often completely irrelevant when // data is extremely non-normal in distribution (e.g. in cases of strong multi-modal // response time distribution associated with GC pauses). However, reporting these numbers // can be very useful for contrasting with the detailed percentile distribution // reported by outputPercentileDistribution(). 
It is not at all surprising to find // percentile distributions where results fall many tens or even hundreds of standard // deviations away from the mean - such results simply indicate that the data sampled // exhibits a very non-normal distribution, highlighting situations for which the std. // deviation metric is a useless indicator. // double mean = getMean() / outputValueUnitScalingRatio; double std_deviation = getStdDeviation() / outputValueUnitScalingRatio; printStream.format(Locale.US, "#[Mean = %12." + numberOfSignificantValueDigits + "f, StdDeviation = %12." + numberOfSignificantValueDigits +"f]\n", mean, std_deviation); printStream.format(Locale.US, "#[Max = %12." + numberOfSignificantValueDigits + "f, Total count = %12d]\n", getMaxValue() / outputValueUnitScalingRatio, getTotalCount()); printStream.format(Locale.US, "#[Buckets = %12d, SubBuckets = %12d]\n", bucketCount, subBucketCount); } } // ###### ######## ######## #### ### ## #### ######## ### ######## #### ####### ## ## // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #### ## // ###### ###### ######## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## // ## ## ## ## ## ######### ## ## ## ######### ## ## ## ## ## #### // ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### // ###### ######## ## ## #### ## ## ######## #### ######## ## ## ## #### ####### ## ## // // Serialization support: // private static final long serialVersionUID = 0x1c849302; private void writeObject(final ObjectOutputStream o) throws IOException { o.writeLong(lowestDiscernibleValue); o.writeLong(highestTrackableValue); o.writeInt(numberOfSignificantValueDigits); o.writeInt(getNormalizingIndexOffset()); o.writeDouble(integerToDoubleValueConversionRatio); o.writeLong(getTotalCount()); // Max Value is added to the serialized form because establishing max via scanning is "harder" during // deserialization, as the counts array is not available at the subclass deserializing level, and we don't // really want to have each subclass establish max on it's own... o.writeLong(maxValue); o.writeLong(minNonZeroValue); o.writeLong(startTimeStampMsec); o.writeLong(endTimeStampMsec); o.writeBoolean(autoResize); o.writeInt(wordSizeInBytes); } private void readObject(final ObjectInputStream o) throws IOException, ClassNotFoundException { final long lowestDiscernibleValue = o.readLong(); final long highestTrackableValue = o.readLong(); final int numberOfSignificantValueDigits = o.readInt(); final int normalizingIndexOffset = o.readInt(); final double integerToDoubleValueConversionRatio = o.readDouble(); final long indicatedTotalCount = o.readLong(); final long indicatedMaxValue = o.readLong(); final long indicatedMinNonZeroValue = o.readLong(); final long indicatedStartTimeStampMsec = o.readLong(); final long indicatedEndTimeStampMsec = o.readLong(); final boolean indicatedAutoResize = o.readBoolean(); final int indicatedWordSizeInBytes = o.readInt(); init(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits, integerToDoubleValueConversionRatio, normalizingIndexOffset); // Set internalTrackingValues (can't establish them from array yet, because it's not yet read...) 
setTotalCount(indicatedTotalCount); maxValue = indicatedMaxValue; minNonZeroValue = indicatedMinNonZeroValue; startTimeStampMsec = indicatedStartTimeStampMsec; endTimeStampMsec = indicatedEndTimeStampMsec; autoResize = indicatedAutoResize; wordSizeInBytes = indicatedWordSizeInBytes; } // ######## ## ## ###### ####### ######## #### ## ## ###### // ## ### ## ## ## ## ## ## ## ## ### ## ## ## // ## #### ## ## ## ## ## ## ## #### ## ## // ###### ## ## ## ## ## ## ## ## ## ## ## ## ## #### // ## ## #### ## ## ## ## ## ## ## #### ## ## // ## ## ### ## ## ## ## ## ## ## ## ### ## ## // ######## ## ## ###### ####### ######## #### ## ## ###### // // #### ######## ######## ###### ####### ######## #### ## ## ###### // ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## ## // #### ## ## ## ## ## ## ## ## ## #### ## ## // #### ## ## ###### ## ## ## ## ## ## ## ## ## ## #### // ## ## ## ## ## ## ## ## ## ## ## ## ## #### ## ## // ## ## ## ## ## ## ## ## ## ## ## ## ## ### ## ## // #### ## ######## ######## ###### ####### ######## #### ## ## ###### // // Encoding/Decoding support: // /** * Get the capacity needed to encode this histogram into a ByteBuffer * @return the capacity needed to encode this histogram into a ByteBuffer */ @Override public int getNeededByteBufferCapacity() { return getNeededByteBufferCapacity(countsArrayLength); } private static final int ENCODING_HEADER_SIZE = 40; private static final int V0_ENCODING_HEADER_SIZE = 32; int getNeededByteBufferCapacity(final int relevantLength) { return getNeededPayloadByteBufferCapacity(relevantLength) + ENCODING_HEADER_SIZE; } int getNeededPayloadByteBufferCapacity(final int relevantLength) { return (relevantLength * V2maxWordSizeInBytes); } int getNeededV0PayloadByteBufferCapacity(final int relevantLength) { return (relevantLength * wordSizeInBytes); } private static final int V0EncodingCookieBase = 0x1c849308; private static final int V0CompressedEncodingCookieBase = 0x1c849309; private static final int V1EncodingCookieBase = 0x1c849301; private static final int V1CompressedEncodingCookieBase = 0x1c849302; private static final int V2EncodingCookieBase = 0x1c849303; private static final int V2CompressedEncodingCookieBase = 0x1c849304; private static final int V2maxWordSizeInBytes = 9; // LEB128-64b9B + ZigZag require up to 9 bytes per word private static final int encodingCookieBase = V2EncodingCookieBase; private static final int compressedEncodingCookieBase = V2CompressedEncodingCookieBase; private int getEncodingCookie() { return encodingCookieBase | 0x10; // LSBit of wordSize byte indicates TLZE Encoding } private int getCompressedEncodingCookie() { return compressedEncodingCookieBase | 0x10; // LSBit of wordSize byte indicates TLZE Encoding } private static int getCookieBase(final int cookie) { return (cookie & ~0xf0); } private static int getWordSizeInBytesFromCookie(final int cookie) { if ((getCookieBase(cookie) == V2EncodingCookieBase) || (getCookieBase(cookie) == V2CompressedEncodingCookieBase)) { return V2maxWordSizeInBytes; } int sizeByte = (cookie & 0xf0) >> 4; return sizeByte & 0xe; } /** * Encode this histogram into a ByteBuffer * @param buffer The buffer to encode into * @return The number of bytes written to the buffer */ synchronized public int encodeIntoByteBuffer(final ByteBuffer buffer) { final long maxValue = getMaxValue(); final int relevantLength = countsArrayIndex(maxValue) + 1; if (buffer.capacity() < getNeededByteBufferCapacity(relevantLength)) { throw new ArrayIndexOutOfBoundsException("buffer does not have capacity for " + 
getNeededByteBufferCapacity(relevantLength) + " bytes"); } int initialPosition = buffer.position(); buffer.putInt(getEncodingCookie()); buffer.putInt(0); // Placeholder for payload length in bytes. buffer.putInt(getNormalizingIndexOffset()); buffer.putInt(numberOfSignificantValueDigits); buffer.putLong(lowestDiscernibleValue); buffer.putLong(highestTrackableValue); buffer.putDouble(getIntegerToDoubleValueConversionRatio()); int payloadStartPosition = buffer.position(); fillBufferFromCountsArray(buffer); buffer.putInt(initialPosition + 4, buffer.position() - payloadStartPosition); // Record the payload length return buffer.position() - initialPosition; } /** * Encode this histogram in compressed form into a byte array * @param targetBuffer The buffer to encode into * @param compressionLevel Compression level (for java.util.zip.Deflater). * @return The number of bytes written to the buffer */ @Override synchronized public int encodeIntoCompressedByteBuffer( final ByteBuffer targetBuffer, final int compressionLevel) { int neededCapacity = getNeededByteBufferCapacity(countsArrayLength); if (intermediateUncompressedByteBuffer == null || intermediateUncompressedByteBuffer.capacity() < neededCapacity) { intermediateUncompressedByteBuffer = ByteBuffer.allocate(neededCapacity).order(BIG_ENDIAN); } intermediateUncompressedByteBuffer.clear(); int initialTargetPosition = targetBuffer.position(); final int uncompressedLength = encodeIntoByteBuffer(intermediateUncompressedByteBuffer); targetBuffer.putInt(getCompressedEncodingCookie()); targetBuffer.putInt(0); // Placeholder for compressed contents length Deflater compressor = new Deflater(compressionLevel); compressor.setInput(intermediateUncompressedByteBuffer.array(), 0, uncompressedLength); compressor.finish(); byte[] targetArray; if (targetBuffer.hasArray()) { targetArray = targetBuffer.array(); } else { if (intermediateUncompressedByteArray == null || intermediateUncompressedByteArray.length < targetBuffer.capacity()) { intermediateUncompressedByteArray = new byte[targetBuffer.capacity()]; } targetArray = intermediateUncompressedByteArray; } int compressedTargetOffset = initialTargetPosition + 8; int compressedDataLength = compressor.deflate( targetArray, compressedTargetOffset, targetArray.length - compressedTargetOffset ); compressor.end(); if (!targetBuffer.hasArray()) { targetBuffer.put(targetArray, compressedTargetOffset, compressedDataLength); } targetBuffer.putInt(initialTargetPosition + 4, compressedDataLength); // Record the compressed length int bytesWritten = compressedDataLength + 8; targetBuffer.position(initialTargetPosition + bytesWritten); return bytesWritten; } /** * Encode this histogram in compressed form into a byte array * @param targetBuffer The buffer to encode into * @return The number of bytes written to the array */ public int encodeIntoCompressedByteBuffer(final ByteBuffer targetBuffer) { return encodeIntoCompressedByteBuffer(targetBuffer, Deflater.DEFAULT_COMPRESSION); } private static final Class[] constructorArgsTypes = {Long.TYPE, Long.TYPE, Integer.TYPE}; static <T extends AbstractHistogram> T decodeFromByteBuffer( final ByteBuffer buffer, final Class<T> histogramClass, final long minBarForHighestTrackableValue) { try { return decodeFromByteBuffer(buffer, histogramClass, minBarForHighestTrackableValue, null); } catch (DataFormatException ex) { throw new RuntimeException(ex); } } private static <T extends AbstractHistogram> T decodeFromByteBuffer( final ByteBuffer buffer, final Class<T> histogramClass, final long 
minBarForHighestTrackableValue, final Inflater decompressor) throws DataFormatException { final int cookie = buffer.getInt(); final int payloadLengthInBytes; final int normalizingIndexOffset; final int numberOfSignificantValueDigits; final long lowestTrackableUnitValue; long highestTrackableValue; final double integerToDoubleValueConversionRatio; if ((getCookieBase(cookie) == encodingCookieBase) || (getCookieBase(cookie) == V1EncodingCookieBase)) { if (getCookieBase(cookie) == V2EncodingCookieBase) { if (getWordSizeInBytesFromCookie(cookie) != V2maxWordSizeInBytes) { throw new IllegalArgumentException( "The buffer does not contain a Histogram (no valid cookie found)"); } } payloadLengthInBytes = buffer.getInt(); normalizingIndexOffset = buffer.getInt(); numberOfSignificantValueDigits = buffer.getInt(); lowestTrackableUnitValue = buffer.getLong(); highestTrackableValue = buffer.getLong(); integerToDoubleValueConversionRatio = buffer.getDouble(); } else if (getCookieBase(cookie) == V0EncodingCookieBase) { numberOfSignificantValueDigits = buffer.getInt(); lowestTrackableUnitValue = buffer.getLong(); highestTrackableValue = buffer.getLong(); buffer.getLong(); // Discard totalCount field in V0 header. payloadLengthInBytes = Integer.MAX_VALUE; integerToDoubleValueConversionRatio = 1.0; normalizingIndexOffset = 0; } else { throw new IllegalArgumentException("The buffer does not contain a Histogram (no valid cookie found)"); } highestTrackableValue = Math.max(highestTrackableValue, minBarForHighestTrackableValue); T histogram; // Construct histogram: try { Constructor<T> constructor = histogramClass.getConstructor(constructorArgsTypes); histogram = constructor.newInstance(lowestTrackableUnitValue, highestTrackableValue, numberOfSignificantValueDigits); histogram.setIntegerToDoubleValueConversionRatio(integerToDoubleValueConversionRatio); histogram.setNormalizingIndexOffset(normalizingIndexOffset); try { histogram.setAutoResize(true); } catch (IllegalStateException ex) { // Allow histogram to refuse auto-sizing setting } } catch (IllegalAccessException | NoSuchMethodException | InstantiationException | InvocationTargetException ex) { throw new IllegalArgumentException(ex); } ByteBuffer payLoadSourceBuffer; final int expectedCapacity = Math.min( histogram.getNeededV0PayloadByteBufferCapacity(histogram.countsArrayLength), payloadLengthInBytes ); if (decompressor == null) { // No compressed source buffer. Payload is in buffer, after header. if (expectedCapacity > buffer.remaining()) { throw new IllegalArgumentException("The buffer does not contain the full Histogram payload"); } payLoadSourceBuffer = buffer; } else { // Compressed source buffer. Payload needs to be decoded from there. 
payLoadSourceBuffer = ByteBuffer.allocate(expectedCapacity).order(BIG_ENDIAN); int decompressedByteCount = decompressor.inflate(payLoadSourceBuffer.array()); if ((payloadLengthInBytes != Integer.MAX_VALUE) && (decompressedByteCount < payloadLengthInBytes)) { throw new IllegalArgumentException("The buffer does not contain the indicated payload amount"); } } int filledLength = ((AbstractHistogram) histogram).fillCountsArrayFromSourceBuffer( payLoadSourceBuffer, expectedCapacity, getWordSizeInBytesFromCookie(cookie)); histogram.establishInternalTackingValues(filledLength); return histogram; } private int fillCountsArrayFromSourceBuffer(ByteBuffer sourceBuffer, int lengthInBytes, int wordSizeInBytes) { if ((wordSizeInBytes != 2) && (wordSizeInBytes != 4) && (wordSizeInBytes != 8) && (wordSizeInBytes != V2maxWordSizeInBytes)) { throw new IllegalArgumentException("word size must be 2, 4, 8, or V2maxWordSizeInBytes ("+ V2maxWordSizeInBytes + ") bytes"); } final long maxAllowableCountInHistogram = ((this.wordSizeInBytes == 2) ? Short.MAX_VALUE : ((this.wordSizeInBytes == 4) ? Integer.MAX_VALUE : Long.MAX_VALUE) ); int dstIndex = 0; int endPosition = sourceBuffer.position() + lengthInBytes; while (sourceBuffer.position() < endPosition) { long count; int zerosCount = 0; if (wordSizeInBytes == V2maxWordSizeInBytes) { // V2 encoding format uses a long encoded in a ZigZag LEB128 format (up to V2maxWordSizeInBytes): count = ZigZagEncoding.getLong(sourceBuffer); if (count < 0) { long zc = -count; if (zc > Integer.MAX_VALUE) { throw new IllegalArgumentException( "An encoded zero count of > Integer.MAX_VALUE was encountered in the source"); } zerosCount = (int) zc; } } else { // decoding V1 and V0 encoding formats depends on indicated word size: count = ((wordSizeInBytes == 2) ? sourceBuffer.getShort() : ((wordSizeInBytes == 4) ? sourceBuffer.getInt() : sourceBuffer.getLong() ) ); } if (count > maxAllowableCountInHistogram) { throw new IllegalArgumentException( "An encoded count (" + count + ") does not fit in the Histogram's (" + this.wordSizeInBytes + " bytes) was encountered in the source"); } if (zerosCount > 0) { dstIndex += zerosCount; // No need to set zeros in array. Just skip them. } else { setCountAtIndex(dstIndex++, count); } } return dstIndex; // this is the destination length } synchronized void fillBufferFromCountsArray(ByteBuffer buffer) { final int countsLimit = countsArrayIndex(maxValue) + 1; int srcIndex = 0; while (srcIndex < countsLimit) { // V2 encoding format uses a ZigZag LEB128-64b9B encoded long. Positive values are counts, // while negative values indicate a repeat zero counts. 
long count = getCountAtIndex(srcIndex++); if (count < 0) { throw new RuntimeException("Cannot encode histogram containing negative counts (" + count + ") at index " + srcIndex + ", corresponding the value range [" + lowestEquivalentValue(valueFromIndex(srcIndex)) + "," + nextNonEquivalentValue(valueFromIndex(srcIndex)) + ")"); } // Count trailing 0s (which follow this count): long zerosCount = 0; if (count == 0) { zerosCount = 1; while ((srcIndex < countsLimit) && (getCountAtIndex(srcIndex) == 0)) { zerosCount++; srcIndex++; } } if (zerosCount > 1) { ZigZagEncoding.putLong(buffer, -zerosCount); } else { ZigZagEncoding.putLong(buffer, count); } } } static <T extends AbstractHistogram> T decodeFromCompressedByteBuffer( final ByteBuffer buffer, final Class<T> histogramClass, final long minBarForHighestTrackableValue) throws DataFormatException { int initialTargetPosition = buffer.position(); final int cookie = buffer.getInt(); final int headerSize; if ((getCookieBase(cookie) == compressedEncodingCookieBase) || (getCookieBase(cookie) == V1CompressedEncodingCookieBase)) { headerSize = ENCODING_HEADER_SIZE; } else if (getCookieBase(cookie) == V0CompressedEncodingCookieBase) { headerSize = V0_ENCODING_HEADER_SIZE; } else { throw new IllegalArgumentException("The buffer does not contain a compressed Histogram"); } final int lengthOfCompressedContents = buffer.getInt(); final Inflater decompressor = new Inflater(); if (buffer.hasArray()) { decompressor.setInput(buffer.array(), initialTargetPosition + 8, lengthOfCompressedContents); } else { byte[] compressedContents = new byte[lengthOfCompressedContents]; buffer.get(compressedContents); decompressor.setInput(compressedContents); } final ByteBuffer headerBuffer = ByteBuffer.allocate(headerSize).order(BIG_ENDIAN); decompressor.inflate(headerBuffer.array()); T histogram = decodeFromByteBuffer( headerBuffer, histogramClass, minBarForHighestTrackableValue, decompressor); decompressor.end(); return histogram; } // #### ## ## ######## ######## ######## ## ## ### ## // ## ### ## ## ## ## ## ### ## ## ## ## // ## #### ## ## ## ## ## #### ## ## ## ## // ## ## ## ## ## ###### ######## ## ## ## ## ## ## // ## ## #### ## ## ## ## ## #### ######### ## // ## ## ### ## ## ## ## ## ### ## ## ## // #### ## ## ## ######## ## ## ## ## ## ## ######## // // ## ## ######## ## ######## ######## ######## ###### // ## ## ## ## ## ## ## ## ## ## ## // ## ## ## ## ## ## ## ## ## ## // ######### ###### ## ######## ###### ######## ###### // ## ## ## ## ## ## ## ## ## // ## ## ## ## ## ## ## ## ## ## // ## ## ######## ######## ## ######## ## ## ###### // // Internal helper methods: // private String recordedValuesToString() { String output = ""; try { for (int i = 0; i < countsArrayLength; i++) { if (getCountAtIndex(i) != 0) { output += String.format("[%d] : %d\n", i, getCountAtIndex(i)); } } return output; } catch(Exception ex) { output += "!!! 
Exception thrown in value iteration...\n"; } return output; } @Override public String toString() { String output = "AbstractHistogram:\n"; output += super.toString(); output += recordedValuesToString(); return output; } void establishInternalTackingValues() { establishInternalTackingValues(countsArrayLength); } void establishInternalTackingValues(final int lengthToCover) { resetMaxValue(0); resetMinNonZeroValue(Long.MAX_VALUE); int maxIndex = -1; int minNonZeroIndex = -1; long observedTotalCount = 0; for (int index = 0; index < lengthToCover; index++) { long countAtIndex; if ((countAtIndex = getCountAtIndex(index)) > 0) { observedTotalCount += countAtIndex; maxIndex = index; if ((minNonZeroIndex == -1) && (index != 0)) { minNonZeroIndex = index; } } } if (maxIndex >= 0) { updatedMaxValue(highestEquivalentValue(valueFromIndex(maxIndex))); } if (minNonZeroIndex >= 0) { updateMinNonZeroValue(valueFromIndex(minNonZeroIndex)); } setTotalCount(observedTotalCount); } int getBucketsNeededToCoverValue(final long value) { // Shift won't overflow because subBucketMagnitude + unitMagnitude <= 62. // the k'th bucket can express from 0 * 2^k to subBucketCount * 2^k in units of 2^k long smallestUntrackableValue = ((long) subBucketCount) << unitMagnitude; // always have at least 1 bucket int bucketsNeeded = 1; while (smallestUntrackableValue <= value) { if (smallestUntrackableValue > (Long.MAX_VALUE / 2)) { // next shift will overflow, meaning that bucket could represent values up to ones greater than // Long.MAX_VALUE, so it's the last bucket return bucketsNeeded + 1; } smallestUntrackableValue <<= 1; bucketsNeeded++; } return bucketsNeeded; } /** * If we have N such that subBucketCount * 2^N > max value, we need storage for N+1 buckets, each with enough * slots to hold the top half of the subBucketCount (the lower half is covered by previous buckets), and the +1 * being used for the lower half of the 0'th bucket. Or, equivalently, we need 1 more bucket to capture the max * value if we consider the sub-bucket length to be halved. */ int getLengthForNumberOfBuckets(final int numberOfBuckets) { final int lengthNeeded = (numberOfBuckets + 1) * (subBucketHalfCount); return lengthNeeded; } int countsArrayIndex(final long value) { if (value < 0) { throw new ArrayIndexOutOfBoundsException("Histogram recorded value cannot be negative."); } final int bucketIndex = getBucketIndex(value); final int subBucketIndex = getSubBucketIndex(value, bucketIndex); return countsArrayIndex(bucketIndex, subBucketIndex); } private int countsArrayIndex(final int bucketIndex, final int subBucketIndex) { assert(subBucketIndex < subBucketCount); assert(bucketIndex == 0 || (subBucketIndex >= subBucketHalfCount)); // Calculate the index for the first entry that will be used in the bucket (halfway through subBucketCount). // For bucketIndex 0, all subBucketCount entries may be used, but bucketBaseIndex is still set in the middle. final int bucketBaseIndex = (bucketIndex + 1) << subBucketHalfCountMagnitude; // Calculate the offset in the bucket. This subtraction will result in a positive value in all buckets except // the 0th bucket (since a value in that bucket may be less than half the bucket's 0 to subBucketCount range). // However, this works out since we give bucket 0 twice as much space. 
final int offsetInBucket = subBucketIndex - subBucketHalfCount; // The following is the equivalent of ((subBucketIndex - subBucketHalfCount) + bucketBaseIndex; return bucketBaseIndex + offsetInBucket; } /** * @return the lowest (and therefore highest precision) bucket index that can represent the value */ int getBucketIndex(final long value) { // Calculates the number of powers of two by which the value is greater than the biggest value that fits in // bucket 0. This is the bucket index since each successive bucket can hold a value 2x greater. // The mask maps small values to bucket 0. return leadingZeroCountBase - Long.numberOfLeadingZeros(value | subBucketMask); } int getSubBucketIndex(final long value, final int bucketIndex) { // For bucketIndex 0, this is just value, so it may be anywhere in 0 to subBucketCount. // For other bucketIndex, this will always end up in the top half of subBucketCount: assume that for some bucket // k > 0, this calculation will yield a value in the bottom half of 0 to subBucketCount. Then, because of how // buckets overlap, it would have also been in the top half of bucket k-1, and therefore would have // returned k-1 in getBucketIndex(). Since we would then shift it one fewer bits here, it would be twice as big, // and therefore in the top half of subBucketCount. return (int)(value >>> (bucketIndex + unitMagnitude)); } /** * @return The value `index - normalizingIndexOffset` modulo arrayLength (always non-negative) */ int normalizeIndex(int index, int normalizingIndexOffset, int arrayLength) { if (normalizingIndexOffset == 0) { // Fastpath out of normalization. Keeps integer value histograms fast while allowing // others (like DoubleHistogram) to use normalization at a cost... return index; } if ((index > arrayLength) || (index < 0)) { throw new ArrayIndexOutOfBoundsException("index out of covered value range"); } int normalizedIndex = index - normalizingIndexOffset; // The following is the same as an unsigned remainder operation, as long as no double wrapping happens // (which shouldn't happen, as normalization is never supposed to wrap, since it would have overflowed // or underflowed before it did). This (the + and - tests) seems to be faster than a % op with a // correcting if < 0...: if (normalizedIndex < 0) { normalizedIndex += arrayLength; } else if (normalizedIndex >= arrayLength) { normalizedIndex -= arrayLength; } return normalizedIndex; } private long valueFromIndex(final int bucketIndex, final int subBucketIndex) { return ((long) subBucketIndex) << (bucketIndex + unitMagnitude); } final long valueFromIndex(final int index) { int bucketIndex = (index >> subBucketHalfCountMagnitude) - 1; int subBucketIndex = (index & (subBucketHalfCount - 1)) + subBucketHalfCount; if (bucketIndex < 0) { subBucketIndex -= subBucketHalfCount; bucketIndex = 0; } return valueFromIndex(bucketIndex, subBucketIndex); } static int numberOfSubBuckets(final int numberOfSignificantValueDigits) { final long largestValueWithSingleUnitResolution = 2 * (long) Math.pow(10, numberOfSignificantValueDigits); // We need to maintain power-of-two subBucketCount (for clean direct indexing) that is large enough to // provide unit resolution to at least largestValueWithSingleUnitResolution. So figure out // largestValueWithSingleUnitResolution's nearest power-of-two (rounded up), and use that: int subBucketCountMagnitude = (int) Math.ceil(Math.log(largestValueWithSingleUnitResolution)/Math.log(2)); int subBucketCount = (int) Math.pow(2, subBucketCountMagnitude); return subBucketCount; } }
120,126
47.147094
129
java
null
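A minimal usage sketch for the value-query, percentile, and textual-output APIs shown in the AbstractHistogram code above, driven through the concrete Histogram subclass. The tracked range, the 3-significant-digit precision, and the recorded latency values are illustrative assumptions, not part of the original record.

import org.HdrHistogram.Histogram;

public class HistogramQueryExample {
    public static void main(String[] args) {
        // Track values from 1 to one hour (in microseconds) with 3 significant digits.
        Histogram histogram = new Histogram(3600L * 1000 * 1000, 3);

        // Record some illustrative latency values (microseconds).
        for (long v = 100; v <= 100_000; v += 100) {
            histogram.recordValue(v);
        }

        // Data access methods documented above:
        System.out.println("min    = " + histogram.getMinValue());
        System.out.println("max    = " + histogram.getMaxValue());
        System.out.println("mean   = " + histogram.getMean());
        System.out.println("stddev = " + histogram.getStdDeviation());
        System.out.println("p99    = " + histogram.getValueAtPercentile(99.0));
        System.out.println("<= 50000: " + histogram.getPercentileAtOrBelowValue(50_000) + "%");

        // Iteration support, e.g. histogram.percentiles(5) or histogram.recordedValues(),
        // yields HistogramIterationValue objects for each step.

        // Textual percentile distribution, scaling output values to milliseconds:
        histogram.outputPercentileDistribution(System.out, 1000.0);
    }
}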
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/HistogramLogReader.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.io.*; import java.util.zip.DataFormatException; /** * A histogram log reader. * <p> * Histogram logs are used to capture full fidelity, per-time-interval * histograms of a recorded value. * <p> * For example, a histogram log can be used to capture high fidelity * reaction-time logs for some measured system or subsystem component. * Such a log would capture a full reaction time histogram for each * logged interval, and could be used to later reconstruct a full * HdrHistogram of the measured reaction time behavior for any arbitrary * time range within the log, by adding [only] the relevant interval * histograms. * <h3>Histogram log format:</h3> * A histogram log file consists of text lines. Lines beginning with * the "#" character are optional and treated as comments. Lines * containing the legend (starting with "Timestamp") are also optional * and ignored in parsing the histogram log. All other lines must * be valid interval description lines. Text fields are delimited by * commas, spaces. * <p> * A valid interval description line contains an optional Tag=tagString * text field, followed by an interval description. * <p> * A valid interval description must contain exactly four text fields: * <ul> * <li>StartTimestamp: The first field must contain a number parse-able as a Double value, * representing the start timestamp of the interval in seconds.</li> * <li>intervalLength: The second field must contain a number parse-able as a Double value, * representing the length of the interval in seconds.</li> * <li>Interval_Max: The third field must contain a number parse-able as a Double value, * which generally represents the maximum value of the interval histogram.</li> * <li>Interval_Compressed_Histogram: The fourth field must contain a text field * parse-able as a Base64 text representation of a compressed HdrHistogram.</li> * </ul> * The log file may contain an optional indication of a starting time. Starting time * is indicated using a special comments starting with "#[StartTime: " and followed * by a number parse-able as a double, representing the start time (in seconds) * that may be added to timestamps in the file to determine an absolute * timestamp (e.g. since the epoch) for each interval. */ public class HistogramLogReader implements Closeable { private final HistogramLogScanner scanner; private final HistogramLogScanner.EventHandler handler = new HistogramLogScanner.EventHandler() { @Override public boolean onComment(String comment) { return false; } @Override public boolean onBaseTime(double secondsSinceEpoch) { baseTimeSec = secondsSinceEpoch; // base time represented as seconds since epoch observedBaseTime = true; return false; } @Override public boolean onStartTime(double secondsSinceEpoch) { startTimeSec = secondsSinceEpoch; // start time represented as seconds since epoch observedStartTime = true; return false; } @Override public boolean onHistogram(String tag, double timestamp, double length, HistogramLogScanner.EncodableHistogramSupplier lazyReader) { final double logTimeStampInSec = timestamp; // Timestamp is expected to be in seconds if (!observedStartTime) { // No explicit start time noted. Use 1st observed time: startTimeSec = logTimeStampInSec; observedStartTime = true; } if (!observedBaseTime) { // No explicit base time noted. 
Deduce from 1st observed time (compared to start time): if (logTimeStampInSec < startTimeSec - (365 * 24 * 3600.0)) { // Criteria Note: if log timestamp is more than a year in the past (compared to // StartTime), we assume that timestamps in the log are not absolute baseTimeSec = startTimeSec; } else { // Timestamps are absolute baseTimeSec = 0.0; } observedBaseTime = true; } final double absoluteStartTimeStampSec = logTimeStampInSec + baseTimeSec; final double offsetStartTimeStampSec = absoluteStartTimeStampSec - startTimeSec; final double intervalLengthSec = length; // Timestamp length is expect to be in seconds final double absoluteEndTimeStampSec = absoluteStartTimeStampSec + intervalLengthSec; final double startTimeStampToCheckRangeOn = absolute ? absoluteStartTimeStampSec : offsetStartTimeStampSec; if (startTimeStampToCheckRangeOn < rangeStartTimeSec) { // keep on trucking return false; } if (startTimeStampToCheckRangeOn > rangeEndTimeSec) { // after limit we stop on each line return true; } EncodableHistogram histogram; try { histogram = lazyReader.read(); } catch (DataFormatException e) { // stop after exception return true; } histogram.setStartTimeStamp((long) (absoluteStartTimeStampSec * 1000.0)); histogram.setEndTimeStamp((long) (absoluteEndTimeStampSec * 1000.0)); histogram.setTag(tag); nextHistogram = histogram; return true; } @Override public boolean onException(Throwable t) { // We ignore NoSuchElementException, but stop processing. // Next call to nextIntervalHistogram may return null. if (t instanceof java.util.NoSuchElementException){ return true; } // rethrow if (t instanceof RuntimeException) { throw (RuntimeException) t; } else { throw new RuntimeException(t); } } }; private double startTimeSec = 0.0; private boolean observedStartTime = false; private double baseTimeSec = 0.0; private boolean observedBaseTime = false; // scanner handling state private boolean absolute; private double rangeStartTimeSec; private double rangeEndTimeSec; private EncodableHistogram nextHistogram; /** * Constructs a new HistogramLogReader that produces intervals read from the specified file name. * @param inputFileName The name of the file to read from * @throws java.io.FileNotFoundException when unable to find inputFileName */ public HistogramLogReader(final String inputFileName) throws FileNotFoundException { scanner = new HistogramLogScanner(new File(inputFileName)); } /** * Constructs a new HistogramLogReader that produces intervals read from the specified InputStream. * @param inputStream The InputStream to read from */ public HistogramLogReader(final InputStream inputStream) { scanner = new HistogramLogScanner(inputStream); } /** * Constructs a new HistogramLogReader that produces intervals read from the specified file. * @param inputFile The File to read from * @throws java.io.FileNotFoundException when unable to find inputFile */ public HistogramLogReader(final File inputFile) throws FileNotFoundException { scanner = new HistogramLogScanner(inputFile); } /** * get the latest start time found in the file so far (or 0.0), * per the log file format explained above. Assuming the "#[StartTime:" comment * line precedes the actual intervals recorded in the file, getStartTimeSec() can * be safely used after each interval is read to determine the offset of that * interval's timestamp from the epoch. 
* @return latest Start Time found in the file (or 0.0 if non found) */ public double getStartTimeSec() { return startTimeSec; } /** * Read the next interval histogram from the log, if interval falls within a time range. * <p> * Returns a histogram object if an interval line was found with an * associated start timestamp value that falls between startTimeSec and * endTimeSec, or null if no such interval line is found. Note that * the range is assumed to be in seconds relative to the actual * timestamp value found in each interval line in the log, and not * in absolute time. * <p> * Timestamps are assumed to appear in order in the log file, and as such * this method will return a null upon encountering a timestamp larger than * rangeEndTimeSec. * <p> * The histogram returned will have it's timestamp set to the absolute * timestamp calculated from adding the interval's indicated timestamp * value to the latest [optional] start time found in the log. * <p> * Upon encountering any unexpected format errors in reading the next * interval from the file, this method will return a null. Use {@link #hasNext} to determine * whether or not additional intervals may be available for reading in the log input. * * @param startTimeSec The (non-absolute time) start of the expected * time range, in seconds. * @param endTimeSec The (non-absolute time) end of the expected time * range, in seconds. * @return a histogram, or a null if no appropriate interval found */ public EncodableHistogram nextIntervalHistogram(final double startTimeSec, final double endTimeSec) { return nextIntervalHistogram(startTimeSec, endTimeSec, false); } /** * Read the next interval histogram from the log, if interval falls within an absolute time range * <p> * Returns a histogram object if an interval line was found with an * associated absolute start timestamp value that falls between * absoluteStartTimeSec and absoluteEndTimeSec, or null if no such * interval line is found. * <p> * Timestamps are assumed to appear in order in the log file, and as such * this method will return a null upon encountering a timestamp larger than * rangeEndTimeSec. * <p> * The histogram returned will have it's timestamp set to the absolute * timestamp calculated from adding the interval's indicated timestamp * value to the latest [optional] start time found in the log. * <p> * Absolute timestamps are calculated by adding the timestamp found * with the recorded interval to the [latest, optional] start time * found in the log. The start time is indicated in the log with * a "#[StartTime: " followed by the start time in seconds. * <p> * Upon encountering any unexpected format errors in reading the next * interval from the file, this method will return a null. Use {@link #hasNext} to determine * whether or not additional intervals may be available for reading in the log input. * * @param absoluteStartTimeSec The (absolute time) start of the expected * time range, in seconds. * @param absoluteEndTimeSec The (absolute time) end of the expected * time range, in seconds. * @return A histogram, or a null if no appropriate interval found */ public EncodableHistogram nextAbsoluteIntervalHistogram(final double absoluteStartTimeSec, final double absoluteEndTimeSec) { return nextIntervalHistogram(absoluteStartTimeSec, absoluteEndTimeSec, true); } /** * Read the next interval histogram from the log. Returns a Histogram object if * an interval line was found, or null if not. 
* <p>Upon encountering any unexpected format errors in reading the next interval * from the input, this method will return a null. Use {@link #hasNext} to determine * whether or not additional intervals may be available for reading in the log input. * @return a DecodedInterval, or a null if no appropriately formatted interval was found */ public EncodableHistogram nextIntervalHistogram() { return nextIntervalHistogram(0.0, Long.MAX_VALUE * 1.0, true); } private EncodableHistogram nextIntervalHistogram(final double rangeStartTimeSec, final double rangeEndTimeSec, boolean absolute) { this.rangeStartTimeSec = rangeStartTimeSec; this.rangeEndTimeSec = rangeEndTimeSec; this.absolute = absolute; scanner.process(handler); EncodableHistogram histogram = this.nextHistogram; nextHistogram = null; return histogram; } /** * Indicates whether or not additional intervals may exist in the log * @return true if additional intervals may exist in the log */ public boolean hasNext() { return scanner.hasNextLine(); } @Override public void close() { scanner.close(); } }
13,431
42.61039
119
java
null
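A hedged sketch of reading an interval log with the HistogramLogReader API documented above, accumulating the intervals that fall in a relative time range into a single histogram (the "add only the relevant interval histograms" idea from the class javadoc). The log file name "latency.hlog" and the 0 to 60 second range are assumptions for illustration.

import java.io.File;
import java.io.FileNotFoundException;
import org.HdrHistogram.EncodableHistogram;
import org.HdrHistogram.Histogram;
import org.HdrHistogram.HistogramLogReader;

public class LogReaderExample {
    public static void main(String[] args) throws FileNotFoundException {
        // "latency.hlog" is a hypothetical interval log produced by a HistogramLogWriter.
        HistogramLogReader reader = new HistogramLogReader(new File("latency.hlog"));

        // Accumulate all intervals whose relative start timestamp falls in [0s, 60s):
        Histogram accumulated = new Histogram(3); // auto-resizing, 3 significant digits
        while (reader.hasNext()) {
            EncodableHistogram interval = reader.nextIntervalHistogram(0.0, 60.0);
            if (interval == null) {
                break; // past the requested range, or a malformed line was encountered
            }
            if (interval instanceof Histogram) {
                accumulated.add((Histogram) interval);
            }
        }
        System.out.println("p99 over range: " + accumulated.getValueAtPercentile(99.0));
        reader.close();
    }
}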
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/DoubleAllValuesIterator.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.util.Iterator; /** * Used for iterating through {@link DoubleHistogram} values using the finest granularity steps supported by the * underlying representation. The iteration steps through all possible unit value levels, regardless of whether or not * there were recorded values for that value level, and terminates when all recorded histogram values are exhausted. */ public class DoubleAllValuesIterator implements Iterator<DoubleHistogramIterationValue> { private final AllValuesIterator integerAllValuesIterator; private final DoubleHistogramIterationValue iterationValue; DoubleHistogram histogram; /** * Reset iterator for re-use in a fresh iteration over the same histogram data set. */ public void reset() { integerAllValuesIterator.reset(); } /** * @param histogram The histogram this iterator will operate on */ public DoubleAllValuesIterator(final DoubleHistogram histogram) { this.histogram = histogram; integerAllValuesIterator = new AllValuesIterator(histogram.integerValuesHistogram); iterationValue = new DoubleHistogramIterationValue(integerAllValuesIterator.currentIterationValue); } @Override public boolean hasNext() { return integerAllValuesIterator.hasNext(); } @Override public DoubleHistogramIterationValue next() { integerAllValuesIterator.next(); return iterationValue; } @Override public void remove() { integerAllValuesIterator.remove(); } }
1,750
31.425926
118
java
null
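A short sketch of the all-values iteration that DoubleAllValuesIterator backs, driven through DoubleHistogram.allValues() as described in the class javadoc above. The recorded values and the 3-significant-digit precision are illustrative assumptions.

import org.HdrHistogram.DoubleHistogram;
import org.HdrHistogram.DoubleHistogramIterationValue;

public class DoubleAllValuesExample {
    public static void main(String[] args) {
        DoubleHistogram histogram = new DoubleHistogram(3); // auto-ranging, 3 significant digits
        histogram.recordValue(0.5);
        histogram.recordValue(1.25);
        histogram.recordValue(1.25);

        // allValues() steps through every unit value level, including levels with a zero count,
        // using a DoubleAllValuesIterator under the hood; here only non-zero counts are printed:
        for (DoubleHistogramIterationValue v : histogram.allValues()) {
            if (v.getCountAtValueIteratedTo() > 0) {
                System.out.printf("value <= %.3f : count %d%n",
                        v.getValueIteratedTo(), v.getCountAtValueIteratedTo());
            }
        }
    }
}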
NearPMSW-main/baseline/logging/YCSB/HdrHistogram/src/main/java/org/HdrHistogram/DoubleRecorder.java
/** * Written by Gil Tene of Azul Systems, and released to the public domain, * as explained at http://creativecommons.org/publicdomain/zero/1.0/ * * @author Gil Tene */ package org.HdrHistogram; import java.util.concurrent.atomic.AtomicLong; /** * Records floating point (double) values, and provides stable * interval {@link DoubleHistogram} samples from live recorded data without interrupting or stalling active recording * of values. Each interval histogram provided contains all value counts accumulated since the * previous interval histogram was taken. * <p> * This pattern is commonly used in logging interval histogram information while recording is ongoing. * <p> * {@link DoubleRecorder} supports concurrent * {@link DoubleRecorder#recordValue} or * {@link DoubleRecorder#recordValueWithExpectedInterval} calls. * Recording calls are wait-free on architectures that support atomic increment operations, and * are lock-free on architectures that do not. * <p> * A common pattern for using a {@link DoubleRecorder} looks like this: * <br><pre><code> * DoubleRecorder recorder = new DoubleRecorder(2); // Two decimal point accuracy * DoubleHistogram intervalHistogram = null; * ... * [start of some loop construct that periodically wants to grab an interval histogram] * ... * // Get interval histogram, recycling previous interval histogram: * intervalHistogram = recorder.getIntervalHistogram(intervalHistogram); * histogramLogWriter.outputIntervalHistogram(intervalHistogram); * ... * [end of loop construct] * </code></pre> */ public class DoubleRecorder implements DoubleValueRecorder, IntervalHistogramProvider<DoubleHistogram> { private static AtomicLong instanceIdSequencer = new AtomicLong(1); private final long instanceId = instanceIdSequencer.getAndIncrement(); private final WriterReaderPhaser recordingPhaser = new WriterReaderPhaser(); private volatile ConcurrentDoubleHistogram activeHistogram; private ConcurrentDoubleHistogram inactiveHistogram; /** * Construct an auto-resizing {@link DoubleRecorder} using a precision stated as a number * of significant decimal digits. * <p> * Depending on the value of the <b><code>packed</code></b> parameter {@link DoubleRecorder} can be configured to * track value counts in a packed internal representation optimized for the typical histogram case in which recorded values are * sparse in the value range and tend to be incremented in small unit counts. This packed representation tends * to require significantly smaller amounts of storage when compared to unpacked representations, but can incur * additional recording cost due to resizing and repacking operations that may * occur as previously unrecorded values are encountered. * * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. * @param packed Specifies whether the recorder will use a packed internal representation or not. */ public DoubleRecorder(final int numberOfSignificantValueDigits, boolean packed) { activeHistogram = packed ? new PackedInternalConcurrentDoubleHistogram(instanceId, numberOfSignificantValueDigits) : new InternalConcurrentDoubleHistogram(instanceId, numberOfSignificantValueDigits); inactiveHistogram = null; activeHistogram.setStartTimeStamp(System.currentTimeMillis()); } /** * Construct an auto-resizing {@link DoubleRecorder} using a precision stated as a number * of significant decimal digits. 
 * * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public DoubleRecorder(final int numberOfSignificantValueDigits) { this(numberOfSignificantValueDigits, false); } /** * Construct a {@link DoubleRecorder} with a dynamic range of values to cover and a number of significant * decimal digits. * * @param highestToLowestValueRatio specifies the dynamic range to use (as a ratio) * @param numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant * decimal digits to which the histogram will maintain value resolution * and separation. Must be a non-negative integer between 0 and 5. */ public DoubleRecorder(final long highestToLowestValueRatio, final int numberOfSignificantValueDigits) { activeHistogram = new InternalConcurrentDoubleHistogram( instanceId, highestToLowestValueRatio, numberOfSignificantValueDigits); inactiveHistogram = null; activeHistogram.setStartTimeStamp(System.currentTimeMillis()); } /** * Record a value * @param value the value to record * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue */ @Override public void recordValue(final double value) { long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); try { activeHistogram.recordValue(value); } finally { recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); } } /** * Record a value in the histogram (adding to the value's current count) * * @param value The value to be recorded * @param count The number of occurrences of this value to record * @throws ArrayIndexOutOfBoundsException (may throw) if value exceeds highestTrackableValue */ @Override public void recordValueWithCount(final double value, final long count) throws ArrayIndexOutOfBoundsException { long criticalValueAtEnter = recordingPhaser.writerCriticalSectionEnter(); try { activeHistogram.recordValueWithCount(value, count); } finally { recordingPhaser.writerCriticalSectionExit(criticalValueAtEnter); } } /** * Record a value * <p> * To compensate for the loss of sampled values when a recorded value is larger than the expected * interval between value samples, Histogram will auto-generate an additional series of decreasingly-smaller * (down to the expectedIntervalBetweenValueSamples) value records. * <p> * See related notes {@link org.HdrHistogram.DoubleHistogram#recordValueWithExpectedInterval(double, double)} * for more explanations about coordinated omission and expected interval correction. 
    @Override
    public synchronized DoubleHistogram getIntervalHistogram() {
        return getIntervalHistogram(null);
    }

    @Override
    public synchronized DoubleHistogram getIntervalHistogram(DoubleHistogram histogramToRecycle) {
        return getIntervalHistogram(histogramToRecycle, true);
    }

    @Override
    public synchronized DoubleHistogram getIntervalHistogram(DoubleHistogram histogramToRecycle,
                                                             boolean enforceContainingInstance) {
        // Verify that replacement histogram can validly be used as an inactive histogram replacement:
        validateFitAsReplacementHistogram(histogramToRecycle, enforceContainingInstance);
        inactiveHistogram = (ConcurrentDoubleHistogram) histogramToRecycle;
        performIntervalSample();
        DoubleHistogram sampledHistogram = inactiveHistogram;
        inactiveHistogram = null; // Once we expose the sample, we can't reuse it internally until it is recycled
        return sampledHistogram;
    }

    @Override
    public synchronized void getIntervalHistogramInto(DoubleHistogram targetHistogram) {
        performIntervalSample();
        inactiveHistogram.copyInto(targetHistogram);
    }

    /**
     * Reset any value counts accumulated thus far.
     */
    @Override
    public synchronized void reset() {
        // the currently inactive histogram is reset each time we flip. So flipping twice resets both:
        performIntervalSample();
        performIntervalSample();
    }
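    // Illustrative sketch (editorial addition, not part of the original source): besides the recycling loop
    // shown in the class Javadoc, interval samples can be folded into a caller-owned running total.
    // "recorder" and "accumulatedHistogram" are hypothetical caller-side objects.
    //
    //     DoubleHistogram intervalHistogram = null;
    //     intervalHistogram = recorder.getIntervalHistogram(intervalHistogram); // recycle across calls
    //     accumulatedHistogram.add(intervalHistogram);                          // maintain a running total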
    private void performIntervalSample() {
        try {
            recordingPhaser.readerLock();

            // Make sure we have an inactive version to flip in:
            if (inactiveHistogram == null) {
                if (activeHistogram instanceof InternalConcurrentDoubleHistogram) {
                    inactiveHistogram = new InternalConcurrentDoubleHistogram(
                            (InternalConcurrentDoubleHistogram) activeHistogram);
                } else if (activeHistogram instanceof PackedInternalConcurrentDoubleHistogram) {
                    inactiveHistogram = new PackedInternalConcurrentDoubleHistogram(
                            instanceId, activeHistogram.getNumberOfSignificantValueDigits());
                } else {
                    throw new IllegalStateException("Unexpected internal histogram type for activeHistogram");
                }
            }

            inactiveHistogram.reset();

            // Swap active and inactive histograms:
            final ConcurrentDoubleHistogram tempHistogram = inactiveHistogram;
            inactiveHistogram = activeHistogram;
            activeHistogram = tempHistogram;

            // Mark end time of previous interval and start time of new one:
            long now = System.currentTimeMillis();
            activeHistogram.setStartTimeStamp(now);
            inactiveHistogram.setEndTimeStamp(now);

            // Make sure we are not in the middle of recording a value on the previously active histogram:

            // Flip phase to make sure no recordings that were in flight pre-flip are still active:
            recordingPhaser.flipPhase(500000L /* yield in 0.5 msec units if needed */);
        } finally {
            recordingPhaser.readerUnlock();
        }
    }

    private static class InternalConcurrentDoubleHistogram extends ConcurrentDoubleHistogram {
        private final long containingInstanceId;

        private InternalConcurrentDoubleHistogram(long id, int numberOfSignificantValueDigits) {
            super(numberOfSignificantValueDigits);
            this.containingInstanceId = id;
        }

        private InternalConcurrentDoubleHistogram(long id,
                                                  long highestToLowestValueRatio,
                                                  int numberOfSignificantValueDigits) {
            super(highestToLowestValueRatio, numberOfSignificantValueDigits);
            this.containingInstanceId = id;
        }

        private InternalConcurrentDoubleHistogram(InternalConcurrentDoubleHistogram source) {
            super(source);
            this.containingInstanceId = source.containingInstanceId;
        }
    }

    private static class PackedInternalConcurrentDoubleHistogram extends PackedConcurrentDoubleHistogram {
        private final long containingInstanceId;

        private PackedInternalConcurrentDoubleHistogram(long id, int numberOfSignificantValueDigits) {
            super(numberOfSignificantValueDigits);
            this.containingInstanceId = id;
        }
    }

    private void validateFitAsReplacementHistogram(DoubleHistogram replacementHistogram,
                                                   boolean enforceContainingInstance) {
        boolean bad = true;
        if (replacementHistogram == null) {
            bad = false;
        } else if ((replacementHistogram instanceof InternalConcurrentDoubleHistogram)
                && ((!enforceContainingInstance) ||
                    (((InternalConcurrentDoubleHistogram) replacementHistogram).containingInstanceId ==
                     ((InternalConcurrentDoubleHistogram) activeHistogram).containingInstanceId))) {
            bad = false;
        } else if ((replacementHistogram instanceof PackedInternalConcurrentDoubleHistogram)
                && ((!enforceContainingInstance) ||
                    (((PackedInternalConcurrentDoubleHistogram) replacementHistogram).containingInstanceId ==
                     ((PackedInternalConcurrentDoubleHistogram) activeHistogram).containingInstanceId))) {
            bad = false;
        }
        if (bad) {
            throw new IllegalArgumentException("replacement histogram must have been obtained via a previous" +
                    " getIntervalHistogram() call from this " + this.getClass().getName() + " instance");
        }
    }
}
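// Editorial usage sketch, not part of the original HdrHistogram source: a minimal, self-contained program
// showing the intended writer/reader split, under the assumption that it lives in the org.HdrHistogram
// package alongside DoubleRecorder. One thread records synthetic values while the main thread periodically
// takes interval histograms, recycling the previous sample. Class and variable names are illustrative only.
class DoubleRecorderIntervalLoggingSketch {
    public static void main(String[] args) throws InterruptedException {
        final DoubleRecorder recorder = new DoubleRecorder(2); // two significant decimal digits

        // Writer thread: records a synthetic value roughly every millisecond.
        Thread writer = new Thread(() -> {
            for (int i = 0; i < 5_000; i++) {
                recorder.recordValue(Math.random() * 100.0);
                try {
                    Thread.sleep(1);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        });
        writer.start();

        // Reader side: take an interval histogram once per second, recycling the previous sample.
        DoubleHistogram intervalHistogram = null;
        for (int interval = 0; interval < 5; interval++) {
            Thread.sleep(1000);
            intervalHistogram = recorder.getIntervalHistogram(intervalHistogram);
            System.out.printf("interval %d: count=%d, max=%.2f%n",
                    interval, intervalHistogram.getTotalCount(), intervalHistogram.getMaxValue());
        }
        writer.join();
    }
}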
14,003
46.471186
118
java