diff --git a/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java b/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java index ceaf53a2df..b0115a9328 100644 --- a/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java +++ b/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java @@ -223,6 +223,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { diff --git a/aerospike/src/main/java/site/ycsb/db/AerospikeClient.java b/aerospike/src/main/java/site/ycsb/db/AerospikeClient.java index 57c4c788f2..b3f8025d0f 100644 --- a/aerospike/src/main/java/site/ycsb/db/AerospikeClient.java +++ b/aerospike/src/main/java/site/ycsb/db/AerospikeClient.java @@ -129,6 +129,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String table, String start, int count, Set fields, Vector> result) { diff --git a/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java b/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java index 3f4a86afeb..f9459b4bb7 100644 --- a/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java +++ b/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java @@ -225,6 +225,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + 
public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record diff --git a/asynchbase/src/main/java/site/ycsb/db/AsyncHBaseClient.java b/asynchbase/src/main/java/site/ycsb/db/AsyncHBaseClient.java index 98ac4e47e0..5142839bd6 100644 --- a/asynchbase/src/main/java/site/ycsb/db/AsyncHBaseClient.java +++ b/asynchbase/src/main/java/site/ycsb/db/AsyncHBaseClient.java @@ -246,6 +246,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { diff --git a/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java b/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java index 62519e6636..8b727be7ff 100644 --- a/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java +++ b/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java @@ -319,6 +319,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. 
diff --git a/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java b/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java index 7745920d5e..8397d66a20 100644 --- a/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java +++ b/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java @@ -116,6 +116,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { diff --git a/cassandra/src/main/java/site/ycsb/db/CassandraCQLClient.java b/cassandra/src/main/java/site/ycsb/db/CassandraCQLClient.java index 939b5951f9..3522dacb2a 100644 --- a/cassandra/src/main/java/site/ycsb/db/CassandraCQLClient.java +++ b/cassandra/src/main/java/site/ycsb/db/CassandraCQLClient.java @@ -349,6 +349,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. 
diff --git a/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java b/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java index 45ec3dc1ef..15c3e4758a 100644 --- a/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java +++ b/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java @@ -271,6 +271,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + private Status scanUsingQuery( String table, String startKey, int recordCount, Set fields, Vector> result) { diff --git a/core/src/main/java/site/ycsb/BasicDB.java b/core/src/main/java/site/ycsb/BasicDB.java index 043b6201a9..98ce0d5d15 100644 --- a/core/src/main/java/site/ycsb/BasicDB.java +++ b/core/src/main/java/site/ycsb/BasicDB.java @@ -166,6 +166,13 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. diff --git a/core/src/main/java/site/ycsb/DB.java b/core/src/main/java/site/ycsb/DB.java index 9a61729c05..abf084be5f 100644 --- a/core/src/main/java/site/ycsb/DB.java +++ b/core/src/main/java/site/ycsb/DB.java @@ -99,6 +99,16 @@ public void cleanup() throws DBException { public abstract Status batchRead(String table, List keys, List> fields, HashMap> result); + /** + * Batch update records. 
+ * + * @param table The name of the table + * @param keys record keys + * @param values A list of HashMaps of field/value pairs to update in the record + * @return The result of the operation. + */ + public abstract Status batchUpdate(String table, List keys, List> values); + /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. diff --git a/core/src/main/java/site/ycsb/DBWrapper.java b/core/src/main/java/site/ycsb/DBWrapper.java index 2783b26e2d..53d84aa0ea 100644 --- a/core/src/main/java/site/ycsb/DBWrapper.java +++ b/core/src/main/java/site/ycsb/DBWrapper.java @@ -161,6 +161,20 @@ public Status batchRead(String table, List keys, List> field } } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + try (final TraceScope span = tracer.newScope(scopeStringUpdate)) { + long ist = measurements.getIntendedStartTimeNs(); + long st = System.nanoTime(); + Status res = db.batchUpdate(table, keys, values); + long en = System.nanoTime(); + measure("BATCH_UPDATE", res, ist, st, en); + measurements.reportStatus("BATCH_UPDATE", res); + return res; + } + } + /** * Perform a range scan for a set of records in the database. * Each field/value pair from the result will be stored in a HashMap. diff --git a/core/src/main/java/site/ycsb/GoodBadUglyDB.java b/core/src/main/java/site/ycsb/GoodBadUglyDB.java index 733b93df93..de0b718f1d 100644 --- a/core/src/main/java/site/ycsb/GoodBadUglyDB.java +++ b/core/src/main/java/site/ycsb/GoodBadUglyDB.java @@ -103,6 +103,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Perform a range scan for a set of records in the database. 
Each field/value pair from the result will be stored * in a HashMap. diff --git a/core/src/main/java/site/ycsb/measurements/OneMeasurementHdrHistogram.java b/core/src/main/java/site/ycsb/measurements/OneMeasurementHdrHistogram.java index 989e8b860a..206f1bbfe5 100644 --- a/core/src/main/java/site/ycsb/measurements/OneMeasurementHdrHistogram.java +++ b/core/src/main/java/site/ycsb/measurements/OneMeasurementHdrHistogram.java @@ -71,6 +71,7 @@ public class OneMeasurementHdrHistogram extends OneMeasurement { private final List percentiles; private final int readBatchSize; + private final int updateBatchSize; public OneMeasurementHdrHistogram(String name, Properties props) { super(name); @@ -78,6 +79,8 @@ public OneMeasurementHdrHistogram(String name, Properties props) { verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false))); readBatchSize =Integer.valueOf(props.getProperty(CoreWorkload.READ_BATCH_SIZE_PROPERTY, CoreWorkload.READ_BATCH_SIZE_PROPERTY_DEFAULT)); + updateBatchSize =Integer.valueOf(props.getProperty(CoreWorkload.UPDATE_BATCH_SIZE_PROPERTY, + CoreWorkload.UPDATE_BATCH_SIZE_PROPERTY_DEFAULT)); boolean shouldLog = Boolean.parseBoolean(props.getProperty("hdrhistogram.fileoutput", "false")); if (!shouldLog) { log = null; @@ -127,6 +130,11 @@ public void exportMeasurements(MeasurementsExporter exporter) throws IOException prepend = "Batch"; } + if (getName().compareTo("BATCH_UPDATE") == 0) { + exporter.write(getName(), "BatchSize", updateBatchSize); + prepend = "Batch"; + } + exporter.write(getName(), prepend+"Operations", totalHistogram.getTotalCount()); exporter.write(getName(), prepend+"AverageLatency(us)", totalHistogram.getMean()); exporter.write(getName(), prepend+"MinLatency(us)", totalHistogram.getMinValue()); diff --git a/core/src/main/java/site/ycsb/measurements/OneMeasurementHistogram.java b/core/src/main/java/site/ycsb/measurements/OneMeasurementHistogram.java index eb4f9a9bb1..4aa3e2922e 100644 --- 
a/core/src/main/java/site/ycsb/measurements/OneMeasurementHistogram.java +++ b/core/src/main/java/site/ycsb/measurements/OneMeasurementHistogram.java @@ -80,6 +80,7 @@ public class OneMeasurementHistogram extends OneMeasurement { private int max; private final int readBatchSize; + private final int updateBatchSize; public OneMeasurementHistogram(String name, Properties props) { super(name); @@ -87,6 +88,8 @@ public OneMeasurementHistogram(String name, Properties props) { verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false))); readBatchSize = Integer.valueOf(props.getProperty(CoreWorkload.READ_BATCH_SIZE_PROPERTY, CoreWorkload.READ_BATCH_SIZE_PROPERTY_DEFAULT)); + updateBatchSize = Integer.valueOf(props.getProperty(CoreWorkload.UPDATE_BATCH_SIZE_PROPERTY, + CoreWorkload.UPDATE_BATCH_SIZE_PROPERTY_DEFAULT)); histogram = new long[buckets]; histogramoverflow = 0; operations = 0; @@ -131,6 +134,9 @@ public void exportMeasurements(MeasurementsExporter exporter) throws IOException if (getName().compareTo("BATCH_READ") == 0) { exporter.write(getName(), "BatchSize", readBatchSize); } + if (getName().compareTo("BATCH_UPDATE") == 0) { + exporter.write(getName(), "BatchSize", updateBatchSize); + } exporter.write(getName(), "Operations", operations); exporter.write(getName(), "AverageLatency(us)", mean); exporter.write(getName(), "LatencyVariance(us)", variance); diff --git a/core/src/main/java/site/ycsb/measurements/OneMeasurementTimeSeries.java b/core/src/main/java/site/ycsb/measurements/OneMeasurementTimeSeries.java index de7d6e1fba..018dcacc48 100644 --- a/core/src/main/java/site/ycsb/measurements/OneMeasurementTimeSeries.java +++ b/core/src/main/java/site/ycsb/measurements/OneMeasurementTimeSeries.java @@ -68,6 +68,7 @@ public class OneMeasurementTimeSeries extends OneMeasurement { private int min = -1; private int max = -1; private final int readBatchSize; + private final int updateBatchSize; public OneMeasurementTimeSeries(String name, 
Properties props) { super(name); @@ -75,6 +76,8 @@ public OneMeasurementTimeSeries(String name, Properties props) { measurements = new Vector<>(); readBatchSize = Integer.valueOf(props.getProperty(CoreWorkload.READ_BATCH_SIZE_PROPERTY, CoreWorkload.READ_BATCH_SIZE_PROPERTY_DEFAULT)); + updateBatchSize = Integer.valueOf(props.getProperty(CoreWorkload.UPDATE_BATCH_SIZE_PROPERTY, + CoreWorkload.UPDATE_BATCH_SIZE_PROPERTY_DEFAULT)); } private synchronized void checkEndOfUnit(boolean forceend) { @@ -128,6 +131,11 @@ public void exportMeasurements(MeasurementsExporter exporter) throws IOException exporter.write(getName(), "BatchSize", readBatchSize); prepend = "Batch"; } + + if (getName().compareTo("BATCH_UPDATE") == 0) { + exporter.write(getName(), "BatchSize", updateBatchSize); + prepend = "Batch"; + } exporter.write(getName(), prepend+"Operations", operations); exporter.write(getName(), prepend+"AverageLatency(us)", (((double) totallatency) / ((double) operations))); exporter.write(getName(), prepend+"MinLatency(us)", min); diff --git a/core/src/main/java/site/ycsb/workloads/CoreWorkload.java b/core/src/main/java/site/ycsb/workloads/CoreWorkload.java index f7035bf687..7bd6805eac 100644 --- a/core/src/main/java/site/ycsb/workloads/CoreWorkload.java +++ b/core/src/main/java/site/ycsb/workloads/CoreWorkload.java @@ -221,6 +221,16 @@ public class CoreWorkload extends Workload { */ public static final String READ_BATCH_SIZE_PROPERTY_DEFAULT = "1"; + /** + * The name of the property for the size of update batches. + */ + public static final String UPDATE_BATCH_SIZE_PROPERTY = "updateBatchSize"; + + /** + * The default update batch. + */ + public static final String UPDATE_BATCH_SIZE_PROPERTY_DEFAULT = "1"; + /** * The name of the property for the proportion of transactions that are updates. 
*/ @@ -380,6 +390,7 @@ public class CoreWorkload extends Workload { protected int insertionRetryLimit; protected int insertionRetryInterval; protected int readBatchSize; + protected int updateBatchSize; private Measurements measurements = Measurements.getMeasurements(); @@ -562,6 +573,11 @@ public void init(Properties p) throws WorkloadException { if (readBatchSize <= 0) { throw new WorkloadException("Invalid read batch size \"" + readBatchSize + "\""); } + updateBatchSize = Integer.parseInt(p.getProperty( + UPDATE_BATCH_SIZE_PROPERTY, UPDATE_BATCH_SIZE_PROPERTY_DEFAULT)); + if (updateBatchSize <= 0) { + throw new WorkloadException("Invalid update batch size \"" + updateBatchSize + "\""); + } } /** @@ -685,7 +701,11 @@ public boolean doTransaction(DB db, Object threadstate) { } break; case "UPDATE": - doTransactionUpdate(db); + if (updateBatchSize == 1) { + doTransactionUpdate(db); + } else { + doTransactionBatchUpdate(db); + } break; case "INSERT": doTransactionInsert(db); @@ -893,6 +913,33 @@ public void doTransactionUpdate(DB db) { db.update(table, keyname, values); } + public void doTransactionBatchUpdate(DB db) { + LinkedList keys = new LinkedList<>(); + LinkedList> allValues = new LinkedList<>(); + + for (int i = 0; i < updateBatchSize; i++) { + // choose a random key + long keynum = nextKeynum(); + + String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts); + + HashMap values; + + if (writeallfields) { + // new data for all the fields + values = buildValues(keyname); + } else { + // update a random field + values = buildSingleValue(keyname); + } + + keys.add(keyname); + allValues.add(values); + } + + db.batchUpdate(table, keys, allValues); + } + public void doTransactionInsert(DB db) { // choose the next key long keynum = transactioninsertkeysequence.nextValue(); diff --git a/core/src/main/java/site/ycsb/workloads/RestWorkload.java b/core/src/main/java/site/ycsb/workloads/RestWorkload.java index 4d01000d0f..1353491365 100644 --- 
a/core/src/main/java/site/ycsb/workloads/RestWorkload.java +++ b/core/src/main/java/site/ycsb/workloads/RestWorkload.java @@ -152,6 +152,11 @@ public void init(Properties p) throws WorkloadException { if (readBatchSize <= 0) { throw new WorkloadException("Invalid read batch size \"" + readBatchSize + "\""); } + updateBatchSize = Integer.parseInt(p.getProperty( + UPDATE_BATCH_SIZE_PROPERTY, UPDATE_BATCH_SIZE_PROPERTY_DEFAULT)); + if (updateBatchSize <= 0) { + throw new WorkloadException("Invalid update batch size \"" + updateBatchSize + "\""); + } } public static DiscreteGenerator createOperationGenerator(final Properties p) { diff --git a/core/src/test/java/site/ycsb/workloads/TestTimeSeriesWorkload.java b/core/src/test/java/site/ycsb/workloads/TestTimeSeriesWorkload.java index 1f76040dfe..13925a1d23 100644 --- a/core/src/test/java/site/ycsb/workloads/TestTimeSeriesWorkload.java +++ b/core/src/test/java/site/ycsb/workloads/TestTimeSeriesWorkload.java @@ -531,6 +531,12 @@ public Status batchRead(String table, List keys, List> field return Status.OK; } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + return Status.OK; + } + @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { diff --git a/couchbase/src/main/java/site/ycsb/db/CouchbaseClient.java b/couchbase/src/main/java/site/ycsb/db/CouchbaseClient.java index e15449b2aa..1e797d043b 100644 --- a/couchbase/src/main/java/site/ycsb/db/CouchbaseClient.java +++ b/couchbase/src/main/java/site/ycsb/db/CouchbaseClient.java @@ -201,6 +201,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(final String table, final String startkey, final int 
recordcount, final Set fields, final Vector> result) { diff --git a/couchbase2/src/main/java/site/ycsb/db/couchbase2/Couchbase2Client.java b/couchbase2/src/main/java/site/ycsb/db/couchbase2/Couchbase2Client.java index b489aa9bfc..6bde5be07e 100644 --- a/couchbase2/src/main/java/site/ycsb/db/couchbase2/Couchbase2Client.java +++ b/couchbase2/src/main/java/site/ycsb/db/couchbase2/Couchbase2Client.java @@ -262,6 +262,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Performs the {@link #read(String, String, Set, Map)} operation via Key/Value ("get"). * diff --git a/crail/src/main/java/site/ycsb/db/crail/CrailClient.java b/crail/src/main/java/site/ycsb/db/crail/CrailClient.java index bd1dd71978..0d15a41aa1 100644 --- a/crail/src/main/java/site/ycsb/db/crail/CrailClient.java +++ b/crail/src/main/java/site/ycsb/db/crail/CrailClient.java @@ -129,6 +129,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String table, String startKey, int recordCount, Set fields, Vector> result) { diff --git a/distribution/pom.xml b/distribution/pom.xml index eeada40172..bee92202e4 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -179,11 +179,11 @@ LICENSE file. maprjsondb-binding ${project.version} - - site.ycsb - mongodb-binding - ${project.version} - + + + + + site.ycsb nosqldb-binding @@ -239,11 +239,11 @@ LICENSE file. 
scylla-binding ${project.version} - - site.ycsb - solr7-binding - ${project.version} - + + + + + site.ycsb tarantool-binding diff --git a/dynamodb/src/main/java/site/ycsb/db/DynamoDBClient.java b/dynamodb/src/main/java/site/ycsb/db/DynamoDBClient.java index 5dc651b5a7..15b0361dfa 100644 --- a/dynamodb/src/main/java/site/ycsb/db/DynamoDBClient.java +++ b/dynamodb/src/main/java/site/ycsb/db/DynamoDBClient.java @@ -187,6 +187,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { diff --git a/elasticsearch/src/main/java/site/ycsb/db/ElasticsearchClient.java b/elasticsearch/src/main/java/site/ycsb/db/ElasticsearchClient.java index 1b963a9858..2fdf9d4c4b 100644 --- a/elasticsearch/src/main/java/site/ycsb/db/ElasticsearchClient.java +++ b/elasticsearch/src/main/java/site/ycsb/db/ElasticsearchClient.java @@ -284,6 +284,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Update a record in the database. 
Any field/value pairs in the specified * values HashMap will be written into the record with the specified record diff --git a/elasticsearch5/src/main/java/site/ycsb/db/elasticsearch5/ElasticsearchClient.java b/elasticsearch5/src/main/java/site/ycsb/db/elasticsearch5/ElasticsearchClient.java index cf55864ea1..92ef8c083a 100644 --- a/elasticsearch5/src/main/java/site/ycsb/db/elasticsearch5/ElasticsearchClient.java +++ b/elasticsearch5/src/main/java/site/ycsb/db/elasticsearch5/ElasticsearchClient.java @@ -242,6 +242,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status update(final String table, final String key, final Map values) { try { diff --git a/elasticsearch5/src/main/java/site/ycsb/db/elasticsearch5/ElasticsearchRestClient.java b/elasticsearch5/src/main/java/site/ycsb/db/elasticsearch5/ElasticsearchRestClient.java index 193c8eabd2..b174cd4a46 100644 --- a/elasticsearch5/src/main/java/site/ycsb/db/elasticsearch5/ElasticsearchRestClient.java +++ b/elasticsearch5/src/main/java/site/ycsb/db/elasticsearch5/ElasticsearchRestClient.java @@ -296,6 +296,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status update(final String table, final String key, final Map values) { try { diff --git a/foundationdb/src/main/java/site/ycsb/db/foundationdb/FoundationDBClient.java b/foundationdb/src/main/java/site/ycsb/db/foundationdb/FoundationDBClient.java index 361a739ede..2a82e8cba8 100644 --- 
a/foundationdb/src/main/java/site/ycsb/db/foundationdb/FoundationDBClient.java +++ b/foundationdb/src/main/java/site/ycsb/db/foundationdb/FoundationDBClient.java @@ -228,6 +228,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status update(String table, String key, Map values) { String rowKey = getRowKey(dbName, table, key); diff --git a/geode/src/main/java/site/ycsb/db/GeodeClient.java b/geode/src/main/java/site/ycsb/db/GeodeClient.java index 47b1f8a4a4..688503a6ea 100644 --- a/geode/src/main/java/site/ycsb/db/GeodeClient.java +++ b/geode/src/main/java/site/ycsb/db/GeodeClient.java @@ -184,6 +184,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { diff --git a/googlebigtable/src/main/java/site/ycsb/db/GoogleBigtableClient.java b/googlebigtable/src/main/java/site/ycsb/db/GoogleBigtableClient.java index 8ac7520282..0521394f74 100644 --- a/googlebigtable/src/main/java/site/ycsb/db/GoogleBigtableClient.java +++ b/googlebigtable/src/main/java/site/ycsb/db/GoogleBigtableClient.java @@ -263,6 +263,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override 
public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { diff --git a/googledatastore/src/main/java/site/ycsb/db/GoogleDatastoreClient.java b/googledatastore/src/main/java/site/ycsb/db/GoogleDatastoreClient.java index bd70a72691..8ade61400a 100644 --- a/googledatastore/src/main/java/site/ycsb/db/GoogleDatastoreClient.java +++ b/googledatastore/src/main/java/site/ycsb/db/GoogleDatastoreClient.java @@ -243,6 +243,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { diff --git a/griddb/src/main/java/site/ycsb/db/griddb/GridDBClient.java b/griddb/src/main/java/site/ycsb/db/griddb/GridDBClient.java index 31df082eb8..d97e91c45a 100644 --- a/griddb/src/main/java/site/ycsb/db/griddb/GridDBClient.java +++ b/griddb/src/main/java/site/ycsb/db/griddb/GridDBClient.java @@ -214,6 +214,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { LOGGER.severe("[ERROR]scan() not supported"); diff --git a/hbase1/src/main/java/site/ycsb/db/hbase1/HBaseClient1.java b/hbase1/src/main/java/site/ycsb/db/hbase1/HBaseClient1.java index a95158d3cb..33c4a2593c 100644 --- a/hbase1/src/main/java/site/ycsb/db/hbase1/HBaseClient1.java +++ b/hbase1/src/main/java/site/ycsb/db/hbase1/HBaseClient1.java @@ -300,6 +300,12 @@ public Status 
batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. diff --git a/hbase2/src/main/java/site/ycsb/db/hbase2/HBaseClient2.java b/hbase2/src/main/java/site/ycsb/db/hbase2/HBaseClient2.java index 4a47663385..1f7f937d9d 100644 --- a/hbase2/src/main/java/site/ycsb/db/hbase2/HBaseClient2.java +++ b/hbase2/src/main/java/site/ycsb/db/hbase2/HBaseClient2.java @@ -324,6 +324,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. diff --git a/ignite/src/main/java/site/ycsb/db/ignite/IgniteClient.java b/ignite/src/main/java/site/ycsb/db/ignite/IgniteClient.java index 0e045dd8ed..c13133771c 100644 --- a/ignite/src/main/java/site/ycsb/db/ignite/IgniteClient.java +++ b/ignite/src/main/java/site/ycsb/db/ignite/IgniteClient.java @@ -112,6 +112,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Update a record in the database. 
Any field/value pairs in the specified * values HashMap will be written into the record with the specified record diff --git a/ignite/src/main/java/site/ycsb/db/ignite/IgniteSqlClient.java b/ignite/src/main/java/site/ycsb/db/ignite/IgniteSqlClient.java index 7fa0abc831..c02e250a28 100644 --- a/ignite/src/main/java/site/ycsb/db/ignite/IgniteSqlClient.java +++ b/ignite/src/main/java/site/ycsb/db/ignite/IgniteSqlClient.java @@ -103,6 +103,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record diff --git a/infinispan/src/main/java/site/ycsb/db/InfinispanClient.java b/infinispan/src/main/java/site/ycsb/db/InfinispanClient.java index c166f515a0..6e4f20dfa5 100644 --- a/infinispan/src/main/java/site/ycsb/db/InfinispanClient.java +++ b/infinispan/src/main/java/site/ycsb/db/InfinispanClient.java @@ -95,6 +95,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { LOGGER.warn("Infinispan does not support scan semantics"); diff --git a/infinispan/src/main/java/site/ycsb/db/InfinispanRemoteClient.java b/infinispan/src/main/java/site/ycsb/db/InfinispanRemoteClient.java index 55eca946ca..1d51cfbb27 100644 --- a/infinispan/src/main/java/site/ycsb/db/InfinispanRemoteClient.java +++ 
b/infinispan/src/main/java/site/ycsb/db/InfinispanRemoteClient.java @@ -96,6 +96,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { diff --git a/jdbc/src/main/java/site/ycsb/db/JdbcDBClient.java b/jdbc/src/main/java/site/ycsb/db/JdbcDBClient.java index f895b20e91..f1544edec4 100644 --- a/jdbc/src/main/java/site/ycsb/db/JdbcDBClient.java +++ b/jdbc/src/main/java/site/ycsb/db/JdbcDBClient.java @@ -371,6 +371,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String tableName, String startKey, int recordcount, Set fields, Vector> result) { diff --git a/kudu/src/main/java/site/ycsb/db/KuduYCSBClient.java b/kudu/src/main/java/site/ycsb/db/KuduYCSBClient.java index e0d55c914b..e030e338be 100644 --- a/kudu/src/main/java/site/ycsb/db/KuduYCSBClient.java +++ b/kudu/src/main/java/site/ycsb/db/KuduYCSBClient.java @@ -304,6 +304,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String table, String startkey, diff --git a/maprjsondb/src/main/java/site/ycsb/db/mapr/MapRJSONDBClient.java 
b/maprjsondb/src/main/java/site/ycsb/db/mapr/MapRJSONDBClient.java index e0a282fb17..73901389ed 100644 --- a/maprjsondb/src/main/java/site/ycsb/db/mapr/MapRJSONDBClient.java +++ b/maprjsondb/src/main/java/site/ycsb/db/mapr/MapRJSONDBClient.java @@ -78,6 +78,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { diff --git a/memcached/src/main/java/site/ycsb/db/MemcachedClient.java b/memcached/src/main/java/site/ycsb/db/MemcachedClient.java index 5e94f31915..0df6497c36 100644 --- a/memcached/src/main/java/site/ycsb/db/MemcachedClient.java +++ b/memcached/src/main/java/site/ycsb/db/MemcachedClient.java @@ -196,6 +196,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan( String table, String startkey, int recordcount, Set fields, diff --git a/nosqldb/src/main/java/site/ycsb/db/NoSqlDbClient.java b/nosqldb/src/main/java/site/ycsb/db/NoSqlDbClient.java index a3144d719f..ce256bc212 100644 --- a/nosqldb/src/main/java/site/ycsb/db/NoSqlDbClient.java +++ b/nosqldb/src/main/java/site/ycsb/db/NoSqlDbClient.java @@ -203,6 +203,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet 
supported"); + } + @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { diff --git a/orientdb/src/main/java/site/ycsb/db/OrientDBClient.java b/orientdb/src/main/java/site/ycsb/db/OrientDBClient.java index 213ba84047..1276c56e28 100644 --- a/orientdb/src/main/java/site/ycsb/db/OrientDBClient.java +++ b/orientdb/src/main/java/site/ycsb/db/OrientDBClient.java @@ -256,6 +256,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status update(String table, String key, Map values) { while (true) { diff --git a/pom.xml b/pom.xml index 022de957d9..87187a4334 100644 --- a/pom.xml +++ b/pom.xml @@ -130,7 +130,7 @@ LICENSE file. 4.0.0 1.4.12 2.2.3 - 4.5.13 + 4.5.14 2.7.6 7.2.2.Final 1.11.1 diff --git a/postgrenosql/src/main/java/site/ycsb/postgrenosql/PostgreNoSQLDBClient.java b/postgrenosql/src/main/java/site/ycsb/postgrenosql/PostgreNoSQLDBClient.java index f5e464e116..a1ac438b96 100644 --- a/postgrenosql/src/main/java/site/ycsb/postgrenosql/PostgreNoSQLDBClient.java +++ b/postgrenosql/src/main/java/site/ycsb/postgrenosql/PostgreNoSQLDBClient.java @@ -179,6 +179,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String tableName, String startKey, int recordcount, Set fields, Vector> result) { diff --git a/rados/src/main/java/site/ycsb/db/RadosClient.java b/rados/src/main/java/site/ycsb/db/RadosClient.java index 28ec58b3a5..b172378372 100644 --- 
a/rados/src/main/java/site/ycsb/db/RadosClient.java +++ b/rados/src/main/java/site/ycsb/db/RadosClient.java @@ -140,6 +140,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status insert(String table, String key, Map values) { JSONObject json = new JSONObject(); diff --git a/redis/src/main/java/site/ycsb/db/RedisClient.java b/redis/src/main/java/site/ycsb/db/RedisClient.java index 241dde738f..2afd9912a0 100644 --- a/redis/src/main/java/site/ycsb/db/RedisClient.java +++ b/redis/src/main/java/site/ycsb/db/RedisClient.java @@ -139,6 +139,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status insert(String table, String key, Map values) { diff --git a/rest/src/main/java/site/ycsb/webservice/rest/RestClient.java b/rest/src/main/java/site/ycsb/webservice/rest/RestClient.java index 265b103ce4..c2842017a1 100644 --- a/rest/src/main/java/site/ycsb/webservice/rest/RestClient.java +++ b/rest/src/main/java/site/ycsb/webservice/rest/RestClient.java @@ -120,6 +120,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status insert(String table, String endpoint, Map values) { int responseCode; diff --git 
a/riak/src/main/java/site/ycsb/db/riak/RiakKVClient.java b/riak/src/main/java/site/ycsb/db/riak/RiakKVClient.java index 97f66a5b97..6d29218821 100644 --- a/riak/src/main/java/site/ycsb/db/riak/RiakKVClient.java +++ b/riak/src/main/java/site/ycsb/db/riak/RiakKVClient.java @@ -271,6 +271,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored in * a HashMap. diff --git a/rocksdb/src/main/java/site/ycsb/db/rocksdb/RocksDBClient.java b/rocksdb/src/main/java/site/ycsb/db/rocksdb/RocksDBClient.java index 3f79037e8e..5992331d1e 100644 --- a/rocksdb/src/main/java/site/ycsb/db/rocksdb/RocksDBClient.java +++ b/rocksdb/src/main/java/site/ycsb/db/rocksdb/RocksDBClient.java @@ -237,6 +237,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(final String table, final String startkey, final int recordcount, final Set fields, final Vector> result) { diff --git a/rondb/README.md b/rondb/README.md index 499415a337..4a9ca696bc 100644 --- a/rondb/README.md +++ b/rondb/README.md @@ -59,15 +59,15 @@ This section describes how to run YCSB on RonDB. 6. Customise workload configuration - Specify the desired benchmark configurations using a custom or pre-defined [workload file](../workloads/). - Inside the workload file, define RonDB-specific parameters: - - *rondb.api.type* i.e. clusterj, REST, gRPC. 
Default: clusterj + Specify the desired benchmark configurations using a custom or + pre-defined [workload file](../workloads/). This module supports three different APIs to access RonDB. These APIs are + ClusterJ, REST, and gRPC. Currently, REST and gRPC only supports read-only operations. For + workloads that involve both read and write operations, all write operations are performed + using clusterJ API while you can choose an API type for read operations, for example, + - *rondb.read.api.type* i.e. clusterj, REST, gRPC. Default: clusterj - - *ClusterJ Configurations* - - Configuration parameters needed for running YCSB using ClusterJ API - + ClusterJ Configuration parameters - rondb.connection.string Default: 127.0.0.1:1186 - rondb.schema Default: ycsb @@ -96,7 +96,7 @@ This section describes how to run YCSB on RonDB. - "fieldcount=1", "fieldlength=4096", "fieldnameprefix=FIELD" - "fieldcount=10", "fieldlength=100", "fieldnameprefix=FIELD" -7. Load the data +7. Load data Currently, you can only use "ClusterJ" API for loading data into RonDB cluster. @@ -105,13 +105,12 @@ This section describes how to run YCSB on RonDB. ./bin/ycsb load rondb -s -P workloads/workloadc \ -p "rondb.connection.string=127.0.0.1:1186" \ -p "rondb.schema=ycsb" \ - -p "rondb.api.type=clusterj" \ -p "fieldcount=10" \ -p "fieldlength=100" \ -p "fieldnameprefix=FIELD" ``` -8. Run the workload test +8. Run a workload test - *ClusterJ API* @@ -120,25 +119,28 @@ This section describes how to run YCSB on RonDB. 
./bin/ycsb run rondb -s -P workloads/workloadc \ -p "rondb.connection.string=127.0.0.1:1186" \ -p "rondb.schema=ycsb" \ - -p "rondb.api.type=clusterj" \ + -p "rondb.read.api.type=clusterj" \ -p "fieldcount=10" \ -p "fieldlength=100" \ -p "fieldnameprefix=FIELD" \ - -p "readBatchSize=5" + -p "readBatchSize=5" \ + -p "updateBatchSize=5" ``` - *REST API* ```bash # Use -p flag to overwrite any parameters in the specified workload file ./bin/ycsb run rondb -s -P workloads/workloadc \ + -p "rondb.connection.string=127.0.0.1:1186" \ -p "rondb.schema=ycsb" \ - -p "rondb.api.type=REST" \ + -p "rondb.read.api.type=REST" \ -p "rondb.api.server.ip=127.0.0.1" \ -p "rondb.api.server.rest.port=4406" \ -p "fieldcount=10" \ -p "fieldlength=100" \ -p "fieldnameprefix=FIELD" \ - -p "readBatchSize=5" + -p "readBatchSize=5" \ + -p "updateBatchSize=5" ``` - *gRPC* @@ -146,12 +148,14 @@ This section describes how to run YCSB on RonDB. ```bash # Use -p flag to overwrite any parameters in the specified workload file ./bin/ycsb run rondb -s -P workloads/workloadc \ + -p "rondb.connection.string=127.0.0.1:1186" \ -p "rondb.schema=ycsb" \ - -p "rondb.api.type=GRPC" \ + -p "rondb.read.api.type=GRPC" \ -p "rondb.api.server.ip=127.0.0.1" \ -p "rondb.api.server.rest.port=5406" \ -p "fieldcount=10" \ -p "fieldlength=100" \ -p "fieldnameprefix=FIELD" \ - -p "readBatchSize=5" + -p "readBatchSize=5" \ + -p "updateBatchSize=5" ``` diff --git a/rondb/scripts/clusterj b/rondb/scripts/clusterj new file mode 100755 index 0000000000..0c0acbd59d --- /dev/null +++ b/rondb/scripts/clusterj @@ -0,0 +1,18 @@ +#!/bin/bash + +DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +cd $DIR/../.. 
+ +export JAVA_OPTS="-Djava.library.path=/Users/salman/code/hops/rondb21/build/lib" +./bin/ycsb run rondb -s -P ./workloads/workloada \ + -p "rondb.connection.string=localhost:13000" \ + -p "rondb.schema=ycsb" \ + -p "rondb.read.api.type=clusterj" \ + -p "fieldcount=10" \ + -p "fieldlength=100" \ + -p "fieldnameprefix=FIELD" \ + -p "threadcount=10" \ + -p "readBatchSize=5" \ + -p "updateBatchSize=5" + + diff --git a/rondb/scripts/grpc b/rondb/scripts/grpc new file mode 100755 index 0000000000..5474de2608 --- /dev/null +++ b/rondb/scripts/grpc @@ -0,0 +1,21 @@ +#!/bin/bash + +DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +cd $DIR/../.. + +export JAVA_OPTS="-Djava.library.path=/Users/salman/code/hops/rondb21/build/lib" + +./bin/ycsb run rondb -s -P ./workloads/workloada \ + -p "rondb.connection.string=localhost:13000" \ + -p "rondb.schema=ycsb" \ + -p "rondb.read.api.type=GRPC" \ + -p "rondb.api.server.ip=127.0.0.1" \ + -p "rondb.api.server.rest.port=5406" \ + -p "rondb.grpc.api.use.tls=true" \ + -p "fieldcount=10" \ + -p "fieldlength=100" \ + -p "fieldnameprefix=FIELD" \ + -p "threadcount=1" \ + -p "readBatchSize=5" \ + -p "updateBatchSize=5" + diff --git a/rondb/scripts/load b/rondb/scripts/load new file mode 100755 index 0000000000..eb5672649d --- /dev/null +++ b/rondb/scripts/load @@ -0,0 +1,19 @@ +#!/bin/bash + +DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +cd $DIR/../.. 
+ +export JAVA_OPTS="-Djava.library.path=/Users/salman/code/hops/rondb21/build/lib" +mysql=~/code/hops/rondb21/build/bin/mysql + +$mysql -uroot -P 13001 --protocol=TCP -h localhost -e "drop database ycsb" +$mysql -uroot -P 13001 --protocol=TCP -h localhost -e "create database ycsb" +$mysql -uroot -P 13001 --protocol=TCP -h localhost ycsb -e "CREATE TABLE usertable (YCSB_KEY VARCHAR(255) PRIMARY KEY, FIELD0 varchar(100), FIELD1 varchar(100), FIELD2 varchar(100), FIELD3 varchar(100), FIELD4 varchar(100), FIELD5 varchar(100), FIELD6 varchar(100), FIELD7 varchar(100), FIELD8 varchar(100), FIELD9 varchar(100)) charset latin1 engine=ndbcluster" + +./bin/ycsb load rondb -s -P ./workloads/workloada \ + -p "rondb.connection.string=localhost:13000" \ + -p "rondb.schema=ycsb" \ + -p "fieldcount=10" \ + -p "fieldlength=100" \ + -p "fieldnameprefix=FIELD" + diff --git a/rondb/scripts/rdrs_config_mtr.json b/rondb/scripts/rdrs_config_mtr.json new file mode 100644 index 0000000000..3ebc8c0230 --- /dev/null +++ b/rondb/scripts/rdrs_config_mtr.json @@ -0,0 +1,45 @@ +{ + "Internal": { + "APIVersion": "0.1.0" + }, + "REST": { + "ServerIP": "0.0.0.0", + "ServerPort": 4406 + }, + "GRPC": { + "ServerIP": "0.0.0.0", + "ServerPort": 5406 + }, + "RonDB": { + "Mgmds": [ + { + "IP": "0.0.0.0", + "Port": 13000 + } + ] + }, + "Security": { + "TLS": { + "EnableTLS": false, + "RequireAndVerifyClientCert": false + }, + "APIKey": { + "UseHopsworksAPIKeys": false + } + }, + "Log": { + "Level": "info" + }, + "Testing": { + "MySQL": { + "Servers": [ + { + "IP": "0.0.0.0", + "Port": 13001 + } + ], + "User": "root", + "Password": "" + } + } +} diff --git a/rondb/scripts/rest b/rondb/scripts/rest new file mode 100755 index 0000000000..de0b49b383 --- /dev/null +++ b/rondb/scripts/rest @@ -0,0 +1,23 @@ +#!/bin/bash + +DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +cd $DIR/../.. 
+ +export JAVA_OPTS="-Djava.library.path=/Users/salman/code/hops/rondb21/build/lib" + +./bin/ycsb run rondb -s -P ./workloads/workloada \ + -p "rondb.connection.string=localhost:13000" \ + -p "rondb.schema=ycsb" \ + -p "rondb.read.api.type=REST" \ + -p "rondb.api.server.ip=127.0.0.1" \ + -p "rondb.api.server.rest.port=4406" \ + -p "rondb.rest.api.use.tls=true" \ + -p "rondb.rest.api.use.async.requests=false" \ + -p "fieldcount=10" \ + -p "fieldlength=100" \ + -p "fieldnameprefix=FIELD" \ + -p "threadcount=1" \ + -p "readBatchSize=5" \ + -p "updateBatchSize=5" + + diff --git a/rondb/scripts/run_rdrs b/rondb/scripts/run_rdrs new file mode 100755 index 0000000000..9453faf051 --- /dev/null +++ b/rondb/scripts/run_rdrs @@ -0,0 +1,8 @@ +#!/bin/bash + +DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) + +RDRS_BIN=~/code/hops/rondb21/build/storage/ndb/rest-server/rest-api-server/bin/server/rdrs + +$RDRS_BIN -config $DIR/rdrs_config_mtr.json + diff --git a/rondb/src/main/java/site/ycsb/db/ConfigKeys.java b/rondb/src/main/java/site/ycsb/db/ConfigKeys.java index 9d2558a961..3a0c86fe0b 100644 --- a/rondb/src/main/java/site/ycsb/db/ConfigKeys.java +++ b/rondb/src/main/java/site/ycsb/db/ConfigKeys.java @@ -23,8 +23,8 @@ public final class ConfigKeys { private ConfigKeys(){} - public static final String RONDB_API_TYPE_KEY = "rondb.api.type"; - public static final String RONDB_API_TYPE_DEFAULT = RonDBAPIType.CLUSTERJ.toString(); + public static final String RONDB_READ_API_TYPE_KEY = "rondb.read.api.type"; + public static final String RONDB_READ_API_TYPE_DEFAULT = RonDBAPIType.CLUSTERJ.toString(); public static final String SCHEMA_KEY = "rondb.schema"; public static final String SCHEMA_DEFAULT = "ycsb"; @@ -46,4 +46,10 @@ private ConfigKeys(){} public static final String RONDB_REST_API_USE_ASYNC_REQUESTS_KEY = "rondb.rest.api.use.async.requests"; public static final boolean RONDB_REST_API_USE_ASYNC_REQUESTS_DEFAULT = false; + + public static final String 
RONDB_REST_API_USE_TLS_KEY = "rondb.rest.api.use.tls"; + public static final boolean RONDB_REST_API_USE_TLS_DEFAULT = false; + + public static final String RONDB_GRPC_API_USE_TLS_KEY = "rondb.grpc.api.use.tls"; + public static final boolean RONDB_GRPC_API_USE_TLS_DEFAULT = false; } diff --git a/rondb/src/main/java/site/ycsb/db/RonDBClient.java b/rondb/src/main/java/site/ycsb/db/RonDBClient.java index 82f49963ca..43cd912771 100644 --- a/rondb/src/main/java/site/ycsb/db/RonDBClient.java +++ b/rondb/src/main/java/site/ycsb/db/RonDBClient.java @@ -37,7 +37,8 @@ */ public class RonDBClient extends DB { protected static Logger logger = LoggerFactory.getLogger(RonDBClient.class); - private DB dbClient; + private DB dbReadClient; + private DB clusterJClient; private static Object lock = new Object(); private long fieldCount; private Set fieldNames; @@ -62,28 +63,17 @@ public void init() throws DBException { threadID = maxThreadID++; - String apiPropStr = properties.getProperty(ConfigKeys.RONDB_API_TYPE_KEY, - ConfigKeys.RONDB_API_TYPE_DEFAULT); - - try { - if (apiPropStr.compareToIgnoreCase(RonDBAPIType.CLUSTERJ.toString()) == 0) { - dbClient = new ClusterJClient(properties); - } else if (apiPropStr.compareToIgnoreCase(RonDBAPIType.REST.toString()) == 0) { - dbClient = new RestApiClient(threadID, properties); - } else if (apiPropStr.compareToIgnoreCase(RonDBAPIType.GRPC.toString()) == 0) { - dbClient = new GrpcClient(threadID, properties); - } else { - throw new IllegalArgumentException("Wrong argument " + ConfigKeys.RONDB_API_TYPE_KEY); - } - dbClient.init(); - } catch (Exception e) { - logger.error("Initialization failed ", e); - e.printStackTrace(); - if (e instanceof DBException) { - throw (DBException) e; - } else { - throw new DBException(e); - } + String readApiPropStr = properties.getProperty(ConfigKeys.RONDB_READ_API_TYPE_KEY, + ConfigKeys.RONDB_READ_API_TYPE_DEFAULT); + + // writer + clusterJClient = initClusterJClient(properties); + + // reader + if 
(readApiPropStr.compareToIgnoreCase(RonDBAPIType.CLUSTERJ.toString()) == 0) { + dbReadClient = clusterJClient; // same client + } else { + dbReadClient = initClient(readApiPropStr, properties); } fieldCount = Long.parseLong(properties.getProperty(CoreWorkload.FIELD_COUNT_PROPERTY, @@ -97,6 +87,49 @@ public void init() throws DBException { } } + private DB initClusterJClient(Properties properties) throws DBException { + DB dbClient; + try { + dbClient = new ClusterJClient(properties); + dbClient.init(); + } catch (Exception e) { + logger.error("Initialization failed ", e); + e.printStackTrace(); + if (e instanceof DBException) { + throw (DBException) e; + } else { + throw new DBException(e); + } + } + return dbClient; + + } + + private DB initClient(String clientType, Properties properties) throws DBException { + DB dbClient = null; + try { + if (clientType.compareToIgnoreCase(RonDBAPIType.CLUSTERJ.toString()) == 0) { + dbClient = new ClusterJClient(properties); + } else if (clientType.compareToIgnoreCase(RonDBAPIType.REST.toString()) == 0) { + dbClient = new RestApiClient(threadID, properties); + } else if (clientType.compareToIgnoreCase(RonDBAPIType.GRPC.toString()) == 0) { + dbClient = new GrpcClient(threadID, properties); + } else { + throw new IllegalArgumentException("Wrong argument " + ConfigKeys.RONDB_READ_API_TYPE_KEY); + } + dbClient.init(); + } catch (Exception e) { + logger.error("Initialization failed ", e); + e.printStackTrace(); + if (e instanceof DBException) { + throw (DBException) e; + } else { + throw new DBException(e); + } + } + return dbClient; + } + /** * Cleanup any state for this DB. * Called once per DB instance; there is one DB instance per client thread. 
@@ -106,7 +139,8 @@ public void cleanup() throws DBException { try { //stop all threads at the same time threadCompletionCount.await(); - dbClient.cleanup(); + dbReadClient.cleanup(); + clusterJClient.cleanup(); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -126,7 +160,7 @@ public void cleanup() throws DBException { public Status read(String table, String key, Set fields, Map result) { Set fieldsToRead = fields != null ? fields : fieldNames; try { - return dbClient.read(table, key, fieldsToRead, result); + return dbReadClient.read(table, key, fieldsToRead, result); } catch (Exception e) { logger.error("Error " + e); return Status.ERROR; @@ -137,7 +171,19 @@ public Status read(String table, String key, Set fields, Map keys, List> fields, HashMap> results) { try { - return dbClient.batchRead(table, keys, fields, results); + return dbReadClient.batchRead(table, keys, fields, results); + } catch (Exception e) { + e.printStackTrace(); + logger.error("Error " + e); + return Status.ERROR; + } + } + + @Override + public Status batchUpdate(String table, List keys, + List> values) { + try { + return clusterJClient.batchUpdate(table, keys, values); } catch (Exception e) { e.printStackTrace(); logger.error("Error " + e); @@ -162,7 +208,8 @@ public Status scan(String table, String startkey, int recordcount, Set f Vector> result) { Set fieldsToRead = fields != null ? 
fields : fieldNames; try { - return dbClient.scan(table, startkey, recordcount, fieldsToRead, result); + //scan operation using the writer as Rest API does not yet support scan operations + return clusterJClient.scan(table, startkey, recordcount, fieldsToRead, result); } catch (Exception e) { logger.error("Error " + e); return Status.ERROR; @@ -183,7 +230,7 @@ public Status scan(String table, String startkey, int recordcount, Set f @Override public Status update(String table, String key, Map values) { try { - return dbClient.update(table, key, values); + return clusterJClient.update(table, key, values); } catch (Exception e) { logger.error("Error " + e); return Status.ERROR; @@ -203,7 +250,7 @@ public Status update(String table, String key, Map values) @Override public Status insert(String table, String key, Map values) { try { - return dbClient.insert(table, key, values); + return clusterJClient.insert(table, key, values); } catch (Exception e) { logger.error("Error " + e); return Status.ERROR; @@ -220,7 +267,7 @@ public Status insert(String table, String key, Map values) @Override public Status delete(String table, String key) { try { - return dbClient.delete(table, key); + return clusterJClient.delete(table, key); } catch (Exception e) { logger.error("Error " + e); return Status.ERROR; diff --git a/rondb/src/main/java/site/ycsb/db/clusterj/ClusterJClient.java b/rondb/src/main/java/site/ycsb/db/clusterj/ClusterJClient.java index 204e6cd75b..43f15c4d63 100644 --- a/rondb/src/main/java/site/ycsb/db/clusterj/ClusterJClient.java +++ b/rondb/src/main/java/site/ycsb/db/clusterj/ClusterJClient.java @@ -21,6 +21,7 @@ */ package site.ycsb.db.clusterj; +import com.mysql.clusterj.ColumnMetadata; import com.mysql.clusterj.DynamicObject; import com.mysql.clusterj.Query; import com.mysql.clusterj.Session; @@ -94,8 +95,9 @@ public Status action() throws Exception { logger.info("Read. 
Key: " + key + " Not Found."); return Status.NOT_FOUND; } + ColumnMetadata[] columnMetadata = row.columnMetadata(); for (String field : fields) { - result.put(field, UserTableHelper.readFieldFromDTO(field, row)); + result.put(field, UserTableHelper.readFieldFromDTO(field, row, columnMetadata)); } connection.releaseDTO(session, row); if (logger.isDebugEnabled()) { @@ -136,8 +138,9 @@ public Status action() throws Exception { Set rowFields = fields.get(i); HashMap rowResult = results.get(pk); + ColumnMetadata[] columnMetadata = row.columnMetadata(); for (String field : rowFields) { - rowResult.put(field, UserTableHelper.readFieldFromDTO(field, row)); + rowResult.put(field, UserTableHelper.readFieldFromDTO(field, row, columnMetadata)); } } @@ -158,6 +161,43 @@ public Status action() throws Exception { } } + @Override + public Status batchUpdate(String table, List keys, + List> allValues) { + Class dbClass = connection.getDTOClass(table); + final Session session = connection.getSession(); + + try { + TransactionReqHandler handler = new TransactionReqHandler("BatchUpdate") { + @Override + public Status action() throws Exception { + + List allRows = new ArrayList<>(keys.size()); + + for (int i = 0; i < keys.size(); i++) { + String key = keys.get(i); + Map values = allValues.get(i); + DynamicObject row = UserTableHelper.createDTO(connection.classGenerator, session, table + , key, values); + allRows.add(row); + } + + session.savePersistentAll(allRows); + for (DynamicObject row : allRows) { + connection.releaseDTO(session, row); + } + if (logger.isDebugEnabled()) { + logger.debug("BatchUpdated keys: " + Arrays.toString(keys.toArray())); + } + return Status.OK; + } + }; + return handler.runTx(session, dbClass, keys.get(0)); + } finally { + connection.releaseSession(session); + } + } + @Override public Status delete(String table, String key) { Class dbClass; diff --git a/rondb/src/main/java/site/ycsb/db/clusterj/table/UserTableHelper.java 
b/rondb/src/main/java/site/ycsb/db/clusterj/table/UserTableHelper.java index 3345ab5015..7f00f0d098 100644 --- a/rondb/src/main/java/site/ycsb/db/clusterj/table/UserTableHelper.java +++ b/rondb/src/main/java/site/ycsb/db/clusterj/table/UserTableHelper.java @@ -20,6 +20,7 @@ */ package site.ycsb.db.clusterj.table; +import com.mysql.clusterj.ColumnMetadata; import com.mysql.clusterj.ColumnType; import com.mysql.clusterj.DynamicObject; import com.mysql.clusterj.Session; @@ -46,40 +47,43 @@ public static DynamicObject createDTO(ClassGenerator classGenerator, Session ses Map values) throws Exception { DynamicObject persistable = getTableObject(classGenerator, session, tableName); - setFieldValue(persistable, KEY, keyVal.getBytes(), keyVal.getBytes().length); + ColumnMetadata[] columnMetadata = persistable.columnMetadata(); + setFieldValue(persistable, columnMetadata, KEY, keyVal.getBytes(), keyVal.getBytes().length); if (values != null) { for (String colName : values.keySet()) { byte[] value = values.get(colName).toArray(); - setFieldValue(persistable, colName, value, value.length); + setFieldValue(persistable, columnMetadata, colName, value, value.length); } } return persistable; } - private static void setFieldValue(DynamicObject persistable, String colName, byte[] value, int lenght) { + private static void setFieldValue(DynamicObject persistable, ColumnMetadata[] columnMetadata, + String colName, byte[] value, int lenght) { boolean found = false; - for (int i = 0; i < persistable.columnMetadata().length; i++) { - String fieldName = persistable.columnMetadata()[i].name(); + for (int i = 0; i < columnMetadata.length; i++) { + String fieldName = columnMetadata[i].name(); if (fieldName.equals(colName)) { - int maxLength = persistable.columnMetadata()[i].maximumLength(); + int maxLength = columnMetadata[i].maximumLength(); if (maxLength < lenght) { throw new IllegalArgumentException("Column \"" + colName + "\" can only store " + maxLength + ". 
Request length: " + lenght); } - ColumnType cType = persistable.columnMetadata()[i].columnType(); + ColumnType cType = columnMetadata[i].columnType(); if (cType == ColumnType.Varchar || cType == ColumnType.Longvarchar) { persistable.set(i, new String(value, StandardCharsets.UTF_8)); } else if (cType == ColumnType.Varbinary || cType == ColumnType.Longvarbinary) { persistable.set(i, value); } else { - throw new UnsupportedOperationException(persistable.columnMetadata()[i].columnType() + + throw new UnsupportedOperationException(columnMetadata[i].columnType() + " is not supported in this benchmark"); } found = true; + break; } } if (!found) { @@ -88,17 +92,19 @@ private static void setFieldValue(DynamicObject persistable, String colName, byt } public static HashMap readFieldsFromDTO(DynamicObject dto, Set fields) { + ColumnMetadata[] columnMetadata = dto.columnMetadata(); HashMap values = new HashMap<>(); for (String field : fields) { - values.put(field, readFieldFromDTO(field, dto)); + values.put(field, readFieldFromDTO(field, dto, columnMetadata)); } return values; } - public static ByteIterator readFieldFromDTO(String colName, DynamicObject row) { - for (int i = 0; i < row.columnMetadata().length; i++) { - String fieldName = row.columnMetadata()[i].name(); - ColumnType cType = row.columnMetadata()[i].columnType(); + public static ByteIterator readFieldFromDTO(String colName, DynamicObject row, + ColumnMetadata[] columnMetadata) { + for (int i = 0; i < columnMetadata.length; i++) { + String fieldName = columnMetadata[i].name(); + ColumnType cType = columnMetadata[i].columnType(); if (fieldName.equals(colName)) { if (cType == ColumnType.Varchar || cType == ColumnType.Longvarchar) { @@ -137,7 +143,7 @@ public static void setPK(String pk, DynamicObject row) { } } - if(!set){ + if (!set) { throw new UnsupportedOperationException("Failed to set primary key for read operation"); } } diff --git a/rondb/src/main/java/site/ycsb/db/grpc/GrpcClient.java 
b/rondb/src/main/java/site/ycsb/db/grpc/GrpcClient.java index 01964ec267..e57cf3776f 100644 --- a/rondb/src/main/java/site/ycsb/db/grpc/GrpcClient.java +++ b/rondb/src/main/java/site/ycsb/db/grpc/GrpcClient.java @@ -25,6 +25,9 @@ import com.rondb.grpcserver.RonDBRESTGrpc.RonDBRESTBlockingStub; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; +import io.grpc.netty.GrpcSslContexts; +import io.grpc.netty.NettyChannelBuilder; +import io.netty.handler.ssl.util.InsecureTrustManagerFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import site.ycsb.*; @@ -46,6 +49,7 @@ public final class GrpcClient extends DB { private String databaseName; private String grpcServerIP; private int grpcServerPort; + private boolean useTLS; private final Properties properties; private final int threadID; @@ -71,11 +75,22 @@ public void init() throws DBException { grpcServerIP = inetAddress.getHostAddress(); grpcServerPort = Integer.parseInt(properties.getProperty(ConfigKeys.RONDB_GRPC_SERVER_PORT_KEY, Integer.toString(ConfigKeys.RONDB_GRPC_SERVER_PORT_DEFAULT))); + useTLS = Boolean.parseBoolean(properties.getProperty(ConfigKeys.RONDB_GRPC_API_USE_TLS_KEY, + Boolean.toString(ConfigKeys.RONDB_GRPC_API_USE_TLS_DEFAULT))); String grpcServerAddress = grpcServerIP + ":" + grpcServerPort; logger.info("Connecting to gRPC test endpoint " + grpcServerAddress); - channel = ManagedChannelBuilder.forAddress(grpcServerIP, - grpcServerPort).usePlaintext().build(); + if (useTLS) { + channel = NettyChannelBuilder.forAddress(grpcServerIP, + grpcServerPort) + .sslContext(GrpcSslContexts.forClient() + .trustManager(InsecureTrustManagerFactory.INSTANCE) + .build()) + .build(); + } else { + channel = ManagedChannelBuilder.forAddress(grpcServerIP, + grpcServerPort).usePlaintext().build(); + } blockingStub = RonDBRESTGrpc.newBlockingStub(channel); test(); @@ -130,12 +145,12 @@ public Status batchRead(String table, List keys, List> field // unpack the response for (int i = 0; i < 
response.getResponsesCount(); i++) { PKReadResponseProto pkResponse = response.getResponses(i); - if (pkResponse.getCode() != 200){ + if (pkResponse.getCode() != 200) { allGood = false; break; } - String pk = pkResponse.getOperationID(); + String pk = pkResponse.getOperationID(); HashMap result = results.get(pk); assert result != null; @@ -145,13 +160,19 @@ public Status batchRead(String table, List keys, List> field } } - if(allGood) { + if (allGood) { return Status.OK; } else { return Status.ERROR; } } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + private PKReadRequestProto createPKRequestProto(String table, String pk, Set fields) { PKReadRequestProto.Builder pkReadBuilder = PKReadRequestProto.newBuilder(); @@ -195,7 +216,10 @@ public Status delete(String table, String key) { } @Override - public void cleanup() throws DBException { - channel.shutdown(); + public synchronized void cleanup() throws DBException { + if (channel != null) { + channel.shutdown(); + channel = null; + } } } diff --git a/rondb/src/main/java/site/ycsb/db/http/RestApiClient.java b/rondb/src/main/java/site/ycsb/db/http/RestApiClient.java index 6ef93c93e3..7703f36ace 100644 --- a/rondb/src/main/java/site/ycsb/db/http/RestApiClient.java +++ b/rondb/src/main/java/site/ycsb/db/http/RestApiClient.java @@ -52,6 +52,7 @@ public final class RestApiClient extends DB { private String restAPIVersion; private String restServerURI; private MyHttpClient myHttpClient; + private boolean useTLS; private final int threadID; private Properties properties; @@ -71,16 +72,25 @@ public void init() throws DBException { Integer.toString(ConfigKeys.RONDB_REST_SERVER_PORT_DEFAULT))); restAPIVersion = properties.getProperty(ConfigKeys.RONDB_REST_API_VERSION_KEY, ConfigKeys.RONDB_REST_API_VERSION_DEFAULT); - restServerURI = "http://" + restServerIP + ":" + restServerPort + "/" + restAPIVersion; + 
restAPIVersion = properties.getProperty(ConfigKeys.RONDB_REST_API_VERSION_KEY, + ConfigKeys.RONDB_REST_API_VERSION_DEFAULT); + useTLS = Boolean.parseBoolean(properties.getProperty(ConfigKeys.RONDB_REST_API_USE_TLS_KEY, + Boolean.toString(ConfigKeys.RONDB_REST_API_USE_TLS_DEFAULT))); + if (useTLS) { + restServerURI = "https://"; + } else { + restServerURI = "http://"; + } + restServerURI = restServerURI + restServerIP + ":" + restServerPort + "/" + restAPIVersion; boolean async = Boolean.parseBoolean(properties.getProperty(ConfigKeys.RONDB_REST_API_USE_ASYNC_REQUESTS_KEY, Boolean.toString(ConfigKeys.RONDB_REST_API_USE_ASYNC_REQUESTS_DEFAULT))); try { if (async) { - myHttpClient = new MyHttpClientAsync(numThreads); + myHttpClient = new MyHttpClientAsync(numThreads, useTLS); } else { - myHttpClient = new MyHttpClientSync(); + myHttpClient = new MyHttpClientSync(useTLS); } } catch (IOReactorException e) { logger.error(e.getMessage(), e); @@ -172,6 +182,12 @@ public Status batchRead(String table, List keys, List> field } } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { @@ -243,6 +259,6 @@ private Status processBatchResponse(String responseStr, List keys, } @Override - public void cleanup() throws DBException { + public synchronized void cleanup() throws DBException { } } diff --git a/rondb/src/main/java/site/ycsb/db/http/ds/MyHttpClientAsync.java b/rondb/src/main/java/site/ycsb/db/http/ds/MyHttpClientAsync.java index fe5b26f9ca..739ccf350f 100644 --- a/rondb/src/main/java/site/ycsb/db/http/ds/MyHttpClientAsync.java +++ b/rondb/src/main/java/site/ycsb/db/http/ds/MyHttpClientAsync.java @@ -22,16 +22,27 @@ import org.apache.http.HttpResponse; import org.apache.http.client.methods.HttpRequestBase; -import 
org.apache.http.client.protocol.HttpClientContext; +import org.apache.http.config.Registry; +import org.apache.http.config.RegistryBuilder; +import org.apache.http.conn.ssl.TrustStrategy; import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; import org.apache.http.impl.nio.client.HttpAsyncClients; import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager; import org.apache.http.impl.nio.reactor.DefaultConnectingIOReactor; +import org.apache.http.nio.conn.SchemeIOSessionStrategy; +import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; import org.apache.http.nio.reactor.ConnectingIOReactor; import org.apache.http.nio.reactor.IOReactorException; +import org.apache.http.ssl.SSLContextBuilder; +import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; import site.ycsb.db.http.MyHttpException; +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSession; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; import java.util.concurrent.Future; /** @@ -40,29 +51,66 @@ public class MyHttpClientAsync extends MyHttpClient { private CloseableHttpAsyncClient client; - public MyHttpClientAsync(int numThreads) throws IOReactorException { - ConnectingIOReactor ioReactor = new DefaultConnectingIOReactor(); - PoolingNHttpClientConnectionManager cm = new PoolingNHttpClientConnectionManager(ioReactor); - cm.setMaxTotal(numThreads); - cm.setDefaultMaxPerRoute(numThreads); - client = HttpAsyncClients.custom().setConnectionManager(cm).build(); - client.start(); + public MyHttpClientAsync(int numThreads, boolean useTLS) throws IOReactorException { + + try { + if (useTLS) { + SSLContextBuilder builder = SSLContexts.custom(); + builder.loadTrustMaterial(null, new TrustStrategy() { + @Override + public boolean isTrusted(X509Certificate[] chain, String authType) + throws CertificateException { + return true; + } + }); + SSLContext sslContext = 
builder.build(); + SchemeIOSessionStrategy sslioSessionStrategy = new SSLIOSessionStrategy(sslContext, + new HostnameVerifier() { + @Override + public boolean verify(String hostname, SSLSession session) { + return true; // allow all hostnames + } + }); + Registry sslioSessionRegistry = + RegistryBuilder.create(). + register("https", sslioSessionStrategy).build(); + PoolingNHttpClientConnectionManager ncm = + new PoolingNHttpClientConnectionManager(new DefaultConnectingIOReactor(), + sslioSessionRegistry); + ncm.setMaxTotal(numThreads); + ncm.setDefaultMaxPerRoute(numThreads); + client = HttpAsyncClients.custom().setConnectionManager(ncm).build(); + } else { + ConnectingIOReactor ioReactor = new DefaultConnectingIOReactor(); + PoolingNHttpClientConnectionManager cm = new PoolingNHttpClientConnectionManager(ioReactor); + cm.setMaxTotal(numThreads); + cm.setDefaultMaxPerRoute(numThreads); + client = HttpAsyncClients.custom().setConnectionManager(cm).build(); + } + client.start(); + } catch (Exception e) { + throw new RuntimeException(e); + } } @Override public String execute(HttpRequestBase req) throws MyHttpException { HttpResponse resp = null; try { - HttpClientContext context = HttpClientContext.create(); - Future future = client.execute(req, context, null); +// HttpClientContext context = HttpClientContext.create(); + Future future = client.execute(req, null); resp = future.get(); if (resp.getStatusLine().getStatusCode() == 200) { String b = EntityUtils.toString(resp.getEntity()); return b; } + System.out.println("response is not 200"); } catch (Exception ex) { System.out.println(ex.getLocalizedMessage()); } + if (resp == null) { + System.out.println("resp is null"); + } throw new MyHttpException("Req failed code : " + resp.getStatusLine().getStatusCode()); } } diff --git a/rondb/src/main/java/site/ycsb/db/http/ds/MyHttpClientSync.java b/rondb/src/main/java/site/ycsb/db/http/ds/MyHttpClientSync.java index 5d4e2e0642..fb2793d37b 100644 --- 
a/rondb/src/main/java/site/ycsb/db/http/ds/MyHttpClientSync.java +++ b/rondb/src/main/java/site/ycsb/db/http/ds/MyHttpClientSync.java @@ -23,12 +23,25 @@ import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpRequestBase; +import org.apache.http.config.Registry; +import org.apache.http.config.RegistryBuilder; +import org.apache.http.conn.socket.ConnectionSocketFactory; +import org.apache.http.conn.socket.PlainConnectionSocketFactory; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.conn.ssl.TrustStrategy; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; +import org.apache.http.impl.conn.BasicHttpClientConnectionManager; +import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; import site.ycsb.db.http.MyHttpException; +import javax.net.ssl.SSLContext; import java.io.IOException; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; /** * MyHttpClientSync. 
@@ -36,17 +49,49 @@ public class MyHttpClientSync extends MyHttpClient { private ThreadLocal httpClients = new ThreadLocal<>(); + private boolean useTLS; - public MyHttpClientSync() { - //cm = new PoolingHttpClientConnectionManager(); - // cm.setMaxTotal(numThreads); - // cm.setDefaultMaxPerRoute(numThreads); - //HttpHost host = new HttpHost(restServerIP, restServerPort); - // cm.setMaxPerRoute(new HttpRoute(host), numThreads); + public MyHttpClientSync(boolean useTLS) { + this.useTLS = useTLS; } public CloseableHttpClient getHttpClient() { + if (useTLS){ + return getHttpClientTLS(); + } else { + return getHttpClientNonSSL(); + } + } + private CloseableHttpClient getHttpClientTLS(){ + CloseableHttpClient httpClient; + httpClient = httpClients.get(); + if (httpClient == null) { + try { + TrustStrategy acceptingTrustStrategy = (cert, authType) -> true; + SSLContext sslContext = + SSLContexts.custom().loadTrustMaterial(null, acceptingTrustStrategy).build(); + SSLConnectionSocketFactory sslsf = new SSLConnectionSocketFactory(sslContext, + NoopHostnameVerifier.INSTANCE); + + Registry socketFactoryRegistry = + RegistryBuilder.create() + .register("https", sslsf) + .register("http", new PlainConnectionSocketFactory()) + .build(); + + BasicHttpClientConnectionManager connectionManager = + new BasicHttpClientConnectionManager(socketFactoryRegistry); + httpClient = HttpClients.custom().setSSLSocketFactory(sslsf) + .setConnectionManager(connectionManager).build(); + httpClients.set(httpClient); + } catch (NoSuchAlgorithmException | KeyManagementException | KeyStoreException e) { + e.printStackTrace(); + } + } + return httpClient; + } + private CloseableHttpClient getHttpClientNonSSL(){ CloseableHttpClient httpClient; httpClient = httpClients.get(); if (httpClient == null) { @@ -76,6 +121,9 @@ public String execute(HttpRequestBase req) throws MyHttpException { } } } + System.out.println(">more details " + resp.getStatusLine()); + System.out.println(">more details " + resp); 
throw new MyHttpException("Req failed code : " + resp.getStatusLine().getStatusCode()); } } + diff --git a/s3/src/main/java/site/ycsb/db/S3Client.java b/s3/src/main/java/site/ycsb/db/S3Client.java index 5ab8b1c8a2..bd81becd66 100644 --- a/s3/src/main/java/site/ycsb/db/S3Client.java +++ b/s3/src/main/java/site/ycsb/db/S3Client.java @@ -288,6 +288,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Update a file in the database. Any field/value pairs in the specified * values HashMap will be written into the file with the specified file diff --git a/scylla/src/main/java/site/ycsb/db/scylla/ScyllaCQLClient.java b/scylla/src/main/java/site/ycsb/db/scylla/ScyllaCQLClient.java index 7683b76850..6addb03515 100644 --- a/scylla/src/main/java/site/ycsb/db/scylla/ScyllaCQLClient.java +++ b/scylla/src/main/java/site/ycsb/db/scylla/ScyllaCQLClient.java @@ -351,6 +351,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. 
diff --git a/seaweedfs/src/main/java/site/ycsb/db/seaweed/SeaweedClient.java b/seaweedfs/src/main/java/site/ycsb/db/seaweed/SeaweedClient.java index 4037185f87..04bddd1a28 100644 --- a/seaweedfs/src/main/java/site/ycsb/db/seaweed/SeaweedClient.java +++ b/seaweedfs/src/main/java/site/ycsb/db/seaweed/SeaweedClient.java @@ -147,6 +147,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + /** * Update a file in the table. Any field/value pairs in the specified * values HashMap will be written into the file with the specified file diff --git a/tablestore/src/main/java/site/ycsb/db/tablestore/TableStoreClient.java b/tablestore/src/main/java/site/ycsb/db/tablestore/TableStoreClient.java index 8f76347e87..0feb4e9403 100755 --- a/tablestore/src/main/java/site/ycsb/db/tablestore/TableStoreClient.java +++ b/tablestore/src/main/java/site/ycsb/db/tablestore/TableStoreClient.java @@ -166,6 +166,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { diff --git a/tarantool/src/main/java/site/ycsb/db/TarantoolClient.java b/tarantool/src/main/java/site/ycsb/db/TarantoolClient.java index 3f93b95f9c..8c48afea91 100644 --- a/tarantool/src/main/java/site/ycsb/db/TarantoolClient.java +++ b/tarantool/src/main/java/site/ycsb/db/TarantoolClient.java @@ -97,6 +97,12 @@ public Status batchRead(String table, List keys, List> field throw new 
UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String table, String startkey, int recordcount, Set fields, diff --git a/voltdb/src/main/java/site/ycsb/db/voltdb/VoltClient4.java b/voltdb/src/main/java/site/ycsb/db/voltdb/VoltClient4.java index d380326977..8a27e387d9 100644 --- a/voltdb/src/main/java/site/ycsb/db/voltdb/VoltClient4.java +++ b/voltdb/src/main/java/site/ycsb/db/voltdb/VoltClient4.java @@ -160,6 +160,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status scan(String keyspace, String lowerBound, int recordCount, Set columns, Vector> result) { diff --git a/zookeeper/src/main/java/site/ycsb/db/zookeeper/ZKClient.java b/zookeeper/src/main/java/site/ycsb/db/zookeeper/ZKClient.java index 7de32757de..5ad3ac401d 100644 --- a/zookeeper/src/main/java/site/ycsb/db/zookeeper/ZKClient.java +++ b/zookeeper/src/main/java/site/ycsb/db/zookeeper/ZKClient.java @@ -123,6 +123,12 @@ public Status batchRead(String table, List keys, List> field throw new UnsupportedOperationException("Batch reads are not yet supported"); } + @Override + public Status batchUpdate(String table, List keys, + List> values) { + throw new UnsupportedOperationException("Batch updates are not yet supported"); + } + @Override public Status insert(String table, String key, Map values) {