diff --git a/.github/workflows/pr-jobs.yml b/.github/workflows/pr-jobs.yml
index a3cfcdbce73e..20e2bc0c4916 100644
--- a/.github/workflows/pr-jobs.yml
+++ b/.github/workflows/pr-jobs.yml
@@ -444,7 +444,35 @@ jobs:
       - name: Build RocksDBJava Static Library
         # NOTE: replaced scl enable devtoolset-7 'make V=1 J=8 -j8 rocksdbjavastatic'
         run: make V=1 J=8 -j8 rocksdbjavastatic
-      # post-steps skipped because of compatibility issues with docker image
+  check-javadocs:
+    if: ${{ github.repository_owner == 'facebook' }}
+    runs-on:
+      labels: 4-core-ubuntu
+    container:
+      image: evolvedbinary/rocksjava:centos7_x64-be
+      options: --shm-size=16gb
+    steps:
+      # The docker image is based on such an old OS that it has a GLIBC
+      # incompatibility with actions/checkout and other actions. Thus we
+      # implement a manual checkout step.
+      - name: Checkout
+        env:
+          GH_TOKEN: ${{ github.token }}
+        run: |
+          chown `whoami` . || true
+          git clone --no-checkout https://oath2:$GH_TOKEN@github.com/${{ github.repository }}.git .
+          git -c protocol.version=2 fetch --update-head-ok --no-tags --prune --no-recurse-submodules --depth=1 origin +${{ github.sha }}:${{ github.ref }}
+          git checkout --progress --force ${{ github.ref }}
+          git log -1 --format='%H'
+      - uses: "./.github/actions/pre-steps"
+      - name: Set Java Environment
+        run: |-
+          echo "JAVA_HOME=${JAVA_HOME}"
+          which java && java -version
+          which javac && javac -version
+      - name: Check RocksDBJava JavaDocs
+        run: scl enable devtoolset-7 'pushd java; make V=1 J=8 -j8 javadocs'
+      # NOTE: post-steps skipped because of compatibility issues with docker image
   build-macos-java:
     if: ${{ github.repository_owner == 'facebook' }}
     runs-on: macos-15-xlarge
diff --git a/include/rocksdb/utilities/table_properties_collectors.h b/include/rocksdb/utilities/table_properties_collectors.h
index c8c8af1de6a8..3395a7fd3998 100644
--- a/include/rocksdb/utilities/table_properties_collectors.h
+++ b/include/rocksdb/utilities/table_properties_collectors.h
@@ -83,7 +83,7 @@ class CompactOnDeletionCollectorFactory
 };

 // Creates a factory of a table property collector that marks a SST
-// file as need-compaction when it observe at least "D" deletion
+// file as need-compaction when it observes at least "D" deletion
 // entries in any "N" consecutive entries, or the ratio of tombstone
 // entries >= deletion_ratio.
 //
diff --git a/java/Makefile b/java/Makefile
index 5e00921c62b9..1ef540915c40 100644
--- a/java/Makefile
+++ b/java/Makefile
@@ -115,12 +115,11 @@ JAVA_TESTS = \
 	org.rocksdb.BuiltinComparatorTest\
 	org.rocksdb.ByteBufferUnsupportedOperationTest\
 	org.rocksdb.BytewiseComparatorRegressionTest\
-	org.rocksdb.util.BytewiseComparatorTest\
-	org.rocksdb.util.BytewiseComparatorIntTest\
 	org.rocksdb.CheckPointTest\
 	org.rocksdb.ClockCacheTest\
 	org.rocksdb.ColumnFamilyOptionsTest\
 	org.rocksdb.ColumnFamilyTest\
+	org.rocksdb.CompactRangeOptionsTest\
 	org.rocksdb.CompactionFilterFactoryTest\
 	org.rocksdb.CompactionJobInfoTest\
 	org.rocksdb.CompactionJobStatsTest\
@@ -132,21 +131,21 @@ JAVA_TESTS = \
 	org.rocksdb.ComparatorOptionsTest\
 	org.rocksdb.CompressionOptionsTest\
 	org.rocksdb.CompressionTypesTest\
+	org.rocksdb.ConcurrentTaskLimiterTest\
+	org.rocksdb.DefaultEnvTest\
 	org.rocksdb.DBOptionsTest\
 	org.rocksdb.DirectSliceTest\
-	org.rocksdb.util.EnvironmentTest\
 	org.rocksdb.EnvOptionsTest\
 	org.rocksdb.EventListenerTest\
-	org.rocksdb.IngestExternalFileOptionsTest\
-	org.rocksdb.util.IntComparatorTest\
-	org.rocksdb.util.JNIComparatorTest\
 	org.rocksdb.FilterTest\
+	org.rocksdb.FlushOptionsTest\
 	org.rocksdb.FlushTest\
+	org.rocksdb.HyperClockCacheTest\
 	org.rocksdb.ImportColumnFamilyTest\
 	org.rocksdb.InfoLogLevelTest\
+	org.rocksdb.IngestExternalFileOptionsTest\
 	org.rocksdb.KeyExistsTest \
 	org.rocksdb.KeyMayExistTest\
-	org.rocksdb.ConcurrentTaskLimiterTest\
 	org.rocksdb.LoggerTest\
 	org.rocksdb.LRUCacheTest\
 	org.rocksdb.MemoryUtilTest\
@@ -154,10 +153,10 @@ JAVA_TESTS = \
 	org.rocksdb.MergeCFVariantsTest\
 	org.rocksdb.MergeTest\
 	org.rocksdb.MergeVariantsTest\
-	org.rocksdb.MultiColumnRegressionTest \
+	org.rocksdb.MixedOptionsTest\
+	org.rocksdb.MultiColumnRegressionTest\
 	org.rocksdb.MultiGetManyKeysTest\
 	org.rocksdb.MultiGetTest\
-	org.rocksdb.MixedOptionsTest\
 	org.rocksdb.MutableColumnFamilyOptionsTest\
 	org.rocksdb.MutableDBOptionsTest\
 	org.rocksdb.MutableOptionsGetSetTest \
@@ -166,48 +165,53 @@ JAVA_TESTS = \
 	org.rocksdb.OptimisticTransactionTest\
 	org.rocksdb.OptimisticTransactionDBTest\
 	org.rocksdb.OptimisticTransactionOptionsTest\
-	org.rocksdb.OptionsUtilTest\
 	org.rocksdb.OptionsTest\
-	org.rocksdb.PerfLevelTest \
+	org.rocksdb.OptionsUtilTest\
 	org.rocksdb.PerfContextTest \
+	org.rocksdb.PerfLevelTest \
+	org.rocksdb.PlainTableConfigTest\
 	org.rocksdb.PutCFVariantsTest\
+	org.rocksdb.PutMultiplePartsTest\
 	org.rocksdb.PutVariantsTest\
-	org.rocksdb.PlainTableConfigTest\
 	org.rocksdb.RateLimiterTest\
 	org.rocksdb.ReadOnlyTest\
 	org.rocksdb.ReadOptionsTest\
-	org.rocksdb.util.ReverseBytewiseComparatorIntTest\
-	org.rocksdb.RocksDBTest\
 	org.rocksdb.RocksDBExceptionTest\
-	org.rocksdb.DefaultEnvTest\
+	org.rocksdb.RocksDBTest\
 	org.rocksdb.RocksIteratorTest\
 	org.rocksdb.RocksMemEnvTest\
-	org.rocksdb.util.SizeUnitTest\
 	org.rocksdb.SecondaryDBTest\
 	org.rocksdb.SliceTest\
 	org.rocksdb.SnapshotTest\
 	org.rocksdb.SstFileManagerTest\
-	org.rocksdb.SstFileWriterTest\
 	org.rocksdb.SstFileReaderTest\
+	org.rocksdb.SstFileWriterTest\
 	org.rocksdb.SstPartitionerTest\
+	org.rocksdb.StatisticsCollectorTest\
+	org.rocksdb.StatisticsTest\
 	org.rocksdb.TableFilterTest\
 	org.rocksdb.TimedEnvTest\
-	org.rocksdb.TransactionTest\
-	org.rocksdb.TransactionDBTest\
-	org.rocksdb.TransactionOptionsTest\
 	org.rocksdb.TransactionDBOptionsTest\
+	org.rocksdb.TransactionDBTest\
 	org.rocksdb.TransactionLogIteratorTest\
+	org.rocksdb.TransactionOptionsTest\
+	org.rocksdb.TransactionTest\
 	org.rocksdb.TtlDBTest\
-	org.rocksdb.StatisticsTest\
-	org.rocksdb.StatisticsCollectorTest\
 	org.rocksdb.VerifyChecksumsTest\
 	org.rocksdb.WalFilterTest\
 	org.rocksdb.WALRecoveryModeTest\
 	org.rocksdb.WriteBatchHandlerTest\
 	org.rocksdb.WriteBatchTest\
 	org.rocksdb.WriteBatchThreadedTest\
-	org.rocksdb.WriteOptionsTest\
 	org.rocksdb.WriteBatchWithIndexTest\
+	org.rocksdb.WriteOptionsTest\
+	org.rocksdb.util.BytewiseComparatorIntTest\
+	org.rocksdb.util.BytewiseComparatorTest\
+	org.rocksdb.util.EnvironmentTest\
+	org.rocksdb.util.IntComparatorTest\
+	org.rocksdb.util.JNIComparatorTest\
+	org.rocksdb.util.ReverseBytewiseComparatorIntTest\
+	org.rocksdb.util.SizeUnitTest\
 	org.rocksdb.util.StdErrLoggerTest

 MAIN_SRC = src/main/java
@@ -343,7 +347,7 @@ clean-downloaded:
 javadocs: java
 	$(AM_V_GEN)mkdir -p $(JAVADOC)
-	$(AM_V_at)$(JAVADOC_CMD) -d $(JAVADOC) -sourcepath $(MAIN_SRC) -subpackages org
+	$(AM_V_at)$(JAVADOC_CMD) -Xwerror -d $(JAVADOC) -sourcepath $(MAIN_SRC) -subpackages org

 javalib: java java_test javadocs
diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
index fd7eef4d4cfb..45ad20e2cad0 100644
--- a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
+++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
@@ -10,14 +10,24 @@
 *
* At present, we just permit an overriding Java class to wrap a C++
* implementation
+ *
+ * @param <T> the concrete type of the {@link AbstractSlice} used by the compaction filter.
* Implementations of Comparators in Java should extend this class.
*/
public abstract class AbstractComparator
@@ -20,6 +20,11 @@ public abstract class AbstractComparator
super();
}
+ /**
+ * Construct an AbstractComparator.
+ *
+ * @param comparatorOptions options for the comparator.
+ */
protected AbstractComparator(final ComparatorOptions comparatorOptions) {
super(comparatorOptions.nativeHandle_);
}
@@ -59,7 +64,7 @@ ComparatorType getComparatorType() {
* Three-way key comparison. Implementations should provide a
* total order
* on keys that might be passed to it.
- *
+ *
* The implementation may modify the {@code ByteBuffer}s passed in, though
* it would be unconventional to modify the "limit" or any of the
* underlying bytes. As a callback, RocksJava will ensure that {@code a}
@@ -114,6 +119,11 @@ public void findShortSuccessor(final ByteBuffer key) {
// no-op
}
+ /**
+ * Returns true if we are using direct byte buffers.
+ *
+ * @return true if we are using direct byte buffers, false otherwise.
+ */
public final boolean usingDirectBuffers() {
return usingDirectBuffers(nativeHandle_);
}
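
[Editor's note: as context for the three-way `compare` contract documented in the hunk above, here is a minimal sketch of a pure-Java comparator. The class and its `name()` string are illustrative, not part of this patch.]

```java
import java.nio.ByteBuffer;
import org.rocksdb.AbstractComparator;
import org.rocksdb.ComparatorOptions;

// Illustrative only: orders keys by unsigned lexicographic byte order.
public class UnsignedBytesComparator extends AbstractComparator {
  public UnsignedBytesComparator(final ComparatorOptions options) {
    super(options);
  }

  @Override
  public String name() {
    return "example.UnsignedBytesComparator";
  }

  @Override
  public int compare(final ByteBuffer a, final ByteBuffer b) {
    final int len = Math.min(a.remaining(), b.remaining());
    for (int i = 0; i < len; i++) {
      // Absolute gets relative to each buffer's current position,
      // so neither buffer's position is disturbed.
      final int cmp = (a.get(a.position() + i) & 0xFF) - (b.get(b.position() + i) & 0xFF);
      if (cmp != 0) {
        return cmp;
      }
    }
    return a.remaining() - b.remaining();
  }
}
```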
diff --git a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java
index d0ceef93d419..9bd1ff7694bc 100644
--- a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java
+++ b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java
@@ -12,7 +12,7 @@
* it holds methods which are called
* from C++ to interact with a Comparator
* written in Java.
- *
+ *
* Placing these bridge methods in this
* class keeps the API of the
* {@link org.rocksdb.AbstractComparator} clean.
diff --git a/java/src/main/java/org/rocksdb/AbstractEventListener.java b/java/src/main/java/org/rocksdb/AbstractEventListener.java
index c9371c45eb0c..5f29024bf063 100644
--- a/java/src/main/java/org/rocksdb/AbstractEventListener.java
+++ b/java/src/main/java/org/rocksdb/AbstractEventListener.java
@@ -12,28 +12,118 @@
*/
@SuppressWarnings("PMD.AvoidDuplicateLiterals")
public abstract class AbstractEventListener extends RocksCallbackObject implements EventListener {
+ /**
+ * Callback events that can be enabled.
+ */
public enum EnabledEventCallback {
+ /**
+ * Flush completed.
+ */
ON_FLUSH_COMPLETED((byte) 0x0),
+
+ /**
+ * Flush beginning.
+ */
ON_FLUSH_BEGIN((byte) 0x1),
+
+ /**
+ * Table file was deleted.
+ */
ON_TABLE_FILE_DELETED((byte) 0x2),
+
+ /**
+ * Compaction beginning.
+ */
ON_COMPACTION_BEGIN((byte) 0x3),
+
+ /**
+ * Compaction completed.
+ */
ON_COMPACTION_COMPLETED((byte) 0x4),
+
+ /**
+ * Table file created.
+ */
ON_TABLE_FILE_CREATED((byte) 0x5),
+
+ /**
+ * Started creation of Table file.
+ */
ON_TABLE_FILE_CREATION_STARTED((byte) 0x6),
+
+ /**
+ * Memtable has been sealed.
+ */
ON_MEMTABLE_SEALED((byte) 0x7),
+
+ /**
+ * Started deletion of Column Family handle.
+ */
ON_COLUMN_FAMILY_HANDLE_DELETION_STARTED((byte) 0x8),
+
+ /**
+ * External file ingested.
+ */
ON_EXTERNAL_FILE_INGESTED((byte) 0x9),
+
+ /**
+ * Background error.
+ */
ON_BACKGROUND_ERROR((byte) 0xA),
+
+ /**
+ * Stall conditions have been changed.
+ */
ON_STALL_CONDITIONS_CHANGED((byte) 0xB),
+
+ /**
+ * File read has finished.
+ */
ON_FILE_READ_FINISH((byte) 0xC),
+
+ /**
+ * File write has finished.
+ */
ON_FILE_WRITE_FINISH((byte) 0xD),
+
+ /**
+ * File flush has finished.
+ */
ON_FILE_FLUSH_FINISH((byte) 0xE),
+
+ /**
+ * File sync has finished.
+ */
ON_FILE_SYNC_FINISH((byte) 0xF),
+
+ /**
+ * File range sync has finished.
+ */
ON_FILE_RANGE_SYNC_FINISH((byte) 0x10),
+
+ /**
+ * File truncation has finished.
+ */
ON_FILE_TRUNCATE_FINISH((byte) 0x11),
+
+ /**
+ * Closing a file has finished.
+ */
ON_FILE_CLOSE_FINISH((byte) 0x12),
+
+ /**
+ * Flag has been set to be notified on file IO.
+ */
SHOULD_BE_NOTIFIED_ON_FILE_IO((byte) 0x13),
+
+ /**
+ * Error recovery beginning.
+ */
ON_ERROR_RECOVERY_BEGIN((byte) 0x14),
+
+ /**
+ * Error recovery completed.
+ */
ON_ERROR_RECOVERY_COMPLETED((byte) 0x15);
private final byte value;
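
[Editor's note: to illustrate how these `EnabledEventCallback` values are consumed, a minimal sketch of a listener that subscribes only to flush-completed events. The class name and log output are illustrative; restricting the enabled callbacks avoids JNI overhead for events the listener never handles.]

```java
import org.rocksdb.AbstractEventListener;
import org.rocksdb.FlushJobInfo;
import org.rocksdb.RocksDB;

// Illustrative only: only ON_FLUSH_COMPLETED callbacks cross the JNI boundary.
public class FlushCompletedListener extends AbstractEventListener {
  public FlushCompletedListener() {
    super(EnabledEventCallback.ON_FLUSH_COMPLETED);
  }

  @Override
  public void onFlushCompleted(final RocksDB db, final FlushJobInfo flushJobInfo) {
    System.out.println("Flush completed for CF: " + flushJobInfo.getColumnFamilyName());
  }
}
```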
diff --git a/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
index 173d63e9011e..8c500d8a5df2 100644
--- a/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
+++ b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
@@ -22,6 +22,11 @@ public abstract class AbstractImmutableNativeReference
*/
protected final AtomicBoolean owningHandle_;
+ /**
+ * Construct an AbstractImmutableNativeReference.
+ *
+ * @param owningHandle true if this Java object owns the underlying C++ object, false otherwise.
+ */
protected AbstractImmutableNativeReference(final boolean owningHandle) {
this.owningHandle_ = new AtomicBoolean(owningHandle);
}
diff --git a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
index 577e89593eaa..7b3f5b39c2d4 100644
--- a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
+++ b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
@@ -9,12 +9,26 @@
* The constructor is protected, so it will always be used as a base class.
*/
public class AbstractMutableOptions {
+ /**
+ * Separator between Key/Value pairs.
+ */
protected static final String KEY_VALUE_PAIR_SEPARATOR = ";";
+
+ /**
+ * Separator between Key and Value.
+ */
protected static final char KEY_VALUE_SEPARATOR = '=';
+
+ /**
+ * Separator between integers in an integer array.
+ */
static final String INT_ARRAY_INT_SEPARATOR = ":";
private static final String HAS_NOT_BEEN_SET = " has not been set";
+ /**
+ * the keys.
+ */
protected final String[] keys;
private final String[] values;
@@ -62,12 +76,24 @@ public String toString() {
return buffer.toString();
}
+ /**
+ * Builder base class for constructing Mutable Options.
+ *
+ * @param <T> the concrete type of the mutable options being built.
public abstract class AbstractRocksIterator<P extends RocksObject>
    extends RocksObject implements RocksIteratorInterface {
final P parent_;
+ /**
+ * Constructs an AbstractRocksIterator.
+ *
+ * @param parent the parent object from which the Rocks Iterator was created.
+ * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native
+ * RocksDB C++ RocksIterator.
+ */
protected AbstractRocksIterator(final P parent,
final long nativeHandle) {
super(nativeHandle);
@@ -111,7 +118,7 @@ public void refresh() throws RocksDBException {
@Override
public void refresh(final Snapshot snapshot) throws RocksDBException {
assert (isOwningHandle());
- refresh1(nativeHandle_, snapshot.getNativeHandle());
+ refresh1(nativeHandle_, snapshot.nativeHandle_);
}
@Override
diff --git a/java/src/main/java/org/rocksdb/AbstractSlice.java b/java/src/main/java/org/rocksdb/AbstractSlice.java
index a73d9c644f17..0d00a056a2da 100644
--- a/java/src/main/java/org/rocksdb/AbstractSlice.java
+++ b/java/src/main/java/org/rocksdb/AbstractSlice.java
@@ -23,13 +23,23 @@
* the Java @see org.rocksdb.AbstractComparator subclass, it disposes the
* C++ BaseComparatorJniCallback subclass, which in turn destroys the
* Java @see org.rocksdb.AbstractSlice subclass Objects.
+ *
+ * @param <T> the concrete Java type of the slice's data, e.g. byte[] or ByteBuffer.
+ * The updates are applied in the order in which they are added
+ * to the WriteBatch. For example, the value of "key" will be "v3"
+ * after the following batch is written:
+ *
* Taken from include/rocksdb/advanced_options.h
+ *
+ * @param <T> the concrete type of the instance.
+ * Default: false
* Taken from include/rocksdb/advanced_options.h
* and MutableCFOptions in util/cf_options.h
+ *
+ * @param <T> the concrete type of the instance.
* See {@link IndexShorteningMode}.
*
* @param indexShortening the index shortening mode.
@@ -937,7 +940,7 @@ public BlockBasedTableConfig setCacheNumShardBits(
*
* @deprecated This option is now deprecated. No matter what value it
* is set to, it will behave as
- * if {@link #hashIndexAllowCollision()} == true.
+ * if {@code setHashIndexAllowCollision(true)}
*/
@Deprecated
public boolean hashIndexAllowCollision() {
diff --git a/java/src/main/java/org/rocksdb/BuiltinComparator.java b/java/src/main/java/org/rocksdb/BuiltinComparator.java
index 2c89bf218d1d..f4806fe57d72 100644
--- a/java/src/main/java/org/rocksdb/BuiltinComparator.java
+++ b/java/src/main/java/org/rocksdb/BuiltinComparator.java
@@ -6,15 +6,16 @@
package org.rocksdb;
/**
- * Builtin RocksDB comparators
- *
- *
+ * Compaction filter for removing expired Cassandra data with TTL.
+ * It is also in charge of removing tombstones that have been
+ * promoted to kValue type after a series of merges during compaction.
*/
public class CassandraCompactionFilter
extends AbstractCompactionFilter<Slice>
* CassandraValueMergeOperator is a merge operator that merges two cassandra wide column
* values.
*/
public class CassandraValueMergeOperator extends MergeOperator {
+ /**
+ * Constructs a new CassandraValueMergeOperator.
+ *
+ * @param gcGracePeriodInSeconds the grace period in seconds for gc.
+ */
public CassandraValueMergeOperator(final int gcGracePeriodInSeconds) {
super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, 0));
}
+ /**
+ * Constructs a new CassandraValueMergeOperator.
+ *
+ * @param gcGracePeriodInSeconds the grace period in seconds for gc.
+ * @param operandsLimit the maximum size of the operands list before merge is applied.
+ */
public CassandraValueMergeOperator(final int gcGracePeriodInSeconds, final int operandsLimit) {
super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, operandsLimit));
}
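
[Editor's note: a minimal usage sketch for the constructors documented above. The grace-period value and the comment about opening a database are illustrative, not part of the patch.]

```java
import org.rocksdb.CassandraValueMergeOperator;
import org.rocksdb.Options;

public class CassandraMergeExample {
  public static void main(final String[] args) {
    // Illustrative: a 10-day GC grace period, expressed in seconds.
    try (final CassandraValueMergeOperator mergeOperator =
             new CassandraValueMergeOperator(10 * 24 * 60 * 60);
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setMergeOperator(mergeOperator)) {
      // open a RocksDB instance with these options and issue merge() calls...
    }
  }
}
```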
diff --git a/java/src/main/java/org/rocksdb/Checkpoint.java b/java/src/main/java/org/rocksdb/Checkpoint.java
index e50068a6e32c..4cdfe0ff20e7 100644
--- a/java/src/main/java/org/rocksdb/Checkpoint.java
+++ b/java/src/main/java/org/rocksdb/Checkpoint.java
@@ -50,6 +50,22 @@ public void createCheckpoint(final String checkpointPath)
createCheckpoint(nativeHandle_, checkpointPath);
}
+ /**
+ * Exports all live SST files of a specified Column Family into {@code exportPath}.
+ *
+ * Always triggers a flush.
+ *
+ * @param columnFamilyHandle the column family to export.
+ *
+ * @param exportPath should not already exist and will be created by this API.
+ * SST files will be created as hard links when the directory specified
+ * is in the same partition as the db directory, copied otherwise.
+ *
+ * @return metadata about the exported SST files.
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
public ExportImportFilesMetaData exportColumnFamily(final ColumnFamilyHandle columnFamilyHandle,
final String exportPath) throws RocksDBException {
return new ExportImportFilesMetaData(
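
[Editor's note: a minimal sketch of calling the `exportColumnFamily` API documented above; the paths are invented for illustration.]

```java
import org.rocksdb.Checkpoint;
import org.rocksdb.ExportImportFilesMetaData;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class ExportColumnFamilyExample {
  public static void main(final String[] args) throws RocksDBException {
    try (final RocksDB db = RocksDB.open("/tmp/source-db");
         final Checkpoint checkpoint = Checkpoint.create(db);
         // Flushes, then hard-links (same partition) or copies the live SST files.
         final ExportImportFilesMetaData metaData = checkpoint.exportColumnFamily(
             db.getDefaultColumnFamily(), "/tmp/exported-default-cf")) {
      // metaData can later be fed to RocksDB#createColumnFamilyWithImport().
    }
  }
}
```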
diff --git a/java/src/main/java/org/rocksdb/ChecksumType.java b/java/src/main/java/org/rocksdb/ChecksumType.java
index 5b3d2249250f..556220f8baa5 100644
--- a/java/src/main/java/org/rocksdb/ChecksumType.java
+++ b/java/src/main/java/org/rocksdb/ChecksumType.java
@@ -14,18 +14,20 @@ public enum ChecksumType {
*/
kNoChecksum((byte) 0),
/**
- * CRC32 Checksum
+ * CRC32 Checksum.
*/
kCRC32c((byte) 1),
/**
- * XX Hash
+ * XX Hash.
*/
kxxHash((byte) 2),
/**
- * XX Hash 64
+ * XX Hash 64.
*/
kxxHash64((byte) 3),
-
+ /**
+ * XX Hash v3.
+ */
kXXH3((byte) 4);
/**
diff --git a/java/src/main/java/org/rocksdb/ClockCache.java b/java/src/main/java/org/rocksdb/ClockCache.java
index afbd7f75532c..452ef33f7b2c 100644
--- a/java/src/main/java/org/rocksdb/ClockCache.java
+++ b/java/src/main/java/org/rocksdb/ClockCache.java
@@ -14,6 +14,7 @@
* configuration parameter that is not provided by this API. This function
* simply returns a new LRUCache for functional compatibility.
*/
+@Deprecated
public class ClockCache extends Cache {
/**
* Create a new cache with a fixed size capacity.
@@ -22,6 +23,7 @@ public class ClockCache extends Cache {
*
* @param capacity The fixed size capacity of the cache
*/
+ @Deprecated
public ClockCache(final long capacity) {
super(newClockCache(capacity, -1, false));
}
@@ -39,6 +41,7 @@ public ClockCache(final long capacity) {
* @param numShardBits The cache is sharded to 2^numShardBits shards,
* by hash of the key
*/
+ @Deprecated
public ClockCache(final long capacity, final int numShardBits) {
super(newClockCache(capacity, numShardBits, false));
}
@@ -58,8 +61,9 @@ public ClockCache(final long capacity, final int numShardBits) {
* by hash of the key
* @param strictCapacityLimit insert to the cache will fail when cache is full
*/
- public ClockCache(final long capacity, final int numShardBits,
- final boolean strictCapacityLimit) {
+ @Deprecated
+ public ClockCache(
+ final long capacity, final int numShardBits, final boolean strictCapacityLimit) {
super(newClockCache(capacity, numShardBits, strictCapacityLimit));
}
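
[Editor's note: since every `ClockCache` constructor now just returns an LRU-backed cache, a minimal sketch of the non-deprecated equivalent; the capacity value is illustrative.]

```java
import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.LRUCache;
import org.rocksdb.Options;

public class BlockCacheExample {
  public static void main(final String[] args) {
    // A 64 MiB LRU block cache, functionally what ClockCache now provides.
    try (final LRUCache cache = new LRUCache(64 * 1024 * 1024);
         final Options options = new Options().setTableFormatConfig(
             new BlockBasedTableConfig().setBlockCache(cache))) {
      // open a RocksDB instance with these options...
    }
  }
}
```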
diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
index 00bff0b07307..054d35adf23d 100644
--- a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
+++ b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
@@ -123,6 +123,11 @@ public int hashCode() {
}
}
+ /**
+ * Returns true if this is the handle for the default column family.
+ *
+ * @return true if this is the handle for the default column family, false otherwise.
+ */
protected boolean isDefaultColumnFamily() {
return nativeHandle_ == rocksDB_.getDefaultColumnFamily().nativeHandle_;
}
diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
index d25f8c73bc7b..aa7fe8f944ee 100644
--- a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
+++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
@@ -147,7 +147,7 @@ public ColumnFamilyOptions optimizeForSmallDb() {
@Override
public ColumnFamilyOptions optimizeForSmallDb(final Cache cache) {
- optimizeForSmallDb(nativeHandle_, cache.getNativeHandle());
+ optimizeForSmallDb(nativeHandle_, cache.nativeHandle_);
return this;
}
diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
index 4776773bd8bd..40c7c5806409 100644
--- a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
@@ -8,6 +8,11 @@
import java.util.Collection;
import java.util.List;
+/**
+ * Interface for Column Family Options.
+ *
+ * @param <T> the concrete type of the instance.
* This could be a new value or a deletion entry for that key so this field
* sums up all updated and deleted keys.
*
@@ -149,7 +154,7 @@ public long totalInputRawValueBytes() {
/**
* Get the number of deletion entries before compaction.
- *
+ *
* Deletion entries can disappear after compaction because they expired.
*
* @return the number of deletion entries before compaction.
@@ -182,7 +187,7 @@ public long numCorruptKeys() {
/**
* Get the Time spent on file's Append() call.
- *
+ *
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
*
* @return the Time spent on file's Append() call.
@@ -193,7 +198,7 @@ public long fileWriteNanos() {
/**
* Get the Time spent on sync file range.
- *
+ *
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
*
* @return the Time spent on sync file range.
@@ -204,7 +209,7 @@ public long fileRangeSyncNanos() {
/**
* Get the Time spent on file fsync.
- *
+ *
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
*
* @return the Time spent on file fsync.
@@ -215,7 +220,7 @@ public long fileFsyncNanos() {
/**
* Get the Time spent on preparing file write (falocate, etc)
- *
+ *
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
*
* @return the Time spent on preparing file write (falocate, etc).
diff --git a/java/src/main/java/org/rocksdb/CompactionOptions.java b/java/src/main/java/org/rocksdb/CompactionOptions.java
index 08cbdf6378b3..47ebfc8d0f28 100644
--- a/java/src/main/java/org/rocksdb/CompactionOptions.java
+++ b/java/src/main/java/org/rocksdb/CompactionOptions.java
@@ -13,14 +13,16 @@
* calls.
*/
public class CompactionOptions extends RocksObject {
-
+ /**
+ * Constructs a new CompactionOptions.
+ */
public CompactionOptions() {
super(newCompactionOptions());
}
/**
* Get the compaction output compression type.
- *
+ *
* See {@link #setCompression(CompressionType)}.
*
* @return the compression type.
@@ -32,9 +34,9 @@ public CompressionType compression() {
/**
* Set the compaction output compression type.
- *
+ *
* Default: snappy
- *
+ *
* If set to {@link CompressionType#DISABLE_COMPRESSION_OPTION},
* RocksDB will choose compression type according to the
* {@link ColumnFamilyOptions#compressionType()}, taking into account
@@ -52,7 +54,7 @@ public CompactionOptions setCompression(final CompressionType compression) {
/**
* Get the compaction output file size limit.
- *
+ *
* See {@link #setOutputFileSizeLimit(long)}.
*
* @return the file size limit.
@@ -63,7 +65,7 @@ public long outputFileSizeLimit() {
/**
* Compaction will create files of size {@link #outputFileSizeLimit()}.
- *
+ *
* Default: 2^64-1, which means that compaction will create a single file
*
* @param outputFileSizeLimit the size limit
@@ -90,9 +92,9 @@ public int maxSubcompactions() {
* This value represents the maximum number of threads that will
* concurrently perform a compaction job by breaking it into multiple,
* smaller ones that are run simultaneously.
- *
+ *
* Default: 0 (i.e. no subcompactions)
- *
+ *
* If > 0, it will replace the option in
* {@link DBOptions#maxSubcompactions()} for this compaction.
*
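
[Editor's note: a minimal sketch of the options documented above, as they might be passed to a manual `RocksDB#compactFiles` call; the compression type and subcompaction count are illustrative.]

```java
import org.rocksdb.CompactionOptions;
import org.rocksdb.CompressionType;

public class CompactionOptionsExample {
  public static void main(final String[] args) {
    try (final CompactionOptions compactionOptions = new CompactionOptions()
             .setCompression(CompressionType.LZ4_COMPRESSION)
             // Break the job into up to 4 concurrently-run subcompactions.
             .setMaxSubcompactions(4)) {
      // pass to RocksDB#compactFiles(compactionOptions, ...)
    }
  }
}
```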
diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
index 24ebe0da2ff1..7ea28695815d 100644
--- a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
+++ b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
@@ -9,7 +9,9 @@
* Options for FIFO Compaction
*/
public class CompactionOptionsFIFO extends RocksObject {
-
+ /**
+ * Constructs a new CompactionOptionsFIFO.
+ */
public CompactionOptionsFIFO() {
super(newCompactionOptionsFIFO());
}
diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
index f18915b8f569..54013b071cfd 100644
--- a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
+++ b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
@@ -9,7 +9,9 @@
* Options for Universal Compaction
*/
public class CompactionOptionsUniversal extends RocksObject {
-
+ /**
+ * Constructs a new CompactionOptionsUniversal.
+ */
public CompactionOptionsUniversal() {
super(newCompactionOptionsUniversal());
}
diff --git a/java/src/main/java/org/rocksdb/CompactionReason.java b/java/src/main/java/org/rocksdb/CompactionReason.java
index 46ec33f3f141..68828c3a9ef5 100644
--- a/java/src/main/java/org/rocksdb/CompactionReason.java
+++ b/java/src/main/java/org/rocksdb/CompactionReason.java
@@ -5,7 +5,13 @@
package org.rocksdb;
+/**
+ * Reasons for compaction.
+ */
public enum CompactionReason {
+ /**
+ * Unknown.
+ */
kUnknown((byte)0x0),
/**
diff --git a/java/src/main/java/org/rocksdb/CompactionStyle.java b/java/src/main/java/org/rocksdb/CompactionStyle.java
index 7b955a7a248c..6a1de336abfb 100644
--- a/java/src/main/java/org/rocksdb/CompactionStyle.java
+++ b/java/src/main/java/org/rocksdb/CompactionStyle.java
@@ -35,9 +35,24 @@
* FIFO Compaction
*/
public enum CompactionStyle {
+ /**
+ * Level Compaction.
+ */
LEVEL((byte) 0x0),
+
+ /**
+ * Universal Compaction.
+ */
UNIVERSAL((byte) 0x1),
+
+ /**
+ * First-in First-out Compaction.
+ */
FIFO((byte) 0x2),
+
+ /**
+ * No compaction.
+ */
NONE((byte) 0x3);
private final byte value;
diff --git a/java/src/main/java/org/rocksdb/ComparatorOptions.java b/java/src/main/java/org/rocksdb/ComparatorOptions.java
index da287b51816b..3247a85601a5 100644
--- a/java/src/main/java/org/rocksdb/ComparatorOptions.java
+++ b/java/src/main/java/org/rocksdb/ComparatorOptions.java
@@ -13,6 +13,9 @@
* instance becomes out-of-scope to release the allocated memory in C++.
*/
public class ComparatorOptions extends RocksObject {
+ /**
+ * Constructs a new ComparatorOptions.
+ */
public ComparatorOptions() {
super(newComparatorOptions());
}
diff --git a/java/src/main/java/org/rocksdb/CompressionOptions.java b/java/src/main/java/org/rocksdb/CompressionOptions.java
index e6316af451e6..acc11863d02c 100644
--- a/java/src/main/java/org/rocksdb/CompressionOptions.java
+++ b/java/src/main/java/org/rocksdb/CompressionOptions.java
@@ -9,34 +9,93 @@
* Options for Compression
*/
public class CompressionOptions extends RocksObject {
+ /**
+ * RocksDB's generic default compression level. Internally it'll be translated
+ * to the default compression level specific to the library being used.
+ */
+ public static final int DEFAULT_COMPRESSION_LEVEL = 32_767;
+ /**
+ * Constructs a new CompressionOptions.
+ */
public CompressionOptions() {
super(newCompressionOptions());
}
+ /**
+ * Set the Window size.
+ * Zlib only.
+ *
+ * @param windowBits the size of the window.
+ *
+ * @return the reference to the current compression options.
+ */
public CompressionOptions setWindowBits(final int windowBits) {
setWindowBits(nativeHandle_, windowBits);
return this;
}
+ /**
+ * Get the Window size.
+ * Zlib only.
+ *
+ * @return the size of the window.
+ */
public int windowBits() {
return windowBits(nativeHandle_);
}
+ /**
+ * Compression "level" applicable to zstd, zlib, LZ4, and LZ4HC. Except for
+ * {@link #DEFAULT_COMPRESSION_LEVEL}, the meaning of each value depends
+ * on the compression algorithm. Decreasing across non-
+ * {@link #DEFAULT_COMPRESSION_LEVEL} values will either favor speed over
+ * compression ratio or have no effect.
+ *
+ * In LZ4 specifically, the absolute value of a negative `level` internally
+ * configures the `acceleration` parameter. For example, set `level=-10` for
+ * `acceleration=10`. This negation is necessary to ensure decreasing `level`
+ * values favor speed over compression ratio.
+ *
+ * @param level the compression level.
+ *
+ * @return the reference to the current compression options.
+ */
public CompressionOptions setLevel(final int level) {
setLevel(nativeHandle_, level);
return this;
}
+ /**
+ * Get the Compression "level".
+ *
+ * See {@link #setLevel(int)}
+ *
+ * @return the compression level.
+ */
public int level() {
return level(nativeHandle_);
}
+ /**
+ * Set the compression strategy.
+ * Zlib only.
+ *
+ * @param strategy the strategy.
+ *
+ * @return the reference to the current compression options.
+ */
public CompressionOptions setStrategy(final int strategy) {
setStrategy(nativeHandle_, strategy);
return this;
}
+ /**
+ * Get the compression strategy.
+ * Zlib only.
+ *
+ * @return the strategy.
+ */
public int strategy() {
return strategy(nativeHandle_);
}
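
[Editor's note: a minimal sketch of the LZ4 acceleration behaviour described in `setLevel` above; the level value is illustrative.]

```java
import org.rocksdb.CompressionOptions;
import org.rocksdb.CompressionType;
import org.rocksdb.Options;

public class CompressionLevelExample {
  public static void main(final String[] args) {
    try (final CompressionOptions compressionOptions =
             new CompressionOptions().setLevel(-10); // LZ4: acceleration=10
         final Options options = new Options()
             .setCompressionType(CompressionType.LZ4_COMPRESSION)
             .setCompressionOptions(compressionOptions)) {
      // open a RocksDB instance with these options...
    }
  }
}
```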
diff --git a/java/src/main/java/org/rocksdb/CompressionType.java b/java/src/main/java/org/rocksdb/CompressionType.java
index d1ecf0ac84c5..4f683d036735 100644
--- a/java/src/main/java/org/rocksdb/CompressionType.java
+++ b/java/src/main/java/org/rocksdb/CompressionType.java
@@ -14,14 +14,49 @@
* compression method (if any) is used to compress a block.
* Features:
* - Throttle the deletion rate of the SST files.
* - Keep track the total size of all SST files.
@@ -167,7 +171,7 @@ public interface DBOptionsInterface
* Limitations:
* - Only track and throttle deletes of SST files in
* first db_path (db_name if db_paths is empty).
@@ -208,7 +212,7 @@ public interface DBOptionsInterface
* Default: 16
*
* @param maxFileOpeningThreads the maximum number of threads to use to
@@ -222,7 +226,7 @@ public interface DBOptionsInterface
* Default: 16
*
* @return the maximum number of threads to use to open files
@@ -278,27 +282,27 @@ public interface DBOptionsInterface
* For example, you have a flash device with 10GB allocated for the DB,
* as well as a hard drive of 2TB, you should config it to be:
* [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
- *
+ *
* The system will try to guarantee data under each path is close to but
* not larger than the target size. But current and future file sizes used
* by determining where to place a file are based on best-effort estimation,
* which means there is a chance that the actual size under the directory
* is slightly more than target size under some workloads. User should give
* some buffer room for those cases.
- *
+ *
* If none of the paths has sufficient room to place a file, the file will
* be placed to the last path anyway, despite to the target size.
- *
+ *
* Placing newer data to earlier paths is also best-efforts. User should
* expect user files to be placed in higher levels in some extreme cases.
- *
+ *
* If left empty, only one path will be used, which is db_name passed when
* opening the DB.
- *
+ *
* Default: empty
*
* @param dbPaths the paths and target sizes
@@ -311,27 +315,27 @@ public interface DBOptionsInterface
* For example, you have a flash device with 10GB allocated for the DB,
* as well as a hard drive of 2TB, you should config it to be:
* [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
- *
+ *
* The system will try to guarantee data under each path is close to but
* not larger than the target size. But current and future file sizes used
* by determining where to place a file are based on best-effort estimation,
* which means there is a chance that the actual size under the directory
* is slightly more than target size under some workloads. User should give
* some buffer room for those cases.
- *
+ *
* If none of the paths has sufficient room to place a file, the file will
* be placed to the last path anyway, despite to the target size.
- *
+ *
* Placing newer data to earlier paths is also best-efforts. User should
* expect user files to be placed in higher levels in some extreme cases.
- *
+ *
* If left empty, only one path will be used, which is db_name passed when
* opening the DB.
- *
+ *
* Default: {@link java.util.Collections#emptyList()}
*
* @return dbPaths the paths and target sizes
@@ -352,7 +356,7 @@ public interface DBOptionsInterface
* If it is empty, the log files will be in the same dir as data.
* If it is non empty, the log files will be in the specified dir,
* and the db data dir's absolute path will be used as the log file
@@ -377,7 +381,7 @@ public interface DBOptionsInterface
* If it is empty, the log files will be in the same dir as data,
* dbname is used as the data dir by default
* If it is non empty, the log files will be in kept the specified dir.
@@ -439,7 +443,7 @@ public interface DBOptionsInterface
* Specifies the maximum number of concurrent background flush jobs.
* If you're increasing this, also consider increasing number of threads in
* HIGH priority thread pool. For more information, see
@@ -463,7 +467,7 @@ public interface DBOptionsInterface
* Returns the maximum number of concurrent background flush jobs.
* If you're increasing this, also consider increasing number of threads in
* HIGH priority thread pool. For more information, see
@@ -542,16 +546,16 @@ public interface DBOptionsInterface
* If non-zero, we will reuse previously written log files for new
* logs, overwriting the old data. The value indicates how many
* such files we will keep around at any point in time for later
* use.
- *
+ *
* This is more efficient because the blocks are already
* allocated and fdatasync does not need to update the inode after
* each write.
- *
+ *
* Default: 0
*
* @param recycleLogFileNum the number of log files to keep for recycling
@@ -562,16 +566,16 @@ public interface DBOptionsInterface
* If non-zero, we will reuse previously written log files for new
* logs, overwriting the old data. The value indicates how many
* such files we will keep around at any point in time for later
* use.
- *
+ *
* This is more efficient because the blocks are already
* allocated and fdatasync does not need to update the inode after
* each write.
- *
+ *
* Default: 0
*
* @return the number of log files kept for recycling
@@ -617,17 +621,17 @@ public interface DBOptionsInterface
* When both are zero, obsolete WALs will not be archived and will be deleted
* immediately. Otherwise, obsolete WALs will be archived prior to deletion.
- *
+ *
* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the
* earliest will be deleted until the total size of the archive falls below
* this limit. All empty WALs will be deleted.
- *
+ *
* When `WAL_ttl_seconds` is nonzero, archived WALs older than
* `WAL_ttl_seconds` will be deleted.
- *
+ *
* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived
* WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only
* `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten
@@ -643,17 +647,17 @@ public interface DBOptionsInterface
* When both are zero, obsolete WALs will not be archived and will be deleted
* immediately. Otherwise, obsolete WALs will be archived prior to deletion.
- *
+ *
* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the
* earliest will be deleted until the total size of the archive falls below
* this limit. All empty WALs will be deleted.
- *
+ *
* When `WAL_ttl_seconds` is nonzero, archived WALs older than
* `WAL_ttl_seconds` will be deleted.
- *
+ *
* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived
* WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only
* `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten
@@ -668,17 +672,17 @@ public interface DBOptionsInterface
* When both are zero, obsolete WALs will not be archived and will be deleted
* immediately. Otherwise, obsolete WALs will be archived prior to deletion.
- *
+ *
* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the
* earliest will be deleted until the total size of the archive falls below
* this limit. All empty WALs will be deleted.
- *
+ *
* When `WAL_ttl_seconds` is nonzero, archived WALs older than
* `WAL_ttl_seconds` will be deleted.
- *
+ *
* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived
* WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only
* `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten
@@ -694,17 +698,17 @@ public interface DBOptionsInterface
* When both are zero, obsolete WALs will not be archived and will be deleted
* immediately. Otherwise, obsolete WALs will be archived prior to deletion.
- *
+ *
* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the
* earliest will be deleted until the total size of the archive falls below
* this limit. All empty WALs will be deleted.
- *
+ *
* When `WAL_ttl_seconds` is nonzero, archived WALs older than
* `WAL_ttl_seconds` will be deleted.
- *
+ *
* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived
* WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only
* `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten
@@ -720,7 +724,7 @@ public interface DBOptionsInterface
* Default: 1 MB
*
* @param maxWriteBatchGroupSizeBytes the maximum limit of number of bytes, see description.
@@ -732,7 +736,7 @@ public interface DBOptionsInterface
* Default: 1 MB
*
* @return the maximum limit of number of bytes, see description.
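
[Editor's note: a minimal sketch of the WAL archiving knobs described in the `WAL_ttl_seconds` / `WAL_size_limit_MB` hunks above; the TTL and size values are illustrative.]

```java
import org.rocksdb.DBOptions;

public class WalArchiveExample {
  public static void main(final String[] args) {
    // Keep archived WALs for at most one hour, capping the archive at 1 GB.
    try (final DBOptions dbOptions = new DBOptions()
             .setCreateIfMissing(true)
             .setWalTtlSeconds(60 * 60)
             .setWalSizeLimitMB(1024)) {
      // use with RocksDB#open(dbOptions, path, columnFamilyDescriptors, handles)
    }
  }
}
```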
@@ -885,13 +889,13 @@ public interface DBOptionsInterface
* This is distinct from {@link ColumnFamilyOptions#writeBufferSize()},
* which enforces a limit for a single memtable.
- *
+ *
* This feature is disabled by default. Specify a non-zero value
* to enable it.
- *
+ *
* Default: 0 (disabled)
*
* @param dbWriteBufferSize the size of the write buffer
@@ -903,7 +907,7 @@ public interface DBOptionsInterface
* Check
* https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager
* for more details on when to use it
@@ -925,13 +929,13 @@ public interface DBOptionsInterface
* This is distinct from {@link ColumnFamilyOptions#writeBufferSize()},
* which enforces a limit for a single memtable.
- *
+ *
* This feature is disabled by default. Specify a non-zero value
* to enable it.
- *
+ *
* Default: 0 (disabled)
*
* @return the size of the write buffer
@@ -964,7 +968,7 @@ public interface DBOptionsInterface
* Note: the RocksJava API currently only supports EventListeners implemented in Java.
* It could be extended in future to also support adding/removing EventListeners implemented in
* C++.
@@ -978,7 +982,7 @@ public interface DBOptionsInterface
* Note: the RocksJava API currently only supports EventListeners implemented in Java.
* It could be extended in future to also support adding/removing EventListeners implemented in
* C++.
@@ -990,7 +994,7 @@ public interface DBOptionsInterface
* Default: false
*
* @param enableThreadTracking true to enable tracking
@@ -1002,7 +1006,7 @@ public interface DBOptionsInterface
* Default: false
*
* @return true if tracking is enabled
@@ -1013,7 +1017,7 @@ public interface DBOptionsInterface
* If {@link #enablePipelinedWrite()} is true, separate write thread queue is
* maintained for WAL write and memtable write. A write thread first enter WAL
* writer queue and then memtable writer queue. Pending thread on the WAL
@@ -1021,7 +1025,7 @@ public interface DBOptionsInterface
* Default: false
*
* @param enablePipelinedWrite true to enabled pipelined writes
@@ -1048,7 +1052,7 @@ public interface DBOptionsInterface
* By default, i.e., when it is false, rocksdb does not advance the sequence
* number for new snapshots unless all the writes with lower sequence numbers
* are already finished. This provides the immutability that we except from
@@ -1193,7 +1197,7 @@ T setEnableWriteThreadAdaptiveYield(
* compaction decision by loading table properties from many files.
* Turning off this feature will improve DBOpen time especially in
* disk environment.
- *
+ *
* Default: false
*
* @param skipStatsUpdateOnDbOpen true if updating stats will be skipped
@@ -1207,7 +1211,7 @@ T setEnableWriteThreadAdaptiveYield(
* compaction decision by loading table properties from many files.
* Turning off this feature will improve DBOpen time especially in
* disk environment.
- *
+ *
* Default: false
*
* @return true if updating stats will be skipped
@@ -1221,7 +1225,7 @@ T setEnableWriteThreadAdaptiveYield(
* We'll still check that all required sst files exist.
* If {@code paranoid_checks} is false, this option is ignored, and sst files are
* not checked at all.
- *
+ *
* Default: false
*
* @param skipCheckingSstFileSizesOnDbOpen if true, then SST file sizes will not be checked
@@ -1237,7 +1241,7 @@ T setEnableWriteThreadAdaptiveYield(
* We'll still check that all required sst files exist.
* If {@code paranoid_checks} is false, this option is ignored, and sst files are
* not checked at all.
- *
+ *
* Default: false
*
* @return true, if file sizes will not be checked when calling {@link RocksDB#open(String)}.
@@ -1246,7 +1250,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* Recovery mode to control the consistency while replaying WAL
- *
+ *
* Default: {@link WALRecoveryMode#PointInTimeRecovery}
*
* @param walRecoveryMode The WAL recover mode
@@ -1257,7 +1261,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* Recovery mode to control the consistency while replaying WAL
- *
+ *
* Default: {@link WALRecoveryMode#PointInTimeRecovery}
*
* @return The WAL recover mode
@@ -1267,7 +1271,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* if set to false then recovery will fail when a prepared
* transaction is encountered in the WAL
- *
+ *
* Default: false
*
* @param allow2pc true if two-phase-commit is enabled
@@ -1279,7 +1283,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* if set to false then recovery will fail when a prepared
* transaction is encountered in the WAL
- *
+ *
* Default: false
*
* @return true if two-phase-commit is enabled
@@ -1288,7 +1292,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* A global cache for table-level rows.
- *
+ *
* Default: null (disabled)
*
* @param rowCache The global row cache
@@ -1299,7 +1303,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* A global cache for table-level rows.
- *
+ *
* Default: null (disabled)
*
* @return The global row cache
@@ -1331,7 +1335,7 @@ T setEnableWriteThreadAdaptiveYield(
* If true, then DB::Open / CreateColumnFamily / DropColumnFamily
* / SetOptions will fail if options file is not detected or properly
* persisted.
- *
+ *
* DEFAULT: false
*
* @param failIfOptionsFileError true if we should fail if there is an error
@@ -1345,7 +1349,7 @@ T setEnableWriteThreadAdaptiveYield(
* If true, then DB::Open / CreateColumnFamily / DropColumnFamily
* / SetOptions will fail if options file is not detected or properly
* persisted.
- *
+ *
* DEFAULT: false
*
* @return true if we should fail if there is an error in the options file
@@ -1355,7 +1359,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* If true, then print malloc stats together with rocksdb.stats
* when printing to LOG.
- *
+ *
* DEFAULT: false
*
* @param dumpMallocStats true if malloc stats should be printed to LOG
@@ -1367,7 +1371,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* If true, then print malloc stats together with rocksdb.stats
* when printing to LOG.
- *
+ *
* DEFAULT: false
*
* @return true if malloc stats should be printed to LOG
@@ -1380,7 +1384,7 @@ T setEnableWriteThreadAdaptiveYield(
* to avoid (but not guarantee not to) flush during recovery. Also, existing
* WAL logs will be kept, so that if crash happened before flush, we still
* have logs to recover from.
- *
+ *
* DEFAULT: false
*
* @param avoidFlushDuringRecovery true to try to avoid (but not guarantee
@@ -1396,7 +1400,7 @@ T setEnableWriteThreadAdaptiveYield(
* to avoid (but not guarantee not to) flush during recovery. Also, existing
* WAL logs will be kept, so that if crash happened before flush, we still
* have logs to recover from.
- *
+ *
* DEFAULT: false
*
* @return true to try to avoid (but not guarantee not to) flush during
@@ -1412,7 +1416,7 @@ T setEnableWriteThreadAdaptiveYield(
* 1) Disable some internal optimizations around SST file compression
* 2) Reserve bottom-most level for ingested files only.
* 3) Note that num_levels should be >= 3 if this option is turned on.
- *
+ *
* DEFAULT: false
*
* @param allowIngestBehind true to allow ingest behind, false to disallow.
@@ -1435,7 +1439,7 @@ T setEnableWriteThreadAdaptiveYield(
* allows the memtable writes not to lag behind other writes. It can be used
* to optimize MySQL 2PC in which only the commits, which are serial, write to
* memtable.
- *
+ *
* DEFAULT: false
*
* @param twoWriteQueues true to enable two write queues, false otherwise.
@@ -1455,7 +1459,7 @@ T setEnableWriteThreadAdaptiveYield(
* If true WAL is not flushed automatically after each write. Instead it
* relies on manual invocation of FlushWAL to write the WAL buffer to its
* file.
- *
+ *
* DEFAULT: false
*
* @param manualWalFlush true to set disable automatic WAL flushing,
@@ -1483,7 +1487,7 @@ T setEnableWriteThreadAdaptiveYield(
* For manual flush, application has to specify which column families to
* flush atomically in {@link RocksDB#flush(FlushOptions, List)}.
* For auto-triggered flush, RocksDB atomically flushes ALL column families.
- *
+ *
* Currently, any WAL-enabled writes after atomic flush may be replayed
* independently if the process crashes later and tries to recover.
*
@@ -1495,7 +1499,7 @@ T setEnableWriteThreadAdaptiveYield(
/**
* Determine if atomic flush of multiple column families is enabled.
- *
+ *
* See {@link #setAtomicFlush(boolean)}.
*
* @return true if atomic flush is enabled.
@@ -1596,7 +1600,7 @@ T setEnableWriteThreadAdaptiveYield(
* The number of bytes to prefetch when reading the log. This is mostly useful
* for reading a remotely located log, as it can save the number of
* round-trips. If 0, then the prefetching is disabled.
- *
+ *
* Default: 0
*
* @param logReadaheadSize the number of bytes to prefetch when reading the log.
@@ -1608,7 +1612,7 @@ T setEnableWriteThreadAdaptiveYield(
* The number of bytes to prefetch when reading the log. This is mostly useful
* for reading a remotely located log, as it can save the number of
* round-trips. If 0, then the prefetching is disabled.
- *
+ *
* Default: 0
*
* @return the number of bytes to prefetch when reading the log.
@@ -1651,7 +1655,7 @@ T setEnableWriteThreadAdaptiveYield(
* can be auto-recovered (e.g., retryable IO Error during Flush or WAL write),
* then db resume is called in background to recover from the error. If this
* value is 0 or negative, db resume will not be called.
- *
+ *
* Default: INT_MAX
*
* @param maxBgerrorResumeCount maximum number of times db resume should be called when IO Error
@@ -1667,7 +1671,7 @@ T setEnableWriteThreadAdaptiveYield(
* can be auto-recovered (e.g., retryable IO Error during Flush or WAL write),
* then db resume is called in background to recover from the error. If this
* value is 0 or negative, db resume will not be called.
- *
+ *
* Default: INT_MAX
*
* @return maximum number of times db resume should be called when IO Error happens.
@@ -1678,7 +1682,7 @@ T setEnableWriteThreadAdaptiveYield(
* If max_bgerror_resume_count is ≥ 2, db resume is called multiple times.
* This option decides how long to wait to retry the next resume if the
* previous resume fails and satisfy redo resume conditions.
- *
+ *
* Default: 1000000 (microseconds).
*
* @param bgerrorResumeRetryInterval how many microseconds to wait between DB resume attempts.
@@ -1690,7 +1694,7 @@ T setEnableWriteThreadAdaptiveYield(
* If max_bgerror_resume_count is ≥ 2, db resume is called multiple times.
* This option decides how long to wait to retry the next resume if the
* previous resume fails and satisfy redo resume conditions.
- *
+ *
* Default: 1000000 (microseconds).
*
* @return the instance of the current object.
@@ -1714,7 +1718,9 @@ T setEnableWriteThreadAdaptiveYield(
* use "0:00-23:59". To make an entire day have no offpeak period, leave
* this field blank. Default: Empty string (no offpeak).
*
- * @param offpeakTimeUTC String value from which to parse offpeak time range
+ * @param offpeakTimeUTC String value from which to parse offpeak time range.
+ *
+ * @return the instance of the current object.
*/
T setDailyOffpeakTimeUTC(final String offpeakTimeUTC);
diff --git a/java/src/main/java/org/rocksdb/DbPath.java b/java/src/main/java/org/rocksdb/DbPath.java
index 3f0b67557c5e..3895b258556e 100644
--- a/java/src/main/java/org/rocksdb/DbPath.java
+++ b/java/src/main/java/org/rocksdb/DbPath.java
@@ -14,6 +14,12 @@ public class DbPath {
final Path path;
final long targetSize;
+ /**
+ * Constructs a DbPath.
+ *
+ * @param path the path.
+ * @param targetSize the target size.
+ */
public DbPath(final Path path, final long targetSize) {
this.path = path;
this.targetSize = targetSize;
diff --git a/java/src/main/java/org/rocksdb/DirectSlice.java b/java/src/main/java/org/rocksdb/DirectSlice.java
index 88ec29e3bd65..be7b2cd9e546 100644
--- a/java/src/main/java/org/rocksdb/DirectSlice.java
+++ b/java/src/main/java/org/rocksdb/DirectSlice.java
@@ -16,6 +16,9 @@
* values consider using @see org.rocksdb.Slice
*/
public class DirectSlice extends AbstractSlice<ByteBuffer> {
* Note that this function should be called only after all
* RocksDB instances referencing the filter are closed.
* Otherwise an undefined behavior will occur.
diff --git a/java/src/main/java/org/rocksdb/FilterPolicyType.java b/java/src/main/java/org/rocksdb/FilterPolicyType.java
index 6a693ee4039d..c7051ac07be6 100644
--- a/java/src/main/java/org/rocksdb/FilterPolicyType.java
+++ b/java/src/main/java/org/rocksdb/FilterPolicyType.java
@@ -9,6 +9,9 @@
* IndexType used in conjunction with BlockBasedTable.
*/
public enum FilterPolicyType {
+ /**
+ * Unknown filter policy.
+ */
kUnknownFilterPolicy((byte) 0),
/**
@@ -25,7 +28,7 @@ public enum FilterPolicyType {
*/
kRibbonFilterPolicy((byte) 2);
- public Filter createFilter(final long handle, final double param) {
+ Filter createFilter(final long handle, final double param) {
if (this == kBloomFilterPolicy) {
return new BloomFilter(handle, param);
}
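
[Editor's note: `createFilter` is now package-private; application code constructs filter policies directly, as in this minimal sketch. The bits-per-key value is illustrative.]

```java
import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.BloomFilter;
import org.rocksdb.Options;

public class BloomFilterExample {
  public static void main(final String[] args) {
    // A Bloom filter with ~10 bits per key, wired into the table config.
    try (final BloomFilter bloomFilter = new BloomFilter(10);
         final Options options = new Options().setTableFormatConfig(
             new BlockBasedTableConfig().setFilterPolicy(bloomFilter))) {
      // open a RocksDB instance with these options...
    }
  }
}
```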
diff --git a/java/src/main/java/org/rocksdb/FlushJobInfo.java b/java/src/main/java/org/rocksdb/FlushJobInfo.java
index 414d3a2f332e..52af3afe1795 100644
--- a/java/src/main/java/org/rocksdb/FlushJobInfo.java
+++ b/java/src/main/java/org/rocksdb/FlushJobInfo.java
@@ -7,6 +7,9 @@
import java.util.Objects;
+/**
+ * Information about a flush job.
+ */
public class FlushJobInfo {
private final long columnFamilyId;
private final String columnFamilyName;
diff --git a/java/src/main/java/org/rocksdb/FlushReason.java b/java/src/main/java/org/rocksdb/FlushReason.java
index 21abbb352134..177e5aa3656c 100644
--- a/java/src/main/java/org/rocksdb/FlushReason.java
+++ b/java/src/main/java/org/rocksdb/FlushReason.java
@@ -5,21 +5,83 @@
package org.rocksdb;
+/**
+ * Reasons for a flush.
+ */
public enum FlushReason {
+ /**
+ * Other.
+ */
OTHERS((byte) 0x00),
+
+ /**
+ * Get live files.
+ */
GET_LIVE_FILES((byte) 0x01),
+
+ /**
+ * Shutdown.
+ */
SHUTDOWN((byte) 0x02),
+
+ /**
+ * External file ingestion.
+ */
EXTERNAL_FILE_INGESTION((byte) 0x03),
+
+ /**
+ * Manual compaction.
+ */
MANUAL_COMPACTION((byte) 0x04),
+
+ /**
+ * Write buffer manager.
+ */
WRITE_BUFFER_MANAGER((byte) 0x05),
+
+ /**
+ * Write buffer full.
+ */
WRITE_BUFFER_FULL((byte) 0x06),
+
+ /**
+ * Test.
+ */
TEST((byte) 0x07),
+
+ /**
+ * Delete file(s).
+ */
DELETE_FILES((byte) 0x08),
+
+ /**
+ * Automatic compaction.
+ */
AUTO_COMPACTION((byte) 0x09),
+
+ /**
+ * Manual flush.
+ */
MANUAL_FLUSH((byte) 0x0a),
+
+ /**
+ * Error recovery.
+ */
ERROR_RECOVERY((byte) 0x0b),
+
+ /**
+ * Error recovery retry flush.
+ */
ERROR_RECOVERY_RETRY_FLUSH((byte) 0x0c),
+
+ /**
+ * Write Ahead Log full.
+ */
WAL_FULL((byte) 0x0d),
+
+ /**
+ * Catch up after error recovery.
+ */
CATCH_UP_AFTER_ERROR_RECOVERY((byte) 0x0e);
private final byte value;
diff --git a/java/src/main/java/org/rocksdb/GetStatus.java b/java/src/main/java/org/rocksdb/GetStatus.java
index a2afafe39ebd..0c9fbd27f8c8 100644
--- a/java/src/main/java/org/rocksdb/GetStatus.java
+++ b/java/src/main/java/org/rocksdb/GetStatus.java
@@ -12,7 +12,14 @@
* If the target of the fetch is not big enough, this may be bigger than the contents of the target.
*/
public class GetStatus {
+ /**
+ * The status of the request to fetch into the buffer.
+ */
public final Status status;
+
+ /**
+ * The size of the data, which may be bigger than the buffer.
+ */
public final int requiredSize;
/**
diff --git a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
index cc18b61d2260..9d490a017853 100644
--- a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
+++ b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
@@ -15,22 +15,40 @@
* and post a warning in the LOG.
*/
public class HashLinkedListMemTableConfig extends MemTableConfig {
+ /**
+ * The default number of buckets.
+ */
public static final long DEFAULT_BUCKET_COUNT = 50_000;
+
+ /**
+ * The default size of huge TLB pages.
+ */
public static final long DEFAULT_HUGE_PAGE_TLB_SIZE = 0;
+
+ /**
+ * The default log threshold for bucket entries.
+ */
public static final int DEFAULT_BUCKET_ENTRIES_LOG_THRES = 4096;
- public static final boolean
- DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH = true;
- public static final int DEFAUL_THRESHOLD_USE_SKIPLIST = 256;
/**
- * HashLinkedListMemTableConfig constructor
+ * The default of whether to log when a bucket is flushed.
+ */
+ public static final boolean DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH = true;
+
+ /**
+ * The default threshold for determining when to use a Skip List.
+ */
+ public static final int DEFAULT_THRESHOLD_USE_SKIPLIST = 256;
+
+ /**
+ * Constructs a HashLinkedListMemTableConfig.
*/
public HashLinkedListMemTableConfig() {
bucketCount_ = DEFAULT_BUCKET_COUNT;
hugePageTlbSize_ = DEFAULT_HUGE_PAGE_TLB_SIZE;
bucketEntriesLoggingThreshold_ = DEFAULT_BUCKET_ENTRIES_LOG_THRES;
ifLogBucketDistWhenFlush_ = DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH;
- thresholdUseSkiplist_ = DEFAUL_THRESHOLD_USE_SKIPLIST;
+ thresholdUseSkiplist_ = DEFAULT_THRESHOLD_USE_SKIPLIST;
}
/**
diff --git a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
index 33991f90f729..8197a8879b2d 100644
--- a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
+++ b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
@@ -15,12 +15,23 @@
* and post a warning in the LOG.
*/
public class HashSkipListMemTableConfig extends MemTableConfig {
+ /**
+ * The default number of buckets.
+ */
public static final int DEFAULT_BUCKET_COUNT = 1_000_000;
+
+ /**
+ * The default branching factor.
+ */
public static final int DEFAULT_BRANCHING_FACTOR = 4;
+
+ /**
+ * The default skip list height.
+ */
public static final int DEFAULT_HEIGHT = 4;
/**
- * HashSkipListMemTableConfig constructor
+ * Constructs a HashSkipListMemTableConfig.
*/
public HashSkipListMemTableConfig() {
bucketCount_ = DEFAULT_BUCKET_COUNT;
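
[Editor's note: a minimal sketch of installing one of these hash-based memtables. As the class docs note, both require a prefix extractor; the prefix length here is illustrative.]

```java
import org.rocksdb.HashSkipListMemTableConfig;
import org.rocksdb.Options;

public class HashMemTableExample {
  public static void main(final String[] args) {
    try (final Options options = new Options()
             .setMemTableConfig(new HashSkipListMemTableConfig()
                 .setBucketCount(HashSkipListMemTableConfig.DEFAULT_BUCKET_COUNT))
             // Hash memtables fall back to a skip list (with a LOG warning)
             // unless a prefix extractor is configured.
             .useFixedLengthPrefixExtractor(8)) {
      // open a RocksDB instance with these options...
    }
  }
}
```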
diff --git a/java/src/main/java/org/rocksdb/HistogramData.java b/java/src/main/java/org/rocksdb/HistogramData.java
index 81d890883487..1fdd0c26e9a7 100644
--- a/java/src/main/java/org/rocksdb/HistogramData.java
+++ b/java/src/main/java/org/rocksdb/HistogramData.java
@@ -5,6 +5,9 @@
package org.rocksdb;
+/**
+ * Histogram Data.
+ */
public class HistogramData {
private final double median_;
private final double percentile95_;
@@ -16,12 +19,34 @@ public class HistogramData {
private final long sum_;
private final double min_;
+ /**
+ * Constructs a HistogramData.
+ *
+ * @param median the median value.
+ * @param percentile95 the 95th percentile value.
+ * @param percentile99 the 99th percentile value.
+ * @param average the average value.
+ * @param standardDeviation the value of the standard deviation.
+ */
public HistogramData(final double median, final double percentile95,
final double percentile99, final double average,
final double standardDeviation) {
this(median, percentile95, percentile99, average, standardDeviation, 0.0, 0, 0, 0.0);
}
+ /**
+ * Constructs a HistogramData.
+ *
+ * @param median the median value.
+ * @param percentile95 the 95th percentile value.
+ * @param percentile99 the 99th percentile value.
+ * @param average the average value.
+ * @param standardDeviation the value of the standard deviation.
+ * @param max the maximum value.
+ * @param count the number of values.
+ * @param sum the sum of the values.
+ * @param min the minimum value.
+ */
public HistogramData(final double median, final double percentile95,
final double percentile99, final double average,
final double standardDeviation, final double max, final long count,
@@ -37,38 +62,83 @@ public HistogramData(final double median, final double percentile95,
sum_ = sum;
}
+ /**
+ * Get the median value.
+ *
+ * @return the median value.
+ */
public double getMedian() {
return median_;
}
+ /**
+ * Get the 95th percentile value.
+ *
+ * @return the 95th percentile value.
+ */
public double getPercentile95() {
return percentile95_;
}
+ /**
+ * Get the 99th percentile value.
+ *
+ * @return the 99th percentile value.
+ */
public double getPercentile99() {
return percentile99_;
}
+ /**
+ * Get the average value.
+ *
+ * @return the average value.
+ */
public double getAverage() {
return average_;
}
+ /**
+ * Get the value of the standard deviation.
+ *
+ * @return the value of the standard deviation.
+ */
public double getStandardDeviation() {
return standardDeviation_;
}
+ /**
+ * Get the maximum value.
+ *
+ * @return the maximum value.
+ */
public double getMax() {
return max_;
}
+ /**
+ * Get the number of values.
+ *
+ * @return the number of values.
+ */
public long getCount() {
return count_;
}
+ /**
+ * Get the sum of the values.
+ *
+ * @return the sum of the values.
+ */
public long getSum() {
return sum_;
}
+ /**
+ * Get the minimum value.
+ *
+ * @return the minimum value.
+ */
public double getMin() {
return min_;
}
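
As a usage sketch, these getters are typically read from a Statistics instance; getHistogramData is the existing accessor, and attaching the Statistics to an Options object before opening the database is assumed:

    import org.rocksdb.HistogramData;
    import org.rocksdb.HistogramType;
    import org.rocksdb.Statistics;

    public class HistogramExample {
      public static void main(final String[] args) {
        try (final Statistics stats = new Statistics()) {
          // attach `stats` via Options.setStatistics(...) before opening a DB
          final HistogramData h = stats.getHistogramData(HistogramType.DB_GET);
          System.out.printf("median=%.2f p95=%.2f p99=%.2f count=%d%n",
              h.getMedian(), h.getPercentile95(), h.getPercentile99(), h.getCount());
        }
      }
    }
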
diff --git a/java/src/main/java/org/rocksdb/HistogramType.java b/java/src/main/java/org/rocksdb/HistogramType.java
index b4a56cc07e0d..ea9523e0bd93 100644
--- a/java/src/main/java/org/rocksdb/HistogramType.java
+++ b/java/src/main/java/org/rocksdb/HistogramType.java
@@ -5,69 +5,179 @@
package org.rocksdb;
+/**
+ * The types of histogram.
+ */
public enum HistogramType {
-
+ /**
+ * DB Get.
+ */
DB_GET((byte) 0x0),
+ /**
+ * DB Write.
+ */
DB_WRITE((byte) 0x1),
+ /**
+ * Time spent in compaction.
+ */
COMPACTION_TIME((byte) 0x2),
+ /**
+ * CPU time spent in compaction.
+ */
COMPACTION_CPU_TIME((byte) 0x3),
+ /**
+ * Time spent in setting up sub-compaction.
+ */
SUBCOMPACTION_SETUP_TIME((byte) 0x4),
+ /**
+ * Time spent in IO during table sync.
+ * Measured in microseconds.
+ */
TABLE_SYNC_MICROS((byte) 0x5),
+ /**
+ * Time spent in IO during compaction of outfile.
+ * Measured in microseconds.
+ */
COMPACTION_OUTFILE_SYNC_MICROS((byte) 0x6),
+ /**
+ * Time spent in IO during WAL file sync.
+ * Measured in microseconds.
+ */
WAL_FILE_SYNC_MICROS((byte) 0x7),
+ /**
+ * Time spent in IO during manifest file sync.
+ * Measured in microseconds.
+ */
MANIFEST_FILE_SYNC_MICROS((byte) 0x8),
/**
- * TIME SPENT IN IO DURING TABLE OPEN.
+ * Time spent in IO during table open.
+ * Measured in microseconds.
*/
TABLE_OPEN_IO_MICROS((byte) 0x9),
+ /**
+ * DB Multi-Get.
+ */
DB_MULTIGET((byte) 0xA),
+ /**
+ * Time spent in block reads during compaction.
+ * Measured in microseconds.
+ */
READ_BLOCK_COMPACTION_MICROS((byte) 0xB),
+ /**
+ * Time spent in block reads.
+ * Measured in microseconds.
+ */
READ_BLOCK_GET_MICROS((byte) 0xC),
+ /**
+ * Time spent in raw block writes.
+ * Measured in microseconds.
+ */
WRITE_RAW_BLOCK_MICROS((byte) 0xD),
+ /**
+ * Number of files in a single compaction.
+ */
NUM_FILES_IN_SINGLE_COMPACTION((byte) 0xE),
+ /**
+ * DB Seek.
+ */
DB_SEEK((byte) 0xF),
+ /**
+ * Write stall.
+ */
WRITE_STALL((byte) 0x10),
+ /**
+ * Time spent in SST reads.
+ * Measured in microseconds.
+ */
SST_READ_MICROS((byte) 0x11),
+ /**
+ * File read during flush.
+ * Measured in microseconds.
+ */
FILE_READ_FLUSH_MICROS((byte) 0x12),
+ /**
+ * File read during compaction.
+ * Measured in microseconds.
+ */
FILE_READ_COMPACTION_MICROS((byte) 0x13),
+ /**
+ * File read during DB Open.
+ * Measured in microseconds.
+ */
FILE_READ_DB_OPEN_MICROS((byte) 0x14),
+ /**
+ * File read during DB Get.
+ * Measured in microseconds.
+ */
FILE_READ_GET_MICROS((byte) 0x15),
+ /**
+ * File read during DB Multi-Get.
+ * Measured in microseconds.
+ */
FILE_READ_MULTIGET_MICROS((byte) 0x16),
+ /**
+ * File read during DB Iterator.
+ * Measured in microseconds.
+ */
FILE_READ_DB_ITERATOR_MICROS((byte) 0x17),
+ /**
+ * File read during DB checksum validation.
+ * Measured in microseconds.
+ */
FILE_READ_VERIFY_DB_CHECKSUM_MICROS((byte) 0x18),
+ /**
+ * File read during file checksum validation.
+ * Measured in microseconds.
+ */
FILE_READ_VERIFY_FILE_CHECKSUMS_MICROS((byte) 0x19),
+ /**
+ * Time spent writing SST files.
+ * Measured in microseconds.
+ */
SST_WRITE_MICROS((byte) 0x1A),
+ /**
+ * Time spent in writing SST table (currently only block-based table) or blob file for flush.
+ * Measured in microseconds.
+ */
FILE_WRITE_FLUSH_MICROS((byte) 0x1B),
+ /**
+ * Time spent in writing SST table (currently only block-based table) for compaction.
+ * Measured in microseconds.
+ */
FILE_WRITE_COMPACTION_MICROS((byte) 0x1C),
+ /**
+ * Time spent in writing SST table (currently only block-based table) or blob file for DB open.
+ * Measured in microseconds.
+ */
FILE_WRITE_DB_OPEN_MICROS((byte) 0x1D),
/**
@@ -79,13 +189,34 @@ public enum HistogramType {
* Value size distribution in each operation.
*/
BYTES_PER_READ((byte) 0x1F),
+
+ /**
+ * Bytes per write.
+ * Value size distribution in each operation.
+ */
BYTES_PER_WRITE((byte) 0x20),
+
+ /**
+ * Bytes per Multi-Get.
+ * Value size distribution in each operation.
+ */
BYTES_PER_MULTIGET((byte) 0x21),
+ /**
+ * Time spent in compression.
+ * Measured in nanoseconds.
+ */
COMPRESSION_TIMES_NANOS((byte) 0x22),
+ /**
+ * Time spent in decompression.
+ * Measured in nanoseconds.
+ */
DECOMPRESSION_TIMES_NANOS((byte) 0x23),
+ /**
+ * Number of merge operands for read.
+ */
READ_NUM_MERGE_OPERANDS((byte) 0x24),
/**
@@ -100,56 +231,67 @@ public enum HistogramType {
/**
* BlobDB Put/PutWithTTL/PutUntil/Write latency.
+ * Measured in microseconds.
*/
BLOB_DB_WRITE_MICROS((byte) 0x27),
/**
 * BlobDB Get latency.
+ * Measured in microseconds.
*/
BLOB_DB_GET_MICROS((byte) 0x28),
/**
* BlobDB MultiGet latency.
+ * Measured in microseconds.
*/
BLOB_DB_MULTIGET_MICROS((byte) 0x29),
/**
* BlobDB Seek/SeekToFirst/SeekToLast/SeekForPrev latency.
+ * Measured in microseconds.
*/
BLOB_DB_SEEK_MICROS((byte) 0x2A),
/**
* BlobDB Next latency.
+ * Measured in microseconds.
*/
BLOB_DB_NEXT_MICROS((byte) 0x2B),
/**
* BlobDB Prev latency.
+ * Measured in microseconds.
*/
BLOB_DB_PREV_MICROS((byte) 0x2C),
/**
* Blob file write latency.
+ * Measured in microseconds.
*/
BLOB_DB_BLOB_FILE_WRITE_MICROS((byte) 0x2D),
/**
* Blob file read latency.
+ * Measured in microseconds.
*/
BLOB_DB_BLOB_FILE_READ_MICROS((byte) 0x2E),
/**
* Blob file sync latency.
+ * Measured in microseconds.
*/
BLOB_DB_BLOB_FILE_SYNC_MICROS((byte) 0x2F),
/**
* BlobDB compression time.
+ * Measured in microseconds.
*/
BLOB_DB_COMPRESSION_MICROS((byte) 0x30),
/**
* BlobDB decompression time.
+ * Measured in microseconds.
*/
BLOB_DB_DECOMPRESSION_MICROS((byte) 0x31),
@@ -159,18 +301,17 @@ public enum HistogramType {
FLUSH_TIME((byte) 0x32),
/**
- * Number of MultiGet batch keys overlapping a file
+ * Number of MultiGet batch keys overlapping a file.
*/
SST_BATCH_SIZE((byte) 0x33),
/**
- * Size of a single IO batch issued by MultiGet
+ * Size of a single IO batch issued by MultiGet.
*/
MULTIGET_IO_BATCH_SIZE((byte) 0x34),
/**
- * Num of Index and Filter blocks read from file system per level in MultiGet
- * request
+ * Number of Index and Filter blocks read from the file system per level in a MultiGet request.
*/
NUM_INDEX_AND_FILTER_BLOCKS_READ_PER_LEVEL((byte) 0x35),
@@ -185,12 +326,19 @@ public enum HistogramType {
NUM_LEVEL_READ_PER_MULTIGET((byte) 0x37),
/**
- * The number of retry in auto resume
+ * The number of retries in auto resume.
*/
ERROR_HANDLER_AUTORESUME_RETRY_COUNT((byte) 0x38),
+ /**
+ * Bytes read asynchronously.
+ */
ASYNC_READ_BYTES((byte) 0x39),
+ /**
+ * Wait time for polling.
+ * Measured in microseconds.
+ */
POLL_WAIT_MICROS((byte) 0x3A),
/**
@@ -199,17 +347,20 @@ public enum HistogramType {
PREFETCHED_BYTES_DISCARDED((byte) 0x3B),
/**
- * Wait time for aborting async read in FilePrefetchBuffer destructor
+ * Wait time for aborting async read in FilePrefetchBuffer destructor.
+ * Measured in microseconds.
*/
ASYNC_PREFETCH_ABORT_MICROS((byte) 0x3C),
/**
- * Number of bytes read for RocksDB's prefetching contents
- * (as opposed to file system's prefetch)
- * from the end of SST table during block based table open
+ * Number of bytes read for RocksDB's prefetching contents (as opposed to file system's prefetch)
+ * from the end of SST table during block based table open.
*/
TABLE_OPEN_PREFETCH_TAIL_READ_BYTES((byte) 0x3D),
+ /**
+ * Bytes prefetched during compaction.
+ */
COMPACTION_PREFETCH_BYTES((byte) 0x3F),
/**
@@ -217,12 +368,13 @@ public enum HistogramType {
*/
/**
- * Time spent in Iterator::Prepare() for multi-scan (microseconds)
+ * Time spent in Iterator::Prepare() for multi-scan.
+ * Measured in microseconds.
*/
MULTISCAN_PREPARE_MICROS((byte) 0x40),
/**
- * Number of blocks per multi-scan Prepare() call
+ * Number of blocks per multi-scan Prepare() call.
*/
MULTISCAN_BLOCKS_PER_PREPARE((byte) 0x41),
diff --git a/java/src/main/java/org/rocksdb/Holder.java b/java/src/main/java/org/rocksdb/Holder.java
index 716a0bda0736..dd088dcd767e 100644
--- a/java/src/main/java/org/rocksdb/Holder.java
+++ b/java/src/main/java/org/rocksdb/Holder.java
@@ -7,6 +7,8 @@
/**
* Simple instance reference wrapper.
+ *
+ * @param <T> the type of the instance being held.
* The index contains a key separating each pair of consecutive blocks.
* Let A be the highest key in one block, B the lowest key in the next block,
* and I the index entry separating these two blocks:
@@ -22,7 +22,7 @@
* However, if I=A, this can't happen, and we'll read only the second block.
* In kNoShortening mode, we use I=A. In other modes, we use the shortest
* key in [A, B), which usually significantly reduces index size.
- *
+ *
* There's a similar story for the last index entry, which is an upper bound
* of the highest key in the file. If it's shortened and therefore
* overestimated, iterator is likely to unnecessarily read the last data block
diff --git a/java/src/main/java/org/rocksdb/InfoLogLevel.java b/java/src/main/java/org/rocksdb/InfoLogLevel.java
index 197bd89dab68..c5fda9acd7c3 100644
--- a/java/src/main/java/org/rocksdb/InfoLogLevel.java
+++ b/java/src/main/java/org/rocksdb/InfoLogLevel.java
@@ -5,12 +5,39 @@
* RocksDB log levels.
*/
public enum InfoLogLevel {
+ /**
+ * Log 'debug' level events.
+ */
DEBUG_LEVEL((byte)0),
+
+ /**
+ * Log 'info' level events.
+ */
INFO_LEVEL((byte)1),
+
+ /**
+ * Log 'warn' level events.
+ */
WARN_LEVEL((byte)2),
+
+ /**
+ * Log 'error' level events.
+ */
ERROR_LEVEL((byte)3),
+
+ /**
+ * Log 'fatal' level events.
+ */
FATAL_LEVEL((byte)4),
+
+ /**
+ * Log 'header' level events.
+ */
HEADER_LEVEL((byte)5),
+
+ /**
+ * The number of log levels available.
+ */
NUM_INFO_LOG_LEVELS((byte)6);
private final byte value_;
diff --git a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
index aed28131a17e..c9419f46ac41 100644
--- a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
+++ b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
@@ -11,12 +11,16 @@
* {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}.
*/
public class IngestExternalFileOptions extends RocksObject {
-
+ /**
+ * Constructs an IngestExternalFileOptions.
+ */
public IngestExternalFileOptions() {
super(newIngestExternalFileOptions());
}
/**
+ * Constructs an IngestExternalFileOptions.
+ *
* @param moveFiles {@link #setMoveFiles(boolean)}
* @param snapshotConsistency {@link #setSnapshotConsistency(boolean)}
* @param allowGlobalSeqNo {@link #setAllowGlobalSeqNo(boolean)}
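
A brief sketch of how these options are consumed; ingestExternalFile and setMoveFiles are existing APIs, while the SST path and the already-open database handle are illustrative:

    import java.util.Collections;
    import org.rocksdb.IngestExternalFileOptions;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class IngestExample {
      static void ingest(final RocksDB db) throws RocksDBException {
        try (final IngestExternalFileOptions opts =
                 new IngestExternalFileOptions().setMoveFiles(true)) {
          // moves (rather than copies) the externally built SST into the DB
          db.ingestExternalFile(Collections.singletonList("/tmp/external.sst"), opts);
        }
      }
    }
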
diff --git a/java/src/main/java/org/rocksdb/KeyMayExist.java b/java/src/main/java/org/rocksdb/KeyMayExist.java
index 6149b85292aa..60317d264fd7 100644
--- a/java/src/main/java/org/rocksdb/KeyMayExist.java
+++ b/java/src/main/java/org/rocksdb/KeyMayExist.java
@@ -7,6 +7,9 @@
import java.util.Objects;
+/**
+ * Indicates whether a key may exist or not, and the length of its corresponding value.
+ */
public class KeyMayExist {
@Override
public boolean equals(final Object o) {
@@ -23,13 +26,45 @@ public int hashCode() {
return Objects.hash(exists, valueLength);
}
- public enum KeyMayExistEnum { kNotExist, kExistsWithoutValue, kExistsWithValue }
+ /**
+ * Part of the return type from {@link RocksDB#keyMayExist(ColumnFamilyHandle,
+ * java.nio.ByteBuffer, java.nio.ByteBuffer)}.
+ */
+ public enum KeyMayExistEnum {
+ /**
+ * Key does not exist.
+ */
+ kNotExist,
- public KeyMayExist(final KeyMayExistEnum exists, final int valueLength) {
+ /**
+ * Key may exist without a value.
+ */
+ kExistsWithoutValue,
+
+ /**
+ * Key may exist with a value.
+ */
+ kExistsWithValue
+ }
+
+ /**
+ * Constructs a KeyMayExist.
+ *
+ * @param exists indicates if the key exists.
+ * @param valueLength the length of the value pointed to by the key (if it exists).
+ */
+ KeyMayExist(final KeyMayExistEnum exists, final int valueLength) {
this.exists = exists;
this.valueLength = valueLength;
}
+ /**
+ * Indicates if the key exists.
+ */
public final KeyMayExistEnum exists;
+
+ /**
+ * The length of the value pointed to by the key (if it exists).
+ */
public final int valueLength;
}
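
A sketch of interpreting the three-valued result; keyMayExist(ByteBuffer, ByteBuffer) is the existing overload this type documents, and the key bytes plus the open database handle are illustrative:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import org.rocksdb.KeyMayExist;
    import org.rocksdb.RocksDB;

    public class KeyMayExistExample {
      static void check(final RocksDB db) {
        final ByteBuffer key =
            ByteBuffer.allocateDirect(16).put("k1".getBytes(StandardCharsets.UTF_8));
        key.flip();
        final ByteBuffer value = ByteBuffer.allocateDirect(1024);
        final KeyMayExist result = db.keyMayExist(key, value);
        switch (result.exists) {
          case kNotExist:           // definitely not in the database
            break;
          case kExistsWithoutValue: // may exist; no value was fetched
            break;
          case kExistsWithValue:    // `value` filled; full length in result.valueLength
            break;
        }
      }
    }
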
diff --git a/java/src/main/java/org/rocksdb/LiveFileMetaData.java b/java/src/main/java/org/rocksdb/LiveFileMetaData.java
index 5242496a315b..a15a5737489c 100644
--- a/java/src/main/java/org/rocksdb/LiveFileMetaData.java
+++ b/java/src/main/java/org/rocksdb/LiveFileMetaData.java
@@ -46,7 +46,8 @@ public int level() {
return level;
}
- public long newLiveFileMetaDataHandle() {
+ @SuppressWarnings("PMD.UnusedPrivateMethod")
+ private long newLiveFileMetaDataHandle() {
return newLiveFileMetaDataHandle(columnFamilyName(), columnFamilyName().length, level(),
fileName(), path(), size(), smallestSeqno(), largestSeqno(), smallestKey(),
smallestKey().length, largestKey(), largestKey().length, numReadsSampled(),
diff --git a/java/src/main/java/org/rocksdb/LogFile.java b/java/src/main/java/org/rocksdb/LogFile.java
index 5ee2c9fcc64a..2be597ce5f9b 100644
--- a/java/src/main/java/org/rocksdb/LogFile.java
+++ b/java/src/main/java/org/rocksdb/LogFile.java
@@ -5,6 +5,9 @@
package org.rocksdb;
+/**
+ * A (journal) log file.
+ */
@SuppressWarnings("PMD.MissingStaticMethodInNonInstantiatableClass")
public class LogFile {
private final String pathName;
diff --git a/java/src/main/java/org/rocksdb/Logger.java b/java/src/main/java/org/rocksdb/Logger.java
index b8d0e45efa09..42cc2e2057d2 100644
--- a/java/src/main/java/org/rocksdb/Logger.java
+++ b/java/src/main/java/org/rocksdb/Logger.java
@@ -99,20 +99,43 @@ public InfoLogLevel infoLogLevel() {
infoLogLevel(nativeHandle_));
}
- @Override
- public long getNativeHandle() {
- return nativeHandle_;
- }
-
@Override
public final LoggerType getLoggerType() {
return LoggerType.JAVA_IMPLEMENTATION;
}
+ /**
+ * Log a message.
+ *
+ * @param logLevel the log level.
+ * @param logMsg the log message.
+ */
protected abstract void log(final InfoLogLevel logLevel, final String logMsg);
+ /**
+ * Create a new Logger.
+ *
+ * @param logLevel the log level.
+ *
+ * @return the native handle to the underlying C++ native Logger object.
+ */
protected native long newLogger(final long logLevel);
+
+ /**
+ * Set the log level.
+ *
+ * @param handle the native handle to the underlying C++ native Logger object.
+ * @param logLevel the log level.
+ */
protected native void setInfoLogLevel(final long handle, final byte logLevel);
+
+ /**
+ * Get the log level.
+ *
+ * @param handle the native handle to the underlying C++ native Logger object.
+ *
+ * @return the log level.
+ */
protected native byte infoLogLevel(final long handle);
/**
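
A sketch of supplying the abstract log callback; the anonymous subclass below assumes this version of Logger exposes a constructor taking an InfoLogLevel (earlier releases constructed it from Options or DBOptions instead):

    import org.rocksdb.InfoLogLevel;
    import org.rocksdb.Logger;

    public class LoggerExample {
      public static void main(final String[] args) {
        try (final Logger logger = new Logger(InfoLogLevel.INFO_LEVEL) { // assumed ctor
               @Override
               protected void log(final InfoLogLevel level, final String msg) {
                 System.out.println("[" + level + "] " + msg); // forward to stdout
               }
             }) {
          // attach via Options.setLogger(logger) before opening the database
        }
      }
    }
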
diff --git a/java/src/main/java/org/rocksdb/MemTableInfo.java b/java/src/main/java/org/rocksdb/MemTableInfo.java
index 3d429035a343..56396ac8d997 100644
--- a/java/src/main/java/org/rocksdb/MemTableInfo.java
+++ b/java/src/main/java/org/rocksdb/MemTableInfo.java
@@ -7,6 +7,9 @@
import java.util.Objects;
+/**
+ * Information about a Mem Table.
+ */
public class MemTableInfo {
private final String columnFamilyName;
private final long firstSeqno;
diff --git a/java/src/main/java/org/rocksdb/MergeOperator.java b/java/src/main/java/org/rocksdb/MergeOperator.java
index c299f62210fa..9bf93c8b5a62 100644
--- a/java/src/main/java/org/rocksdb/MergeOperator.java
+++ b/java/src/main/java/org/rocksdb/MergeOperator.java
@@ -12,7 +12,13 @@
* value.
*/
public abstract class MergeOperator extends RocksObject {
- protected MergeOperator(final long nativeHandle) {
- super(nativeHandle);
- }
+ /**
+ * Constructs a MergeOperator.
+ *
+ * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native
+ * RocksDB C++ MergeOperator.
+ */
+ protected MergeOperator(final long nativeHandle) {
+ super(nativeHandle);
+ }
}
diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
index e54db7171e54..b58098119e9e 100644
--- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
+++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
@@ -7,6 +7,9 @@
import java.util.*;
+/**
+ * Mutable Column Family Options.
+ */
public class MutableColumnFamilyOptions extends AbstractMutableOptions {
/**
* User must use builder pattern, or parser.
@@ -54,24 +57,87 @@ public static MutableColumnFamilyOptionsBuilder parse(
return new MutableColumnFamilyOptionsBuilder().fromParsed(parsedOptions, ignoreUnknown);
}
+ /**
+ * Parses a String representation of MutableColumnFamilyOptions.
+ *
+ * The format is: key1=value1;key2=value2;key3=value3 etc.
+ *
+ * For int[] values, each int should be separated by a colon, e.g.
+ *
+ * key1=value1;intArrayKey1=1:2:3
+ *
+ * @param str The string representation of the mutable column family options
+ *
+ * @return A builder for the mutable column family options
+ */
public static MutableColumnFamilyOptionsBuilder parse(final String str) {
return parse(str, false);
}
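
A usage sketch for the format just described; the keys are real mutable column family option names, the values illustrative:

    import org.rocksdb.MutableColumnFamilyOptions;

    public class ParseCFOptionsExample {
      public static void main(final String[] args) {
        final MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder builder =
            MutableColumnFamilyOptions.parse(
                "write_buffer_size=67108864;max_write_buffer_number=4;"
                    + "max_bytes_for_level_multiplier_additional=1:2:3");
        // apply to a live DB via RocksDB.setOptions(columnFamilyHandle, builder.build())
        System.out.println(builder.build());
      }
    }
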
private interface MutableColumnFamilyOptionKey extends MutableOptionKey {}
+ /**
+ * Mem Table options.
+ */
public enum MemtableOption implements MutableColumnFamilyOptionKey {
+ /**
+ * Write buffer size.
+ */
write_buffer_size(ValueType.LONG),
+
+ /**
+ * Arena block size.
+ */
arena_block_size(ValueType.LONG),
+
+ /**
+ * Prefix size ratio for Memtable's Bloom Filter.
+ */
memtable_prefix_bloom_size_ratio(ValueType.DOUBLE),
+
+ /**
+ * Whether to filter whole keys in the Memtable(s).
+ */
memtable_whole_key_filtering(ValueType.BOOLEAN),
+
+ /**
+ * Number of bits for the prefix in Memtable's Bloom Filter.
+ */
@Deprecated memtable_prefix_bloom_bits(ValueType.INT),
+
+ /**
+ * Number of probes for the prefix in Memtable's Bloom Filter.
+ */
@Deprecated memtable_prefix_bloom_probes(ValueType.INT),
+
+ /**
+ * Huge Page Size for Memtable(s).
+ */
memtable_huge_page_size(ValueType.LONG),
+
+ /**
+ * Maximum number of successive merges.
+ */
max_successive_merges(ValueType.LONG),
+
+ /**
+ * Whether to filter deletes.
+ */
@Deprecated filter_deletes(ValueType.BOOLEAN),
+
+ /**
+ * Maximum number of write buffers.
+ */
max_write_buffer_number(ValueType.INT),
+
+ /**
+ * Number of in-place update locks.
+ */
inplace_update_num_locks(ValueType.LONG),
+
+ /**
+ * Memory purge threshold.
+ */
experimental_mempurge_threshold(ValueType.DOUBLE);
private final ValueType valueType;
@@ -85,20 +151,78 @@ public ValueType getValueType() {
}
}
+ /**
+ * Compaction options.
+ */
public enum CompactionOption implements MutableColumnFamilyOptionKey {
+ /**
+ * Disable auto compaction.
+ */
disable_auto_compactions(ValueType.BOOLEAN),
+
+ /**
+ * Soft limit on the number of bytes pending before compaction.
+ */
soft_pending_compaction_bytes_limit(ValueType.LONG),
+
+ /**
+ * Hard limit on the number of bytes pending before compaction.
+ */
hard_pending_compaction_bytes_limit(ValueType.LONG),
+
+ /**
+ * Number of files in Level 0 before compaction is triggered.
+ */
level0_file_num_compaction_trigger(ValueType.INT),
+
+ /**
+ * Number of files in Level 0 before a write slowdown is triggered.
+ */
level0_slowdown_writes_trigger(ValueType.INT),
+
+ /**
+ * Number of files in Level 0 before writes are stopped.
+ */
level0_stop_writes_trigger(ValueType.INT),
+
+ /**
+ * Max compaction bytes.
+ */
max_compaction_bytes(ValueType.LONG),
+
+ /**
+ * Target for the base size of files.
+ */
target_file_size_base(ValueType.LONG),
+
+ /**
+ * Multiplier for the size of files.
+ */
target_file_size_multiplier(ValueType.INT),
+
+ /**
+ * Maximum size in bytes for level base.
+ */
max_bytes_for_level_base(ValueType.LONG),
+
+ /**
+ * Maximum bytes for level multiplier.
+ */
max_bytes_for_level_multiplier(ValueType.INT),
+
+ /**
+ * Maximum bytes for level multiplier (additional).
+ */
max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY),
+
+ /**
+ * Time-to-live.
+ */
ttl(ValueType.LONG),
+
+ /**
+ * Compaction period in seconds.
+ */
periodic_compaction_seconds(ValueType.LONG);
private final ValueType valueType;
@@ -112,16 +236,58 @@ public ValueType getValueType() {
}
}
+ /**
+ * Blob options.
+ */
public enum BlobOption implements MutableColumnFamilyOptionKey {
+ /**
+ * Enable BLOB files.
+ */
enable_blob_files(ValueType.BOOLEAN),
+
+ /**
+ * Minimum BLOB size.
+ */
min_blob_size(ValueType.LONG),
+
+ /**
+ * BLOB file size.
+ */
blob_file_size(ValueType.LONG),
+
+ /**
+ * BLOB compression type.
+ */
blob_compression_type(ValueType.ENUM),
+
+ /**
+ * Enable BLOB garbage collection.
+ */
enable_blob_garbage_collection(ValueType.BOOLEAN),
+
+ /**
+ * BLOB garbage collection age cut-off.
+ */
blob_garbage_collection_age_cutoff(ValueType.DOUBLE),
+
+ /**
+ * Threshold for forcing BLOB garbage collection.
+ */
blob_garbage_collection_force_threshold(ValueType.DOUBLE),
+
+ /**
+ * BLOB compaction read-ahead size.
+ */
blob_compaction_readahead_size(ValueType.LONG),
+
+ /**
+ * BLOB file starting level.
+ */
blob_file_starting_level(ValueType.INT),
+
+ /**
+ * Prepopulate BLOB Cache.
+ */
prepopulate_blob_cache(ValueType.ENUM);
private final ValueType valueType;
@@ -135,10 +301,28 @@ public ValueType getValueType() {
}
}
+ /**
+ * Miscellaneous options.
+ */
public enum MiscOption implements MutableColumnFamilyOptionKey {
+ /**
+ * Maximum number of sequential keys to skip during iteration.
+ */
max_sequential_skip_in_iterations(ValueType.LONG),
+
+ /**
+ * Whether to enable paranoid file checks.
+ */
paranoid_file_checks(ValueType.BOOLEAN),
+
+ /**
+ * Whether to report background I/O stats.
+ */
report_bg_io_stats(ValueType.BOOLEAN),
+
+ /**
+ * Compression type.
+ */
compression(ValueType.ENUM);
private final ValueType valueType;
@@ -152,6 +336,9 @@ public ValueType getValueType() {
}
}
+ /**
+ * Builder for constructing MutableColumnFamilyOptions.
+ */
public static class MutableColumnFamilyOptionsBuilder
extends AbstractMutableOptionsBuilder
+ * The format is: key1=value1;key2=value2;key3=value3 etc.
+ *
+ * For int[] values, each int should be separated by a colon, e.g.
+ *
+ * key1=value1;intArrayKey1=1:2:3
+ *
+ * @param str The string representation of the mutable db options
+ *
+ * @return A builder for the mutable db options
+ */
public static MutableDBOptionsBuilder parse(final String str) {
return parse(str, false);
}
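
The corresponding sketch on the DB side; setDBOptions is the existing method for applying a mutable-options builder to an open database, and the keys and values here are illustrative:

    import org.rocksdb.MutableDBOptions;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class ParseDBOptionsExample {
      static void tune(final RocksDB db) throws RocksDBException {
        // keys are real mutable DB option names; values are examples
        db.setDBOptions(
            MutableDBOptions.parse("max_background_jobs=8;bytes_per_sync=1048576"));
      }
    }
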
private interface MutableDBOptionKey extends MutableOptionKey {}
+ /**
+ * Database options.
+ */
public enum DBOption implements MutableDBOptionKey {
+ /**
+ * Maximum number of background jobs.
+ */
max_background_jobs(ValueType.INT),
+
+ /**
+ * Maximum number of background compactions.
+ */
max_background_compactions(ValueType.INT),
+
+ /**
+ * Whether to avoid flush during shutdown.
+ */
avoid_flush_during_shutdown(ValueType.BOOLEAN),
+
+ /**
+ * Max buffer size for writing to files.
+ */
writable_file_max_buffer_size(ValueType.LONG),
+
+ /**
+ * Delayed write rate.
+ */
delayed_write_rate(ValueType.LONG),
+
+ /**
+ * Maximum total size of the WAL.
+ */
max_total_wal_size(ValueType.LONG),
+
+ /**
+ * The period to delete obsolete files.
+ * Measured in microseconds.
+ */
delete_obsolete_files_period_micros(ValueType.LONG),
+
+ /**
+ * The period to dump statistics.
+ * Measured in seconds.
+ */
stats_dump_period_sec(ValueType.INT),
+
+ /**
+ * The period that statistics persist.
+ * Measured in seconds.
+ */
stats_persist_period_sec(ValueType.INT),
+
+ /**
+ * Buffer size for statistics history.
+ */
stats_history_buffer_size(ValueType.LONG),
+
+ /**
+ * Maximum number of open files.
+ */
max_open_files(ValueType.INT),
+
+ /**
+ * Bytes per sync.
+ */
bytes_per_sync(ValueType.LONG),
+
+ /**
+ * WAL bytes per sync.
+ */
wal_bytes_per_sync(ValueType.LONG),
+
+ /**
+ * Strict limit of bytes per sync.
+ */
strict_bytes_per_sync(ValueType.BOOLEAN),
+
+ /**
+ * Compaction readahead size.
+ */
compaction_readahead_size(ValueType.LONG),
+ /**
+ * Signifies periods characterized by significantly less read and write activity compared to
+ * other times.
+ */
daily_offpeak_time_utc(ValueType.STRING);
private final ValueType valueType;
@@ -91,6 +176,9 @@ public ValueType getValueType() {
}
}
+ /**
+ * Builder for constructing MutableDBOptions.
+ */
public static class MutableDBOptionsBuilder
        extends AbstractMutableOptionsBuilder
 * RocksCallbackObject is the base class for any RocksDB class that acts as a
- * callback from some underlying underlying native C++ {@code rocksdb} object.
+ * callback from some underlying native C++ {@code rocksdb} object.
+ * Its implementation is always coupled with
+ * a C++ implementation of {@code ROCKSDB_NAMESPACE::JniCallback}.
*
* The use of {@code RocksObject} should always be preferred over
* {@link RocksCallbackObject} if callbacks are not required.
*/
public abstract class RocksCallbackObject extends
AbstractImmutableNativeReference {
-
+ /**
+ * An immutable reference to the value of the C++ pointer pointing to some
+ * underlying native RocksDB C++ object that
+ * extends {@code ROCKSDB_NAMESPACE::JniCallback}.
+ */
protected final long nativeHandle_;
+ /**
+ * Constructs a RocksCallbackObject.
+ *
+ * @param nativeParameterHandles reference to the value of the C++ pointers pointing to the
+ * underlying native RocksDB C++ objects.
+ */
protected RocksCallbackObject(final long... nativeParameterHandles) {
super(true);
this.nativeHandle_ = initializeNative(nativeParameterHandles);
diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java
index fe2f38af64f9..fe074c9ba60c 100644
--- a/java/src/main/java/org/rocksdb/RocksDB.java
+++ b/java/src/main/java/org/rocksdb/RocksDB.java
@@ -21,7 +21,14 @@
 * indicates something wrong at the RocksDB library side and the call failed.
*/
public class RocksDB extends RocksObject {
+ /**
+ * The name of the default column family.
+ */
public static final byte[] DEFAULT_COLUMN_FAMILY = "default".getBytes(UTF_8);
+
+ /**
+ * A constant representing a result where something was searched for but not found.
+ */
public static final int NOT_FOUND = -1;
private enum LibraryState {
@@ -165,6 +172,11 @@ private static void waitForLibraryToBeLoaded() {
}
}
+ /**
+ * Get the RocksDB version.
+ *
+ * @return the version of RocksDB.
+ */
public static Version rocksdbVersion() {
return version;
}
@@ -808,6 +820,9 @@ public List
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
 * @param key byte array of a key to search for
 * @return true if key exists in the database, otherwise false.
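
A small sketch contrasting the guarantee described above with the cheaper probabilistic check; the helper name is illustrative:

    import org.rocksdb.RocksDB;

    public class KeyExistsExample {
      static boolean definitelyPresent(final RocksDB db, final byte[] key) {
        // keyMayExist may return false positives; keyExists never does,
        // confirming with a real read when the filter reports a possible hit
        return db.keyExists(key);
      }
    }
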
@@ -2600,11 +2666,11 @@ public boolean keyExists(final byte[] key) {
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
* @param key byte array of a key to search for
* @param offset the offset of the "key" array to be used, must be
@@ -2620,11 +2686,11 @@ public boolean keyExists(final byte[] key, final int offset, final int len) {
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
@@ -2639,11 +2705,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final byte
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
@@ -2663,11 +2729,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final byte
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param readOptions {@link ReadOptions} instance
@@ -2682,11 +2748,11 @@ public boolean keyExists(final ReadOptions readOptions, final byte[] key) {
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param readOptions {@link ReadOptions} instance
@@ -2706,11 +2772,11 @@ public boolean keyExists(
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
@@ -2727,11 +2793,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle,
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
@@ -2755,11 +2821,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle,
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param key ByteBuffer with key. Must be allocated as direct.
@@ -2773,11 +2839,11 @@ public boolean keyExists(final ByteBuffer key) {
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
@@ -2792,11 +2858,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final Byte
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param readOptions {@link ReadOptions} instance
@@ -2811,11 +2877,11 @@ public boolean keyExists(final ReadOptions readOptions, final ByteBuffer key) {
* Check if a key exists in the database.
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee
* of a correct result, whether the key exists or not.
- *
+ *
* Internally it checks if the key may exist and then double checks with read operation
* that confirms the key exists. This deals with the case where {@code keyMayExist} may return
* a false positive.
- *
+ *
* The code crosses the Java/JNI boundary only once.
*
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance
@@ -3675,10 +3741,26 @@ public long[] getApproximateSizes(final List
+ * Should stay in sync with /include/rocksdb/status.h:Code and
+ * /java/rocksjni/portal.h:toJavaStatusCode
+ */
public enum Code {
+ /**
+ * Success.
+ */
Ok( (byte)0x0),
+
+ /**
+ * Not found.
+ */
NotFound( (byte)0x1),
+
+ /**
+ * Corruption detected.
+ */
Corruption( (byte)0x2),
+
+ /**
+ * Not supported.
+ */
NotSupported( (byte)0x3),
+
+ /**
+ * Invalid argument provided.
+ */
InvalidArgument( (byte)0x4),
+
+ /**
+ * I/O error.
+ */
IOError( (byte)0x5),
+
+ /**
+ * There is a merge in progress.
+ */
MergeInProgress( (byte)0x6),
+
+ /**
+ * Incomplete.
+ */
Incomplete( (byte)0x7),
+
+ /**
+ * There is a shutdown in progress.
+ */
ShutdownInProgress( (byte)0x8),
+
+ /**
+ * An operation timed out.
+ */
TimedOut( (byte)0x9),
+
+ /**
+ * An operation was aborted.
+ */
Aborted( (byte)0xA),
+
+ /**
+ * The system is busy.
+ */
Busy( (byte)0xB),
+
+ /**
+ * The request expired.
+ */
Expired( (byte)0xC),
+
+ /**
+ * The operation should be reattempted.
+ */
TryAgain( (byte)0xD),
+
+ /**
+ * Undefined.
+ */
Undefined( (byte)0x7F);
private final byte value;
@@ -82,6 +185,15 @@ public enum Code {
this.value = value;
}
+ /**
+ * Get a code from its byte representation.
+ *
+ * @param value the byte representation of the code.
+ *
+ * @return the code
+ *
+ * @throws IllegalArgumentException if the {@code value} parameter does not represent a code.
+ */
public static Code getCode(final byte value) {
for (final Code code : Code.values()) {
if (code.value == value){
@@ -102,16 +214,56 @@ public byte getValue() {
}
}
- // should stay in sync with /include/rocksdb/status.h:SubCode and /java/rocksjni/portal.h:toJavaStatusSubCode
+ /**
+ * Status Sub-code.
+ *
+ * Should stay in sync with /include/rocksdb/status.h:SubCode and
+ * /java/rocksjni/portal.h:toJavaStatusSubCode
+ */
public enum SubCode {
+ /**
+ * None.
+ */
None( (byte)0x0),
+
+ /**
+ * Timeout whilst waiting on Mutex.
+ */
MutexTimeout( (byte)0x1),
+
+ /**
+ * Timeout whilst waiting on Lock.
+ */
LockTimeout( (byte)0x2),
+
+ /**
+ * Maximum limit on number of locks reached.
+ */
LockLimit( (byte)0x3),
+
+ /**
+ * No space remaining.
+ */
NoSpace( (byte)0x4),
+
+ /**
+ * Deadlock detected.
+ */
Deadlock( (byte)0x5),
+
+ /**
+ * Stale file detected.
+ */
StaleFile( (byte)0x6),
+
+ /**
+ * Reached the maximum memory limit.
+ */
MemoryLimit( (byte)0x7),
+
+ /**
+ * Undefined.
+ */
Undefined( (byte)0x7F);
private final byte value;
@@ -120,6 +272,16 @@ public enum SubCode {
this.value = value;
}
+ /**
+ * Get a sub-code from its byte representation.
+ *
+ * @param value the byte representation of the sub-code.
+ *
+ * @return the sub-code
+ *
+ * @throws IllegalArgumentException if the {@code value} parameter does not represent a
+ * sub-code.
+ */
public static SubCode getSubCode(final byte value) {
for (final SubCode subCode : SubCode.values()) {
if (subCode.value == value){
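
A tiny sketch of the byte-to-enum lookups documented above; the byte values mirror those declared on the enum constants:

    import org.rocksdb.Status;

    public class StatusCodeExample {
      public static void main(final String[] args) {
        final Status.Code code = Status.Code.getCode((byte) 0x1);             // NotFound
        final Status.SubCode subCode = Status.SubCode.getSubCode((byte) 0x4); // NoSpace
        System.out.println(code + " / " + subCode);
      }
    }
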
diff --git a/java/src/main/java/org/rocksdb/StringAppendOperator.java b/java/src/main/java/org/rocksdb/StringAppendOperator.java
index 25b134c44af8..2bf2f1aff6f1 100644
--- a/java/src/main/java/org/rocksdb/StringAppendOperator.java
+++ b/java/src/main/java/org/rocksdb/StringAppendOperator.java
@@ -11,14 +11,27 @@
* two strings.
*/
public class StringAppendOperator extends MergeOperator {
+ /**
+ * Constructs a StringAppendOperator.
+ */
public StringAppendOperator() {
this(',');
}
+ /**
+ * Constructs a StringAppendOperator.
+ *
+ * @param delim the character delimiter to use when appending.
+ */
public StringAppendOperator(final char delim) {
super(newSharedStringAppendOperator(delim));
}
+ /**
+ * Constructs a StringAppendOperator.
+ *
+ * @param delim the string delimiter to use when appending.
+ */
public StringAppendOperator(final String delim) {
super(newSharedStringAppendOperator(delim));
}
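
A sketch of the operator in action; setMergeOperator, merge, and open are existing APIs, while the database path is hypothetical:

    import static java.nio.charset.StandardCharsets.UTF_8;

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.StringAppendOperator;

    public class StringAppendExample {
      public static void main(final String[] args) throws RocksDBException {
        try (final StringAppendOperator append = new StringAppendOperator(',');
             final Options options =
                 new Options().setCreateIfMissing(true).setMergeOperator(append);
             final RocksDB db = RocksDB.open(options, "/tmp/append-db")) {
          db.merge("k".getBytes(UTF_8), "a".getBytes(UTF_8));
          db.merge("k".getBytes(UTF_8), "b".getBytes(UTF_8));
          // db.get("k".getBytes(UTF_8)) now returns "a,b"
        }
      }
    }
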
diff --git a/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java
index 8dc56796a25d..aaf34b2cbd57 100644
--- a/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java
+++ b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java
@@ -7,6 +7,9 @@
import java.util.Objects;
+/**
+ * Brief information on Table File creation.
+ */
public class TableFileCreationBriefInfo {
private final String dbName;
private final String columnFamilyName;
diff --git a/java/src/main/java/org/rocksdb/TableFileCreationInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java
index 5654603c3833..1b65712b3b3b 100644
--- a/java/src/main/java/org/rocksdb/TableFileCreationInfo.java
+++ b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java
@@ -7,6 +7,9 @@
import java.util.Objects;
+/**
+ * Information on Table File creation.
+ */
public class TableFileCreationInfo extends TableFileCreationBriefInfo {
private final long fileSize;
private final TableProperties tableProperties;
diff --git a/java/src/main/java/org/rocksdb/TableFileCreationReason.java b/java/src/main/java/org/rocksdb/TableFileCreationReason.java
index d3984663dd28..eaa06245a0c0 100644
--- a/java/src/main/java/org/rocksdb/TableFileCreationReason.java
+++ b/java/src/main/java/org/rocksdb/TableFileCreationReason.java
@@ -5,10 +5,28 @@
package org.rocksdb;
+/**
+ * Reasons for Table File creation.
+ */
public enum TableFileCreationReason {
+ /**
+ * Flush.
+ */
FLUSH((byte) 0x00),
+
+ /**
+ * Compaction.
+ */
COMPACTION((byte) 0x01),
+
+ /**
+ * Recovery.
+ */
RECOVERY((byte) 0x02),
+
+ /**
+ * Miscellaneous.
+ */
MISC((byte) 0x03);
private final byte value;
diff --git a/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java
index 9a777e3336c2..87bd2b8c87af 100644
--- a/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java
+++ b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java
@@ -7,6 +7,9 @@
import java.util.Objects;
+/**
+ * Information on Table File deletion.
+ */
public class TableFileDeletionInfo {
private final String dbName;
private final String filePath;
diff --git a/java/src/main/java/org/rocksdb/TablePropertiesCollectorFactory.java b/java/src/main/java/org/rocksdb/TablePropertiesCollectorFactory.java
index ae2789ef8263..fbf521408017 100644
--- a/java/src/main/java/org/rocksdb/TablePropertiesCollectorFactory.java
+++ b/java/src/main/java/org/rocksdb/TablePropertiesCollectorFactory.java
@@ -1,44 +1,65 @@
-// Copyright (c) Meta Platforms, Inc. and affiliates.
-//
-// This source code is licensed under both the GPLv2 (found in the
-// COPYING file in the root directory) and Apache 2.0 License
-// (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-public abstract class TablePropertiesCollectorFactory extends RocksObject {
- private TablePropertiesCollectorFactory(final long nativeHandle) {
- super(nativeHandle);
- }
-
- public static TablePropertiesCollectorFactory NewCompactOnDeletionCollectorFactory(
- final long sliding_window_size, final long deletion_trigger, final double deletion_ratio) {
- long handle =
- newCompactOnDeletionCollectorFactory(sliding_window_size, deletion_trigger, deletion_ratio);
- return new TablePropertiesCollectorFactory(handle) {
- @Override
- protected void disposeInternal(long handle) {
- TablePropertiesCollectorFactory.deleteCompactOnDeletionCollectorFactory(handle);
- }
- };
- }
-
- /**
- * Internal API. Do not use.
- * @param nativeHandle
- * @return
- */
- static TablePropertiesCollectorFactory newWrapper(final long nativeHandle) {
- return new TablePropertiesCollectorFactory(nativeHandle) {
- @Override
- protected void disposeInternal(long handle) {
- TablePropertiesCollectorFactory.deleteCompactOnDeletionCollectorFactory(handle);
- }
- };
- }
-
- private static native long newCompactOnDeletionCollectorFactory(
- final long slidingWindowSize, final long deletionTrigger, final double deletionRatio);
-
- private static native void deleteCompactOnDeletionCollectorFactory(final long handle);
-}
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+//
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Table Properties Collector Factory.
+ */
+public abstract class TablePropertiesCollectorFactory extends RocksObject {
+ private TablePropertiesCollectorFactory(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ /**
+ * Creates a factory of a table property collector that marks a SST
+ * file as need-compaction when it observes at least "D" deletion
+ * entries in any "N" consecutive entries, or the ratio of tombstone
+ * entries >= deletion_ratio.
+ *
+   * @param slidingWindowSize "N". Note that this number will be
+   *     rounded up to the smallest multiple of 128 that is no less
+   *     than the specified size.
+ * @param deletionTrigger "D". Note that even when "N" is changed,
+ * the specified number for "D" will not be changed.
+   * @param deletionRatio if &lt;= 0 or &gt; 1, disables triggering compaction
+ * based on deletion ratio. Disabled by default.
+ *
+ * @return the new compact on deletion collector factory.
+ */
+ public static TablePropertiesCollectorFactory createNewCompactOnDeletionCollectorFactory(
+ final long slidingWindowSize, final long deletionTrigger, final double deletionRatio) {
+ final long handle =
+ newCompactOnDeletionCollectorFactory(slidingWindowSize, deletionTrigger, deletionRatio);
+ return new TablePropertiesCollectorFactory(handle) {
+ @Override
+ protected void disposeInternal(final long handle) {
+ TablePropertiesCollectorFactory.deleteCompactOnDeletionCollectorFactory(handle);
+ }
+ };
+ }
+
+ /**
+ * Internal API. Do not use.
+ *
+ * @param nativeHandle the native handle to wrap.
+ *
+ * @return the new TablePropertiesCollectorFactory.
+ */
+ static TablePropertiesCollectorFactory newWrapper(final long nativeHandle) {
+ return new TablePropertiesCollectorFactory(nativeHandle) {
+ @Override
+      protected void disposeInternal(final long handle) {
+ TablePropertiesCollectorFactory.deleteCompactOnDeletionCollectorFactory(handle);
+ }
+ };
+ }
+
+ private static native long newCompactOnDeletionCollectorFactory(
+ final long slidingWindowSize, final long deletionTrigger, final double deletionRatio);
+
+ private static native void deleteCompactOnDeletionCollectorFactory(final long handle);
+}
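
A usage sketch for the factory method above; the window, trigger, and ratio values are illustrative, and registering the factory on the column family options before opening the database is left as a comment:

    import org.rocksdb.TablePropertiesCollectorFactory;

    public class CompactOnDeletionExample {
      public static void main(final String[] args) {
        // window of 128 entries ("N"), trigger after 20 deletions ("D");
        // passing 0.0 leaves the deletion-ratio trigger disabled
        try (final TablePropertiesCollectorFactory factory =
                 TablePropertiesCollectorFactory.createNewCompactOnDeletionCollectorFactory(
                     128, 20, 0.0)) {
          // register `factory` on the column family options before opening the DB
        }
      }
    }
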
diff --git a/java/src/main/java/org/rocksdb/ThreadStatus.java b/java/src/main/java/org/rocksdb/ThreadStatus.java
index 4211453d1a0b..c75d85d276f6 100644
--- a/java/src/main/java/org/rocksdb/ThreadStatus.java
+++ b/java/src/main/java/org/rocksdb/ThreadStatus.java
@@ -7,6 +7,9 @@
import java.util.Map;
+/**
+ * The status of a Thread.
+ */
public class ThreadStatus {
private final long threadId;
private final ThreadType threadType;
@@ -155,6 +158,13 @@ public static String getOperationName(final OperationType operationType) {
return getOperationName(operationType.getValue());
}
+ /**
+ * Converts microseconds to a string representation.
+ *
+ * @param operationElapsedTime the microseconds.
+ *
+ * @return the string representation.
+ */
public static String microsToString(final long operationElapsedTime) {
return microsToStringNative(operationElapsedTime);
}
diff --git a/java/src/main/java/org/rocksdb/TickerType.java b/java/src/main/java/org/rocksdb/TickerType.java
index bf1c73a129fb..c3d7213b261d 100644
--- a/java/src/main/java/org/rocksdb/TickerType.java
+++ b/java/src/main/java/org/rocksdb/TickerType.java
@@ -16,952 +16,1246 @@
* should descend into negative values until TICKER_ENUM_MAX reaches -128 (-0x80).
*/
public enum TickerType {
-
- /**
- * total block cache misses
- *
- * REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS +
- * BLOCK_CACHE_FILTER_MISS +
- * BLOCK_CACHE_DATA_MISS;
- */
- BLOCK_CACHE_MISS((byte) 0x0),
-
- /**
- * total block cache hit
- *
- * REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT +
- * BLOCK_CACHE_FILTER_HIT +
- * BLOCK_CACHE_DATA_HIT;
- */
- BLOCK_CACHE_HIT((byte) 0x1),
-
- BLOCK_CACHE_ADD((byte) 0x2),
-
- /**
- * # of failures when adding blocks to block cache.
- */
- BLOCK_CACHE_ADD_FAILURES((byte) 0x3),
-
- /**
- * # of times cache miss when accessing index block from block cache.
- */
- BLOCK_CACHE_INDEX_MISS((byte) 0x4),
-
- /**
- * # of times cache hit when accessing index block from block cache.
- */
- BLOCK_CACHE_INDEX_HIT((byte) 0x5),
-
- /**
- * # of index blocks added to block cache.
- */
- BLOCK_CACHE_INDEX_ADD((byte) 0x6),
-
- /**
- * # of bytes of index blocks inserted into cache
- */
- BLOCK_CACHE_INDEX_BYTES_INSERT((byte) 0x7),
-
- /**
- * # of times cache miss when accessing filter block from block cache.
- */
- BLOCK_CACHE_FILTER_MISS((byte) 0x8),
-
- /**
- * # of times cache hit when accessing filter block from block cache.
- */
- BLOCK_CACHE_FILTER_HIT((byte) 0x9),
-
- /**
- * # of filter blocks added to block cache.
- */
- BLOCK_CACHE_FILTER_ADD((byte) 0xA),
-
- /**
- * # of bytes of bloom filter blocks inserted into cache
- */
- BLOCK_CACHE_FILTER_BYTES_INSERT((byte) 0xB),
-
- /**
- * # of times cache miss when accessing data block from block cache.
- */
- BLOCK_CACHE_DATA_MISS((byte) 0xC),
-
- /**
- * # of times cache hit when accessing data block from block cache.
- */
- BLOCK_CACHE_DATA_HIT((byte) 0xD),
-
- /**
- * # of data blocks added to block cache.
- */
- BLOCK_CACHE_DATA_ADD((byte) 0xE),
-
- /**
- * # of bytes of data blocks inserted into cache
- */
- BLOCK_CACHE_DATA_BYTES_INSERT((byte) 0xF),
-
- /**
- * # of bytes read from cache.
- */
- BLOCK_CACHE_BYTES_READ((byte) 0x10),
-
- /**
- * # of bytes written into cache.
- */
- BLOCK_CACHE_BYTES_WRITE((byte) 0x11),
-
- /**
- * Block cache related stats for Compression dictionaries
- */
- BLOCK_CACHE_COMPRESSION_DICT_MISS((byte) 0x12),
- BLOCK_CACHE_COMPRESSION_DICT_HIT((byte) 0x13),
- BLOCK_CACHE_COMPRESSION_DICT_ADD((byte) 0x14),
- BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT((byte) 0x15),
-
- /**
- * Redundant additions to block cache
- */
- BLOCK_CACHE_ADD_REDUNDANT((byte) 0x16),
- BLOCK_CACHE_INDEX_ADD_REDUNDANT((byte) 0x17),
- BLOCK_CACHE_FILTER_ADD_REDUNDANT((byte) 0x18),
- BLOCK_CACHE_DATA_ADD_REDUNDANT((byte) 0x19),
- BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT((byte) 0x1A),
-
- /**
- * Number of secondary cache hits
- */
- SECONDARY_CACHE_HITS((byte) 0x1B),
- SECONDARY_CACHE_FILTER_HITS((byte) 0x1C),
- SECONDARY_CACHE_INDEX_HITS((byte) 0x1D),
- SECONDARY_CACHE_DATA_HITS((byte) 0x1E),
-
- COMPRESSED_SECONDARY_CACHE_DUMMY_HITS((byte) 0x1F),
- COMPRESSED_SECONDARY_CACHE_HITS((byte) 0x20),
- COMPRESSED_SECONDARY_CACHE_PROMOTIONS((byte) 0x21),
- COMPRESSED_SECONDARY_CACHE_PROMOTION_SKIPS((byte) 0x22),
-
- /**
- * # of times bloom filter has avoided file reads.
- */
- BLOOM_FILTER_USEFUL((byte) 0x23),
-
- /**
- * # of times bloom FullFilter has not avoided the reads.
- */
- BLOOM_FILTER_FULL_POSITIVE((byte) 0x24),
-
- /**
- * # of times bloom FullFilter has not avoided the reads and data actually
- * exist.
- */
- BLOOM_FILTER_FULL_TRUE_POSITIVE((byte) 0x25),
-
- /**
- * Number of times bloom was checked before creating iterator on a
- * file, and the number of times the check was useful in avoiding
- * iterator creation (and thus likely IOPs).
- */
- BLOOM_FILTER_PREFIX_CHECKED((byte) 0x26),
- BLOOM_FILTER_PREFIX_USEFUL((byte) 0x27),
- BLOOM_FILTER_PREFIX_TRUE_POSITIVE((byte) 0x28),
-
- /**
- * # persistent cache hit
- */
- PERSISTENT_CACHE_HIT((byte) 0x29),
-
- /**
- * # persistent cache miss
- */
- PERSISTENT_CACHE_MISS((byte) 0x2A),
-
- /**
- * # total simulation block cache hits
- */
- SIM_BLOCK_CACHE_HIT((byte) 0x2B),
-
- /**
- * # total simulation block cache misses
- */
- SIM_BLOCK_CACHE_MISS((byte) 0x2C),
-
- /**
- * # of memtable hits.
- */
- MEMTABLE_HIT((byte) 0x2D),
-
- /**
- * # of memtable misses.
- */
- MEMTABLE_MISS((byte) 0x2E),
-
- /**
- * # of Get() queries served by L0
- */
- GET_HIT_L0((byte) 0x2F),
-
- /**
- * # of Get() queries served by L1
- */
- GET_HIT_L1((byte) 0x30),
-
- /**
- * # of Get() queries served by L2 and up
- */
- GET_HIT_L2_AND_UP((byte) 0x31),
-
- /**
- * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction
- * There are 4 reasons currently.
- */
-
- /**
- * key was written with a newer value.
- */
- COMPACTION_KEY_DROP_NEWER_ENTRY((byte) 0x32),
-
- /**
- * Also includes keys dropped for range del.
- * The key is obsolete.
- */
- COMPACTION_KEY_DROP_OBSOLETE((byte) 0x33),
-
- /**
- * key was covered by a range tombstone.
- */
- COMPACTION_KEY_DROP_RANGE_DEL((byte) 0x34),
-
- /**
- * User compaction function has dropped the key.
- */
- COMPACTION_KEY_DROP_USER((byte) 0x35),
-
- /**
- * all keys in range were deleted.
- */
- COMPACTION_RANGE_DEL_DROP_OBSOLETE((byte) 0x36),
-
- /**
- * Deletions obsoleted before bottom level due to file gap optimization.
- */
- COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE((byte) 0x37),
-
- /**
- * Compactions cancelled to prevent ENOSPC
- */
- COMPACTION_CANCELLED((byte) 0x38),
-
- /**
- * Number of keys written to the database via the Put and Write call's.
- */
- NUMBER_KEYS_WRITTEN((byte) 0x39),
-
- /**
- * Number of Keys read.
- */
- NUMBER_KEYS_READ((byte) 0x3A),
-
- /**
- * Number keys updated, if inplace update is enabled
- */
- NUMBER_KEYS_UPDATED((byte) 0x3B),
-
- /**
- * The number of uncompressed bytes issued by DB::Put(), DB::Delete(),\
- * DB::Merge(), and DB::Write().
- */
- BYTES_WRITTEN((byte) 0x3C),
-
- /**
- * The number of uncompressed bytes read from DB::Get(). It could be
- * either from memtables, cache, or table files.
- *
- * For the number of logical bytes read from DB::MultiGet(),
- * please use {@link #NUMBER_MULTIGET_BYTES_READ}.
- */
- BYTES_READ((byte) 0x3D),
-
- /**
- * The number of calls to seek.
- */
- NUMBER_DB_SEEK((byte) 0x3E),
-
- /**
- * The number of calls to next.
- */
- NUMBER_DB_NEXT((byte) 0x3F),
-
- /**
- * The number of calls to prev.
- */
- NUMBER_DB_PREV((byte) 0x40),
-
- /**
- * The number of calls to seek that returned data.
- */
- NUMBER_DB_SEEK_FOUND((byte) 0x41),
-
- /**
- * The number of calls to next that returned data.
- */
- NUMBER_DB_NEXT_FOUND((byte) 0x42),
-
- /**
- * The number of calls to prev that returned data.
- */
- NUMBER_DB_PREV_FOUND((byte) 0x43),
-
- /**
- * The number of uncompressed bytes read from an iterator.
- * Includes size of key and value.
- */
- ITER_BYTES_READ((byte) 0x44),
-
- /**
- * Number of internal skipped during iteration
- */
- NUMBER_ITER_SKIP((byte) 0x45),
-
- /**
- * Number of times we had to reseek inside an iteration to skip
- * over large number of keys with same userkey.
- */
- NUMBER_OF_RESEEKS_IN_ITERATION((byte) 0x46),
-
- /**
- * Number of iterators created.
- */
- NO_ITERATOR_CREATED((byte) 0x47),
-
- /**
- * Number of iterators deleted.
- */
- NO_ITERATOR_DELETED((byte) 0x48),
-
- NO_FILE_OPENS((byte) 0x49),
-
- NO_FILE_ERRORS((byte) 0x4A),
-
- /**
- * Writer has to wait for compaction or flush to finish.
- */
- STALL_MICROS((byte) 0x4B),
-
- /**
- * The wait time for db mutex.
- *
- * Disabled by default. To enable it set stats level to {@link StatsLevel#ALL}
- */
- DB_MUTEX_WAIT_MICROS((byte) 0x4C),
-
- /**
- * Number of MultiGet calls.
- */
- NUMBER_MULTIGET_CALLS((byte) 0x4D),
-
- /**
- * Number of MultiGet keys read.
- */
- NUMBER_MULTIGET_KEYS_READ((byte) 0x4E),
-
- /**
- * Number of MultiGet bytes read.
- */
- NUMBER_MULTIGET_BYTES_READ((byte) 0x4F),
-
- /**
- * Number of MultiGet keys found (vs number requested)
- */
- NUMBER_MULTIGET_KEYS_FOUND((byte) 0x50),
-
- NUMBER_MERGE_FAILURES((byte) 0x51),
-
- /**
- * Record the number of calls to {@link RocksDB#getUpdatesSince(long)}. Useful to keep track of
- * transaction log iterator refreshes.
- */
- GET_UPDATES_SINCE_CALLS((byte) 0x52),
-
- /**
- * Number of times WAL sync is done.
- */
- WAL_FILE_SYNCED((byte) 0x53),
-
- /**
- * Number of bytes written to WAL.
- */
- WAL_FILE_BYTES((byte) 0x54),
-
- /**
- * Writes can be processed by requesting thread or by the thread at the
- * head of the writers queue.
- */
- WRITE_DONE_BY_SELF((byte) 0x55),
-
- /**
- * Equivalent to writes done for others.
- */
- WRITE_DONE_BY_OTHER((byte) 0x56),
-
- /**
- * Number of Write calls that request WAL.
- */
- WRITE_WITH_WAL((byte) 0x57),
-
- /**
- * Bytes read during compaction.
- */
- COMPACT_READ_BYTES((byte) 0x58),
-
- /**
- * Bytes written during compaction.
- */
- COMPACT_WRITE_BYTES((byte) 0x59),
-
- /**
- * Bytes written during flush.
- */
- FLUSH_WRITE_BYTES((byte) 0x5A),
-
- /**
- * Compaction read and write statistics broken down by CompactionReason
- */
- COMPACT_READ_BYTES_MARKED((byte) 0x5B),
- COMPACT_READ_BYTES_PERIODIC((byte) 0x5C),
- COMPACT_READ_BYTES_TTL((byte) 0x5D),
- COMPACT_WRITE_BYTES_MARKED((byte) 0x5E),
- COMPACT_WRITE_BYTES_PERIODIC((byte) 0x5F),
- COMPACT_WRITE_BYTES_TTL((byte) 0x60),
-
- /**
- * Number of table's properties loaded directly from file, without creating
- * table reader object.
- */
- NUMBER_DIRECT_LOAD_TABLE_PROPERTIES((byte) 0x61),
- NUMBER_SUPERVERSION_ACQUIRES((byte) 0x62),
- NUMBER_SUPERVERSION_RELEASES((byte) 0x63),
- NUMBER_SUPERVERSION_CLEANUPS((byte) 0x64),
-
- /**
- * # of compressions/decompressions executed
- */
- NUMBER_BLOCK_COMPRESSED((byte) 0x65),
- NUMBER_BLOCK_DECOMPRESSED((byte) 0x66),
-
- BYTES_COMPRESSED_FROM((byte) 0x67),
- BYTES_COMPRESSED_TO((byte) 0x68),
- BYTES_COMPRESSION_BYPASSED((byte) 0x69),
- BYTES_COMPRESSION_REJECTED((byte) 0x6A),
- NUMBER_BLOCK_COMPRESSION_BYPASSED((byte) 0x6B),
- NUMBER_BLOCK_COMPRESSION_REJECTED((byte) 0x6C),
- BYTES_DECOMPRESSED_FROM((byte) 0x6D),
- BYTES_DECOMPRESSED_TO((byte) 0x6E),
-
- MERGE_OPERATION_TOTAL_TIME((byte) 0x6F),
- FILTER_OPERATION_TOTAL_TIME((byte) 0x70),
- COMPACTION_CPU_TOTAL_TIME((byte) 0x71),
-
- /**
- * Row cache.
- */
- ROW_CACHE_HIT((byte) 0x72),
- ROW_CACHE_MISS((byte) 0x73),
-
- /**
- * Read amplification statistics.
- *
- * Read amplification can be calculated using this formula
- * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
- *
- * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled
- */
-
- /**
- * Estimate of total bytes actually used.
- */
- READ_AMP_ESTIMATE_USEFUL_BYTES((byte) 0x74),
-
- /**
- * Total size of loaded data blocks.
- */
- READ_AMP_TOTAL_READ_BYTES((byte) 0x75),
-
- /**
- * Number of refill intervals where rate limiter's bytes are fully consumed.
- */
- NUMBER_RATE_LIMITER_DRAINS((byte) 0x76),
-
- /**
- * BlobDB specific stats
- * # of Put/PutTTL/PutUntil to BlobDB.
- */
- BLOB_DB_NUM_PUT((byte) 0x77),
-
- /**
- * # of Write to BlobDB.
- */
- BLOB_DB_NUM_WRITE((byte) 0x78),
-
- /**
- * # of Get to BlobDB.
- */
- BLOB_DB_NUM_GET((byte) 0x79),
-
- /**
- * # of MultiGet to BlobDB.
- */
- BLOB_DB_NUM_MULTIGET((byte) 0x7A),
-
- /**
- * # of Seek/SeekToFirst/SeekToLast/SeekForPrev to BlobDB iterator.
- */
- BLOB_DB_NUM_SEEK((byte) 0x7B),
-
- /**
- * # of Next to BlobDB iterator.
- */
- BLOB_DB_NUM_NEXT((byte) 0x7C),
-
- /**
- * # of Prev to BlobDB iterator.
- */
- BLOB_DB_NUM_PREV((byte) 0x7D),
-
- /**
- * # of keys written to BlobDB.
- */
- BLOB_DB_NUM_KEYS_WRITTEN((byte) 0x7E),
-
- /**
- * # of keys read from BlobDB.
- */
- BLOB_DB_NUM_KEYS_READ((byte) 0x7F),
-
- /**
- * # of bytes (key + value) written to BlobDB.
- */
- BLOB_DB_BYTES_WRITTEN((byte) -0x1),
-
- /**
- * # of bytes (keys + value) read from BlobDB.
- */
- BLOB_DB_BYTES_READ((byte) -0x2),
-
- /**
- * # of keys written by BlobDB as non-TTL inlined value.
- */
- BLOB_DB_WRITE_INLINED((byte) -0x3),
-
- /**
- * # of keys written by BlobDB as TTL inlined value.
- */
- BLOB_DB_WRITE_INLINED_TTL((byte) -0x4),
-
- /**
- * # of keys written by BlobDB as non-TTL blob value.
- */
- BLOB_DB_WRITE_BLOB((byte) -0x5),
-
- /**
- * # of keys written by BlobDB as TTL blob value.
- */
- BLOB_DB_WRITE_BLOB_TTL((byte) -0x6),
-
- /**
- * # of bytes written to blob file.
- */
- BLOB_DB_BLOB_FILE_BYTES_WRITTEN((byte) -0x7),
-
- /**
- * # of bytes read from blob file.
- */
- BLOB_DB_BLOB_FILE_BYTES_READ((byte) -0x8),
-
- /**
- * # of times a blob files being synced.
- */
- BLOB_DB_BLOB_FILE_SYNCED((byte) -0x9),
-
- /**
- * # of blob index evicted from base DB by BlobDB compaction filter because
- * of expiration.
- */
- BLOB_DB_BLOB_INDEX_EXPIRED_COUNT((byte) -0xA),
-
- /**
- * Size of blob index evicted from base DB by BlobDB compaction filter
- * because of expiration.
- */
- BLOB_DB_BLOB_INDEX_EXPIRED_SIZE((byte) -0xB),
-
- /**
- * # of blob index evicted from base DB by BlobDB compaction filter because
- * of corresponding file deleted.
- */
- BLOB_DB_BLOB_INDEX_EVICTED_COUNT((byte) -0xC),
-
- /**
- * Size of blob index evicted from base DB by BlobDB compaction filter
- * because of corresponding file deleted.
- */
- BLOB_DB_BLOB_INDEX_EVICTED_SIZE((byte) -0xD),
-
- /**
- * # of blob files being garbage collected.
- */
- BLOB_DB_GC_NUM_FILES((byte) -0xE),
-
- /**
- * # of blob files generated by garbage collection.
- */
- BLOB_DB_GC_NUM_NEW_FILES((byte) -0xF),
-
- /**
- * # of BlobDB garbage collection failures.
- */
- BLOB_DB_GC_FAILURES((byte) -0x10),
-
- /**
- * # of keys relocated to new blob file by garbage collection.
- */
- BLOB_DB_GC_NUM_KEYS_RELOCATED((byte) -0x11),
-
- /**
- * # of bytes relocated to new blob file by garbage collection.
- */
- BLOB_DB_GC_BYTES_RELOCATED((byte) -0x12),
-
- /**
- * # of blob files evicted because of BlobDB is full.
- */
- BLOB_DB_FIFO_NUM_FILES_EVICTED((byte) -0x13),
-
- /**
- * # of keys in the blob files evicted because of BlobDB is full.
- */
- BLOB_DB_FIFO_NUM_KEYS_EVICTED((byte) -0x14),
-
- /**
- * # of bytes in the blob files evicted because of BlobDB is full.
- */
- BLOB_DB_FIFO_BYTES_EVICTED((byte) -0x15),
-
- /**
- * # of times cache miss when accessing blob from blob cache.
- */
- BLOB_DB_CACHE_MISS((byte) -0x16),
-
- /**
- * # of times cache hit when accessing blob from blob cache.
- */
- BLOB_DB_CACHE_HIT((byte) -0x17),
-
- /**
- * # of data blocks added to blob cache.
- */
- BLOB_DB_CACHE_ADD((byte) -0x18),
-
- /**
- * # # of failures when adding blobs to blob cache.
- */
- BLOB_DB_CACHE_ADD_FAILURES((byte) -0x19),
-
- /**
- * # of bytes read from blob cache.
- */
- BLOB_DB_CACHE_BYTES_READ((byte) -0x1A),
-
- /**
- * # of bytes written into blob cache.
- */
- BLOB_DB_CACHE_BYTES_WRITE((byte) -0x1B),
-
- /**
- * These counters indicate a performance issue in WritePrepared transactions.
- * We should not seem them ticking them much.
- * # of times prepare_mutex_ is acquired in the fast path.
- */
- TXN_PREPARE_MUTEX_OVERHEAD((byte) -0x1C),
-
- /**
- * # of times old_commit_map_mutex_ is acquired in the fast path.
- */
- TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD((byte) -0x1D),
-
- /**
- * # of times we checked a batch for duplicate keys.
- */
- TXN_DUPLICATE_KEY_OVERHEAD((byte) -0x1E),
-
- /**
- * # of times snapshot_mutex_ is acquired in the fast path.
- */
- TXN_SNAPSHOT_MUTEX_OVERHEAD((byte) -0x1F),
-
- /**
- * # of times ::Get returned TryAgain due to expired snapshot seq
- */
- TXN_GET_TRY_AGAIN((byte) -0x20),
-
- /**
- * # of files marked as trash by delete scheduler
- */
- FILES_MARKED_TRASH((byte) -0x21),
-
- /**
- * # of trash files deleted by the background thread from the trash queue
- */
- FILES_DELETED_FROM_TRASH_QUEUE((byte) -0x22),
-
- /**
- * # of files deleted immediately by delete scheduler
- */
- FILES_DELETED_IMMEDIATELY((byte) -0x23),
-
- /**
- * DB error handler statistics
- */
- ERROR_HANDLER_BG_ERROR_COUNT((byte) -0x24),
- ERROR_HANDLER_BG_IO_ERROR_COUNT((byte) -0x25),
- ERROR_HANDLER_BG_RETRYABLE_IO_ERROR_COUNT((byte) -0x26),
- ERROR_HANDLER_AUTORESUME_COUNT((byte) -0x27),
- ERROR_HANDLER_AUTORESUME_RETRY_TOTAL_COUNT((byte) -0x28),
- ERROR_HANDLER_AUTORESUME_SUCCESS_COUNT((byte) -0x29),
-
- /**
- * Bytes of raw data (payload) found on memtable at flush time.
- * Contains the sum of garbage payload (bytes that are discarded
- * at flush time) and useful payload (bytes of data that will
- * eventually be written to SSTable).
- */
- MEMTABLE_PAYLOAD_BYTES_AT_FLUSH((byte) -0x2A),
- /**
- * Outdated bytes of data present on memtable at flush time.
- */
- MEMTABLE_GARBAGE_BYTES_AT_FLUSH((byte) -0x2B),
-
- /**
- * Bytes read by `VerifyChecksum()` and `VerifyFileChecksums()` APIs.
- */
- VERIFY_CHECKSUM_READ_BYTES((byte) -0x2C),
-
- /**
- * Bytes read/written while creating backups
- */
- BACKUP_READ_BYTES((byte) -0x2D),
- BACKUP_WRITE_BYTES((byte) -0x2E),
-
- /**
- * Remote compaction read/write statistics
- */
- REMOTE_COMPACT_READ_BYTES((byte) -0x2F),
- REMOTE_COMPACT_WRITE_BYTES((byte) -0x30),
-
- /**
- * Tiered storage related statistics
- */
- HOT_FILE_READ_BYTES((byte) -0x31),
- WARM_FILE_READ_BYTES((byte) -0x32),
- COOL_FILE_READ_BYTES((byte) -0x5B),
- COLD_FILE_READ_BYTES((byte) -0x33),
- ICE_FILE_READ_BYTES((byte) -0x59),
- HOT_FILE_READ_COUNT((byte) -0x34),
- WARM_FILE_READ_COUNT((byte) -0x35),
- COOL_FILE_READ_COUNT((byte) -0x5C),
- COLD_FILE_READ_COUNT((byte) -0x36),
- ICE_FILE_READ_COUNT((byte) -0x5A),
-
- /**
- * (non-)last level read statistics
- */
- LAST_LEVEL_READ_BYTES((byte) -0x37),
- LAST_LEVEL_READ_COUNT((byte) -0x38),
- NON_LAST_LEVEL_READ_BYTES((byte) -0x39),
- NON_LAST_LEVEL_READ_COUNT((byte) -0x3A),
-
- /**
- * Statistics on iterator Seek() (and variants) for each sorted run.
- * i.e a single user Seek() can result in many sorted run Seek()s.
- * The stats are split between last level and non-last level.
- * Filtered: a filter such as prefix Bloom filter indicate the Seek() would
- * not find anything relevant, so avoided a likely access to data+index
- * blocks.
- */
- LAST_LEVEL_SEEK_FILTERED((byte) -0x3B),
- /**
- * Filter match: a filter such as prefix Bloom filter was queried but did
- * not filter out the seek.
- */
- LAST_LEVEL_SEEK_FILTER_MATCH((byte) -0x3C),
- /**
- * At least one data block was accessed for a Seek() (or variant) on a
- * sorted run.
- */
- LAST_LEVEL_SEEK_DATA((byte) -0x3D),
- /**
- * At least one value() was accessed for the seek (suggesting it was useful),
- * and no filter such as prefix Bloom was queried.
- */
- LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER((byte) -0x3E),
- /**
- * At least one value() was accessed for the seek (suggesting it was useful),
- * after querying a filter such as prefix Bloom.
- */
- LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH((byte) -0x3F),
-
- /**
- * The same set of stats, but for non-last level seeks.
- */
- NON_LAST_LEVEL_SEEK_FILTERED((byte) -0x40),
- NON_LAST_LEVEL_SEEK_FILTER_MATCH((byte) -0x41),
- NON_LAST_LEVEL_SEEK_DATA((byte) -0x42),
- NON_LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER((byte) -0x43),
- NON_LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH((byte) -0x44),
-
- /**
- * Number of block checksum verifications
- */
- BLOCK_CHECKSUM_COMPUTE_COUNT((byte) -0x45),
-
- /**
- * Number of times RocksDB detected a corruption while verifying a block
- * checksum. RocksDB does not remember corruptions that happened during user
- * reads so the same block corruption may be detected multiple times.
- */
- BLOCK_CHECKSUM_MISMATCH_COUNT((byte) -0x46),
-
- MULTIGET_COROUTINE_COUNT((byte) -0x47),
-
- /**
- * Time spent in the ReadAsync file system call
- */
- READ_ASYNC_MICROS((byte) -0x48),
-
- /**
- * Number of errors returned to the async read callback
- */
- ASYNC_READ_ERROR_COUNT((byte) -0x49),
-
- /**
- * Number of lookup into the prefetched tail (see
- * `TABLE_OPEN_PREFETCH_TAIL_READ_BYTES`)
- * that can't find its data for table open
- */
- TABLE_OPEN_PREFETCH_TAIL_MISS((byte) -0x4A),
-
- /**
- * Number of lookup into the prefetched tail (see
- * `TABLE_OPEN_PREFETCH_TAIL_READ_BYTES`)
- * that finds its data for table open
- */
- TABLE_OPEN_PREFETCH_TAIL_HIT((byte) -0x4B),
-
- /**
- * # of times timestamps are checked on accessing the table
- */
- TIMESTAMP_FILTER_TABLE_CHECKED((byte) -0x4C),
-
- /**
- * # of times timestamps can successfully help skip the table access
- */
- TIMESTAMP_FILTER_TABLE_FILTERED((byte) -0x4D),
-
- READAHEAD_TRIMMED((byte) -0x4E),
-
- FIFO_MAX_SIZE_COMPACTIONS((byte) -0x4F),
-
- FIFO_TTL_COMPACTIONS((byte) -0x50),
-
- FIFO_CHANGE_TEMPERATURE_COMPACTIONS((byte) -0x58),
-
- PREFETCH_BYTES((byte) -0x51),
-
- PREFETCH_BYTES_USEFUL((byte) -0x52),
-
- PREFETCH_HITS((byte) -0x53),
-
- SST_FOOTER_CORRUPTION_COUNT((byte) -0x55),
-
- FILE_READ_CORRUPTION_RETRY_COUNT((byte) -0x56),
-
- FILE_READ_CORRUPTION_RETRY_SUCCESS_COUNT((byte) -0x57),
-
- /**
- * Counter for the number of times a WBWI is ingested into the DB. This
- * happens when IngestWriteBatchWithIndex() is used and when large
- * transaction optimization is enabled through
- * TransactionOptions::large_txn_commit_optimize_threshold.
- */
- NUMBER_WBWI_INGEST((byte) -0x5D),
-
- /**
- * Failure to load the UDI during SST table open
- */
- SST_USER_DEFINED_INDEX_LOAD_FAIL_COUNT((byte) -0x5E),
-
- /**
- * Bytes of output files successfully resumed during remote compaction
- */
- REMOTE_COMPACT_RESUMED_BYTES((byte) -0x5F),
-
- /**
- * MultiScan statistics
- */
-
- /**
- * # of calls to Iterator::Prepare() for multi-scan
- */
- MULTISCAN_PREPARE_CALLS((byte) -0x60),
-
- /**
- * # of errors during Iterator::Prepare() for multi-scan
- */
- MULTISCAN_PREPARE_ERRORS((byte) -0x61),
-
- /**
- * # of data blocks prefetched during multi-scan Prepare()
- */
- MULTISCAN_BLOCKS_PREFETCHED((byte) -0x62),
-
- /**
- * # of data blocks found in cache during multi-scan Prepare()
- */
- MULTISCAN_BLOCKS_FROM_CACHE((byte) -0x63),
-
- /**
- * Total bytes prefetched during multi-scan Prepare()
- */
- MULTISCAN_PREFETCH_BYTES((byte) -0x64),
-
- /**
- * # of prefetched blocks that were never accessed (wasted)
- */
- MULTISCAN_PREFETCH_BLOCKS_WASTED((byte) -0x65),
-
- /**
- * # of I/O requests issued during multi-scan Prepare()
- */
- MULTISCAN_IO_REQUESTS((byte) -0x66),
-
- /**
- * # of non-adjacent blocks coalesced into single I/O request
- */
- MULTISCAN_IO_COALESCED_NONADJACENT((byte) -0x67),
-
- /**
- * # of seek errors during multi-scan iteration
- */
- MULTISCAN_SEEK_ERRORS((byte) -0x68),
-
- TICKER_ENUM_MAX((byte) -0x54);
-
- private final byte value;
-
- TickerType(final byte value) {
- this.value = value;
- }
+ /**
+ * total block cache misses
+ *
+ * REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS +
+ * BLOCK_CACHE_FILTER_MISS +
+ * BLOCK_CACHE_DATA_MISS;
+ */
+ BLOCK_CACHE_MISS((byte) 0x0),
+
+ /**
+ * total block cache hit
+ *
+ * REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT +
+ * BLOCK_CACHE_FILTER_HIT +
+ * BLOCK_CACHE_DATA_HIT;
+ */
+ BLOCK_CACHE_HIT((byte) 0x1),
+
+ BLOCK_CACHE_ADD((byte) 0x2),
+
+ /**
+ * Number of failures when adding blocks to block cache.
+ */
+ BLOCK_CACHE_ADD_FAILURES((byte) 0x3),
+
+ /**
+ * Number of times cache miss when accessing index block from block cache.
+ */
+ BLOCK_CACHE_INDEX_MISS((byte) 0x4),
+
+ /**
+ * Number of times cache hit when accessing index block from block cache.
+ */
+ BLOCK_CACHE_INDEX_HIT((byte) 0x5),
+
+ /**
+ * Number of index blocks added to block cache.
+ */
+ BLOCK_CACHE_INDEX_ADD((byte) 0x6),
+
+ /**
+ * Number of bytes of index blocks inserted into cache
+ */
+ BLOCK_CACHE_INDEX_BYTES_INSERT((byte) 0x7),
+
+ /**
+ * Number of times cache miss when accessing filter block from block cache.
+ */
+ BLOCK_CACHE_FILTER_MISS((byte) 0x8),
+
+ /**
+ * Number of times cache hit when accessing filter block from block cache.
+ */
+ BLOCK_CACHE_FILTER_HIT((byte) 0x9),
+
+ /**
+ * Number of filter blocks added to block cache.
+ */
+ BLOCK_CACHE_FILTER_ADD((byte) 0xA),
+
+ /**
+ * Number of bytes of bloom filter blocks inserted into cache
+ */
+ BLOCK_CACHE_FILTER_BYTES_INSERT((byte) 0xB),
+
+ /**
+ * Number of times cache miss when accessing data block from block cache.
+ */
+ BLOCK_CACHE_DATA_MISS((byte) 0xC),
+
+ /**
+ * Number of times cache hit when accessing data block from block cache.
+ */
+ BLOCK_CACHE_DATA_HIT((byte) 0xD),
+
+ /**
+ * Number of data blocks added to block cache.
+ */
+ BLOCK_CACHE_DATA_ADD((byte) 0xE),
+
+ /**
+ * Number of bytes of data blocks inserted into cache
+ */
+ BLOCK_CACHE_DATA_BYTES_INSERT((byte) 0xF),
+
+ /**
+ * Number of bytes read from cache.
+ */
+ BLOCK_CACHE_BYTES_READ((byte) 0x10),
+
+ /**
+ * Number of bytes written into cache.
+ */
+ BLOCK_CACHE_BYTES_WRITE((byte) 0x11),
+
+ /**
+ * Number of Block cache Compression dictionary misses.
+ */
+ BLOCK_CACHE_COMPRESSION_DICT_MISS((byte) 0x12),
+
+ /**
+ * Number of Block cache Compression dictionary hits.
+ */
+ BLOCK_CACHE_COMPRESSION_DICT_HIT((byte) 0x13),
+
+ /**
+ * Number of Block cache Compression dictionary additions.
+ */
+ BLOCK_CACHE_COMPRESSION_DICT_ADD((byte) 0x14),
+
+ /**
+ * Number of Block cache Compression dictionary bytes inserted.
+ */
+ BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT((byte) 0x15),
+
+ /**
+ * Redundant additions to block cache.
+ */
+ BLOCK_CACHE_ADD_REDUNDANT((byte) 0x16),
+
+ /**
+ * Redundant additions to block cache index.
+ */
+ BLOCK_CACHE_INDEX_ADD_REDUNDANT((byte) 0x17),
+
+ /**
+ * Redundant additions to block cache filter.
+ */
+ BLOCK_CACHE_FILTER_ADD_REDUNDANT((byte) 0x18),
+
+ /**
+ * Redundant additions to block cache data.
+ */
+ BLOCK_CACHE_DATA_ADD_REDUNDANT((byte) 0x19),
+
+ /**
+ * Redundant additions to block cache compression dictionary.
+ */
+ BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT((byte) 0x1A),
+
+ /**
+ * Number of secondary cache hits.
+ */
+ SECONDARY_CACHE_HITS((byte) 0x1B),
+
+ /**
+ * Number of secondary cache filter hits.
+ */
+ SECONDARY_CACHE_FILTER_HITS((byte) 0x1C),
+
+ /**
+ * Number of secondary cache index hits.
+ */
+ SECONDARY_CACHE_INDEX_HITS((byte) 0x1D),
+
+ /**
+ * Number of secondary cache data hits.
+ */
+ SECONDARY_CACHE_DATA_HITS((byte) 0x1E),
+
+ /**
+ * Number of compressed secondary cache dummy hits.
+ */
+ COMPRESSED_SECONDARY_CACHE_DUMMY_HITS((byte) 0x1F),
+
+ /**
+ * Number of compressed secondary cache hits.
+ */
+ COMPRESSED_SECONDARY_CACHE_HITS((byte) 0x20),
+
+ /**
+ * Number of compressed secondary cache promotions.
+ */
+ COMPRESSED_SECONDARY_CACHE_PROMOTIONS((byte) 0x21),
+
+ /**
+ * Number of compressed secondary cache promotion skips.
+ */
+ COMPRESSED_SECONDARY_CACHE_PROMOTION_SKIPS((byte) 0x22),
+
+ /**
+ * Number of times bloom filter has avoided file reads.
+ */
+ BLOOM_FILTER_USEFUL((byte) 0x23),
+
+ /**
+ * Number of times bloom FullFilter has not avoided the reads.
+ */
+ BLOOM_FILTER_FULL_POSITIVE((byte) 0x24),
+
+ /**
+ * Number of times bloom FullFilter has not avoided the reads and data actually
+ * exist.
+ */
+ BLOOM_FILTER_FULL_TRUE_POSITIVE((byte) 0x25),
+
+ /**
+ * Number of times bloom was checked before creating an iterator on a file.
+ */
+ BLOOM_FILTER_PREFIX_CHECKED((byte) 0x26),
+
+ /**
+ * Number of times it was useful (in avoiding iterator creation) that bloom was checked before
+ * creating an iterator on a file.
+ */
+ BLOOM_FILTER_PREFIX_USEFUL((byte) 0x27),
+
+ /**
+ * Number of times bloom produced a true positive result.
+ */
+ BLOOM_FILTER_PREFIX_TRUE_POSITIVE((byte) 0x28),
+
+ /**
+ * Number of persistent cache hit
+ */
+ PERSISTENT_CACHE_HIT((byte) 0x29),
+
+ /**
+ * Number of persistent cache miss
+ */
+ PERSISTENT_CACHE_MISS((byte) 0x2A),
+
+ /**
+ * Number of total simulation block cache hits
+ */
+ SIM_BLOCK_CACHE_HIT((byte) 0x2B),
+
+ /**
+ * Number of total simulation block cache misses
+ */
+ SIM_BLOCK_CACHE_MISS((byte) 0x2C),
+
+ /**
+ * Number of memtable hits.
+ */
+ MEMTABLE_HIT((byte) 0x2D),
+
+ /**
+ * Number of memtable misses.
+ */
+ MEMTABLE_MISS((byte) 0x2E),
+
+ /**
+ * Number of Get() queries served by L0
+ */
+ GET_HIT_L0((byte) 0x2F),
+
+ /**
+ * Number of Get() queries served by L1
+ */
+ GET_HIT_L1((byte) 0x30),
+
+ /**
+ * Number of Get() queries served by L2 and up
+ */
+ GET_HIT_L2_AND_UP((byte) 0x31),
+
+ /**
+ * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction
+ * There are 4 reasons currently.
+ */
+
+ /**
+ * key was written with a newer value.
+ */
+ COMPACTION_KEY_DROP_NEWER_ENTRY((byte) 0x32),
+
+ /**
+ * Also includes keys dropped for range del.
+ * The key is obsolete.
+ */
+ COMPACTION_KEY_DROP_OBSOLETE((byte) 0x33),
+
+ /**
+ * key was covered by a range tombstone.
+ */
+ COMPACTION_KEY_DROP_RANGE_DEL((byte) 0x34),
+
+ /**
+ * User compaction function has dropped the key.
+ */
+ COMPACTION_KEY_DROP_USER((byte) 0x35),
+
+ /**
+ * all keys in range were deleted.
+ */
+ COMPACTION_RANGE_DEL_DROP_OBSOLETE((byte) 0x36),
+
+ /**
+ * Deletions obsoleted before bottom level due to file gap optimization.
+ */
+ COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE((byte) 0x37),
+
+ /**
+ * Compactions cancelled to prevent ENOSPC
+ */
+ COMPACTION_CANCELLED((byte) 0x38),
+
+ /**
+ * Number of keys written to the database via the Put and Write calls.
+ */
+ NUMBER_KEYS_WRITTEN((byte) 0x39),
+
+ /**
+ * Number of Keys read.
+ */
+ NUMBER_KEYS_READ((byte) 0x3A),
+
+ /**
+ * Number of keys updated, if in-place update is enabled.
+ */
+ NUMBER_KEYS_UPDATED((byte) 0x3B),
+
+ /**
+ * The number of uncompressed bytes issued by DB::Put(), DB::Delete(),
+ * DB::Merge(), and DB::Write().
+ */
+ BYTES_WRITTEN((byte) 0x3C),
+
+ /**
+ * The number of uncompressed bytes read from DB::Get(). It could be
+ * either from memtables, cache, or table files.
+ *
+ * For the number of logical bytes read from DB::MultiGet(),
+ * please use {@link #NUMBER_MULTIGET_BYTES_READ}.
+ */
+ BYTES_READ((byte) 0x3D),
+
+ /**
+ * The number of calls to seek.
+ */
+ NUMBER_DB_SEEK((byte) 0x3E),
+
+ /**
+ * The number of calls to next.
+ */
+ NUMBER_DB_NEXT((byte) 0x3F),
+
+ /**
+ * The number of calls to prev.
+ */
+ NUMBER_DB_PREV((byte) 0x40),
+
+ /**
+ * The number of calls to seek that returned data.
+ */
+ NUMBER_DB_SEEK_FOUND((byte) 0x41),
+
+ /**
+ * The number of calls to next that returned data.
+ */
+ NUMBER_DB_NEXT_FOUND((byte) 0x42),
+
+ /**
+ * The number of calls to prev that returned data.
+ */
+ NUMBER_DB_PREV_FOUND((byte) 0x43),
+
+ /**
+ * The number of uncompressed bytes read from an iterator.
+ * Includes size of key and value.
+ */
+ ITER_BYTES_READ((byte) 0x44),
+
+ /**
+ * Number of internal keys skipped during iteration.
+ */
+ NUMBER_ITER_SKIP((byte) 0x45),
+
+ /**
+ * Number of times we had to reseek inside an iteration to skip
+ * over a large number of keys with the same user key.
+ */
+ NUMBER_OF_RESEEKS_IN_ITERATION((byte) 0x46),
+
+ /**
+ * Number of iterators created.
+ */
+ NO_ITERATOR_CREATED((byte) 0x47),
+
+ /**
+ * Number of iterators deleted.
+ */
+ NO_ITERATOR_DELETED((byte) 0x48),
+
+ /**
+ * Number of file opens.
+ */
+ NO_FILE_OPENS((byte) 0x49),
+
+ /**
+ * Number of file errors.
+ */
+ NO_FILE_ERRORS((byte) 0x4A),
+
+ /**
+ * Writer has to wait for compaction or flush to finish.
+ */
+ STALL_MICROS((byte) 0x4B),
+
+ /**
+ * The wait time for db mutex.
+ *
+ * Disabled by default. To enable it set stats level to {@link StatsLevel#ALL}
+ */
+ DB_MUTEX_WAIT_MICROS((byte) 0x4C),
+
+ /**
+ * Number of MultiGet calls.
+ */
+ NUMBER_MULTIGET_CALLS((byte) 0x4D),
+
+ /**
+ * Number of MultiGet keys read.
+ */
+ NUMBER_MULTIGET_KEYS_READ((byte) 0x4E),
+
+ /**
+ * Number of MultiGet bytes read.
+ */
+ NUMBER_MULTIGET_BYTES_READ((byte) 0x4F),
+
+ /**
+ * Number of MultiGet keys found (vs number requested)
+ */
+ NUMBER_MULTIGET_KEYS_FOUND((byte) 0x50),
+
+ /**
+ * Number of Merge failures.
+ */
+ NUMBER_MERGE_FAILURES((byte) 0x51),
+
+ /**
+ * Record the number of calls to {@link RocksDB#getUpdatesSince(long)}. Useful to keep track of
+ * transaction log iterator refreshes.
+ */
+ GET_UPDATES_SINCE_CALLS((byte) 0x52),
+
+ /**
+ * Number of times WAL sync is done.
+ */
+ WAL_FILE_SYNCED((byte) 0x53),
+
+ /**
+ * Number of bytes written to WAL.
+ */
+ WAL_FILE_BYTES((byte) 0x54),
+
+ /**
+ * Writes can be processed by the requesting thread or by the thread at the
+ * head of the writers queue.
+ */
+ WRITE_DONE_BY_SELF((byte) 0x55),
+
+ /**
+ * Equivalent to writes done for others.
+ */
+ WRITE_DONE_BY_OTHER((byte) 0x56),
+
+ /**
+ * Number of Write calls that request WAL.
+ */
+ WRITE_WITH_WAL((byte) 0x57),
+
+ /**
+ * Bytes read during compaction.
+ */
+ COMPACT_READ_BYTES((byte) 0x58),
+
+ /**
+ * Bytes written during compaction.
+ */
+ COMPACT_WRITE_BYTES((byte) 0x59),
+
+ /**
+ * Bytes written during flush.
+ */
+ FLUSH_WRITE_BYTES((byte) 0x5A),
+
+ /**
+ * Compaction read bytes for files marked for compaction.
+ */
+ COMPACT_READ_BYTES_MARKED((byte) 0x5B),
+
+ /**
+ * Compaction read bytes for periodic compactions.
+ */
+ COMPACT_READ_BYTES_PERIODIC((byte) 0x5C),
+
+ /**
+ * Compaction read bytes for TTL compactions.
+ */
+ COMPACT_READ_BYTES_TTL((byte) 0x5D),
+
+ /**
+ * Compaction write bytes for files marked for compaction.
+ */
+ COMPACT_WRITE_BYTES_MARKED((byte) 0x5E),
+
+ /**
+ * Compaction write bytes for periodic compactions.
+ */
+ COMPACT_WRITE_BYTES_PERIODIC((byte) 0x5F),
+
+ /**
+ * Compaction write bytes for TTL compactions.
+ */
+ COMPACT_WRITE_BYTES_TTL((byte) 0x60),
+
+ /**
+ * Number of table properties loaded directly from file, without creating a table reader object.
+ */
+ NUMBER_DIRECT_LOAD_TABLE_PROPERTIES((byte) 0x61),
+
+ /**
+ * Number of superversion acquires.
+ */
+ NUMBER_SUPERVERSION_ACQUIRES((byte) 0x62),
+
+ /**
+ * Number of superversion releases.
+ */
+ NUMBER_SUPERVERSION_RELEASES((byte) 0x63),
+
+ /**
+ * Number of superversion cleanups.
+ */
+ NUMBER_SUPERVERSION_CLEANUPS((byte) 0x64),
+
+ /**
+ * Number of compressions executed.
+ */
+ NUMBER_BLOCK_COMPRESSED((byte) 0x65),
+
+ /**
+ * Number of decompressions executed.
+ */
+ NUMBER_BLOCK_DECOMPRESSED((byte) 0x66),
+
+ /**
+ * Number of input bytes (uncompressed) to compression for SST blocks that are stored compressed.
+ */
+ BYTES_COMPRESSED_FROM((byte) 0x67),
+
+ /**
+ * Number of output bytes (compressed) from compression for SST blocks that are stored compressed.
+ */
+ BYTES_COMPRESSED_TO((byte) 0x68),
+
+ /**
+ * Number of uncompressed bytes for SST blocks that are stored uncompressed because compression
+ * type is kNoCompression, or some error case caused compression not to run or produce an output.
+ * Index blocks are only counted if enable_index_compression is true.
+ */
+ BYTES_COMPRESSION_BYPASSED((byte) 0x69),
+
+ /**
+ * Number of input bytes (uncompressed) to compression for SST blocks that are stored uncompressed
+ * because the compression result was rejected, either because the ratio was not acceptable (see
+ * CompressionOptions::max_compressed_bytes_per_kb) or found invalid by the `verify_compression`
+ * option.
+ */
+ BYTES_COMPRESSION_REJECTED((byte) 0x6A),
+
+ /**
+ * Like {@link #BYTES_COMPRESSION_BYPASSED} but counting number of blocks.
+ */
+ NUMBER_BLOCK_COMPRESSION_BYPASSED((byte) 0x6B),
+
+ /**
+ * Like {@link #BYTES_COMPRESSION_REJECTED} but counting number of blocks.
+ */
+ NUMBER_BLOCK_COMPRESSION_REJECTED((byte) 0x6C),
+
+ /**
+ * Number of input bytes (compressed) to decompression in reading compressed SST blocks from
+ * storage.
+ */
+ BYTES_DECOMPRESSED_FROM((byte) 0x6D),
+
+ /**
+ * Number of output bytes (uncompressed) from decompression in reading compressed SST blocks from
+ * storage.
+ */
+ BYTES_DECOMPRESSED_TO((byte) 0x6E),
+
+ /**
+ * Merge operations cumulative time.
+ */
+ MERGE_OPERATION_TOTAL_TIME((byte) 0x6F),
+
+ /**
+ * Filter operations cumulative time.
+ */
+ FILTER_OPERATION_TOTAL_TIME((byte) 0x70),
+
+ /**
+ * Compaction CPU cumulative time.
+ */
+ COMPACTION_CPU_TOTAL_TIME((byte) 0x71),
+
+ /**
+ * Row cache hits.
+ */
+ ROW_CACHE_HIT((byte) 0x72),
+
+ /**
+ * Row cache misses.
+ */
+ ROW_CACHE_MISS((byte) 0x73),
+
+ /**
+ * Read amplification statistics.
+ *
+ * Read amplification can be calculated using this formula
+ * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
+ *
+ * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled
+ */
+
+ /**
+ * Estimate of total bytes actually used.
+ */
+ READ_AMP_ESTIMATE_USEFUL_BYTES((byte) 0x74),
+
+ /**
+ * Total size of loaded data blocks.
+ */
+ READ_AMP_TOTAL_READ_BYTES((byte) 0x75),
+
+ /**
+ * Number of refill intervals where rate limiter's bytes are fully consumed.
+ */
+ NUMBER_RATE_LIMITER_DRAINS((byte) 0x76),
+
+ /**
+ * BlobDB specific stats
+ * Number of Put/PutTTL/PutUntil to BlobDB.
+ */
+ BLOB_DB_NUM_PUT((byte) 0x77),
+
+ /**
+ * Number of Write to BlobDB.
+ */
+ BLOB_DB_NUM_WRITE((byte) 0x78),
+
+ /**
+ * Number of Get to BlobDB.
+ */
+ BLOB_DB_NUM_GET((byte) 0x79),
+
+ /**
+ * Number of MultiGet to BlobDB.
+ */
+ BLOB_DB_NUM_MULTIGET((byte) 0x7A),
+
+ /**
+ * Number of Seek/SeekToFirst/SeekToLast/SeekForPrev to BlobDB iterator.
+ */
+ BLOB_DB_NUM_SEEK((byte) 0x7B),
+
+ /**
+ * Number of Next to BlobDB iterator.
+ */
+ BLOB_DB_NUM_NEXT((byte) 0x7C),
+
+ /**
+ * Number of Prev to BlobDB iterator.
+ */
+ BLOB_DB_NUM_PREV((byte) 0x7D),
+
+ /**
+ * Number of keys written to BlobDB.
+ */
+ BLOB_DB_NUM_KEYS_WRITTEN((byte) 0x7E),
+
+ /**
+ * Number of keys read from BlobDB.
+ */
+ BLOB_DB_NUM_KEYS_READ((byte) 0x7F),
+
+ /**
+ * Number of bytes (key + value) written to BlobDB.
+ */
+ BLOB_DB_BYTES_WRITTEN((byte) -0x1),
+
+ /**
+ * Number of bytes (keys + value) read from BlobDB.
+ */
+ BLOB_DB_BYTES_READ((byte) -0x2),
+
+ /**
+ * Number of keys written by BlobDB as non-TTL inlined value.
+ */
+ BLOB_DB_WRITE_INLINED((byte) -0x3),
+
+ /**
+ * Number of keys written by BlobDB as TTL inlined value.
+ */
+ BLOB_DB_WRITE_INLINED_TTL((byte) -0x4),
+
+ /**
+ * Number of keys written by BlobDB as non-TTL blob value.
+ */
+ BLOB_DB_WRITE_BLOB((byte) -0x5),
+
+ /**
+ * Number of keys written by BlobDB as TTL blob value.
+ */
+ BLOB_DB_WRITE_BLOB_TTL((byte) -0x6),
+
+ /**
+ * Number of bytes written to blob file.
+ */
+ BLOB_DB_BLOB_FILE_BYTES_WRITTEN((byte) -0x7),
+
+ /**
+ * Number of bytes read from blob file.
+ */
+ BLOB_DB_BLOB_FILE_BYTES_READ((byte) -0x8),
+
+ /**
+ * Number of times a blob file is synced.
+ */
+ BLOB_DB_BLOB_FILE_SYNCED((byte) -0x9),
+
+ /**
+ * Number of blob indexes evicted from base DB by BlobDB compaction filter because
+ * of expiration.
+ */
+ BLOB_DB_BLOB_INDEX_EXPIRED_COUNT((byte) -0xA),
+
+ /**
+ * Size of blob index evicted from base DB by BlobDB compaction filter
+ * because of expiration.
+ */
+ BLOB_DB_BLOB_INDEX_EXPIRED_SIZE((byte) -0xB),
+
+ /**
+ * Number of blob indexes evicted from base DB by BlobDB compaction filter because
+ * of corresponding file deleted.
+ */
+ BLOB_DB_BLOB_INDEX_EVICTED_COUNT((byte) -0xC),
+
+ /**
+ * Size of blob index evicted from base DB by BlobDB compaction filter
+ * because of corresponding file deleted.
+ */
+ BLOB_DB_BLOB_INDEX_EVICTED_SIZE((byte) -0xD),
+
+ /**
+ * Number of blob files being garbage collected.
+ */
+ BLOB_DB_GC_NUM_FILES((byte) -0xE),
+
+ /**
+ * Number of blob files generated by garbage collection.
+ */
+ BLOB_DB_GC_NUM_NEW_FILES((byte) -0xF),
+
+ /**
+ * Number of BlobDB garbage collection failures.
+ */
+ BLOB_DB_GC_FAILURES((byte) -0x10),
+
+ /**
+ * Number of keys relocated to new blob file by garbage collection.
+ */
+ BLOB_DB_GC_NUM_KEYS_RELOCATED((byte) -0x11),
+
+ /**
+ * Number of bytes relocated to new blob file by garbage collection.
+ */
+ BLOB_DB_GC_BYTES_RELOCATED((byte) -0x12),
+
+ /**
+ * Number of blob files evicted because BlobDB is full.
+ */
+ BLOB_DB_FIFO_NUM_FILES_EVICTED((byte) -0x13),
+
+ /**
+ * Number of keys in the blob files evicted because BlobDB is full.
+ */
+ BLOB_DB_FIFO_NUM_KEYS_EVICTED((byte) -0x14),
+
+ /**
+ * Number of bytes in the blob files evicted because BlobDB is full.
+ */
+ BLOB_DB_FIFO_BYTES_EVICTED((byte) -0x15),
+
+ /**
+ * Number of times cache miss when accessing blob from blob cache.
+ */
+ BLOB_DB_CACHE_MISS((byte) -0x16),
+
+ /**
+ * Number of times cache hit when accessing blob from blob cache.
+ */
+ BLOB_DB_CACHE_HIT((byte) -0x17),
+
+ /**
+ * Number of data blocks added to blob cache.
+ */
+ BLOB_DB_CACHE_ADD((byte) -0x18),
+
+ /**
+ * Number of failures when adding blobs to blob cache.
+ */
+ BLOB_DB_CACHE_ADD_FAILURES((byte) -0x19),
+
+ /**
+ * Number of bytes read from blob cache.
+ */
+ BLOB_DB_CACHE_BYTES_READ((byte) -0x1A),
+
+ /**
+ * Number of bytes written into blob cache.
+ */
+ BLOB_DB_CACHE_BYTES_WRITE((byte) -0x1B),
+
+ /**
+ * These counters indicate a performance issue in WritePrepared transactions.
+ * We should not see them ticking much.
+ * Number of times prepare_mutex_ is acquired in the fast path.
+ */
+ TXN_PREPARE_MUTEX_OVERHEAD((byte) -0x1C),
+
+ /**
+ * Number of times old_commit_map_mutex_ is acquired in the fast path.
+ */
+ TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD((byte) -0x1D),
+
+ /**
+ * Number of times we checked a batch for duplicate keys.
+ */
+ TXN_DUPLICATE_KEY_OVERHEAD((byte) -0x1E),
+
+ /**
+ * Number of times snapshot_mutex_ is acquired in the fast path.
+ */
+ TXN_SNAPSHOT_MUTEX_OVERHEAD((byte) -0x1F),
+
+ /**
+ * Number of times ::Get returned TryAgain due to expired snapshot seq
+ */
+ TXN_GET_TRY_AGAIN((byte) -0x20),
+
+ /**
+ * Number of files marked as trash by delete scheduler
+ */
+ FILES_MARKED_TRASH((byte) -0x21),
+
+ /**
+ * Number of trash files deleted by the background thread from the trash queue
+ */
+ FILES_DELETED_FROM_TRASH_QUEUE((byte) -0x22),
+
+ /**
+ * Number of files deleted immediately by delete scheduler
+ */
+ FILES_DELETED_IMMEDIATELY((byte) -0x23),
+
+ /**
+ * DB error handler statistics
+ */
+ ERROR_HANDLER_BG_ERROR_COUNT((byte) -0x24),
+
+ /**
+ * Number of background I/O errors handled by the error handler.
+ */
+ ERROR_HANDLER_BG_IO_ERROR_COUNT((byte) -0x25),
+
+ /**
+ * Number of retryable background I/O errors handled by the error handler.
+ * This is a subset of {@link #ERROR_HANDLER_BG_IO_ERROR_COUNT}.
+ */
+ ERROR_HANDLER_BG_RETRYABLE_IO_ERROR_COUNT((byte) -0x26),
+
+ /**
+ * Number of auto resumes handled by the error handler.
+ */
+ ERROR_HANDLER_AUTORESUME_COUNT((byte) -0x27),
+
+ /**
+ * Total number of auto resume retries handled by the error handler.
+ */
+ ERROR_HANDLER_AUTORESUME_RETRY_TOTAL_COUNT((byte) -0x28),
+
+ /**
+ * Number of successful auto resumes handled by the error handler.
+ */
+ ERROR_HANDLER_AUTORESUME_SUCCESS_COUNT((byte) -0x29),
+
+ /**
+ * Bytes of raw data (payload) found on memtable at flush time.
+ * Contains the sum of garbage payload (bytes that are discarded
+ * at flush time) and useful payload (bytes of data that will
+ * eventually be written to SSTable).
+ */
+ MEMTABLE_PAYLOAD_BYTES_AT_FLUSH((byte) -0x2A),
+
+ /**
+ * Outdated bytes of data present on memtable at flush time.
+ */
+ MEMTABLE_GARBAGE_BYTES_AT_FLUSH((byte) -0x2B),
+
+ /**
+ * Bytes read by `VerifyChecksum()` and `VerifyFileChecksums()` APIs.
+ */
+ VERIFY_CHECKSUM_READ_BYTES((byte) -0x2C),
+
+ /**
+ * Bytes read whilst creating backups.
+ */
+ BACKUP_READ_BYTES((byte) -0x2D),
+
+ /**
+ * Bytes written whilst creating backups.
+ */
+ BACKUP_WRITE_BYTES((byte) -0x2E),
+
+ /**
+ * Remote compaction bytes read.
+ */
+ REMOTE_COMPACT_READ_BYTES((byte) -0x2F),
+
+ /**
+ * Remote compaction bytes written.
+ */
+ REMOTE_COMPACT_WRITE_BYTES((byte) -0x30),
+
+ /**
+ * Bytes read from hot files.
+ */
+ HOT_FILE_READ_BYTES((byte) -0x31),
+
+ /**
+ * Bytes read from warm files.
+ */
+ WARM_FILE_READ_BYTES((byte) -0x32),
+
+ /**
+ * Bytes read from cool files.
+ */
+ COOL_FILE_READ_BYTES((byte) -0x5B),
+
+ /**
+ * Bytes read from cold files.
+ */
+ COLD_FILE_READ_BYTES((byte) -0x33),
+
+ /**
+ * Bytes read from ice cold files.
+ */
+ ICE_FILE_READ_BYTES((byte) -0x59),
+
+ /**
+ * Number of reads from hot files.
+ */
+ HOT_FILE_READ_COUNT((byte) -0x34),
+
+ /**
+ * Number of reads from warm files.
+ */
+ WARM_FILE_READ_COUNT((byte) -0x35),
+
+ /**
+ * Number of reads from cool files.
+ */
+ COOL_FILE_READ_COUNT((byte) -0x5C),
+
+ /**
+ * Number of reads from cold files.
+ */
+ COLD_FILE_READ_COUNT((byte) -0x36),
+
+ /**
+ * Number of reads from ice cold files.
+ */
+ ICE_FILE_READ_COUNT((byte) -0x5A),
+
+ /**
+ * Bytes read from the last level.
+ */
+ LAST_LEVEL_READ_BYTES((byte) -0x37),
+
+ /**
+ * Number of reads from the last level.
+ */
+ LAST_LEVEL_READ_COUNT((byte) -0x38),
+
+ /**
+ * Bytes read from the non-last level.
+ */
+ NON_LAST_LEVEL_READ_BYTES((byte) -0x39),
+
+ /**
+ * Number of reads from the non-last level.
+ */
+ NON_LAST_LEVEL_READ_COUNT((byte) -0x3A),
+
+ /**
+ * Statistics on iterator Seek() (and variants) for each sorted run.
+ * i.e. a single user Seek() can result in many sorted run Seek()s.
+ * The stats are split between last level and non-last level.
+ * Filtered: a filter such as a prefix Bloom filter indicated the Seek() would
+ * not find anything relevant, so a likely access to data+index blocks was
+ * avoided.
+ */
+
+ LAST_LEVEL_SEEK_FILTERED((byte) -0x3B),
+ /**
+ * Filter match: a filter such as prefix Bloom filter was queried but did
+ * not filter out the seek.
+ */
+ LAST_LEVEL_SEEK_FILTER_MATCH((byte) -0x3C),
+
+ /**
+ * At least one data block was accessed for a Seek() (or variant) on a
+ * sorted run.
+ */
+ LAST_LEVEL_SEEK_DATA((byte) -0x3D),
+
+ /**
+ * At least one value() was accessed for the seek (suggesting it was useful),
+ * and no filter such as prefix Bloom was queried.
+ */
+ LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER((byte) -0x3E),
+
+ /**
+ * At least one value() was accessed for the seek (suggesting it was useful),
+ * after querying a filter such as prefix Bloom.
+ */
+ LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH((byte) -0x3F),
+
+ /**
+ * Similar to {@link #LAST_LEVEL_SEEK_FILTERED} but for the non-last level.
+ */
+ NON_LAST_LEVEL_SEEK_FILTERED((byte) -0x40),
+
+ /**
+ * Similar to {@link #LAST_LEVEL_SEEK_FILTER_MATCH} but for the non-last level.
+ */
+ NON_LAST_LEVEL_SEEK_FILTER_MATCH((byte) -0x41),
+
+ /**
+ * Similar to {@link #LAST_LEVEL_SEEK_DATA} but for the non-last level.
+ */
+ NON_LAST_LEVEL_SEEK_DATA((byte) -0x42),
+
+ /**
+ * Similar to {@link #LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER} but for the non-last level.
+ */
+ NON_LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER((byte) -0x43),
+
+ /**
+ * Similar to {@link #LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH} but for the non-last level.
+ */
+ NON_LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH((byte) -0x44),
+
+ /**
+ * Number of block checksum verifications.
+ */
+ BLOCK_CHECKSUM_COMPUTE_COUNT((byte) -0x45),
+
+ /**
+ * Number of times RocksDB detected a corruption while verifying a block
+ * checksum. RocksDB does not remember corruptions that happened during user
+ * reads so the same block corruption may be detected multiple times.
+ */
+ BLOCK_CHECKSUM_MISMATCH_COUNT((byte) -0x46),
+
+ /**
+ * Number of MultiGet coroutines.
+ */
+ MULTIGET_COROUTINE_COUNT((byte) -0x47),
+
+ /**
+ * Time spent in the ReadAsync file system call.
+ */
+ READ_ASYNC_MICROS((byte) -0x48),
+
+ /**
+ * Number of errors returned to the async read callback.
+ */
+ ASYNC_READ_ERROR_COUNT((byte) -0x49),
+
+ /**
+ * Number of lookups into the prefetched tail (see
+ * `TABLE_OPEN_PREFETCH_TAIL_READ_BYTES`)
+ * that could not find their data for table open.
+ */
+ TABLE_OPEN_PREFETCH_TAIL_MISS((byte) -0x4A),
+
+ /**
+ * Number of lookups into the prefetched tail (see
+ * `TABLE_OPEN_PREFETCH_TAIL_READ_BYTES`)
+ * that found their data for table open.
+ */
+ TABLE_OPEN_PREFETCH_TAIL_HIT((byte) -0x4B),
+
+ /**
+ * Number of times timestamps are checked on accessing the table
+ */
+ TIMESTAMP_FILTER_TABLE_CHECKED((byte) -0x4C),
+
+ /**
+ * Number of times timestamps can successfully help skip the table access
+ */
+ TIMESTAMP_FILTER_TABLE_FILTERED((byte) -0x4D),
+
+ /**
+ * Number of times readahead is trimmed during scans when ReadOptions.auto_readahead_size is set.
+ */
+ READAHEAD_TRIMMED((byte) -0x4E),
+
+ /**
+ * Number of FIFO compactions triggered by reaching the maximum table files size.
+ */
+ FIFO_MAX_SIZE_COMPACTIONS((byte) -0x4F),
+
+ /**
+ * Number of FIFO compactions triggered by TTL.
+ */
+ FIFO_TTL_COMPACTIONS((byte) -0x50),
+
+ /**
+ * Number of FIFO compactions that change file temperature.
+ */
+ FIFO_CHANGE_TEMPERATURE_COMPACTIONS((byte) -0x58),
+
+ /**
+ * Number of bytes prefetched during user initiated scan.
+ */
+ PREFETCH_BYTES((byte) -0x51),
+
+ /**
+ * Number of prefetched bytes that were actually useful during user initiated scan.
+ */
+ PREFETCH_BYTES_USEFUL((byte) -0x52),
+
+ /**
+ * Number of FS reads avoided due to prefetching during user initiated scan.
+ */
+ PREFETCH_HITS((byte) -0x53),
+
+ /**
+ * Footer corruption detected when opening an SST file for reading.
+ */
+ SST_FOOTER_CORRUPTION_COUNT((byte) -0x55),
+
+ /**
+ * Counter for file read retries with the verify_and_reconstruct_read file system option after
+ * detecting a checksum mismatch.
+ */
+ FILE_READ_CORRUPTION_RETRY_COUNT((byte) -0x56),
+
+ /**
+ * Counter for successful file read retries with the verify_and_reconstruct_read file system
+ * option after detecting a checksum mismatch.
+ */
+ FILE_READ_CORRUPTION_RETRY_SUCCESS_COUNT((byte) -0x57),
+
+ /**
+ * Counter for the number of times a WBWI is ingested into the DB. This
+ * happens when IngestWriteBatchWithIndex() is used and when large
+ * transaction optimization is enabled through
+ * TransactionOptions::large_txn_commit_optimize_threshold.
+ */
+ NUMBER_WBWI_INGEST((byte) -0x5D),
+
+ /**
+ * Failure to load the UDI during SST table open.
+ */
+ SST_USER_DEFINED_INDEX_LOAD_FAIL_COUNT((byte) -0x5E),
+
+ /**
+ * Bytes of output files successfully resumed during remote compaction.
+ */
+ REMOTE_COMPACT_RESUMED_BYTES((byte) -0x5F),
+
+ /**
+ * MultiScan statistics
+ */
+
+ /**
+ * Number of calls to Iterator::Prepare() for multi-scan.
+ */
+ MULTISCAN_PREPARE_CALLS((byte) -0x60),
+
+ /**
+ * Number of errors during Iterator::Prepare() for multi-scan.
+ */
+ MULTISCAN_PREPARE_ERRORS((byte) -0x61),
+
+ /**
+ * Number of data blocks prefetched during multi-scan Prepare().
+ */
+ MULTISCAN_BLOCKS_PREFETCHED((byte) -0x62),
+
+ /**
+ * Number of data blocks found in cache during multi-scan Prepare().
+ */
+ MULTISCAN_BLOCKS_FROM_CACHE((byte) -0x63),
+
+ /**
+ * Total bytes prefetched during multi-scan Prepare().
+ */
+ MULTISCAN_PREFETCH_BYTES((byte) -0x64),
+
+ /**
+ * Number of prefetched blocks that were never accessed (wasted).
+ */
+ MULTISCAN_PREFETCH_BLOCKS_WASTED((byte) -0x65),
+
+ /**
+ * Number of I/O requests issued during multi-scan Prepare().
+ */
+ MULTISCAN_IO_REQUESTS((byte) -0x66),
+
+ /**
+ * Number of non-adjacent blocks coalesced into single I/O request.
+ */
+ MULTISCAN_IO_COALESCED_NONADJACENT((byte) -0x67),
+
+ /**
+ * Number of seek errors during multi-scan iteration.
+ */
+ MULTISCAN_SEEK_ERRORS((byte) -0x68),
+
+ /**
+ * Sentinel marking the end of the ticker enum; not itself a ticker.
+ */
+ TICKER_ENUM_MAX((byte) -0x54);
+
+ private final byte value;
+
+ TickerType(final byte value) {
+ this.value = value;
+ }
/**
* Returns the byte value of the enumerations value
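The tickers above are only labels; to observe them, a `Statistics` object must be attached to the DB options. A minimal sketch of reading ticker counts (the DB path and keys are illustrative):

```java
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.Statistics;
import org.rocksdb.TickerType;

public class TickerExample {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Statistics stats = new Statistics();
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setStatistics(stats);
         final RocksDB db = RocksDB.open(options, "/tmp/ticker-example")) {
      db.put("key".getBytes(), "value".getBytes());
      db.get("key".getBytes());

      // Cumulative counts; getTickerCount() reads without resetting.
      System.out.println("keys written: "
          + stats.getTickerCount(TickerType.NUMBER_KEYS_WRITTEN));
      System.out.println("block cache misses: "
          + stats.getTickerCount(TickerType.BLOCK_CACHE_MISS));
    }
  }
}
```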
diff --git a/java/src/main/java/org/rocksdb/TraceOptions.java b/java/src/main/java/org/rocksdb/TraceOptions.java
index cf5f7bbe12f8..85c8abcc2500 100644
--- a/java/src/main/java/org/rocksdb/TraceOptions.java
+++ b/java/src/main/java/org/rocksdb/TraceOptions.java
@@ -12,10 +12,18 @@
public class TraceOptions {
private final long maxTraceFileSize;
+ /**
+ * Constructs a TraceOptions.
+ */
public TraceOptions() {
this.maxTraceFileSize = 64L * 1024L * 1024L * 1024L; // 64 GB
}
+ /**
+ * Constructs a TraceOptions.
+ *
+ * @param maxTraceFileSize the maximum size of the trace file.
+ */
public TraceOptions(final long maxTraceFileSize) {
this.maxTraceFileSize = maxTraceFileSize;
}
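A short sketch of the constructors documented above; the 256 MiB cap is arbitrary, and `getMaxTraceFileSize()` is assumed to be the class's existing getter. The options object would normally be handed to `RocksDB#startTrace` together with an `AbstractTraceWriter` implementation:

```java
import org.rocksdb.TraceOptions;

public class TraceOptionsExample {
  public static void main(final String[] args) {
    // Cap the trace file at 256 MiB instead of the 64 GiB default.
    final TraceOptions traceOptions = new TraceOptions(256L * 1024L * 1024L);
    System.out.println("max trace file size: " + traceOptions.getMaxTraceFileSize());
  }
}
```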
diff --git a/java/src/main/java/org/rocksdb/Transaction.java b/java/src/main/java/org/rocksdb/Transaction.java
index ee8656460835..827d2e6aa8e6 100644
--- a/java/src/main/java/org/rocksdb/Transaction.java
+++ b/java/src/main/java/org/rocksdb/Transaction.java
@@ -184,7 +184,9 @@ public void clearSnapshot() {
}
/**
- * Prepare the current transaction for 2PC
+ * Prepare the current transaction for 2PC.
+ *
+ * @throws RocksDBException if the transaction cannot be prepared
*/
public void prepare() throws RocksDBException {
//TODO(AR) consider a Java'ish version of this function, which returns an AutoCloseable (commit)
@@ -257,7 +259,7 @@ public void rollbackToSavePoint() throws RocksDBException {
/**
* This function has an inconsistent parameter order compared to other {@code get()}
* methods and is deprecated in favour of one with a consistent order.
- *
+ *
* This function is similar to
* {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])} except it will
* also read pending changes in this transaction.
@@ -297,11 +299,11 @@ public byte[] get(final ColumnFamilyHandle columnFamilyHandle, final ReadOptions
* also read pending changes in this transaction.
* Currently, this function will return Status::MergeInProgress if the most
* recent write to the queried key in this batch is a Merge.
- *
+ *
* If {@link ReadOptions#snapshot()} is not set, the current version of the
* key will be read. Calling {@link #setSnapshot()} does not affect the
* version of the data returned.
- *
+ *
* Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect
* what is read from the DB but will NOT change which keys are read from this
* transaction (the keys in this transaction do not yet belong to any snapshot
@@ -560,7 +562,7 @@ public byte[][] multiGet(final ReadOptions readOptions,
* {@link org.rocksdb.ColumnFamilyHandle} instances.
* @param keys of keys for which values need to be retrieved.
*
- * @return Array of values, one for each key
+ * @return list of values, one for each key
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
@@ -646,7 +648,7 @@ public byte[][] multiGet(final ReadOptions readOptions, final byte[][] keys)
* {@link org.rocksdb.ColumnFamilyHandle} instances.
* @param keys of keys for which values need to be retrieved.
*
- * @return Array of values, one for each key
+ * @return list of values, one for each key
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
@@ -1189,7 +1191,6 @@ public GetStatus getForUpdate(final ReadOptions readOptions,
/**
* A multi-key version of
* {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}.
- *
*
* @param readOptions Read options.
* @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle}
@@ -1225,14 +1226,13 @@ public byte[][] multiGetForUpdate(final ReadOptions readOptions,
/**
* A multi-key version of
* {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}.
- *
*
* @param readOptions Read options.
* @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle}
* instances
* @param keys the keys to retrieve the values for.
*
- * @return Array of values, one for each key
+ * @return list of values, one for each key
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
@@ -1261,7 +1261,6 @@ public List
*
* @param readOptions Read options.
* @param keys the keys to retrieve the values for.
@@ -1285,7 +1284,6 @@ public byte[][] multiGetForUpdate(final ReadOptions readOptions, final byte[][]
/**
* A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}.
- *
*
* @param readOptions Read options.
* @param keys the keys to retrieve the values for.
@@ -1332,7 +1330,7 @@ public RocksIterator getIterator() {
* Returns an iterator that will iterate on all keys in the default
* column family including both keys in the DB and uncommitted keys in this
* transaction.
- *
+ *
* Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read
* from the DB but will NOT change which keys are read from this transaction
* (the keys in this transaction do not yet belong to any snapshot and will be
@@ -1555,10 +1553,10 @@ public void put(final ColumnFamilyHandle columnFamilyHandle,
/**
* Similar to {@link RocksDB#put(byte[], byte[])}, but
* will also perform conflict checking on the keys be written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1593,10 +1591,10 @@ public void put(final ByteBuffer key, final ByteBuffer value) throws RocksDBExce
/**
* Similar to {@link RocksDB#put(byte[], byte[])}, but
* will also perform conflict checking on the keys be written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1635,6 +1633,29 @@ public void put(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer ke
key.position(key.limit());
value.position(value.limit());
}
+
+ /**
+ * Similar to {@link RocksDB#put(byte[], byte[])}, but
+ * will also perform conflict checking on the keys be written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough.
+ *
+ * @param columnFamilyHandle The column family to put the key/value into
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
public void put(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key,
final ByteBuffer value) throws RocksDBException {
put(columnFamilyHandle, key, value, false);
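The javadoc above notes that `put()` leaves both buffers at `position == limit`. A sketch of the consequence for buffer reuse, assuming an already-open `Transaction` `txn` and a `ColumnFamilyHandle` `cf`:

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDBException;
import org.rocksdb.Transaction;

public class ByteBufferPutExample {
  static void putDirect(final Transaction txn, final ColumnFamilyHandle cf)
      throws RocksDBException {
    final ByteBuffer key = ByteBuffer.allocateDirect(16);
    final ByteBuffer value = ByteBuffer.allocateDirect(16);
    key.put("k1".getBytes(StandardCharsets.UTF_8)).flip();
    value.put("v1".getBytes(StandardCharsets.UTF_8)).flip();

    txn.put(cf, key, value);

    // put() consumes both buffers, so rewind (or clear and refill)
    // before reusing them for another call.
    assert key.position() == key.limit();
    assert value.position() == value.limit();
  }
}
```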
@@ -1755,10 +1776,10 @@ public void merge(final byte[] key, final byte[] value)
/**
* Similar to {@link RocksDB#merge(byte[], byte[])}, but
* will also perform conflict checking on the keys be written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1791,10 +1812,10 @@ public void merge(final ByteBuffer key, final ByteBuffer value) throws RocksDBEx
/**
* Similar to {@link RocksDB#merge(byte[], byte[])}, but
* will also perform conflict checking on the keys be written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -1833,10 +1854,10 @@ public void merge(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer
/**
* Similar to {@link RocksDB#merge(byte[], byte[])}, but
* will also perform conflict checking on the keys be written.
- *
+ *
* If this Transaction was created on an {@link OptimisticTransactionDB},
* these functions should always succeed.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, an
* {@link RocksDBException} may be thrown with an accompanying {@link Status}
* when:
@@ -2283,10 +2304,10 @@ public void mergeUntracked(final ColumnFamilyHandle columnFamilyHandle,
* Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])},
* but operates on the transactions write batch. This write will only happen
* if this transaction gets committed successfully.
- *
+ *
* Unlike {@link #merge(ColumnFamilyHandle, byte[], byte[])} no conflict
* checking will be performed for this key.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, this function
* will still acquire locks necessary to make sure this write doesn't cause
* conflicts in other transactions; This may cause a {@link RocksDBException}
@@ -2346,10 +2367,10 @@ public void mergeUntracked(final byte[] key, final byte[] value)
* Similar to {@link RocksDB#merge(byte[], byte[])},
* but operates on the transactions write batch. This write will only happen
* if this transaction gets committed successfully.
- *
+ *
* Unlike {@link #merge(byte[], byte[])} no conflict
* checking will be performed for this key.
- *
+ *
* If this Transaction was created on a {@link TransactionDB}, this function
* will still acquire locks necessary to make sure this write doesn't cause
* conflicts in other transactions; This may cause a {@link RocksDBException}
@@ -2792,21 +2813,58 @@ public long getId() {
return getId(nativeHandle_);
}
+ /**
+ * States of a Transaction.
+ */
public enum TransactionState {
+ /**
+ * Transaction started.
+ */
STARTED((byte)0),
+
+ /**
+ * Transaction is awaiting prepare.
+ */
AWAITING_PREPARE((byte)1),
+
+ /**
+ * Transaction is prepared.
+ */
PREPARED((byte)2),
+
+ /**
+ * Transaction awaiting commit.
+ */
AWAITING_COMMIT((byte)3),
+
+ /**
+ * Transaction is committed.
+ */
COMMITTED((byte)4),
+
+ /**
+ * Transaction is awaiting rollback.
+ */
AWAITING_ROLLBACK((byte)5),
+
+ /**
+ * Transaction rolled-back.
+ */
ROLLEDBACK((byte)6),
+
+ /**
+ * Transaction locks have been stolen.
+ */
LOCKS_STOLEN((byte)7);
- /*
- * Keep old misspelled variable as alias
- * Tip from https://stackoverflow.com/a/37092410/454544
+ /**
+ * Old misspelled variable as alias for {@link #COMMITTED}.
+ * Tip from https://stackoverflow.com/a/37092410/454544
+ *
+ * @deprecated use {@link #COMMITTED} instead.
*/
- public static final TransactionState COMMITED = COMMITTED;
+ @Deprecated public static final TransactionState COMMITED = COMMITTED;
private final byte value;
@@ -2850,6 +2908,9 @@ private WaitingTransactions newWaitingTransactions(
return new WaitingTransactions(columnFamilyId, key, transactionIds);
}
+ /**
+ * Waiting Transactions.
+ */
public static class WaitingTransactions {
private final long columnFamilyId;
private final String key;
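For the `prepare()` javadoc above, a minimal two-phase-commit sketch; the DB path, transaction name, and key are placeholders:

```java
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.Transaction;
import org.rocksdb.TransactionDB;
import org.rocksdb.TransactionDBOptions;
import org.rocksdb.WriteOptions;

public class TwoPhaseCommitExample {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
         final TransactionDB db =
             TransactionDB.open(options, txnDbOptions, "/tmp/txn-example");
         final WriteOptions writeOptions = new WriteOptions();
         final Transaction txn = db.beginTransaction(writeOptions)) {
      txn.setName("txn-1"); // a name is required before prepare()
      txn.put("k".getBytes(), "v".getBytes());
      txn.prepare(); // phase 1: persist the prepare record
      txn.commit();  // phase 2
    }
  }
}
```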
diff --git a/java/src/main/java/org/rocksdb/TransactionDB.java b/java/src/main/java/org/rocksdb/TransactionDB.java
index 0f75e5f97019..cff970f6eef9 100644
--- a/java/src/main/java/org/rocksdb/TransactionDB.java
+++ b/java/src/main/java/org/rocksdb/TransactionDB.java
@@ -218,6 +218,14 @@ public Transaction beginTransaction(final WriteOptions writeOptions,
return oldTransaction;
}
+ /**
+ * Gets a transaction by name.
+ *
+ * @param transactionName the name of the transaction.
+ *
+ * @return the transaction, or null if the transaction can't be found.
+ *
+ */
public Transaction getTransactionByName(final String transactionName) {
final long jtxnHandle = getTransactionByName(nativeHandle_, transactionName);
if(jtxnHandle == 0) {
@@ -232,6 +240,11 @@ public Transaction getTransactionByName(final String transactionName) {
return txn;
}
+ /**
+ * Gets a list of all prepared transactions.
+ *
+ * @return the list of prepared transactions.
+ */
public List
- * If 0, no waiting is done if a lock cannot instantly be acquired.
- * If negative, there is no timeout and will block indefinitely when acquiring
- * a lock.
+ * OUTSIDE of a transaction (ie by calling {@link RocksDB#put(byte[], byte[])},
+ * {@link RocksDB#merge(byte[], byte[])}, {@link RocksDB#delete(WriteOptions, byte[])} or {@link
+ * RocksDB#write(WriteOptions, WriteBatch)} directly). If 0, no waiting is done if a lock
+ * cannot instantly be acquired. If negative, there is no timeout and will block indefinitely when
+ * acquiring a lock.
*
- * @return the timeout in milliseconds when writing a key OUTSIDE of a
- * transaction
+ * @return the timeout in milliseconds when writing a key outside of the transaction
*/
public long getDefaultLockTimeout() {
assert(isOwningHandle());
@@ -128,24 +130,17 @@ public long getDefaultLockTimeout() {
/**
* If positive, specifies the wait timeout in milliseconds when writing a key
- * OUTSIDE of a transaction (ie by calling {@link RocksDB#put},
- * {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write}
- * directly).
- *
- * If 0, no waiting is done if a lock cannot instantly be acquired.
- * If negative, there is no timeout and will block indefinitely when acquiring
- * a lock.
- *
- * Not using a timeout can lead to deadlocks. Currently, there
- * is no deadlock-detection to recover from a deadlock. While DB writes
- * cannot deadlock with other DB writes, they can deadlock with a transaction.
- * A negative timeout should only be used if all transactions have a small
- * expiration set.
- *
- * Default: 1000
+ * OUTSIDE of a transaction (ie by calling {@link RocksDB#put(byte[], byte[])},
+ * {@link RocksDB#merge(byte[], byte[])}, {@link RocksDB#delete(byte[])} or {@link
+ * RocksDB#write(WriteOptions, WriteBatch)} directly). If 0, no waiting is done if a lock
+ * cannot instantly be acquired. If negative, there is no timeout and will block indefinitely when
+ * acquiring a lock. Not using a timeout can lead to deadlocks. Currently, there is no
+ * deadlock-detection to recover from a deadlock. While DB writes cannot deadlock with other DB
+ * writes, they can deadlock with a transaction. A negative timeout should only be used if all
+ * transactions have a small expiration set. Default: 1000
*
* @param defaultLockTimeout the timeout in milliseconds when writing a key
- * OUTSIDE of a transaction
+ * outside of the transaction
* @return this TransactionDBOptions instance
*/
public TransactionDBOptions setDefaultLockTimeout(final long defaultLockTimeout) {
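A sketch of how `getAllPreparedTransactions()` (documented above) supports crash recovery; a single transaction can also be fetched with `getTransactionByName(String)`. The commit-everything policy is purely illustrative:

```java
import java.util.List;
import org.rocksdb.RocksDBException;
import org.rocksdb.Transaction;
import org.rocksdb.TransactionDB;

public class PreparedRecoveryExample {
  // After reopening a TransactionDB, resolve transactions that were
  // prepared but neither committed nor rolled back before the crash.
  static void recoverPrepared(final TransactionDB db) throws RocksDBException {
    final List<Transaction> prepared = db.getAllPreparedTransactions();
    for (final Transaction txn : prepared) {
      // A real coordinator would consult its own log to decide
      // between commit() and rollback().
      txn.commit();
    }
  }
}
```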
diff --git a/java/src/main/java/org/rocksdb/TransactionOptions.java b/java/src/main/java/org/rocksdb/TransactionOptions.java
index d2efeb87ce4a..924e931e78f8 100644
--- a/java/src/main/java/org/rocksdb/TransactionOptions.java
+++ b/java/src/main/java/org/rocksdb/TransactionOptions.java
@@ -5,9 +5,14 @@
package org.rocksdb;
+/**
+ * Options for a Transaction.
+ */
public class TransactionOptions extends RocksObject
implements TransactionalOptions
* If 0, no waiting is done if a lock cannot instantly be acquired.
- * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)}
+ * If negative, {@link TransactionDBOptions#getTransactionLockTimeout()}
* will be used
*
* @return the lock timeout in milliseconds
@@ -71,7 +76,7 @@ public long getLockTimeout() {
* a transaction attempts to lock a key.
*
* If 0, no waiting is done if a lock cannot instantly be acquired.
- * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)}
+ * If negative, {@link TransactionDBOptions#getTransactionLockTimeout()}
* will be used
*
* Default: -1
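
A hedged sketch of the per-transaction counterpart: with the default of -1, the
transaction defers to the DB-wide setting, as the Javadoc above describes. Here
txnDb is assumed to be an already-open TransactionDB:

    try (final WriteOptions writeOptions = new WriteOptions();
         final TransactionOptions txnOptions =
             new TransactionOptions().setLockTimeout(-1); // fall back to TransactionDBOptions
         final Transaction txn = txnDb.beginTransaction(writeOptions, txnOptions)) {
      txn.put("key".getBytes(), "value".getBytes());
      txn.commit();
    } catch (final RocksDBException e) {
      // lock timeout or commit failure
    }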
diff --git a/java/src/main/java/org/rocksdb/UInt64AddOperator.java b/java/src/main/java/org/rocksdb/UInt64AddOperator.java
index 536ba58d8352..f43903706930 100644
--- a/java/src/main/java/org/rocksdb/UInt64AddOperator.java
+++ b/java/src/main/java/org/rocksdb/UInt64AddOperator.java
@@ -10,9 +10,12 @@
* integer value.
*/
public class UInt64AddOperator extends MergeOperator {
- public UInt64AddOperator() {
- super(newSharedUInt64AddOperator());
- }
+ /**
+ * Constructs a UInt64AddOperator.
+ */
+ public UInt64AddOperator() {
+ super(newSharedUInt64AddOperator());
+ }
private static native long newSharedUInt64AddOperator();
@Override
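
For context, a hedged sketch of wiring up this merge operator. The 8-byte
little-endian value encoding mirrors the C++ operator's fixed-64 decoding and is
an assumption here, as is the DB path:

    // requires java.nio.ByteBuffer and java.nio.ByteOrder
    try (final UInt64AddOperator addOperator = new UInt64AddOperator();
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setMergeOperator(addOperator);
         final RocksDB db = RocksDB.open(options, "/tmp/counter_example")) {
      final byte[] one =
          ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putLong(1L).array();
      db.merge("counter".getBytes(), one); // counter += 1, no read-modify-write needed
      db.merge("counter".getBytes(), one); // counter += 1 again
    } catch (final RocksDBException e) {
      // handle open/merge failure
    }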
diff --git a/java/src/main/java/org/rocksdb/VectorMemTableConfig.java b/java/src/main/java/org/rocksdb/VectorMemTableConfig.java
index d87efb1b7fbf..428727c80e8c 100644
--- a/java/src/main/java/org/rocksdb/VectorMemTableConfig.java
+++ b/java/src/main/java/org/rocksdb/VectorMemTableConfig.java
@@ -5,6 +5,9 @@
* The config for vector memtable representation.
*/
public class VectorMemTableConfig extends MemTableConfig {
+ /**
+ * The default reserved size for the Vector Mem Table.
+ */
public static final int DEFAULT_RESERVED_SIZE = 0;
/**
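
A short sketch of selecting the vector memtable. Note the vector representation
does not support concurrent memtable writes, so that option is disabled here (an
assumption worth verifying for your RocksDB version):

    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .setAllowConcurrentMemtableWrite(false) // vector rep is not concurrent-write safe
             .setMemTableConfig(new VectorMemTableConfig().setReservedSize(10_000))) {
      // open the DB with these options; useful for bulk-load-then-iterate workloads
    }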
diff --git a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
index 5f7b7b8a1d70..bdfdae31ebf4 100644
--- a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
+++ b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
@@ -7,10 +7,20 @@
import java.nio.ByteBuffer;
+/**
+ * Iterator over the contents of a Write Batch With Index.
+ */
public class WBWIRocksIterator
extends AbstractRocksIterator
+ * Similar to C's {@code memcmp}.
*
* @param x the first value to compare with
* @param y the second value to compare against
diff --git a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java
index 202241d3bad9..acb76c255b9e 100644
--- a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java
+++ b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java
@@ -22,7 +22,11 @@
* {@link org.rocksdb.BuiltinComparator#BYTEWISE_COMPARATOR}
*/
public final class BytewiseComparator extends AbstractComparator {
-
+ /**
+ * Constructs a new BytewiseComparator.
+ *
+ * @param copt the configuration options for the comparator.
+ */
public BytewiseComparator(final ComparatorOptions copt) {
super(copt);
}
@@ -46,7 +50,7 @@ static int _compare(final ByteBuffer a, final ByteBuffer b) {
if (a.remaining() < b.remaining()) {
r = -1;
} else if (a.remaining() > b.remaining()) {
- r = +1;
+ r = 1;
}
}
return r;
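
Illustrating the tie-breaking above: when one key is a prefix of the other, the
shorter key compares smaller. A hedged usage sketch:

    try (final ComparatorOptions copt = new ComparatorOptions();
         final BytewiseComparator comparator = new BytewiseComparator(copt);
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setComparator(comparator)) {
      // "abc" sorts before "abcd": equal common prefix, fewer remaining bytes
    }

For production use the builtin BYTEWISE_COMPARATOR is normally preferred, since a
Java comparator pays a JNI bridging cost on every comparison.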
diff --git a/java/src/main/java/org/rocksdb/util/Environment.java b/java/src/main/java/org/rocksdb/util/Environment.java
index 78b73dc5d432..f1bbcfe927e0 100644
--- a/java/src/main/java/org/rocksdb/util/Environment.java
+++ b/java/src/main/java/org/rocksdb/util/Environment.java
@@ -5,6 +5,9 @@
import java.io.IOException;
import java.util.Locale;
+/**
+ * Provides information about the environment in which RocksJava is executing.
+ */
public class Environment {
@SuppressWarnings("FieldMayBeFinal")
private static String OS = System.getProperty("os.name").toLowerCase(Locale.getDefault());
@@ -24,38 +27,83 @@ public class Environment {
*/
private static Boolean MUSL_LIBC = null;
+ /**
+ * Returns true if the CPU architecture is aarch64.
+ *
+ * @return true if the CPU architecture is aarch64, false otherwise.
+ */
public static boolean isAarch64() {
return ARCH.contains("aarch64");
}
+ /**
+ * Returns true if the CPU architecture is ppc.
+ *
+ * @return true if the CPU architecture is ppc, false otherwise.
+ */
public static boolean isPowerPC() {
return ARCH.contains("ppc");
}
+ /**
+ * Returns true if the CPU architecture is s390x.
+ *
+ * @return true if the CPU architecture is s390x, false otherwise.
+ */
public static boolean isS390x() {
return ARCH.contains("s390x");
}
+ /**
+ * Returns true if the CPU architecture is riscv64.
+ *
+ * @return true if the CPU architecture is riscv64, false otherwise.
+ */
public static boolean isRiscv64() {
return ARCH.contains("riscv64");
}
+ /**
+ * Returns true if the OS is Windows.
+ *
+ * @return true if the OS is Windows, false otherwise.
+ */
public static boolean isWindows() {
return (OS.contains("win"));
}
+ /**
+ * Returns true if the OS is FreeBSD.
+ *
+ * @return true if the OS is FreeBSD, false otherwise.
+ */
public static boolean isFreeBSD() {
return (OS.contains("freebsd"));
}
+ /**
+ * Returns true if the OS is Mac.
+ *
+ * @return true if the OS is Mac, false otherwise.
+ */
public static boolean isMac() {
return (OS.contains("mac"));
}
+ /**
+ * Returns true if the OS is AIX.
+ *
+ * @return true if the OS is AIX, false otherwise.
+ */
public static boolean isAix() {
return OS.contains("aix");
}
-
+
+ /**
+ * Returns true if the OS is Unix.
+ *
+ * @return true if the OS is Unix, false otherwise.
+ */
public static boolean isUnix() {
return OS.contains("nix") ||
OS.contains("nux");
@@ -75,9 +123,9 @@ public static boolean isMuslLibc() {
/**
* Determine if the environment has a musl libc.
- *
+ *
* The initialisation counterpart of {@link #isMuslLibc()}.
- *
+ *
* Intentionally package-private for testing.
*
* @return true if the environment has a musl libc, false otherwise.
@@ -136,14 +184,29 @@ static boolean initIsMuslLibc() {
return false;
}
+ /**
+ * Returns true if the OS is Solaris.
+ *
+ * @return true if the OS is Solaris, false otherwise.
+ */
public static boolean isSolaris() {
return OS.contains("sunos");
}
+ /**
+ * Returns true if the OS is OpenBSD.
+ *
+ * @return true if the OS is OpenBSD, false otherwise.
+ */
public static boolean isOpenBSD() {
return (OS.contains("openbsd"));
}
+ /**
+ * Returns true if the system architecture is 64 bit.
+ *
+ * @return true if the system architecture is 64 bit, false otherwise.
+ */
public static boolean is64Bit() {
if (ARCH.contains(SPARCV9)) {
return true;
@@ -151,10 +214,24 @@ public static boolean is64Bit() {
return (ARCH.indexOf("64") > 0);
}
+ /**
+ * Get the name of the shared JNI library.
+ *
+ * @param name the base name of the library.
+ *
+ * @return the name of the shared JNI library.
+ */
public static String getSharedLibraryName(final String name) {
return name + "jni";
}
+ /**
+ * Get the filename of the shared JNI library.
+ *
+ * @param name the base name of the library.
+ *
+ * @return the filename of the shared JNI library.
+ */
public static String getSharedLibraryFileName(final String name) {
return appendLibOsSuffix("lib" + getSharedLibraryName(name), true);
}
@@ -181,6 +258,15 @@ private static String getLibcPostfix() {
return "-" + libcName;
}
+ /**
+ * Get the name of the JNI library.
+ *
+ * Deals with platform- and architecture-specific naming.
+ *
+ * @param name the base name of the library.
+ *
+ * @return the name of the JNI library.
+ */
public static String getJniLibraryName(final String name) {
if (isUnix()) {
final String arch = is64Bit() ? "64" : "32";
@@ -219,6 +305,15 @@ public static String getJniLibraryName(final String name) {
throw new UnsupportedOperationException(String.format("Cannot determine JNI library name for ARCH='%s' OS='%s' name='%s'", ARCH, OS, name));
}
+ /**
+ * Get a fallback name for the JNI library.
+ *
+ * Deals with platform- and architecture-specific naming.
+ *
+ * @param name the base name of the library.
+ *
+ * @return the fallback name of the JNI library.
+ */
public static /*@Nullable*/ String getFallbackJniLibraryName(final String name) {
if (isMac() && is64Bit()) {
return String.format("%sjni-osx", name);
@@ -226,10 +321,28 @@ public static String getJniLibraryName(final String name) {
return null;
}
+ /**
+ * Get the filename of the JNI library.
+ *
+ * Deals with platform- and architecture-specific naming.
+ *
+ * @param name the base name of the library.
+ *
+ * @return the filename of the JNI library.
+ */
public static String getJniLibraryFileName(final String name) {
return appendLibOsSuffix("lib" + getJniLibraryName(name), false);
}
+ /**
+ * Get the fallback filename of the JNI library.
+ *
+ * Deals with platform- and architecture-specific naming.
+ *
+ * @param name the base name of the library.
+ *
+ * @return the fallback filename of the JNI library.
+ */
public static /*@Nullable*/ String getFallbackJniLibraryFileName(final String name) {
final String fallbackJniLibraryName = getFallbackJniLibraryName(name);
if (fallbackJniLibraryName == null) {
@@ -249,6 +362,13 @@ private static String appendLibOsSuffix(final String libraryFileName, final bool
throw new UnsupportedOperationException();
}
+ /**
+ * Get the filename extension used for a JNI library.
+ *
+ * Deals with platform-specific naming.
+ *
+ * @return the filename extension, e.g. ".so" or ".dll".
+ */
public static String getJniLibraryExtension() {
if (isWindows()) {
return ".dll";
diff --git a/java/src/main/java/org/rocksdb/util/IntComparator.java b/java/src/main/java/org/rocksdb/util/IntComparator.java
index 2caf0c601572..142e81c3a21f 100644
--- a/java/src/main/java/org/rocksdb/util/IntComparator.java
+++ b/java/src/main/java/org/rocksdb/util/IntComparator.java
@@ -13,20 +13,24 @@
/**
* This is a Java implementation of a Comparator for Java int
* keys.
- *
+ *
* This comparator assumes keys are (at least) four bytes, so
* the caller must guarantee that in accessing other APIs in
* combination with this comparator.
- *
+ *
* The performance of Comparators implemented in Java is always
* less than their C++ counterparts due to the bridging overhead,
* as such you likely don't want to use this apart from benchmarking
* or testing.
*/
public final class IntComparator extends AbstractComparator {
-
- public IntComparator(final ComparatorOptions copt) {
- super(copt);
+ /**
+ * Constructs an IntComparator.
+ *
+ * @param comparatorOptions the options for the comparator.
+ */
+ public IntComparator(final ComparatorOptions comparatorOptions) {
+ super(comparatorOptions);
}
@Override
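
A hedged sketch of using the comparator; as the Javadoc warns, every key must be
at least four bytes, so keys here are encoded as 4-byte big-endian ints:

    // requires java.nio.ByteBuffer
    try (final ComparatorOptions comparatorOptions = new ComparatorOptions();
         final IntComparator comparator = new IntComparator(comparatorOptions);
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setComparator(comparator)) {
      final byte[] key = ByteBuffer.allocate(4).putInt(42).array();
      // open a DB with these options and use 4-byte keys like the one above
    }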
diff --git a/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java b/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
index 3d3c429416b0..4d0708ca077a 100644
--- a/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
+++ b/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
@@ -23,7 +23,11 @@
* {@link BuiltinComparator#REVERSE_BYTEWISE_COMPARATOR}
*/
public final class ReverseBytewiseComparator extends AbstractComparator {
-
+ /**
+ * Constructs a ReverseBytewiseComparator.
+ *
+ * @param copt the comparator options.
+ */
public ReverseBytewiseComparator(final ComparatorOptions copt) {
super(copt);
}
diff --git a/java/src/main/java/org/rocksdb/util/SizeUnit.java b/java/src/main/java/org/rocksdb/util/SizeUnit.java
index 0f717e8d4540..a6205eba266c 100644
--- a/java/src/main/java/org/rocksdb/util/SizeUnit.java
+++ b/java/src/main/java/org/rocksdb/util/SizeUnit.java
@@ -5,12 +5,32 @@
package org.rocksdb.util;
+/**
+ * Simple factors of byte sizes.
+ */
public class SizeUnit {
+ /**
+ * 1 Kilobyte.
+ */
public static final long KB = 1024L;
+
+ /**
+ * 1 Megabyte.
+ */
public static final long MB = KB * KB;
+
+ /**
+ * 1 Gigabyte.
+ */
public static final long GB = KB * MB;
+
+ /**
+ * 1 Terabyte.
+ */
public static final long TB = KB * GB;
- public static final long PB = KB * TB;
- private SizeUnit() {}
+ /**
+ * 1 Petabyte.
+ */
+ public static final long PB = KB * TB;
+
+ private SizeUnit() {}
}
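
The constants compose naturally with size-typed options; a small sketch (the
option choices are illustrative):

    try (final Options options = new Options()
             .setWriteBufferSize(64 * SizeUnit.MB)   // 67,108,864 bytes
             .setTargetFileSizeBase(SizeUnit.GB)) {  // 1,073,741,824 bytes
      // open the DB with these options
    }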
diff --git a/java/src/main/java/org/rocksdb/util/StdErrLogger.java b/java/src/main/java/org/rocksdb/util/StdErrLogger.java
index 00b08d384522..656535c3ca83 100644
--- a/java/src/main/java/org/rocksdb/util/StdErrLogger.java
+++ b/java/src/main/java/org/rocksdb/util/StdErrLogger.java
@@ -47,6 +47,11 @@ public LoggerType getLoggerType() {
return LoggerType.STDERR_IMPLEMENTATION;
}
+ @Override
+ public long getNativeHandle() {
+ return nativeHandle_;
+ }
+
private static native long newStdErrLogger(
final byte logLevel, /* @Nullable */ final String logPrefix);
private static native void setInfoLogLevel(final long handle, final byte logLevel);
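
A hedged sketch of attaching this logger via the LoggerInterface that the
getNativeHandle() override above serves; the log level and prefix are
illustrative:

    try (final StdErrLogger logger =
             new StdErrLogger(InfoLogLevel.INFO_LEVEL, "[rocksdb] ");
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setLogger(logger)) {
      // informational messages from the DB are written to stderr with the prefix
    }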
diff --git a/java/src/test/java/org/rocksdb/OptionsTest.java b/java/src/test/java/org/rocksdb/OptionsTest.java
index c78d0f76b3a4..d8e17e14ad56 100644
--- a/java/src/test/java/org/rocksdb/OptionsTest.java
+++ b/java/src/test/java/org/rocksdb/OptionsTest.java
@@ -1469,7 +1469,8 @@ public void onMemTableSealed(final MemTableInfo memTableInfo) {
public void tablePropertiesCollectorFactory() {
try (final Options options = new Options()) {
try (TablePropertiesCollectorFactory collectorFactory =
- TablePropertiesCollectorFactory.NewCompactOnDeletionCollectorFactory(10, 10, 1.0)) {
+ TablePropertiesCollectorFactory.createNewCompactOnDeletionCollectorFactory(
+ 10, 10, 1.0)) {
List
+ */
public abstract class AbstractWriteBatch extends RocksObject
implements WriteBatchInterface {
-
+ /**
+ * Construct an AbstractWriteBatch.
+ *
+ * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native
+ * RocksDB C++ Write Batch object.
+ */
protected AbstractWriteBatch(final long nativeHandle) {
super(nativeHandle);
}
diff --git a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
index 867f5ca959bd..200552634647 100644
--- a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
+++ b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
@@ -12,6 +12,8 @@
* mutable (i.e. present in {@link AdvancedMutableColumnFamilyOptionsInterface})
*
+ * batch.put("key", "v1");
+ * batch.remove("key");
+ * batch.put("key", "v2");
+ * batch.put("key", "v3");
+ *
- *
+ * Builtin RocksDB comparators.
*/
public enum BuiltinComparator {
- BYTEWISE_COMPARATOR, REVERSE_BYTEWISE_COMPARATOR
+ /**
+ * Sorts all keys in ascending byte-wise order.
+ */
+ BYTEWISE_COMPARATOR,
+
+ /**
+ * Sorts all keys in descending byte-wise order.
+ */
+ REVERSE_BYTEWISE_COMPARATOR
}
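
Since these are native comparators, selecting one avoids the JNI round-trip of a
Java AbstractComparator; a brief sketch:

    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR)) {
      // iterators over this DB yield keys in descending byte-wise order
    }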
diff --git a/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java
index 4ab9e8475ce9..fead6b2c13b7 100644
--- a/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java
+++ b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java
@@ -20,8 +20,19 @@
* {@link RocksDB#multiGetByteBuffers(ReadOptions, List, List, List)}
*/
public class ByteBufferGetStatus {
+ /**
+ * Status of the request to fetch into the buffer.
+ */
public final Status status;
+
+ /**
+ * Size of the data, which may be bigger than the buffer.
+ */
public final int requiredSize;
+
+ /**
+ * Buffer containing as much of the value as fits.
+ */
public final ByteBuffer value;
/**
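
A hedged sketch of consuming these fields after multiGetByteBuffers (which
throws RocksDBException); db is assumed to be an open RocksDB, and the buffers
must be direct ByteBuffers:

    // requires java.nio.ByteBuffer, java.util.ArrayList, java.util.List
    final List<ByteBuffer> keys = new ArrayList<>();
    final ByteBuffer key = ByteBuffer.allocateDirect(16).put("key1".getBytes());
    key.flip();
    keys.add(key);
    final List<ByteBuffer> values = new ArrayList<>();
    values.add(ByteBuffer.allocateDirect(32));
    for (final ByteBufferGetStatus result : db.multiGetByteBuffers(keys, values)) {
      if (result.status.getCode() == Status.Code.Ok
          && result.requiredSize > result.value.capacity()) {
        // value was truncated: retry with a buffer of at least result.requiredSize bytes
      }
    }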
diff --git a/java/src/main/java/org/rocksdb/Cache.java b/java/src/main/java/org/rocksdb/Cache.java
index 04bd3fcaa398..c1e4812031d2 100644
--- a/java/src/main/java/org/rocksdb/Cache.java
+++ b/java/src/main/java/org/rocksdb/Cache.java
@@ -5,8 +5,16 @@
package org.rocksdb;
-
+/**
+ * Base class for Cache implementations.
+ */
public abstract class Cache extends RocksObject {
+ /**
+ * Construct a Cache.
+ *
+ * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native
+ * RocksDB C++ cache object.
+ */
protected Cache(final long nativeHandle) {
super(nativeHandle);
}
diff --git a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
index 12854c5102be..c2705ed4e13e 100644
--- a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
+++ b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
@@ -6,10 +6,25 @@
package org.rocksdb;
/**
- * Just a Java wrapper around CassandraCompactionFilter implemented in C++
+ * Just a Java wrapper around CassandraCompactionFilter implemented in C++.
+ *