diff --git a/.github/workflows/pr-jobs.yml b/.github/workflows/pr-jobs.yml index a3cfcdbce73e..20e2bc0c4916 100644 --- a/.github/workflows/pr-jobs.yml +++ b/.github/workflows/pr-jobs.yml @@ -444,7 +444,35 @@ jobs: - name: Build RocksDBJava Static Library # NOTE: replaced scl enable devtoolset-7 'make V=1 J=8 -j8 rocksdbjavastatic' run: make V=1 J=8 -j8 rocksdbjavastatic - # post-steps skipped because of compatibility issues with docker image + check-javadocs: + if: ${{ github.repository_owner == 'facebook' }} + runs-on: + labels: 4-core-ubuntu + container: + image: evolvedbinary/rocksjava:centos7_x64-be + options: --shm-size=16gb + steps: + # The docker image is based on such an old OS that it has a GLIBC + # incompatibility with actions/checkout and other actions. Thus we + # implement a manual checkout step. + - name: Checkout + env: + GH_TOKEN: ${{ github.token }} + run: | + chown `whoami` . || true + git clone --no-checkout https://oath2:$GH_TOKEN@github.com/${{ github.repository }}.git . + git -c protocol.version=2 fetch --update-head-ok --no-tags --prune --no-recurse-submodules --depth=1 origin +${{ github.sha }}:${{ github.ref }} + git checkout --progress --force ${{ github.ref }} + git log -1 --format='%H' + - uses: "./.github/actions/pre-steps" + - name: Set Java Environment + run: |- + echo "JAVA_HOME=${JAVA_HOME}" + which java && java -version + which javac && javac -version + - name: Check RocksDBJava JavaDocs + run: scl enable devtoolset-7 'pushd java; make V=1 J=8 -j8 javadocs' + # NOTE: post-steps skipped because of compatibility issues with docker image build-macos-java: if: ${{ github.repository_owner == 'facebook' }} runs-on: macos-15-xlarge diff --git a/include/rocksdb/utilities/table_properties_collectors.h b/include/rocksdb/utilities/table_properties_collectors.h index c8c8af1de6a8..3395a7fd3998 100644 --- a/include/rocksdb/utilities/table_properties_collectors.h +++ b/include/rocksdb/utilities/table_properties_collectors.h @@ -83,7 +83,7 @@ class CompactOnDeletionCollectorFactory }; // Creates a factory of a table property collector that marks a SST -// file as need-compaction when it observe at least "D" deletion +// file as need-compaction when it observes at least "D" deletion // entries in any "N" consecutive entries, or the ratio of tombstone // entries >= deletion_ratio. 
// diff --git a/java/Makefile b/java/Makefile index 5e00921c62b9..1ef540915c40 100644 --- a/java/Makefile +++ b/java/Makefile @@ -115,12 +115,11 @@ JAVA_TESTS = \ org.rocksdb.BuiltinComparatorTest\ org.rocksdb.ByteBufferUnsupportedOperationTest\ org.rocksdb.BytewiseComparatorRegressionTest\ - org.rocksdb.util.BytewiseComparatorTest\ - org.rocksdb.util.BytewiseComparatorIntTest\ org.rocksdb.CheckPointTest\ org.rocksdb.ClockCacheTest\ org.rocksdb.ColumnFamilyOptionsTest\ org.rocksdb.ColumnFamilyTest\ + org.rocksdb.CompactRangeOptionsTest\ org.rocksdb.CompactionFilterFactoryTest\ org.rocksdb.CompactionJobInfoTest\ org.rocksdb.CompactionJobStatsTest\ @@ -132,21 +131,21 @@ JAVA_TESTS = \ org.rocksdb.ComparatorOptionsTest\ org.rocksdb.CompressionOptionsTest\ org.rocksdb.CompressionTypesTest\ + org.rocksdb.ConcurrentTaskLimiterTest\ + org.rocksdb.DefaultEnvTest\ org.rocksdb.DBOptionsTest\ org.rocksdb.DirectSliceTest\ - org.rocksdb.util.EnvironmentTest\ org.rocksdb.EnvOptionsTest\ org.rocksdb.EventListenerTest\ - org.rocksdb.IngestExternalFileOptionsTest\ - org.rocksdb.util.IntComparatorTest\ - org.rocksdb.util.JNIComparatorTest\ org.rocksdb.FilterTest\ + org.rocksdb.FlushOptionsTest\ org.rocksdb.FlushTest\ + org.rocksdb.HyperClockCacheTest\ org.rocksdb.ImportColumnFamilyTest\ org.rocksdb.InfoLogLevelTest\ + org.rocksdb.IngestExternalFileOptionsTest\ org.rocksdb.KeyExistsTest \ org.rocksdb.KeyMayExistTest\ - org.rocksdb.ConcurrentTaskLimiterTest\ org.rocksdb.LoggerTest\ org.rocksdb.LRUCacheTest\ org.rocksdb.MemoryUtilTest\ @@ -154,10 +153,10 @@ JAVA_TESTS = \ org.rocksdb.MergeCFVariantsTest\ org.rocksdb.MergeTest\ org.rocksdb.MergeVariantsTest\ - org.rocksdb.MultiColumnRegressionTest \ + org.rocksdb.MixedOptionsTest\ + org.rocksdb.MultiColumnRegressionTest\ org.rocksdb.MultiGetManyKeysTest\ org.rocksdb.MultiGetTest\ - org.rocksdb.MixedOptionsTest\ org.rocksdb.MutableColumnFamilyOptionsTest\ org.rocksdb.MutableDBOptionsTest\ org.rocksdb.MutableOptionsGetSetTest \ @@ -166,48 +165,53 @@ JAVA_TESTS = \ org.rocksdb.OptimisticTransactionTest\ org.rocksdb.OptimisticTransactionDBTest\ org.rocksdb.OptimisticTransactionOptionsTest\ - org.rocksdb.OptionsUtilTest\ org.rocksdb.OptionsTest\ - org.rocksdb.PerfLevelTest \ + org.rocksdb.OptionsUtilTest\ org.rocksdb.PerfContextTest \ + org.rocksdb.PerfLevelTest \ + org.rocksdb.PlainTableConfigTest\ org.rocksdb.PutCFVariantsTest\ + org.rocksdb.PutMultiplePartsTest\ org.rocksdb.PutVariantsTest\ - org.rocksdb.PlainTableConfigTest\ org.rocksdb.RateLimiterTest\ org.rocksdb.ReadOnlyTest\ org.rocksdb.ReadOptionsTest\ - org.rocksdb.util.ReverseBytewiseComparatorIntTest\ - org.rocksdb.RocksDBTest\ org.rocksdb.RocksDBExceptionTest\ - org.rocksdb.DefaultEnvTest\ + org.rocksdb.RocksDBTest\ org.rocksdb.RocksIteratorTest\ org.rocksdb.RocksMemEnvTest\ - org.rocksdb.util.SizeUnitTest\ org.rocksdb.SecondaryDBTest\ org.rocksdb.SliceTest\ org.rocksdb.SnapshotTest\ org.rocksdb.SstFileManagerTest\ - org.rocksdb.SstFileWriterTest\ org.rocksdb.SstFileReaderTest\ + org.rocksdb.SstFileWriterTest\ org.rocksdb.SstPartitionerTest\ + org.rocksdb.StatisticsCollectorTest\ + org.rocksdb.StatisticsTest\ org.rocksdb.TableFilterTest\ org.rocksdb.TimedEnvTest\ - org.rocksdb.TransactionTest\ - org.rocksdb.TransactionDBTest\ - org.rocksdb.TransactionOptionsTest\ org.rocksdb.TransactionDBOptionsTest\ + org.rocksdb.TransactionDBTest\ org.rocksdb.TransactionLogIteratorTest\ + org.rocksdb.TransactionOptionsTest\ + org.rocksdb.TransactionTest\ org.rocksdb.TtlDBTest\ - org.rocksdb.StatisticsTest\ - 
org.rocksdb.StatisticsCollectorTest\ org.rocksdb.VerifyChecksumsTest\ org.rocksdb.WalFilterTest\ org.rocksdb.WALRecoveryModeTest\ org.rocksdb.WriteBatchHandlerTest\ org.rocksdb.WriteBatchTest\ org.rocksdb.WriteBatchThreadedTest\ - org.rocksdb.WriteOptionsTest\ org.rocksdb.WriteBatchWithIndexTest\ + org.rocksdb.WriteOptionsTest\ + org.rocksdb.util.BytewiseComparatorIntTest\ + org.rocksdb.util.BytewiseComparatorTest\ + org.rocksdb.util.EnvironmentTest\ + org.rocksdb.util.IntComparatorTest\ + org.rocksdb.util.JNIComparatorTest\ + org.rocksdb.util.ReverseBytewiseComparatorIntTest\ + org.rocksdb.util.SizeUnitTest\ org.rocksdb.util.StdErrLoggerTest MAIN_SRC = src/main/java @@ -343,7 +347,7 @@ clean-downloaded: javadocs: java $(AM_V_GEN)mkdir -p $(JAVADOC) - $(AM_V_at)$(JAVADOC_CMD) -d $(JAVADOC) -sourcepath $(MAIN_SRC) -subpackages org + $(AM_V_at)$(JAVADOC_CMD) -Xwerror -d $(JAVADOC) -sourcepath $(MAIN_SRC) -subpackages org javalib: java java_test javadocs diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java index fd7eef4d4cfb..45ad20e2cad0 100644 --- a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java @@ -10,14 +10,24 @@ *
<p>
* At present, we just permit an overriding Java class to wrap a C++ * implementation + * + * @param the concrete type of the {@link AbstractSlice} that the Compaction Filter uses. */ public abstract class AbstractCompactionFilter> extends RocksObject { - + /** + * Context of the Compaction Filter. + */ public static class Context { private final boolean fullCompaction; private final boolean manualCompaction; + /** + * Context constructor. + * + * @param fullCompaction true to filter full compaction, false otherwise. + * @param manualCompaction true to filter manual compaction, false otherwise. + */ public Context(final boolean fullCompaction, final boolean manualCompaction) { this.fullCompaction = fullCompaction; this.manualCompaction = manualCompaction; @@ -43,6 +53,13 @@ public boolean isManualCompaction() { } } + /** + * Constructor to be called by subclasses to set the + * handle to the underlying C++ object. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ Compaction Filter. + */ protected AbstractCompactionFilter(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java index c10fb8a2a9bb..2c6fc0f68a48 100644 --- a/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java +++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java @@ -13,7 +13,9 @@ */ public abstract class AbstractCompactionFilterFactory> extends RocksCallbackObject { - + /** + * Constructs a new Compaction Filter Factory which has no underlying C++ object. + */ public AbstractCompactionFilterFactory() { super(0L); } @@ -26,8 +28,8 @@ protected long initializeNative(final long... nativeParameterHandles) { /** * Called from JNI, see compaction_filter_factory_jnicallback.cc * - * @param fullCompaction {@link AbstractCompactionFilter.Context#fullCompaction} - * @param manualCompaction {@link AbstractCompactionFilter.Context#manualCompaction} + * @param fullCompaction {@link AbstractCompactionFilter.Context#isFullCompaction()} + * @param manualCompaction {@link AbstractCompactionFilter.Context#isManualCompaction()} * * @return native handle of the CompactionFilter */ diff --git a/java/src/main/java/org/rocksdb/AbstractComparator.java b/java/src/main/java/org/rocksdb/AbstractComparator.java index 5cb33c812d1f..2c6ad2df4cf6 100644 --- a/java/src/main/java/org/rocksdb/AbstractComparator.java +++ b/java/src/main/java/org/rocksdb/AbstractComparator.java @@ -10,7 +10,7 @@ /** * Comparators are used by RocksDB to determine * the ordering of keys. - * + *
<p>
* Implementations of Comparators in Java should extend this class. */ public abstract class AbstractComparator @@ -20,6 +20,11 @@ public abstract class AbstractComparator super(); } + /** + * Construct an AbstractComparator. + * + * @param comparatorOptions options for the comparator. + */ protected AbstractComparator(final ComparatorOptions comparatorOptions) { super(comparatorOptions.nativeHandle_); } @@ -59,7 +64,7 @@ ComparatorType getComparatorType() { * Three-way key comparison. Implementations should provide a * total order * on keys that might be passed to it. - * + *
<p>
* The implementation may modify the {@code ByteBuffer}s passed in, though * it would be unconventional to modify the "limit" or any of the * underlying bytes. As a callback, RocksJava will ensure that {@code a} @@ -114,6 +119,11 @@ public void findShortSuccessor(final ByteBuffer key) { // no-op } + /** + * Returns true if we are using direct byte buffers. + * + * @return true if we are using direct byte buffers, false otherwise. + */ public final boolean usingDirectBuffers() { return usingDirectBuffers(nativeHandle_); } diff --git a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java index d0ceef93d419..9bd1ff7694bc 100644 --- a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java +++ b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java @@ -12,7 +12,7 @@ * it holds methods which are called * from C++ to interact with a Comparator * written in Java. - * + *
<p>
* Placing these bridge methods in this * class keeps the API of the * {@link org.rocksdb.AbstractComparator} clean. diff --git a/java/src/main/java/org/rocksdb/AbstractEventListener.java b/java/src/main/java/org/rocksdb/AbstractEventListener.java index c9371c45eb0c..5f29024bf063 100644 --- a/java/src/main/java/org/rocksdb/AbstractEventListener.java +++ b/java/src/main/java/org/rocksdb/AbstractEventListener.java @@ -12,28 +12,118 @@ */ @SuppressWarnings("PMD.AvoidDuplicateLiterals") public abstract class AbstractEventListener extends RocksCallbackObject implements EventListener { + /** + * Callback events that can be enabled. + */ public enum EnabledEventCallback { + /** + * Flush completed. + */ ON_FLUSH_COMPLETED((byte) 0x0), + + /** + * Flush beginning. + */ ON_FLUSH_BEGIN((byte) 0x1), + + /** + * Table file was deleted. + */ ON_TABLE_FILE_DELETED((byte) 0x2), + + /** + * Compaction beginning. + */ ON_COMPACTION_BEGIN((byte) 0x3), + + /** + * Compaction completed. + */ ON_COMPACTION_COMPLETED((byte) 0x4), + + /** + * Table file created. + */ ON_TABLE_FILE_CREATED((byte) 0x5), + + /** + * Started creation of Table file. + */ ON_TABLE_FILE_CREATION_STARTED((byte) 0x6), + + /** + * Memtable has been sealed. + */ ON_MEMTABLE_SEALED((byte) 0x7), + + /** + * Started deletion of Column Family handle. + */ ON_COLUMN_FAMILY_HANDLE_DELETION_STARTED((byte) 0x8), + + /** + * External file ingested. + */ ON_EXTERNAL_FILE_INGESTED((byte) 0x9), + + /** + * Background error. + */ ON_BACKGROUND_ERROR((byte) 0xA), + + /** + * Stall conditions have been changed. + */ ON_STALL_CONDITIONS_CHANGED((byte) 0xB), + + /** + * File read has finished. + */ ON_FILE_READ_FINISH((byte) 0xC), + + /** + * File write has finished. + */ ON_FILE_WRITE_FINISH((byte) 0xD), + + /** + * File flush has finished. + */ ON_FILE_FLUSH_FINISH((byte) 0xE), + + /** + * File sync has finished. + */ ON_FILE_SYNC_FINISH((byte) 0xF), + + /** + * Range file read sync finished. + */ ON_FILE_RANGE_SYNC_FINISH((byte) 0x10), + + /** + * File truncation has finished. + */ ON_FILE_TRUNCATE_FINISH((byte) 0x11), + + /** + * Closing a file has finished. + */ ON_FILE_CLOSE_FINISH((byte) 0x12), + + /** + * Flag has been set to be notified on file IO. + */ SHOULD_BE_NOTIFIED_ON_FILE_IO((byte) 0x13), + + /** + * Error recovery beginning. + */ ON_ERROR_RECOVERY_BEGIN((byte) 0x14), + + /** + * Error recovery completed. + */ ON_ERROR_RECOVERY_COMPLETED((byte) 0x15); private final byte value; diff --git a/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java index 173d63e9011e..8c500d8a5df2 100644 --- a/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java +++ b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java @@ -22,6 +22,11 @@ public abstract class AbstractImmutableNativeReference */ protected final AtomicBoolean owningHandle_; + /** + * Construct an AbstractImmutableNativeReference. + * + * @param owningHandle true if this Java object owns the underlying C++ object, false otherwise. 
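For illustration, a minimal sketch of a listener built on the event-callback enum above (not part of this patch; it assumes an otherwise open RocksJava environment, and the choice of callback is illustrative). Only the enabled callbacks cross the JNI boundary, so subscribing narrowly keeps the listener cheap:

import java.util.Collections;
import org.rocksdb.AbstractEventListener;
import org.rocksdb.FlushJobInfo;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;

// Only ON_FLUSH_COMPLETED is enabled, so no other event is forwarded from C++.
final AbstractEventListener listener = new AbstractEventListener(
    AbstractEventListener.EnabledEventCallback.ON_FLUSH_COMPLETED) {
  @Override
  public void onFlushCompleted(final RocksDB db, final FlushJobInfo flushJobInfo) {
    System.out.println("flush completed for " + db.getName());
  }
};
final Options options = new Options()
    .setCreateIfMissing(true)
    .setListeners(Collections.singletonList(listener));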
+ */ protected AbstractImmutableNativeReference(final boolean owningHandle) { this.owningHandle_ = new AtomicBoolean(owningHandle); } diff --git a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java index 577e89593eaa..7b3f5b39c2d4 100644 --- a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java +++ b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java @@ -9,12 +9,26 @@ * The constructor is protected, so it will always be used as a base class. */ public class AbstractMutableOptions { + /** + * Separator between Key/Value pairs. + */ protected static final String KEY_VALUE_PAIR_SEPARATOR = ";"; + + /** + * Separator between Key and Value. + */ protected static final char KEY_VALUE_SEPARATOR = '='; + + /** + * Separator between integers in an integer array. + */ static final String INT_ARRAY_INT_SEPARATOR = ":"; private static final String HAS_NOT_BEEN_SET = " has not been set"; + /** + * the keys. + */ protected final String[] keys; private final String[] values; @@ -62,12 +76,24 @@ public String toString() { return buffer.toString(); } + /** + * Builder base class for constructing Mutable Options. + * + * @param the type of the Mutable Options. + * @param the type of the Builder. + * @param the type of the Option Key. + */ public abstract static class AbstractMutableOptionsBuilder< T extends AbstractMutableOptions, U extends AbstractMutableOptionsBuilder, K extends MutableOptionKey> { private final Map> options = new LinkedHashMap<>(); private final List unknown = new ArrayList<>(); + /** + * Return the builder. + * + * @return the builder. + */ protected abstract U self(); /** @@ -87,6 +113,11 @@ public abstract static class AbstractMutableOptionsBuilder< */ protected abstract T build(final String[] keys, final String[] values); + /** + * Construct a subclass instance of {@link AbstractMutableOptions}. + * + * @return an instance of the options. + */ public T build() { final String[] keys = new String[options.size()]; final String[] values = new String[options.size()]; @@ -101,6 +132,14 @@ public T build() { return build(keys, values); } + /** + * Set an option of `Double` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setDouble( final K key, final double value) { if (key.getValueType() != MutableOptionKey.ValueType.DOUBLE) { @@ -111,6 +150,13 @@ protected U setDouble( return self(); } + /** + * Get an option of `Double` type. + * + * @param key the key. + * + * @return the value of the option. + */ protected double getDouble(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -120,6 +166,14 @@ protected double getDouble(final K key) return value.asDouble(); } + /** + * Set an option of `Long` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setLong( final K key, final long value) { if(key.getValueType() != MutableOptionKey.ValueType.LONG) { @@ -130,6 +184,13 @@ protected U setLong( return self(); } + /** + * Get an option of `Long` type. + * + * @param key the key. + * + * @return the value of the option. + */ protected long getLong(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -139,6 +200,14 @@ protected long getLong(final K key) return value.asLong(); } + /** + * Set an option of `int` type. + * + * @param key the key. 
+ * @param value the value. + * + * @return the builder. + */ protected U setInt( final K key, final int value) { if(key.getValueType() != MutableOptionKey.ValueType.INT) { @@ -149,6 +218,13 @@ protected U setInt( return self(); } + /** + * Get an option of `int` type. + * + * @param key the key. + * + * @return the value of the option. + */ protected int getInt(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -158,6 +234,14 @@ protected int getInt(final K key) return value.asInt(); } + /** + * Set an option of `boolean` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setBoolean( final K key, final boolean value) { if(key.getValueType() != MutableOptionKey.ValueType.BOOLEAN) { @@ -168,6 +252,13 @@ protected U setBoolean( return self(); } + /** + * Get an option of `boolean` type. + * + * @param key the key. + * + * @return the value of the option. + */ protected boolean getBoolean(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -177,6 +268,14 @@ protected boolean getBoolean(final K key) return value.asBoolean(); } + /** + * Set an option of `int[]` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setIntArray( final K key, final int[] value) { if(key.getValueType() != MutableOptionKey.ValueType.INT_ARRAY) { @@ -187,6 +286,13 @@ protected U setIntArray( return self(); } + /** + * Get an option of `int[]` type. + * + * @param key the key. + * + * @return the value of the option. + */ protected int[] getIntArray(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -196,6 +302,14 @@ protected int[] getIntArray(final K key) return value.asIntArray(); } + /** + * Set an option of `String` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setString(final K key, final String value) { if (key.getValueType() != MutableOptionKey.ValueType.STRING) { throw new IllegalArgumentException(key + " does not accept a string value"); @@ -204,6 +318,13 @@ protected U setString(final K key, final String value) { return self(); } + /** + * Get an option of `String` type. + * + * @param key the key. + * + * @return the value. + */ protected String getString(final K key) { final MutableOptionValue value = options.get(key); if (value == null) { @@ -212,6 +333,16 @@ protected String getString(final K key) { return value.asString(); } + /** + * Set an option of `Enum[N]` type. + * + * @param the concrete type of the Enum. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected > U setEnum( final K key, final N value) { if(key.getValueType() != MutableOptionKey.ValueType.ENUM) { @@ -222,6 +353,15 @@ protected > U setEnum( return self(); } + /** + * Get an option of `Enum[N]` type. + * + * @param the concrete type of the Enum. + * + * @param key the key. + * + * @return the value of the option. 
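These protected typed accessors are the machinery behind the concrete option builders. A hedged usage sketch (the db and columnFamilyHandle are assumed to already exist and are not part of this patch):

import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.MutableColumnFamilyOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

static void tuneColumnFamily(final RocksDB db, final ColumnFamilyHandle columnFamilyHandle)
    throws RocksDBException {
  // Each fluent setter funnels through the typed setLong/setBoolean logic above;
  // build() snapshots the accumulated keys and values into an immutable instance.
  final MutableColumnFamilyOptions mutableOpts = MutableColumnFamilyOptions.builder()
      .setWriteBufferSize(64L << 20) // a long-typed option
      .setDisableAutoCompactions(true) // a boolean-typed option
      .build();
  db.setOptions(columnFamilyHandle, mutableOpts);
}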
+ */ @SuppressWarnings("unchecked") protected > N getEnum(final K key) throws NoSuchElementException, NumberFormatException { diff --git a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java index b7af848f0c5d..556e47226074 100644 --- a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java +++ b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java @@ -25,6 +25,13 @@ public abstract class AbstractRocksIterator
<P extends RocksObject>
extends RocksObject implements RocksIteratorInterface { final P parent_; + /** + * Constructs an AbstractRocksIterator. + * + * @param parent the parent object from which the Rocks Iterator was created. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ RocksIterator. + */ protected AbstractRocksIterator(final P parent, final long nativeHandle) { super(nativeHandle); @@ -111,7 +118,7 @@ public void refresh() throws RocksDBException { @Override public void refresh(final Snapshot snapshot) throws RocksDBException { assert (isOwningHandle()); - refresh1(nativeHandle_, snapshot.getNativeHandle()); + refresh1(nativeHandle_, snapshot.nativeHandle_); } @Override diff --git a/java/src/main/java/org/rocksdb/AbstractSlice.java b/java/src/main/java/org/rocksdb/AbstractSlice.java index a73d9c644f17..0d00a056a2da 100644 --- a/java/src/main/java/org/rocksdb/AbstractSlice.java +++ b/java/src/main/java/org/rocksdb/AbstractSlice.java @@ -23,13 +23,23 @@ * the Java @see org.rocksdb.AbstractComparator subclass, it disposes the * C++ BaseComparatorJniCallback subclass, which in turn destroys the * Java @see org.rocksdb.AbstractSlice subclass Objects. + * + * @param the concrete Java type that is wrapped by the subclass of {@link AbstractSlice}. */ public abstract class AbstractSlice extends RocksMutableObject { - + /** + * Constructs an AbstractSlice. + */ protected AbstractSlice() { super(); } + /** + * Constructs an AbstractSlice. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ Slice. + */ protected AbstractSlice(final long nativeHandle) { super(nativeHandle); } @@ -174,6 +184,13 @@ public boolean startsWith(final AbstractSlice prefix) { } } + /** + * Constructs a new Slice from a String. + * + * @param str the string. + * + * @return the handle to the native C++ Slice object. + */ protected static native long createNewSliceFromString(final String str); private static native int size0(long handle); private static native boolean empty0(long handle); @@ -186,6 +203,8 @@ public boolean startsWith(final AbstractSlice prefix) { * Note that this function should be called only after all * RocksDB instances referencing the slice are closed. * Otherwise, an undefined behavior will occur. + * + * @param handle the value of the C++ pointer to the underlying native C++ object. */ @Override protected final void disposeInternal(final long handle) { diff --git a/java/src/main/java/org/rocksdb/AbstractTableFilter.java b/java/src/main/java/org/rocksdb/AbstractTableFilter.java index c696c3e1352e..b24ce8692dd4 100644 --- a/java/src/main/java/org/rocksdb/AbstractTableFilter.java +++ b/java/src/main/java/org/rocksdb/AbstractTableFilter.java @@ -6,7 +6,9 @@ */ public abstract class AbstractTableFilter extends RocksCallbackObject implements TableFilter { - + /** + * Constructs a new AbstractTableFilter. + */ protected AbstractTableFilter() { super(); } diff --git a/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java b/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java index 40caaa0854cc..35e69301e44c 100644 --- a/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java +++ b/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java @@ -11,7 +11,9 @@ */ public abstract class AbstractTransactionNotifier extends RocksCallbackObject { - + /** + * Constructs an AbstractTransactionNotifier. 
+ */ protected AbstractTransactionNotifier() { super(); } @@ -50,6 +52,15 @@ protected long initializeNative(final long... nativeParameterHandles) { protected void disposeInternal() { disposeInternal(nativeHandle_); } + + /** + * Deletes underlying C++ transaction notifier pointer. + * Note that this function should be called only after all + * RocksDB instances referencing the transaction notifier are closed. + * Otherwise, an undefined behavior will occur. + * + * @param handle the value of the C++ pointer to the underlying native C++ object. + */ protected final void disposeInternal(final long handle) { disposeInternalJni(handle); } diff --git a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java index 41d967f53179..33ca8d23aa29 100644 --- a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java +++ b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java @@ -7,9 +7,27 @@ import java.nio.ByteBuffer; +/** + * WriteBatch holds a collection of updates to apply atomically to a DB. + *
<p>
+ * The updates are applied in the order in which they are added + * to the WriteBatch. For example, the value of "key" will be "v3" + * after the following batch is written: + *
<p>
<pre>
+ *    batch.put("key", "v1");
+ *    batch.remove("key");
+ *    batch.put("key", "v2");
+ *    batch.put("key", "v3");
+ * </pre>
+ */ public abstract class AbstractWriteBatch extends RocksObject implements WriteBatchInterface { - + /** + * Construct an AbstractWriteBatch. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ Write Batch object. + */ protected AbstractWriteBatch(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java index 867f5ca959bd..200552634647 100644 --- a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java @@ -12,6 +12,8 @@ * mutable (i.e. present in {@link AdvancedMutableColumnFamilyOptionsInterface}) *
<p>
* Taken from include/rocksdb/advanced_options.h + * + * @param the concrete type of the Column Family Options. */ public interface AdvancedColumnFamilyOptionsInterface< T extends AdvancedColumnFamilyOptionsInterface & ColumnFamilyOptionsInterface> { @@ -375,7 +377,7 @@ T setCompactionOptionsFIFO( * even for key hit because they tell us whether to look in that level or go * to the higher level.
<br/> * - * <p/> Default: false <p/> + * <p> Default: false <p>
* * @param optimizeFiltersForHits boolean value indicating if this flag is set. * @return the reference to the current options. diff --git a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java index 44e61c6d743d..44ebf5b21c81 100644 --- a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java @@ -10,6 +10,8 @@ *
<p>
* Taken from include/rocksdb/advanced_options.h * and MutableCFOptions in util/cf_options.h + * + * @param the concrete type of the Column Family Options. */ public interface AdvancedMutableColumnFamilyOptionsInterface< T extends AdvancedMutableColumnFamilyOptionsInterface> { diff --git a/java/src/main/java/org/rocksdb/BackgroundErrorReason.java b/java/src/main/java/org/rocksdb/BackgroundErrorReason.java index eec593d35c54..1c68a7e38dc5 100644 --- a/java/src/main/java/org/rocksdb/BackgroundErrorReason.java +++ b/java/src/main/java/org/rocksdb/BackgroundErrorReason.java @@ -5,10 +5,28 @@ package org.rocksdb; +/** + * Reasons for the background error. + */ public enum BackgroundErrorReason { + /** + * Flush. + */ FLUSH((byte) 0x0), + + /** + * Compaction. + */ COMPACTION((byte) 0x1), + + /** + * Write callback. + */ WRITE_CALLBACK((byte) 0x2), + + /** + * Memtable. + */ MEMTABLE((byte) 0x3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/BackupEngine.java b/java/src/main/java/org/rocksdb/BackupEngine.java index 4ee675ad758e..b07afbf8fc36 100644 --- a/java/src/main/java/org/rocksdb/BackupEngine.java +++ b/java/src/main/java/org/rocksdb/BackupEngine.java @@ -18,7 +18,12 @@ * time you need to do a backup. */ public class BackupEngine extends RocksObject implements AutoCloseable { - + /** + * Construct a BackupEngine. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ backup engine object. + */ protected BackupEngine(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java index df21d774484d..cdc86392961e 100644 --- a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java +++ b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java @@ -11,6 +11,9 @@ */ // TODO(AR) should be renamed BlockBasedTableOptions public class BlockBasedTableConfig extends TableFormatConfig { + /** + * Constructs a new BlockBasedTableConfig. + */ @SuppressWarnings("PMD.NullAssignment") public BlockBasedTableConfig() { //TODO(AR) flushBlockPolicyFactory @@ -859,7 +862,7 @@ public IndexShorteningMode indexShortening() { /** * Set the index shortening mode. - * + *
<p>
* See {@link IndexShorteningMode}. * * @param indexShortening the index shortening mode. @@ -937,7 +940,7 @@ public BlockBasedTableConfig setCacheNumShardBits( * * @deprecated This option is now deprecated. No matter what value it * is set to, it will behave as - * if {@link #hashIndexAllowCollision()} == true. + * if {@code setHashIndexAllowCollision(true)} */ @Deprecated public boolean hashIndexAllowCollision() { diff --git a/java/src/main/java/org/rocksdb/BuiltinComparator.java b/java/src/main/java/org/rocksdb/BuiltinComparator.java index 2c89bf218d1d..f4806fe57d72 100644 --- a/java/src/main/java/org/rocksdb/BuiltinComparator.java +++ b/java/src/main/java/org/rocksdb/BuiltinComparator.java @@ -6,15 +6,16 @@ package org.rocksdb; /** - * Builtin RocksDB comparators - * - *
<ol> - *   <li>BYTEWISE_COMPARATOR - Sorts all keys in ascending bytewise - * order.</li> - *   <li>REVERSE_BYTEWISE_COMPARATOR - Sorts all keys in descending bytewise - * order</li> - * </ol>
+ * Builtin RocksDB comparators. */ public enum BuiltinComparator { - BYTEWISE_COMPARATOR, REVERSE_BYTEWISE_COMPARATOR + /** + * Sorts all keys in ascending bytewise order. + */ + BYTEWISE_COMPARATOR, + + /** + * Sorts all keys in descending bytewise order. + */ + REVERSE_BYTEWISE_COMPARATOR } diff --git a/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java index 4ab9e8475ce9..fead6b2c13b7 100644 --- a/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java +++ b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java @@ -20,8 +20,19 @@ * {@link RocksDB#multiGetByteBuffers(ReadOptions, List, List, List)} */ public class ByteBufferGetStatus { + /** + * Status of the request to fetch into the buffer. + */ public final Status status; + + /** + * Size of the data, which may be bigger than the buffer. + */ public final int requiredSize; + + /** + * Buffer containing as much of the value as fits. + */ public final ByteBuffer value; /** diff --git a/java/src/main/java/org/rocksdb/Cache.java b/java/src/main/java/org/rocksdb/Cache.java index 04bd3fcaa398..c1e4812031d2 100644 --- a/java/src/main/java/org/rocksdb/Cache.java +++ b/java/src/main/java/org/rocksdb/Cache.java @@ -5,8 +5,16 @@ package org.rocksdb; - +/** + * Base class for Cache implementations. + */ public abstract class Cache extends RocksObject { + /** + * Construct a Cache. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ cache object. + */ protected Cache(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java index 12854c5102be..c2705ed4e13e 100644 --- a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java @@ -6,10 +6,25 @@ package org.rocksdb; /** - * Just a Java wrapper around CassandraCompactionFilter implemented in C++ + * Just a Java wrapper around CassandraCompactionFilter implemented in C++. + *
<p>
* Compaction filter for removing expired Cassandra data with ttl. + * Is also in charge of removing tombstones that have been + * promoted to kValue type after a series of merges during compaction. */ public class CassandraCompactionFilter extends AbstractCompactionFilter<Slice> { + /** + * Constructs a new CassandraCompactionFilter. + * + * @param purgeTtlOnExpiration if set to true, expired data will be directly purged, + * otherwise expired data will be converted to tombstones + * first and then be eventually removed after + * {@code gcGracePeriodInSeconds}. Should only be on in + * the case that all the writes have the same ttl setting, + * otherwise it could bring old data back. + * @param gcGracePeriodInSeconds the grace period in seconds for gc. + */ public CassandraCompactionFilter( final boolean purgeTtlOnExpiration, final int gcGracePeriodInSeconds) { super(createNewCassandraCompactionFilter0(purgeTtlOnExpiration, gcGracePeriodInSeconds)); diff --git a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java index cdb82ee43473..597a18efd9ae 100644 --- a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java +++ b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java @@ -6,14 +6,27 @@ package org.rocksdb; /** + * Just a Java wrapper around CassandraValueMergeOperator implemented in C++. + *
<p>
* CassandraValueMergeOperator is a merge operator that merges two cassandra wide column * values. */ public class CassandraValueMergeOperator extends MergeOperator { + /** + * Constructs a new CassandraValueMergeOperator. + * + * @param gcGracePeriodInSeconds the grace period in seconds for gc. + */ public CassandraValueMergeOperator(final int gcGracePeriodInSeconds) { super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, 0)); } + /** + * Constructs a new CassandraValueMergeOperator. + * + * @param gcGracePeriodInSeconds the grace period in seconds for gc. + * @param operandsLimit the maximum size of the operands list before merge is applied. + */ public CassandraValueMergeOperator(final int gcGracePeriodInSeconds, final int operandsLimit) { super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, operandsLimit)); } diff --git a/java/src/main/java/org/rocksdb/Checkpoint.java b/java/src/main/java/org/rocksdb/Checkpoint.java index e50068a6e32c..4cdfe0ff20e7 100644 --- a/java/src/main/java/org/rocksdb/Checkpoint.java +++ b/java/src/main/java/org/rocksdb/Checkpoint.java @@ -50,6 +50,22 @@ public void createCheckpoint(final String checkpointPath) createCheckpoint(nativeHandle_, checkpointPath); } + /** + * Exports all live SST files of a specified Column Family into {@code exportPath}. + *
<p>
+ * Always triggers a flush. + * + * @param columnFamilyHandle the column family to export. + * + * @param exportPath should not already exist and will be created by this API. + * SST files will be created as hard links when the directory specified + * is in the same partition as the db directory, copied otherwise. + * + * @return metadata about the exported SST files. + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ public ExportImportFilesMetaData exportColumnFamily(final ColumnFamilyHandle columnFamilyHandle, final String exportPath) throws RocksDBException { return new ExportImportFilesMetaData( diff --git a/java/src/main/java/org/rocksdb/ChecksumType.java b/java/src/main/java/org/rocksdb/ChecksumType.java index 5b3d2249250f..556220f8baa5 100644 --- a/java/src/main/java/org/rocksdb/ChecksumType.java +++ b/java/src/main/java/org/rocksdb/ChecksumType.java @@ -14,18 +14,20 @@ public enum ChecksumType { */ kNoChecksum((byte) 0), /** - * CRC32 Checksum + * CRC32 Checksum. */ kCRC32c((byte) 1), /** - * XX Hash + * XX Hash. */ kxxHash((byte) 2), /** - * XX Hash 64 + * XX Hash 64. */ kxxHash64((byte) 3), - + /** + * XX Hash v3. + */ kXXH3((byte) 4); /** diff --git a/java/src/main/java/org/rocksdb/ClockCache.java b/java/src/main/java/org/rocksdb/ClockCache.java index afbd7f75532c..452ef33f7b2c 100644 --- a/java/src/main/java/org/rocksdb/ClockCache.java +++ b/java/src/main/java/org/rocksdb/ClockCache.java @@ -14,6 +14,7 @@ * configuration parameter that is not provided by this API. This function * simply returns a new LRUCache for functional compatibility. */ +@Deprecated public class ClockCache extends Cache { /** * Create a new cache with a fixed size capacity. @@ -22,6 +23,7 @@ public class ClockCache extends Cache { * * @param capacity The fixed size capacity of the cache */ + @Deprecated public ClockCache(final long capacity) { super(newClockCache(capacity, -1, false)); } @@ -39,6 +41,7 @@ public ClockCache(final long capacity) { * @param numShardBits The cache is sharded to 2^numShardBits shards, * by hash of the key */ + @Deprecated public ClockCache(final long capacity, final int numShardBits) { super(newClockCache(capacity, numShardBits, false)); } @@ -58,8 +61,9 @@ public ClockCache(final long capacity, final int numShardBits) { * by hash of the key * @param strictCapacityLimit insert to the cache will fail when cache is full */ - public ClockCache(final long capacity, final int numShardBits, - final boolean strictCapacityLimit) { + @Deprecated + public ClockCache( + final long capacity, final int numShardBits, final boolean strictCapacityLimit) { super(newClockCache(capacity, numShardBits, strictCapacityLimit)); } diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java index 00bff0b07307..054d35adf23d 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java @@ -123,6 +123,11 @@ public int hashCode() { } } + /** + * Returns true if this is the handle for the default column family. + * + * @return true if this is the handle for the default column family, false otherwise. 
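A sketch of driving the export API documented above (the export path is a placeholder and the open db is assumed; neither is part of this patch):

import org.rocksdb.Checkpoint;
import org.rocksdb.ExportImportFilesMetaData;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

static void exportDefaultColumnFamily(final RocksDB db) throws RocksDBException {
  try (final Checkpoint checkpoint = Checkpoint.create(db)) {
    // Flushes first, then hard-links the live SSTs into the export directory
    // (or copies them if it is on a different partition).
    try (final ExportImportFilesMetaData metadata = checkpoint.exportColumnFamily(
             db.getDefaultColumnFamily(), "/tmp/exported-cf" /* placeholder; must not exist */)) {
      // metadata describes the exported files for a later import.
    }
  }
}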
+ */ protected boolean isDefaultColumnFamily() { return nativeHandle_ == rocksDB_.getDefaultColumnFamily().nativeHandle_; } diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java index d25f8c73bc7b..aa7fe8f944ee 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java @@ -147,7 +147,7 @@ public ColumnFamilyOptions optimizeForSmallDb() { @Override public ColumnFamilyOptions optimizeForSmallDb(final Cache cache) { - optimizeForSmallDb(nativeHandle_, cache.getNativeHandle()); + optimizeForSmallDb(nativeHandle_, cache.nativeHandle_); return this; } diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java index 4776773bd8bd..40c7c5806409 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java @@ -8,6 +8,11 @@ import java.util.Collection; import java.util.List; +/** + * Interface for Column Family Options. + * + * @param the concrete type of the ColumnFamilyOptions. + */ public interface ColumnFamilyOptionsInterface> extends AdvancedColumnFamilyOptionsInterface { /** diff --git a/java/src/main/java/org/rocksdb/CompactRangeOptions.java b/java/src/main/java/org/rocksdb/CompactRangeOptions.java index ba5fa6455d27..3df1939f9ee3 100644 --- a/java/src/main/java/org/rocksdb/CompactRangeOptions.java +++ b/java/src/main/java/org/rocksdb/CompactRangeOptions.java @@ -17,9 +17,11 @@ public class CompactRangeOptions extends RocksObject { private static final byte VALUE_kForce = 2; private static final byte VALUE_kForceOptimized = 3; - // For level based compaction, we can configure if we want to skip/force bottommost level - // compaction. The order of this enum MUST follow the C++ layer. See BottommostLevelCompaction in - // db/options.h + /** + * For level based compaction, we can configure if we want to skip/force bottommost level + * compaction. The order of this enum MUST follow the C++ layer. See BottommostLevelCompaction in + * db/options.h + */ public enum BottommostLevelCompaction { /** * Skip bottommost level compaction @@ -71,15 +73,34 @@ public static BottommostLevelCompaction fromRocksId(final int bottommostLevelCom } } + /** + * Timestamp. + */ public static class Timestamp { + /** + * the start. + */ public final long start; + + /** + * the range. + */ public final long range; - public Timestamp(final long start, final long duration) { + /** + * Constructs a Timestamp. + * + * @param start the start. + * @param range the range. + */ + public Timestamp(final long start, final long range) { this.start = start; - this.range = duration; + this.range = range; } + /** + * Constructs a Timestamp. + */ public Timestamp() { this.start = 0; this.range = 0; @@ -250,20 +271,44 @@ public CompactRangeOptions setMaxSubcompactions(final int maxSubcompactions) { return this; } + /** + * Set Full History Low Timestamp; + * + * @param tsLow low timestamp. + * + * @return This CompactRangeOptions. + */ public CompactRangeOptions setFullHistoryTSLow(final Timestamp tsLow) { setFullHistoryTSLow(nativeHandle_, tsLow.start, tsLow.range); return this; } + /** + * Get the Full History Low Timestamp; + * + * @return low timestamp. + */ public Timestamp fullHistoryTSLow() { return fullHistoryTSLow(nativeHandle_); } + /** + * Set canceled. + * + * @param canceled true to cancel, otherwise false. 
+ * + * @return This CompactRangeOptions. + */ public CompactRangeOptions setCanceled(final boolean canceled) { setCanceled(nativeHandle_, canceled); return this; } + /** + * Get the canceled status. + * + * @return true if canceled, false otherwise. + */ public boolean canceled() { return canceled(nativeHandle_); } diff --git a/java/src/main/java/org/rocksdb/CompactionJobInfo.java b/java/src/main/java/org/rocksdb/CompactionJobInfo.java index 29369f174a1a..96f50120ba5a 100644 --- a/java/src/main/java/org/rocksdb/CompactionJobInfo.java +++ b/java/src/main/java/org/rocksdb/CompactionJobInfo.java @@ -9,8 +9,13 @@ import java.util.List; import java.util.Map; +/** + * Information about a Compaction Job. + */ public class CompactionJobInfo extends RocksObject { - + /** + * Constructs a new CompactionJobInfo. + */ public CompactionJobInfo() { super(newCompactionJobInfo()); } diff --git a/java/src/main/java/org/rocksdb/CompactionJobStats.java b/java/src/main/java/org/rocksdb/CompactionJobStats.java index 857de7b62430..f02799d823ec 100644 --- a/java/src/main/java/org/rocksdb/CompactionJobStats.java +++ b/java/src/main/java/org/rocksdb/CompactionJobStats.java @@ -5,8 +5,13 @@ package org.rocksdb; +/** + * Statistics about a Compaction Job. + */ public class CompactionJobStats extends RocksObject { - + /** + * Constructs a new CompactionJobStats. + */ public CompactionJobStats() { super(newCompactionJobStats()); } @@ -118,7 +123,7 @@ public long totalOutputBytes() { /** * Get the number of records being replaced by newer record associated * with same key. - * + *
<p>
* This could be a new value or a deletion entry for that key so this field * sums up all updated and deleted keys. * @@ -149,7 +154,7 @@ public long totalInputRawValueBytes() { /** * Get the number of deletion entries before compaction. - * + *
<p>
* Deletion entries can disappear after compaction because they expired. * * @return the number of deletion entries before compaction. @@ -182,7 +187,7 @@ public long numCorruptKeys() { /** * Get the Time spent on file's Append() call. - * + *
<p>
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. * * @return the Time spent on file's Append() call. @@ -193,7 +198,7 @@ public long fileWriteNanos() { /** * Get the Time spent on sync file range. - * + *
<p>
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. * * @return the Time spent on sync file range. @@ -204,7 +209,7 @@ public long fileRangeSyncNanos() { /** * Get the Time spent on file fsync. - * + *
<p>
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. * * @return the Time spent on file fsync. @@ -215,7 +220,7 @@ public long fileFsyncNanos() { /** * Get the Time spent on preparing file write (falocate, etc) - * + *
<p>
* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. * * @return the Time spent on preparing file write (falocate, etc). diff --git a/java/src/main/java/org/rocksdb/CompactionOptions.java b/java/src/main/java/org/rocksdb/CompactionOptions.java index 08cbdf6378b3..47ebfc8d0f28 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptions.java +++ b/java/src/main/java/org/rocksdb/CompactionOptions.java @@ -13,14 +13,16 @@ * calls. */ public class CompactionOptions extends RocksObject { - + /** + * Constructs a new CompactionOptions. + */ public CompactionOptions() { super(newCompactionOptions()); } /** * Get the compaction output compression type. - * + *
<p>
* See {@link #setCompression(CompressionType)}. * * @return the compression type. @@ -32,9 +34,9 @@ public CompressionType compression() { /** * Set the compaction output compression type. - * + *
<p>
* Default: snappy - * + *
<p>
* If set to {@link CompressionType#DISABLE_COMPRESSION_OPTION}, * RocksDB will choose compression type according to the * {@link ColumnFamilyOptions#compressionType()}, taking into account @@ -52,7 +54,7 @@ public CompactionOptions setCompression(final CompressionType compression) { /** * Get the compaction output file size limit. - * + *
<p>
* See {@link #setOutputFileSizeLimit(long)}. * * @return the file size limit. @@ -63,7 +65,7 @@ public long outputFileSizeLimit() { /** * Compaction will create files of size {@link #outputFileSizeLimit()}. - * + *
<p>
* Default: 2^64-1, which means that compaction will create a single file * * @param outputFileSizeLimit the size limit @@ -90,9 +92,9 @@ public int maxSubcompactions() { * This value represents the maximum number of threads that will * concurrently perform a compaction job by breaking it into multiple, * smaller ones that are run simultaneously. - * + *
<p>
* Default: 0 (i.e. no subcompactions) - * + *
<p>
* If > 0, it will replace the option in * {@link DBOptions#maxSubcompactions()} for this compaction. * diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java index 24ebe0da2ff1..7ea28695815d 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java +++ b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java @@ -9,7 +9,9 @@ * Options for FIFO Compaction */ public class CompactionOptionsFIFO extends RocksObject { - + /** + * Constructs a new CompactionOptionsFIFO. + */ public CompactionOptionsFIFO() { super(newCompactionOptionsFIFO()); } diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java index f18915b8f569..54013b071cfd 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java +++ b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java @@ -9,7 +9,9 @@ * Options for Universal Compaction */ public class CompactionOptionsUniversal extends RocksObject { - + /** + * Constructs a new CompactionOptionsUniversal. + */ public CompactionOptionsUniversal() { super(newCompactionOptionsUniversal()); } diff --git a/java/src/main/java/org/rocksdb/CompactionReason.java b/java/src/main/java/org/rocksdb/CompactionReason.java index 46ec33f3f141..68828c3a9ef5 100644 --- a/java/src/main/java/org/rocksdb/CompactionReason.java +++ b/java/src/main/java/org/rocksdb/CompactionReason.java @@ -5,7 +5,13 @@ package org.rocksdb; +/** + * Reasons for compaction. + */ public enum CompactionReason { + /** + * Unknown. + */ kUnknown((byte)0x0), /** diff --git a/java/src/main/java/org/rocksdb/CompactionStyle.java b/java/src/main/java/org/rocksdb/CompactionStyle.java index 7b955a7a248c..6a1de336abfb 100644 --- a/java/src/main/java/org/rocksdb/CompactionStyle.java +++ b/java/src/main/java/org/rocksdb/CompactionStyle.java @@ -35,9 +35,24 @@ * FIFO Compaction */ public enum CompactionStyle { + /** + * Level Compaction. + */ LEVEL((byte) 0x0), + + /** + * Universal Compaction. + */ UNIVERSAL((byte) 0x1), + + /** + * First-in First-out Compaction. + */ FIFO((byte) 0x2), + + /** + * No compaction. + */ NONE((byte) 0x3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/ComparatorOptions.java b/java/src/main/java/org/rocksdb/ComparatorOptions.java index da287b51816b..3247a85601a5 100644 --- a/java/src/main/java/org/rocksdb/ComparatorOptions.java +++ b/java/src/main/java/org/rocksdb/ComparatorOptions.java @@ -13,6 +13,9 @@ * instance becomes out-of-scope to release the allocated memory in C++. */ public class ComparatorOptions extends RocksObject { + /** + * Constructs a new ComparatorOptions. + */ public ComparatorOptions() { super(newComparatorOptions()); } diff --git a/java/src/main/java/org/rocksdb/CompressionOptions.java b/java/src/main/java/org/rocksdb/CompressionOptions.java index e6316af451e6..acc11863d02c 100644 --- a/java/src/main/java/org/rocksdb/CompressionOptions.java +++ b/java/src/main/java/org/rocksdb/CompressionOptions.java @@ -9,34 +9,93 @@ * Options for Compression */ public class CompressionOptions extends RocksObject { + /** + * RocksDB's generic default compression level. Internally it'll be translated + * to the default compression level specific to the library being used. + */ + public static final int DEFAULT_COMPRESSION_LEVEL = 32_767; + /** + * Constructs a new CompressionOptions. + */ public CompressionOptions() { super(newCompressionOptions()); } + /** + * Set the Window size. 
+ * Zlib only. + * + * @param windowBits the size of the window. + * + * @return the reference to the current compression options. + */ public CompressionOptions setWindowBits(final int windowBits) { setWindowBits(nativeHandle_, windowBits); return this; } + /** + * Get the Window size. + * Zlib only. + * + * @return the size of the window. + */ public int windowBits() { return windowBits(nativeHandle_); } + /** + * Compression "level" applicable to zstd, zlib, LZ4, and LZ4HC. Except for + * {@link #DEFAULT_COMPRESSION_LEVEL}, the meaning of each value depends + * on the compression algorithm. Decreasing across non- + * {@link #DEFAULT_COMPRESSION_LEVEL} values will either favor speed over + * compression ratio or have no effect. + *
<p>
+ * In LZ4 specifically, the absolute value of a negative `level` internally + * configures the `acceleration` parameter. For example, set `level=-10` for + * `acceleration=10`. This negation is necessary to ensure decreasing `level` + * values favor speed over compression ratio. + * + * @param level the compression level. + * + * @return the reference to the current compression options. + */ public CompressionOptions setLevel(final int level) { setLevel(nativeHandle_, level); return this; } + /** + * Get the Compression "level". + *
<p>
+ * See {@link #setLevel(int)} + * + * @return the compression level. + */ public int level() { return level(nativeHandle_); } + /** + * Set the compression strategy. + * Zlib only. + * + * @param strategy the strategy. + * + * @return the reference to the current compression options. + */ public CompressionOptions setStrategy(final int strategy) { setStrategy(nativeHandle_, strategy); return this; } + /** + * Get the compression strategy. + * Zlib only. + * + * @return the strategy. + */ public int strategy() { return strategy(nativeHandle_); } diff --git a/java/src/main/java/org/rocksdb/CompressionType.java b/java/src/main/java/org/rocksdb/CompressionType.java index d1ecf0ac84c5..4f683d036735 100644 --- a/java/src/main/java/org/rocksdb/CompressionType.java +++ b/java/src/main/java/org/rocksdb/CompressionType.java @@ -14,14 +14,49 @@ * compression method (if any) is used to compress a block.

*/ public enum CompressionType { + /** + * No compression. + */ NO_COMPRESSION((byte) 0x0, null, "kNoCompression"), + + /** + * Snappy compression. + */ SNAPPY_COMPRESSION((byte) 0x1, "snappy", "kSnappyCompression"), + + /** + * ZLib compression. + */ ZLIB_COMPRESSION((byte) 0x2, "z", "kZlibCompression"), + + /** + * BZ2 compression. + */ BZLIB2_COMPRESSION((byte) 0x3, "bzip2", "kBZip2Compression"), + + /** + * LZ4 compression. + */ LZ4_COMPRESSION((byte) 0x4, "lz4", "kLZ4Compression"), + + /** + * LZ4 with high compression. + */ LZ4HC_COMPRESSION((byte) 0x5, "lz4hc", "kLZ4HCCompression"), + + /** + * Microsoft XPress compression (Windows only). + */ XPRESS_COMPRESSION((byte) 0x6, "xpress", "kXpressCompression"), + + /** + * ZStd compression. + */ ZSTD_COMPRESSION((byte) 0x7, "zstd", "kZSTD"), + + /** + * Disable compression. + */ DISABLE_COMPRESSION_OPTION((byte) 0x7F, null, "kDisableCompressionOption"); /** diff --git a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java index b4e34303b5f3..b07d691380f4 100644 --- a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java +++ b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java @@ -6,7 +6,16 @@ package org.rocksdb; +/** + * Base class for Concurrent Task Limiters. + */ public abstract class ConcurrentTaskLimiter extends RocksObject { + /** + * Constructs a ConcurrentTaskLimiter. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ concurrent task limiter object. + */ protected ConcurrentTaskLimiter(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java index 1c496ff2f5c8..acfc7829de97 100644 --- a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java +++ b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java @@ -6,7 +6,16 @@ package org.rocksdb; +/** + * Concurrent Task Limiter. + */ public class ConcurrentTaskLimiterImpl extends ConcurrentTaskLimiter { + /** + * Construct a new Concurrent Task Limiter. + * + * @param name the name of the limiter. + * @param maxOutstandingTask the maximum concurrent tasks. + */ public ConcurrentTaskLimiterImpl(final String name, final int maxOutstandingTask) { super(newConcurrentTaskLimiterImpl0(name, maxOutstandingTask)); } diff --git a/java/src/main/java/org/rocksdb/ConfigOptions.java b/java/src/main/java/org/rocksdb/ConfigOptions.java index 4717750b7f4b..c846f6892854 100644 --- a/java/src/main/java/org/rocksdb/ConfigOptions.java +++ b/java/src/main/java/org/rocksdb/ConfigOptions.java @@ -6,33 +6,72 @@ package org.rocksdb; +/** + * Configuration options. + */ public class ConfigOptions extends RocksObject { /** - * Construct with default Options + * Constructs a new ConfigOptions. */ public ConfigOptions() { super(newConfigOptionsInstance()); } + /** + * Set the delimiter used between options. + * + * @param delimiter the delimiter + * + * @return the reference to the current options + */ public ConfigOptions setDelimiter(final String delimiter) { setDelimiter(nativeHandle_, delimiter); return this; } + + /** + * Set whether to ignore unknown options. + * + * @param ignore true to ignore unknown options, otherwise raise an error. 
+ * + * @return the reference to the current options + */ public ConfigOptions setIgnoreUnknownOptions(final boolean ignore) { setIgnoreUnknownOptions(nativeHandle_, ignore); return this; } + /** + * Set the environment. + * + * @param env the environment. + * + * @return the reference to the current options + */ public ConfigOptions setEnv(final Env env) { setEnv(nativeHandle_, env.nativeHandle_); return this; } + /** + * Set whether to escape input strings. + * + * @param escaped true to escape input strings, false otherwise. + * + * @return the reference to the current options + */ public ConfigOptions setInputStringsEscaped(final boolean escaped) { setInputStringsEscaped(nativeHandle_, escaped); return this; } + /** + * Set the sanity level. + * + * @param level the sanity level. + * + * @return the reference to the current options + */ public ConfigOptions setSanityLevel(final SanityLevel level) { setSanityLevel(nativeHandle_, level.getValue()); return this; diff --git a/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/java/src/main/java/org/rocksdb/DBOptionsInterface.java index bc9d9acbd65e..791cbd34c7f5 100644 --- a/java/src/main/java/org/rocksdb/DBOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/DBOptionsInterface.java @@ -8,6 +8,11 @@ import java.util.Collection; import java.util.List; +/** + * Interface for DB Options. + * + * @param the concrete type of DBOptions. + */ public interface DBOptionsInterface> { /** * Use this if your DB is very small (like under 1GB) and you don't want to @@ -78,8 +83,7 @@ public interface DBOptionsInterface> { * * @param flag a flag indicating if missing column families shall be * created automatically. - * @return true if missing column families shall be created automatically - * on open. + * @return the instance of the current Options */ T setCreateMissingColumnFamilies(boolean flag); @@ -159,7 +163,7 @@ public interface DBOptionsInterface> { /** * Use to track SST files and control their file deletion rate. - * + *
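// [Editor's note: illustrative sketch, not part of the diff.] A typical use of
// the ConfigOptions setters documented above: reloading previously persisted
// options. This assumes the OptionsUtil.loadLatestOptions overload that accepts
// a ConfigOptions (throws RocksDBException); "/path/to/db" is a placeholder.
import java.util.ArrayList;
import java.util.List;
import org.rocksdb.*;

try (final ConfigOptions cfg = new ConfigOptions()
         .setIgnoreUnknownOptions(true) // tolerate options written by newer versions
         .setInputStringsEscaped(true)
         .setEnv(Env.getDefault());
     final DBOptions dbOptions = new DBOptions()) {
  final List<ColumnFamilyDescriptor> cfDescs = new ArrayList<>();
  OptionsUtil.loadLatestOptions(cfg, "/path/to/db", dbOptions, cfDescs);
}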

* Features: * - Throttle the deletion rate of the SST files. * - Keep track the total size of all SST files. @@ -167,7 +171,7 @@ public interface DBOptionsInterface> { * the DB wont do any further flushes or compactions and will set the * background error. * - Can be shared between multiple dbs. - * + *

* Limitations: * - Only track and throttle deletes of SST files in * first db_path (db_name if db_paths is empty). @@ -208,7 +212,7 @@ public interface DBOptionsInterface> { * If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open * all files on DB::Open(). You can use this option to increase the number * of threads used to open the files. - * + *

* Default: 16 * * @param maxFileOpeningThreads the maximum number of threads to use to @@ -222,7 +226,7 @@ public interface DBOptionsInterface> { * If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open all * files on DB::Open(). You can use this option to increase the number of * threads used to open the files. - * + *

* Default: 16 * * @return the maximum number of threads to use to open files @@ -278,27 +282,27 @@ public interface DBOptionsInterface> { * A list of paths where SST files can be put into, with its target size. * Newer data is placed into paths specified earlier in the vector while * older data gradually moves to paths specified later in the vector. - * + *

* For example, you have a flash device with 10GB allocated for the DB, * as well as a hard drive of 2TB, you should config it to be: * [{"/flash_path", 10GB}, {"/hard_drive", 2TB}] - * + *

* The system will try to guarantee data under each path is close to but * not larger than the target size. But current and future file sizes used * by determining where to place a file are based on best-effort estimation, * which means there is a chance that the actual size under the directory * is slightly more than target size under some workloads. User should give * some buffer room for those cases. - * + *

* If none of the paths has sufficient room to place a file, the file will * be placed to the last path anyway, despite to the target size. - * + *

* Placing newer data to earlier paths is also best-efforts. User should * expect user files to be placed in higher levels in some extreme cases. - * + *

* If left empty, only one path will be used, which is db_name passed when * opening the DB. - * + *

* Default: empty * * @param dbPaths the paths and target sizes @@ -311,27 +315,27 @@ public interface DBOptionsInterface> { * A list of paths where SST files can be put into, with its target size. * Newer data is placed into paths specified earlier in the vector while * older data gradually moves to paths specified later in the vector. - * + *

* For example, you have a flash device with 10GB allocated for the DB, * as well as a hard drive of 2TB, you should config it to be: * [{"/flash_path", 10GB}, {"/hard_drive", 2TB}] - * + *

* The system will try to guarantee data under each path is close to but * not larger than the target size. But current and future file sizes used * by determining where to place a file are based on best-effort estimation, * which means there is a chance that the actual size under the directory * is slightly more than target size under some workloads. User should give * some buffer room for those cases. - * + *

* If none of the paths has sufficient room to place a file, the file will * be placed to the last path anyway, despite to the target size. - * + *

* Placing newer data to earlier paths is also best-efforts. User should * expect user files to be placed in higher levels in some extreme cases. - * + *

* If left empty, only one path will be used, which is db_name passed when * opening the DB. - * + *

* Default: {@link java.util.Collections#emptyList()} * * @return dbPaths the paths and target sizes @@ -352,7 +356,7 @@ public interface DBOptionsInterface> { /** * Returns the directory of info log. - * + *
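// [Editor's note: illustrative sketch, not part of the diff.] The flash plus
// hard-drive layout from the db_paths javadoc above, using the DbPath(Path,
// long) constructor that this diff also documents. Sizes mirror the javadoc's
// own example.
import java.nio.file.Paths;
import java.util.Arrays;
import org.rocksdb.DBOptions;
import org.rocksdb.DbPath;

final long GB = 1024L * 1024L * 1024L;
try (final DBOptions options = new DBOptions().setDbPaths(Arrays.asList(
         new DbPath(Paths.get("/flash_path"), 10 * GB),
         new DbPath(Paths.get("/hard_drive"), 2048 * GB)))) {
  // newer SSTs fill /flash_path up to ~10GB; older data migrates to /hard_drive
}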

* If it is empty, the log files will be in the same dir as data. * If it is non empty, the log files will be in the specified dir, * and the db data dir's absolute path will be used as the log file @@ -377,7 +381,7 @@ public interface DBOptionsInterface> { /** * Returns the path to the write-ahead-logs (WAL) directory. - * + *

* If it is empty, the log files will be in the same dir as data, * dbname is used as the data dir by default * If it is non empty, the log files will be in kept the specified dir. @@ -439,7 +443,7 @@ public interface DBOptionsInterface> { * `max_background_jobs = max_background_compactions + max_background_flushes` * in the case where user sets at least one of `max_background_compactions` or * `max_background_flushes`. - * + *

* Specifies the maximum number of concurrent background flush jobs. * If you're increasing this, also consider increasing number of threads in * HIGH priority thread pool. For more information, see @@ -463,7 +467,7 @@ public interface DBOptionsInterface> { * `max_background_jobs = max_background_compactions + max_background_flushes` * in the case where user sets at least one of `max_background_compactions` or * `max_background_flushes`. - * + *

* Returns the maximum number of concurrent background flush jobs. * If you're increasing this, also consider increasing number of threads in * HIGH priority thread pool. For more information, see @@ -542,16 +546,16 @@ public interface DBOptionsInterface> { /** * Recycle log files. - * + *

* If non-zero, we will reuse previously written log files for new * logs, overwriting the old data. The value indicates how many * such files we will keep around at any point in time for later * use. - * + *

* This is more efficient because the blocks are already * allocated and fdatasync does not need to update the inode after * each write. - * + *

* Default: 0 * * @param recycleLogFileNum the number of log files to keep for recycling @@ -562,16 +566,16 @@ public interface DBOptionsInterface> { /** * Recycle log files. - * + *

* If non-zero, we will reuse previously written log files for new * logs, overwriting the old data. The value indicates how many * such files we will keep around at any point in time for later * use. - * + *

* This is more efficient because the blocks are already * allocated and fdatasync does not need to update the inode after * each write. - * + *

* Default: 0 * * @return the number of log files kept for recycling @@ -617,17 +621,17 @@ public interface DBOptionsInterface> { /** * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect when WALs * will be archived and deleted. - * + *

* When both are zero, obsolete WALs will not be archived and will be deleted * immediately. Otherwise, obsolete WALs will be archived prior to deletion. - * + *

* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the * earliest will be deleted until the total size of the archive falls below * this limit. All empty WALs will be deleted. - * + *

* When `WAL_ttl_seconds` is nonzero, archived WALs older than * `WAL_ttl_seconds` will be deleted. - * + *

* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived * WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only * `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten @@ -643,17 +647,17 @@ public interface DBOptionsInterface> { /** * WalTtlSeconds() and walSizeLimitMB() affect when WALs will be archived and * deleted. - * + *

* When both are zero, obsolete WALs will not be archived and will be deleted * immediately. Otherwise, obsolete WALs will be archived prior to deletion. - * + *

* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the * earliest will be deleted until the total size of the archive falls below * this limit. All empty WALs will be deleted. - * + *

* When `WAL_ttl_seconds` is nonzero, archived WALs older than * `WAL_ttl_seconds` will be deleted. - * + *

* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived * WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only * `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten @@ -668,17 +672,17 @@ public interface DBOptionsInterface> { /** * WalTtlSeconds() and walSizeLimitMB() affect how archived logs * will be deleted. - * + *

* When both are zero, obsolete WALs will not be archived and will be deleted * immediately. Otherwise, obsolete WALs will be archived prior to deletion. - * + *

* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the * earliest will be deleted until the total size of the archive falls below * this limit. All empty WALs will be deleted. - * + *

* When `WAL_ttl_seconds` is nonzero, archived WALs older than * `WAL_ttl_seconds` will be deleted. - * + *

* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived * WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only * `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten @@ -694,17 +698,17 @@ public interface DBOptionsInterface> { /** * WalTtlSeconds() and walSizeLimitMB() affect when WALs will be archived and * deleted. - * + *

* When both are zero, obsolete WALs will not be archived and will be deleted * immediately. Otherwise, obsolete WALs will be archived prior to deletion. - * + *

* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the * earliest will be deleted until the total size of the archive falls below * this limit. All empty WALs will be deleted. - * + *

* When `WAL_ttl_seconds` is nonzero, archived WALs older than * `WAL_ttl_seconds` will be deleted. - * + *

* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived * WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only * `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten @@ -720,7 +724,7 @@ public interface DBOptionsInterface> { * The maximum limit of number of bytes that are written in a single batch * of WAL or memtable write. It is followed when the leader write size * is larger than 1/8 of this limit. - * + *
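// [Editor's note: illustrative sketch, not part of the diff.] The WAL archival
// rules above in code: archived WALs are kept at most six hours, and the
// archive is trimmed once it exceeds 1GB. Both values are arbitrary examples.
import org.rocksdb.DBOptions;

try (final DBOptions options = new DBOptions()
         .setWalTtlSeconds(6 * 60 * 60) // delete archived WALs older than 6h
         .setWalSizeLimitMB(1024)) {    // and trim the archive beyond 1GB
  // with both nonzero, the TTL check runs every WAL_ttl_seconds / 2 seconds
}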

* Default: 1 MB * * @param maxWriteBatchGroupSizeBytes the maximum limit of number of bytes, see description. @@ -732,7 +736,7 @@ public interface DBOptionsInterface> { * The maximum limit of number of bytes that are written in a single batch * of WAL or memtable write. It is followed when the leader write size * is larger than 1/8 of this limit. - * + *

* Default: 1 MB * * @return the maximum limit of number of bytes, see description. @@ -885,13 +889,13 @@ public interface DBOptionsInterface> { /** * Amount of data to build up in memtables across all column * families before writing to disk. - * + *

* This is distinct from {@link ColumnFamilyOptions#writeBufferSize()}, * which enforces a limit for a single memtable. - * + *

* This feature is disabled by default. Specify a non-zero value * to enable it. - * + *

* Default: 0 (disabled) * * @param dbWriteBufferSize the size of the write buffer @@ -903,7 +907,7 @@ public interface DBOptionsInterface> { /** * Use passed {@link WriteBufferManager} to control memory usage across * multiple column families and/or DB instances. - * + *

* Check * https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager * for more details on when to use it @@ -925,13 +929,13 @@ public interface DBOptionsInterface> { /** * Amount of data to build up in memtables across all column * families before writing to disk. - * + *
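// [Editor's note: illustrative sketch, not part of the diff.] Capping memtable
// memory across column families, per the db_write_buffer_size and
// WriteBufferManager javadocs above. The 128MB budget is arbitrary; the manager
// charges its usage against the supplied cache.
import org.rocksdb.*;

final long budget = 128L * 1024L * 1024L;
try (final Cache cache = new LRUCache(budget);
     final WriteBufferManager writeBufferManager = new WriteBufferManager(budget, cache);
     final Options options = new Options().setWriteBufferManager(writeBufferManager)) {
  // simpler alternative without a manager: options.setDbWriteBufferSize(budget);
}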

* This is distinct from {@link ColumnFamilyOptions#writeBufferSize()}, * which enforces a limit for a single memtable. - * + *

* This feature is disabled by default. Specify a non-zero value * to enable it. - * + *

* Default: 0 (disabled) * * @return the size of the write buffer @@ -964,7 +968,7 @@ public interface DBOptionsInterface> { /** * Sets the {@link EventListener}s whose callback functions * will be called when specific RocksDB event happens. - * + *

* Note: the RocksJava API currently only supports EventListeners implemented in Java. * It could be extended in future to also support adding/removing EventListeners implemented in * C++. @@ -978,7 +982,7 @@ public interface DBOptionsInterface> { /** * Sets the {@link EventListener}s whose callback functions * will be called when specific RocksDB event happens. - * + *

* Note: the RocksJava API currently only supports EventListeners implemented in Java. * It could be extended in future to also support adding/removing EventListeners implemented in * C++. @@ -990,7 +994,7 @@ public interface DBOptionsInterface> { /** * If true, then the status of the threads involved in this DB will * be tracked and available via GetThreadList() API. - * + *

* Default: false * * @param enableThreadTracking true to enable tracking @@ -1002,7 +1006,7 @@ public interface DBOptionsInterface> { /** * If true, then the status of the threads involved in this DB will * be tracked and available via GetThreadList() API. - * + *

* Default: false * * @return true if tracking is enabled @@ -1013,7 +1017,7 @@ public interface DBOptionsInterface> { * By default, a single write thread queue is maintained. The thread gets * to the head of the queue becomes write batch group leader and responsible * for writing to WAL and memtable for the batch group. - * + *

* If {@link #enablePipelinedWrite()} is true, separate write thread queue is * maintained for WAL write and memtable write. A write thread first enter WAL * writer queue and then memtable writer queue. Pending thread on the WAL @@ -1021,7 +1025,7 @@ public interface DBOptionsInterface> { * WAL writing but not the memtable writing. Enabling the feature may improve * write throughput and reduce latency of the prepare phase of two-phase * commit. - * + *

* Default: false * * @param enablePipelinedWrite true to enabled pipelined writes @@ -1048,7 +1052,7 @@ public interface DBOptionsInterface> { * throughput. Using TransactionDB with WRITE_PREPARED write policy and * {@link #twoWriteQueues()} true is one way to achieve immutable snapshots despite * unordered_write. - * + *

* By default, i.e., when it is false, rocksdb does not advance the sequence * number for new snapshots unless all the writes with lower sequence numbers * are already finished. This provides the immutability that we except from @@ -1193,7 +1197,7 @@ T setEnableWriteThreadAdaptiveYield( * compaction decision by loading table properties from many files. * Turning off this feature will improve DBOpen time especially in * disk environment. - * + *

* Default: false * * @param skipStatsUpdateOnDbOpen true if updating stats will be skipped @@ -1207,7 +1211,7 @@ T setEnableWriteThreadAdaptiveYield( * compaction decision by loading table properties from many files. * Turning off this feature will improve DBOpen time especially in * disk environment. - * + *

* Default: false * * @return true if updating stats will be skipped @@ -1221,7 +1225,7 @@ T setEnableWriteThreadAdaptiveYield( * We'll still check that all required sst files exist. * If {@code paranoid_checks} is false, this option is ignored, and sst files are * not checked at all. - * + *

* Default: false * * @param skipCheckingSstFileSizesOnDbOpen if true, then SST file sizes will not be checked @@ -1237,7 +1241,7 @@ T setEnableWriteThreadAdaptiveYield( * We'll still check that all required sst files exist. * If {@code paranoid_checks} is false, this option is ignored, and sst files are * not checked at all. - * + *

* Default: false * * @return true, if file sizes will not be checked when calling {@link RocksDB#open(String)}. @@ -1246,7 +1250,7 @@ T setEnableWriteThreadAdaptiveYield( /** * Recovery mode to control the consistency while replaying WAL - * + *

* Default: {@link WALRecoveryMode#PointInTimeRecovery} * * @param walRecoveryMode The WAL recover mode @@ -1257,7 +1261,7 @@ T setEnableWriteThreadAdaptiveYield( /** * Recovery mode to control the consistency while replaying WAL - * + *

* Default: {@link WALRecoveryMode#PointInTimeRecovery} * * @return The WAL recover mode @@ -1267,7 +1271,7 @@ T setEnableWriteThreadAdaptiveYield( /** * if set to false then recovery will fail when a prepared * transaction is encountered in the WAL - * + *

* Default: false * * @param allow2pc true if two-phase-commit is enabled @@ -1279,7 +1283,7 @@ T setEnableWriteThreadAdaptiveYield( /** * if set to false then recovery will fail when a prepared * transaction is encountered in the WAL - * + *

* Default: false * * @return true if two-phase-commit is enabled @@ -1288,7 +1292,7 @@ T setEnableWriteThreadAdaptiveYield( /** * A global cache for table-level rows. - * + *

* Default: null (disabled) * * @param rowCache The global row cache @@ -1299,7 +1303,7 @@ T setEnableWriteThreadAdaptiveYield( /** * A global cache for table-level rows. - * + *

* Default: null (disabled) * * @return The global row cache @@ -1331,7 +1335,7 @@ T setEnableWriteThreadAdaptiveYield( * If true, then DB::Open / CreateColumnFamily / DropColumnFamily * / SetOptions will fail if options file is not detected or properly * persisted. - * + *

* DEFAULT: false * * @param failIfOptionsFileError true if we should fail if there is an error @@ -1345,7 +1349,7 @@ T setEnableWriteThreadAdaptiveYield( * If true, then DB::Open / CreateColumnFamily / DropColumnFamily * / SetOptions will fail if options file is not detected or properly * persisted. - * + *

* DEFAULT: false * * @return true if we should fail if there is an error in the options file @@ -1355,7 +1359,7 @@ T setEnableWriteThreadAdaptiveYield( /** * If true, then print malloc stats together with rocksdb.stats * when printing to LOG. - * + *

* DEFAULT: false * * @param dumpMallocStats true if malloc stats should be printed to LOG @@ -1367,7 +1371,7 @@ T setEnableWriteThreadAdaptiveYield( /** * If true, then print malloc stats together with rocksdb.stats * when printing to LOG. - * + *

* DEFAULT: false * * @return true if malloc stats should be printed to LOG @@ -1380,7 +1384,7 @@ T setEnableWriteThreadAdaptiveYield( * to avoid (but not guarantee not to) flush during recovery. Also, existing * WAL logs will be kept, so that if crash happened before flush, we still * have logs to recover from. - * + *

* DEFAULT: false * * @param avoidFlushDuringRecovery true to try to avoid (but not guarantee @@ -1396,7 +1400,7 @@ T setEnableWriteThreadAdaptiveYield( * to avoid (but not guarantee not to) flush during recovery. Also, existing * WAL logs will be kept, so that if crash happened before flush, we still * have logs to recover from. - * + *

* DEFAULT: false * * @return true to try to avoid (but not guarantee not to) flush during @@ -1412,7 +1416,7 @@ T setEnableWriteThreadAdaptiveYield( * 1) Disable some internal optimizations around SST file compression * 2) Reserve bottom-most level for ingested files only. * 3) Note that num_levels should be >= 3 if this option is turned on. - * + *

* DEFAULT: false * * @param allowIngestBehind true to allow ingest behind, false to disallow. @@ -1435,7 +1439,7 @@ T setEnableWriteThreadAdaptiveYield( * allows the memtable writes not to lag behind other writes. It can be used * to optimize MySQL 2PC in which only the commits, which are serial, write to * memtable. - * + *

* DEFAULT: false * * @param twoWriteQueues true to enable two write queues, false otherwise. @@ -1455,7 +1459,7 @@ T setEnableWriteThreadAdaptiveYield( * If true WAL is not flushed automatically after each write. Instead it * relies on manual invocation of FlushWAL to write the WAL buffer to its * file. - * + *

* DEFAULT: false * * @param manualWalFlush true to set disable automatic WAL flushing, @@ -1483,7 +1487,7 @@ T setEnableWriteThreadAdaptiveYield( * For manual flush, application has to specify which column families to * flush atomically in {@link RocksDB#flush(FlushOptions, List)}. * For auto-triggered flush, RocksDB atomically flushes ALL column families. - * + *

* Currently, any WAL-enabled writes after atomic flush may be replayed * independently if the process crashes later and tries to recover. * @@ -1495,7 +1499,7 @@ T setEnableWriteThreadAdaptiveYield( /** * Determine if atomic flush of multiple column families is enabled. - * + *
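// [Editor's note: illustrative sketch, not part of the diff.] Manually flushing
// two column families as one atomic unit, as the setAtomicFlush javadoc above
// describes. `db`, `cfA` and `cfB` are placeholders for an open database and
// its handles, and the DB must have been opened with setAtomicFlush(true).
import java.util.Arrays;
import org.rocksdb.FlushOptions;

try (final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
  db.flush(flushOptions, Arrays.asList(cfA, cfB));
}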

* See {@link #setAtomicFlush(boolean)}. * * @return true if atomic flush is enabled. @@ -1596,7 +1600,7 @@ T setEnableWriteThreadAdaptiveYield( * The number of bytes to prefetch when reading the log. This is mostly useful * for reading a remotely located log, as it can save the number of * round-trips. If 0, then the prefetching is disabled. - * + *

* Default: 0 * * @param logReadaheadSize the number of bytes to prefetch when reading the log. @@ -1608,7 +1612,7 @@ T setEnableWriteThreadAdaptiveYield( * The number of bytes to prefetch when reading the log. This is mostly useful * for reading a remotely located log, as it can save the number of * round-trips. If 0, then the prefetching is disabled. - * + *

* Default: 0 * * @return the number of bytes to prefetch when reading the log. @@ -1651,7 +1655,7 @@ T setEnableWriteThreadAdaptiveYield( * can be auto-recovered (e.g., retryable IO Error during Flush or WAL write), * then db resume is called in background to recover from the error. If this * value is 0 or negative, db resume will not be called. - * + *

* Default: INT_MAX * * @param maxBgerrorResumeCount maximum number of times db resume should be called when IO Error @@ -1667,7 +1671,7 @@ T setEnableWriteThreadAdaptiveYield( * can be auto-recovered (e.g., retryable IO Error during Flush or WAL write), * then db resume is called in background to recover from the error. If this * value is 0 or negative, db resume will not be called. - * + *

* Default: INT_MAX * * @return maximum number of times db resume should be called when IO Error happens. @@ -1678,7 +1682,7 @@ T setEnableWriteThreadAdaptiveYield( * If max_bgerror_resume_count is ≥ 2, db resume is called multiple times. * This option decides how long to wait to retry the next resume if the * previous resume fails and satisfy redo resume conditions. - * + *

* Default: 1000000 (microseconds). * * @param bgerrorResumeRetryInterval how many microseconds to wait between DB resume attempts. @@ -1690,7 +1694,7 @@ T setEnableWriteThreadAdaptiveYield( * If max_bgerror_resume_count is ≥ 2, db resume is called multiple times. * This option decides how long to wait to retry the next resume if the * previous resume fails and satisfy redo resume conditions. - * + *

* Default: 1000000 (microseconds). * * @return the instance of the current object. @@ -1714,7 +1718,9 @@ T setEnableWriteThreadAdaptiveYield( * use "0:00-23:59". To make an entire day have no offpeak period, leave * this field blank. Default: Empty string (no offpeak). * - * @param offpeakTimeUTC String value from which to parse offpeak time range + * @param offpeakTimeUTC String value from which to parse offpeak time range. + * + * @return the instance of the current object. */ T setDailyOffpeakTimeUTC(final String offpeakTimeUTC); diff --git a/java/src/main/java/org/rocksdb/DbPath.java b/java/src/main/java/org/rocksdb/DbPath.java index 3f0b67557c5e..3895b258556e 100644 --- a/java/src/main/java/org/rocksdb/DbPath.java +++ b/java/src/main/java/org/rocksdb/DbPath.java @@ -14,6 +14,12 @@ public class DbPath { final Path path; final long targetSize; + /** + * Constructs a DbPath. + * + * @param path the path. + * @param targetSize the target size. + */ public DbPath(final Path path, final long targetSize) { this.path = path; this.targetSize = targetSize; diff --git a/java/src/main/java/org/rocksdb/DirectSlice.java b/java/src/main/java/org/rocksdb/DirectSlice.java index 88ec29e3bd65..be7b2cd9e546 100644 --- a/java/src/main/java/org/rocksdb/DirectSlice.java +++ b/java/src/main/java/org/rocksdb/DirectSlice.java @@ -16,6 +16,9 @@ * values consider using @see org.rocksdb.Slice */ public class DirectSlice extends AbstractSlice { + /** + * Constant for No Direct Slice. + */ public static final DirectSlice NONE = new DirectSlice(); /** @@ -110,6 +113,11 @@ public void removePrefix(final int n) { this.internalBufferOffset += n; } + /** + * Set the length of the direct slice. + * + * @param n the length. + */ public void setLength(final int n) { setLength0(getNativeHandle(), n); } diff --git a/java/src/main/java/org/rocksdb/Experimental.java b/java/src/main/java/org/rocksdb/Experimental.java index 64b404d6f195..13ac5a0e3b19 100644 --- a/java/src/main/java/org/rocksdb/Experimental.java +++ b/java/src/main/java/org/rocksdb/Experimental.java @@ -19,5 +19,10 @@ @Retention(RetentionPolicy.SOURCE) @Target({ElementType.TYPE, ElementType.METHOD}) public @interface Experimental { + /** + * A description explaining why the feature is experimental. + * + * @return the explanation of why the feature is experimental. + */ String value(); } diff --git a/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java b/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java index 7a99dd6bfe2f..4a348ab32389 100644 --- a/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java +++ b/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information about the ingestion of External Files. + */ public class ExternalFileIngestionInfo { private final String columnFamilyName; private final String externalFilePath; diff --git a/java/src/main/java/org/rocksdb/Filter.java b/java/src/main/java/org/rocksdb/Filter.java index 018807c0405a..9f42b84fa562 100644 --- a/java/src/main/java/org/rocksdb/Filter.java +++ b/java/src/main/java/org/rocksdb/Filter.java @@ -14,14 +14,19 @@ */ //TODO(AR) should be renamed FilterPolicy public abstract class Filter extends RocksObject { - + /** + * Constructs a filter. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ filter object. + */ protected Filter(final long nativeHandle) { super(nativeHandle); } /** * Deletes underlying C++ filter pointer. - * + *

* Note that this function should be called only after all * RocksDB instances referencing the filter are closed. * Otherwise an undefined behavior will occur. diff --git a/java/src/main/java/org/rocksdb/FilterPolicyType.java b/java/src/main/java/org/rocksdb/FilterPolicyType.java index 6a693ee4039d..c7051ac07be6 100644 --- a/java/src/main/java/org/rocksdb/FilterPolicyType.java +++ b/java/src/main/java/org/rocksdb/FilterPolicyType.java @@ -9,6 +9,9 @@ * IndexType used in conjunction with BlockBasedTable. */ public enum FilterPolicyType { + /** + * Unknown filter policy. + */ kUnknownFilterPolicy((byte) 0), /** @@ -25,7 +28,7 @@ public enum FilterPolicyType { */ kRibbonFilterPolicy((byte) 2); - public Filter createFilter(final long handle, final double param) { + Filter createFilter(final long handle, final double param) { if (this == kBloomFilterPolicy) { return new BloomFilter(handle, param); } diff --git a/java/src/main/java/org/rocksdb/FlushJobInfo.java b/java/src/main/java/org/rocksdb/FlushJobInfo.java index 414d3a2f332e..52af3afe1795 100644 --- a/java/src/main/java/org/rocksdb/FlushJobInfo.java +++ b/java/src/main/java/org/rocksdb/FlushJobInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information about a flush job. + */ public class FlushJobInfo { private final long columnFamilyId; private final String columnFamilyName; diff --git a/java/src/main/java/org/rocksdb/FlushReason.java b/java/src/main/java/org/rocksdb/FlushReason.java index 21abbb352134..177e5aa3656c 100644 --- a/java/src/main/java/org/rocksdb/FlushReason.java +++ b/java/src/main/java/org/rocksdb/FlushReason.java @@ -5,21 +5,83 @@ package org.rocksdb; +/** + * Reasons for a flush. + */ public enum FlushReason { + /** + * Other. + */ OTHERS((byte) 0x00), + + /** + * Get live files. + */ GET_LIVE_FILES((byte) 0x01), + + /** + * Shutdown. + */ SHUTDOWN((byte) 0x02), + + /** + * External file ingestion. + */ EXTERNAL_FILE_INGESTION((byte) 0x03), + + /** + * Manual compaction. + */ MANUAL_COMPACTION((byte) 0x04), + + /** + * Write buffer manager. + */ WRITE_BUFFER_MANAGER((byte) 0x05), + + /** + * Write buffer full. + */ WRITE_BUFFER_FULL((byte) 0x06), + + /** + * Test. + */ TEST((byte) 0x07), + + /** + * Delete file(s). + */ DELETE_FILES((byte) 0x08), + + /** + * Automatic compaction. + */ AUTO_COMPACTION((byte) 0x09), + + /** + * Manual flush. + */ MANUAL_FLUSH((byte) 0x0a), + + /** + * Error recovery. + */ ERROR_RECOVERY((byte) 0x0b), + + /** + * Error recovery retry flush. + */ ERROR_RECOVERY_RETRY_FLUSH((byte) 0x0c), + + /** + * Write Ahead Log full. + */ WAL_FULL((byte) 0x0d), + + /** + * Catch up after error recovery. + */ CATCH_UP_AFTER_ERROR_RECOVERY((byte) 0x0e); private final byte value; diff --git a/java/src/main/java/org/rocksdb/GetStatus.java b/java/src/main/java/org/rocksdb/GetStatus.java index a2afafe39ebd..0c9fbd27f8c8 100644 --- a/java/src/main/java/org/rocksdb/GetStatus.java +++ b/java/src/main/java/org/rocksdb/GetStatus.java @@ -12,7 +12,14 @@ * If the target of the fetch is not big enough, this may be bigger than the contents of the target. */ public class GetStatus { + /** + * The status of the request to fetch into the buffer. + */ public final Status status; + + /** + * The size of the data, which may be bigger than the buffer. 
+ */ public final int requiredSize; /** diff --git a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java index cc18b61d2260..9d490a017853 100644 --- a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java @@ -15,22 +15,40 @@ * and post a warning in the LOG. */ public class HashLinkedListMemTableConfig extends MemTableConfig { + /** + * The default number of buckets. + */ public static final long DEFAULT_BUCKET_COUNT = 50_000; + + /** + * The default size of huge TLB pages. + */ public static final long DEFAULT_HUGE_PAGE_TLB_SIZE = 0; + + /** + * The default log threshold for bucket entries. + */ public static final int DEFAULT_BUCKET_ENTRIES_LOG_THRES = 4096; - public static final boolean - DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH = true; - public static final int DEFAUL_THRESHOLD_USE_SKIPLIST = 256; /** - * HashLinkedListMemTableConfig constructor + * The default of whether to log when a bucket is flushed. + */ + public static final boolean DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH = true; + + /** + * The default threshold for determining when to use a Skip List. + */ + public static final int DEFAULT_THRESHOLD_USE_SKIPLIST = 256; + + /** + * Constructs a HashLinkedListMemTableConfig. */ public HashLinkedListMemTableConfig() { bucketCount_ = DEFAULT_BUCKET_COUNT; hugePageTlbSize_ = DEFAULT_HUGE_PAGE_TLB_SIZE; bucketEntriesLoggingThreshold_ = DEFAULT_BUCKET_ENTRIES_LOG_THRES; ifLogBucketDistWhenFlush_ = DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH; - thresholdUseSkiplist_ = DEFAUL_THRESHOLD_USE_SKIPLIST; + thresholdUseSkiplist_ = DEFAULT_THRESHOLD_USE_SKIPLIST; } /** diff --git a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java index 33991f90f729..8197a8879b2d 100644 --- a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java @@ -15,12 +15,23 @@ * and post a warning in the LOG. */ public class HashSkipListMemTableConfig extends MemTableConfig { + /** + * The default number of buckets. + */ public static final int DEFAULT_BUCKET_COUNT = 1_000_000; + + /** + * The default branching factor. + */ public static final int DEFAULT_BRANCHING_FACTOR = 4; + + /** + * The default skip list height. + */ public static final int DEFAULT_HEIGHT = 4; /** - * HashSkipListMemTableConfig constructor + * Constructs a HashSkipListMemTableConfig. */ public HashSkipListMemTableConfig() { bucketCount_ = DEFAULT_BUCKET_COUNT; diff --git a/java/src/main/java/org/rocksdb/HistogramData.java b/java/src/main/java/org/rocksdb/HistogramData.java index 81d890883487..1fdd0c26e9a7 100644 --- a/java/src/main/java/org/rocksdb/HistogramData.java +++ b/java/src/main/java/org/rocksdb/HistogramData.java @@ -5,6 +5,9 @@ package org.rocksdb; +/** + * Histogram Data. + */ public class HistogramData { private final double median_; private final double percentile95_; @@ -16,12 +19,34 @@ public class HistogramData { private final long sum_; private final double min_; + /** + * Constructs a HistogramData. + * + * @param median the median value. + * @param percentile95 the 95th percentile value. + * @param percentile99 the 99th percentile value. + * @param average the average value. + * @param standardDeviation the value of the standard deviation. 
+ */ public HistogramData(final double median, final double percentile95, final double percentile99, final double average, final double standardDeviation) { this(median, percentile95, percentile99, average, standardDeviation, 0.0, 0, 0, 0.0); } + /** + * Constructs a HistogramData. + * + * @param median the median value. + * @param percentile95 the 95th percentile value. + * @param percentile99 the 99th percentile value. + * @param average the average value. + * @param standardDeviation the value of the standard deviation. + * @param max the maximum value. + * @param count the number of values. + * @param sum the sum of the values. + * @param min the minimum value. + */ public HistogramData(final double median, final double percentile95, final double percentile99, final double average, final double standardDeviation, final double max, final long count, @@ -37,38 +62,83 @@ public HistogramData(final double median, final double percentile95, sum_ = sum; } + /** + * Get the median value. + * + * @return the median value. + */ public double getMedian() { return median_; } + /** + * Get the 95th percentile value. + * + * @return the 95th percentile value. + */ public double getPercentile95() { return percentile95_; } + /** + * Get the 99th percentile value. + * + * @return the 99th percentile value. + */ public double getPercentile99() { return percentile99_; } + /** + * Get the average value. + * + * @return the average value. + */ public double getAverage() { return average_; } + /** + * Get the value of the standard deviation. + * + * @return the value of the standard deviation. + */ public double getStandardDeviation() { return standardDeviation_; } + /** + * Get the maximum value. + * + * @return the maximum value. + */ public double getMax() { return max_; } + /** + * Get the number of values. + * + * @return the number of values. + */ public long getCount() { return count_; } + /** + * Get the sum of the values. + * + * @return the sum of the values. + */ public long getSum() { return sum_; } + /** + * Get the minimum value. + * + * @return the minimum value. + */ public double getMin() { return min_; } diff --git a/java/src/main/java/org/rocksdb/HistogramType.java b/java/src/main/java/org/rocksdb/HistogramType.java index b4a56cc07e0d..ea9523e0bd93 100644 --- a/java/src/main/java/org/rocksdb/HistogramType.java +++ b/java/src/main/java/org/rocksdb/HistogramType.java @@ -5,69 +5,179 @@ package org.rocksdb; +/** + * The types of histogram. + */ public enum HistogramType { - + /** + * DB Get. + */ DB_GET((byte) 0x0), + /** + * DB Write. + */ DB_WRITE((byte) 0x1), + /** + * Time spent in compaction. + */ COMPACTION_TIME((byte) 0x2), + /** + * CPU time spent in compaction. + */ COMPACTION_CPU_TIME((byte) 0x3), + /** + * Time spent in setting up sub-compaction. + */ SUBCOMPACTION_SETUP_TIME((byte) 0x4), + /** + * Time spent in IO during table sync. + * Measured in microseconds. + */ TABLE_SYNC_MICROS((byte) 0x5), + /** + * Time spent in IO during compaction of outfile. + * Measured in microseconds. + */ COMPACTION_OUTFILE_SYNC_MICROS((byte) 0x6), + /** + * Time spent in IO during WAL file sync. + * Measured in microseconds. + */ WAL_FILE_SYNC_MICROS((byte) 0x7), + /** + * Time spent in IO during manifest file sync. + * Measured in microseconds. + */ MANIFEST_FILE_SYNC_MICROS((byte) 0x8), /** - * TIME SPENT IN IO DURING TABLE OPEN. + * Time spent in IO during table open. + * Measured in microseconds. */ TABLE_OPEN_IO_MICROS((byte) 0x9), + /** + * DB Multi-Get. 
+ */ DB_MULTIGET((byte) 0xA), + /** + * Time spent in block reads during compaction. + * Measured in microseconds. + */ READ_BLOCK_COMPACTION_MICROS((byte) 0xB), + /** + * Time spent in block reads. + * Measured in microseconds. + */ READ_BLOCK_GET_MICROS((byte) 0xC), + /** + * Time spent in raw block writes. + * Measured in microseconds. + */ WRITE_RAW_BLOCK_MICROS((byte) 0xD), + /** + * Number of files in a single compaction. + */ NUM_FILES_IN_SINGLE_COMPACTION((byte) 0xE), + /** + * DB Seek. + */ DB_SEEK((byte) 0xF), + /** + * Write stall. + */ WRITE_STALL((byte) 0x10), + /** + * Time spent in SST reads. + * Measured in microseconds. + */ SST_READ_MICROS((byte) 0x11), + /** + * File read during flush. + * Measured in microseconds. + */ FILE_READ_FLUSH_MICROS((byte) 0x12), + /** + * File read during compaction. + * Measured in microseconds. + */ FILE_READ_COMPACTION_MICROS((byte) 0x13), + /** + * File read during DB Open. + * Measured in microseconds. + */ FILE_READ_DB_OPEN_MICROS((byte) 0x14), + /** + * File read during DB Get. + * Measured in microseconds. + */ FILE_READ_GET_MICROS((byte) 0x15), + /** + * File read during DB Multi-Get. + * Measured in microseconds. + */ FILE_READ_MULTIGET_MICROS((byte) 0x16), + /** + * File read during DB Iterator. + * Measured in microseconds. + */ FILE_READ_DB_ITERATOR_MICROS((byte) 0x17), + /** + * File read during DB checksum validation. + * Measured in microseconds. + */ FILE_READ_VERIFY_DB_CHECKSUM_MICROS((byte) 0x18), + /** + * File read during file checksum validation. + * Measured in microseconds. + */ FILE_READ_VERIFY_FILE_CHECKSUMS_MICROS((byte) 0x19), + /** + * Time spent writing SST files. + * Measured in microseconds. + */ SST_WRITE_MICROS((byte) 0x1A), + /** + * Time spent in writing SST table (currently only block-based table) or blob file for flush. + * Measured in microseconds. + */ FILE_WRITE_FLUSH_MICROS((byte) 0x1B), + /** + * Time spent in writing SST table (currently only block-based table) for compaction. + * Measured in microseconds. + */ FILE_WRITE_COMPACTION_MICROS((byte) 0x1C), + /** + * Time spent in writing SST table (currently only block-based table) or blob file for db open. + * Measured in microseconds. + */ FILE_WRITE_DB_OPEN_MICROS((byte) 0x1D), /** @@ -79,13 +189,34 @@ public enum HistogramType { * Value size distribution in each operation. */ BYTES_PER_READ((byte) 0x1F), + + /** + * Bytes per write. + * Value size distribution in each operation. + */ BYTES_PER_WRITE((byte) 0x20), + + /** + * Bytes per Multi-Get. + * Value size distribution in each operation. + */ BYTES_PER_MULTIGET((byte) 0x21), + /** + * Time spent in compression. + * Measured in nanoseconds. + */ COMPRESSION_TIMES_NANOS((byte) 0x22), + /** + * Time spent in decompression. + * Measured in nanoseconds. + */ DECOMPRESSION_TIMES_NANOS((byte) 0x23), + /** + * Number of merge operands for read. + */ READ_NUM_MERGE_OPERANDS((byte) 0x24), /** @@ -100,56 +231,67 @@ public enum HistogramType { /** * BlobDB Put/PutWithTTL/PutUntil/Write latency. + * Measured in microseconds. */ BLOB_DB_WRITE_MICROS((byte) 0x27), /** * BlobDB Get lagency. + * Measured in microseconds. */ BLOB_DB_GET_MICROS((byte) 0x28), /** * BlobDB MultiGet latency. + * Measured in microseconds. */ BLOB_DB_MULTIGET_MICROS((byte) 0x29), /** * BlobDB Seek/SeekToFirst/SeekToLast/SeekForPrev latency. + * Measured in microseconds. */ BLOB_DB_SEEK_MICROS((byte) 0x2A), /** * BlobDB Next latency. + * Measured in microseconds. */ BLOB_DB_NEXT_MICROS((byte) 0x2B), /** * BlobDB Prev latency. 
+ * Measured in microseconds. */ BLOB_DB_PREV_MICROS((byte) 0x2C), /** * Blob file write latency. + * Measured in microseconds. */ BLOB_DB_BLOB_FILE_WRITE_MICROS((byte) 0x2D), /** * Blob file read latency. + * Measured in microseconds. */ BLOB_DB_BLOB_FILE_READ_MICROS((byte) 0x2E), /** * Blob file sync latency. + * Measured in microseconds. */ BLOB_DB_BLOB_FILE_SYNC_MICROS((byte) 0x2F), /** * BlobDB compression time. + * Measured in microseconds. */ BLOB_DB_COMPRESSION_MICROS((byte) 0x30), /** * BlobDB decompression time. + * Measured in microseconds. */ BLOB_DB_DECOMPRESSION_MICROS((byte) 0x31), @@ -159,18 +301,17 @@ public enum HistogramType { FLUSH_TIME((byte) 0x32), /** - * Number of MultiGet batch keys overlapping a file + * Number of MultiGet batch keys overlapping a file. */ SST_BATCH_SIZE((byte) 0x33), /** - * Size of a single IO batch issued by MultiGet + * Size of a single IO batch issued by MultiGet. */ MULTIGET_IO_BATCH_SIZE((byte) 0x34), /** - * Num of Index and Filter blocks read from file system per level in MultiGet - * request + * Num of Index and Filter blocks read from file system per level in MultiGet request. */ NUM_INDEX_AND_FILTER_BLOCKS_READ_PER_LEVEL((byte) 0x35), @@ -185,12 +326,19 @@ public enum HistogramType { NUM_LEVEL_READ_PER_MULTIGET((byte) 0x37), /** - * The number of retry in auto resume + * The number of retry in auto resume. */ ERROR_HANDLER_AUTORESUME_RETRY_COUNT((byte) 0x38), + /** + * Bytes read asynchronously. + */ ASYNC_READ_BYTES((byte) 0x39), + /** + * Wait time for polling. + * Measured in microseconds. + */ POLL_WAIT_MICROS((byte) 0x3A), /** @@ -199,17 +347,20 @@ public enum HistogramType { PREFETCHED_BYTES_DISCARDED((byte) 0x3B), /** - * Wait time for aborting async read in FilePrefetchBuffer destructor + * Wait time for aborting async read in FilePrefetchBuffer destructor. + * Measured in microseconds. */ ASYNC_PREFETCH_ABORT_MICROS((byte) 0x3C), /** - * Number of bytes read for RocksDB's prefetching contents - * (as opposed to file system's prefetch) - * from the end of SST table during block based table open + * Number of bytes read for RocksDB's prefetching contents (as opposed to file system's prefetch) + * from the end of SST table during block based table open. */ TABLE_OPEN_PREFETCH_TAIL_READ_BYTES((byte) 0x3D), + /** + * Bytes prefetched during compaction. + */ COMPACTION_PREFETCH_BYTES((byte) 0x3F), /** @@ -217,12 +368,13 @@ public enum HistogramType { */ /** - * Time spent in Iterator::Prepare() for multi-scan (microseconds) + * Time spent in Iterator::Prepare() for multi-scan (microseconds). + * Measured in microseconds. */ MULTISCAN_PREPARE_MICROS((byte) 0x40), /** - * Number of blocks per multi-scan Prepare() call + * Number of blocks per multi-scan Prepare() call. */ MULTISCAN_BLOCKS_PER_PREPARE((byte) 0x41), diff --git a/java/src/main/java/org/rocksdb/Holder.java b/java/src/main/java/org/rocksdb/Holder.java index 716a0bda0736..dd088dcd767e 100644 --- a/java/src/main/java/org/rocksdb/Holder.java +++ b/java/src/main/java/org/rocksdb/Holder.java @@ -7,6 +7,8 @@ /** * Simple instance reference wrapper. + * + * @param the concrete type that this holder holds. 
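// [Editor's note: illustrative sketch, not part of the diff.] Reading one of
// the HistogramType entries documented above through the Statistics API; DB_GET
// and the database path are example values, and open/put/get throw
// RocksDBException.
import org.rocksdb.*;

try (final Statistics stats = new Statistics();
     final Options options = new Options().setCreateIfMissing(true).setStatistics(stats);
     final RocksDB db = RocksDB.open(options, "/tmp/histogram-example")) {
  db.put("key".getBytes(), "value".getBytes());
  db.get("key".getBytes());
  final HistogramData h = stats.getHistogramData(HistogramType.DB_GET);
  System.out.printf("median=%.1f p99=%.1f count=%d%n",
      h.getMedian(), h.getPercentile99(), h.getCount());
}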
*/ public class Holder { private /* @Nullable */ T value; diff --git a/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java index 652bd19dc8c1..100dd6eb897a 100644 --- a/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java @@ -12,6 +12,9 @@ * ExportImportFilesMetaData)}. */ public class ImportColumnFamilyOptions extends RocksObject { + /** + * Constructs an ImportColumnFamilyOptions. + */ public ImportColumnFamilyOptions() { super(newImportColumnFamilyOptions()); } diff --git a/java/src/main/java/org/rocksdb/IndexShorteningMode.java b/java/src/main/java/org/rocksdb/IndexShorteningMode.java index a68346c3823c..2d4b3f9ad629 100644 --- a/java/src/main/java/org/rocksdb/IndexShorteningMode.java +++ b/java/src/main/java/org/rocksdb/IndexShorteningMode.java @@ -11,7 +11,7 @@ * enabled ({@link DBOptions#useDirectReads()} == true). * The default mode is the best tradeoff for most use cases. * This option only affects newly written tables. - * + *

* The index contains a key separating each pair of consecutive blocks. * Let A be the highest key in one block, B the lowest key in the next block, * and I the index entry separating these two blocks: @@ -22,7 +22,7 @@ * However, if I=A, this can't happen, and we'll read only the second block. * In kNoShortening mode, we use I=A. In other modes, we use the shortest * key in [A, B), which usually significantly reduces index size. - * + *

* There's a similar story for the last index entry, which is an upper bound * of the highest key in the file. If it's shortened and therefore * overestimated, iterator is likely to unnecessarily read the last data block diff --git a/java/src/main/java/org/rocksdb/InfoLogLevel.java b/java/src/main/java/org/rocksdb/InfoLogLevel.java index 197bd89dab68..c5fda9acd7c3 100644 --- a/java/src/main/java/org/rocksdb/InfoLogLevel.java +++ b/java/src/main/java/org/rocksdb/InfoLogLevel.java @@ -5,12 +5,39 @@ * RocksDB log levels. */ public enum InfoLogLevel { + /** + * Log 'debug' level events. + */ DEBUG_LEVEL((byte)0), + + /** + * Log 'info' level events. + */ INFO_LEVEL((byte)1), + + /** + * Log 'warn' level events. + */ WARN_LEVEL((byte)2), + + /** + * Log 'error' level events. + */ ERROR_LEVEL((byte)3), + + /** + * Log 'fatal' level events. + */ FATAL_LEVEL((byte)4), + + /** + * Log 'header' level events. + */ HEADER_LEVEL((byte)5), + + /** + * The number of log levels available. + */ NUM_INFO_LOG_LEVELS((byte)6); private final byte value_; diff --git a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java index aed28131a17e..c9419f46ac41 100644 --- a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java +++ b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java @@ -11,12 +11,16 @@ * {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}. */ public class IngestExternalFileOptions extends RocksObject { - + /** + * Constructs an IngestExternalFileOptions. + */ public IngestExternalFileOptions() { super(newIngestExternalFileOptions()); } /** + * Constructs an IngestExternalFileOptions. + * * @param moveFiles {@link #setMoveFiles(boolean)} * @param snapshotConsistency {@link #setSnapshotConsistency(boolean)} * @param allowGlobalSeqNo {@link #setAllowGlobalSeqNo(boolean)} diff --git a/java/src/main/java/org/rocksdb/KeyMayExist.java b/java/src/main/java/org/rocksdb/KeyMayExist.java index 6149b85292aa..60317d264fd7 100644 --- a/java/src/main/java/org/rocksdb/KeyMayExist.java +++ b/java/src/main/java/org/rocksdb/KeyMayExist.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Indicates whether a key exists or not, and its corresponding value's length. + */ public class KeyMayExist { @Override public boolean equals(final Object o) { @@ -23,13 +26,45 @@ public int hashCode() { return Objects.hash(exists, valueLength); } - public enum KeyMayExistEnum { kNotExist, kExistsWithoutValue, kExistsWithValue } + /** + * Part of the return type from {@link RocksDB#keyMayExist(ColumnFamilyHandle, + * java.nio.ByteBuffer, java.nio.ByteBuffer)}. + */ + public enum KeyMayExistEnum { + /** + * Key does not exist. + */ + kNotExist, - public KeyMayExist(final KeyMayExistEnum exists, final int valueLength) { + /** + * Key may exist without a value. + */ + kExistsWithoutValue, + + /** + * Key may exist with a value. + */ + kExistsWithValue + } + + /** + * Constructs a KeyMayExist. + * + * @param exists indicates if the key exists. + * @param valueLength the length of the value pointed to by the key (if it exists). + */ + KeyMayExist(final KeyMayExistEnum exists, final int valueLength) { this.exists = exists; this.valueLength = valueLength; } + /** + * Indicates if the key exists. + */ public final KeyMayExistEnum exists; + + /** + * The length of the value pointed to by the key (if it exists). 
+ */ public final int valueLength; } diff --git a/java/src/main/java/org/rocksdb/LiveFileMetaData.java b/java/src/main/java/org/rocksdb/LiveFileMetaData.java index 5242496a315b..a15a5737489c 100644 --- a/java/src/main/java/org/rocksdb/LiveFileMetaData.java +++ b/java/src/main/java/org/rocksdb/LiveFileMetaData.java @@ -46,7 +46,8 @@ public int level() { return level; } - public long newLiveFileMetaDataHandle() { + @SuppressWarnings("PMD.UnusedPrivateMethod") + private long newLiveFileMetaDataHandle() { return newLiveFileMetaDataHandle(columnFamilyName(), columnFamilyName().length, level(), fileName(), path(), size(), smallestSeqno(), largestSeqno(), smallestKey(), smallestKey().length, largestKey(), largestKey().length, numReadsSampled(), diff --git a/java/src/main/java/org/rocksdb/LogFile.java b/java/src/main/java/org/rocksdb/LogFile.java index 5ee2c9fcc64a..2be597ce5f9b 100644 --- a/java/src/main/java/org/rocksdb/LogFile.java +++ b/java/src/main/java/org/rocksdb/LogFile.java @@ -5,6 +5,9 @@ package org.rocksdb; +/** + * A (journal) log file. + */ @SuppressWarnings("PMD.MissingStaticMethodInNonInstantiatableClass") public class LogFile { private final String pathName; diff --git a/java/src/main/java/org/rocksdb/Logger.java b/java/src/main/java/org/rocksdb/Logger.java index b8d0e45efa09..42cc2e2057d2 100644 --- a/java/src/main/java/org/rocksdb/Logger.java +++ b/java/src/main/java/org/rocksdb/Logger.java @@ -99,20 +99,43 @@ public InfoLogLevel infoLogLevel() { infoLogLevel(nativeHandle_)); } - @Override - public long getNativeHandle() { - return nativeHandle_; - } - @Override public final LoggerType getLoggerType() { return LoggerType.JAVA_IMPLEMENTATION; } + /** + * Log a message. + * + * @param logLevel the log level. + * @param logMsg the log message. + */ protected abstract void log(final InfoLogLevel logLevel, final String logMsg); + /** + * Create a new Logger with Options. + * + * @param logLevel the log level. + * + * @return the native handle to the underlying C++ native Logger object. + */ protected native long newLogger(final long logLevel); + + /** + * Set the log level. + * + * @param handle the native handle to the underlying C++ native Logger object. + * @param logLevel the log level. + */ protected native void setInfoLogLevel(final long handle, final byte logLevel); + + /** + * Get the log level. + * + * @param handle the native handle to the underlying C++ native Logger object. + * + * @return the log level. + */ protected native byte infoLogLevel(final long handle); /** diff --git a/java/src/main/java/org/rocksdb/MemTableInfo.java b/java/src/main/java/org/rocksdb/MemTableInfo.java index 3d429035a343..56396ac8d997 100644 --- a/java/src/main/java/org/rocksdb/MemTableInfo.java +++ b/java/src/main/java/org/rocksdb/MemTableInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information about a Mem Table. + */ public class MemTableInfo { private final String columnFamilyName; private final long firstSeqno; diff --git a/java/src/main/java/org/rocksdb/MergeOperator.java b/java/src/main/java/org/rocksdb/MergeOperator.java index c299f62210fa..9bf93c8b5a62 100644 --- a/java/src/main/java/org/rocksdb/MergeOperator.java +++ b/java/src/main/java/org/rocksdb/MergeOperator.java @@ -12,7 +12,13 @@ * value. */ public abstract class MergeOperator extends RocksObject { - protected MergeOperator(final long nativeHandle) { - super(nativeHandle); - } + /** + * Constructs a MergeOperator. 
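// [Editor's note: illustrative sketch, not part of the diff.] Consuming the
// KeyMayExist result documented above through the ByteBuffer-based
// RocksDB.keyMayExist overload its own javadoc references; `db` and
// `columnFamilyHandle` are placeholders, buffer sizes arbitrary.
import java.nio.ByteBuffer;
import org.rocksdb.KeyMayExist;

final ByteBuffer key = ByteBuffer.allocateDirect(16).put("key".getBytes());
key.flip();
final ByteBuffer value = ByteBuffer.allocateDirect(64);
final KeyMayExist result = db.keyMayExist(columnFamilyHandle, key, value);
switch (result.exists) {
  case kNotExist:           /* definitely not present */ break;
  case kExistsWithoutValue: /* may exist; no value fetched */ break;
  case kExistsWithValue:    /* `value` holds up to result.valueLength bytes */ break;
}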
+ * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ MergeOperator. + */ + protected MergeOperator(final long nativeHandle) { + super(nativeHandle); + } } diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java index e54db7171e54..b58098119e9e 100644 --- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java @@ -7,6 +7,9 @@ import java.util.*; +/** + * Mutable Column Family Options. + */ public class MutableColumnFamilyOptions extends AbstractMutableOptions { /** * User must use builder pattern, or parser. @@ -54,24 +57,87 @@ public static MutableColumnFamilyOptionsBuilder parse( return new MutableColumnFamilyOptionsBuilder().fromParsed(parsedOptions, ignoreUnknown); } + /** + * Parses a String representation of MutableColumnFamilyOptions + *

+ * The format is: key1=value1;key2=value2;key3=value3 etc + * <p>
+ * For int[] values, each int should be separated by a colon, e.g. + * <p>
+ * key1=value1;intArrayKey1=1:2:3 + * + * @param str The string representation of the mutable column family options + * + * @return A builder for the mutable column family options + */ public static MutableColumnFamilyOptionsBuilder parse(final String str) { return parse(str, false); } private interface MutableColumnFamilyOptionKey extends MutableOptionKey {} + /** + * Mem Table options. + */ public enum MemtableOption implements MutableColumnFamilyOptionKey { + /** + * Write buffer size. + */ write_buffer_size(ValueType.LONG), + + /** + * Arena block size. + */ arena_block_size(ValueType.LONG), + + /** + * Prefix size ratio for Memtable's Bloom Filter. + */ memtable_prefix_bloom_size_ratio(ValueType.DOUBLE), + + /** + * Whether to filter whole keys in the Memtable(s). + */ memtable_whole_key_filtering(ValueType.BOOLEAN), + + /** + * Number of bits for the prefix in Memtable's Bloom Filter. + */ @Deprecated memtable_prefix_bloom_bits(ValueType.INT), + + /** + * Number of probes for the prefix in Memtable's Bloom Filter. + */ @Deprecated memtable_prefix_bloom_probes(ValueType.INT), + + /** + * Huge Page Size for Memtable(s). + */ memtable_huge_page_size(ValueType.LONG), + + /** + * Maximum number of successive merges. + */ max_successive_merges(ValueType.LONG), + + /** + * Whether to filter deletes. + */ @Deprecated filter_deletes(ValueType.BOOLEAN), + + /** + * Maximum number of write buffers. + */ max_write_buffer_number(ValueType.INT), + + /** + * Number of in-place update locks. + */ inplace_update_num_locks(ValueType.LONG), + + /** + * Memory purge threshold. + */ experimental_mempurge_threshold(ValueType.DOUBLE); private final ValueType valueType; @@ -85,20 +151,78 @@ public ValueType getValueType() { } } + /** + * Compaction options. + */ public enum CompactionOption implements MutableColumnFamilyOptionKey { + /** + * Disable auto compaction. + */ disable_auto_compactions(ValueType.BOOLEAN), + + /** + * Soft limit on the number of bytes pending before compaction. + */ soft_pending_compaction_bytes_limit(ValueType.LONG), + + /** + * Hard limit on the number of bytes pending before compaction. + */ hard_pending_compaction_bytes_limit(ValueType.LONG), + + /** + * Number of files in Level 0 before compaction is triggered. + */ level0_file_num_compaction_trigger(ValueType.INT), + + /** + * Writes to Level 0 before a slowdown is triggered. + */ level0_slowdown_writes_trigger(ValueType.INT), + + /** + * Writes to Level 0 before a stop is triggered. + */ level0_stop_writes_trigger(ValueType.INT), + + /** + * Max compaction bytes. + */ max_compaction_bytes(ValueType.LONG), + + /** + * Target for the base size of files. + */ target_file_size_base(ValueType.LONG), + + /** + * Multiplier for the size of files. + */ target_file_size_multiplier(ValueType.INT), + + /** + * Maximum size in bytes for level base. + */ max_bytes_for_level_base(ValueType.LONG), + + /** + * Maximum bytes for level multiplier. + */ max_bytes_for_level_multiplier(ValueType.INT), + + /** + * Maximum bytes for level multiplier(s) additional + */ max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY), + + /** + * Time-to-live. + */ ttl(ValueType.LONG), + + /** + * Compaction period in seconds. + */ periodic_compaction_seconds(ValueType.LONG); private final ValueType valueType; @@ -112,16 +236,58 @@ public ValueType getValueType() { } } + /** + * Blob options. + */ public enum BlobOption implements MutableColumnFamilyOptionKey { + /** + * Enable BLOB files. 
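// [Editor's note: illustrative sketch, not part of the diff.] The parse format
// documented above, applied to a live column family; note the colon-separated
// int array. `db` and `columnFamilyHandle` are placeholders, and setOptions
// throws RocksDBException.
import org.rocksdb.MutableColumnFamilyOptions;

final MutableColumnFamilyOptions mutable = MutableColumnFamilyOptions
    .parse("write_buffer_size=67108864;disable_auto_compactions=false;"
        + "max_bytes_for_level_multiplier_additional=2:3:5")
    .build();
db.setOptions(columnFamilyHandle, mutable);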
+ */ enable_blob_files(ValueType.BOOLEAN), + + /** + * Minimum BLOB size. + */ min_blob_size(ValueType.LONG), + + /** + * BLOB file size. + */ blob_file_size(ValueType.LONG), + + /** + * BLOB compression type. + */ blob_compression_type(ValueType.ENUM), + + /** + * Enable BLOB garbage collection. + */ enable_blob_garbage_collection(ValueType.BOOLEAN), + + /** + * BLOB garbage collection age cut-off. + */ blob_garbage_collection_age_cutoff(ValueType.DOUBLE), + + /** + * Threshold for forcing BLOB garbage collection. + */ blob_garbage_collection_force_threshold(ValueType.DOUBLE), + + /** + * BLOB compaction read-ahead size. + */ blob_compaction_readahead_size(ValueType.LONG), + + /** + * BLOB file starting level. + */ blob_file_starting_level(ValueType.INT), + + /** + * Prepopulate BLOB Cache. + */ prepopulate_blob_cache(ValueType.ENUM); private final ValueType valueType; @@ -135,10 +301,28 @@ public ValueType getValueType() { } } + /** + * Miscellaneous options. + */ public enum MiscOption implements MutableColumnFamilyOptionKey { + /** + * Maximum number of sequential keys to skip during iteration. + */ max_sequential_skip_in_iterations(ValueType.LONG), + + /** + * Whether to enable paranoid file checks. + */ paranoid_file_checks(ValueType.BOOLEAN), + + /** + * Whether to report background I/O stats. + */ report_bg_io_stats(ValueType.BOOLEAN), + + /** + * Compression type. + */ compression(ValueType.ENUM); private final ValueType valueType; @@ -152,6 +336,9 @@ public ValueType getValueType() { } } + /** + * Builder for constructing MutableColumnFamilyOptions. + */ public static class MutableColumnFamilyOptionsBuilder extends AbstractMutableOptionsBuilder implements MutableColumnFamilyOptionsInterface { diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java index 729b0e882788..c637989d82fa 100644 --- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java @@ -5,6 +5,11 @@ package org.rocksdb; +/** + * Interface for MutableColumnFamilyOptions. + * + * @param <T> the concrete type of the MutableColumnFamilyOptions. + */ public interface MutableColumnFamilyOptionsInterface<T extends MutableColumnFamilyOptionsInterface<T>> extends AdvancedMutableColumnFamilyOptionsInterface<T> { diff --git a/java/src/main/java/org/rocksdb/MutableDBOptions.java b/java/src/main/java/org/rocksdb/MutableDBOptions.java index 894154e0df45..07d4624b6035 100644 --- a/java/src/main/java/org/rocksdb/MutableDBOptions.java +++ b/java/src/main/java/org/rocksdb/MutableDBOptions.java @@ -10,6 +10,9 @@ import java.util.Map; import java.util.Objects; +/** + * Mutable Database Options. + */ public class MutableDBOptions extends AbstractMutableOptions { /** * User must use builder pattern, or parser. */ @@ -55,29 +58,111 @@ public static MutableDBOptionsBuilder parse(final String str, final boolean igno return new MutableDBOptions.MutableDBOptionsBuilder().fromParsed(parsedOptions, ignoreUnknown); } + /** + * Parses a String representation of MutableDBOptions + *

+ * The format is: key1=value1;key2=value2;key3=value3 etc. + *

+ * For int[] values, each int should be separated by a colon, e.g. + *

+ * key1=value1;intArrayKey1=1:2:3 + * + * @param str The string representation of the mutable db options + * + * @return A builder for the mutable db options + */ public static MutableDBOptionsBuilder parse(final String str) { return parse(str, false); } private interface MutableDBOptionKey extends MutableOptionKey {} + /** + * Database options. + */ public enum DBOption implements MutableDBOptionKey { + /** + * Maximum number of background jobs. + */ max_background_jobs(ValueType.INT), + + /** + * Maximum number of background compactions. + */ max_background_compactions(ValueType.INT), + + /** + * Whether to avoid flush during shutdown. + */ avoid_flush_during_shutdown(ValueType.BOOLEAN), + + /** + * Max buffer size for writing to files. + */ writable_file_max_buffer_size(ValueType.LONG), + + /** + * Delayed write rate. + */ delayed_write_rate(ValueType.LONG), + + /** + * Maximum total size of the WAL. + */ max_total_wal_size(ValueType.LONG), + + /** + * The period to delete obsolete files. + * Measured in microseconds. + */ delete_obsolete_files_period_micros(ValueType.LONG), + + /** + * The period to dump statistics. + * Measured in seconds. + */ stats_dump_period_sec(ValueType.INT), + + /** + * The period that statistics persist. + * Measured in seconds. + */ stats_persist_period_sec(ValueType.INT), + + /** + * Buffer size for statistics history. + */ stats_history_buffer_size(ValueType.LONG), + + /** + * Maximum number of open files. + */ max_open_files(ValueType.INT), + + /** + * Bytes per sync. + */ bytes_per_sync(ValueType.LONG), + + /** + * WAL bytes per sync. + */ wal_bytes_per_sync(ValueType.LONG), + + /** + * Strict limit of bytes per sync. + */ strict_bytes_per_sync(ValueType.BOOLEAN), + + /** + * Compaction readahead size. + */ compaction_readahead_size(ValueType.LONG), + /** + * Signifies periods characterized by significantly less read and write activity compared to + * other times. + */ daily_offpeak_time_utc(ValueType.STRING); private final ValueType valueType; @@ -91,6 +176,9 @@ public ValueType getValueType() { } } + /** + * Builder for constructing MutableDBOptions. + */ public static class MutableDBOptionsBuilder extends AbstractMutableOptionsBuilder implements MutableDBOptionsInterface { diff --git a/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java index 37c654454af3..2971928a67e6 100644 --- a/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java @@ -1,6 +1,11 @@ // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. package org.rocksdb; +/** + * Interface for MutableDBOptions. + * + * @param <T> the concrete type of DBOptions. + */ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T>> { /** * Specifies the maximum number of concurrent background jobs (both flushes @@ -455,7 +460,9 @@ public interface MutableDBOptionsInterface diff --git a/java/src/main/java/org/rocksdb/MutableOptionValue.java b/java/src/main/java/org/rocksdb/MutableOptionValue.java +/** + * @param <T> the concrete type of the value. + */ public abstract class MutableOptionValue<T> { abstract double asDouble() throws NumberFormatException; diff --git a/java/src/main/java/org/rocksdb/OperationStage.java b/java/src/main/java/org/rocksdb/OperationStage.java index 6ac0a15a2442..2ded8d8a642d 100644 --- a/java/src/main/java/org/rocksdb/OperationStage.java +++ b/java/src/main/java/org/rocksdb/OperationStage.java @@ -9,16 +9,59 @@ * The operation stage. */ public enum OperationStage { + /** + * Unknown. + */ STAGE_UNKNOWN((byte)0x0), + + /** + * Flush.
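To make the option-string format documented above concrete, here is a minimal usage sketch of MutableDBOptions.parse(...), which this diff documents. The option names come from the DBOption enum above; applying the resulting builder via db.setDBOptions(builder) is assumed from the existing RocksJava API, and the values are purely illustrative.

    import org.rocksdb.MutableDBOptions;
    import org.rocksdb.MutableDBOptions.MutableDBOptionsBuilder;

    public class MutableDBOptionsParseSketch {
      public static void main(final String[] args) {
        // key=value pairs separated by ';', as described in the javadoc above
        final MutableDBOptionsBuilder builder =
            MutableDBOptions.parse("max_background_jobs=4;bytes_per_sync=1048576");
        // build() materialises the parsed options; the builder itself could be
        // applied to an open database, e.g. db.setDBOptions(builder) (assumed API)
        final MutableDBOptions mutableDbOptions = builder.build();
      }
    }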
+ */ STAGE_FLUSH_RUN((byte)0x1), + + /** + * Flush writing Level 0. + */ STAGE_FLUSH_WRITE_L0((byte)0x2), + + /** + * Preparing compaction. + */ STAGE_COMPACTION_PREPARE((byte)0x3), + + /** + * Compaction. + */ STAGE_COMPACTION_RUN((byte)0x4), + + /** + * Compaction processing a key-value. + */ STAGE_COMPACTION_PROCESS_KV((byte)0x5), + + /** + * Installing compaction. + */ STAGE_COMPACTION_INSTALL((byte)0x6), + + /** + * Compaction syncing a file. + */ STAGE_COMPACTION_SYNC_FILE((byte)0x7), + + /** + * Picking Memtable(s) to flush. + */ STAGE_PICK_MEMTABLES_TO_FLUSH((byte)0x8), + + /** + * Rolling back Memtable(s). + */ STAGE_MEMTABLE_ROLLBACK((byte)0x9), + + /** + * Installing Memtable flush results. + */ STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS((byte)0xA); private final byte value; diff --git a/java/src/main/java/org/rocksdb/OperationType.java b/java/src/main/java/org/rocksdb/OperationType.java index bf73534683cc..0279e9e3b2f4 100644 --- a/java/src/main/java/org/rocksdb/OperationType.java +++ b/java/src/main/java/org/rocksdb/OperationType.java @@ -12,9 +12,24 @@ * examples include compaction and flush. */ public enum OperationType { + /** + * Unknown. + */ OP_UNKNOWN((byte)0x0), + + /** + * Compaction. + */ OP_COMPACTION((byte)0x1), + + /** + * Flush. + */ OP_FLUSH((byte) 0x2), + + /** + * DB Open. + */ OP_DBOPEN((byte) 0x3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java index 4674eae010ef..d088cd1a43be 100644 --- a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java +++ b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java @@ -229,9 +229,9 @@ protected final void disposeInternal(final long handle) { } private static native void disposeInternalJni(final long handle); - protected static native long open(final long optionsHandle, - final String path) throws RocksDBException; - protected static native long[] open(final long handle, final String path, + private static native long open(final long optionsHandle, final String path) + throws RocksDBException; + private static native long[] open(final long handle, final String path, final byte[][] columnFamilyNames, final long[] columnFamilyOptions); private static native void closeDatabase(final long handle) throws RocksDBException; private static native long beginTransaction(final long handle, final long writeOptionsHandle); diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java index f4111c7b1875..22bf90901d43 100644 --- a/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java +++ b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java @@ -5,9 +5,14 @@ package org.rocksdb; +/** + * Options for an Optimistic Transaction. + */ public class OptimisticTransactionOptions extends RocksObject implements TransactionalOptions { - + /** + * Constructs an OptimisticTransactionOptions. + */ public OptimisticTransactionOptions() { super(newOptimisticTransactionOptions()); } diff --git a/java/src/main/java/org/rocksdb/OptionString.java b/java/src/main/java/org/rocksdb/OptionString.java index d9e7e2689108..645afb1dd4b1 100644 --- a/java/src/main/java/org/rocksdb/OptionString.java +++ b/java/src/main/java/org/rocksdb/OptionString.java @@ -9,6 +9,9 @@ import java.util.List; import java.util.Objects; +/** + * An option expressed as a String. 
+ */ @SuppressWarnings("PMD.AvoidStringBufferField") public class OptionString { private static final char kvPairSeparator = ';'; @@ -21,23 +24,51 @@ public class OptionString { private static final char escapeChar = '\\'; + /** + * The value of the option. + */ static class Value { final List list; final List complex; + /** + * Constructs a Value. + * + * @param list the list of values. + * @param complex the list of complex values. + */ public Value(final List list, final List complex) { this.list = list; this.complex = complex; } + /** + * Returns true if the value is a list. + * + * @return true if the value is a list, false otherwise. + */ public boolean isList() { return (this.list != null && this.complex == null); } + /** + * Constructs a value from a list. + * + * @param list a list of string values. + * + * @return the value. + */ public static Value fromList(final List list) { return new Value(list, null); } + /** + * Constructs a value from a complex value. + * + * @param complex the complex value. + * + * @return the value. + */ public static Value fromComplex(final List complex) { return new Value(null, complex); } @@ -256,6 +287,13 @@ private List parseComplex() { return entries; } + /** + * Parse a string into a list of entries. + * + * @param str the string. + * + * @return the list of entries. + */ public static List parse(final String str) { Objects.requireNonNull(str); diff --git a/java/src/main/java/org/rocksdb/Options.java b/java/src/main/java/org/rocksdb/Options.java index 675837df7a09..f8c869ba45bf 100644 --- a/java/src/main/java/org/rocksdb/Options.java +++ b/java/src/main/java/org/rocksdb/Options.java @@ -169,7 +169,7 @@ public Options optimizeForSmallDb() { @Override public Options optimizeForSmallDb(final Cache cache) { - optimizeForSmallDb(nativeHandle_, cache.getNativeHandle()); + optimizeForSmallDb(nativeHandle_, cache.nativeHandle_); return this; } @@ -2115,12 +2115,13 @@ public List tablePropertiesCollectorFactory() { * Set TablePropertiesCollectorFactory in underlying C++ object. * This method creates its own copy of the list. Caller is responsible for * closing all the instances in the list. - * @param factories + * + * @param factories the collector factories. */ public void setTablePropertiesCollectorFactory(List factories) { long[] factoryHandlers = new long[factories.size()]; for (int i = 0; i < factoryHandlers.length; i++) { - factoryHandlers[i] = factories.get(i).getNativeHandle(); + factoryHandlers[i] = factories.get(i).nativeHandle_; } setTablePropertiesCollectorFactory(nativeHandle_, factoryHandlers); } diff --git a/java/src/main/java/org/rocksdb/OptionsUtil.java b/java/src/main/java/org/rocksdb/OptionsUtil.java index 642599205d62..822ecb56dfa7 100644 --- a/java/src/main/java/org/rocksdb/OptionsUtil.java +++ b/java/src/main/java/org/rocksdb/OptionsUtil.java @@ -7,6 +7,9 @@ import java.util.List; +/** + * Utility functions to assist in working with Options. + */ public class OptionsUtil { /** * A static method to construct the DBOptions and ColumnFamilyDescriptors by diff --git a/java/src/main/java/org/rocksdb/PerfContext.java b/java/src/main/java/org/rocksdb/PerfContext.java index 2d9ac3203098..9e48ec79dbc1 100644 --- a/java/src/main/java/org/rocksdb/PerfContext.java +++ b/java/src/main/java/org/rocksdb/PerfContext.java @@ -5,11 +5,23 @@ package org.rocksdb; +/** + * Performance Context. + */ public class PerfContext extends RocksObject { + /** + * Constructs a PerfContext.
+ * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ PerfContext. + */ protected PerfContext(final long nativeHandle) { super(nativeHandle); } + /** + * Reset the performance context. + */ public void reset() { reset(nativeHandle_); } @@ -42,8 +54,8 @@ public long getBlockReadByte() { return getBlockReadByte(nativeHandle_); } - /* - @return total nanos spent on block reads + /** + * @return total nanos spent on block reads */ public long getBlockReadTime() { return getBlockReadTime(nativeHandle_); @@ -220,7 +232,7 @@ public long getBlobDecompressTime() { } /** - * total number of internal keys skipped over during iteration. + * Get the total number of internal keys skipped over during iteration. * There are several reasons for it: * 1. when calling Next(), the iterator is in the position of the previous * key, so that we'll need to skip it. It means this counter will always @@ -236,51 +248,64 @@ public long getBlobDecompressTime() { * hidden by the tombstones will be included here. * 4. symmetric cases for Prev() and SeekToLast() * internal_recent_skipped_count is not included in this counter. + * + * @return the total number of internal keys skipped over during iteration */ public long getInternalKeySkippedCount() { return getInternalKeySkippedCount(nativeHandle_); } /** - * Total number of deletes and single deletes skipped over during iteration + * Get the total number of deletes and single deletes skipped over during iteration * When calling Next(), Seek() or SeekToFirst(), after previous position * before calling Next(), the seek key in Seek() or the beginning for * SeekToFirst(), there may be one or more deleted keys before the next valid * key. Every deleted key is counted once. We don't recount here if there are * still older updates invalidated by the tombstones. + * + * @return total number of deletes and single deletes skipped over during iteration. */ public long getInternalDeleteSkippedCount() { return getInternalDeleteSkippedCount(nativeHandle_); } /** - * How many times iterators skipped over internal keys that are more recent + * Get how many times iterators skipped over internal keys that are more recent * than the snapshot that iterator is using. + * + * @return the number of times iterators skipped over internal keys that are more recent + * than the snapshot that iterator is using. */ public long getInternalRecentSkippedCount() { return getInternalRecentSkippedCount(nativeHandle_); } /** - * How many merge operands were fed into the merge operator by iterators. + * Get how many merge operands were fed into the merge operator by iterators. * Note: base values are not included in the count. + * + * @return the number of merge operands that were fed into the merge operator by iterators. */ public long getInternalMergeCount() { return getInternalMergeCount(nativeHandle_); } /** - * How many merge operands were fed into the merge operator by point lookups. + * Get how many merge operands were fed into the merge operator by point lookups. * Note: base values are not included in the count. + * + * @return the number of merge operands that were fed into the merge operator by point lookups.
*/ public long getInternalMergePointLookupCount() { return getInternalMergePointLookupCount(nativeHandle_); } /** - * Number of times we reseeked inside a merging iterator, specifically to skip + * Get the number of times we re-seeked inside a merging iterator, specifically to skip * after or before a range of keys covered by a range deletion in a newer LSM * component. + * + * @return the number of times we re-seeked inside a merging iterator. */ public long getInternalRangeDelReseekCount() { return getInternalRangeDelReseekCount(nativeHandle_); @@ -485,26 +510,37 @@ public long getNewTableIteratorNanos() { } /** + * Get total time of mem table block seeks in nanoseconds. + * * @return Time spent on seeking a key in data/index blocks */ public long getBlockSeekNanos() { return getBlockSeekNanos(nativeHandle_); } + /** - * @return Time spent on finding or creating a table reader + * Get total time spent on finding or creating a table reader. + * + * @return the time spent on finding or creating a table reader */ public long getFindTableNanos() { return getFindTableNanos(nativeHandle_); } /** + * Get total number of mem table bloom hits. + * * @return total number of mem table bloom hits */ public long getBloomMemtableHitCount() { return getBloomMemtableHitCount(nativeHandle_); } - // total number of mem table bloom misses + /** + * Get total number of mem table bloom misses. + * + * @return total number of mem table bloom misses. + */ public long getBloomMemtableMissCount() { return getBloomMemtableMissCount(nativeHandle_); } @@ -544,91 +580,211 @@ public long getEnvNewSequentialFileNanos() { return getEnvNewSequentialFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating new random access file(s) in the environment. + * + * @return the total time + */ public long getEnvNewRandomAccessFileNanos() { return getEnvNewRandomAccessFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating new writable file(s) in the environment. + * + * @return the total time + */ public long getEnvNewWritableFileNanos() { return getEnvNewWritableFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for reusing writable file(s) in the environment. + * + * @return the total time + */ public long getEnvReuseWritableFileNanos() { return getEnvReuseWritableFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating new random access read-write file(s) in the + * environment. + * + * @return the total time + */ public long getEnvNewRandomRwFileNanos() { return getEnvNewRandomRwFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating new directory(s) in the environment. + * + * @return the total time + */ public long getEnvNewDirectoryNanos() { return getEnvNewDirectoryNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for checking if a file exists in the environment. + * + * @return the total time + */ public long getEnvFileExistsNanos() { return getEnvFileExistsNanos(nativeHandle_); } + + /** + * Get the time taken in nanoseconds for getting children in the environment. + * + * @return the total time + */ public long getEnvGetChildrenNanos() { return getEnvGetChildrenNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for getting children's file attributes in the environment.
+ * + * @return the total time + */ public long getEnvGetChildrenFileAttributesNanos() { return getEnvGetChildrenFileAttributesNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for deleting file(s) in the environment. + * + * @return the total time + */ public long getEnvDeleteFileNanos() { return getEnvDeleteFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating directory(s) in the environment. + * + * @return the total time + */ public long getEnvCreateDirNanos() { return getEnvCreateDirNanos(nativeHandle_); } + + /** + * Get the time taken in nanoseconds for creating directory(s) (only if not already existing) in + * the environment. + * + * @return the total time + */ public long getEnvCreateDirIfMissingNanos() { return getEnvCreateDirIfMissingNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for deleting directory(s) in the environment. + * + * @return the total time + */ public long getEnvDeleteDirNanos() { return getEnvDeleteDirNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for getting file size(s) in the environment. + * + * @return the total time + */ public long getEnvGetFileSizeNanos() { return getEnvGetFileSizeNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for getting file modification time(s) in the environment. + * + * @return the total time + */ public long getEnvGetFileModificationTimeNanos() { return getEnvGetFileModificationTimeNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for renaming file(s) in the environment. + * + * @return the total time + */ public long getEnvRenameFileNanos() { return getEnvRenameFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for linking file(s) in the environment. + * + * @return the total time + */ public long getEnvLinkFileNanos() { return getEnvLinkFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for locking file(s) in the environment. + * + * @return the total time + */ public long getEnvLockFileNanos() { return getEnvLockFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for unlocking file(s) in the environment. + * + * @return the total time + */ public long getEnvUnlockFileNanos() { return getEnvUnlockFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating loggers in the environment. + * + * @return the total time + */ public long getEnvNewLoggerNanos() { return getEnvNewLoggerNanos(nativeHandle_); } + /** + * Get the CPU time consumed by 'get' operations. + * + * @return the total time + */ public long getGetCpuNanos() { return getGetCpuNanos(nativeHandle_); } + /** + * Get the CPU time consumed by calling 'next' on iterator(s). + * + * @return the total time + */ public long getIterNextCpuNanos() { return getIterNextCpuNanos(nativeHandle_); } + + /** + * Get the CPU time consumed by calling 'prev' on iterator(s). + * + * @return the total time + */ public long getIterPrevCpuNanos() { return getIterPrevCpuNanos(nativeHandle_); } + /** + * Get the CPU time consumed by calling 'seek' on iterator(s). + * + * @return the total time + */ public long getIterSeekCpuNanos() { return getIterSeekCpuNanos(nativeHandle_); } @@ -647,6 +803,9 @@ public long getDecryptDataNanos() { return getDecryptDataNanos(nativeHandle_); } + /** + * @return the number of asynchronous seeks.
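Because the getters above only read per-thread counters, they are easiest to understand next to a short consumer; a minimal sketch follows. setPerfLevel(...) and getPerfContext() are documented later in this diff; the PerfLevel constant used here is assumed from the existing enum, and the key is purely illustrative.

    import org.rocksdb.PerfContext;
    import org.rocksdb.PerfLevel;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class PerfContextSketch {
      // assumes `db` is an already-open RocksDB instance
      static void dumpReadTimings(final RocksDB db) throws RocksDBException {
        db.setPerfLevel(PerfLevel.ENABLE_TIME_EXCEPT_FOR_MUTEX); // assumed constant
        final PerfContext ctx = db.getPerfContext(); // bound to the calling thread
        ctx.reset();                                 // clear previously accumulated counters
        db.get("some-key".getBytes());               // any read populates the context
        System.out.println("block read nanos: " + ctx.getBlockReadTime());
        System.out.println("find table nanos: " + ctx.getFindTableNanos());
      }
    }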
+ */ public long getNumberAsyncSeek() { return getNumberAsyncSeek(nativeHandle_); } diff --git a/java/src/main/java/org/rocksdb/PerfLevel.java b/java/src/main/java/org/rocksdb/PerfLevel.java index 332e6d7d977b..a0db8a3286c4 100644 --- a/java/src/main/java/org/rocksdb/PerfLevel.java +++ b/java/src/main/java/org/rocksdb/PerfLevel.java @@ -5,6 +5,9 @@ package org.rocksdb; +/** + * Performance monitoring levels. + */ public enum PerfLevel { /** * Unknown setting @@ -45,16 +48,31 @@ public enum PerfLevel { private final byte _value; + /** + * Get the internal representation value. + * + * @return the internal representation value. + */ public byte getValue() { return _value; } + /** + * Get the PerfLevel from the internal representation value. + * + * @param level the internal representation value. + * + * @return the PerfLevel + * + * @throws IllegalArgumentException if the value does not match a + * PerfLevel + */ public static PerfLevel getPerfLevel(byte level) { for (PerfLevel l : PerfLevel.values()) { if (l.getValue() == level) { return l; } } - throw new IllegalArgumentException("Uknknown PerfLevel constant : " + level); + throw new IllegalArgumentException("Unknown PerfLevel constant : " + level); } } diff --git a/java/src/main/java/org/rocksdb/PersistentCache.java b/java/src/main/java/org/rocksdb/PersistentCache.java index 900e7d1393bc..3ba3b9609dec 100644 --- a/java/src/main/java/org/rocksdb/PersistentCache.java +++ b/java/src/main/java/org/rocksdb/PersistentCache.java @@ -10,7 +10,17 @@ * cache is specifically designed for persistent read cache. */ public class PersistentCache extends RocksObject { - + /** + * Constructs a persistent cache. + * + * @param env the environment. + * @param path the path for the cache. + * @param size the size of the cache. + * @param logger the logger to use. + * @param optimizedForNvm true to optimize for NVM, false otherwise. + * + * @throws RocksDBException if the cache cannot be created. + */ public PersistentCache(final Env env, final String path, final long size, final Logger logger, final boolean optimizedForNvm) throws RocksDBException { diff --git a/java/src/main/java/org/rocksdb/PlainTableConfig.java b/java/src/main/java/org/rocksdb/PlainTableConfig.java index 1331f5b0a2ac..f577b7c6f14b 100644 --- a/java/src/main/java/org/rocksdb/PlainTableConfig.java +++ b/java/src/main/java/org/rocksdb/PlainTableConfig.java @@ -13,17 +13,51 @@ *

It also support prefix hash feature.

*/ public class PlainTableConfig extends TableFormatConfig { + /** + * Indicates that the key size can be variable length. + */ public static final int VARIABLE_LENGTH = 0; + + /** + * The default bits per key in the bloom filter. + */ public static final int DEFAULT_BLOOM_BITS_PER_KEY = 10; + + /** + * The default ratio of the hash table. + */ public static final double DEFAULT_HASH_TABLE_RATIO = 0.75; + + /** + * The default sparseness factor of the index. + */ public static final int DEFAULT_INDEX_SPARSENESS = 16; + + /** + * The default size of the huge TLB. + */ public static final int DEFAULT_HUGE_TLB_SIZE = 0; + + /** + * The default encoding type. + */ public static final EncodingType DEFAULT_ENCODING_TYPE = EncodingType.kPlain; + + /** + * The default full scan mode. + */ public static final boolean DEFAULT_FULL_SCAN_MODE = false; + + /** + * The default setting for whether to store the index in a file. + */ public static final boolean DEFAULT_STORE_INDEX_IN_FILE = false; + /** + * Constructs a PlainTableConfig with the default settings. + */ public PlainTableConfig() { keySize_ = VARIABLE_LENGTH; bloomBitsPerKey_ = DEFAULT_BLOOM_BITS_PER_KEY; diff --git a/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java b/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java index f1237aa7c95b..e7317363f9cc 100644 --- a/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java +++ b/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java @@ -18,7 +18,14 @@ * system since it involves network traffic and higher latencies.
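The PlainTableConfig defaults above are easiest to read next to a construction sketch. setAllowMmapReads(...), useFixedLengthPrefixExtractor(...) and setTableFormatConfig(...) are assumed from the existing Options API; plain table files are mmap-read and, per the class javadoc, support a prefix hash feature, hence the prefix extractor.

    import org.rocksdb.Options;
    import org.rocksdb.PlainTableConfig;

    public class PlainTableSketch {
      static Options plainTableOptions() {
        return new Options()
            .setCreateIfMissing(true)
            .setAllowMmapReads(true)           // plain table requires mmap reads
            .useFixedLengthPrefixExtractor(8)  // the prefix hash feature needs a prefix
            .setTableFormatConfig(new PlainTableConfig()); // the DEFAULT_* values above
      }
    }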

*/ public enum PrepopulateBlobCache { + /** + * Disable pre-populating the blob cache. + */ PREPOPULATE_BLOB_DISABLE((byte) 0x0, "prepopulate_blob_disable", "kDisable"), + + /** + * Only pre-populate on BLOB flush. + */ PREPOPULATE_BLOB_FLUSH_ONLY((byte) 0x1, "prepopulate_blob_flush_only", "kFlushOnly"); /** diff --git a/java/src/main/java/org/rocksdb/Priority.java b/java/src/main/java/org/rocksdb/Priority.java index 34a56edcbcde..44026ed67a0b 100644 --- a/java/src/main/java/org/rocksdb/Priority.java +++ b/java/src/main/java/org/rocksdb/Priority.java @@ -9,9 +9,24 @@ * The Thread Pool priority. */ public enum Priority { + /** + * Bottom-most priority. + */ BOTTOM((byte) 0x0), + + /** + * Low priority. + */ LOW((byte) 0x1), + + /** + * High priority. + */ HIGH((byte)0x2), + + /** + * Maximum number of priority levels. + */ TOTAL((byte)0x3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/Range.java b/java/src/main/java/org/rocksdb/Range.java index 74c85e5f04f3..16f4dbe2567e 100644 --- a/java/src/main/java/org/rocksdb/Range.java +++ b/java/src/main/java/org/rocksdb/Range.java @@ -12,6 +12,13 @@ public class Range { final Slice start; final Slice limit; + /** + * Constructs a Range. + * + * @param start the start of the range + * @param limit the end (exclusive) of the range + */ public Range(final Slice start, final Slice limit) { this.start = start; this.limit = limit; diff --git a/java/src/main/java/org/rocksdb/RateLimiter.java b/java/src/main/java/org/rocksdb/RateLimiter.java index 4fa5551b7876..8cfe028d53f4 100644 --- a/java/src/main/java/org/rocksdb/RateLimiter.java +++ b/java/src/main/java/org/rocksdb/RateLimiter.java @@ -12,10 +12,25 @@ * @since 3.10.0 */ public class RateLimiter extends RocksObject { + /** + * The default refill period in microseconds. + */ public static final long DEFAULT_REFILL_PERIOD_MICROS = 100 * 1000; + + /** + * The default fairness parameter value. + */ public static final int DEFAULT_FAIRNESS = 10; + + /** + * The default rate limiter mode. + */ public static final RateLimiterMode DEFAULT_MODE = RateLimiterMode.WRITES_ONLY; + + /** + * The default of whether to enable auto-tune. + */ public static final boolean DEFAULT_AUTOTUNE = false; /** diff --git a/java/src/main/java/org/rocksdb/RateLimiterMode.java b/java/src/main/java/org/rocksdb/RateLimiterMode.java index 4b029d8165e2..68ea265d98ec 100644 --- a/java/src/main/java/org/rocksdb/RateLimiterMode.java +++ b/java/src/main/java/org/rocksdb/RateLimiterMode.java @@ -9,8 +9,19 @@ * Mode for {@link RateLimiter#RateLimiter(long, long, int, RateLimiterMode)}. */ public enum RateLimiterMode { + /** + * Only rate limit reads. + */ READS_ONLY((byte)0x0), + + /** + * Only rate limit writes. + */ WRITES_ONLY((byte)0x1), + + /** + * Rate limit all IO. + */ ALL_IO((byte)0x2); private final byte value; diff --git a/java/src/main/java/org/rocksdb/ReadOptions.java b/java/src/main/java/org/rocksdb/ReadOptions.java index 8cc9883d23cd..f141ef55e747 100644 --- a/java/src/main/java/org/rocksdb/ReadOptions.java +++ b/java/src/main/java/org/rocksdb/ReadOptions.java @@ -12,11 +12,16 @@ * become out-of-scope to release the allocated memory in c++. */ public class ReadOptions extends RocksObject { + /** + * Constructs a ReadOptions. + */ public ReadOptions() { super(newReadOptions()); } /** + * Constructs a ReadOptions. + * * @param verifyChecksums verification will be performed on every read * when set to true * @param fillCache if true, then fill-cache behavior will be performed.
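The RateLimiter defaults documented above map directly onto its widest constructor; the five-argument overload and Options#setRateLimiter(...) are assumed from the existing RocksJava API, and the rate used is purely illustrative.

    import org.rocksdb.RateLimiter;
    import org.rocksdb.RateLimiterMode;

    public class RateLimiterSketch {
      public static void main(final String[] args) {
        try (final RateLimiter limiter = new RateLimiter(
                 10L * 1024 * 1024,                        // 10 MiB/s write budget
                 RateLimiter.DEFAULT_REFILL_PERIOD_MICROS, // 100 * 1000 us = 100 ms
                 RateLimiter.DEFAULT_FAIRNESS,             // the documented fairness default (10)
                 RateLimiterMode.WRITES_ONLY,              // the documented default mode
                 RateLimiter.DEFAULT_AUTOTUNE)) {          // auto-tune off by default
          // typically attached to a database via Options#setRateLimiter(limiter)
        }
      }
    }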
diff --git a/java/src/main/java/org/rocksdb/ReadTier.java b/java/src/main/java/org/rocksdb/ReadTier.java index 78f83f6ad657..43dd893c2ed2 100644 --- a/java/src/main/java/org/rocksdb/ReadTier.java +++ b/java/src/main/java/org/rocksdb/ReadTier.java @@ -9,9 +9,24 @@ * RocksDB {@link ReadOptions} read tiers. */ public enum ReadTier { + /** + * Read all tiers. + */ READ_ALL_TIER((byte)0), + + /** + * Read block cache. + */ BLOCK_CACHE_TIER((byte)1), + + /** + * Read persisted. + */ PERSISTED_TIER((byte)2), + + /** + * Read Memtable(s). + */ MEMTABLE_TIER((byte)3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java index e96694313b4a..52d0d90d4f76 100644 --- a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java @@ -6,10 +6,13 @@ package org.rocksdb; /** - * Just a Java wrapper around EmptyValueCompactionFilter implemented in C++ + * Just a Java wrapper around EmptyValueCompactionFilter implemented in C++. */ public class RemoveEmptyValueCompactionFilter extends AbstractCompactionFilter { + /** + * Constructs a RemoveEmptyValueCompactionFilter. + */ public RemoveEmptyValueCompactionFilter() { super(createNewRemoveEmptyValueCompactionFilter0()); } diff --git a/java/src/main/java/org/rocksdb/RocksCallbackObject.java b/java/src/main/java/org/rocksdb/RocksCallbackObject.java index 8a7c3713e9be..34cdd5d95b6c 100644 --- a/java/src/main/java/org/rocksdb/RocksCallbackObject.java +++ b/java/src/main/java/org/rocksdb/RocksCallbackObject.java @@ -13,16 +13,28 @@ * which are called from C++ via JNI. *

* RocksCallbackObject is the base-class for any RocksDB class that acts as a - * callback from some underlying underlying native C++ {@code rocksdb} object. + * callback from some underlying native C++ {@code rocksdb} object. + * Its implementation is always coupled with + * a C++ implementation of {@code ROCKSDB_NAMESPACE::JniCallback}. *

* The use of {@code RocksObject} should always be preferred over * {@link RocksCallbackObject} if callbacks are not required. */ public abstract class RocksCallbackObject extends AbstractImmutableNativeReference { - + /** + * An immutable reference to the value of the C++ pointer pointing to some + * underlying native RocksDB C++ object that + * extends {@code ROCKSDB_NAMESPACE::JniCallback}. + */ protected final long nativeHandle_; + /** + * Constructs a RocksCallbackObject. + * + * @param nativeParameterHandles reference to the value of the C++ pointers pointing to the + * underlying native RocksDB C++ objects. + */ protected RocksCallbackObject(final long... nativeParameterHandles) { super(true); this.nativeHandle_ = initializeNative(nativeParameterHandles); diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java index fe2f38af64f9..fe074c9ba60c 100644 --- a/java/src/main/java/org/rocksdb/RocksDB.java +++ b/java/src/main/java/org/rocksdb/RocksDB.java @@ -21,7 +21,14 @@ * indicates sth wrong at the RocksDB library side and the call failed. */ public class RocksDB extends RocksObject { + /** + * The name of the default column family. + */ public static final byte[] DEFAULT_COLUMN_FAMILY = "default".getBytes(UTF_8); + + /** + * A constant representing a result where something was searched for but not found. + */ public static final int NOT_FOUND = -1; private enum LibraryState { @@ -165,6 +172,11 @@ private static void waitForLibraryToBeLoaded() { } } + /** + * Get the RocksDB version. + * + * @return the version of RocksDB. + */ public static Version rocksdbVersion() { return version; } @@ -808,6 +820,9 @@ public List createColumnFamilies( * The ColumnFamilyHandle is automatically disposed with DB disposal. * * @param columnFamilyDescriptor column family to be created. + * @param importColumnFamilyOptions the options for the import. + * @param metadata the metadata for the imported file. + * * @return {@link org.rocksdb.ColumnFamilyHandle} instance. * * @throws RocksDBException thrown if error happens in underlying @@ -823,6 +838,21 @@ public ColumnFamilyHandle createColumnFamilyWithImport( columnFamilyDescriptor, importColumnFamilyOptions, metadatas); } + /** + * Creates a new column family with the name columnFamilyName, imports + * external SST files specified in `metadatas`, and allocates a + * ColumnFamilyHandle within an internal structure. + * The ColumnFamilyHandle is automatically disposed with DB disposal. + * + * @param columnFamilyDescriptor column family to be created. + * @param importColumnFamilyOptions the options for the import. + * @param metadatas the metadata for the imported files. + * + * @return {@link org.rocksdb.ColumnFamilyHandle} instance. + * + * @throws RocksDBException thrown if error happens in underlying + * native library.
+ */ public ColumnFamilyHandle createColumnFamilyWithImport( final ColumnFamilyDescriptor columnFamilyDescriptor, final ImportColumnFamilyOptions importColumnFamilyOptions, @@ -830,7 +860,7 @@ public ColumnFamilyHandle createColumnFamilyWithImport( final int metadataNum = metadatas.size(); final long[] metadataHandleList = new long[metadataNum]; for (int i = 0; i < metadataNum; i++) { - metadataHandleList[i] = metadatas.get(i).getNativeHandle(); + metadataHandleList[i] = metadatas.get(i).nativeHandle_; } final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(this, createColumnFamilyWithImport(nativeHandle_, columnFamilyDescriptor.getName(), @@ -857,10 +887,17 @@ public void dropColumnFamily(final ColumnFamilyHandle columnFamilyHandle) dropColumnFamily(nativeHandle_, columnFamilyHandle.nativeHandle_); } - // Bulk drop column families. This call only records drop records in the - // manifest and prevents the column families from flushing and compacting. - // In case of error, the request may succeed partially. User may call - // ListColumnFamilies to check the result. + /** + * Bulk drop column families. This call only records drop records in the + * manifest and prevents the column families from flushing and compacting. + * In case of error, the request may succeed partially. User may call + * {@link #listColumnFamilies(Options, String)} to check the result. + * + * @param columnFamilies the column families to drop. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ public void dropColumnFamilies( final List columnFamilies) throws RocksDBException { final long[] cfHandles = new long[columnFamilies.size()]; @@ -1716,6 +1753,19 @@ public void merge(final WriteOptions writeOpts, key, offset, len, value, vOffset, vLen); } + /** + * Add merge operand for key/value pair. + * + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ public void merge(final WriteOptions writeOpts, final ByteBuffer key, final ByteBuffer value) throws RocksDBException { if (key.isDirect() && value.isDirect()) { @@ -1734,6 +1784,20 @@ public void merge(final WriteOptions writeOpts, final ByteBuffer key, final Byte value.position(value.limit()); } + /** + * Add merge operand for key/value pair. + * + * @param columnFamilyHandle the column family. + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ public void merge(final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpts, final ByteBuffer key, final ByteBuffer value) throws RocksDBException { if (key.isDirect() && value.isDirect()) { @@ -1944,6 +2008,7 @@ public int get(final byte[] key, final int offset, final int len, * * @throws RocksDBException thrown if error happens in underlying * native library. + * @throws IllegalArgumentException if the arguments are invalid. 
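The ByteBuffer merge(...) overloads documented above are clearest in use; a minimal sketch follows, assuming a db opened with a merge operator configured (for example a string-append operator). Direct buffers keep the call on the key.isDirect() fast path shown in the implementation; the key and value are purely illustrative.

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.WriteOptions;

    public class ByteBufferMergeSketch {
      // assumes `db` was opened with a merge operator configured
      static void appendValue(final RocksDB db) throws RocksDBException {
        try (final WriteOptions writeOpts = new WriteOptions()) {
          final ByteBuffer key = ByteBuffer.allocateDirect(16)
              .put("k1".getBytes(StandardCharsets.UTF_8));
          final ByteBuffer value = ByteBuffer.allocateDirect(16)
              .put("v1".getBytes(StandardCharsets.UTF_8));
          key.flip();   // switch each buffer from writing to reading
          value.flip();
          db.merge(writeOpts, key, value); // both buffers direct: no extra copy in JNI
        }
      }
    }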
*/ public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final byte[] value) throws RocksDBException, IllegalArgumentException { @@ -1976,6 +2041,7 @@ public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, * * @throws RocksDBException thrown if error happens in underlying * native library. + * @throws IllegalArgumentException if the arguments are invalid. */ public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final int offset, final int len, final byte[] value, final int vOffset, @@ -2584,11 +2650,11 @@ public List multiGetByteBuffers(final ReadOptions readOptio * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * @param key byte array of a key to search for * @return true if key exists in database, otherwise false. @@ -2600,11 +2666,11 @@ public boolean keyExists(final byte[] key) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - + *
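A small sketch of the keyExists(...) contract repeated above: slower than keyMayExist(...), but never a false positive, and only one JNI crossing. The open db and the key layout are assumed for illustration.

    import java.nio.charset.StandardCharsets;
    import org.rocksdb.RocksDB;

    public class KeyExistsSketch {
      static boolean hasUser(final RocksDB db, final String userId) {
        final byte[] key = ("user:" + userId).getBytes(StandardCharsets.UTF_8);
        // exact answer: internally a keyMayExist-style check followed by a
        // confirming read, all performed on the native side
        return db.keyExists(key);
      }
    }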

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * @param key byte array of a key to search for * @param offset the offset of the "key" array to be used, must be @@ -2620,11 +2686,11 @@ public boolean keyExists(final byte[] key, final int offset, final int len) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2639,11 +2705,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final byte * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2663,11 +2729,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final byte * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param readOptions {@link ReadOptions} instance @@ -2682,11 +2748,11 @@ public boolean keyExists(final ReadOptions readOptions, final byte[] key) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param readOptions {@link ReadOptions} instance @@ -2706,11 +2772,11 @@ public boolean keyExists( * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2727,11 +2793,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2755,11 +2821,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param key ByteBuffer with key. Must be allocated as direct. @@ -2773,11 +2839,11 @@ public boolean keyExists(final ByteBuffer key) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2792,11 +2858,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final Byte * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param readOptions {@link ReadOptions} instance @@ -2811,11 +2877,11 @@ public boolean keyExists(final ReadOptions readOptions, final ByteBuffer key) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -3675,10 +3741,26 @@ public long[] getApproximateSizes(final List ranges, return getApproximateSizes(null, ranges, sizeApproximationFlags); } + /** + * Count and size. + */ public static class CountAndSize { + /** + * The count. + */ public final long count; + + /** + * The size. + */ public final long size; + /** + * Constructs a CountAndSize. + * + * @param count the count. + * @param size the size. + */ public CountAndSize(final long count, final long size) { this.count = count; this.size = size; @@ -3878,7 +3960,9 @@ public void setOptions( /** * Set performance level for rocksdb performance measurement. - * @param level + * + * @param level the performance level + * * @throws IllegalArgumentException for UNINITIALIZED and OUT_OF_BOUNDS values * as they can't be used for settings. */ @@ -3894,7 +3978,8 @@ public void setPerfLevel(final PerfLevel level) { /** * Return current performance level measurement settings. - * @return + * + * @return the performance level */ public PerfLevel getPerfLevel() { byte level = getPerfLevelNative(); @@ -3902,8 +3987,9 @@ public PerfLevel getPerfLevel() { } /** - * Return perf context bound to this thread. - * @return + * Return performance context bound to this thread. + * + * @return the performance context */ public PerfContext getPerfContext() { long native_handle = getPerfContextNative(); @@ -3911,7 +3997,7 @@ public PerfContext getPerfContext() { } /** - * Get the options for the column family handle + * Get the options for the column family handle. * * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} * instance, or null for the default column family. @@ -4326,6 +4412,9 @@ public void enableFileDeletions() throws RocksDBException { enableFileDeletions(nativeHandle_); } + /** + * Live files. + */ public static class LiveFiles { /** * The valid size of the manifest file. The manifest file is an ever growing @@ -4788,15 +4877,29 @@ private static long[] toRangeSliceHandles(final List ranges) { return rangeSliceHandles; } + /** + * Store the options instance. + * + * This is used to ensure it is correctly released later. + * + * @param options the options. + */ protected void storeOptionsInstance(final DBOptionsInterface options) { options_ = options; } - protected void storeDefaultColumnFamilyHandle(ColumnFamilyHandle columnFamilyHandle) { + /** + * Store the default column family handle. + * + * This is used to ensure it is correctly released later. + * + * @param columnFamilyHandle the handle of the default column family. + */ + protected void storeDefaultColumnFamilyHandle(final ColumnFamilyHandle columnFamilyHandle) { defaultColumnFamilyHandle_ = columnFamilyHandle; } - private static void checkBounds(int offset, int len, int size) { + private static void checkBounds(final int offset, final int len, final int size) { if ((offset | len | (offset + len) | (size - (offset + len))) < 0) { throw new IndexOutOfBoundsException(String.format("offset(%d), len(%d), size(%d)", offset, len, size)); } @@ -5085,28 +5188,56 @@ private static native void destroyDB(final String path, final long optionsHandle private static native int version(); + /** + * The DB Options. + */ protected DBOptionsInterface options_; private static Version version; + /** + * Representation of a 3-part version number, e.g. MAJOR.MINOR.PATCH.
+ */ public static class Version { private final byte major; private final byte minor; private final byte patch; + /** + * Constructs a new Version. + * + * @param major the major component of the version number. + * @param minor the minor component of the version number. + * @param patch the patch component of the version number. + */ public Version(final byte major, final byte minor, final byte patch) { this.major = major; this.minor = minor; this.patch = patch; } + /** + * Get the major component of the version number. + * + * @return the major component of the version number. + */ public int getMajor() { return major; } + /** + * Get the minor component of the version number. + * + * @return the minor component of the version number. + */ public int getMinor() { return minor; } + /** + * Get the patch component of the version number. + * + * @return the patch component of the version number. + */ public int getPatch() { return patch; } diff --git a/java/src/main/java/org/rocksdb/RocksDBException.java b/java/src/main/java/org/rocksdb/RocksDBException.java index 9df411d121cc..8a74c2f515b7 100644 --- a/java/src/main/java/org/rocksdb/RocksDBException.java +++ b/java/src/main/java/org/rocksdb/RocksDBException.java @@ -11,22 +11,38 @@ */ public class RocksDBException extends Exception { private static final long serialVersionUID = -5187634878466267120L; + + /** + * The error status that led to this exception. + */ /* @Nullable */ private final Status status; /** * The private construct used by a set of public static factory method. * - * @param msg the specified error message. + * @param message the specified error message. */ - public RocksDBException(final String msg) { - this(msg, null); + public RocksDBException(final String message) { + this(message, null); } - public RocksDBException(final String msg, final Status status) { - super(msg); + /** + * Constructs a RocksDBException. + * + * @param message the detail message. The detail message is saved for later retrieval by the + * {@link #getMessage()} method. + * @param status the error status that led to this exception. + */ + public RocksDBException(final String message, final Status status) { + super(message); this.status = status; } + /** + * Constructs a RocksDBException. + * + * @param status the error status that led to this exception. + */ public RocksDBException(final Status status) { super(status.getState() != null ? status.getState() : status.getCodeString()); diff --git a/java/src/main/java/org/rocksdb/RocksIterator.java b/java/src/main/java/org/rocksdb/RocksIterator.java index 8e331d51845c..fbb042aca13d 100644 --- a/java/src/main/java/org/rocksdb/RocksIterator.java +++ b/java/src/main/java/org/rocksdb/RocksIterator.java @@ -23,8 +23,15 @@ * @see org.rocksdb.RocksObject */ public class RocksIterator extends AbstractRocksIterator { - protected RocksIterator(final RocksDB rocksDB, final long nativeHandle) { - super(rocksDB, nativeHandle); + /** + * Constructs a RocksIterator. + * + * @param rocksDb the database. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ RocksIterator. 
+ */ + protected RocksIterator(final RocksDB rocksDb, final long nativeHandle) { + super(rocksDb, nativeHandle); } /** diff --git a/java/src/main/java/org/rocksdb/RocksIteratorInterface.java b/java/src/main/java/org/rocksdb/RocksIteratorInterface.java index 78f35e3f86a0..454ff95d6e62 100644 --- a/java/src/main/java/org/rocksdb/RocksIteratorInterface.java +++ b/java/src/main/java/org/rocksdb/RocksIteratorInterface.java @@ -133,6 +133,11 @@ public interface RocksIteratorInterface { /** * Similar to {@link #refresh()} but the iterator will be reading the latest DB state under the * given snapshot. + * + * @param snapshot the snapshot. + * + * @throws RocksDBException thrown if the operation is not supported or an error happens in the + * underlying native library */ void refresh(Snapshot snapshot) throws RocksDBException; } diff --git a/java/src/main/java/org/rocksdb/RocksMutableObject.java b/java/src/main/java/org/rocksdb/RocksMutableObject.java index eb3215290f84..3a69f699657d 100644 --- a/java/src/main/java/org/rocksdb/RocksMutableObject.java +++ b/java/src/main/java/org/rocksdb/RocksMutableObject.java @@ -22,9 +22,18 @@ public abstract class RocksMutableObject extends AbstractNativeReference { private long nativeHandle_; private boolean owningHandle_; + /** + * Constructs a RocksMutableObject with no initial underlying native C++ object. + */ protected RocksMutableObject() { } + /** + * Constructs a RocksMutableObject. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ object. + */ protected RocksMutableObject(final long nativeHandle) { this.nativeHandle_ = nativeHandle; this.owningHandle_ = true; @@ -79,9 +88,20 @@ public final synchronized void close() { } } + /** + * Deletes underlying C++ object pointer. + */ protected void disposeInternal() { disposeInternal(nativeHandle_); } + /** + * Deletes any underlying native C++ objects which are owned by this object. + * All subclasses of {@code RocksMutableObject} must + * implement this to release their underlying native C++ objects. + * + * @param handle reference to the value of the C++ pointer pointing to some underlying native + * RocksDB C++ object. + */ protected abstract void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/RocksObject.java b/java/src/main/java/org/rocksdb/RocksObject.java index f07e1018afd6..9af9d41ae7aa 100644 --- a/java/src/main/java/org/rocksdb/RocksObject.java +++ b/java/src/main/java/org/rocksdb/RocksObject.java @@ -24,6 +24,13 @@ public abstract class RocksObject extends AbstractImmutableNativeReference { */ protected final long nativeHandle_; + /** + * Constructor to be called by subclasses to set the + * handle to the underlying C++ object. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ object. + */ protected RocksObject(final long nativeHandle) { super(true); this.nativeHandle_ = nativeHandle; @@ -37,9 +44,13 @@ protected void disposeInternal() { disposeInternal(nativeHandle_); } + /** + * Deletes any underlying native C++ objects which are owned by this object. + * All subclasses of {@code RocksObject} must + * implement this to release their underlying native C++ objects. + * + * @param handle reference to the value of the C++ pointer pointing to some underlying native + * RocksDB C++ object.
+ */ protected abstract void disposeInternal(final long handle); - - public long getNativeHandle() { - return nativeHandle_; - } } diff --git a/java/src/main/java/org/rocksdb/SanityLevel.java b/java/src/main/java/org/rocksdb/SanityLevel.java index 30568c363377..f8546913a97f 100644 --- a/java/src/main/java/org/rocksdb/SanityLevel.java +++ b/java/src/main/java/org/rocksdb/SanityLevel.java @@ -6,9 +6,23 @@ package org.rocksdb; +/** + * The Sanity Level. + */ public enum SanityLevel { + /** + * None. + */ NONE((byte) 0x0), + + /** + * Loosely compatible. + */ LOOSELY_COMPATIBLE((byte) 0x1), + + /** + * Exactly matches. + */ EXACT_MATCH((byte) 0xFF); private final byte value; diff --git a/java/src/main/java/org/rocksdb/SizeApproximationFlag.java b/java/src/main/java/org/rocksdb/SizeApproximationFlag.java index fe3c2dd05be8..e4ed68562d53 100644 --- a/java/src/main/java/org/rocksdb/SizeApproximationFlag.java +++ b/java/src/main/java/org/rocksdb/SizeApproximationFlag.java @@ -10,8 +10,19 @@ * or file stats approximation or both. */ public enum SizeApproximationFlag { + /** + * None + */ NONE((byte)0x0), + + /** + * Include Memtable(s). + */ INCLUDE_MEMTABLES((byte)0x1), + + /** + * Include file(s). + */ INCLUDE_FILES((byte)0x2); private final byte value; diff --git a/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java b/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java index b9d143929092..b783f3ecd021 100644 --- a/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java @@ -5,7 +5,9 @@ * The config for skip-list memtable representation. */ public class SkipListMemTableConfig extends MemTableConfig { - + /** + * The default lookahead. + */ public static final long DEFAULT_LOOKAHEAD = 0; /** diff --git a/java/src/main/java/org/rocksdb/SstFileManager.java b/java/src/main/java/org/rocksdb/SstFileManager.java index efce94db24fe..9932abd43552 100644 --- a/java/src/main/java/org/rocksdb/SstFileManager.java +++ b/java/src/main/java/org/rocksdb/SstFileManager.java @@ -17,11 +17,25 @@ */ //@ThreadSafe public final class SstFileManager extends RocksObject { + /** + * The default bytes-per-sec rate. + */ + public static final long DEFAULT_RATE_BYTES_PER_SEC = 0; + + /** + * The default of whether to delete existing trash. + */ + public static final boolean DEFAULT_DELETE_EXISTING_TRASH = true; - public static final long RATE_BYTES_PER_SEC_DEFAULT = 0; - public static final boolean DELETE_EXISTING_TRASH_DEFAULT = true; - public static final double MAX_TRASH_DB_RATION_DEFAULT = 0.25; - public static final long BYTES_MAX_DELETE_CHUNK_DEFAULT = 64 * 1024 * 1024; + /** + * The default max trash db ratio. + */ + public static final double DEFAULT_MAX_TRASH_DB_RATIO = 0.25; + + /** + * The default max delete chunk size in bytes. 
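The SizeApproximationFlag values documented above are passed as varargs to RocksDB#getApproximateSizes; a sketch with illustrative key bounds:

import java.util.Collections;
import org.rocksdb.*;

final class ApproximateSizesExample {
  // A sketch: estimate the size of one key range, counting both SST files
  // and memtables via the flags documented above.
  static long approximateRangeSize(final RocksDB db) {
    try (final Slice from = new Slice("a"); final Slice to = new Slice("z")) {
      final long[] sizes = db.getApproximateSizes(
          Collections.singletonList(new Range(from, to)),
          SizeApproximationFlag.INCLUDE_FILES, SizeApproximationFlag.INCLUDE_MEMTABLES);
      return sizes[0];
    }
  }
}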
+ */ + public static final long DEFAULT_BYTES_MAX_DELETE_CHUNK = 64 * 1024 * 1024; /** * Create a new SstFileManager that can be shared among multiple RocksDB @@ -46,7 +60,7 @@ public SstFileManager(final Env env) throws RocksDBException { */ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger) throws RocksDBException { - this(env, logger, RATE_BYTES_PER_SEC_DEFAULT); + this(env, logger, DEFAULT_RATE_BYTES_PER_SEC); } /** @@ -66,7 +80,7 @@ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger) */ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger, final long rateBytesPerSec) throws RocksDBException { - this(env, logger, rateBytesPerSec, MAX_TRASH_DB_RATION_DEFAULT); + this(env, logger, rateBytesPerSec, DEFAULT_MAX_TRASH_DB_RATIO); } /** @@ -90,8 +104,7 @@ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger, public SstFileManager(final Env env, /*@Nullable*/ final Logger logger, final long rateBytesPerSec, final double maxTrashDbRatio) throws RocksDBException { - this(env, logger, rateBytesPerSec, maxTrashDbRatio, - BYTES_MAX_DELETE_CHUNK_DEFAULT); + this(env, logger, rateBytesPerSec, maxTrashDbRatio, DEFAULT_BYTES_MAX_DELETE_CHUNK); } /** diff --git a/java/src/main/java/org/rocksdb/SstFileReader.java b/java/src/main/java/org/rocksdb/SstFileReader.java index 46bebf1dd2e3..a4b03c7d050a 100644 --- a/java/src/main/java/org/rocksdb/SstFileReader.java +++ b/java/src/main/java/org/rocksdb/SstFileReader.java @@ -5,7 +5,15 @@ package org.rocksdb; +/** + * An SST File Reader. + */ public class SstFileReader extends RocksObject { + /** + * Constructs an SstFileReader. + * + * @param options the options for the reader. + */ public SstFileReader(final Options options) { super(newSstFileReader(options.nativeHandle_)); } diff --git a/java/src/main/java/org/rocksdb/SstFileReaderIterator.java b/java/src/main/java/org/rocksdb/SstFileReaderIterator.java index 31f2f393aaf3..821a6bff0e0a 100644 --- a/java/src/main/java/org/rocksdb/SstFileReaderIterator.java +++ b/java/src/main/java/org/rocksdb/SstFileReaderIterator.java @@ -21,6 +21,13 @@ * @see RocksObject */ public class SstFileReaderIterator extends AbstractRocksIterator { + /** + * Constructs a SstFileReaderIterator. + * + * @param reader the SST file reader. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ SstFileReaderIterator. + */ protected SstFileReaderIterator(final SstFileReader reader, final long nativeHandle) { super(reader, nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/SstPartitionerFactory.java b/java/src/main/java/org/rocksdb/SstPartitionerFactory.java index ea6f13565995..4e9c72bfdd94 100644 --- a/java/src/main/java/org/rocksdb/SstPartitionerFactory.java +++ b/java/src/main/java/org/rocksdb/SstPartitionerFactory.java @@ -9,6 +9,12 @@ * Handle to factory for SstPartitioner. It is used in {@link ColumnFamilyOptions} */ public abstract class SstPartitionerFactory extends RocksObject { + /** + * Constructs a SstPartitionerFactory. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ SstPartitionerFactory. 
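With the constants renamed above, constructing an SstFileManager with its defaults and sharing it through the options is unchanged; a sketch (the path is illustrative):

import org.rocksdb.*;

final class SstFileManagerExample {
  // A sketch: one SstFileManager (default rate limit, trash ratio and
  // delete chunk size, per the constants above) shared via the options.
  static RocksDB openTracked(final String path) throws RocksDBException {
    final SstFileManager sstFileManager = new SstFileManager(Env.getDefault());
    final Options options = new Options()
        .setCreateIfMissing(true)
        .setSstFileManager(sstFileManager);
    return RocksDB.open(options, path);
  }
}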
+ */ protected SstPartitionerFactory(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java b/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java index d9b7184aa012..d14cbe69e20d 100644 --- a/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java +++ b/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java @@ -9,6 +9,11 @@ * Fixed prefix factory. It partitions SST files using fixed prefix of the key. */ public class SstPartitionerFixedPrefixFactory extends SstPartitionerFactory { + /** + * Constructs an SstPartitionerFixedPrefixFactory. + * + * @param prefixLength the prefix length of the keys for partitioning. + */ public SstPartitionerFixedPrefixFactory(final long prefixLength) { super(newSstPartitionerFixedPrefixFactory0(prefixLength)); } diff --git a/java/src/main/java/org/rocksdb/StateType.java b/java/src/main/java/org/rocksdb/StateType.java index 803fa37d91ec..78829dbf557e 100644 --- a/java/src/main/java/org/rocksdb/StateType.java +++ b/java/src/main/java/org/rocksdb/StateType.java @@ -12,7 +12,14 @@ * such as reading / writing a file or waiting for a mutex. */ public enum StateType { + /** + * Unknown. + */ STATE_UNKNOWN((byte)0x0), + + /** + * Waiting on Mutex. + */ STATE_MUTEX_WAIT((byte)0x1); private final byte value; diff --git a/java/src/main/java/org/rocksdb/Statistics.java b/java/src/main/java/org/rocksdb/Statistics.java index 80ae24586930..bb1f197fd0bf 100644 --- a/java/src/main/java/org/rocksdb/Statistics.java +++ b/java/src/main/java/org/rocksdb/Statistics.java @@ -12,19 +12,37 @@ * is managed by Options class. */ public class Statistics extends RocksObject { - + /** + * Constructs a Statistics. + */ public Statistics() { super(newStatisticsInstance()); } + /** + * Constructs a Statistics. + * + * @param otherStatistics another statistics object to copy stats from. + */ public Statistics(final Statistics otherStatistics) { super(newStatistics(otherStatistics.nativeHandle_)); } + /** + * Constructs a Statistics. + * + * @param ignoreHistograms histograms to ignore. + */ public Statistics(final EnumSet ignoreHistograms) { super(newStatisticsInstance(toArrayValues(ignoreHistograms))); } + /** + * Constructs a Statistics. + * + * @param ignoreHistograms histograms to ignore. + * @param otherStatistics another statistics object to copy stats from. + */ public Statistics(final EnumSet ignoreHistograms, final Statistics otherStatistics) { super(newStatistics(toArrayValues(ignoreHistograms), otherStatistics.nativeHandle_)); } diff --git a/java/src/main/java/org/rocksdb/StatisticsCollector.java b/java/src/main/java/org/rocksdb/StatisticsCollector.java index dd0d98fe5214..e034bbfdc405 100644 --- a/java/src/main/java/org/rocksdb/StatisticsCollector.java +++ b/java/src/main/java/org/rocksdb/StatisticsCollector.java @@ -41,6 +41,9 @@ public StatisticsCollector( _executorService = Executors.newSingleThreadExecutor(); } + /** + * Start collecting statistics. + */ public void start() { _executorService.submit(collectStatistics()); } diff --git a/java/src/main/java/org/rocksdb/StatsCollectorInput.java b/java/src/main/java/org/rocksdb/StatsCollectorInput.java index 5bf43ade5a6f..f36f7baa3f7f 100644 --- a/java/src/main/java/org/rocksdb/StatsCollectorInput.java +++ b/java/src/main/java/org/rocksdb/StatsCollectorInput.java @@ -25,11 +25,21 @@ public StatsCollectorInput(final Statistics statistics, _statsCallback = statsCallback; } + /** + * Get the statistics. 
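The Statistics constructors documented above are typically paired with Options#setStatistics; a sketch that also reads back a ticker counter (the path is illustrative):

import org.rocksdb.*;

final class StatisticsExample {
  // A sketch: attach a Statistics instance to the DB and read a ticker
  // counter after some work.
  static void printBlockCacheHits(final String path) throws RocksDBException {
    try (final Statistics statistics = new Statistics();
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setStatistics(statistics);
         final RocksDB db = RocksDB.open(options, path)) {
      db.put("key".getBytes(), "value".getBytes());
      db.get("key".getBytes());
      System.out.println("block cache hits: "
          + statistics.getTickerCount(TickerType.BLOCK_CACHE_HIT));
    }
  }
}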
+ * + * @return the statistics. + */ public Statistics getStatistics() { return _statistics; } - public StatisticsCollectorCallback getCallback() { + /** + * Get the statistics collector callback. + * + * @return the statistics collector callback. + */ + StatisticsCollectorCallback getCallback() { return _statsCallback; } } diff --git a/java/src/main/java/org/rocksdb/Status.java b/java/src/main/java/org/rocksdb/Status.java index 5f751f422089..11c5ceff0274 100644 --- a/java/src/main/java/org/rocksdb/Status.java +++ b/java/src/main/java/org/rocksdb/Status.java @@ -16,10 +16,29 @@ */ public class Status implements Serializable { private static final long serialVersionUID = -3794191127754280439L; + + /** + * The status code. + */ private final Code code; + + /** + * The status sub-code. + */ /* @Nullable */ private final SubCode subCode; + + /** + * The state of the status. + */ /* @Nullable */ private final String state; + /** + * Constructs a Status. + * + * @param code the code. + * @param subCode the sub-code. + * @param state the state. + */ public Status(final Code code, final SubCode subCode, final String state) { this.code = code; this.subCode = subCode; @@ -35,18 +54,38 @@ private Status(final byte code, final byte subCode, final String state) { this.state = state; } + /** + * Get the status code. + * + * @return the status code. + */ public Code getCode() { return code; } + /** + * Get the status sub-code. + * + * @return the status sub-code. + */ public SubCode getSubCode() { return subCode; } + /** + * Get the state of the status. + * + * @return the status state. + */ public String getState() { return state; } + /** + * Get a string representation of the status code. + * + * @return a string representation of the status code. + */ public String getCodeString() { final StringBuilder builder = new StringBuilder() .append(code.name()); @@ -58,22 +97,86 @@ public String getCodeString() { return builder.toString(); } - // should stay in sync with /include/rocksdb/status.h:Code and /java/rocksjni/portal.h:toJavaStatusCode + /** + * Status Code. + *

+ * Should stay in sync with /include/rocksdb/status.h:Code and + * /java/rocksjni/portal.h:toJavaStatusCode + */ public enum Code { + /** + * Success. + */ Ok( (byte)0x0), + + /** + * Not found. + */ NotFound( (byte)0x1), + + /** + * Corruption detected. + */ Corruption( (byte)0x2), + + /** + * Not supported. + */ NotSupported( (byte)0x3), + + /** + * Invalid argument provided. + */ InvalidArgument( (byte)0x4), + + /** + * I/O error. + */ IOError( (byte)0x5), + + /** + * There is a merge in progress. + */ MergeInProgress( (byte)0x6), + + /** + * Incomplete. + */ Incomplete( (byte)0x7), + + /** + * There is a shutdown in progress. + */ ShutdownInProgress( (byte)0x8), + + /** + * An operation timed out. + */ TimedOut( (byte)0x9), + + /** + * An operation was aborted. + */ Aborted( (byte)0xA), + + /** + * The system is busy. + */ Busy( (byte)0xB), + + /** + * The request expired. + */ Expired( (byte)0xC), + + /** + * The operation should be reattempted. + */ TryAgain( (byte)0xD), + + /** + * Undefined. + */ Undefined( (byte)0x7F); private final byte value; @@ -82,6 +185,15 @@ public enum Code { this.value = value; } + /** + * Get a code from its byte representation. + * + * @param value the byte representation of the code. + * + * @return the code + * + * @throws IllegalArgumentException if the {@code value} parameter does not represent a code. + */ public static Code getCode(final byte value) { for (final Code code : Code.values()) { if (code.value == value){ @@ -102,16 +214,56 @@ public byte getValue() { } } - // should stay in sync with /include/rocksdb/status.h:SubCode and /java/rocksjni/portal.h:toJavaStatusSubCode + /** + * Status Sub-code. + *

+ * should stay in sync with /include/rocksdb/status.h:SubCode and + * /java/rocksjni/portal.h:toJavaStatusSubCode + */ public enum SubCode { + /** + * None. + */ None( (byte)0x0), + + /** + * Timeout whilst waiting on Mutex. + */ MutexTimeout( (byte)0x1), + + /** + * Timeout whilst waiting on Lock. + */ LockTimeout( (byte)0x2), + + /** + * Maximum limit on number of locks reached. + */ LockLimit( (byte)0x3), + + /** + * No space remaining. + */ NoSpace( (byte)0x4), + + /** + * Deadlock detected. + */ Deadlock( (byte)0x5), + + /** + * Stale file detected. + */ StaleFile( (byte)0x6), + + /** + * Reached the maximum memory limit. + */ MemoryLimit( (byte)0x7), + + /** + * Undefined. + */ Undefined( (byte)0x7F); private final byte value; @@ -120,6 +272,16 @@ public enum SubCode { this.value = value; } + /** + * Get a sub-code from its byte representation. + * + * @param value the byte representation of the sub-code. + * + * @return the sub-code + * + * @throws IllegalArgumentException if the {@code value} parameter does not represent a + * sub-code. + */ public static SubCode getSubCode(final byte value) { for (final SubCode subCode : SubCode.values()) { if (subCode.value == value){ diff --git a/java/src/main/java/org/rocksdb/StringAppendOperator.java b/java/src/main/java/org/rocksdb/StringAppendOperator.java index 25b134c44af8..2bf2f1aff6f1 100644 --- a/java/src/main/java/org/rocksdb/StringAppendOperator.java +++ b/java/src/main/java/org/rocksdb/StringAppendOperator.java @@ -11,14 +11,27 @@ * two strings. */ public class StringAppendOperator extends MergeOperator { + /** + * Constructs a StringAppendOperator. + */ public StringAppendOperator() { this(','); } + /** + * Constructs a StringAppendOperator. + * + * @param delim the character delimiter to use when appending. + */ public StringAppendOperator(final char delim) { super(newSharedStringAppendOperator(delim)); } + /** + * Constructs a StringAppendOperator. + * + * @param delim the string delimiter to use when appending. + */ public StringAppendOperator(final String delim) { super(newSharedStringAppendOperator(delim)); } diff --git a/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java index 8dc56796a25d..aaf34b2cbd57 100644 --- a/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java +++ b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Brief information on Table File creation. + */ public class TableFileCreationBriefInfo { private final String dbName; private final String columnFamilyName; diff --git a/java/src/main/java/org/rocksdb/TableFileCreationInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java index 5654603c3833..1b65712b3b3b 100644 --- a/java/src/main/java/org/rocksdb/TableFileCreationInfo.java +++ b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information on Table File creation. 
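The StringAppendOperator delimiter constructors documented above take effect through Options#setMergeOperator; a sketch (the path is illustrative):

import org.rocksdb.*;

final class StringAppendExample {
  // A sketch: successive merge() calls are concatenated with the
  // configured delimiter, so "key" ends up holding "a,b".
  static void appendValues(final String path) throws RocksDBException {
    try (final StringAppendOperator stringAppend = new StringAppendOperator(',');
         final Options options = new Options()
             .setCreateIfMissing(true)
             .setMergeOperator(stringAppend);
         final RocksDB db = RocksDB.open(options, path)) {
      db.merge("key".getBytes(), "a".getBytes());
      db.merge("key".getBytes(), "b".getBytes());
    }
  }
}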
+ */ public class TableFileCreationInfo extends TableFileCreationBriefInfo { private final long fileSize; private final TableProperties tableProperties; diff --git a/java/src/main/java/org/rocksdb/TableFileCreationReason.java b/java/src/main/java/org/rocksdb/TableFileCreationReason.java index d3984663dd28..eaa06245a0c0 100644 --- a/java/src/main/java/org/rocksdb/TableFileCreationReason.java +++ b/java/src/main/java/org/rocksdb/TableFileCreationReason.java @@ -5,10 +5,28 @@ package org.rocksdb; +/** + * Reasons for Table File creation. + */ public enum TableFileCreationReason { + /** + * Flush. + */ FLUSH((byte) 0x00), + + /** + * Compaction. + */ COMPACTION((byte) 0x01), + + /** + * Recovery. + */ RECOVERY((byte) 0x02), + + /** + * Miscellaneous. + */ MISC((byte) 0x03); private final byte value; diff --git a/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java index 9a777e3336c2..87bd2b8c87af 100644 --- a/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java +++ b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information on Table File deletion. + */ public class TableFileDeletionInfo { private final String dbName; private final String filePath; diff --git a/java/src/main/java/org/rocksdb/TablePropertiesCollectorFactory.java b/java/src/main/java/org/rocksdb/TablePropertiesCollectorFactory.java index ae2789ef8263..fbf521408017 100644 --- a/java/src/main/java/org/rocksdb/TablePropertiesCollectorFactory.java +++ b/java/src/main/java/org/rocksdb/TablePropertiesCollectorFactory.java @@ -1,44 +1,65 @@ -// Copyright (c) Meta Platforms, Inc. and affiliates. -// -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -package org.rocksdb; - -public abstract class TablePropertiesCollectorFactory extends RocksObject { - private TablePropertiesCollectorFactory(final long nativeHandle) { - super(nativeHandle); - } - - public static TablePropertiesCollectorFactory NewCompactOnDeletionCollectorFactory( - final long sliding_window_size, final long deletion_trigger, final double deletion_ratio) { - long handle = - newCompactOnDeletionCollectorFactory(sliding_window_size, deletion_trigger, deletion_ratio); - return new TablePropertiesCollectorFactory(handle) { - @Override - protected void disposeInternal(long handle) { - TablePropertiesCollectorFactory.deleteCompactOnDeletionCollectorFactory(handle); - } - }; - } - - /** - * Internal API. Do not use. - * @param nativeHandle - * @return - */ - static TablePropertiesCollectorFactory newWrapper(final long nativeHandle) { - return new TablePropertiesCollectorFactory(nativeHandle) { - @Override - protected void disposeInternal(long handle) { - TablePropertiesCollectorFactory.deleteCompactOnDeletionCollectorFactory(handle); - } - }; - } - - private static native long newCompactOnDeletionCollectorFactory( - final long slidingWindowSize, final long deletionTrigger, final double deletionRatio); - - private static native void deleteCompactOnDeletionCollectorFactory(final long handle); -} +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
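The rewritten file below renames the factory method to createNewCompactOnDeletionCollectorFactory; a sketch of calling it (the window and trigger values are illustrative):

import org.rocksdb.*;

final class CompactOnDeletionExample {
  // A sketch: flag an SST file for compaction once any 128 consecutive
  // entries contain at least 16 deletions; ratio-based triggering is
  // disabled by passing a deletionRatio <= 0.
  static TablePropertiesCollectorFactory newDeletionCollectorFactory() {
    return TablePropertiesCollectorFactory.createNewCompactOnDeletionCollectorFactory(
        /* slidingWindowSize "N" */ 128,
        /* deletionTrigger "D" */ 16,
        /* deletionRatio */ 0.0);
  }
}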
+ +package org.rocksdb; + +/** + * Table Properties Collector Factory. + */ +public abstract class TablePropertiesCollectorFactory extends RocksObject { + private TablePropertiesCollectorFactory(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Creates a factory of a table property collector that marks a SST + * file as need-compaction when it observes at least "D" deletion + * entries in any "N" consecutive entries, or the ratio of tombstone + * entries >= deletion_ratio. + * + * @param slidingWindowSize "N". Note that this number will be + * rounded up to the smallest multiple of 128 that is no less + * than the specified size. + * @param deletionTrigger "D". Note that even when "N" is changed, + * the specified number for "D" will not be changed. + * @param deletionRatio if <= 0 or > 1, disable triggering compaction + * based on deletion ratio. Disabled by default. + * + * @return the new compact on deletion collector factory. + */ + public static TablePropertiesCollectorFactory createNewCompactOnDeletionCollectorFactory( + final long slidingWindowSize, final long deletionTrigger, final double deletionRatio) { + final long handle = + newCompactOnDeletionCollectorFactory(slidingWindowSize, deletionTrigger, deletionRatio); + return new TablePropertiesCollectorFactory(handle) { + @Override + protected void disposeInternal(final long handle) { + TablePropertiesCollectorFactory.deleteCompactOnDeletionCollectorFactory(handle); + } + }; + } + + /** + * Internal API. Do not use. + * + * @param nativeHandle the native handle to wrap. + * + * @return the new TablePropertiesCollectorFactory. + */ + static TablePropertiesCollectorFactory newWrapper(final long nativeHandle) { + return new TablePropertiesCollectorFactory(nativeHandle) { + @Override + protected void disposeInternal(long handle) { + TablePropertiesCollectorFactory.deleteCompactOnDeletionCollectorFactory(handle); + } + }; + } + + private static native long newCompactOnDeletionCollectorFactory( + final long slidingWindowSize, final long deletionTrigger, final double deletionRatio); + + private static native void deleteCompactOnDeletionCollectorFactory(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/ThreadStatus.java b/java/src/main/java/org/rocksdb/ThreadStatus.java index 4211453d1a0b..c75d85d276f6 100644 --- a/java/src/main/java/org/rocksdb/ThreadStatus.java +++ b/java/src/main/java/org/rocksdb/ThreadStatus.java @@ -7,6 +7,9 @@ import java.util.Map; +/** + * The status of a Thread. + */ public class ThreadStatus { private final long threadId; private final ThreadType threadType; @@ -155,6 +158,13 @@ public static String getOperationName(final OperationType operationType) { return getOperationName(operationType.getValue()); } + /** + * Converts microseconds to a string representation. + * + * @param operationElapsedTime the microseconds. + * + * @return the string representation. + */ public static String microsToString(final long operationElapsedTime) { return microsToStringNative(operationElapsedTime); } diff --git a/java/src/main/java/org/rocksdb/TickerType.java b/java/src/main/java/org/rocksdb/TickerType.java index bf1c73a129fb..c3d7213b261d 100644 --- a/java/src/main/java/org/rocksdb/TickerType.java +++ b/java/src/main/java/org/rocksdb/TickerType.java @@ -16,952 +16,1246 @@ * should descend into negative values until TICKER_ENUM_MAX reaches -128 (-0x80). 
*/ public enum TickerType { - - /** - * total block cache misses - * - * REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS + - * BLOCK_CACHE_FILTER_MISS + - * BLOCK_CACHE_DATA_MISS; - */ - BLOCK_CACHE_MISS((byte) 0x0), - - /** - * total block cache hit - * - * REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT + - * BLOCK_CACHE_FILTER_HIT + - * BLOCK_CACHE_DATA_HIT; - */ - BLOCK_CACHE_HIT((byte) 0x1), - - BLOCK_CACHE_ADD((byte) 0x2), - - /** - * # of failures when adding blocks to block cache. - */ - BLOCK_CACHE_ADD_FAILURES((byte) 0x3), - - /** - * # of times cache miss when accessing index block from block cache. - */ - BLOCK_CACHE_INDEX_MISS((byte) 0x4), - - /** - * # of times cache hit when accessing index block from block cache. - */ - BLOCK_CACHE_INDEX_HIT((byte) 0x5), - - /** - * # of index blocks added to block cache. - */ - BLOCK_CACHE_INDEX_ADD((byte) 0x6), - - /** - * # of bytes of index blocks inserted into cache - */ - BLOCK_CACHE_INDEX_BYTES_INSERT((byte) 0x7), - - /** - * # of times cache miss when accessing filter block from block cache. - */ - BLOCK_CACHE_FILTER_MISS((byte) 0x8), - - /** - * # of times cache hit when accessing filter block from block cache. - */ - BLOCK_CACHE_FILTER_HIT((byte) 0x9), - - /** - * # of filter blocks added to block cache. - */ - BLOCK_CACHE_FILTER_ADD((byte) 0xA), - - /** - * # of bytes of bloom filter blocks inserted into cache - */ - BLOCK_CACHE_FILTER_BYTES_INSERT((byte) 0xB), - - /** - * # of times cache miss when accessing data block from block cache. - */ - BLOCK_CACHE_DATA_MISS((byte) 0xC), - - /** - * # of times cache hit when accessing data block from block cache. - */ - BLOCK_CACHE_DATA_HIT((byte) 0xD), - - /** - * # of data blocks added to block cache. - */ - BLOCK_CACHE_DATA_ADD((byte) 0xE), - - /** - * # of bytes of data blocks inserted into cache - */ - BLOCK_CACHE_DATA_BYTES_INSERT((byte) 0xF), - - /** - * # of bytes read from cache. - */ - BLOCK_CACHE_BYTES_READ((byte) 0x10), - - /** - * # of bytes written into cache. - */ - BLOCK_CACHE_BYTES_WRITE((byte) 0x11), - - /** - * Block cache related stats for Compression dictionaries - */ - BLOCK_CACHE_COMPRESSION_DICT_MISS((byte) 0x12), - BLOCK_CACHE_COMPRESSION_DICT_HIT((byte) 0x13), - BLOCK_CACHE_COMPRESSION_DICT_ADD((byte) 0x14), - BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT((byte) 0x15), - - /** - * Redundant additions to block cache - */ - BLOCK_CACHE_ADD_REDUNDANT((byte) 0x16), - BLOCK_CACHE_INDEX_ADD_REDUNDANT((byte) 0x17), - BLOCK_CACHE_FILTER_ADD_REDUNDANT((byte) 0x18), - BLOCK_CACHE_DATA_ADD_REDUNDANT((byte) 0x19), - BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT((byte) 0x1A), - - /** - * Number of secondary cache hits - */ - SECONDARY_CACHE_HITS((byte) 0x1B), - SECONDARY_CACHE_FILTER_HITS((byte) 0x1C), - SECONDARY_CACHE_INDEX_HITS((byte) 0x1D), - SECONDARY_CACHE_DATA_HITS((byte) 0x1E), - - COMPRESSED_SECONDARY_CACHE_DUMMY_HITS((byte) 0x1F), - COMPRESSED_SECONDARY_CACHE_HITS((byte) 0x20), - COMPRESSED_SECONDARY_CACHE_PROMOTIONS((byte) 0x21), - COMPRESSED_SECONDARY_CACHE_PROMOTION_SKIPS((byte) 0x22), - - /** - * # of times bloom filter has avoided file reads. - */ - BLOOM_FILTER_USEFUL((byte) 0x23), - - /** - * # of times bloom FullFilter has not avoided the reads. - */ - BLOOM_FILTER_FULL_POSITIVE((byte) 0x24), - - /** - * # of times bloom FullFilter has not avoided the reads and data actually - * exist. 
- */ - BLOOM_FILTER_FULL_TRUE_POSITIVE((byte) 0x25), - - /** - * Number of times bloom was checked before creating iterator on a - * file, and the number of times the check was useful in avoiding - * iterator creation (and thus likely IOPs). - */ - BLOOM_FILTER_PREFIX_CHECKED((byte) 0x26), - BLOOM_FILTER_PREFIX_USEFUL((byte) 0x27), - BLOOM_FILTER_PREFIX_TRUE_POSITIVE((byte) 0x28), - - /** - * # persistent cache hit - */ - PERSISTENT_CACHE_HIT((byte) 0x29), - - /** - * # persistent cache miss - */ - PERSISTENT_CACHE_MISS((byte) 0x2A), - - /** - * # total simulation block cache hits - */ - SIM_BLOCK_CACHE_HIT((byte) 0x2B), - - /** - * # total simulation block cache misses - */ - SIM_BLOCK_CACHE_MISS((byte) 0x2C), - - /** - * # of memtable hits. - */ - MEMTABLE_HIT((byte) 0x2D), - - /** - * # of memtable misses. - */ - MEMTABLE_MISS((byte) 0x2E), - - /** - * # of Get() queries served by L0 - */ - GET_HIT_L0((byte) 0x2F), - - /** - * # of Get() queries served by L1 - */ - GET_HIT_L1((byte) 0x30), - - /** - * # of Get() queries served by L2 and up - */ - GET_HIT_L2_AND_UP((byte) 0x31), - - /** - * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction - * There are 4 reasons currently. - */ - - /** - * key was written with a newer value. - */ - COMPACTION_KEY_DROP_NEWER_ENTRY((byte) 0x32), - - /** - * Also includes keys dropped for range del. - * The key is obsolete. - */ - COMPACTION_KEY_DROP_OBSOLETE((byte) 0x33), - - /** - * key was covered by a range tombstone. - */ - COMPACTION_KEY_DROP_RANGE_DEL((byte) 0x34), - - /** - * User compaction function has dropped the key. - */ - COMPACTION_KEY_DROP_USER((byte) 0x35), - - /** - * all keys in range were deleted. - */ - COMPACTION_RANGE_DEL_DROP_OBSOLETE((byte) 0x36), - - /** - * Deletions obsoleted before bottom level due to file gap optimization. - */ - COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE((byte) 0x37), - - /** - * Compactions cancelled to prevent ENOSPC - */ - COMPACTION_CANCELLED((byte) 0x38), - - /** - * Number of keys written to the database via the Put and Write call's. - */ - NUMBER_KEYS_WRITTEN((byte) 0x39), - - /** - * Number of Keys read. - */ - NUMBER_KEYS_READ((byte) 0x3A), - - /** - * Number keys updated, if inplace update is enabled - */ - NUMBER_KEYS_UPDATED((byte) 0x3B), - - /** - * The number of uncompressed bytes issued by DB::Put(), DB::Delete(),\ - * DB::Merge(), and DB::Write(). - */ - BYTES_WRITTEN((byte) 0x3C), - - /** - * The number of uncompressed bytes read from DB::Get(). It could be - * either from memtables, cache, or table files. - * - * For the number of logical bytes read from DB::MultiGet(), - * please use {@link #NUMBER_MULTIGET_BYTES_READ}. - */ - BYTES_READ((byte) 0x3D), - - /** - * The number of calls to seek. - */ - NUMBER_DB_SEEK((byte) 0x3E), - - /** - * The number of calls to next. - */ - NUMBER_DB_NEXT((byte) 0x3F), - - /** - * The number of calls to prev. - */ - NUMBER_DB_PREV((byte) 0x40), - - /** - * The number of calls to seek that returned data. - */ - NUMBER_DB_SEEK_FOUND((byte) 0x41), - - /** - * The number of calls to next that returned data. - */ - NUMBER_DB_NEXT_FOUND((byte) 0x42), - - /** - * The number of calls to prev that returned data. - */ - NUMBER_DB_PREV_FOUND((byte) 0x43), - - /** - * The number of uncompressed bytes read from an iterator. - * Includes size of key and value. 
- */ - ITER_BYTES_READ((byte) 0x44), - - /** - * Number of internal skipped during iteration - */ - NUMBER_ITER_SKIP((byte) 0x45), - - /** - * Number of times we had to reseek inside an iteration to skip - * over large number of keys with same userkey. - */ - NUMBER_OF_RESEEKS_IN_ITERATION((byte) 0x46), - - /** - * Number of iterators created. - */ - NO_ITERATOR_CREATED((byte) 0x47), - - /** - * Number of iterators deleted. - */ - NO_ITERATOR_DELETED((byte) 0x48), - - NO_FILE_OPENS((byte) 0x49), - - NO_FILE_ERRORS((byte) 0x4A), - - /** - * Writer has to wait for compaction or flush to finish. - */ - STALL_MICROS((byte) 0x4B), - - /** - * The wait time for db mutex. - * - * Disabled by default. To enable it set stats level to {@link StatsLevel#ALL} - */ - DB_MUTEX_WAIT_MICROS((byte) 0x4C), - - /** - * Number of MultiGet calls. - */ - NUMBER_MULTIGET_CALLS((byte) 0x4D), - - /** - * Number of MultiGet keys read. - */ - NUMBER_MULTIGET_KEYS_READ((byte) 0x4E), - - /** - * Number of MultiGet bytes read. - */ - NUMBER_MULTIGET_BYTES_READ((byte) 0x4F), - - /** - * Number of MultiGet keys found (vs number requested) - */ - NUMBER_MULTIGET_KEYS_FOUND((byte) 0x50), - - NUMBER_MERGE_FAILURES((byte) 0x51), - - /** - * Record the number of calls to {@link RocksDB#getUpdatesSince(long)}. Useful to keep track of - * transaction log iterator refreshes. - */ - GET_UPDATES_SINCE_CALLS((byte) 0x52), - - /** - * Number of times WAL sync is done. - */ - WAL_FILE_SYNCED((byte) 0x53), - - /** - * Number of bytes written to WAL. - */ - WAL_FILE_BYTES((byte) 0x54), - - /** - * Writes can be processed by requesting thread or by the thread at the - * head of the writers queue. - */ - WRITE_DONE_BY_SELF((byte) 0x55), - - /** - * Equivalent to writes done for others. - */ - WRITE_DONE_BY_OTHER((byte) 0x56), - - /** - * Number of Write calls that request WAL. - */ - WRITE_WITH_WAL((byte) 0x57), - - /** - * Bytes read during compaction. - */ - COMPACT_READ_BYTES((byte) 0x58), - - /** - * Bytes written during compaction. - */ - COMPACT_WRITE_BYTES((byte) 0x59), - - /** - * Bytes written during flush. - */ - FLUSH_WRITE_BYTES((byte) 0x5A), - - /** - * Compaction read and write statistics broken down by CompactionReason - */ - COMPACT_READ_BYTES_MARKED((byte) 0x5B), - COMPACT_READ_BYTES_PERIODIC((byte) 0x5C), - COMPACT_READ_BYTES_TTL((byte) 0x5D), - COMPACT_WRITE_BYTES_MARKED((byte) 0x5E), - COMPACT_WRITE_BYTES_PERIODIC((byte) 0x5F), - COMPACT_WRITE_BYTES_TTL((byte) 0x60), - - /** - * Number of table's properties loaded directly from file, without creating - * table reader object. - */ - NUMBER_DIRECT_LOAD_TABLE_PROPERTIES((byte) 0x61), - NUMBER_SUPERVERSION_ACQUIRES((byte) 0x62), - NUMBER_SUPERVERSION_RELEASES((byte) 0x63), - NUMBER_SUPERVERSION_CLEANUPS((byte) 0x64), - - /** - * # of compressions/decompressions executed - */ - NUMBER_BLOCK_COMPRESSED((byte) 0x65), - NUMBER_BLOCK_DECOMPRESSED((byte) 0x66), - - BYTES_COMPRESSED_FROM((byte) 0x67), - BYTES_COMPRESSED_TO((byte) 0x68), - BYTES_COMPRESSION_BYPASSED((byte) 0x69), - BYTES_COMPRESSION_REJECTED((byte) 0x6A), - NUMBER_BLOCK_COMPRESSION_BYPASSED((byte) 0x6B), - NUMBER_BLOCK_COMPRESSION_REJECTED((byte) 0x6C), - BYTES_DECOMPRESSED_FROM((byte) 0x6D), - BYTES_DECOMPRESSED_TO((byte) 0x6E), - - MERGE_OPERATION_TOTAL_TIME((byte) 0x6F), - FILTER_OPERATION_TOTAL_TIME((byte) 0x70), - COMPACTION_CPU_TOTAL_TIME((byte) 0x71), - - /** - * Row cache. - */ - ROW_CACHE_HIT((byte) 0x72), - ROW_CACHE_MISS((byte) 0x73), - - /** - * Read amplification statistics. 
- * - * Read amplification can be calculated using this formula - * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES) - * - * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled - */ - - /** - * Estimate of total bytes actually used. - */ - READ_AMP_ESTIMATE_USEFUL_BYTES((byte) 0x74), - - /** - * Total size of loaded data blocks. - */ - READ_AMP_TOTAL_READ_BYTES((byte) 0x75), - - /** - * Number of refill intervals where rate limiter's bytes are fully consumed. - */ - NUMBER_RATE_LIMITER_DRAINS((byte) 0x76), - - /** - * BlobDB specific stats - * # of Put/PutTTL/PutUntil to BlobDB. - */ - BLOB_DB_NUM_PUT((byte) 0x77), - - /** - * # of Write to BlobDB. - */ - BLOB_DB_NUM_WRITE((byte) 0x78), - - /** - * # of Get to BlobDB. - */ - BLOB_DB_NUM_GET((byte) 0x79), - - /** - * # of MultiGet to BlobDB. - */ - BLOB_DB_NUM_MULTIGET((byte) 0x7A), - - /** - * # of Seek/SeekToFirst/SeekToLast/SeekForPrev to BlobDB iterator. - */ - BLOB_DB_NUM_SEEK((byte) 0x7B), - - /** - * # of Next to BlobDB iterator. - */ - BLOB_DB_NUM_NEXT((byte) 0x7C), - - /** - * # of Prev to BlobDB iterator. - */ - BLOB_DB_NUM_PREV((byte) 0x7D), - - /** - * # of keys written to BlobDB. - */ - BLOB_DB_NUM_KEYS_WRITTEN((byte) 0x7E), - - /** - * # of keys read from BlobDB. - */ - BLOB_DB_NUM_KEYS_READ((byte) 0x7F), - - /** - * # of bytes (key + value) written to BlobDB. - */ - BLOB_DB_BYTES_WRITTEN((byte) -0x1), - - /** - * # of bytes (keys + value) read from BlobDB. - */ - BLOB_DB_BYTES_READ((byte) -0x2), - - /** - * # of keys written by BlobDB as non-TTL inlined value. - */ - BLOB_DB_WRITE_INLINED((byte) -0x3), - - /** - * # of keys written by BlobDB as TTL inlined value. - */ - BLOB_DB_WRITE_INLINED_TTL((byte) -0x4), - - /** - * # of keys written by BlobDB as non-TTL blob value. - */ - BLOB_DB_WRITE_BLOB((byte) -0x5), - - /** - * # of keys written by BlobDB as TTL blob value. - */ - BLOB_DB_WRITE_BLOB_TTL((byte) -0x6), - - /** - * # of bytes written to blob file. - */ - BLOB_DB_BLOB_FILE_BYTES_WRITTEN((byte) -0x7), - - /** - * # of bytes read from blob file. - */ - BLOB_DB_BLOB_FILE_BYTES_READ((byte) -0x8), - - /** - * # of times a blob files being synced. - */ - BLOB_DB_BLOB_FILE_SYNCED((byte) -0x9), - - /** - * # of blob index evicted from base DB by BlobDB compaction filter because - * of expiration. - */ - BLOB_DB_BLOB_INDEX_EXPIRED_COUNT((byte) -0xA), - - /** - * Size of blob index evicted from base DB by BlobDB compaction filter - * because of expiration. - */ - BLOB_DB_BLOB_INDEX_EXPIRED_SIZE((byte) -0xB), - - /** - * # of blob index evicted from base DB by BlobDB compaction filter because - * of corresponding file deleted. - */ - BLOB_DB_BLOB_INDEX_EVICTED_COUNT((byte) -0xC), - - /** - * Size of blob index evicted from base DB by BlobDB compaction filter - * because of corresponding file deleted. - */ - BLOB_DB_BLOB_INDEX_EVICTED_SIZE((byte) -0xD), - - /** - * # of blob files being garbage collected. - */ - BLOB_DB_GC_NUM_FILES((byte) -0xE), - - /** - * # of blob files generated by garbage collection. - */ - BLOB_DB_GC_NUM_NEW_FILES((byte) -0xF), - - /** - * # of BlobDB garbage collection failures. - */ - BLOB_DB_GC_FAILURES((byte) -0x10), - - /** - * # of keys relocated to new blob file by garbage collection. - */ - BLOB_DB_GC_NUM_KEYS_RELOCATED((byte) -0x11), - - /** - * # of bytes relocated to new blob file by garbage collection. - */ - BLOB_DB_GC_BYTES_RELOCATED((byte) -0x12), - - /** - * # of blob files evicted because of BlobDB is full. 
- */ - BLOB_DB_FIFO_NUM_FILES_EVICTED((byte) -0x13), - - /** - * # of keys in the blob files evicted because of BlobDB is full. - */ - BLOB_DB_FIFO_NUM_KEYS_EVICTED((byte) -0x14), - - /** - * # of bytes in the blob files evicted because of BlobDB is full. - */ - BLOB_DB_FIFO_BYTES_EVICTED((byte) -0x15), - - /** - * # of times cache miss when accessing blob from blob cache. - */ - BLOB_DB_CACHE_MISS((byte) -0x16), - - /** - * # of times cache hit when accessing blob from blob cache. - */ - BLOB_DB_CACHE_HIT((byte) -0x17), - - /** - * # of data blocks added to blob cache. - */ - BLOB_DB_CACHE_ADD((byte) -0x18), - - /** - * # # of failures when adding blobs to blob cache. - */ - BLOB_DB_CACHE_ADD_FAILURES((byte) -0x19), - - /** - * # of bytes read from blob cache. - */ - BLOB_DB_CACHE_BYTES_READ((byte) -0x1A), - - /** - * # of bytes written into blob cache. - */ - BLOB_DB_CACHE_BYTES_WRITE((byte) -0x1B), - - /** - * These counters indicate a performance issue in WritePrepared transactions. - * We should not seem them ticking them much. - * # of times prepare_mutex_ is acquired in the fast path. - */ - TXN_PREPARE_MUTEX_OVERHEAD((byte) -0x1C), - - /** - * # of times old_commit_map_mutex_ is acquired in the fast path. - */ - TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD((byte) -0x1D), - - /** - * # of times we checked a batch for duplicate keys. - */ - TXN_DUPLICATE_KEY_OVERHEAD((byte) -0x1E), - - /** - * # of times snapshot_mutex_ is acquired in the fast path. - */ - TXN_SNAPSHOT_MUTEX_OVERHEAD((byte) -0x1F), - - /** - * # of times ::Get returned TryAgain due to expired snapshot seq - */ - TXN_GET_TRY_AGAIN((byte) -0x20), - - /** - * # of files marked as trash by delete scheduler - */ - FILES_MARKED_TRASH((byte) -0x21), - - /** - * # of trash files deleted by the background thread from the trash queue - */ - FILES_DELETED_FROM_TRASH_QUEUE((byte) -0x22), - - /** - * # of files deleted immediately by delete scheduler - */ - FILES_DELETED_IMMEDIATELY((byte) -0x23), - - /** - * DB error handler statistics - */ - ERROR_HANDLER_BG_ERROR_COUNT((byte) -0x24), - ERROR_HANDLER_BG_IO_ERROR_COUNT((byte) -0x25), - ERROR_HANDLER_BG_RETRYABLE_IO_ERROR_COUNT((byte) -0x26), - ERROR_HANDLER_AUTORESUME_COUNT((byte) -0x27), - ERROR_HANDLER_AUTORESUME_RETRY_TOTAL_COUNT((byte) -0x28), - ERROR_HANDLER_AUTORESUME_SUCCESS_COUNT((byte) -0x29), - - /** - * Bytes of raw data (payload) found on memtable at flush time. - * Contains the sum of garbage payload (bytes that are discarded - * at flush time) and useful payload (bytes of data that will - * eventually be written to SSTable). - */ - MEMTABLE_PAYLOAD_BYTES_AT_FLUSH((byte) -0x2A), - /** - * Outdated bytes of data present on memtable at flush time. - */ - MEMTABLE_GARBAGE_BYTES_AT_FLUSH((byte) -0x2B), - - /** - * Bytes read by `VerifyChecksum()` and `VerifyFileChecksums()` APIs. 
- */ - VERIFY_CHECKSUM_READ_BYTES((byte) -0x2C), - - /** - * Bytes read/written while creating backups - */ - BACKUP_READ_BYTES((byte) -0x2D), - BACKUP_WRITE_BYTES((byte) -0x2E), - - /** - * Remote compaction read/write statistics - */ - REMOTE_COMPACT_READ_BYTES((byte) -0x2F), - REMOTE_COMPACT_WRITE_BYTES((byte) -0x30), - - /** - * Tiered storage related statistics - */ - HOT_FILE_READ_BYTES((byte) -0x31), - WARM_FILE_READ_BYTES((byte) -0x32), - COOL_FILE_READ_BYTES((byte) -0x5B), - COLD_FILE_READ_BYTES((byte) -0x33), - ICE_FILE_READ_BYTES((byte) -0x59), - HOT_FILE_READ_COUNT((byte) -0x34), - WARM_FILE_READ_COUNT((byte) -0x35), - COOL_FILE_READ_COUNT((byte) -0x5C), - COLD_FILE_READ_COUNT((byte) -0x36), - ICE_FILE_READ_COUNT((byte) -0x5A), - - /** - * (non-)last level read statistics - */ - LAST_LEVEL_READ_BYTES((byte) -0x37), - LAST_LEVEL_READ_COUNT((byte) -0x38), - NON_LAST_LEVEL_READ_BYTES((byte) -0x39), - NON_LAST_LEVEL_READ_COUNT((byte) -0x3A), - - /** - * Statistics on iterator Seek() (and variants) for each sorted run. - * i.e a single user Seek() can result in many sorted run Seek()s. - * The stats are split between last level and non-last level. - * Filtered: a filter such as prefix Bloom filter indicate the Seek() would - * not find anything relevant, so avoided a likely access to data+index - * blocks. - */ - LAST_LEVEL_SEEK_FILTERED((byte) -0x3B), - /** - * Filter match: a filter such as prefix Bloom filter was queried but did - * not filter out the seek. - */ - LAST_LEVEL_SEEK_FILTER_MATCH((byte) -0x3C), - /** - * At least one data block was accessed for a Seek() (or variant) on a - * sorted run. - */ - LAST_LEVEL_SEEK_DATA((byte) -0x3D), - /** - * At least one value() was accessed for the seek (suggesting it was useful), - * and no filter such as prefix Bloom was queried. - */ - LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER((byte) -0x3E), - /** - * At least one value() was accessed for the seek (suggesting it was useful), - * after querying a filter such as prefix Bloom. - */ - LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH((byte) -0x3F), - - /** - * The same set of stats, but for non-last level seeks. - */ - NON_LAST_LEVEL_SEEK_FILTERED((byte) -0x40), - NON_LAST_LEVEL_SEEK_FILTER_MATCH((byte) -0x41), - NON_LAST_LEVEL_SEEK_DATA((byte) -0x42), - NON_LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER((byte) -0x43), - NON_LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH((byte) -0x44), - - /** - * Number of block checksum verifications - */ - BLOCK_CHECKSUM_COMPUTE_COUNT((byte) -0x45), - - /** - * Number of times RocksDB detected a corruption while verifying a block - * checksum. RocksDB does not remember corruptions that happened during user - * reads so the same block corruption may be detected multiple times. 
- */ - BLOCK_CHECKSUM_MISMATCH_COUNT((byte) -0x46), - - MULTIGET_COROUTINE_COUNT((byte) -0x47), - - /** - * Time spent in the ReadAsync file system call - */ - READ_ASYNC_MICROS((byte) -0x48), - - /** - * Number of errors returned to the async read callback - */ - ASYNC_READ_ERROR_COUNT((byte) -0x49), - - /** - * Number of lookup into the prefetched tail (see - * `TABLE_OPEN_PREFETCH_TAIL_READ_BYTES`) - * that can't find its data for table open - */ - TABLE_OPEN_PREFETCH_TAIL_MISS((byte) -0x4A), - - /** - * Number of lookup into the prefetched tail (see - * `TABLE_OPEN_PREFETCH_TAIL_READ_BYTES`) - * that finds its data for table open - */ - TABLE_OPEN_PREFETCH_TAIL_HIT((byte) -0x4B), - - /** - * # of times timestamps are checked on accessing the table - */ - TIMESTAMP_FILTER_TABLE_CHECKED((byte) -0x4C), - - /** - * # of times timestamps can successfully help skip the table access - */ - TIMESTAMP_FILTER_TABLE_FILTERED((byte) -0x4D), - - READAHEAD_TRIMMED((byte) -0x4E), - - FIFO_MAX_SIZE_COMPACTIONS((byte) -0x4F), - - FIFO_TTL_COMPACTIONS((byte) -0x50), - - FIFO_CHANGE_TEMPERATURE_COMPACTIONS((byte) -0x58), - - PREFETCH_BYTES((byte) -0x51), - - PREFETCH_BYTES_USEFUL((byte) -0x52), - - PREFETCH_HITS((byte) -0x53), - - SST_FOOTER_CORRUPTION_COUNT((byte) -0x55), - - FILE_READ_CORRUPTION_RETRY_COUNT((byte) -0x56), - - FILE_READ_CORRUPTION_RETRY_SUCCESS_COUNT((byte) -0x57), - - /** - * Counter for the number of times a WBWI is ingested into the DB. This - * happens when IngestWriteBatchWithIndex() is used and when large - * transaction optimization is enabled through - * TransactionOptions::large_txn_commit_optimize_threshold. - */ - NUMBER_WBWI_INGEST((byte) -0x5D), - - /** - * Failure to load the UDI during SST table open - */ - SST_USER_DEFINED_INDEX_LOAD_FAIL_COUNT((byte) -0x5E), - - /** - * Bytes of output files successfully resumed during remote compaction - */ - REMOTE_COMPACT_RESUMED_BYTES((byte) -0x5F), - - /** - * MultiScan statistics - */ - - /** - * # of calls to Iterator::Prepare() for multi-scan - */ - MULTISCAN_PREPARE_CALLS((byte) -0x60), - - /** - * # of errors during Iterator::Prepare() for multi-scan - */ - MULTISCAN_PREPARE_ERRORS((byte) -0x61), - - /** - * # of data blocks prefetched during multi-scan Prepare() - */ - MULTISCAN_BLOCKS_PREFETCHED((byte) -0x62), - - /** - * # of data blocks found in cache during multi-scan Prepare() - */ - MULTISCAN_BLOCKS_FROM_CACHE((byte) -0x63), - - /** - * Total bytes prefetched during multi-scan Prepare() - */ - MULTISCAN_PREFETCH_BYTES((byte) -0x64), - - /** - * # of prefetched blocks that were never accessed (wasted) - */ - MULTISCAN_PREFETCH_BLOCKS_WASTED((byte) -0x65), - - /** - * # of I/O requests issued during multi-scan Prepare() - */ - MULTISCAN_IO_REQUESTS((byte) -0x66), - - /** - * # of non-adjacent blocks coalesced into single I/O request - */ - MULTISCAN_IO_COALESCED_NONADJACENT((byte) -0x67), - - /** - * # of seek errors during multi-scan iteration - */ - MULTISCAN_SEEK_ERRORS((byte) -0x68), - - TICKER_ENUM_MAX((byte) -0x54); - - private final byte value; - - TickerType(final byte value) { - this.value = value; - } + /** + * total block cache misses + *

+ * REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS + + * BLOCK_CACHE_FILTER_MISS + + * BLOCK_CACHE_DATA_MISS; + */ + BLOCK_CACHE_MISS((byte) 0x0), + + /** + * total block cache hit + *

+ * REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT + + * BLOCK_CACHE_FILTER_HIT + + * BLOCK_CACHE_DATA_HIT; + */ + BLOCK_CACHE_HIT((byte) 0x1), + + BLOCK_CACHE_ADD((byte) 0x2), + + /** + * Number of failures when adding blocks to block cache. + */ + BLOCK_CACHE_ADD_FAILURES((byte) 0x3), + + /** + * Number of times cache miss when accessing index block from block cache. + */ + BLOCK_CACHE_INDEX_MISS((byte) 0x4), + + /** + * Number of times cache hit when accessing index block from block cache. + */ + BLOCK_CACHE_INDEX_HIT((byte) 0x5), + + /** + * Number of index blocks added to block cache. + */ + BLOCK_CACHE_INDEX_ADD((byte) 0x6), + + /** + * Number of bytes of index blocks inserted into cache + */ + BLOCK_CACHE_INDEX_BYTES_INSERT((byte) 0x7), + + /** + * Number of times cache miss when accessing filter block from block cache. + */ + BLOCK_CACHE_FILTER_MISS((byte) 0x8), + + /** + * Number of times cache hit when accessing filter block from block cache. + */ + BLOCK_CACHE_FILTER_HIT((byte) 0x9), + + /** + * Number of filter blocks added to block cache. + */ + BLOCK_CACHE_FILTER_ADD((byte) 0xA), + + /** + * Number of bytes of bloom filter blocks inserted into cache + */ + BLOCK_CACHE_FILTER_BYTES_INSERT((byte) 0xB), + + /** + * Number of times cache miss when accessing data block from block cache. + */ + BLOCK_CACHE_DATA_MISS((byte) 0xC), + + /** + * Number of times cache hit when accessing data block from block cache. + */ + BLOCK_CACHE_DATA_HIT((byte) 0xD), + + /** + * Number of data blocks added to block cache. + */ + BLOCK_CACHE_DATA_ADD((byte) 0xE), + + /** + * Number of bytes of data blocks inserted into cache + */ + BLOCK_CACHE_DATA_BYTES_INSERT((byte) 0xF), + + /** + * Number of bytes read from cache. + */ + BLOCK_CACHE_BYTES_READ((byte) 0x10), + + /** + * Number of bytes written into cache. + */ + BLOCK_CACHE_BYTES_WRITE((byte) 0x11), + + /** + * Number of Block cache Compression dictionary misses. + */ + BLOCK_CACHE_COMPRESSION_DICT_MISS((byte) 0x12), + + /** + * Number of Block cache Compression dictionary hits. + */ + BLOCK_CACHE_COMPRESSION_DICT_HIT((byte) 0x13), + + /** + * Number of Block cache Compression dictionary additions. + */ + BLOCK_CACHE_COMPRESSION_DICT_ADD((byte) 0x14), + + /** + * Number of Block cache Compression dictionary bytes inserted. + */ + BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT((byte) 0x15), + + /** + * Redundant additions to block cache. + */ + BLOCK_CACHE_ADD_REDUNDANT((byte) 0x16), + + /** + * Redundant additions to block cache index. + */ + BLOCK_CACHE_INDEX_ADD_REDUNDANT((byte) 0x17), + + /** + * Redundant additions to block cache filter. + */ + BLOCK_CACHE_FILTER_ADD_REDUNDANT((byte) 0x18), + + /** + * Redundant additions to block cache data. + */ + BLOCK_CACHE_DATA_ADD_REDUNDANT((byte) 0x19), + + /** + * Redundant additions to block cache compression dictionary. + */ + BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT((byte) 0x1A), + + /** + * Number of secondary cache hits. + */ + SECONDARY_CACHE_HITS((byte) 0x1B), + + /** + * Number of secondary cache filter hits. + */ + SECONDARY_CACHE_FILTER_HITS((byte) 0x1C), + + /** + * Number of secondary cache index hits. + */ + SECONDARY_CACHE_INDEX_HITS((byte) 0x1D), + + /** + * Number of secondary cache data hits. + */ + SECONDARY_CACHE_DATA_HITS((byte) 0x1E), + + /** + * Number of compressed secondary cache dummy hits. + */ + COMPRESSED_SECONDARY_CACHE_DUMMY_HITS((byte) 0x1F), + + /** + * Number of compressed secondary cache hits. 
+ */ + COMPRESSED_SECONDARY_CACHE_HITS((byte) 0x20), + + /** + * Number of compressed secondary cache promotions. + */ + COMPRESSED_SECONDARY_CACHE_PROMOTIONS((byte) 0x21), + + /** + * Number of compressed secondary cache promotion skips. + */ + COMPRESSED_SECONDARY_CACHE_PROMOTION_SKIPS((byte) 0x22), + + /** + * Number of times bloom filter has avoided file reads. + */ + BLOOM_FILTER_USEFUL((byte) 0x23), + + /** + * Number of times bloom FullFilter has not avoided the reads. + */ + BLOOM_FILTER_FULL_POSITIVE((byte) 0x24), + + /** + * Number of times bloom FullFilter has not avoided the reads and data actually + * exist. + */ + BLOOM_FILTER_FULL_TRUE_POSITIVE((byte) 0x25), + + /** + * Number of times bloom was checked before creating iterator on a file. + */ + BLOOM_FILTER_PREFIX_CHECKED((byte) 0x26), + + /** + * Number of times it was useful (in avoiding iterator creation) that bloom was checked before + * creating iterator on a file. + */ + BLOOM_FILTER_PREFIX_USEFUL((byte) 0x27), + + /** + * Number of times bloom produced a true positive result. + */ + BLOOM_FILTER_PREFIX_TRUE_POSITIVE((byte) 0x28), + + /** + * Number of persistent cache hits. + */ + PERSISTENT_CACHE_HIT((byte) 0x29), + + /** + * Number of persistent cache misses. + */ + PERSISTENT_CACHE_MISS((byte) 0x2A), + + /** + * Number of total simulation block cache hits + */ + SIM_BLOCK_CACHE_HIT((byte) 0x2B), + + /** + * Number of total simulation block cache misses + */ + SIM_BLOCK_CACHE_MISS((byte) 0x2C), + + /** + * Number of memtable hits. + */ + MEMTABLE_HIT((byte) 0x2D), + + /** + * Number of memtable misses. + */ + MEMTABLE_MISS((byte) 0x2E), + + /** + * Number of Get() queries served by L0 + */ + GET_HIT_L0((byte) 0x2F), + + /** + * Number of Get() queries served by L1 + */ + GET_HIT_L1((byte) 0x30), + + /** + * Number of Get() queries served by L2 and up + */ + GET_HIT_L2_AND_UP((byte) 0x31), + + /** + * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction + * There are 4 reasons currently. + */ + + /** + * key was written with a newer value. + */ + COMPACTION_KEY_DROP_NEWER_ENTRY((byte) 0x32), + + /** + * Also includes keys dropped for range del. + * The key is obsolete. + */ + COMPACTION_KEY_DROP_OBSOLETE((byte) 0x33), + + /** + * key was covered by a range tombstone. + */ + COMPACTION_KEY_DROP_RANGE_DEL((byte) 0x34), + + /** + * User compaction function has dropped the key. + */ + COMPACTION_KEY_DROP_USER((byte) 0x35), + + /** + * all keys in range were deleted. + */ + COMPACTION_RANGE_DEL_DROP_OBSOLETE((byte) 0x36), + + /** + * Deletions obsoleted before bottom level due to file gap optimization. + */ + COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE((byte) 0x37), + + /** + * Compactions cancelled to prevent ENOSPC + */ + COMPACTION_CANCELLED((byte) 0x38), + + /** + * Number of keys written to the database via the Put and Write calls. + */ + NUMBER_KEYS_WRITTEN((byte) 0x39), + + /** + * Number of Keys read. + */ + NUMBER_KEYS_READ((byte) 0x3A), + + /** + * Number of keys updated, if in-place update is enabled + */ + NUMBER_KEYS_UPDATED((byte) 0x3B), + + /** + * The number of uncompressed bytes issued by DB::Put(), DB::Delete(), + * DB::Merge(), and DB::Write(). + */ + BYTES_WRITTEN((byte) 0x3C), + + /** + * The number of uncompressed bytes read from DB::Get(). It could be + * either from memtables, cache, or table files. + * + * For the number of logical bytes read from DB::MultiGet(), + * please use {@link #NUMBER_MULTIGET_BYTES_READ}. + */ + BYTES_READ((byte) 0x3D), + + /** + * The number of calls to seek. 
+ */ + NUMBER_DB_SEEK((byte) 0x3E), + + /** + * The number of calls to next. + */ + NUMBER_DB_NEXT((byte) 0x3F), + + /** + * The number of calls to prev. + */ + NUMBER_DB_PREV((byte) 0x40), + + /** + * The number of calls to seek that returned data. + */ + NUMBER_DB_SEEK_FOUND((byte) 0x41), + + /** + * The number of calls to next that returned data. + */ + NUMBER_DB_NEXT_FOUND((byte) 0x42), + + /** + * The number of calls to prev that returned data. + */ + NUMBER_DB_PREV_FOUND((byte) 0x43), + + /** + * The number of uncompressed bytes read from an iterator. + * Includes size of key and value. + */ + ITER_BYTES_READ((byte) 0x44), + + /** + * Number of internal keys skipped during iteration. + */ + NUMBER_ITER_SKIP((byte) 0x45), + + /** + * Number of times we had to reseek inside an iteration to skip + * over a large number of keys with the same userkey. + */ + NUMBER_OF_RESEEKS_IN_ITERATION((byte) 0x46), + + /** + * Number of iterators created. + */ + NO_ITERATOR_CREATED((byte) 0x47), + + /** + * Number of iterators deleted. + */ + NO_ITERATOR_DELETED((byte) 0x48), + + /** + * Number of file opens. + */ + NO_FILE_OPENS((byte) 0x49), + + /** + * Number of file errors. + */ + NO_FILE_ERRORS((byte) 0x4A), + + /** + * Writer has to wait for compaction or flush to finish. + */ + STALL_MICROS((byte) 0x4B), + + /** + * The wait time for db mutex. + * + * Disabled by default. To enable it set stats level to {@link StatsLevel#ALL} + */ + DB_MUTEX_WAIT_MICROS((byte) 0x4C), + + /** + * Number of MultiGet calls. + */ + NUMBER_MULTIGET_CALLS((byte) 0x4D), + + /** + * Number of MultiGet keys read. + */ + NUMBER_MULTIGET_KEYS_READ((byte) 0x4E), + + /** + * Number of MultiGet bytes read. + */ + NUMBER_MULTIGET_BYTES_READ((byte) 0x4F), + + /** + * Number of MultiGet keys found (vs number requested) + */ + NUMBER_MULTIGET_KEYS_FOUND((byte) 0x50), + + /** + * Number of Merge failures. + */ + NUMBER_MERGE_FAILURES((byte) 0x51), + + /** + * Record the number of calls to {@link RocksDB#getUpdatesSince(long)}. Useful to keep track of + * transaction log iterator refreshes. + */ + GET_UPDATES_SINCE_CALLS((byte) 0x52), + + /** + * Number of times WAL sync is done. + */ + WAL_FILE_SYNCED((byte) 0x53), + + /** + * Number of bytes written to WAL. + */ + WAL_FILE_BYTES((byte) 0x54), + + /** + * Writes can be processed by requesting thread or by the thread at the + * head of the writers queue. + */ + WRITE_DONE_BY_SELF((byte) 0x55), + + /** + * Equivalent to writes done for others. + */ + WRITE_DONE_BY_OTHER((byte) 0x56), + + /** + * Number of Write calls that request WAL. + */ + WRITE_WITH_WAL((byte) 0x57), + + /** + * Bytes read during compaction. + */ + COMPACT_READ_BYTES((byte) 0x58), + + /** + * Bytes written during compaction. + */ + COMPACT_WRITE_BYTES((byte) 0x59), + + /** + * Bytes written during flush. + */ + FLUSH_WRITE_BYTES((byte) 0x5A), + + /** + * Bytes read during compactions triggered by files marked for compaction. + */ + COMPACT_READ_BYTES_MARKED((byte) 0x5B), + + /** + * Bytes read during periodic compactions. + */ + COMPACT_READ_BYTES_PERIODIC((byte) 0x5C), + + /** + * Bytes read during TTL compactions. + */ + COMPACT_READ_BYTES_TTL((byte) 0x5D), + + /** + * Bytes written during compactions triggered by files marked for compaction. + */ + COMPACT_WRITE_BYTES_MARKED((byte) 0x5E), + + /** + * Bytes written during periodic compactions. + */ + COMPACT_WRITE_BYTES_PERIODIC((byte) 0x5F), + + /** + * Bytes written during TTL compactions. + */ + COMPACT_WRITE_BYTES_TTL((byte) 0x60), + + /** + * Number of table properties loaded directly from file, without creating a table reader object. 
+ */ + NUMBER_DIRECT_LOAD_TABLE_PROPERTIES((byte) 0x61), + + /** + * Number of superversion acquires. + */ + NUMBER_SUPERVERSION_ACQUIRES((byte) 0x62), + + /** + * Number of superversion releases. + */ + NUMBER_SUPERVERSION_RELEASES((byte) 0x63), + + /** + * Number of superversion cleanups. + */ + NUMBER_SUPERVERSION_CLEANUPS((byte) 0x64), + + /** + * Number of compressions executed. + */ + NUMBER_BLOCK_COMPRESSED((byte) 0x65), + + /** + * Number of decompressions executed. + */ + NUMBER_BLOCK_DECOMPRESSED((byte) 0x66), + + /** + * Number of input bytes (uncompressed) to compression for SST blocks that are stored compressed. + */ + BYTES_COMPRESSED_FROM((byte) 0x67), + + /** + * Number of output bytes (compressed) from compression for SST blocks that are stored compressed. + */ + BYTES_COMPRESSED_TO((byte) 0x68), + + /** + * Number of uncompressed bytes for SST blocks that are stored uncompressed because compression + * type is kNoCompression, or some error case caused compression not to run or produce an output. + * Index blocks are only counted if enable_index_compression is true. + */ + BYTES_COMPRESSION_BYPASSED((byte) 0x69), + + /** + * Number of input bytes (uncompressed) to compression for SST blocks that are stored uncompressed + * because the compression result was rejected, either because the ratio was not acceptable (see + * CompressionOptions::max_compressed_bytes_per_kb) or found invalid by the `verify_compression` + * option. + */ + BYTES_COMPRESSION_REJECTED((byte) 0x6A), + + /** + * Like {@link #BYTES_COMPRESSION_BYPASSED} but counting number of blocks. + */ + NUMBER_BLOCK_COMPRESSION_BYPASSED((byte) 0x6B), + + /** + * Like {@link #BYTES_COMPRESSION_REJECTED} but counting number of blocks. + */ + NUMBER_BLOCK_COMPRESSION_REJECTED((byte) 0x6C), + + /** + * Number of input bytes (compressed) to decompression in reading compressed SST blocks from + * storage. + */ + BYTES_DECOMPRESSED_FROM((byte) 0x6D), + + /** + * Number of output bytes (uncompressed) from decompression in reading compressed SST blocks from + * storage. + */ + BYTES_DECOMPRESSED_TO((byte) 0x6E), + + /** + * Merge operations cumulative time. + */ + MERGE_OPERATION_TOTAL_TIME((byte) 0x6F), + + /** + * Filter operations cumulative time. + */ + FILTER_OPERATION_TOTAL_TIME((byte) 0x70), + + /** + * Compaction CPU cumulative time. + */ + COMPACTION_CPU_TOTAL_TIME((byte) 0x71), + + /** + * Row cache hits. + */ + ROW_CACHE_HIT((byte) 0x72), + + /** + * Row cache misses. + */ + ROW_CACHE_MISS((byte) 0x73), + + /** + * Read amplification statistics. + * + * Read amplification can be calculated using this formula + * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES) + * + * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled + */ + + /** + * Estimate of total bytes actually used. + */ + READ_AMP_ESTIMATE_USEFUL_BYTES((byte) 0x74), + + /** + * Total size of loaded data blocks. + */ + READ_AMP_TOTAL_READ_BYTES((byte) 0x75), + + /** + * Number of refill intervals where rate limiter's bytes are fully consumed. + */ + NUMBER_RATE_LIMITER_DRAINS((byte) 0x76), + + /** + * BlobDB specific stats + * Number of Put/PutTTL/PutUntil to BlobDB. + */ + BLOB_DB_NUM_PUT((byte) 0x77), + + /** + * Number of Write to BlobDB. + */ + BLOB_DB_NUM_WRITE((byte) 0x78), + + /** + * Number of Get to BlobDB. + */ + BLOB_DB_NUM_GET((byte) 0x79), + + /** + * Number of MultiGet to BlobDB. + */ + BLOB_DB_NUM_MULTIGET((byte) 0x7A), + + /** + * Number of Seek/SeekToFirst/SeekToLast/SeekForPrev to BlobDB iterator. 
+ */ + BLOB_DB_NUM_SEEK((byte) 0x7B), + + /** + * Number of Next to BlobDB iterator. + */ + BLOB_DB_NUM_NEXT((byte) 0x7C), + + /** + * Number of Prev to BlobDB iterator. + */ + BLOB_DB_NUM_PREV((byte) 0x7D), + + /** + * Number of keys written to BlobDB. + */ + BLOB_DB_NUM_KEYS_WRITTEN((byte) 0x7E), + + /** + * Number of keys read from BlobDB. + */ + BLOB_DB_NUM_KEYS_READ((byte) 0x7F), + + /** + * Number of bytes (key + value) written to BlobDB. + */ + BLOB_DB_BYTES_WRITTEN((byte) -0x1), + + /** + * Number of bytes (keys + value) read from BlobDB. + */ + BLOB_DB_BYTES_READ((byte) -0x2), + + /** + * Number of keys written by BlobDB as non-TTL inlined value. + */ + BLOB_DB_WRITE_INLINED((byte) -0x3), + + /** + * Number of keys written by BlobDB as TTL inlined value. + */ + BLOB_DB_WRITE_INLINED_TTL((byte) -0x4), + + /** + * Number of keys written by BlobDB as non-TTL blob value. + */ + BLOB_DB_WRITE_BLOB((byte) -0x5), + + /** + * Number of keys written by BlobDB as TTL blob value. + */ + BLOB_DB_WRITE_BLOB_TTL((byte) -0x6), + + /** + * Number of bytes written to blob file. + */ + BLOB_DB_BLOB_FILE_BYTES_WRITTEN((byte) -0x7), + + /** + * Number of bytes read from blob file. + */ + BLOB_DB_BLOB_FILE_BYTES_READ((byte) -0x8), + + /** + * Number of times a blob file is synced. + */ + BLOB_DB_BLOB_FILE_SYNCED((byte) -0x9), + + /** + * Number of blob indexes evicted from the base DB by the BlobDB compaction filter + * because of expiration. + */ + BLOB_DB_BLOB_INDEX_EXPIRED_COUNT((byte) -0xA), + + /** + * Size of blob indexes evicted from the base DB by the BlobDB compaction filter + * because of expiration. + */ + BLOB_DB_BLOB_INDEX_EXPIRED_SIZE((byte) -0xB), + + /** + * Number of blob indexes evicted from the base DB by the BlobDB compaction filter + * because the corresponding file was deleted. + */ + BLOB_DB_BLOB_INDEX_EVICTED_COUNT((byte) -0xC), + + /** + * Size of blob indexes evicted from the base DB by the BlobDB compaction filter + * because the corresponding file was deleted. + */ + BLOB_DB_BLOB_INDEX_EVICTED_SIZE((byte) -0xD), + + /** + * Number of blob files being garbage collected. + */ + BLOB_DB_GC_NUM_FILES((byte) -0xE), + + /** + * Number of blob files generated by garbage collection. + */ + BLOB_DB_GC_NUM_NEW_FILES((byte) -0xF), + + /** + * Number of BlobDB garbage collection failures. + */ + BLOB_DB_GC_FAILURES((byte) -0x10), + + /** + * Number of keys relocated to new blob file by garbage collection. + */ + BLOB_DB_GC_NUM_KEYS_RELOCATED((byte) -0x11), + + /** + * Number of bytes relocated to new blob file by garbage collection. + */ + BLOB_DB_GC_BYTES_RELOCATED((byte) -0x12), + + /** + * Number of blob files evicted because BlobDB is full. + */ + BLOB_DB_FIFO_NUM_FILES_EVICTED((byte) -0x13), + + /** + * Number of keys in the blob files evicted because BlobDB is full. + */ + BLOB_DB_FIFO_NUM_KEYS_EVICTED((byte) -0x14), + + /** + * Number of bytes in the blob files evicted because BlobDB is full. + */ + BLOB_DB_FIFO_BYTES_EVICTED((byte) -0x15), + + /** + * Number of cache misses when accessing a blob in the blob cache. + */ + BLOB_DB_CACHE_MISS((byte) -0x16), + + /** + * Number of cache hits when accessing a blob in the blob cache. + */ + BLOB_DB_CACHE_HIT((byte) -0x17), + + /** + * Number of blobs added to the blob cache. + */ + BLOB_DB_CACHE_ADD((byte) -0x18), + + /** + * Number of failures when adding blobs to the blob cache. + */ + BLOB_DB_CACHE_ADD_FAILURES((byte) -0x19), + + /** + * Number of bytes read from the blob cache.
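The BLOB_DB_CACHE_HIT and BLOB_DB_CACHE_MISS tickers above combine naturally into a hit rate. A sketch, again assuming a Statistics instance named stats as in the earlier examples:

    final long hits = stats.getTickerCount(TickerType.BLOB_DB_CACHE_HIT);
    final long misses = stats.getTickerCount(TickerType.BLOB_DB_CACHE_MISS);
    final long lookups = hits + misses;
    // The ratio is only meaningful once the blob cache has been accessed.
    final double hitRate = lookups == 0 ? 0.0 : (double) hits / (double) lookups;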
+ */ + BLOB_DB_CACHE_BYTES_READ((byte) -0x1A), + + /** + * Number of bytes written into the blob cache. + */ + BLOB_DB_CACHE_BYTES_WRITE((byte) -0x1B), + + /** + * These counters indicate a performance issue in WritePrepared transactions. + * We should not see them ticking much. + * Number of times prepare_mutex_ is acquired in the fast path. + */ + TXN_PREPARE_MUTEX_OVERHEAD((byte) -0x1C), + + /** + * Number of times old_commit_map_mutex_ is acquired in the fast path. + */ + TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD((byte) -0x1D), + + /** + * Number of times we checked a batch for duplicate keys. + */ + TXN_DUPLICATE_KEY_OVERHEAD((byte) -0x1E), + + /** + * Number of times snapshot_mutex_ is acquired in the fast path. + */ + TXN_SNAPSHOT_MUTEX_OVERHEAD((byte) -0x1F), + + /** + * Number of times ::Get returned TryAgain due to an expired snapshot seq. + */ + TXN_GET_TRY_AGAIN((byte) -0x20), + + /** + * Number of files marked as trash by the delete scheduler. + */ + FILES_MARKED_TRASH((byte) -0x21), + + /** + * Number of trash files deleted by the background thread from the trash queue. + */ + FILES_DELETED_FROM_TRASH_QUEUE((byte) -0x22), + + /** + * Number of files deleted immediately by the delete scheduler. + */ + FILES_DELETED_IMMEDIATELY((byte) -0x23), + + /** + * Number of background errors handled by the error handler. + */ + ERROR_HANDLER_BG_ERROR_COUNT((byte) -0x24), + + /** + * Number of background I/O errors handled by the error handler. + */ + ERROR_HANDLER_BG_IO_ERROR_COUNT((byte) -0x25), + + /** + * Number of retryable background I/O errors handled by the error handler. + * This is a subset of {@link #ERROR_HANDLER_BG_IO_ERROR_COUNT}. + */ + ERROR_HANDLER_BG_RETRYABLE_IO_ERROR_COUNT((byte) -0x26), + + /** + * Number of auto resumes handled by the error handler. + */ + ERROR_HANDLER_AUTORESUME_COUNT((byte) -0x27), + + /** + * Total number of auto resume retries handled by the error handler. + */ + ERROR_HANDLER_AUTORESUME_RETRY_TOTAL_COUNT((byte) -0x28), + + /** + * Number of auto resumes handled by the error handler that succeeded. + */ + ERROR_HANDLER_AUTORESUME_SUCCESS_COUNT((byte) -0x29), + + /** + * Bytes of raw data (payload) found on memtable at flush time. + * Contains the sum of garbage payload (bytes that are discarded + * at flush time) and useful payload (bytes of data that will + * eventually be written to SSTable). + */ + MEMTABLE_PAYLOAD_BYTES_AT_FLUSH((byte) -0x2A), + + /** + * Outdated bytes of data present on memtable at flush time. + */ + MEMTABLE_GARBAGE_BYTES_AT_FLUSH((byte) -0x2B), + + /** + * Bytes read by `VerifyChecksum()` and `VerifyFileChecksums()` APIs. + */ + VERIFY_CHECKSUM_READ_BYTES((byte) -0x2C), + + /** + * Bytes read whilst creating backups. + */ + BACKUP_READ_BYTES((byte) -0x2D), + + /** + * Bytes written whilst creating backups. + */ + BACKUP_WRITE_BYTES((byte) -0x2E), + + /** + * Remote compaction bytes read. + */ + REMOTE_COMPACT_READ_BYTES((byte) -0x2F), + + /** + * Remote compaction bytes written. + */ + REMOTE_COMPACT_WRITE_BYTES((byte) -0x30), + + /** + * Bytes read from hot files. + */ + HOT_FILE_READ_BYTES((byte) -0x31), + + /** + * Bytes read from warm files. + */ + WARM_FILE_READ_BYTES((byte) -0x32), + + /** + * Bytes read from cool files. + */ + COOL_FILE_READ_BYTES((byte) -0x5B), + + /** + * Bytes read from cold files. + */ + COLD_FILE_READ_BYTES((byte) -0x33), + + /** + * Bytes read from ice cold files. + */ + ICE_FILE_READ_BYTES((byte) -0x59), + + /** + * Number of reads from hot files. + */ + HOT_FILE_READ_COUNT((byte) -0x34), + + /** + * Number of reads from warm files.
+ */ + WARM_FILE_READ_COUNT((byte) -0x35), + + /** + * Number of reads from cool files. + */ + COOL_FILE_READ_COUNT((byte) -0x5C), + + /** + * Number of reads from cold files. + */ + COLD_FILE_READ_COUNT((byte) -0x36), + + /** + * Number of reads from ice cold files. + */ + ICE_FILE_READ_COUNT((byte) -0x5A), + + /** + * Bytes read from the last level. + */ + LAST_LEVEL_READ_BYTES((byte) -0x37), + + /** + * Number of reads from the last level. + */ + LAST_LEVEL_READ_COUNT((byte) -0x38), + + /** + * Bytes read from the non-last level. + */ + NON_LAST_LEVEL_READ_BYTES((byte) -0x39), + + /** + * Number of reads from the non-last level. + */ + NON_LAST_LEVEL_READ_COUNT((byte) -0x3A), + + /** + * Statistics on iterator Seek() (and variants) for each sorted run. + * i.e. a single user Seek() can result in many sorted run Seek()s. + * The stats are split between last level and non-last level. + * Filtered: a filter such as a prefix Bloom filter indicates the Seek() would + * not find anything relevant, so it avoided a likely access to data+index + * blocks. + */ + + LAST_LEVEL_SEEK_FILTERED((byte) -0x3B), + /** + * Filter match: a filter such as a prefix Bloom filter was queried but did + * not filter out the seek. + */ + LAST_LEVEL_SEEK_FILTER_MATCH((byte) -0x3C), + + /** + * At least one data block was accessed for a Seek() (or variant) on a + * sorted run. + */ + LAST_LEVEL_SEEK_DATA((byte) -0x3D), + + /** + * At least one value() was accessed for the seek (suggesting it was useful), + * and no filter such as prefix Bloom was queried. + */ + LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER((byte) -0x3E), + + /** + * At least one value() was accessed for the seek (suggesting it was useful), + * after querying a filter such as prefix Bloom. + */ + LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH((byte) -0x3F), + + /** + * Similar to {@link #LAST_LEVEL_SEEK_FILTERED} but for the non-last level. + */ + NON_LAST_LEVEL_SEEK_FILTERED((byte) -0x40), + + /** + * Similar to {@link #LAST_LEVEL_SEEK_FILTER_MATCH} but for the non-last level. + */ + NON_LAST_LEVEL_SEEK_FILTER_MATCH((byte) -0x41), + + /** + * Similar to {@link #LAST_LEVEL_SEEK_DATA} but for the non-last level. + */ + NON_LAST_LEVEL_SEEK_DATA((byte) -0x42), + + /** + * Similar to {@link #LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER} but for the non-last level. + */ + NON_LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER((byte) -0x43), + + /** + * Similar to {@link #LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH} but for the non-last level. + */ + NON_LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH((byte) -0x44), + + /** + * Number of block checksum verifications. + */ + BLOCK_CHECKSUM_COMPUTE_COUNT((byte) -0x45), + + /** + * Number of times RocksDB detected a corruption while verifying a block + * checksum. RocksDB does not remember corruptions that happened during user + * reads so the same block corruption may be detected multiple times. + */ + BLOCK_CHECKSUM_MISMATCH_COUNT((byte) -0x46), + + /** + * Number of MultiGet coroutines. + */ + MULTIGET_COROUTINE_COUNT((byte) -0x47), + + /** + * Time spent in the ReadAsync file system call. + */ + READ_ASYNC_MICROS((byte) -0x48), + + /** + * Number of errors returned to the async read callback.
+ */ + ASYNC_READ_ERROR_COUNT((byte) -0x49), + + /** + * Number of lookups into the prefetched tail (see + * `TABLE_OPEN_PREFETCH_TAIL_READ_BYTES`) + * that can't find their data for table open. + */ + TABLE_OPEN_PREFETCH_TAIL_MISS((byte) -0x4A), + + /** + * Number of lookups into the prefetched tail (see + * `TABLE_OPEN_PREFETCH_TAIL_READ_BYTES`) + * that find their data for table open. + */ + TABLE_OPEN_PREFETCH_TAIL_HIT((byte) -0x4B), + + /** + * Number of times timestamps are checked on accessing the table. + */ + TIMESTAMP_FILTER_TABLE_CHECKED((byte) -0x4C), + + /** + * Number of times timestamps can successfully help skip the table access. + */ + TIMESTAMP_FILTER_TABLE_FILTERED((byte) -0x4D), + + /** + * Number of times readahead is trimmed during scans when ReadOptions.auto_readahead_size is set. + */ + READAHEAD_TRIMMED((byte) -0x4E), + + /** + * Number of FIFO compactions triggered by the maximum size. + */ + FIFO_MAX_SIZE_COMPACTIONS((byte) -0x4F), + + /** + * Number of FIFO compactions triggered by TTL. + */ + FIFO_TTL_COMPACTIONS((byte) -0x50), + + /** + * Number of FIFO compactions that change the temperature of files. + */ + FIFO_CHANGE_TEMPERATURE_COMPACTIONS((byte) -0x58), + + /** + * Number of bytes prefetched during user initiated scan. + */ + PREFETCH_BYTES((byte) -0x51), + + /** + * Number of prefetched bytes that were actually useful during user initiated scan. + */ + PREFETCH_BYTES_USEFUL((byte) -0x52), + + /** + * Number of FS reads avoided due to prefetching during user initiated scan. + */ + PREFETCH_HITS((byte) -0x53), + + /** + * Footer corruption detected when opening an SST file for reading. + */ + SST_FOOTER_CORRUPTION_COUNT((byte) -0x55), + + /** + * Counter for file read retries with the verify_and_reconstruct_read file system option after + * detecting a checksum mismatch. + */ + FILE_READ_CORRUPTION_RETRY_COUNT((byte) -0x56), + + /** + * Counter for successful file read retries with the verify_and_reconstruct_read file system + * option after detecting a checksum mismatch. + */ + FILE_READ_CORRUPTION_RETRY_SUCCESS_COUNT((byte) -0x57), + + /** + * Counter for the number of times a WBWI is ingested into the DB. This + * happens when IngestWriteBatchWithIndex() is used and when large + * transaction optimization is enabled through + * TransactionOptions::large_txn_commit_optimize_threshold. + */ + NUMBER_WBWI_INGEST((byte) -0x5D), + + /** + * Number of failures to load the UDI during SST table open. + */ + SST_USER_DEFINED_INDEX_LOAD_FAIL_COUNT((byte) -0x5E), + + /** + * Bytes of output files successfully resumed during remote compaction. + */ + REMOTE_COMPACT_RESUMED_BYTES((byte) -0x5F), + + /** + * MultiScan statistics + */ + + /** + * Number of calls to Iterator::Prepare() for multi-scan. + */ + MULTISCAN_PREPARE_CALLS((byte) -0x60), + + /** + * Number of errors during Iterator::Prepare() for multi-scan. + */ + MULTISCAN_PREPARE_ERRORS((byte) -0x61), + + /** + * Number of data blocks prefetched during multi-scan Prepare(). + */ + MULTISCAN_BLOCKS_PREFETCHED((byte) -0x62), + + /** + * Number of data blocks found in cache during multi-scan Prepare(). + */ + MULTISCAN_BLOCKS_FROM_CACHE((byte) -0x63), + + /** + * Total bytes prefetched during multi-scan Prepare(). + */ + MULTISCAN_PREFETCH_BYTES((byte) -0x64), + + /** + * Number of prefetched blocks that were never accessed (wasted). + */ + MULTISCAN_PREFETCH_BLOCKS_WASTED((byte) -0x65), + + /** + * Number of I/O requests issued during multi-scan Prepare(). + */ + MULTISCAN_IO_REQUESTS((byte) -0x66), + + /** + * Number of non-adjacent blocks coalesced into a single I/O request.
+ */ + MULTISCAN_IO_COALESCED_NONADJACENT((byte) -0x67), + + /** + * Number of seek errors during multi-scan iteration. + */ + MULTISCAN_SEEK_ERRORS((byte) -0x68), + + /** + * Maximum number of ticker types. + */ + TICKER_ENUM_MAX((byte) -0x54); + + private final byte value; + + TickerType(final byte value) { + this.value = value; + } /** * Returns the byte value of the enumerations value diff --git a/java/src/main/java/org/rocksdb/TraceOptions.java b/java/src/main/java/org/rocksdb/TraceOptions.java index cf5f7bbe12f8..85c8abcc2500 100644 --- a/java/src/main/java/org/rocksdb/TraceOptions.java +++ b/java/src/main/java/org/rocksdb/TraceOptions.java @@ -12,10 +12,18 @@ public class TraceOptions { private final long maxTraceFileSize; + /** + * Constructs a TraceOptions. + */ public TraceOptions() { this.maxTraceFileSize = 64L * 1024L * 1024L * 1024L; // 64 GB } + /** + * Constructs a TraceOptions. + * + * @param maxTraceFileSize the maximum size of the trace file. + */ public TraceOptions(final long maxTraceFileSize) { this.maxTraceFileSize = maxTraceFileSize; } diff --git a/java/src/main/java/org/rocksdb/Transaction.java b/java/src/main/java/org/rocksdb/Transaction.java index ee8656460835..827d2e6aa8e6 100644 --- a/java/src/main/java/org/rocksdb/Transaction.java +++ b/java/src/main/java/org/rocksdb/Transaction.java @@ -184,7 +184,9 @@ public void clearSnapshot() { } /** - * Prepare the current transaction for 2PC + * Prepare the current transaction for 2PC. + * + * @throws RocksDBException if the transaction cannot be prepared */ public void prepare() throws RocksDBException { //TODO(AR) consider a Java'ish version of this function, which returns an AutoCloseable (commit) @@ -257,7 +259,7 @@ public void rollbackToSavePoint() throws RocksDBException { /** * This function has an inconsistent parameter order compared to other {@code get()} * methods and is deprecated in favour of one with a consistent order. - * + *

* This function is similar to * {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])} except it will * also read pending changes in this transaction. @@ -297,11 +299,11 @@ public byte[] get(final ColumnFamilyHandle columnFamilyHandle, final ReadOptions * also read pending changes in this transaction. * Currently, this function will return Status::MergeInProgress if the most * recent write to the queried key in this batch is a Merge. - * + *

* If {@link ReadOptions#snapshot()} is not set, the current version of the * key will be read. Calling {@link #setSnapshot()} does not affect the * version of the data returned. - * + *

* Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect * what is read from the DB but will NOT change which keys are read from this * transaction (the keys in this transaction do not yet belong to any snapshot @@ -560,7 +562,7 @@ public byte[][] multiGet(final ReadOptions readOptions, * {@link org.rocksdb.ColumnFamilyHandle} instances. * @param keys of keys for which values need to be retrieved. * - * @return Array of values, one for each key + * @return list of values, one for each key * * @throws RocksDBException thrown if error happens in underlying * native library. @@ -646,7 +648,7 @@ public byte[][] multiGet(final ReadOptions readOptions, final byte[][] keys) * {@link org.rocksdb.ColumnFamilyHandle} instances. * @param keys of keys for which values need to be retrieved. * - * @return Array of values, one for each key + * @return list of values, one for each key * * @throws RocksDBException thrown if error happens in underlying * native library. @@ -1189,7 +1191,6 @@ public GetStatus getForUpdate(final ReadOptions readOptions, /** * A multi-key version of * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}. - *

* * @param readOptions Read options. * @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle} @@ -1225,14 +1226,13 @@ public byte[][] multiGetForUpdate(final ReadOptions readOptions, /** * A multi-key version of * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}. - *

* * @param readOptions Read options. * @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle} * instances * @param keys the keys to retrieve the values for. * - * @return Array of values, one for each key + * @return list of values, one for each key * * @throws RocksDBException thrown if error happens in underlying * native library. @@ -1261,7 +1261,6 @@ public List<byte[]> multiGetForUpdateAsList(final ReadOptions readOptions, /** * A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}. - *

* * @param readOptions Read options. * @param keys the keys to retrieve the values for. @@ -1285,7 +1284,6 @@ public byte[][] multiGetForUpdate(final ReadOptions readOptions, final byte[][] /** * A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}. - *

* * @param readOptions Read options. * @param keys the keys to retrieve the values for. @@ -1332,7 +1330,7 @@ public RocksIterator getIterator() { * Returns an iterator that will iterate on all keys in the default * column family including both keys in the DB and uncommitted keys in this * transaction. - * + *

* Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read * from the DB but will NOT change which keys are read from this transaction * (the keys in this transaction do not yet belong to any snapshot and will be @@ -1555,10 +1553,10 @@ public void put(final ColumnFamilyHandle columnFamilyHandle, /** * Similar to {@link RocksDB#put(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1593,10 +1591,10 @@ public void put(final ByteBuffer key, final ByteBuffer value) throws RocksDBExce /** * Similar to {@link RocksDB#put(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1635,6 +1633,29 @@ public void put(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer ke key.position(key.limit()); value.position(value.limit()); } + + /** + * Similar to {@link RocksDB#put(byte[], byte[])}, but + * will also perform conflict checking on the keys being written. + *

+ * If this Transaction was created on an {@link OptimisticTransactionDB}, + * these functions should always succeed. + *

+ * If this Transaction was created on a {@link TransactionDB}, an + * {@link RocksDBException} may be thrown with an accompanying {@link Status} + * when: + * {@link Status.Code#Busy} if there is a write conflict, + * {@link Status.Code#TimedOut} if a lock could not be acquired, + * {@link Status.Code#TryAgain} if the memtable history size is not large + * enough. + * + * @param columnFamilyHandle The column family to put the key/value into + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ public void put(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key, final ByteBuffer value) throws RocksDBException { put(columnFamilyHandle, key, value, false); @@ -1755,10 +1776,10 @@ public void merge(final byte[] key, final byte[] value) /** * Similar to {@link RocksDB#merge(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1791,10 +1812,10 @@ public void merge(final ByteBuffer key, final ByteBuffer value) throws RocksDBEx /** * Similar to {@link RocksDB#merge(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1833,10 +1854,10 @@ public void merge(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer /** * Similar to {@link RocksDB#merge(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -2283,10 +2304,10 @@ public void mergeUntracked(final ColumnFamilyHandle columnFamilyHandle, * Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])}, * but operates on the transactions write batch. This write will only happen * if this transaction gets committed successfully. - * + *

* Unlike {@link #merge(ColumnFamilyHandle, byte[], byte[])} no conflict * checking will be performed for this key. - * + *

* If this Transaction was created on a {@link TransactionDB}, this function * will still acquire locks necessary to make sure this write doesn't cause * conflicts in other transactions; This may cause a {@link RocksDBException} @@ -2346,10 +2367,10 @@ public void mergeUntracked(final byte[] key, final byte[] value) * Similar to {@link RocksDB#merge(byte[], byte[])}, * but operates on the transactions write batch. This write will only happen * if this transaction gets committed successfully. - * + *

* Unlike {@link #merge(byte[], byte[])} no conflict * checking will be performed for this key. - * + *

* If this Transaction was created on a {@link TransactionDB}, this function * will still acquire locks necessary to make sure this write doesn't cause * conflicts in other transactions; This may cause a {@link RocksDBException} @@ -2792,21 +2813,58 @@ public long getId() { return getId(nativeHandle_); } + /** + * States of a Transaction. + */ public enum TransactionState { + /** + * Transaction started. + */ STARTED((byte)0), + + /** + * Transaction is awaiting prepare. + */ AWAITING_PREPARE((byte)1), + + /** + * Transaction is prepared. + */ PREPARED((byte)2), + + /** + * Transaction is awaiting commit. + */ AWAITING_COMMIT((byte)3), + + /** + * Transaction is committed. + */ COMMITTED((byte)4), + + /** + * Transaction is awaiting rollback. + */ AWAITING_ROLLBACK((byte)5), + + /** + * Transaction rolled-back. + */ ROLLEDBACK((byte)6), + + /** + * Transaction locks have been stolen. + */ LOCKS_STOLEN((byte)7); - /* - * Keep old misspelled variable as alias - * Tip from https://stackoverflow.com/a/37092410/454544 + /** + * Old misspelled variable as alias for {@link #COMMITTED}. + * Tip from https://stackoverflow.com/a/37092410/454544 + * + * @deprecated use {@link #COMMITTED} instead. */ - public static final TransactionState COMMITED = COMMITTED; + @Deprecated public static final TransactionState COMMITED = COMMITTED; private final byte value; @@ -2850,6 +2908,9 @@ private WaitingTransactions newWaitingTransactions( return new WaitingTransactions(columnFamilyId, key, transactionIds); } + /** + * Waiting Transactions. + */ public static class WaitingTransactions { private final long columnFamilyId; private final String key; diff --git a/java/src/main/java/org/rocksdb/TransactionDB.java b/java/src/main/java/org/rocksdb/TransactionDB.java index 0f75e5f97019..cff970f6eef9 100644 --- a/java/src/main/java/org/rocksdb/TransactionDB.java +++ b/java/src/main/java/org/rocksdb/TransactionDB.java @@ -218,6 +218,14 @@ public Transaction beginTransaction(final WriteOptions writeOptions, return oldTransaction; } + /** + * Gets a transaction by name. + * + * @param transactionName the name of the transaction. + * + * @return the transaction, or null if the transaction can't be found. + * + */ public Transaction getTransactionByName(final String transactionName) { final long jtxnHandle = getTransactionByName(nativeHandle_, transactionName); if(jtxnHandle == 0) { @@ -232,6 +240,11 @@ public Transaction getTransactionByName(final String transactionName) { return txn; } + /** + * Gets a list of all prepared transactions. + * + * @return the list of prepared transactions. + */ public List<Transaction> getAllPreparedTransactions() { final long[] jtxnHandles = getAllPreparedTransactions(nativeHandle_); @@ -247,11 +260,21 @@ public List<Transaction> getAllPreparedTransactions() { return txns; } + /** + * Information on Key Locks. + */ public static class KeyLockInfo { private final String key; private final long[] transactionIDs; private final boolean exclusive; + /** + * Constructs a KeyLockInfo. + * + * @param key the key. + * @param transactionIDs the transaction ids. + * @param exclusive true if the lock is exclusive, false if the lock is shared. + */ @SuppressWarnings("PMD.ArrayIsStoredDirectly") public KeyLockInfo(final String key, final long[] transactionIDs, final boolean exclusive) { this.key = key; @@ -315,6 +338,9 @@ private DeadlockInfo newDeadlockInfo(final long transactionID, final long column waitingKey, exclusive); } + /** + * Information on a Deadlock.
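The TransactionState values documented above follow the two-phase-commit lifecycle. A minimal sketch of that lifecycle, assumed to run in a method that throws RocksDBException; the database path and transaction name are illustrative:

    try (final Options options = new Options().setCreateIfMissing(true);
         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
         final TransactionDB txnDb =
             TransactionDB.open(options, txnDbOptions, "/tmp/txn-example");
         final WriteOptions writeOptions = new WriteOptions();
         final Transaction txn = txnDb.beginTransaction(writeOptions)) {
      txn.setName("example-txn");                    // a name is required before prepare()
      txn.put("key".getBytes(), "value".getBytes()); // state: STARTED
      txn.prepare();                                 // state: PREPARED
      txn.commit();                                  // state: COMMITTED
    }

Between prepare() and commit(), such a named transaction is also discoverable through the getTransactionByName and getAllPreparedTransactions methods documented here.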
+ */ public static class DeadlockInfo { private final long transactionID; private final long columnFamilyId; @@ -366,25 +392,49 @@ public boolean isExclusive() { } } + /** + * The path of a Deadlock. + */ public static class DeadlockPath { final DeadlockInfo[] path; final boolean limitExceeded; + /** + * Constructs a DeadlockPath. + * + * @param path the deadlock path. + * @param limitExceeded true if the limit is exceeded, false otherwise. + */ @SuppressWarnings("PMD.ArrayIsStoredDirectly") public DeadlockPath(final DeadlockInfo[] path, final boolean limitExceeded) { this.path = path; this.limitExceeded = limitExceeded; } + /** + * Returns true if there are no paths and the limit is not exceeded. + * + * @return true if empty, false otherwise. + */ public boolean isEmpty() { return path.length == 0 && !limitExceeded; } } + /** + * Get Deadlock Information. + * + * @return the deadlock paths. + */ public DeadlockPath[] getDeadlockInfoBuffer() { return getDeadlockInfoBuffer(nativeHandle_); } + /** + * Set the size of the deadlock information buffer. + * + * @param targetSize the target size of the buffer. + */ public void setDeadlockInfoBufferSize(final int targetSize) { setDeadlockInfoBufferSize(nativeHandle_, targetSize); } diff --git a/java/src/main/java/org/rocksdb/TransactionDBOptions.java b/java/src/main/java/org/rocksdb/TransactionDBOptions.java index 8257d50f7a60..15e8c2e52bcd 100644 --- a/java/src/main/java/org/rocksdb/TransactionDBOptions.java +++ b/java/src/main/java/org/rocksdb/TransactionDBOptions.java @@ -5,8 +5,13 @@ package org.rocksdb; +/** + * Options for TransactionDB. + */ public class TransactionDBOptions extends RocksObject { - + /** + * Constructs a TransactionDBOptions. + */ public TransactionDBOptions() { super(newTransactionDBOptions()); } @@ -110,16 +115,13 @@ public TransactionDBOptions setTransactionLockTimeout( /** * The wait timeout in milliseconds when writing a key - OUTSIDE of a transaction (ie by calling {@link RocksDB#put}, - {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write} - directly). - *

- If 0, no waiting is done if a lock cannot instantly be acquired. - If negative, there is no timeout and will block indefinitely when acquiring - a lock. + OUTSIDE of a transaction (i.e. by calling {@link RocksDB#put(byte[], byte[])}, + {@link RocksDB#merge(byte[], byte[])}, {@link RocksDB#delete(WriteOptions, byte[])} or {@link + RocksDB#write(WriteOptions, WriteBatch)} directly).

If 0, no waiting is done if a lock + cannot instantly be acquired. If negative, there is no timeout and it will block indefinitely when + acquiring a lock. * - * @return the timeout in milliseconds when writing a key OUTSIDE of a - transaction + * @return the timeout in milliseconds when writing a key outside of a transaction */ public long getDefaultLockTimeout() { assert(isOwningHandle()); @@ -128,24 +130,17 @@ public long getDefaultLockTimeout() { /** * If positive, specifies the wait timeout in milliseconds when writing a key - OUTSIDE of a transaction (ie by calling {@link RocksDB#put}, - {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write} - directly). - *

- * If 0, no waiting is done if a lock cannot instantly be acquired. - * If negative, there is no timeout and will block indefinitely when acquiring - * a lock. - *

- * Not using a timeout can lead to deadlocks. Currently, there - * is no deadlock-detection to recover from a deadlock. While DB writes - * cannot deadlock with other DB writes, they can deadlock with a transaction. - * A negative timeout should only be used if all transactions have a small - * expiration set. - *

- * Default: 1000 + * OUTSIDE of a transaction (ie by calling {@link RocksDB#put(byte[], byte[])}, + * {@link RocksDB#merge(byte[], byte[])}, {@link RocksDB#delete(byte[])} or {@link + * RocksDB#write(WriteOptions, WriteBatch)} directly).

If 0, no waiting is done if a lock + cannot instantly be acquired. If negative, there is no timeout and it will block indefinitely when + acquiring a lock.

Not using a timeout can lead to deadlocks. Currently, there is no + * deadlock-detection to recover from a deadlock. While DB writes cannot deadlock with other DB + * writes, they can deadlock with a transaction. A negative timeout should only be used if all + * transactions have a small expiration set.

Default: 1000 * * @param defaultLockTimeout the timeout in milliseconds when writing a key - OUTSIDE of a transaction + outside of a transaction * @return this TransactionDBOptions instance */ public TransactionDBOptions setDefaultLockTimeout(final long defaultLockTimeout) { diff --git a/java/src/main/java/org/rocksdb/TransactionOptions.java b/java/src/main/java/org/rocksdb/TransactionOptions.java index d2efeb87ce4a..924e931e78f8 100644 --- a/java/src/main/java/org/rocksdb/TransactionOptions.java +++ b/java/src/main/java/org/rocksdb/TransactionOptions.java @@ -5,9 +5,14 @@ package org.rocksdb; +/** + * Options for a Transaction. + */ public class TransactionOptions extends RocksObject implements TransactionalOptions { - + /** + * Constructs a TransactionOptions. + */ public TransactionOptions() { super(newTransactionOptions()); } @@ -56,7 +61,7 @@ public TransactionOptions setDeadlockDetect(final boolean deadlockDetect) { /** * The wait timeout in milliseconds when a transaction attempts to lock a key. *

* If 0, no waiting is done if a lock cannot instantly be acquired. - * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)} + * If negative, {@link TransactionDBOptions#getTransactionLockTimeout()} * will be used * * @return the lock timeout in milliseconds @@ -71,7 +76,7 @@ public long getLockTimeout() { * a transaction attempts to lock a key. *

* If 0, no waiting is done if a lock cannot instantly be acquired. - * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)} + * If negative, {@link TransactionDBOptions#getTransactionLockTimeout()} * will be used *

* Default: -1 diff --git a/java/src/main/java/org/rocksdb/UInt64AddOperator.java b/java/src/main/java/org/rocksdb/UInt64AddOperator.java index 536ba58d8352..f43903706930 100644 --- a/java/src/main/java/org/rocksdb/UInt64AddOperator.java +++ b/java/src/main/java/org/rocksdb/UInt64AddOperator.java @@ -10,9 +10,12 @@ * integer value. */ public class UInt64AddOperator extends MergeOperator { - public UInt64AddOperator() { - super(newSharedUInt64AddOperator()); - } + /** + * Constructs a UInt64AddOperator. + */ + public UInt64AddOperator() { + super(newSharedUInt64AddOperator()); + } private static native long newSharedUInt64AddOperator(); @Override diff --git a/java/src/main/java/org/rocksdb/VectorMemTableConfig.java b/java/src/main/java/org/rocksdb/VectorMemTableConfig.java index d87efb1b7fbf..428727c80e8c 100644 @@ -5,6 +5,9 @@ * The config for vector memtable representation. */ public class VectorMemTableConfig extends MemTableConfig { + /** + * The default reserved size for the Vector Mem Table. + */ public static final int DEFAULT_RESERVED_SIZE = 0; /** diff --git a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java index 5f7b7b8a1d70..bdfdae31ebf4 100644 --- a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java +++ b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java @@ -7,10 +7,20 @@ import java.nio.ByteBuffer; +/** + * Iterator over the contents of a Write Batch With Index. + */ public class WBWIRocksIterator extends AbstractRocksIterator<WriteBatchWithIndex> { private final WriteEntry entry = new WriteEntry(); + /** + * Constructs a WBWIRocksIterator. + * + * @param wbwi the write batch with index. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ WBWIRocksIterator. + */ protected WBWIRocksIterator(final WriteBatchWithIndex wbwi, final long nativeHandle) { super(wbwi, nativeHandle); @@ -127,12 +137,39 @@ private static native void seekForPrevByteArray0Jni( * that created the record in the Write Batch */ public enum WriteType { + /** + * Put. + */ PUT((byte)0x0), + + /** + * Merge. + */ MERGE((byte)0x1), + + /** + * Delete. + */ DELETE((byte)0x2), + + /** + * Single Delete. + */ SINGLE_DELETE((byte)0x3), + + /** + * Delete Range. + */ DELETE_RANGE((byte)0x4), + + /** + * Log. + */ LOG((byte)0x5), + + /** + * Transaction ID. + */ XID((byte)0x6); final byte id; @@ -140,13 +177,23 @@ public enum WriteType { this.id = id; } - public static WriteType fromId(final byte id) { + /** + * Get a WriteType from its byte representation. + * + * @param value the byte representation of the WriteType. + * + * @return the WriteType + * + * @throws IllegalArgumentException if the {@code value} parameter does not represent a + * WriteType. + */ + public static WriteType fromId(final byte value) { for(final WriteType wt : WriteType.values()) { - if(id == wt.id) { + if (value == wt.id) { return wt; } } - throw new IllegalArgumentException("No WriteType with id=" + id); + throw new IllegalArgumentException("No WriteType with id=" + value); } } @@ -182,6 +229,13 @@ private WriteEntry() { value = new DirectSlice(); } + /** + * Constructs a WriteEntry. + * + * @param type the type of the write. + * @param key the key. + * @param value the value.
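Tying the TransactionDBOptions and TransactionOptions timeout settings above together, a sketch with illustrative values:

    try (final TransactionDBOptions txnDbOptions = new TransactionDBOptions()
             .setDefaultLockTimeout(1000)        // ms, for writes outside any transaction
             .setTransactionLockTimeout(1000);   // ms, default for transactions
         final TransactionOptions txnOptions = new TransactionOptions()
             .setLockTimeout(-1)) {              // negative: fall back to the DB-wide default
      // txnDbOptions would be passed to TransactionDB.open(...), and txnOptions to
      // TransactionDB#beginTransaction(WriteOptions, TransactionOptions).
    }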
+ */ public WriteEntry(final WriteType type, final DirectSlice key, final DirectSlice value) { this.type = type; diff --git a/java/src/main/java/org/rocksdb/WalFileType.java b/java/src/main/java/org/rocksdb/WalFileType.java index fed27ed11705..371f2e7b2ff6 100644 --- a/java/src/main/java/org/rocksdb/WalFileType.java +++ b/java/src/main/java/org/rocksdb/WalFileType.java @@ -5,6 +5,9 @@ package org.rocksdb; +/** + * Types of WAL file. + */ public enum WalFileType { /** * Indicates that WAL file is in archive directory. WAL files are moved from diff --git a/java/src/main/java/org/rocksdb/WalFilter.java b/java/src/main/java/org/rocksdb/WalFilter.java index a2836634af65..3b04eafaca00 100644 --- a/java/src/main/java/org/rocksdb/WalFilter.java +++ b/java/src/main/java/org/rocksdb/WalFilter.java @@ -56,7 +56,13 @@ LogRecordFoundResult logRecordFound(final long logNumber, final String logFileName, final WriteBatch batch, final WriteBatch newBatch); + /** + * LogRecordFoundResult. + */ class LogRecordFoundResult { + /** + * Constant for continuing processing unchanged. + */ public static LogRecordFoundResult CONTINUE_UNCHANGED = new LogRecordFoundResult(WalProcessingOption.CONTINUE_PROCESSING, false); diff --git a/java/src/main/java/org/rocksdb/WalProcessingOption.java b/java/src/main/java/org/rocksdb/WalProcessingOption.java index 3a9c2be0e3b5..a37c83634587 100644 --- a/java/src/main/java/org/rocksdb/WalProcessingOption.java +++ b/java/src/main/java/org/rocksdb/WalProcessingOption.java @@ -5,8 +5,11 @@ package org.rocksdb; +/** + * Options for WAL processing. + */ public enum WalProcessingOption { - /* + /** * Continue processing as usual. */ CONTINUE_PROCESSING((byte)0x0), @@ -42,6 +45,15 @@ byte getValue() { return value; } + /** + * Get an option from its byte representation. + * + * @param value the byte representation of the option. + * + * @return the option + * + * @throws IllegalArgumentException if the {@code value} parameter does not represent an option. + */ public static WalProcessingOption fromValue(final byte value) { for (final WalProcessingOption walProcessingOption : WalProcessingOption.values()) { if (walProcessingOption.value == value) { diff --git a/java/src/main/java/org/rocksdb/WriteBatch.java b/java/src/main/java/org/rocksdb/WriteBatch.java index 1802d929c226..32bd1f6c458e 100644 --- a/java/src/main/java/org/rocksdb/WriteBatch.java +++ b/java/src/main/java/org/rocksdb/WriteBatch.java @@ -392,6 +392,9 @@ private static native void iterate(final long handle, final long handlerHandle) * Handler callback for iterating over the contents of a batch. */ public abstract static class Handler extends RocksCallbackObject { + /** + * Constructs a Handler. + */ public Handler() { super(0L); } @@ -401,39 +404,182 @@ protected long initializeNative(final long... nativeParameterHandles) { return createNewHandler0(); } + /** + * Put operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the put operation. + * @param value the value from the put operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void put(final int columnFamilyId, final byte[] key, final byte[] value) throws RocksDBException; + + /** + * Put operation callback. + * + * @param key the key from the put operation. + * @param value the value from the put operation. + */ public abstract void put(final byte[] key, final byte[] value); + + /** + * Merge operation callback.
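To illustrate the WalFilter callbacks and the CONTINUE_UNCHANGED constant documented above, a hedged sketch of a pass-through filter (the class name is illustrative; AbstractWalFilter supplies the native plumbing behind the WalFilter interface):

    import java.util.Map;
    import org.rocksdb.*;

    public class PassThroughWalFilter extends AbstractWalFilter {
      @Override
      public void columnFamilyLogNumberMap(final Map<Integer, Long> cfLognumber,
          final Map<String, Integer> cfNameId) {
        // no per-column-family state is needed for a pass-through filter
      }

      @Override
      public LogRecordFoundResult logRecordFound(final long logNumber,
          final String logFileName, final WriteBatch batch, final WriteBatch newBatch) {
        // inspect batch here if desired, then continue WAL processing unchanged
        return LogRecordFoundResult.CONTINUE_UNCHANGED;
      }

      @Override
      public String name() {
        return "pass-through-wal-filter";
      }
    }

Such a filter would be installed with Options#setWalFilter before opening the database.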
+ * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the merge operation. + * @param value the value from the merge operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void merge(final int columnFamilyId, final byte[] key, final byte[] value) throws RocksDBException; + + /** + * Merge operation callback. + * + * @param key the key from the merge operation. + * @param value the value from the merge operation. + */ public abstract void merge(final byte[] key, final byte[] value); + + /** + * Delete operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the delete operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void delete(final int columnFamilyId, final byte[] key) throws RocksDBException; + + /** + * Delete operation callback. + * + * @param key the key from the delete operation. + */ public abstract void delete(final byte[] key); + + /** + * Single Delete operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the single delete operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void singleDelete(final int columnFamilyId, final byte[] key) throws RocksDBException; + + /** + * Single Delete operation callback. + * + * @param key the key from the single delete operation. + */ public abstract void singleDelete(final byte[] key); + + /** + * Delete Range operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param beginKey the begin key from the delete range operation. + * @param endKey the end key from the delete range operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void deleteRange(final int columnFamilyId, final byte[] beginKey, final byte[] endKey) throws RocksDBException; + + /** + * Delete Range operation callback. + * + * @param beginKey the begin key from the delete range operation. + * @param endKey the end key from the delete range operation. + */ public abstract void deleteRange(final byte[] beginKey, final byte[] endKey); + + /** + * Log Data operation callback. + * + * @param blob the blob from the log data operation. + */ public abstract void logData(final byte[] blob); + + /** + * Put Blob Index operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the put blob index operation. + * @param value the value from the put blob index operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void putBlobIndex(final int columnFamilyId, final byte[] key, final byte[] value) throws RocksDBException; + + /** + * Mark Begin Prepare operation callback. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markBeginPrepare() throws RocksDBException; + + /** + * Mark End Prepare operation callback. + * + * @param xid the transaction id. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markEndPrepare(final byte[] xid) throws RocksDBException; + + /** + * Mark Noop operation callback. + * + * @param emptyBatch true if the batch was empty, false otherwise. 
+ * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markNoop(final boolean emptyBatch) throws RocksDBException; + + /** + * Mark Rollback operation callback. + * + * @param xid the transaction id. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markRollback(final byte[] xid) throws RocksDBException; + + /** + * Mark Commit operation callback. + * + * @param xid the transaction id. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markCommit(final byte[] xid) throws RocksDBException; + + /** + * Mark Commit With Timestamp operation callback. + * + * @param xid the transaction id. + * @param ts the timestamp. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markCommitWithTimestamp(final byte[] xid, final byte[] ts) throws RocksDBException; /** - * shouldContinue is called by the underlying iterator + * Called by the underlying iterator * {@link WriteBatch#iterate(Handler)}. If it returns false, * iteration is halted. Otherwise, it continues * iterating. The default implementation always @@ -457,6 +603,13 @@ public static class SavePoint { private long count; private long contentFlags; + /** + * Constructs a SavePoint. + * + * @param size the size + * @param count the count + * @param contentFlags the content flags + */ public SavePoint(final long size, final long count, final long contentFlags) { this.size = size; @@ -464,6 +617,9 @@ public SavePoint(final long size, final long count, this.contentFlags = contentFlags; } + /** + * Clear the save point data. + */ public void clear() { this.size = 0; this.count = 0; @@ -497,6 +653,13 @@ public long getContentFlags() { return contentFlags; } + /** + * Determines if {@link #clear()} was + * called. + * + * @return true if {@link #clear()} was called and the save point remains empty, false + * otherwise. + */ public boolean isCleared() { return (size | count | contentFlags) == 0; } diff --git a/java/src/main/java/org/rocksdb/WriteBufferManager.java b/java/src/main/java/org/rocksdb/WriteBufferManager.java index 495fbdb961b9..5856b498aded 100644 --- a/java/src/main/java/org/rocksdb/WriteBufferManager.java +++ b/java/src/main/java/org/rocksdb/WriteBufferManager.java @@ -28,10 +28,21 @@ public WriteBufferManager( this.allowStall_ = allowStall; } - public WriteBufferManager(final long bufferSizeBytes, final Cache cache){ + /** + * Construct a new instance of WriteBufferManager. + * + * @param bufferSizeBytes the buffer size in bytes. + * @param cache the cache to use. + */ + public WriteBufferManager(final long bufferSizeBytes, final Cache cache) { this(bufferSizeBytes, cache, false); } + /** + * Determine if the Write Buffer Manager is allowed to stall. + * + * @return true if it is allowed to stall, false otherwise. + */ public boolean allowStall() { return allowStall_; } diff --git a/java/src/main/java/org/rocksdb/WriteStallCondition.java b/java/src/main/java/org/rocksdb/WriteStallCondition.java index 98d9e2ce4adf..2c011bc2e390 100644 --- a/java/src/main/java/org/rocksdb/WriteStallCondition.java +++ b/java/src/main/java/org/rocksdb/WriteStallCondition.java @@ -5,9 +5,23 @@ package org.rocksdb; +/** + * Conditions that caused Write Stalls. + */ public enum WriteStallCondition { + /** + * Delayed. + */ DELAYED((byte) 0x0), + + /** + * Stopped. + */ STOPPED((byte) 0x1), + + /** + * Normal. 
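As a usage sketch for the Handler callbacks documented above: a handler that tallies puts and deletes while iterating a batch, with the remaining callbacks stubbed out (class and field names are illustrative):

    import org.rocksdb.*;

    public class CountingHandler extends WriteBatch.Handler {
      int puts = 0;
      int deletes = 0;

      @Override public void put(final int columnFamilyId, final byte[] key, final byte[] value) { puts++; }
      @Override public void put(final byte[] key, final byte[] value) { puts++; }
      @Override public void merge(final int columnFamilyId, final byte[] key, final byte[] value) {}
      @Override public void merge(final byte[] key, final byte[] value) {}
      @Override public void delete(final int columnFamilyId, final byte[] key) { deletes++; }
      @Override public void delete(final byte[] key) { deletes++; }
      @Override public void singleDelete(final int columnFamilyId, final byte[] key) {}
      @Override public void singleDelete(final byte[] key) {}
      @Override public void deleteRange(final int columnFamilyId, final byte[] beginKey, final byte[] endKey) {}
      @Override public void deleteRange(final byte[] beginKey, final byte[] endKey) {}
      @Override public void logData(final byte[] blob) {}
      @Override public void putBlobIndex(final int columnFamilyId, final byte[] key, final byte[] value) {}
      @Override public void markBeginPrepare() {}
      @Override public void markEndPrepare(final byte[] xid) {}
      @Override public void markNoop(final boolean emptyBatch) {}
      @Override public void markRollback(final byte[] xid) {}
      @Override public void markCommit(final byte[] xid) {}
      @Override public void markCommitWithTimestamp(final byte[] xid, final byte[] ts) {}
    }

Passing an instance to WriteBatch#iterate(Handler) (which may throw RocksDBException) then walks the batch and fires these callbacks in record order.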
+ */ NORMAL((byte) 0x2); private final byte value; diff --git a/java/src/main/java/org/rocksdb/WriteStallInfo.java b/java/src/main/java/org/rocksdb/WriteStallInfo.java index 1cade0acb8ed..9cccc0a95971 100644 --- a/java/src/main/java/org/rocksdb/WriteStallInfo.java +++ b/java/src/main/java/org/rocksdb/WriteStallInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information on a Write Stall. + */ public class WriteStallInfo { private final String columnFamilyName; private final WriteStallCondition currentCondition; diff --git a/java/src/main/java/org/rocksdb/util/BufferUtil.java b/java/src/main/java/org/rocksdb/util/BufferUtil.java index 54be3e6937d6..dccf71ce2361 100644 --- a/java/src/main/java/org/rocksdb/util/BufferUtil.java +++ b/java/src/main/java/org/rocksdb/util/BufferUtil.java @@ -6,7 +6,19 @@ package org.rocksdb.util; +/** + * Utility functions for working with buffers. + */ public class BufferUtil { + /** + * Check the bounds for an operation on a buffer. + * + * @param offset the offset + * @param len the length + * @param size the size + * + * @throws IndexOutOfBoundsException if the values are out of bounds + */ public static void CheckBounds(final int offset, final int len, final int size) { if ((offset | len | (offset + len) | (size - (offset + len))) < 0) { throw new IndexOutOfBoundsException( diff --git a/java/src/main/java/org/rocksdb/util/ByteUtil.java b/java/src/main/java/org/rocksdb/util/ByteUtil.java index 5d64d5dcf29a..b5139b86da32 100644 --- a/java/src/main/java/org/rocksdb/util/ByteUtil.java +++ b/java/src/main/java/org/rocksdb/util/ByteUtil.java @@ -10,6 +10,9 @@ import static java.nio.charset.StandardCharsets.UTF_8; +/** + * Simple utility functions for working with bytes. + */ public class ByteUtil { /** @@ -29,8 +32,9 @@ public static byte[] bytes(final String str) { * lexically less than {@code y}, or a value greater than zero if {@code x} * is lexically greater than {@code y}. Note that lexical order is determined * as if comparing unsigned char arrays. - * - * Similar to memcmp.c. + *

+ * Similar to memcmp.c. * * @param x the first value to compare with * @param y the second value to compare against diff --git a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java index 202241d3bad9..acb76c255b9e 100644 --- a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java +++ b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java @@ -22,7 +22,11 @@ * {@link org.rocksdb.BuiltinComparator#BYTEWISE_COMPARATOR} */ public final class BytewiseComparator extends AbstractComparator { - + /** + * Constructs a new BytewiseComparator. + * + * @param copt the configuration options for the comparator. + */ public BytewiseComparator(final ComparatorOptions copt) { super(copt); } @@ -46,7 +50,7 @@ static int _compare(final ByteBuffer a, final ByteBuffer b) { if (a.remaining() < b.remaining()) { r = -1; } else if (a.remaining() > b.remaining()) { - r = +1; + r = 1; } } return r; diff --git a/java/src/main/java/org/rocksdb/util/Environment.java b/java/src/main/java/org/rocksdb/util/Environment.java index 78b73dc5d432..f1bbcfe927e0 100644 --- a/java/src/main/java/org/rocksdb/util/Environment.java +++ b/java/src/main/java/org/rocksdb/util/Environment.java @@ -5,6 +5,9 @@ import java.io.IOException; import java.util.Locale; +/** + * Provides information about the environment in which RocksJava is executing. + */ public class Environment { @SuppressWarnings("FieldMayBeFinal") private static String OS = System.getProperty("os.name").toLowerCase(Locale.getDefault()); @@ -24,38 +27,83 @@ public class Environment { */ private static Boolean MUSL_LIBC = null; + /** + * Returns true if the CPU architecture is aarch64. + * + * @return true if the CPU architecture is aarch64, false otherwise. + */ public static boolean isAarch64() { return ARCH.contains("aarch64"); } + /** + * Returns true if the CPU architecture is ppc. + * + * @return true if the CPU architecture is ppc, false otherwise. + */ public static boolean isPowerPC() { return ARCH.contains("ppc"); } + /** + * Returns true if the CPU architecture is s390x. + * + * @return true if the CPU architecture is s390x, false otherwise. + */ public static boolean isS390x() { return ARCH.contains("s390x"); } + /** + * Returns true if the CPU architecture is riscv64. + * + * @return true if the CPU architecture is riscv64, false otherwise. + */ public static boolean isRiscv64() { return ARCH.contains("riscv64"); } + /** + * Returns true if the OS is Windows. + * + * @return true if the OS is Windows, false otherwise. + */ public static boolean isWindows() { return (OS.contains("win")); } + /** + * Returns true if the OS is FreeBSD. + * + * @return true if the OS is FreeBSD, false otherwise. + */ public static boolean isFreeBSD() { return (OS.contains("freebsd")); } + /** + * Returns true if the OS is Mac. + * + * @return true if the OS is Mac, false otherwise. + */ public static boolean isMac() { return (OS.contains("mac")); } + /** + * Returns true if the OS is AIX. + * + * @return true if the OS is AIX, false otherwise. + */ public static boolean isAix() { return OS.contains("aix"); } - + + /** + * Returns true if the OS is Unix. + * + * @return true if the OS is Unix, false otherwise. + */ public static boolean isUnix() { return OS.contains("nix") || OS.contains("nux"); @@ -75,9 +123,9 @@ public static boolean isMuslLibc() { /** * Determine if the environment has a musl libc. - * + *

* The initialisation counterpart of {@link #isMuslLibc()}. - * + *

* Intentionally package-private for testing. * * @return true if the environment has a musl libc, false otherwise. @@ -136,14 +184,29 @@ static boolean initIsMuslLibc() { return false; } + /** + * Returns true if the OS is Solaris. + * + * @return true if the OS is Solaris, false otherwise. + */ public static boolean isSolaris() { return OS.contains("sunos"); } + /** + * Returns true if the OS is OpenBSD. + * + * @return true if the OS is OpenBSD, false otherwise. + */ public static boolean isOpenBSD() { return (OS.contains("openbsd")); } + /** + * Returns true if the system architecture is 64 bit. + * + * @return true if the system architecture is 64 bit, false otherwise. + */ public static boolean is64Bit() { if (ARCH.contains(SPARCV9)) { return true; @@ -151,10 +214,24 @@ public static boolean is64Bit() { return (ARCH.indexOf("64") > 0); } + /** + * Get the name as that of a shared JNI library. + * + * @param name the name. + * + * @return the name of the shared JNI library. + */ public static String getSharedLibraryName(final String name) { return name + "jni"; } + /** + * Get the filename as that of a shared JNI library. + * + * @param name the name. + * + * @return the filename of the shared JNI library. + */ public static String getSharedLibraryFileName(final String name) { return appendLibOsSuffix("lib" + getSharedLibraryName(name), true); } @@ -181,6 +258,15 @@ private static String getLibcPostfix() { return "-" + libcName; } + /** + * Get the name as that of a JNI library. + *

+ * Deals with platform and architecture specific naming. + * + * @param name the name. + * + * @return the name of the JNI library. + */ public static String getJniLibraryName(final String name) { if (isUnix()) { final String arch = is64Bit() ? "64" : "32"; @@ -219,6 +305,15 @@ public static String getJniLibraryName(final String name) { throw new UnsupportedOperationException(String.format("Cannot determine JNI library name for ARCH='%s' OS='%s' name='%s'", ARCH, OS, name)); } + /** + * Get a fallback name as that of a JNI library. + *

+ * Deals with platform and architecture specific naming. + * + * @param name the name. + * + * @return the fallback name of the JNI library. + */ public static /*@Nullable*/ String getFallbackJniLibraryName(final String name) { if (isMac() && is64Bit()) { return String.format("%sjni-osx", name); @@ -226,10 +321,28 @@ public static String getJniLibraryName(final String name) { return null; } + /** + * Get the filename as that of a JNI library. + *

+ * Deals with platform and architecture specific naming. + * + * @param name the name. + * + * @return the filename of the JNI library. + */ public static String getJniLibraryFileName(final String name) { return appendLibOsSuffix("lib" + getJniLibraryName(name), false); } + /** + * Get the fallback filename as that of a JNI library. + *

+ * Deals with platform and architecture specific naming. + * + * @param name the name. + * + * @return the fallback filename of the JNI library. + */ public static /*@Nullable*/ String getFallbackJniLibraryFileName(final String name) { final String fallbackJniLibraryName = getFallbackJniLibraryName(name); if (fallbackJniLibraryName == null) { @@ -249,6 +362,13 @@ private static String appendLibOsSuffix(final String libraryFileName, final bool throw new UnsupportedOperationException(); } + /** + * Get the filename extension used for a JNI library. + *

diff --git a/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java b/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java
index 8d7956cf27f6..90f301c39fc7 100644
--- a/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java
+++ b/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java
@@ -8,7 +8,17 @@
 
 import java.nio.ByteBuffer;
 
+/**
+ * Allocator for creating new ByteBuffers.
+ */
 public interface ByteBufferAllocator {
+  /**
+   * Allocate a new ByteBuffer.
+   *
+   * @param capacity the capacity of the buffer.
+   *
+   * @return the new ByteBuffer.
+   */
   ByteBuffer allocate(int capacity);
 
   ByteBufferAllocator DIRECT = new DirectByteBufferAllocator();
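ByteBufferAllocator is test-support code (it lives under java/src/test); the point of the interface is that a single helper can be exercised against more than one buffer implementation. A sketch with a hypothetical fillKey helper:

    import java.nio.ByteBuffer;

    import org.rocksdb.util.ByteBufferAllocator;

    public class AllocatorDemo {
      // Hypothetical helper: the same code path can be exercised with any
      // ByteBufferAllocator implementation, e.g. DIRECT.
      static ByteBuffer fillKey(final ByteBufferAllocator allocator) {
        final ByteBuffer key = allocator.allocate(Integer.BYTES);
        key.putInt(42);
        key.flip();
        return key;
      }

      public static void main(final String[] args) {
        System.out.println(fillKey(ByteBufferAllocator.DIRECT).isDirect()); // true
      }
    }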
diff --git a/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java b/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java
index 8ea104332cb1..8b06fbfabca0 100644
--- a/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java
+++ b/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java
@@ -124,16 +124,34 @@ public void markCommitWithTimestamp(final byte[] xid, final byte[] ts) throws Ro
     events.add(new Event(Action.MARK_COMMIT_WITH_TIMESTAMP, (byte[]) null, (byte[]) null));
   }
 
+  /**
+   * Event received by the handler.
+   */
   public static class Event {
     public final Action action;
     public final int columnFamilyId;
     public final byte[] key;
     public final byte[] value;
 
+    /**
+     * Construct an event.
+     *
+     * @param action the action of the event
+     * @param key the key of the event
+     * @param value the value of the event
+     */
     public Event(final Action action, final byte[] key, final byte[] value) {
       this(action, 0, key, value);
     }
 
+    /**
+     * Construct an event.
+     *
+     * @param action the action of the event
+     * @param columnFamilyId the id of the column family of the event
+     * @param key the key of the event
+     * @param value the value of the event
+     */
     public Event(final Action action, final int columnFamilyId, final byte[] key,
         final byte[] value) {
       this.action = action;