diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 28e6aafff38c..17d027d33f68 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,7 +27,7 @@ on: default: '' required: false env: - BUILD_ARGS: "-Pdist -Psrc -Dmaven.javadoc.skip=true -Drocks_tools_native" + BUILD_ARGS: "-Pdist -Psrc -Dmaven.javadoc.skip=true" # Minimum required Java version for running Ozone is defined in pom.xml (javac.version). TEST_JAVA_VERSION: 21 # JDK version used by CI build and tests; should match the JDK version in apache/ozone-runner image # MAVEN_ARGS and MAVEN_OPTS are duplicated in check.yml, please keep in sync @@ -290,7 +290,7 @@ jobs: pre-script: sudo hostname localhost ratis-args: ${{ inputs.ratis_args }} script: integration - script-args: -Ptest-${{ matrix.profile }} -Drocks_tools_native + script-args: -Ptest-${{ matrix.profile }} sha: ${{ needs.build-info.outputs.sha }} split: ${{ matrix.profile }} timeout-minutes: 90 diff --git a/.github/workflows/intermittent-test-check.yml b/.github/workflows/intermittent-test-check.yml index bf03d29d57f7..61430bea2787 100644 --- a/.github/workflows/intermittent-test-check.yml +++ b/.github/workflows/intermittent-test-check.yml @@ -131,7 +131,7 @@ jobs: java-version: ${{ github.event.inputs.java-version }} - name: Build (most) of Ozone run: | - args="-DskipRecon -DskipShade -Dmaven.javadoc.skip=true -Drocks_tools_native" + args="-DskipRecon -DskipShade -Dmaven.javadoc.skip=true" if [[ "$RATIS_VERSION" != "" ]]; then args="$args -Dratis.version=${{ needs.ratis.outputs.ratis-version }}" args="$args -Dratis.thirdparty.version=${{ needs.ratis.outputs.thirdparty-version }}" @@ -201,7 +201,7 @@ jobs: export OZONE_REPO_CACHED=true fi - args="-DexcludedGroups=slow|unhealthy -DskipShade -Drocks_tools_native" + args="-DexcludedGroups=slow|unhealthy -DskipShade" if [[ "$RATIS_VERSION" != "" ]]; then args="$args -Dratis.version=${{ needs.ratis.outputs.ratis-version }}" args="$args -Dratis.thirdparty.version=${{ needs.ratis.outputs.thirdparty-version }}" diff --git a/.github/workflows/populate-cache.yml b/.github/workflows/populate-cache.yml index f2a6843f6912..9cf4b514e606 100644 --- a/.github/workflows/populate-cache.yml +++ b/.github/workflows/populate-cache.yml @@ -73,7 +73,7 @@ jobs: - name: Fetch dependencies if: steps.restore-cache.outputs.cache-hit != 'true' - run: mvn --batch-mode --no-transfer-progress --show-version -Pgo-offline -Pdist -Drocks_tools_native clean verify + run: mvn --batch-mode --no-transfer-progress --show-version -Pgo-offline -Pdist clean verify - name: Delete Ozone jars from repo if: steps.restore-cache.outputs.cache-hit != 'true' diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index ceca7d0c8824..90646078d672 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -655,8 +655,10 @@ public final class OzoneConfigKeys { public static final int OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE_DEFAULT = 2000; + @Deprecated public static final String OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB = "ozone.om.snapshot.load.native.lib"; + @Deprecated public static final boolean OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT = true; public static final String OZONE_OM_DELTA_UPDATE_DATA_SIZE_MAX_LIMIT = diff --git a/hadoop-hdds/docs/content/start/FromSource.md 
b/hadoop-hdds/docs/content/start/FromSource.md index 758ac39e80f0..4c69bbb51851 100644 --- a/hadoop-hdds/docs/content/start/FromSource.md +++ b/hadoop-hdds/docs/content/start/FromSource.md @@ -127,8 +127,6 @@ mvn clean package -DskipTests=true -Pdist * Use `-DskipShade` to skip shaded Ozone FS jar file creation. Saves time, but you can't test integration with other software that uses Ozone as a Hadoop-compatible file system. * Use `-DskipRecon` to skip building Recon Web UI. It saves about 2 minutes. * Use `-Dmaven.javadoc.skip=true` to skip building javadocs. -* Use `-Drocks_tools_native` to build the RocksDB native code for the Ozone Snapshot feature. This is optional and not required for building Ozone. It is only needed if you want to build the RocksDB native code for Ozone. - ## How to run Ozone from build diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java index bdc5124ac3b4..468900f57ef2 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java @@ -23,6 +23,7 @@ import static org.rocksdb.RocksDB.listColumnFamilies; import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; import java.io.Closeable; import java.io.File; import java.nio.ByteBuffer; @@ -56,7 +57,6 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedTransactionLogIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteBatch; import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions; -import org.apache.ozone.rocksdiff.RocksDiffUtils; import org.apache.ratis.util.MemoizedSupplier; import org.apache.ratis.util.UncheckedAutoCloseable; import org.rocksdb.ColumnFamilyDescriptor; @@ -883,36 +883,36 @@ private int getLastLevel() throws RocksDatabaseException { */ public void deleteFilesNotMatchingPrefix(TablePrefixInfo prefixInfo) throws RocksDatabaseException { try (UncheckedAutoCloseable ignored = acquire()) { + Map sstFileRangeMap = new HashMap<>(); for (LiveFileMetaData liveFileMetaData : getSstFileList()) { String sstFileColumnFamily = StringUtils.bytes2String(liveFileMetaData.columnFamilyName()); - int lastLevel = getLastLevel(); - - // RocksDB #deleteFile API allows only to delete the last level of - // SST Files. Any level < last level won't get deleted and - // only last file of level 0 can be deleted - // and will throw warning in the rocksdb manifest. - // Instead, perform the level check here - // itself to avoid failed delete attempts for lower level files. - if (liveFileMetaData.level() != lastLevel || lastLevel == 0) { - continue; - } - - String prefixForColumnFamily = prefixInfo.getTablePrefix(sstFileColumnFamily); String firstDbKey = StringUtils.bytes2String(liveFileMetaData.smallestKey()); String lastDbKey = StringUtils.bytes2String(liveFileMetaData.largestKey()); - boolean isKeyWithPrefixPresent = RocksDiffUtils.isKeyWithPrefixPresent( - prefixForColumnFamily, firstDbKey, lastDbKey); - if (!isKeyWithPrefixPresent) { - LOG.info("Deleting sst file: {} with start key: {} and end key: {} " - + "corresponding to column family {} from db: {}. 
" - + "Prefix for the column family: {}.", - liveFileMetaData.fileName(), - firstDbKey, lastDbKey, - StringUtils.bytes2String(liveFileMetaData.columnFamilyName()), - db.get().getName(), - prefixForColumnFamily); - db.deleteFile(liveFileMetaData); + sstFileRangeMap.compute(sstFileColumnFamily, (key, value) -> { + if (value == null) { + return new String[]{firstDbKey, lastDbKey}; + } + value[0] = firstDbKey.compareTo(value[0]) < 0 ? firstDbKey : value[0]; + value[1] = lastDbKey.compareTo(value[1]) > 0 ? lastDbKey : value[1]; + return value; + }); + } + for (String tableName : prefixInfo.getTableNames()) { + String prefixForColumnFamily = prefixInfo.getTablePrefix(tableName); + ColumnFamilyHandle ch = getColumnFamilyHandle(tableName); + if (ch == null || prefixForColumnFamily == null || prefixForColumnFamily.isEmpty()) { + continue; } + String smallestDBKey = sstFileRangeMap.get(tableName)[0]; + String largestDBKey = sstFileRangeMap.get(tableName)[1]; + String nextLargestDBKey = StringUtils.getLexicographicallyHigherString(prefixForColumnFamily); + LOG.info("Deleting sst files in range [{}, {}) and [{}, {}) corresponding to column family {} from db: {}. " + + "Prefix for the column family: {}.", + smallestDBKey, prefixForColumnFamily, nextLargestDBKey, largestDBKey, + tableName, db.get().getName(), prefixForColumnFamily); + db.deleteFile(ch, ImmutableList.of(StringUtils.string2Bytes(smallestDBKey), + StringUtils.string2Bytes(prefixForColumnFamily), StringUtils.string2Bytes(nextLargestDBKey), + StringUtils.string2Bytes(largestDBKey))); } } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java index 919b3b6cdad2..cddb11e95285 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java @@ -100,13 +100,13 @@ Answer newAnswer(String name, byte... 
b) { public void testForEachRemaining() throws Exception { when(rocksIteratorMock.isValid()) .thenReturn(true, true, true, true, true, true, true, false); - when(rocksIteratorMock.key(any())) + when(rocksIteratorMock.key(any(ByteBuffer.class))) .then(newAnswerInt("key1", 0x00)) .then(newAnswerInt("key2", 0x00)) .then(newAnswerInt("key3", 0x01)) .then(newAnswerInt("key4", 0x02)) .thenThrow(new NoSuchElementException()); - when(rocksIteratorMock.value(any())) + when(rocksIteratorMock.value(any(ByteBuffer.class))) .then(newAnswerInt("val1", 0x7f)) .then(newAnswerInt("val2", 0x7f)) .then(newAnswerInt("val3", 0x7e)) @@ -152,8 +152,8 @@ public void testNextCallsIsValidThenGetsTheValueAndStepsToNext() } verifier.verify(rocksIteratorMock).isValid(); - verifier.verify(rocksIteratorMock).key(any()); - verifier.verify(rocksIteratorMock).value(any()); + verifier.verify(rocksIteratorMock).key(any(ByteBuffer.class)); + verifier.verify(rocksIteratorMock).value(any(ByteBuffer.class)); verifier.verify(rocksIteratorMock).next(); CodecTestUtil.gc(); @@ -192,9 +192,9 @@ public void testSeekToLastSeeks() throws Exception { @Test public void testSeekReturnsTheActualKey() throws Exception { when(rocksIteratorMock.isValid()).thenReturn(true); - when(rocksIteratorMock.key(any())) + when(rocksIteratorMock.key(any(ByteBuffer.class))) .then(newAnswerInt("key1", 0x00)); - when(rocksIteratorMock.value(any())) + when(rocksIteratorMock.value(any(ByteBuffer.class))) .then(newAnswerInt("val1", 0x7f)); try (RDBStoreCodecBufferIterator i = newIterator(); @@ -208,8 +208,8 @@ public void testSeekReturnsTheActualKey() throws Exception { verifier.verify(rocksIteratorMock, times(1)) .seek(any(ByteBuffer.class)); verifier.verify(rocksIteratorMock, times(1)).isValid(); - verifier.verify(rocksIteratorMock, times(1)).key(any()); - verifier.verify(rocksIteratorMock, times(1)).value(any()); + verifier.verify(rocksIteratorMock, times(1)).key(any(ByteBuffer.class)); + verifier.verify(rocksIteratorMock, times(1)).value(any(ByteBuffer.class)); assertArrayEquals(new byte[]{0x00}, val.getKey().getArray()); assertArrayEquals(new byte[]{0x7f}, val.getValue().getArray()); } @@ -220,7 +220,7 @@ public void testSeekReturnsTheActualKey() throws Exception { @Test public void testGettingTheKeyIfIteratorIsValid() throws Exception { when(rocksIteratorMock.isValid()).thenReturn(true); - when(rocksIteratorMock.key(any())) + when(rocksIteratorMock.key(any(ByteBuffer.class))) .then(newAnswerInt("key1", 0x00)); byte[] key = null; @@ -233,7 +233,7 @@ public void testGettingTheKeyIfIteratorIsValid() throws Exception { InOrder verifier = inOrder(rocksIteratorMock); verifier.verify(rocksIteratorMock, times(1)).isValid(); - verifier.verify(rocksIteratorMock, times(1)).key(any()); + verifier.verify(rocksIteratorMock, times(1)).key(any(ByteBuffer.class)); assertArrayEquals(new byte[]{0x00}, key); CodecTestUtil.gc(); @@ -242,9 +242,9 @@ public void testGettingTheKeyIfIteratorIsValid() throws Exception { @Test public void testGettingTheValueIfIteratorIsValid() throws Exception { when(rocksIteratorMock.isValid()).thenReturn(true); - when(rocksIteratorMock.key(any())) + when(rocksIteratorMock.key(any(ByteBuffer.class))) .then(newAnswerInt("key1", 0x00)); - when(rocksIteratorMock.value(any())) + when(rocksIteratorMock.value(any(ByteBuffer.class))) .then(newAnswerInt("val1", 0x7f)); byte[] key = null; @@ -260,7 +260,7 @@ public void testGettingTheValueIfIteratorIsValid() throws Exception { InOrder verifier = inOrder(rocksIteratorMock); 
verifier.verify(rocksIteratorMock, times(1)).isValid(); - verifier.verify(rocksIteratorMock, times(1)).key(any()); + verifier.verify(rocksIteratorMock, times(1)).key(any(ByteBuffer.class)); assertArrayEquals(new byte[]{0x00}, key); assertArrayEquals(new byte[]{0x7f}, value); @@ -272,7 +272,7 @@ public void testRemovingFromDBActuallyDeletesFromTable() throws Exception { final byte[] testKey = new byte[10]; ThreadLocalRandom.current().nextBytes(testKey); when(rocksIteratorMock.isValid()).thenReturn(true); - when(rocksIteratorMock.key(any())) + when(rocksIteratorMock.key(any(ByteBuffer.class))) .then(newAnswer("key1", testKey)); try (RDBStoreCodecBufferIterator i = newIterator(null)) { @@ -320,7 +320,7 @@ public void testNullPrefixedIterator() throws Exception { when(rocksIteratorMock.isValid()).thenReturn(true); assertTrue(i.hasNext()); verify(rocksIteratorMock, times(1)).isValid(); - verify(rocksIteratorMock, times(0)).key(any()); + verify(rocksIteratorMock, times(0)).key(any(ByteBuffer.class)); i.seekToLast(); verify(rocksIteratorMock, times(1)).seekToLast(); @@ -343,11 +343,11 @@ public void testNormalPrefixedIterator() throws Exception { clearInvocations(rocksIteratorMock); when(rocksIteratorMock.isValid()).thenReturn(true); - when(rocksIteratorMock.key(any())) + when(rocksIteratorMock.key(any(ByteBuffer.class))) .then(newAnswer("key1", prefixBytes)); assertTrue(i.hasNext()); verify(rocksIteratorMock, times(1)).isValid(); - verify(rocksIteratorMock, times(1)).key(any()); + verify(rocksIteratorMock, times(1)).key(any(ByteBuffer.class)); Exception e = assertThrows(Exception.class, () -> i.seekToLast(), "Prefixed iterator does not support seekToLast"); diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java index 1809b0885600..082c486f8f29 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.ratis.util.UncheckedAutoCloseable; import org.rocksdb.DBOptions; -import org.rocksdb.Logger; +import org.rocksdb.LoggerInterface; /** * Managed DBOptions. 
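The import swap above tracks the newer RocksJava API, where DBOptions#setLogger takes the LoggerInterface abstraction instead of the concrete Logger class. Because an arbitrary LoggerInterface implementation is not necessarily closable, the hunk below only closes the previously registered logger when it also implements AutoCloseable. A minimal sketch of that swap-and-close-if-closable pattern, using a hypothetical Listener type (the names are illustrative, not taken from the patch):

```java
import java.util.concurrent.atomic.AtomicReference;

public class CloseIfClosableSketch {
  /** Hypothetical callback type; implementations may or may not hold native resources. */
  public interface Listener {
    void log(String message);
  }

  private final AtomicReference<Listener> current = new AtomicReference<>();

  /** Swap in a new listener and release the old one only if it owns closable resources. */
  public void setListener(Listener next) {
    Listener previous = current.getAndSet(next);
    if (previous instanceof AutoCloseable) {
      try {
        ((AutoCloseable) previous).close();
      } catch (Exception e) {
        // Same spirit as IOUtils.close(LOG, ...): log the failure and keep going.
        System.err.println("Failed to close previous listener: " + e);
      }
    }
  }

  public static void main(String[] args) {
    CloseIfClosableSketch options = new CloseIfClosableSketch();

    // A listener that owns a native handle also implements AutoCloseable.
    class NativeListener implements Listener, AutoCloseable {
      @Override public void log(String message) { }
      @Override public void close() { System.out.println("released native handle"); }
    }

    options.setListener(new NativeListener());
    options.setListener(message -> { });  // swapping in a plain lambda closes the old one
  }
}
```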
@@ -32,18 +32,24 @@ public class ManagedDBOptions extends DBOptions { private final UncheckedAutoCloseable leakTracker = track(this); - private final AtomicReference loggerRef = new AtomicReference<>(); + private final AtomicReference loggerRef = new AtomicReference<>(); @Override - public DBOptions setLogger(Logger logger) { - IOUtils.close(LOG, loggerRef.getAndSet(logger)); + public DBOptions setLogger(LoggerInterface logger) { + LoggerInterface oldLogger = loggerRef.getAndSet(logger); + if (oldLogger instanceof AutoCloseable) { + IOUtils.close(LOG, ((AutoCloseable)oldLogger)); + } return super.setLogger(logger); } @Override public void close() { try { - IOUtils.close(LOG, loggerRef.getAndSet(null)); + LoggerInterface oldLogger = loggerRef.getAndSet(null); + if (oldLogger instanceof AutoCloseable) { + IOUtils.close(LOG, ((AutoCloseable)oldLogger)); + } super.close(); } finally { leakTracker.close(); diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedParsedEntry.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedParsedEntry.java new file mode 100644 index 000000000000..bda9ed81ee19 --- /dev/null +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedParsedEntry.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.utils.db.managed; + +import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track; + +import org.apache.ratis.util.UncheckedAutoCloseable; +import org.rocksdb.ParsedEntryInfo; + +/** + * ManagedParsedEntry is a subclass of ParsedEntryInfo that ensures proper + * tracking and closure of resources to prevent resource leakage. This class + * leverages an internal leak tracker to monitor lifecycle events and ensures + * that native resources are released correctly when the object is closed. + * + * It overrides the {@code close} method to integrate the cleanup logic for + * resource tracking, delegating the resource closure to its parent class, and + * subsequently ensuring the associated leak tracker is closed as well. 
+ */ +public class ManagedParsedEntry extends ParsedEntryInfo { + private final UncheckedAutoCloseable leakTracker = track(this); + + @Override + public void close() { + try { + super.close(); + } finally { + leakTracker.close(); + } + } +} diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java index 3401469f6824..8a46ebb759a1 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java @@ -17,12 +17,11 @@ package org.apache.hadoop.hdds.utils.db.managed; -import java.io.File; -import java.time.Duration; import java.util.List; import java.util.Map; import java.util.stream.Collectors; import org.apache.commons.io.FilenameUtils; +import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; @@ -115,19 +114,19 @@ public static ManagedRocksDB openWithLatestOptions( - * Delete liveMetaDataFile from rocks db using RocksDB#deleteFile Api. - * This function makes the RocksDB#deleteFile Api synchronized by waiting - * for the deletes to happen. - * @param fileToBeDeleted File to be deleted. + * Delete the SST files of the given column family that are entirely contained in the given key ranges, + * using the RocksDB#deleteFilesInRanges Api. + * @param columnFamilyHandle column family handle. + * @param ranges list of ranges to be deleted. * @throws RocksDatabaseException if the underlying db throws an exception - * or the file is not deleted within a time limit. + * while deleting the files. */ - public void deleteFile(LiveFileMetaData fileToBeDeleted) throws RocksDatabaseException { - String sstFileName = fileToBeDeleted.fileName(); - File file = new File(fileToBeDeleted.path(), fileToBeDeleted.fileName()); + public void deleteFile(ColumnFamilyHandle columnFamilyHandle, List<byte[]> ranges) throws RocksDatabaseException { + String columnFamilyName = null; try { - get().deleteFile(sstFileName); + columnFamilyName = StringUtils.bytes2String(columnFamilyHandle.getName()); + get().deleteFilesInRanges(columnFamilyHandle, ranges, false); } catch (RocksDBException e) { - throw new RocksDatabaseException("Failed to delete " + file, e); + throw new RocksDatabaseException("Failed to delete files in ranges corresponding to columnFamily: " + + columnFamilyName, e); } - ManagedRocksObjectUtils.waitForFileDelete(file, Duration.ofSeconds(60)); } public static Map<String, LiveFileMetaData> getLiveMetadataForSSTFiles(RocksDB db) { diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java index e60508033e51..da485c9ae9be 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java @@ -18,12 +18,9 @@ package org.apache.hadoop.hdds.utils.db.managed; import jakarta.annotation.Nullable; -import java.io.File; import java.time.Duration; import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.utils.LeakDetector; -import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; import org.apache.ratis.util.UncheckedAutoCloseable; import org.rocksdb.RocksDB; import
org.rocksdb.util.Environment; @@ -65,22 +62,6 @@ static String formatStackTrace(@Nullable StackTraceElement[] elements) { return HddsUtils.formatStackTrace(elements, 4); } - /** - * Wait for file to be deleted. - * @param file File to be deleted. - * @param maxDuration poll max duration. - * @throws RocksDatabaseException in case of failure. - */ - public static void waitForFileDelete(File file, Duration maxDuration) - throws RocksDatabaseException { - if (!RatisHelper.attemptUntilTrue(() -> !file.exists(), POLL_INTERVAL_DURATION, maxDuration)) { - String msg = String.format("File: %s didn't get deleted in %s secs.", - file.getAbsolutePath(), maxDuration.getSeconds()); - LOG.warn(msg); - throw new RocksDatabaseException(msg); - } - } - /** * Ensures that the RocksDB native library is loaded. * This method should be called before performing any operations diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryNotLoadedException.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedTypeUtil.java similarity index 56% rename from hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryNotLoadedException.java rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedTypeUtil.java index 96a308a6b2e0..30ffe3d4e39e 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryNotLoadedException.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedTypeUtil.java @@ -15,15 +15,19 @@ * limitations under the License. */ -package org.apache.hadoop.hdds.utils; +package org.apache.hadoop.hdds.utils.db.managed; + +import org.rocksdb.TypeUtil; /** - Exception when native library not loaded. + * Utility class that provides methods to manage and handle specific aspects of + * managed types and interactions with RocksDB objects. It extends TypeUtil, + * inheriting its utilities while offering specialized functionality for + * managed database components. + * + * This class is part of the framework designed to ensure proper handling and + * lifecycle management of RocksDB objects, reducing the risks of native + * resource leaks by tracking and enforcing explicit closure of objects. 
*/ -public class NativeLibraryNotLoadedException extends Exception { - public NativeLibraryNotLoadedException(String libraryName) { - super(String.format("Unable to load library %s from both " + - "java.library.path & resource file %s from jar.", libraryName, - NativeLibraryLoader.getJniLibraryFileName(libraryName))); - } +public class ManagedTypeUtil extends TypeUtil { } diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index ba60486c1463..b5a5cfc8bc75 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -42,7 +42,6 @@ interface-client interface-server managed-rocksdb - rocks-native rocksdb-checkpoint-differ server-scm test-utils diff --git a/hadoop-hdds/rocks-native/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/rocks-native/dev-support/findbugsExcludeFile.xml deleted file mode 100644 index 40d78d0cd6ce..000000000000 --- a/hadoop-hdds/rocks-native/dev-support/findbugsExcludeFile.xml +++ /dev/null @@ -1,18 +0,0 @@ - - - diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml deleted file mode 100644 index 74fdb749d252..000000000000 --- a/hadoop-hdds/rocks-native/pom.xml +++ /dev/null @@ -1,347 +0,0 @@ - - - - 4.0.0 - - org.apache.ozone - hdds - 2.2.0-SNAPSHOT - - hdds-rocks-native - Apache Ozone HDDS RocksDB Tools - - - - com.google.guava - guava - - - commons-io - commons-io - - - org.apache.commons - commons-lang3 - - - org.apache.ozone - hdds-common - - - org.apache.ozone - hdds-managed-rocksdb - - - org.rocksdb - rocksdbjni - - - - org.slf4j - slf4j-api - - - - - org.apache.ozone - hdds-test-utils - test-jar - test - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - - - org.apache.maven.plugins - maven-compiler-plugin - - none - - - - - - - cpu-count - - - !system.numCores - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - get-cpu-count - - cpu-count - - generate-sources - - system.numCores - - - - - - - - - rocks_tools_native - - - rocks_tools_native - - - - 20 - true - - - - - org.codehaus.mojo - exec-maven-plugin - - - set-property - - java - - initialize - - org.apache.hadoop.hdds.utils.db.managed.JniLibNamePropertyWriter - - ${project.build.directory}/propertyFile.txt - - - - - - - org.codehaus.mojo - properties-maven-plugin - - - read-property-from-file - - read-project-properties - - initialize - - - ${project.build.directory}/propertyFile.txt - - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - unpack-dependency - - unpack - - initialize - - - - org.rocksdb - rocksdbjni - jar - false - ${project.build.directory}/rocksdbjni - - - - - - - - com.googlecode.maven-download-plugin - download-maven-plugin - - - rocksdb source download - - wget - - generate-sources - - https://github.com/facebook/rocksdb/archive/refs/tags/v${rocksdb.version}.tar.gz - rocksdb-v${rocksdb.version}.tar.gz - ${project.build.directory}/rocksdb - - - - - - org.apache.maven.plugins - maven-patch-plugin - - ${basedir}/src/main/patches/rocks-native.patch - 1 - ${project.build.directory}/rocksdb/rocksdb-${rocksdb.version} - - - - patch - - apply - - process-sources - - - - - org.apache.maven.plugins - maven-antrun-plugin - - - unzip-artifact - - run - - generate-sources - - - - - - - - build-rocksjava - - run - - generate-resources - - - - - - - - - - - - - - - - - - - build-rocks-tools - - run - - process-classes - - - - - - - - - - - - - - - - - - - - - - - copy-lib-file - - run - - process-classes - - - - - - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - - - 
default-compile - - compile - - compile - - - -h - ${project.build.directory}/native/javah - - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - **/*.class - **/lib*.dylib - **/lib*.so - **/lib*.jnilib - **/lib*.dll - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${maven-surefire-plugin.argLine} @{argLine} -Djava.library.path=${project.build.directory}/native/rocksdb - - - - - - - diff --git a/hadoop-hdds/rocks-native/src/CMakeLists.txt b/hadoop-hdds/rocks-native/src/CMakeLists.txt deleted file mode 100644 index 0d216f50db52..000000000000 --- a/hadoop-hdds/rocks-native/src/CMakeLists.txt +++ /dev/null @@ -1,62 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# -# CMake configuration. -# - -cmake_minimum_required(VERSION 2.8...3.31) -add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") -set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC") -project(ozone_native) -set(CMAKE_BUILD_TYPE Release) -find_package(JNI REQUIRED) -include_directories(${JNI_INCLUDE_DIRS}) -set(CMAKE_CXX_STANDARD ${CMAKE_STANDARDS}) - -set(CMAKE_SKIP_BUILD_RPATH FALSE) - -set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE) - -set(CMAKE_INSTALL_RPATH "") - -set(CMAKE_INSTALL_RPATH_USE_LINK_PATH FALSE) - -if(NOT GENERATED_JAVAH) - message(FATAL_ERROR "You must set the CMake variable GENERATED_JAVAH") -endif() -include_directories(${GENERATED_JAVAH}) -if(${SST_DUMP_INCLUDE}) - include_directories(${ROCKSDB_HEADERS}) - set(SOURCE_FILES ${NATIVE_DIR}/ManagedRawSSTFileReader.cpp ${NATIVE_DIR}/ManagedRawSSTFileIterator.cpp ${NATIVE_DIR}/cplusplus_to_java_convert.h) - ADD_LIBRARY(rocks_tools STATIC IMPORTED) - set_target_properties( - rocks_tools - PROPERTIES - IMPORTED_LOCATION ${ROCKSDB_TOOLS_LIB}/librocksdb_tools.a) -endif() - -add_library(ozone_rocksdb_tools SHARED ${SOURCE_FILES}) - - -target_link_libraries(ozone_rocksdb_tools PRIVATE ${ROCKSDB_LIB}) -target_link_libraries(ozone_rocksdb_tools PRIVATE rocks_tools) -set_target_properties(ozone_rocksdb_tools PROPERTIES - BUILD_WITH_INSTALL_RPATH FALSE - LINK_FLAGS "-Wl,-rpath -Wl,'$ORIGIN'") diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java deleted file mode 100644 index 61fbcf03568a..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -/** - * Native Constants. - */ -public final class NativeConstants { - - public static final String ROCKS_TOOLS_NATIVE_LIBRARY_NAME = "ozone_rocksdb_tools"; - public static final String ROCKS_TOOLS_NATIVE_PROPERTY = "rocks_tools_native"; - - private NativeConstants() { - - } -} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java deleted file mode 100644 index 39bb0b3ca56b..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; - -import com.google.common.annotations.VisibleForTesting; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.concurrent.ConcurrentHashMap; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.ozone.util.ShutdownHookManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class to load Native Libraries. 
- */ -public class NativeLibraryLoader { - - private static final Logger LOG = - LoggerFactory.getLogger(NativeLibraryLoader.class); - public static final int LIBRARY_SHUTDOWN_HOOK_PRIORITY = 1; - private static final String OS = System.getProperty("os.name").toLowerCase(); - - public static final String NATIVE_LIB_TMP_DIR = "native.lib.tmp.dir"; - private Map librariesLoaded; - private static volatile NativeLibraryLoader instance; - - public NativeLibraryLoader(final Map librariesLoaded) { - this.librariesLoaded = librariesLoaded; - } - - private static synchronized void initNewInstance() { - if (instance == null) { - instance = new NativeLibraryLoader(new ConcurrentHashMap<>()); - } - } - - public static NativeLibraryLoader getInstance() { - if (instance == null) { - initNewInstance(); - } - return instance; - } - - public static String getJniLibraryFileName() { - return appendLibOsSuffix("lib" + ROCKS_TOOLS_NATIVE_LIBRARY_NAME); - } - - public static String getJniLibraryFileName(String libraryName) { - return appendLibOsSuffix("lib" + libraryName); - } - - public static boolean isMac() { - return OS.startsWith("mac"); - } - - public static boolean isWindows() { - return OS.startsWith("win"); - } - - public static boolean isLinux() { - return OS.startsWith("linux"); - } - - @VisibleForTesting - static String getLibOsSuffix() { - if (isMac()) { - return ".dylib"; - } else if (isWindows()) { - return ".dll"; - } else if (isLinux()) { - return ".so"; - } - throw new UnsatisfiedLinkError(String.format("Unsupported OS %s", OS)); - } - - private static String appendLibOsSuffix(String libraryFileName) { - return libraryFileName + getLibOsSuffix(); - } - - public static boolean isLibraryLoaded() { - return isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME); - } - - public static boolean isLibraryLoaded(final String libraryName) { - return getInstance().librariesLoaded.getOrDefault(libraryName, false); - } - - public synchronized boolean loadLibrary(final String libraryName, final List dependentFiles) { - if (isLibraryLoaded(libraryName)) { - return true; - } - LOG.info("Loading Library: {}", libraryName); - boolean loaded = false; - try { - loaded = false; - try { - System.loadLibrary(libraryName); - loaded = true; - } catch (Throwable e) { - - } - if (!loaded) { - Pair, List> files = copyResourceFromJarToTemp(libraryName, dependentFiles); - if (files.getKey().isPresent()) { - System.load(files.getKey().get().getAbsolutePath()); - loaded = true; - } - } - } catch (Throwable e) { - LOG.warn("Unable to load library: {}", libraryName, e); - } - this.librariesLoaded.put(libraryName, loaded); - return isLibraryLoaded(libraryName); - } - - // Added function to make this testable. - @VisibleForTesting - static String getSystemProperty(String property) { - return System.getProperty(property); - } - - // Added function to make this testable - @VisibleForTesting - static InputStream getResourceStream(String libraryFileName) throws IOException { - return NativeLibraryLoader.class.getClassLoader() - .getResourceAsStream(libraryFileName); - } - - private Pair, List> copyResourceFromJarToTemp(final String libraryName, - final List dependentFileNames) - throws IOException { - final String libraryFileName = getJniLibraryFileName(libraryName); - InputStream is = null; - try { - is = getResourceStream(libraryFileName); - if (is == null) { - return Pair.of(Optional.empty(), null); - } - - final String nativeLibDir = - Objects.nonNull(getSystemProperty(NATIVE_LIB_TMP_DIR)) ? 
- getSystemProperty(NATIVE_LIB_TMP_DIR) : ""; - final File dir = new File(nativeLibDir).getAbsoluteFile(); - - // create a temporary dir to copy the library to - final Path tempPath = Files.createTempDirectory(dir.toPath(), libraryName); - final File tempDir = tempPath.toFile(); - if (!tempDir.exists()) { - return Pair.of(Optional.empty(), null); - } - - Path libPath = tempPath.resolve(libraryFileName); - Files.copy(is, libPath, StandardCopyOption.REPLACE_EXISTING); - File libFile = libPath.toFile(); - if (libFile.exists()) { - libFile.deleteOnExit(); - } - - List dependentFiles = new ArrayList<>(); - for (String fileName : dependentFileNames) { - if (is != null) { - is.close(); - } - is = getResourceStream(fileName); - Path path = tempPath.resolve(fileName); - Files.copy(is, path, StandardCopyOption.REPLACE_EXISTING); - File file = path.toFile(); - if (file.exists()) { - file.deleteOnExit(); - } - dependentFiles.add(file); - } - ShutdownHookManager.get().addShutdownHook( - () -> FileUtils.deleteQuietly(tempDir), - LIBRARY_SHUTDOWN_HOOK_PRIORITY); - return Pair.of(Optional.of(libFile), dependentFiles); - } finally { - if (is != null) { - is.close(); - } - } - } -} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileIterator.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileIterator.java deleted file mode 100644 index 134f24942dac..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileIterator.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db; - -import com.google.common.primitives.UnsignedLong; -import java.nio.ByteBuffer; -import java.util.NoSuchElementException; -import java.util.function.Function; -import org.apache.hadoop.ozone.util.ClosableIterator; - -/** - * Iterator for SSTFileReader which would read all entries including tombstones. - */ -public class ManagedRawSSTFileIterator implements ClosableIterator { - // Native address of pointer to the object. - private final long nativeHandle; - private final Function transformer; - private final IteratorType type; - private boolean closed; - private final Buffer keyBuffer; - private final Buffer valueBuffer; - - ManagedRawSSTFileIterator(String name, long nativeHandle, Function transformer, IteratorType type) { - this.nativeHandle = nativeHandle; - this.transformer = transformer; - this.type = type; - this.closed = false; - this.keyBuffer = new Buffer( - new CodecBuffer.Capacity(name + " iterator-key", 1 << 10), - this.type.readKey() ? 
buffer -> this.getKey(this.nativeHandle, buffer, buffer.position(), - buffer.remaining()) : null); - this.valueBuffer = new Buffer( - new CodecBuffer.Capacity(name + " iterator-value", 4 << 10), - this.type.readValue() ? buffer -> this.getValue(this.nativeHandle, buffer, buffer.position(), - buffer.remaining()) : null); - } - - private native boolean hasNext(long handle); - - private native void next(long handle); - - private native int getKey(long handle, ByteBuffer buffer, int bufferOffset, int bufferLen); - - private native int getValue(long handle, ByteBuffer buffer, int bufferOffset, int bufferLen); - - private native long getSequenceNumber(long handle); - - private native int getType(long handle); - - @Override - public boolean hasNext() { - return this.hasNext(nativeHandle); - } - - @Override - public T next() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - - KeyValue keyValue = new KeyValue(this.type.readKey() ? this.keyBuffer.getFromDb() : null, - UnsignedLong.fromLongBits(this.getSequenceNumber(this.nativeHandle)), - this.getType(nativeHandle), - this.type.readValue() ? this.valueBuffer.getFromDb() : null); - this.next(nativeHandle); - return this.transformer.apply(keyValue); - } - - private native void closeInternal(long handle); - - @Override - public synchronized void close() { - if (!closed) { - this.closeInternal(this.nativeHandle); - keyBuffer.release(); - valueBuffer.release(); - } - closed = true; - } - - /** - * Class containing Parsed KeyValue Record from RawSstReader output. - */ - public static final class KeyValue { - - private final CodecBuffer key; - private final UnsignedLong sequence; - private final Integer type; - private final CodecBuffer value; - - private KeyValue(CodecBuffer key, UnsignedLong sequence, Integer type, - CodecBuffer value) { - this.key = key; - this.sequence = sequence; - this.type = type; - this.value = value; - } - - public CodecBuffer getKey() { - return this.key; - } - - public UnsignedLong getSequence() { - return sequence; - } - - public Integer getType() { - return type; - } - - public CodecBuffer getValue() { - return value; - } - - @Override - public String toString() { - return "KeyValue{" + - "key=" + (key == null ? null : StringCodec.get().fromCodecBuffer(key)) + - ", sequence=" + sequence + - ", type=" + type + - ", value=" + (value == null ? null : StringCodec.get().fromCodecBuffer(value)) + - '}'; - } - } -} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileReader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileReader.java deleted file mode 100644 index c644bd393b50..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileReader.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db; - -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; - -import java.io.Closeable; -import java.util.Arrays; -import java.util.function.Function; -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; -import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; -import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * JNI for RocksDB RawSSTFileReader. - */ -public class ManagedRawSSTFileReader implements Closeable { - - private static final Logger LOG = LoggerFactory.getLogger(ManagedRawSSTFileReader.class); - - private final String fileName; - // Native address of pointer to the object. - private final long nativeHandle; - - public static boolean tryLoadLibrary() { - try { - loadLibrary(); - return true; - } catch (NativeLibraryNotLoadedException ignored) { - return false; - } - } - - public static boolean loadLibrary() throws NativeLibraryNotLoadedException { - ManagedRocksObjectUtils.loadRocksDBLibrary(); - if (!NativeLibraryLoader.getInstance().loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME, Arrays.asList( - ManagedRocksObjectUtils.getRocksDBLibFileName()))) { - throw new NativeLibraryNotLoadedException(ROCKS_TOOLS_NATIVE_LIBRARY_NAME); - } - return true; - } - - public ManagedRawSSTFileReader(final ManagedOptions options, final String fileName, final int readAheadSize) { - this.fileName = fileName; - this.nativeHandle = this.newRawSSTFileReader(options.getNativeHandle(), fileName, readAheadSize); - } - - public ManagedRawSSTFileIterator newIterator( - Function transformerFunction, - ManagedSlice fromSlice, ManagedSlice toSlice, IteratorType type) { - long fromNativeHandle = fromSlice == null ? 0 : fromSlice.getNativeHandle(); - long toNativeHandle = toSlice == null ? 0 : toSlice.getNativeHandle(); - LOG.info("Iterating SST file: {} with native lib. " + - "LowerBound: {}, UpperBound: {}, type : {} with reader handle: {}", fileName, fromSlice, toSlice, type, - this.nativeHandle); - return new ManagedRawSSTFileIterator<>(fileName + " " + this.nativeHandle, - newIterator(this.nativeHandle, fromSlice != null, - fromNativeHandle, toSlice != null, toNativeHandle), - transformerFunction, type); - } - - private native long newRawSSTFileReader(long optionsHandle, String filePath, int readSize); - - private native long newIterator(long handle, boolean hasFrom, long fromSliceHandle, boolean hasTo, - long toSliceHandle); - - private native void disposeInternal(long handle); - - @Override - public void close() { - disposeInternal(nativeHandle); - } -} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java deleted file mode 100644 index 42b83808542d..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Native rocksdb utilities. - */ -package org.apache.hadoop.hdds.utils.db; diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/package-info.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/package-info.java deleted file mode 100644 index fe113d0e9591..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains util classes related to loading native rocksdb library. - */ -package org.apache.hadoop.hdds.utils; diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp deleted file mode 100644 index 7720e30b4119..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator.h" -#include "rocksdb/options.h" -#include "rocksdb/raw_iterator.h" -#include -#include "cplusplus_to_java_convert.h" -#include - -template -static jint copyToDirect(JNIEnv* env, T& source, jobject jtarget, jint jtarget_off, jint jtarget_len); - -jboolean Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_hasNext(JNIEnv *env, jobject obj, - jlong native_handle) { - return static_cast(reinterpret_cast(native_handle)->Valid()); -} - -void Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_next(JNIEnv *env, jobject obj, - jlong native_handle) { - reinterpret_cast(native_handle)->Next(); -} - -jint Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_getKey(JNIEnv *env, - jobject obj, - jlong native_handle, - jobject jtarget, - jint jtarget_off, jint jtarget_len) { - ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast(native_handle)->key(); - return copyToDirect(env, slice, jtarget, jtarget_off, jtarget_len); -} - - -jint Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_getValue(JNIEnv *env, - jobject obj, - jlong native_handle, - jobject jtarget, - jint jtarget_off, jint jtarget_len) { - ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast(native_handle)->value(); - return copyToDirect(env, slice, jtarget, jtarget_off, jtarget_len); -} - -jlong Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_getSequenceNumber(JNIEnv *env, - jobject obj, - jlong native_handle) { - uint64_t sequence_number = reinterpret_cast(native_handle)->sequenceNumber(); - jlong result; - std::memcpy(&result, &sequence_number, sizeof(jlong)); - return result; -} - - -jint Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_getType(JNIEnv *env, - jobject obj, - jlong native_handle) { - uint32_t type = reinterpret_cast(native_handle)->type(); - return static_cast(type); -} - - -void Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_closeInternal(JNIEnv *env, - jobject obj, - jlong native_handle) { - delete reinterpret_cast(native_handle); -} - -template -static jint copyToDirect(JNIEnv* env, T& source, jobject jtarget, - jint jtarget_off, jint jtarget_len) { - char* target = reinterpret_cast(env->GetDirectBufferAddress(jtarget)); - if (target == nullptr || env->GetDirectBufferCapacity(jtarget) < (jtarget_off + jtarget_len)) { - jclass exClass = env->FindClass("java/lang/IllegalArgumentException"); - if (exClass != nullptr) { - env->ThrowNew(exClass, "Invalid buffer address or capacity"); - } - return -1; - } - - target += jtarget_off; - - const jint cvalue_len = static_cast(source.size()); - const jint length = std::min(jtarget_len, cvalue_len); - - memcpy(target, source.data(), length); - - return cvalue_len; -} diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp deleted file mode 100644 index ff49ee56f06f..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileReader.h" -#include "rocksdb/options.h" -#include "rocksdb/raw_sst_file_reader.h" -#include "rocksdb/raw_iterator.h" -#include -#include "cplusplus_to_java_convert.h" -#include - -jlong Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileReader_newRawSSTFileReader(JNIEnv *env, jobject obj, - jlong options_handle, - jstring jfilename, - jint readahead_size) { - ROCKSDB_NAMESPACE::Options *options = reinterpret_cast(options_handle); - const char *file_path = env->GetStringUTFChars(jfilename, nullptr); - size_t read_ahead_size_value = static_cast(readahead_size); - ROCKSDB_NAMESPACE::RawSstFileReader* raw_sst_file_reader = - new ROCKSDB_NAMESPACE::RawSstFileReader(*options, file_path, read_ahead_size_value, true, true); - env->ReleaseStringUTFChars(jfilename, file_path); - return GET_CPLUSPLUS_POINTER(raw_sst_file_reader); -} - -jlong Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileReader_newIterator(JNIEnv *env, jobject obj, - jlong native_handle, - jboolean jhas_from, - jlong from_slice_handle, - jboolean jhas_to, - jlong to_slice_handle) { - ROCKSDB_NAMESPACE::Slice* from_slice = nullptr; - ROCKSDB_NAMESPACE::Slice* to_slice = nullptr; - ROCKSDB_NAMESPACE::RawSstFileReader* raw_sst_file_reader = - reinterpret_cast(native_handle); - bool has_from = static_cast(jhas_from); - bool has_to = static_cast(jhas_to); - if (has_from) { - from_slice = reinterpret_cast(from_slice_handle); - } - if (has_to) { - to_slice = reinterpret_cast(to_slice_handle); - } - ROCKSDB_NAMESPACE::RawIterator* iterator = raw_sst_file_reader->newIterator(has_from, from_slice, has_to, to_slice); - return GET_CPLUSPLUS_POINTER(iterator); -} - -void Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileReader_disposeInternal(JNIEnv *env, jobject obj, - jlong native_handle) { - delete reinterpret_cast(native_handle); -} diff --git a/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h b/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h deleted file mode 100644 index 4862ea12a1b9..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
-// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -#pragma once - -/* - * This macro is used for 32 bit OS. In 32 bit OS, the result number is a - negative number if we use reinterpret_cast(pointer). - * For example, jlong ptr = reinterpret_cast(pointer), ptr is a negative - number in 32 bit OS. - * If we check ptr using ptr > 0, it fails. For example, the following code is - not correct. - * if (jblock_cache_handle > 0) { - std::shared_ptr *pCache = - reinterpret_cast *>( - jblock_cache_handle); - options.block_cache = *pCache; - } - * But the result number is positive number if we do - reinterpret_cast(pointer) first and then cast it to jlong. size_t is 4 - bytes long in 32 bit OS and 8 bytes long in 64 bit OS. - static_cast(reinterpret_cast(_pointer)) is also working in 64 - bit OS. - * - * We don't need an opposite cast because it works from jlong to c++ pointer in - both 32 bit and 64 bit OS. - * For example, the following code is working in both 32 bit and 64 bit OS. - jblock_cache_handle is jlong. - * std::shared_ptr *pCache = - reinterpret_cast *>( - jblock_cache_handle); -*/ - -#define GET_CPLUSPLUS_POINTER(_pointer) \ - static_cast(reinterpret_cast(_pointer)) diff --git a/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch b/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch deleted file mode 100644 index b2627fbbb3ef..000000000000 --- a/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch +++ /dev/null @@ -1,547 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -diff --git a/include/rocksdb/raw_iterator.h b/include/rocksdb/raw_iterator.h -new file mode 100644 -index 000000000..21242ed15 ---- /dev/null -+++ b/include/rocksdb/raw_iterator.h -@@ -0,0 +1,25 @@ -+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -+// This source code is licensed under both the GPLv2 (found in the -+// COPYING file in the root directory) and Apache 2.0 License -+// (found in the LICENSE.Apache file in the root directory). 
-+#pragma once -+#ifndef ROCKSDB_LITE -+ -+ -+#include "rocksdb/advanced_options.h" -+namespace ROCKSDB_NAMESPACE { -+ -+class RawIterator { -+ public: -+ virtual ~RawIterator() {} -+ virtual bool Valid() const = 0; -+ virtual Slice key() const = 0; -+ virtual Slice value() const = 0; -+ virtual uint64_t sequenceNumber() const = 0; -+ virtual uint32_t type() const = 0; -+ virtual void Next() = 0; -+}; -+ -+} // namespace ROCKSDB_NAMESPACE -+ -+#endif // ROCKSDB_LITE -diff --git a/include/rocksdb/raw_sst_file_reader.h b/include/rocksdb/raw_sst_file_reader.h -new file mode 100644 -index 000000000..09e748208 ---- /dev/null -+++ b/include/rocksdb/raw_sst_file_reader.h -@@ -0,0 +1,62 @@ -+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -+// This source code is licensed under both the GPLv2 (found in the -+// COPYING file in the root directory) and Apache 2.0 License -+// (found in the LICENSE.Apache file in the root directory). -+#pragma once -+#ifndef ROCKSDB_LITE -+ -+#include -+#include -+ -+#include "rocksdb/raw_iterator.h" -+#include "rocksdb/advanced_options.h" -+#include "rocksdb/options.h" -+ -+ -+ -+namespace ROCKSDB_NAMESPACE { -+ -+class RawSstFileReader { -+ public: -+ -+ RawSstFileReader(const Options& options, const std::string& file_name, -+ size_t readahead_size, bool verify_checksum, -+ bool silent = false); -+ ~RawSstFileReader(); -+ -+ RawIterator* newIterator(bool has_from, Slice* from, -+ bool has_to, Slice *to); -+ Status getStatus() { return init_result_; } -+ -+ private: -+ // Get the TableReader implementation for the sst file -+ Status GetTableReader(const std::string& file_path); -+ Status ReadTableProperties(uint64_t table_magic_number, -+ uint64_t file_size); -+ -+ Status SetTableOptionsByMagicNumber(uint64_t table_magic_number); -+ Status SetOldTableOptions(); -+ -+ // Helper function to call the factory with settings specific to the -+ // factory implementation -+ Status NewTableReader(uint64_t file_size); -+ -+ std::string file_name_; -+ Temperature file_temp_; -+ -+ // less verbose in stdout/stderr -+ bool silent_; -+ -+ // options_ and internal_comparator_ will also be used in -+ // ReadSequential internally (specifically, seek-related operations) -+ Options options_; -+ -+ Status init_result_; -+ -+ struct Rep; -+ std::unique_ptr rep_; -+}; -+ -+} // namespace ROCKSDB_NAMESPACE -+ -+#endif // ROCKSDB_LITE -diff --git a/src.mk b/src.mk -index b94bc43ca..c13e5cde6 100644 ---- a/src.mk -+++ b/src.mk -@@ -338,11 +338,8 @@ RANGE_TREE_SOURCES =\ - utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc - - TOOL_LIB_SOURCES = \ -- tools/io_tracer_parser_tool.cc \ -- tools/ldb_cmd.cc \ -- tools/ldb_tool.cc \ -- tools/sst_dump_tool.cc \ -- utilities/blob_db/blob_dump_tool.cc \ -+ tools/raw_sst_file_reader.cc \ -+ tools/raw_sst_file_iterator.cc \ - - ANALYZER_LIB_SOURCES = \ - tools/block_cache_analyzer/block_cache_trace_analyzer.cc \ -diff --git a/tools/raw_sst_file_iterator.cc b/tools/raw_sst_file_iterator.cc -new file mode 100644 -index 000000000..3051637a3 ---- /dev/null -+++ b/tools/raw_sst_file_iterator.cc -@@ -0,0 +1,76 @@ -+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -+// This source code is licensed under both the GPLv2 (found in the -+// COPYING file in the root directory) and Apache 2.0 License -+// (found in the LICENSE.Apache file in the root directory). 
-+// -+#ifndef ROCKSDB_LITE -+ -+ -+#include -+#include -+ -+#include "db/memtable.h" -+#include "db/write_batch_internal.h" -+#include "rocksdb/status.h" -+#include "rocksdb/utilities/ldb_cmd.h" -+#include "table/block_based/block.h" -+#include "table/block_based/block_based_table_builder.h" -+#include "table/block_based/block_based_table_factory.h" -+#include "table/meta_blocks.h" -+#include "table/plain/plain_table_factory.h" -+#include "tools/raw_sst_file_iterator.h" -+ -+namespace ROCKSDB_NAMESPACE { -+ -+RawSstFileIterator::RawSstFileIterator(InternalIterator* iterator, -+ bool has_from, Slice* from_key, -+ bool has_to, Slice* to_key) -+ : iter_(iterator), -+ ikey(new ParsedInternalKey()), -+ has_to_(has_to), -+ to_key_(to_key) { -+ if (has_from) { -+ InternalKey k; -+ k.SetMinPossibleForUserKey(*from_key); -+ iter_->Seek(k.Encode()); -+ } else { -+ iter_->SeekToFirst(); -+ } -+ initKey(); -+} -+ -+bool RawSstFileIterator::Valid() const { -+ return iter_->Valid() && (!has_to_ || -+ BytewiseComparator()->Compare( -+ key(), *to_key_) < 0); -+} -+ -+void RawSstFileIterator::initKey() { -+ if (iter_->Valid()) { -+ ParseInternalKey(iter_->key(), ikey, true /* log_err_key */); -+ } -+} -+void RawSstFileIterator::Next() { -+ iter_->Next(); -+ initKey(); -+ -+} -+ -+Slice RawSstFileIterator::key() const { -+ return ikey->user_key; -+} -+ -+uint64_t RawSstFileIterator::sequenceNumber() const { -+ return ikey->sequence; -+} -+ -+uint32_t RawSstFileIterator::type() const { -+ return static_cast(ikey->type); -+} -+ -+Slice RawSstFileIterator::value() const { -+ return iter_->value(); -+} -+} // namespace ROCKSDB_NAMESPACE -+ -+#endif // ROCKSDB_LITE -diff --git a/tools/raw_sst_file_iterator.h b/tools/raw_sst_file_iterator.h -new file mode 100644 -index 000000000..eedb848b5 ---- /dev/null -+++ b/tools/raw_sst_file_iterator.h -@@ -0,0 +1,46 @@ -+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -+// This source code is licensed under both the GPLv2 (found in the -+// COPYING file in the root directory) and Apache 2.0 License -+// (found in the LICENSE.Apache file in the root directory). -+#pragma once -+#ifndef ROCKSDB_LITE -+ -+#include -+#include -+#include "file/writable_file_writer.h" -+#include "rocksdb/advanced_options.h" -+#include "rocksdb/raw_iterator.h" -+ -+namespace ROCKSDB_NAMESPACE { -+ -+class RawSstFileIterator : public RawIterator { -+ public: -+ explicit RawSstFileIterator(InternalIterator* iterator, -+ bool has_from, -+ Slice* from_key, -+ bool has_to, -+ Slice* to_key); -+ -+ bool Valid() const override; -+ Slice key() const override; -+ Slice value() const override; -+ uint64_t sequenceNumber() const override; -+ uint32_t type() const override; -+ void Next() final override; -+ -+ ~RawSstFileIterator(){ -+ delete iter_; -+ delete ikey; -+ } -+ -+ private: -+ void initKey(); -+ InternalIterator* iter_; -+ ParsedInternalKey* ikey; -+ bool has_to_; -+ Slice* to_key_; -+}; -+ -+} // namespace ROCKSDB_NAMESPACE -+ -+#endif // ROCKSDB_LITE -diff --git a/tools/raw_sst_file_reader.cc b/tools/raw_sst_file_reader.cc -new file mode 100644 -index 000000000..5ba8a82ee ---- /dev/null -+++ b/tools/raw_sst_file_reader.cc -@@ -0,0 +1,272 @@ -+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. -+// This source code is licensed under both the GPLv2 (found in the -+// COPYING file in the root directory) and Apache 2.0 License -+// (found in the LICENSE.Apache file in the root directory). 
-+// -+#ifndef ROCKSDB_LITE -+ -+#include "rocksdb/raw_sst_file_reader.h" -+ -+#include -+#include -+#include -+#include -+ -+ -+#include "db/memtable.h" -+#include "db/write_batch_internal.h" -+#include "options/cf_options.h" -+#include "rocksdb/env.h" -+#include "rocksdb/slice_transform.h" -+#include "rocksdb/status.h" -+#include "rocksdb/table_properties.h" -+#include "rocksdb/utilities/ldb_cmd.h" -+#include "table/block_based/block.h" -+#include "table/block_based/block_based_table_builder.h" -+#include "table/block_based/block_based_table_factory.h" -+#include "table/format.h" -+#include "table/meta_blocks.h" -+#include "table/plain/plain_table_factory.h" -+#include "table/table_reader.h" -+#include "tools/raw_sst_file_iterator.h" -+#include "db/dbformat.h" -+ -+namespace ROCKSDB_NAMESPACE { -+ -+struct RawSstFileReader::Rep { -+ Options options; -+ EnvOptions soptions_; -+ ReadOptions read_options_; -+ ImmutableOptions ioptions_; -+ MutableCFOptions moptions_; -+ InternalKeyComparator internal_comparator_; -+ std::unique_ptr table_properties_; -+ std::unique_ptr table_reader_; -+ std::unique_ptr file_; -+ -+ Rep(const Options& opts, bool verify_checksum, size_t readahead_size) -+ : options(opts), -+ soptions_(EnvOptions()), -+ read_options_(verify_checksum, false), -+ ioptions_(options), -+ moptions_(ColumnFamilyOptions(options)), -+ internal_comparator_(InternalKeyComparator(BytewiseComparator())) { -+ read_options_.readahead_size = readahead_size; -+ } -+}; -+ -+RawSstFileReader::RawSstFileReader(const Options& options, -+ const std::string& file_name, -+ size_t readahead_size, -+ bool verify_checksum, -+ bool silent) :rep_(new Rep(options, -+ verify_checksum, -+ readahead_size)) { -+ file_name_ = file_name; -+ silent_ = silent; -+ options_ = options; -+ file_temp_ = Temperature::kUnknown; -+ init_result_ = GetTableReader(file_name_); -+} -+ -+RawSstFileReader::~RawSstFileReader() {} -+ -+ -+ -+extern const uint64_t kBlockBasedTableMagicNumber; -+extern const uint64_t kLegacyBlockBasedTableMagicNumber; -+extern const uint64_t kPlainTableMagicNumber; -+extern const uint64_t kLegacyPlainTableMagicNumber; -+ -+Status RawSstFileReader::GetTableReader(const std::string& file_path) { -+ // Warning about 'magic_number' being uninitialized shows up only in UBsan -+ // builds. Though access is guarded by 's.ok()' checks, fix the issue to -+ // avoid any warnings. -+ uint64_t magic_number = Footer::kNullTableMagicNumber; -+ -+ // read table magic number -+ Footer footer; -+ -+ const auto& fs = options_.env->GetFileSystem(); -+ std::unique_ptr file; -+ uint64_t file_size = 0; -+ FileOptions fopts = rep_->soptions_; -+ fopts.temperature = file_temp_; -+ Status s = fs->NewRandomAccessFile(file_path, fopts, &file, nullptr); -+ if (s.ok()) { -+ s = fs->GetFileSize(file_path, IOOptions(), &file_size, nullptr); -+ } -+ -+ // check empty file -+ // if true, skip further processing of this file -+ if (file_size == 0) { -+ return Status::Aborted(file_path, "Empty file"); -+ } -+ -+ rep_->file_.reset(new RandomAccessFileReader(std::move(file), file_path)); -+ -+ FilePrefetchBuffer prefetch_buffer( -+ 0 /* readahead_size */, 0 /* max_readahead_size */, true /* enable */, -+ false /* track_min_offset */); -+ if (s.ok()) { -+ const uint64_t kSstDumpTailPrefetchSize = 512 * 1024; -+ uint64_t prefetch_size = (file_size > kSstDumpTailPrefetchSize) -+ ? 
kSstDumpTailPrefetchSize -+ : file_size; -+ uint64_t prefetch_off = file_size - prefetch_size; -+ IOOptions opts; -+ s = prefetch_buffer.Prefetch(opts, rep_->file_.get(), prefetch_off, -+ static_cast(prefetch_size), -+ Env::IO_TOTAL /* rate_limiter_priority */); -+ -+ s = ReadFooterFromFile(opts, rep_->file_.get(), &prefetch_buffer, file_size, -+ &footer); -+ } -+ if (s.ok()) { -+ magic_number = footer.table_magic_number(); -+ } -+ -+ if (s.ok()) { -+ if (magic_number == kPlainTableMagicNumber || -+ magic_number == kLegacyPlainTableMagicNumber) { -+ rep_->soptions_.use_mmap_reads = true; -+ -+ fs->NewRandomAccessFile(file_path, fopts, &file, nullptr); -+ rep_->file_.reset(new RandomAccessFileReader(std::move(file), file_path)); -+ } -+ -+ s = ROCKSDB_NAMESPACE::ReadTableProperties( -+ rep_->file_.get(), file_size, magic_number, rep_->ioptions_, &(rep_->table_properties_), -+ /* memory_allocator= */ nullptr, (magic_number == kBlockBasedTableMagicNumber) -+ ? &prefetch_buffer -+ : nullptr); -+ // For old sst format, ReadTableProperties might fail but file can be read -+ if (s.ok()) { -+ s = SetTableOptionsByMagicNumber(magic_number); -+ if (s.ok()) { -+ if (rep_->table_properties_ && !rep_->table_properties_->comparator_name.empty()) { -+ ConfigOptions config_options; -+ const Comparator* user_comparator = nullptr; -+ s = Comparator::CreateFromString(config_options, -+ rep_->table_properties_->comparator_name, -+ &user_comparator); -+ if (s.ok()) { -+ assert(user_comparator); -+ rep_->internal_comparator_ = InternalKeyComparator(user_comparator); -+ } -+ } -+ } -+ } else { -+ if (!silent_) { -+ fprintf(stderr, "Not able to read table properties\n"); -+ } -+ s = SetOldTableOptions(); -+ } -+ options_.comparator = rep_->internal_comparator_.user_comparator(); -+ } -+ -+ if (s.ok()) { -+ s = NewTableReader(file_size); -+ } -+ return s; -+} -+ -+Status RawSstFileReader::NewTableReader(uint64_t file_size) { -+ auto t_opt = -+ TableReaderOptions(rep_->ioptions_, rep_->moptions_.prefix_extractor, rep_->soptions_, -+ rep_->internal_comparator_, false /* skip_filters */, -+ false /* imortal */, true /* force_direct_prefetch */); -+ // Allow open file with global sequence number for backward compatibility. -+ t_opt.largest_seqno = kMaxSequenceNumber; -+ -+ // We need to turn off pre-fetching of index and filter nodes for -+ // BlockBasedTable -+ if (options_.table_factory->IsInstanceOf( -+ TableFactory::kBlockBasedTableName())) { -+ return options_.table_factory->NewTableReader(t_opt, std::move(rep_->file_), -+ file_size, &(rep_->table_reader_), -+ /*enable_prefetch=*/false); -+ } -+ -+ // For all other factory implementation -+ return options_.table_factory->NewTableReader(t_opt, std::move(rep_->file_), -+ file_size, &(rep_->table_reader_)); -+} -+ -+Status RawSstFileReader::SetTableOptionsByMagicNumber( -+ uint64_t table_magic_number) { -+ assert(rep_->table_properties_); -+ if (table_magic_number == kBlockBasedTableMagicNumber || -+ table_magic_number == kLegacyBlockBasedTableMagicNumber) { -+ BlockBasedTableFactory* bbtf = new BlockBasedTableFactory(); -+ // To force tail prefetching, we fake reporting two useful reads of 512KB -+ // from the tail. -+ // It needs at least two data points to warm up the stats. 
-+ bbtf->tail_prefetch_stats()->RecordEffectiveSize(512 * 1024); -+ bbtf->tail_prefetch_stats()->RecordEffectiveSize(512 * 1024); -+ -+ options_.table_factory.reset(bbtf); -+ if (!silent_) { -+ fprintf(stdout, "Sst file format: block-based\n"); -+ } -+ -+ auto& props = rep_->table_properties_->user_collected_properties; -+ auto pos = props.find(BlockBasedTablePropertyNames::kIndexType); -+ if (pos != props.end()) { -+ auto index_type_on_file = static_cast( -+ DecodeFixed32(pos->second.c_str())); -+ if (index_type_on_file == -+ BlockBasedTableOptions::IndexType::kHashSearch) { -+ options_.prefix_extractor.reset(NewNoopTransform()); -+ } -+ } -+ } else if (table_magic_number == kPlainTableMagicNumber || -+ table_magic_number == kLegacyPlainTableMagicNumber) { -+ options_.allow_mmap_reads = true; -+ -+ PlainTableOptions plain_table_options; -+ plain_table_options.user_key_len = kPlainTableVariableLength; -+ plain_table_options.bloom_bits_per_key = 0; -+ plain_table_options.hash_table_ratio = 0; -+ plain_table_options.index_sparseness = 1; -+ plain_table_options.huge_page_tlb_size = 0; -+ plain_table_options.encoding_type = kPlain; -+ plain_table_options.full_scan_mode = true; -+ -+ options_.table_factory.reset(NewPlainTableFactory(plain_table_options)); -+ if (!silent_) { -+ fprintf(stdout, "Sst file format: plain table\n"); -+ } -+ } else { -+ char error_msg_buffer[80]; -+ snprintf(error_msg_buffer, sizeof(error_msg_buffer) - 1, -+ "Unsupported table magic number --- %lx", -+ (long)table_magic_number); -+ return Status::InvalidArgument(error_msg_buffer); -+ } -+ -+ return Status::OK(); -+} -+ -+Status RawSstFileReader::SetOldTableOptions() { -+ assert(rep_->table_properties_ == nullptr); -+ options_.table_factory = std::make_shared(); -+ if (!silent_) { -+ fprintf(stdout, "Sst file format: block-based(old version)\n"); -+ } -+ -+ return Status::OK(); -+} -+ -+RawIterator* RawSstFileReader::newIterator( -+ bool has_from, Slice* from, bool has_to, Slice* to) { -+ InternalIterator* iter = rep_->table_reader_->NewIterator( -+ rep_->read_options_, rep_->moptions_.prefix_extractor.get(), -+ /*arena=*/nullptr, /*skip_filters=*/false, -+ TableReaderCaller::kSSTDumpTool); -+ return new RawSstFileIterator(iter, has_from, from, has_to, to); -+ -+} -+} // namespace ROCKSDB_NAMESPACE -+ -+#endif // ROCKSDB_LITE diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java deleted file mode 100644 index b8ac1c132e0c..000000000000 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.utils; - -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_PROPERTY; -import static org.apache.hadoop.hdds.utils.NativeLibraryLoader.NATIVE_LIB_TMP_DIR; -import static org.apache.hadoop.hdds.utils.NativeLibraryLoader.getJniLibraryFileName; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.CALLS_REAL_METHODS; -import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.mockStatic; -import static org.mockito.Mockito.same; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Stream; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; -import org.junit.jupiter.api.condition.EnabledIfSystemProperty; -import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; -import org.mockito.MockedStatic; - -/** - * Test class for NativeLibraryLoader. - */ -public class TestNativeLibraryLoader { - - @TempDir - private static Path tempDir; - - private static Stream nativeLibraryDirectoryLocations() { - return Stream.of("", tempDir.toAbsolutePath().toString(), null); - } - - @EnabledIfSystemProperty(named = ROCKS_TOOLS_NATIVE_PROPERTY, matches = "true") - @ParameterizedTest - @MethodSource("nativeLibraryDirectoryLocations") - public void testNativeLibraryLoader(String nativeLibraryDirectoryLocation) throws NativeLibraryNotLoadedException { - Map libraryLoadedMap = new HashMap<>(); - NativeLibraryLoader loader = new NativeLibraryLoader(libraryLoadedMap); - try (MockedStatic mockedNativeLibraryLoader = mockStatic(NativeLibraryLoader.class, - CALLS_REAL_METHODS)) { - mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getSystemProperty(same(NATIVE_LIB_TMP_DIR))) - .thenReturn(nativeLibraryDirectoryLocation); - mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getInstance()).thenReturn(loader); - ManagedRawSSTFileReader.loadLibrary(); - assertTrue(NativeLibraryLoader.isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); - } - } - - @ParameterizedTest - @MethodSource("nativeLibraryDirectoryLocations") - public void testDummyLibrary(String nativeLibraryDirectoryLocation) { - Map libraryLoadedMap = new HashMap<>(); - NativeLibraryLoader loader = new NativeLibraryLoader(libraryLoadedMap); - try (MockedStatic mockedNativeLibraryLoader = mockStatic(NativeLibraryLoader.class, - CALLS_REAL_METHODS)) { - mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getSystemProperty(same(NATIVE_LIB_TMP_DIR))) - .thenReturn(nativeLibraryDirectoryLocation); - mockedNativeLibraryLoader.when(NativeLibraryLoader::getInstance).thenReturn(loader); - // Mocking to force copy random bytes to create a lib file to - // nativeLibraryDirectoryLocation. But load library will fail. - mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getResourceStream(anyString())) - .thenReturn(new ByteArrayInputStream(new byte[]{0, 1, 2, 3})); - String dummyLibraryName = "dummy_lib"; - List dependencies = Arrays.asList("dep1", "dep2"); - File absDir = new File(nativeLibraryDirectoryLocation == null ? 
"" : nativeLibraryDirectoryLocation) - .getAbsoluteFile(); - - NativeLibraryLoader.getInstance().loadLibrary(dummyLibraryName, dependencies); - - // Checking if the resource with random was copied to a temp file. - File[] libPath = absDir - .listFiles((dir, name) -> name.startsWith(dummyLibraryName)); - assertThat(libPath) - .isNotNull() - .isNotEmpty(); - assertThat(libPath[0]) - .isDirectory(); - try { - assertThat(new File(libPath[0], getJniLibraryFileName(dummyLibraryName))) - .isFile(); - dependencies.forEach(dep -> assertThat(new File(libPath[0], dep)).isFile()); - } finally { - FileUtils.deleteQuietly(libPath[0]); - } - } - } -} diff --git a/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties b/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties deleted file mode 100644 index f2c32eee5db0..000000000000 --- a/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties +++ /dev/null @@ -1,75 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name=PropertiesConfig - -# Checks for config change periodically and reloads -monitorInterval=5 - -filter=read, write -# filter.read.onMatch = DENY avoids logging all READ events -# filter.read.onMatch = ACCEPT permits logging all READ events -# The above two settings ignore the log levels in configuration -# filter.read.onMatch = NEUTRAL permits logging of only those READ events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.read.type = MarkerFilter -filter.read.marker = READ -filter.read.onMatch = NEUTRAL -filter.read.onMismatch = NEUTRAL - -# filter.write.onMatch = DENY avoids logging all WRITE events -# filter.write.onMatch = ACCEPT permits logging all WRITE events -# The above two settings ignore the log levels in configuration -# filter.write.onMatch = NEUTRAL permits logging of only those WRITE events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.write.type = MarkerFilter -filter.write.marker = WRITE -filter.write.onMatch = NEUTRAL -filter.write.onMismatch = NEUTRAL - -# Log Levels are organized from most specific to least: -# OFF (most specific, no logging) -# FATAL (most specific, little data) -# ERROR -# WARN -# INFO -# DEBUG -# TRACE (least specific, a lot of data) -# ALL (least specific, all data) - -appenders = console, audit -appender.console.type = Console -appender.console.name = STDOUT -appender.console.layout.type = PatternLayout -appender.console.layout.pattern = %-5level | %c{1} | %msg%n - -appender.audit.type = File -appender.audit.name = AUDITLOG -appender.audit.fileName=audit.log -appender.audit.layout.type=PatternLayout -appender.audit.layout.pattern= %-5level | %c{1} | %C | %msg%n - -loggers=audit -logger.audit.name=OMAudit 
-logger.audit.level = INFO -logger.audit.appenderRefs = audit -logger.audit.appenderRef.file.ref = AUDITLOG - -rootLogger.level = INFO -rootLogger.appenderRefs = stdout -rootLogger.appenderRef.stdout.ref = STDOUT diff --git a/hadoop-hdds/rocks-native/src/test/resources/log4j.properties b/hadoop-hdds/rocks-native/src/test/resources/log4j.properties deleted file mode 100644 index 398786689af3..000000000000 --- a/hadoop-hdds/rocks-native/src/test/resources/log4j.properties +++ /dev/null @@ -1,23 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# log4j configuration used during build and unit tests - -log4j.rootLogger=INFO,stdout -log4j.threshold=ALL -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml index e991b8702990..13ecefc26979 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml +++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml @@ -67,10 +67,6 @@ org.apache.ozone hdds-managed-rocksdb - - org.apache.ozone - hdds-rocks-native - org.apache.ratis ratis-common @@ -90,12 +86,6 @@ hadoop-common provided - - org.apache.ozone - hdds-rocks-native - test-jar - test - org.apache.ozone hdds-test-utils @@ -144,24 +134,5 @@ - - native-testing - - - rocks_tools_native - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${maven-surefire-plugin.argLine} @{argLine} -Djava.library.path=${project.parent.basedir}/rocks-native/target/native/rocksdb - - - - - diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/AbstractSstFileIterator.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/AbstractSstFileIterator.java new file mode 100644 index 000000000000..afa74d0bac60 --- /dev/null +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/AbstractSstFileIterator.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.utils.db; + +import java.util.function.Function; +import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator; +import org.apache.hadoop.ozone.util.ClosableIterator; +import org.rocksdb.RocksDBException; + +/** + * AbstractSstFileIterator is an abstract class designed to provide a managed, resource-safe + * iteration over SST (Sorted String Table) files leveraging RocksDB. It implements the + * {@link ClosableIterator} interface to support resource management and ensures proper + * cleanup of resources upon closure. This class binds together a ManagedSstFileReader, + * ManagedSstFileReaderIterator, and Buffers for keys and values, while allowing specific + * implementations to define how the iterator values are derived. + * + * @param The type of the element to be returned by the iterator. + */ +abstract class AbstractSstFileIterator implements ClosableIterator { + private final ManagedSstFileReader fileReader; + private final ManagedSstFileReaderIterator fileReaderIterator; + private final IteratorType type; + private final ManagedOptions options; + private boolean closed; + private final Buffer keyBuffer; + private final Buffer valueBuffer; + + AbstractSstFileIterator(String path, ManagedOptions options, IteratorType type, + Function itrInitFunction) throws RocksDatabaseException { + try { + this.fileReader = new ManagedSstFileReader(options); + this.fileReader.open(path); + this.fileReaderIterator = itrInitFunction.apply(fileReader); + fileReaderIterator.get().seekToFirst(); + this.closed = false; + this.type = type; + this.keyBuffer = new Buffer( + new CodecBuffer.Capacity(path + " iterator-key", 1 << 10), + this.type.readKey() ? buffer -> fileReaderIterator.get().key(buffer) : null); + this.valueBuffer = new Buffer( + new CodecBuffer.Capacity(path + " iterator-value", 4 << 10), + this.type.readValue() ? buffer -> fileReaderIterator.get().value(buffer) : null); + this.options = options; + } catch (RocksDBException e) { + throw new RocksDatabaseException("Failed to open SST file: " + path, e); + } + } + + @Override + public synchronized void close() { + if (!closed) { + this.fileReaderIterator.close(); + this.fileReader.close(); + keyBuffer.release(); + valueBuffer.release(); + } + closed = true; + } + + @Override + public synchronized boolean hasNext() { + return fileReaderIterator.get().isValid(); + } + + abstract T getIteratorValue(CodecBuffer key, CodecBuffer value); + + @Override + public synchronized T next() { + T value = getIteratorValue(this.type.readKey() ? keyBuffer.getFromDb() : null, + this.type.readValue() ?
valueBuffer.getFromDb() : null); + fileReaderIterator.get().next(); + return value; + } + + ManagedOptions getOptions() { + return options; + } +} diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSstFileIterator.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSstFileIterator.java new file mode 100644 index 000000000000..2a94500d2934 --- /dev/null +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSstFileIterator.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.utils.db; + +import com.google.common.primitives.UnsignedLong; +import java.util.Optional; +import java.util.function.Function; +import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedParsedEntry; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator; +import org.rocksdb.EntryType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * ManagedRawSstFileIterator provides an implementation of {@link AbstractSstFileIterator} + * to iterate over and transform all entries (including all non user entries like tombstone) in SST (Sorted String + * Table) files using RocksDB. It utilizes a custom entry parser, key-value transformation logic, and resource + * management features for efficient and safe access to the SST data. + * This implementation makes use of TableIterator implemented by RocksDB to read the entries from the SST files. + * + * @param The type of the transformation result for each SST record. 
+ */ +public class ManagedRawSstFileIterator extends AbstractSstFileIterator { + + private static final Logger LOG = LoggerFactory.getLogger(ManagedRawSstFileIterator.class); + private final ManagedParsedEntry entryParser; + private final Buffer userKeyBuffer; + private final Function transformer; + private boolean closed; + private final KeyValue currentKeyValue; + + public ManagedRawSstFileIterator(String path, ManagedOptions options, + Optional internalKeyLowerBound, + Optional internalKeyUpperBound, IteratorType type, + Function transformer) throws RocksDatabaseException { + super(path, options, type, sstFileReader -> + ManagedSstFileReaderIterator.managed(sstFileReader.newTableIterator( + internalKeyLowerBound.orElse(null), internalKeyUpperBound.orElse(null)))); + LOG.info("Created ManagedRawSstFileIterator for path: {} with lower bound : {} and upper bound : {}", + path, internalKeyLowerBound, internalKeyUpperBound); + this.entryParser = new ManagedParsedEntry(); + this.userKeyBuffer = new Buffer( + new CodecBuffer.Capacity(path + " iterator-key", 1 << 10), + type.readKey() ? entryParser::userKey : null); + this.currentKeyValue = new KeyValue(); + this.transformer = transformer; + } + + @Override + T getIteratorValue(CodecBuffer key, CodecBuffer value) { + if (key != null) { + entryParser.parseEntry(getOptions(), key.asReadOnlyByteBuffer()); + CodecBuffer userKey = userKeyBuffer.getFromDb(); + EntryType type = entryParser.getEntryType(); + UnsignedLong sequenceNumber = UnsignedLong.fromLongBits(entryParser.getSequenceNumber()); + return this.transformer.apply(currentKeyValue.setKeyValue(userKey, sequenceNumber, type, value)); + } + return this.transformer.apply(currentKeyValue.setKeyValue(null, UnsignedLong.fromLongBits(0), null, + value)); + } + + @Override + public synchronized void close() { + super.close(); + if (!closed) { + userKeyBuffer.release(); + entryParser.close(); + } + closed = true; + } + + /** + * Class containing Parsed KeyValue Record from RawSstReader output. + */ + public static final class KeyValue { + + private CodecBuffer key; + private UnsignedLong sequence; + private EntryType type; + private CodecBuffer value; + + private KeyValue() { + } + + private KeyValue setKeyValue(CodecBuffer keyBuffer, UnsignedLong sequenceVal, EntryType typeVal, + CodecBuffer valueBuffer) { + this.key = keyBuffer; + this.sequence = sequenceVal; + this.type = typeVal; + this.value = valueBuffer; + return this; + } + + public CodecBuffer getKey() { + return this.key; + } + + public UnsignedLong getSequence() { + return sequence; + } + + public EntryType getType() { + return type; + } + + public CodecBuffer getValue() { + return value; + } + + @Override + public String toString() { + return "KeyValue{" + + "key=" + (key == null ? null : StringCodec.get().fromCodecBuffer(key)) + + ", sequence=" + sequence + + ", type=" + type + + ", value=" + (value == null ? 
null : StringCodec.get().fromCodecBuffer(value)) + + '}'; + } + } +} + diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedSstFileIterator.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedSstFileIterator.java index abfbd48e347e..a6de8c9013f2 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedSstFileIterator.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedSstFileIterator.java @@ -17,74 +17,16 @@ package org.apache.hadoop.hdds.utils.db; +import static org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator.managed; + import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator; -import org.apache.hadoop.ozone.util.ClosableIterator; -import org.rocksdb.RocksDBException; - -/** - * ManagedSstFileIterator is an abstract class designed to provide a managed, resource-safe - * iteration over SST (Sorted String Table) files leveraging RocksDB. It implements the - * {@link ClosableIterator} interface to support resource management and ensures proper - * cleanup of resources upon closure. This class binds together a ManagedSstFileReader, - * ManagedSstFileReaderIterator, and Buffers for keys and values, while allowing specific - * implementations to define how the iterator values are derived. - * - * @param The type of the element to be returned by the iterator. - */ -abstract class ManagedSstFileIterator implements ClosableIterator { - private final ManagedSstFileReader fileReader; - private final ManagedSstFileReaderIterator fileReaderIterator; - private final IteratorType type; - private boolean closed; - private final Buffer keyBuffer; - private final Buffer valueBuffer; - ManagedSstFileIterator(String path, ManagedOptions options, ManagedReadOptions readOptions, - IteratorType type) throws RocksDatabaseException { - try { - this.fileReader = new ManagedSstFileReader(options); - this.fileReader.open(path); - this.fileReaderIterator = ManagedSstFileReaderIterator.managed(fileReader.newIterator(readOptions)); - fileReaderIterator.get().seekToFirst(); - this.closed = false; - this.type = type; - this.keyBuffer = new Buffer( - new CodecBuffer.Capacity(path + " iterator-key", 1 << 10), - this.type.readKey() ? buffer -> fileReaderIterator.get().key(buffer) : null); - this.valueBuffer = new Buffer( - new CodecBuffer.Capacity(path + " iterator-value", 4 << 10), - this.type.readValue() ? buffer -> fileReaderIterator.get().value(buffer) : null); - } catch (RocksDBException e) { - throw new RocksDatabaseException("Failed to open SST file: " + path, e); - } - } +abstract class ManagedSstFileIterator extends AbstractSstFileIterator { - @Override - public synchronized void close() { - if (!closed) { - this.fileReaderIterator.close(); - this.fileReader.close(); - keyBuffer.release(); - valueBuffer.release(); - } - closed = true; - } - - @Override - public synchronized boolean hasNext() { - return fileReaderIterator.get().isValid(); - } - - abstract T getIteratorValue(CodecBuffer key, CodecBuffer value); - - @Override - public synchronized T next() { - T value = getIteratorValue(this.type.readKey() ? keyBuffer.getFromDb() : null, - this.type.readValue() ? 
valueBuffer.getFromDb() : null); - fileReaderIterator.get().next(); - return value; + ManagedSstFileIterator(String path, ManagedOptions options, ManagedReadOptions readOptions, IteratorType type) + throws RocksDatabaseException { + super(path, options, type, sstFileReader -> managed(sstFileReader.newIterator(readOptions))); } } + diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/SstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/SstFileSetReader.java index b4c39ccc9c27..265b6520cead 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/SstFileSetReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/SstFileSetReader.java @@ -27,14 +27,14 @@ import java.util.Map; import java.util.NoSuchElementException; import java.util.Objects; -import java.util.function.Function; +import java.util.Optional; import java.util.stream.Collectors; import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileIterator.KeyValue; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; +import org.apache.hadoop.hdds.utils.db.managed.ManagedTypeUtil; import org.apache.hadoop.ozone.util.ClosableIterator; import org.rocksdb.RocksDBException; @@ -133,66 +133,42 @@ public ClosableIterator getKeyStreamWithTombstone(String lowerBound, Str final MultipleSstFileIterator itr = new MultipleSstFileIterator(sstFiles) { //TODO: [SNAPSHOT] Check if default Options is enough. 
private ManagedOptions options; - private ManagedSlice lowerBoundSlice; - private ManagedSlice upperBoundSlice; + private Optional lowerBoundSlice; + private Optional upperBoundSlice; @Override protected void init() throws CodecException { this.options = new ManagedOptions(); if (Objects.nonNull(lowerBound)) { - this.lowerBoundSlice = new ManagedSlice( - StringCodec.get().toPersistedFormat(lowerBound)); + this.lowerBoundSlice = Optional.of(new ManagedSlice( + ManagedTypeUtil.getInternalKey(StringCodec.get().toPersistedFormat(lowerBound), options))); + } else { + this.lowerBoundSlice = Optional.empty(); } if (Objects.nonNull(upperBound)) { - this.upperBoundSlice = new ManagedSlice( - StringCodec.get().toPersistedFormat(upperBound)); + this.upperBoundSlice = Optional.of(new ManagedSlice( + ManagedTypeUtil.getInternalKey(StringCodec.get().toPersistedFormat(upperBound), options))); + } else { + this.upperBoundSlice = Optional.empty(); } } @Override - protected ClosableIterator getKeyIteratorForFile(String file) { - return new ManagedRawSstFileIterator(file, options, lowerBoundSlice, upperBoundSlice, - keyValue -> StringCodec.get().fromCodecBuffer(keyValue.getKey()), KEY_ONLY); + protected ClosableIterator getKeyIteratorForFile(String file) throws RocksDatabaseException { + return new ManagedRawSstFileIterator<>(file, options, lowerBoundSlice, upperBoundSlice, KEY_ONLY, + keyValue -> StringCodec.get().fromCodecBuffer(keyValue.getKey())); } @Override public void close() throws UncheckedIOException { super.close(); options.close(); - IOUtils.closeQuietly(lowerBoundSlice, upperBoundSlice); + IOUtils.closeQuietly(lowerBoundSlice.orElse(null), upperBoundSlice.orElse(null)); } }; return itr; } - private static class ManagedRawSstFileIterator implements ClosableIterator { - private final ManagedRawSSTFileReader fileReader; - private final ManagedRawSSTFileIterator fileReaderIterator; - private static final int READ_AHEAD_SIZE = 2 * 1024 * 1024; - - ManagedRawSstFileIterator(String path, ManagedOptions options, ManagedSlice lowerBound, ManagedSlice upperBound, - Function keyValueFunction, IteratorType type) { - this.fileReader = new ManagedRawSSTFileReader(options, path, READ_AHEAD_SIZE); - this.fileReaderIterator = fileReader.newIterator(keyValueFunction, lowerBound, upperBound, type); - } - - @Override - public void close() { - this.fileReaderIterator.close(); - this.fileReader.close(); - } - - @Override - public boolean hasNext() { - return fileReaderIterator.hasNext(); - } - - @Override - public String next() { - return fileReaderIterator.next(); - } - } - /** * The MultipleSstFileIterator class is an abstract base for iterating over multiple SST files. * It uses a PriorityQueue to merge keys from all files in sorted order. 
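The MultipleSstFileIterator javadoc above describes a k-way merge: each SST file contributes an already-sorted key iterator, and a PriorityQueue repeatedly yields the smallest current head key across all files. Below is a minimal, self-contained sketch of that idea in plain JDK Java; the class and helper names (SortedIteratorMergeSketch, HeadEntry) are hypothetical, and the real MultipleSstFileIterator's resource cleanup and any handling of duplicate keys across files are intentionally omitted.

// Illustrative sketch only, not the Ozone MultipleSstFileIterator: merging several
// already-sorted per-file key iterators into one globally sorted stream with a PriorityQueue.
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.PriorityQueue;

public final class SortedIteratorMergeSketch {

  /** Pairs a per-file iterator with its current head key so the heap can order files by head. */
  private static final class HeadEntry implements Comparable<HeadEntry> {
    private final Iterator<String> source;
    private String head;

    HeadEntry(Iterator<String> source) {
      this.source = source;
      this.head = source.next(); // caller guarantees the iterator is non-empty
    }

    /** Moves to the next key of this file; returns false when the file is exhausted. */
    boolean advance() {
      if (source.hasNext()) {
        head = source.next();
        return true;
      }
      return false;
    }

    @Override
    public int compareTo(HeadEntry other) {
      return head.compareTo(other.head);
    }
  }

  /** Returns an iterator over all keys of the given sorted iterators, in globally sorted order. */
  public static Iterator<String> merge(List<Iterator<String>> sortedSources) {
    PriorityQueue<HeadEntry> heap = new PriorityQueue<>();
    for (Iterator<String> source : sortedSources) {
      if (source.hasNext()) {
        heap.add(new HeadEntry(source));
      }
    }
    return new Iterator<String>() {
      @Override
      public boolean hasNext() {
        return !heap.isEmpty();
      }

      @Override
      public String next() {
        HeadEntry smallest = heap.poll();
        if (smallest == null) {
          throw new NoSuchElementException();
        }
        String key = smallest.head;
        if (smallest.advance()) {
          heap.add(smallest); // re-insert the file with its new head key
        }
        return key;
      }
    };
  }

  public static void main(String[] args) {
    List<Iterator<String>> sources = new ArrayList<>();
    sources.add(List.of("a", "c", "e").iterator());
    sources.add(List.of("b", "c", "d").iterator());
    Iterator<String> merged = merge(sources);
    while (merged.hasNext()) {
      System.out.println(merged.next()); // prints a, b, c, c, d, e
    }
  }
}

Running main prints a, b, c, c, d, e: the two sorted inputs interleaved into one globally sorted stream, which is the ordering the MultipleSstFileIterator javadoc promises for keys drawn from multiple SST files.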
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java index 956a0caac7c7..aada93e43b02 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java @@ -23,8 +23,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT; @@ -72,11 +70,9 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.CompactionLogEntryProto; import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; import org.apache.hadoop.hdds.utils.Scheduler; import org.apache.hadoop.hdds.utils.db.CodecBuffer; -import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileIterator; -import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; +import org.apache.hadoop.hdds.utils.db.ManagedRawSstFileIterator; import org.apache.hadoop.hdds.utils.db.RDBSstFileWriter; import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; @@ -93,6 +89,7 @@ import org.rocksdb.AbstractEventListener; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.CompactionJobInfo; +import org.rocksdb.EntryType; import org.rocksdb.LiveFileMetaData; import org.rocksdb.RocksDB; import org.rocksdb.RocksDBException; @@ -175,7 +172,6 @@ public class RocksDBCheckpointDiffer implements AutoCloseable, private volatile boolean closed; private final long maxAllowedTimeInDag; private final BootstrapStateHandler.Lock lock; - private static final int SST_READ_AHEAD_SIZE = 2 * 1024 * 1024; private int pruneSSTFileBatchSize; private SSTFilePruningMetrics sstFilePruningMetrics; private ColumnFamilyHandle snapshotInfoTableCFHandle; @@ -247,16 +243,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable, OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE, OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE_DEFAULT); this.sstFilePruningMetrics = SSTFilePruningMetrics.create(activeDBLocationName); - try { - if (configuration.getBoolean(OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT) - && ManagedRawSSTFileReader.loadLibrary()) { - this.pruneQueue = new ConcurrentLinkedQueue<>(); - } - } catch (NativeLibraryNotLoadedException e) { - LOG.warn("Native Library for raw sst file reading loading failed." + - " Cannot prune OMKeyInfo from SST files. 
{}", e.getMessage()); - } - + this.pruneQueue = new ConcurrentLinkedQueue<>(); if (pruneCompactionDagDaemonRunIntervalInMs > 0) { this.scheduler = new Scheduler(DAG_PRUNING_SERVICE_NAME, true, 1); @@ -1363,14 +1350,15 @@ public void pruneSstFileValues() { } private void removeValueFromSSTFile(ManagedOptions options, String sstFilePath, File prunedFile) throws IOException { - try (ManagedRawSSTFileReader sstFileReader = new ManagedRawSSTFileReader(options, sstFilePath, SST_READ_AHEAD_SIZE); - ManagedRawSSTFileIterator> itr = sstFileReader.newIterator( - keyValue -> Pair.of(keyValue.getKey(), keyValue.getType()), null, null, KEY_ONLY); + try (ManagedRawSstFileIterator> itr = + new ManagedRawSstFileIterator<>(sstFilePath, options, + Optional.empty(), Optional.empty(), KEY_ONLY, + kv -> Pair.of(kv.getKey(), kv.getType())); RDBSstFileWriter sstFileWriter = new RDBSstFileWriter(prunedFile); CodecBuffer emptyCodecBuffer = CodecBuffer.getEmptyBuffer()) { while (itr.hasNext()) { - Pair keyValue = itr.next(); - if (keyValue.getValue() == 0) { + Pair keyValue = itr.next(); + if (Objects.requireNonNull(keyValue.getValue()) == EntryType.kEntryDelete) { sstFileWriter.delete(keyValue.getKey()); } else { sstFileWriter.put(keyValue.getKey(), emptyCodecBuffer); diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/TestManagedRawSSTFileIterator.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestManagedRawSstFileIterator.java similarity index 84% rename from hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/TestManagedRawSSTFileIterator.java rename to hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestManagedRawSstFileIterator.java index fee69e6ba187..a41e55717774 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/TestManagedRawSSTFileIterator.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestManagedRawSstFileIterator.java @@ -17,7 +17,6 @@ package org.apache.hadoop.hdds.utils.db; -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_PROPERTY; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -38,15 +37,13 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; -import org.apache.hadoop.hdds.utils.TestUtils; +import org.apache.hadoop.hdds.utils.db.ManagedRawSstFileIterator.KeyValue; import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter; -import org.junit.jupiter.api.BeforeAll; +import org.apache.hadoop.hdds.utils.db.managed.ManagedTypeUtil; import org.junit.jupiter.api.Named; -import org.junit.jupiter.api.condition.EnabledIfSystemProperty; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; @@ -55,8 +52,7 @@ /** * Test for ManagedRawSSTFileReaderIterator. 
*/ -@EnabledIfSystemProperty(named = ROCKS_TOOLS_NATIVE_PROPERTY, matches = "true") -class TestManagedRawSSTFileIterator { +class TestManagedRawSstFileIterator { @TempDir private Path tempDir; @@ -104,11 +100,6 @@ private static Stream keyValueFormatArgs() { .flatMap(i -> Arrays.stream(IteratorType.values()).map(type -> Arguments.of(i.get()[0], i.get()[1], type))); } - @BeforeAll - public static void init() throws NativeLibraryNotLoadedException { - ManagedRawSSTFileReader.loadLibrary(); - } - @ParameterizedTest @MethodSource("keyValueFormatArgs") public void testSSTDumpIteratorWithKeyFormat(String keyFormat, String valueFormat, IteratorType type) @@ -118,9 +109,7 @@ public void testSSTDumpIteratorWithKeyFormat(String keyFormat, String valueForma i -> i % 2 == 0 ? "" : String.format(valueFormat, i), (v1, v2) -> v2, TreeMap::new)); File file = createSSTFileWithKeys(keys); - try (ManagedOptions options = new ManagedOptions(); - ManagedRawSSTFileReader reader = new ManagedRawSSTFileReader( - options, file.getAbsolutePath(), 2 * 1024 * 1024)) { + try (ManagedOptions options = new ManagedOptions()) { List> testBounds = TestUtils.getTestingBounds(keys.keySet().stream() .collect(Collectors.toMap(Pair::getKey, Pair::getValue, (v1, v2) -> v1, TreeMap::new))); for (Optional keyStart : testBounds) { @@ -130,13 +119,16 @@ public void testSSTDumpIteratorWithKeyFormat(String keyFormat, String valueForma .filter(e -> keyEnd.map(s -> e.getKey().getKey().compareTo(s) < 0).orElse(true)) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (v1, v2) -> v1, TreeMap::new)); - Optional lowerBound = keyStart.map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); - Optional upperBound = keyEnd.map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); - try (ManagedRawSSTFileIterator iterator = - reader.newIterator(Function.identity(), lowerBound.orElse(null), upperBound.orElse(null), type)) { + Optional lowerBound = keyStart.map(s -> + new ManagedSlice(ManagedTypeUtil.getInternalKey(StringUtils.string2Bytes(s), options))); + Optional upperBound = keyEnd.map(s -> + new ManagedSlice(ManagedTypeUtil.getInternalKey(StringUtils.string2Bytes(s), options))); + try (ManagedRawSstFileIterator iterator = + new ManagedRawSstFileIterator<>(file.getAbsolutePath(), options, lowerBound, + upperBound, type, Function.identity())) { Iterator, String>> expectedKeyItr = expectedKeys.entrySet().iterator(); while (iterator.hasNext()) { - ManagedRawSSTFileIterator.KeyValue r = iterator.next(); + KeyValue r = iterator.next(); assertTrue(expectedKeyItr.hasNext()); Map.Entry, String> expectedKey = expectedKeyItr.next(); String key = r.getKey() == null ? 
null : StringCodec.get().fromCodecBuffer(r.getKey()); diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java index 4476bcb808d2..5afee78c4b46 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java @@ -17,7 +17,6 @@ package org.apache.hadoop.hdds.utils.db; -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_PROPERTY; import static org.junit.jupiter.api.Assertions.assertEquals; import com.google.common.collect.ImmutableList; @@ -25,13 +24,16 @@ import java.nio.file.Path; import java.util.LinkedList; import java.util.List; +import java.util.Optional; import java.util.Queue; +import java.util.function.Function; import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.db.ManagedRawSstFileIterator.KeyValue; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.condition.EnabledIfSystemProperty; import org.junit.jupiter.api.io.TempDir; +import org.rocksdb.EntryType; /** * Test for RDBSstFileWriter. @@ -41,10 +43,8 @@ public class TestRDBSstFileWriter { @TempDir private Path path; - @EnabledIfSystemProperty(named = ROCKS_TOOLS_NATIVE_PROPERTY, matches = "true") @Test public void testSstFileTombstoneCreationWithCodecBufferReuse() throws IOException { - ManagedRawSSTFileReader.tryLoadLibrary(); Path sstPath = path.resolve("test.sst").toAbsolutePath(); try (CodecBuffer codecBuffer = CodecBuffer.allocateDirect(1024); RDBSstFileWriter sstFileWriter = new RDBSstFileWriter(sstPath.toFile()); @@ -75,15 +75,14 @@ public void testSstFileTombstoneCreationWithCodecBufferReuse() throws IOExceptio } Assertions.assertTrue(sstPath.toFile().exists()); try (ManagedOptions options = new ManagedOptions(); - ManagedRawSSTFileReader reader = new ManagedRawSSTFileReader(options, sstPath.toString(), 1024); - ManagedRawSSTFileIterator itr = - reader.newIterator(kv -> kv, null, null, IteratorType.KEY_AND_VALUE)) { + ManagedRawSstFileIterator itr = new ManagedRawSstFileIterator<>(sstPath.toString(), options, + Optional.empty(), Optional.empty(), IteratorType.KEY_AND_VALUE, Function.identity())) { int idx = 0; List keys = ImmutableList.of("key1", "key1_rename"); while (itr.hasNext()) { - ManagedRawSSTFileIterator.KeyValue kv = itr.next(); - assertEquals(idx, kv.getType()); + KeyValue kv = itr.next(); + assertEquals(idx, kv.getType() == EntryType.kEntryDelete ? 
0 : 1); assertEquals(keys.get(idx), keys.get(idx++)); assertEquals(0, kv.getValue().readableBytes()); } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestSstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestSstFileSetReader.java index fd4bcbb6d90d..e03517ef2598 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestSstFileSetReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestSstFileSetReader.java @@ -17,7 +17,6 @@ package org.apache.hadoop.hdds.utils.db; -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_PROPERTY; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -38,12 +37,10 @@ import java.util.stream.IntStream; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.utils.TestUtils; import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter; import org.apache.hadoop.ozone.util.ClosableIterator; -import org.junit.jupiter.api.condition.EnabledIfSystemProperty; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -189,12 +186,10 @@ public void testGetKeyStream(int numberOfFiles) * This test is only enabled when the native RocksDB tools library is available. * Unlike testGetKeyStream, this method returns ALL keys within bounds, including tombstones. */ - @EnabledIfSystemProperty(named = ROCKS_TOOLS_NATIVE_PROPERTY, matches = "true") @ParameterizedTest @ValueSource(ints = {0, 1, 2, 3, 7, 10}) public void testGetKeyStreamWithTombstone(int numberOfFiles) throws RocksDBException, CodecException { - assumeTrue(ManagedRawSSTFileReader.tryLoadLibrary()); Pair, List> data = createDummyData(numberOfFiles); List files = data.getRight(); diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestUtils.java similarity index 97% rename from hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestUtils.java rename to hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestUtils.java index 0e0d8306759a..a1746295ce5c 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestUtils.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestUtils.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hdds.utils; +package org.apache.hadoop.hdds.utils.db; import static org.apache.hadoop.hdds.StringUtils.getLexicographicallyHigherString; import static org.apache.hadoop.hdds.StringUtils.getLexicographicallyLowerString; diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestCompactionDag.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestCompactionDag.java index 2fde23bb376e..d080bd864c96 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestCompactionDag.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestCompactionDag.java @@ -22,8 +22,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT; @@ -63,7 +61,6 @@ import java.util.stream.Stream; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; @@ -76,8 +73,6 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import org.mockito.MockedStatic; -import org.mockito.Mockito; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; @@ -163,31 +158,22 @@ public void init() throws RocksDBException { OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE, OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE_DEFAULT)) .thenReturn(2000); + ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + Function dummyLock = (readLock) -> { + if (readLock) { + readWriteLock.readLock().lock(); + return (UncheckedAutoCloseable) () -> readWriteLock.readLock().unlock(); + } else { + readWriteLock.writeLock().lock(); + return (UncheckedAutoCloseable) () -> readWriteLock.writeLock().unlock(); + } + }; + rocksDBCheckpointDiffer = new RocksDBCheckpointDiffer(METADATA_DIR_NAME, + SST_BACK_UP_DIR_NAME, + COMPACTION_LOG_DIR_NAME, + ACTIVE_DB_DIR_NAME, + config, dummyLock); - when(config.getBoolean( - OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, - OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)).thenReturn(true); - - try (MockedStatic mockedRawSSTReader = - Mockito.mockStatic(ManagedRawSSTFileReader.class)) { - mockedRawSSTReader.when(ManagedRawSSTFileReader::loadLibrary) - .thenReturn(true); - ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); - Function dummyLock = (readLock) -> { - if (readLock) { - 
readWriteLock.readLock().lock(); - return (UncheckedAutoCloseable) () -> readWriteLock.readLock().unlock(); - } else { - readWriteLock.writeLock().lock(); - return (UncheckedAutoCloseable) () -> readWriteLock.writeLock().unlock(); - } - }; - rocksDBCheckpointDiffer = new RocksDBCheckpointDiffer(METADATA_DIR_NAME, - SST_BACK_UP_DIR_NAME, - COMPACTION_LOG_DIR_NAME, - ACTIVE_DB_DIR_NAME, - config, dummyLock); - } ManagedColumnFamilyOptions cfOpts = new ManagedColumnFamilyOptions(); cfOpts.optimizeUniversalStyleCompaction(); diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java index 9c1fb6b0a060..dda4fa9cbcf4 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java @@ -24,8 +24,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT; @@ -40,7 +38,6 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; @@ -93,8 +90,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.CodecBuffer; -import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileIterator; -import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; +import org.apache.hadoop.hdds.utils.db.ManagedRawSstFileIterator; import org.apache.hadoop.hdds.utils.db.RDBSstFileWriter; import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; @@ -125,10 +121,10 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.mockito.MockedConstruction; -import org.mockito.MockedStatic; import org.mockito.Mockito; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.EntryType; import org.rocksdb.LiveFileMetaData; import org.rocksdb.RocksDB; import org.rocksdb.RocksDBException; @@ -354,28 +350,21 @@ public void init() throws RocksDBException { OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE, OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE_DEFAULT)).thenReturn(2000); - when(config.getBoolean( - 
OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, - OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)).thenReturn(true); - - try (MockedStatic mockedRawSSTReader = Mockito.mockStatic(ManagedRawSSTFileReader.class)) { - mockedRawSSTReader.when(ManagedRawSSTFileReader::loadLibrary).thenReturn(true); - ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); - Function lockFunction = (readLock) -> { - if (readLock) { - readWriteLock.readLock().lock(); - return () -> readWriteLock.readLock().unlock(); - } else { - readWriteLock.writeLock().lock(); - return () -> readWriteLock.writeLock().unlock(); - } - }; - rocksDBCheckpointDiffer = new RocksDBCheckpointDiffer(METADATA_DIR_NAME, - SST_BACK_UP_DIR_NAME, - COMPACTION_LOG_DIR_NAME, - ACTIVE_DB_DIR_NAME, - config, lockFunction); - } + ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + Function lockFunction = (readLock) -> { + if (readLock) { + readWriteLock.readLock().lock(); + return () -> readWriteLock.readLock().unlock(); + } else { + readWriteLock.writeLock().lock(); + return () -> readWriteLock.writeLock().unlock(); + } + }; + rocksDBCheckpointDiffer = new RocksDBCheckpointDiffer(METADATA_DIR_NAME, + SST_BACK_UP_DIR_NAME, + COMPACTION_LOG_DIR_NAME, + ACTIVE_DB_DIR_NAME, + config, lockFunction); ManagedColumnFamilyOptions cfOpts = new ManagedColumnFamilyOptions(); cfOpts.optimizeUniversalStyleCompaction(); @@ -1644,17 +1633,15 @@ public void testPruneSSTFileValues() throws Exception { // Run the SST file pruner. try (CodecBuffer keyCodecBuffer = CodecBuffer.allocateDirect(1024); - MockedConstruction mockedRawSSTReader = Mockito.mockConstruction( - ManagedRawSSTFileReader.class, (mock, context) -> { - ManagedRawSSTFileIterator mockedRawSSTFileItr = mock(ManagedRawSSTFileIterator.class); - Iterator> keyItr = keys.stream().map(i -> { + MockedConstruction mockedRawSSTReader = Mockito.mockConstruction( + ManagedRawSstFileIterator.class, (mock, context) -> { + Iterator> keyItr = keys.stream().map(i -> { keyCodecBuffer.clear(); keyCodecBuffer.put(ByteBuffer.wrap(i.getKey().getBytes(UTF_8))); - return Pair.of(keyCodecBuffer, i.getValue()); + return Pair.of(keyCodecBuffer, i.getValue() == 0 ? 
EntryType.kEntryDelete : EntryType.kEntryPut); }).iterator(); - doAnswer(i -> keyItr.hasNext()).when(mockedRawSSTFileItr).hasNext(); - doAnswer(i -> keyItr.next()).when(mockedRawSSTFileItr).next(); - when(mock.newIterator(any(), any(), any(), any())).thenReturn(mockedRawSSTFileItr); + doAnswer(i -> keyItr.hasNext()).when(mock).hasNext(); + doAnswer(i -> keyItr.next()).when(mock).next(); doNothing().when(mock).close(); })) { rocksDBCheckpointDiffer.pruneSstFileValues(); diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index f3ef154e65c4..16fb16c71720 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -52,11 +52,6 @@ hdds-docs runtime - - org.apache.ozone - hdds-rocks-native - runtime - org.apache.ozone hdds-server-scm diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index 10f312c8c91a..4eb4cd7920e7 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -504,7 +504,7 @@ WTFPL -------------------------------------------------------------------------------- -hdds-server-scm, ozone-manager, ozone-s3gateway, hdds-rocks-native and hdds-server-framework +hdds-server-scm, ozone-manager, ozone-s3gateway and hdds-server-framework contains the source of the following javascript/css components (See licenses/ for text of these licenses): Apache Software Foundation License 2.0 diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt index f8b3b6ff6dc8..86ba2a016edd 100644 --- a/hadoop-ozone/dist/src/main/license/jar-report.txt +++ b/hadoop-ozone/dist/src/main/license/jar-report.txt @@ -75,7 +75,6 @@ share/ozone/lib/hdds-erasurecode.jar share/ozone/lib/hdds-interface-admin.jar share/ozone/lib/hdds-interface-client.jar share/ozone/lib/hdds-interface-server.jar -share/ozone/lib/hdds-rocks-native.jar share/ozone/lib/hdds-managed-rocksdb.jar share/ozone/lib/hdds-server-framework.jar share/ozone/lib/hdds-server-scm.jar diff --git a/hadoop-ozone/integration-test/ozone_rocksdb_tools3729431883314919784/libozone_rocksdb_tools.dylib b/hadoop-ozone/integration-test/ozone_rocksdb_tools3729431883314919784/libozone_rocksdb_tools.dylib new file mode 100644 index 000000000000..2faa9833a80d Binary files /dev/null and b/hadoop-ozone/integration-test/ozone_rocksdb_tools3729431883314919784/libozone_rocksdb_tools.dylib differ diff --git a/hadoop-ozone/integration-test/ozone_rocksdb_tools3729431883314919784/librocksdbjni-osx-arm64.jnilib b/hadoop-ozone/integration-test/ozone_rocksdb_tools3729431883314919784/librocksdbjni-osx-arm64.jnilib new file mode 100644 index 000000000000..5b3bcfd7f009 Binary files /dev/null and b/hadoop-ozone/integration-test/ozone_rocksdb_tools3729431883314919784/librocksdbjni-osx-arm64.jnilib differ diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index df9da45b3b6d..660f860559a2 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -336,11 +336,6 @@ hdds-managed-rocksdb test - - org.apache.ozone - hdds-rocks-native - test - org.apache.ozone hdds-server-framework diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 5f14451fad32..94eb8ee321f5 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -69,6 +69,7 @@ import jakarta.annotation.Nonnull; import java.io.File; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.StandardCharsets; @@ -79,6 +80,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -104,10 +106,10 @@ import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.hadoop.hdds.utils.db.DBProfile; import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileIterator; -import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; +import org.apache.hadoop.hdds.utils.db.ManagedRawSstFileIterator; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDatabase; +import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils; @@ -218,10 +220,6 @@ public TestOmSnapshot(BucketLayout newBucketLayout, this.counter = new AtomicInteger(); this.createLinkedBucket = createLinkedBucket; init(); - - if (!disableNativeDiff) { - assumeTrue(ManagedRawSSTFileReader.tryLoadLibrary()); - } } private void init() throws Exception { @@ -2523,13 +2521,15 @@ public void testSnapshotCompactionDag() throws Exception { java.nio.file.Path file = sstBackUpDir.resolve(f.getFileName() + ".sst"); if (COLUMN_FAMILIES_TO_TRACK_IN_DAG.contains(f.getColumnFamily()) && java.nio.file.Files.exists(file)) { assertTrue(f.isPruned()); - try (ManagedRawSSTFileReader sstFileReader = new ManagedRawSSTFileReader( - managedOptions, file.toFile().getAbsolutePath(), 2 * 1024 * 1024); - ManagedRawSSTFileIterator itr = sstFileReader.newIterator( - ManagedRawSSTFileIterator.KeyValue::getValue, null, null, KEY_AND_VALUE)) { + try (ManagedRawSstFileIterator itr = + new ManagedRawSstFileIterator<>(file.toFile().getAbsolutePath(), managedOptions, + Optional.empty(), Optional.empty(), + KEY_AND_VALUE, ManagedRawSstFileIterator.KeyValue::getValue)) { while (itr.hasNext()) { assertEquals(0, itr.next().readableBytes()); } + } catch (RocksDatabaseException e) { + throw new UncheckedIOException(e); } } else { assertFalse(f.isPruned()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java index 5fb86f5b162d..157090c9915d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java @@ -17,15 +17,11 @@ package org.apache.hadoop.ozone.om.snapshot; -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_PROPERTY; import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; -import org.junit.jupiter.api.condition.EnabledIfSystemProperty; - /** * Test OmSnapshot for 
FSO bucket type when native lib is enabled. */ -@EnabledIfSystemProperty(named = ROCKS_TOOLS_NATIVE_PROPERTY, matches = "true") class TestOmSnapshotFsoWithNativeLib extends TestOmSnapshot { TestOmSnapshotFsoWithNativeLib() throws Exception { super(FILE_SYSTEM_OPTIMIZED, false, false, false, false); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLibWithLinkedBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLibWithLinkedBuckets.java index bda8d79c5ca8..9de66172757e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLibWithLinkedBuckets.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLibWithLinkedBuckets.java @@ -17,15 +17,11 @@ package org.apache.hadoop.ozone.om.snapshot; -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_PROPERTY; import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; -import org.junit.jupiter.api.condition.EnabledIfSystemProperty; - /** * Test OmSnapshot for FSO bucket type when native lib is enabled. */ -@EnabledIfSystemProperty(named = ROCKS_TOOLS_NATIVE_PROPERTY, matches = "true") class TestOmSnapshotFsoWithNativeLibWithLinkedBuckets extends TestOmSnapshot { TestOmSnapshotFsoWithNativeLibWithLinkedBuckets() throws Exception { super(FILE_SYSTEM_OPTIMIZED, false, false, false, true); diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index 923b1c02cbeb..8d4879a211e6 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -153,10 +153,6 @@ org.apache.ozone hdds-managed-rocksdb - - org.apache.ozone - hdds-rocks-native - org.apache.ozone hdds-server-framework diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 2147fc3ec180..40555b7b4131 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -23,8 +23,6 @@ import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType.DELETE; import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType.MODIFY; import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType.RENAME; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS; @@ -95,9 +93,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; import org.apache.hadoop.hdds.utils.db.CodecRegistry; -import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.SstFileSetReader; import 
org.apache.hadoop.hdds.utils.db.Table; @@ -183,9 +179,7 @@ public class SnapshotDiffManager implements AutoCloseable { private final boolean snapshotForceFullDiff; - private final boolean diffDisableNativeLibs; - - private final boolean isNativeLibsLoaded; + private final boolean diffDisableEfficientDiff; private final BiFunction generateSnapDiffJobKey = @@ -215,7 +209,7 @@ public SnapshotDiffManager(ManagedRocksDB db, OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT); - this.diffDisableNativeLibs = ozoneManager.getConfiguration().getBoolean( + this.diffDisableEfficientDiff = ozoneManager.getConfiguration().getBoolean( OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS, OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS_DEFAULT); @@ -259,8 +253,6 @@ public SnapshotDiffManager(ManagedRocksDB db, createEmptySnapDiffDir(path); this.sstBackupDirForSnapDiffJobs = path.toString(); - this.isNativeLibsLoaded = initNativeLibraryForEfficientDiff(ozoneManager.getConfiguration()); - // Ideally, loadJobsOnStartUp should run only on OM node, since SnapDiff // is not HA currently and running this on all the nodes would be // inefficient. Especially, when OM node restarts and loses its leadership. @@ -282,19 +274,6 @@ public PersistentMap getSnapDiffJobTable() { return snapDiffJobTable; } - private boolean initNativeLibraryForEfficientDiff(final OzoneConfiguration conf) { - if (conf.getBoolean(OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)) { - try { - return ManagedRawSSTFileReader.loadLibrary(); - } catch (NativeLibraryNotLoadedException e) { - LOG.warn("Native Library for raw sst file reading loading failed." + - " Fallback to performing a full diff instead. {}", e.getMessage()); - return false; - } - } - return false; - } - /** * Creates an empty dir. If directory exists, it deletes that and then * creates new one otherwise just create a new dir. 
@@ -788,7 +767,7 @@ void generateSnapshotDiffReport(final String jobKey, UncheckedAutoCloseableSupplier rcToSnapshot = null; boolean useFullDiff = snapshotForceFullDiff || forceFullDiff; - boolean performNonNativeDiff = diffDisableNativeLibs || disableNativeDiff || !isNativeLibsLoaded; + boolean performNonNativeDiff = diffDisableEfficientDiff || disableNativeDiff; Consumer activityReporter = (jobStatus) -> recordActivity(jobKey, jobStatus); try (DeltaFileComputer deltaFileComputer = new CompositeDeltaDiffComputer(ozoneManager.getOmSnapshotManager(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/defrag/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/defrag/SnapshotDefragService.java index 87e0704d10a7..0dd6713d98be 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/defrag/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/defrag/SnapshotDefragService.java @@ -57,7 +57,6 @@ import org.apache.hadoop.hdds.utils.db.CodecException; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.RDBSstFileWriter; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint; @@ -164,7 +163,7 @@ public SnapshotDefragService(long interval, TimeUnit unit, long serviceTimeout, this.deltaDiffComputer = new CompositeDeltaDiffComputer(omSnapshotManager, ozoneManager.getMetadataManager(), differTmpDir, (status) -> { LOG.debug("Snapshot defragmentation diff status: {}", status); - }, false, !isRocksToolsNativeLibAvailable()); + }, false, false); this.lockIds = new ArrayList<>(1); } @@ -188,18 +187,6 @@ boolean isRunning() { return running.get(); } - /** - * Checks if rocks-tools native library is available. - */ - private boolean isRocksToolsNativeLibAvailable() { - try { - return ManagedRawSSTFileReader.tryLoadLibrary(); - } catch (Exception e) { - LOG.warn("Failed to check native code availability", e); - return false; - } - } - /** * Determines whether the specified snapshot requires defragmentation and returns * a pair indicating the need for defragmentation and the corresponding version of the snapshot. @@ -653,13 +640,6 @@ public synchronized boolean triggerSnapshotDefragOnce() throws IOException { LOG.debug("Initiating Snapshot Defragmentation Task: run # {}", count); } - // Check if rocks-tools native lib is available - if (!isRocksToolsNativeLibAvailable()) { - LOG.warn("Rocks-tools native library is not available. 
" + - "Stopping SnapshotDefragService."); - return false; - } - Optional snapshotManager = Optional.ofNullable(ozoneManager) .map(OzoneManager::getOmSnapshotManager); if (!snapshotManager.isPresent()) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index 5a82c3b1591e..ba700682156f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -18,8 +18,6 @@ package org.apache.hadoop.ozone.om.snapshot; import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.DEFAULT_COLUMN_FAMILY_NAME; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME_DEFAULT; @@ -309,9 +307,6 @@ public void init() throws RocksDBException, IOException, ExecutionException { .getInt(OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE, OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE_DEFAULT)) .thenReturn(OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE_DEFAULT); - when(configuration.getBoolean(OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, - OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)) - .thenReturn(OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT); for (int i = 0; i < jobStatuses.size(); i++) { when(snapshotInfoTable.get(getTableKey(VOLUME_NAME, BUCKET_NAME, diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 1eb9c0605e60..1e337c141cf0 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -118,10 +118,6 @@ org.apache.ozone hdds-managed-rocksdb - - org.apache.ozone - hdds-rocks-native - org.apache.ozone hdds-server-framework diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CheckNative.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CheckNative.java index 3309faf5cdee..1b83bd77f1c8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CheckNative.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CheckNative.java @@ -17,9 +17,6 @@ package org.apache.hadoop.ozone.debug; -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; - -import java.util.Collections; import java.util.LinkedHashMap; import java.util.Map; import java.util.concurrent.Callable; @@ -27,8 +24,6 @@ import org.apache.hadoop.crypto.OpensslCipher; import org.apache.hadoop.hdds.cli.AbstractSubcommand; import org.apache.hadoop.hdds.cli.DebugSubcommand; -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils; import org.apache.hadoop.io.erasurecode.ErasureCodeNative; import org.apache.hadoop.util.NativeCodeLoader; import org.kohsuke.MetaInfServices; @@ -55,15 +50,6 @@ public Void call() throws Exception { OpensslCipher::getLibraryName )); - // Ozone - ManagedRocksObjectUtils.loadRocksDBLibrary(); - NativeLibraryLoader.getInstance().loadLibrary( - ROCKS_TOOLS_NATIVE_LIBRARY_NAME, - 
Collections.singletonList(ManagedRocksObjectUtils.getRocksDBLibFileName())); - results.put("rocks-tools", checkLibrary( - NativeLibraryLoader.isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME), - NativeLibraryLoader::getJniLibraryFileName)); - final int maxLength = results.keySet().stream() .mapToInt(String::length) .max() diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestCheckNative.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestCheckNative.java index f66ec1a04964..f52d54a22702 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestCheckNative.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestCheckNative.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.debug; -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_PROPERTY; import static org.assertj.core.api.Assertions.assertThat; import org.apache.hadoop.hdds.utils.IOUtils; @@ -25,8 +24,6 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.condition.DisabledIfSystemProperty; -import org.junit.jupiter.api.condition.EnabledIfSystemProperty; /** * Tests for {@link CheckNative}. @@ -40,21 +37,13 @@ void init() { out = GenericTestUtils.captureOut(); } - @DisabledIfSystemProperty(named = ROCKS_TOOLS_NATIVE_PROPERTY, matches = "true") @Test void testCheckNativeNotLoaded() { executeCheckNative(); - assertOutput(false); + assertOutput(); } - @EnabledIfSystemProperty(named = ROCKS_TOOLS_NATIVE_PROPERTY, matches = "true") - @Test - void testCheckNativeRocksToolsLoaded() { - executeCheckNative(); - assertOutput(true); - } - - private void assertOutput(boolean expectedRocksNative) { + private void assertOutput() { // trims multiple spaces String stdOut = out.get() .replaceAll(" +", " "); @@ -62,8 +51,7 @@ private void assertOutput(boolean expectedRocksNative) { .contains("Native library checking:") .contains("hadoop: false") .contains("ISA-L: false") - .contains("OpenSSL: false") - .contains("rocks-tools: " + expectedRocksNative); + .contains("OpenSSL: false"); } @AfterEach diff --git a/pom.xml b/pom.xml index 03cc1b2d9e27..cc40bae6a337 100644 --- a/pom.xml +++ b/pom.xml @@ -206,7 +206,7 @@ 0.10.2 1.2.26 2.6.1 - 7.7.3 + 10.10.0 3.1.0 bash 2.0.17 @@ -1075,17 +1075,6 @@ hdds-managed-rocksdb ${hdds.version} - - org.apache.ozone - hdds-rocks-native - ${hdds.rocks.native.version} - - - org.apache.ozone - hdds-rocks-native - ${hdds.rocks.native.version} - test-jar - org.apache.ozone hdds-server-framework @@ -1961,6 +1950,7 @@ org.rocksdb.Statistics org.rocksdb.RocksDB.* + org.rocksdb.EntryType ${project.build.directory}/generated-sources/java
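Note on the API migration applied throughout the test changes above: the former two-step pattern of building a ManagedRawSSTFileReader (after ManagedRawSSTFileReader.loadLibrary()) and then calling reader.newIterator(...) is replaced by constructing a ManagedRawSstFileIterator directly, passing the SST file path, ManagedOptions, optional ManagedSlice bounds, an IteratorType, and a mapping function. The following is a minimal sketch of the new usage based only on the calls visible in this patch; the import location of IteratorType and the exact exception behavior of hasNext()/next() are assumptions, not confirmed by the patch.

// Illustrative sketch only: iterates one SST file with the ManagedRawSstFileIterator
// API shown in the hunks above and counts puts vs. tombstones. Constructor arguments,
// KeyValue accessors and EntryType usage mirror the test changes in this patch;
// the IteratorType import location is an assumption.
import java.util.Optional;
import java.util.function.Function;
import org.apache.hadoop.hdds.utils.db.IteratorType;                      // package assumed
import org.apache.hadoop.hdds.utils.db.ManagedRawSstFileIterator;
import org.apache.hadoop.hdds.utils.db.ManagedRawSstFileIterator.KeyValue;
import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
import org.rocksdb.EntryType;

public final class SstFileDumpSketch {

  public static void dump(String sstFilePath) throws RocksDatabaseException {
    try (ManagedOptions options = new ManagedOptions();
         // Optional.empty() bounds mean no lower/upper key bound;
         // Function.identity() keeps the raw KeyValue instead of mapping it.
         ManagedRawSstFileIterator<KeyValue> itr = new ManagedRawSstFileIterator<>(
             sstFilePath, options, Optional.empty(), Optional.empty(),
             IteratorType.KEY_AND_VALUE, Function.identity())) {
      long puts = 0;
      long tombstones = 0;
      while (itr.hasNext()) {
        KeyValue kv = itr.next();
        if (kv.getType() == EntryType.kEntryDelete) {
          tombstones++;
        } else {
          puts++;
        }
      }
      System.out.println("puts=" + puts + " tombstones=" + tombstones);
    }
  }

  private SstFileDumpSketch() {
  }
}

Because the iterator no longer depends on a separately loaded rocks-tools native library, the @EnabledIfSystemProperty(ROCKS_TOOLS_NATIVE_PROPERTY) guards and the loadLibrary()/tryLoadLibrary() assumptions are dropped across these tests, consistent with the removal of the hdds-rocks-native module elsewhere in this patch.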