4 changes: 2 additions & 2 deletions .github/workflows/ci.yml
@@ -27,7 +27,7 @@ on:
default: ''
required: false
env:
BUILD_ARGS: "-Pdist -Psrc -Dmaven.javadoc.skip=true -Drocks_tools_native"
BUILD_ARGS: "-Pdist -Psrc -Dmaven.javadoc.skip=true"
# Minimum required Java version for running Ozone is defined in pom.xml (javac.version).
TEST_JAVA_VERSION: 21 # JDK version used by CI build and tests; should match the JDK version in apache/ozone-runner image
# MAVEN_ARGS and MAVEN_OPTS are duplicated in check.yml, please keep in sync
@@ -290,7 +290,7 @@ jobs:
pre-script: sudo hostname localhost
ratis-args: ${{ inputs.ratis_args }}
script: integration
script-args: -Ptest-${{ matrix.profile }} -Drocks_tools_native
script-args: -Ptest-${{ matrix.profile }}
sha: ${{ needs.build-info.outputs.sha }}
split: ${{ matrix.profile }}
timeout-minutes: 90
4 changes: 2 additions & 2 deletions .github/workflows/intermittent-test-check.yml
@@ -131,7 +131,7 @@ jobs:
java-version: ${{ github.event.inputs.java-version }}
- name: Build (most) of Ozone
run: |
args="-DskipRecon -DskipShade -Dmaven.javadoc.skip=true -Drocks_tools_native"
args="-DskipRecon -DskipShade -Dmaven.javadoc.skip=true"
if [[ "$RATIS_VERSION" != "" ]]; then
args="$args -Dratis.version=${{ needs.ratis.outputs.ratis-version }}"
args="$args -Dratis.thirdparty.version=${{ needs.ratis.outputs.thirdparty-version }}"
@@ -201,7 +201,7 @@ jobs:
export OZONE_REPO_CACHED=true
fi

args="-DexcludedGroups=slow|unhealthy -DskipShade -Drocks_tools_native"
args="-DexcludedGroups=slow|unhealthy -DskipShade"
if [[ "$RATIS_VERSION" != "" ]]; then
args="$args -Dratis.version=${{ needs.ratis.outputs.ratis-version }}"
args="$args -Dratis.thirdparty.version=${{ needs.ratis.outputs.thirdparty-version }}"
2 changes: 1 addition & 1 deletion .github/workflows/populate-cache.yml
@@ -73,7 +73,7 @@ jobs:

- name: Fetch dependencies
if: steps.restore-cache.outputs.cache-hit != 'true'
run: mvn --batch-mode --no-transfer-progress --show-version -Pgo-offline -Pdist -Drocks_tools_native clean verify
run: mvn --batch-mode --no-transfer-progress --show-version -Pgo-offline -Pdist clean verify

- name: Delete Ozone jars from repo
if: steps.restore-cache.outputs.cache-hit != 'true'
@@ -655,8 +655,10 @@ public final class OzoneConfigKeys {
public static final int
OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE_DEFAULT = 2000;

@Deprecated
public static final String OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB =
"ozone.om.snapshot.load.native.lib";
@Deprecated
public static final boolean OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT = true;

public static final String OZONE_OM_DELTA_UPDATE_DATA_SIZE_MAX_LIMIT =
2 changes: 0 additions & 2 deletions hadoop-hdds/docs/content/start/FromSource.md
@@ -127,8 +127,6 @@ mvn clean package -DskipTests=true -Pdist
* Use `-DskipShade` to skip shaded Ozone FS jar file creation. Saves time, but you can't test integration with other software that uses Ozone as a Hadoop-compatible file system.
* Use `-DskipRecon` to skip building Recon Web UI. It saves about 2 minutes.
* Use `-Dmaven.javadoc.skip=true` to skip building javadocs.
* Use `-Drocks_tools_native` to build the RocksDB native code for the Ozone Snapshot feature. This is optional and not required for building Ozone. It is only needed if you want to build the RocksDB native code for Ozone.


## How to run Ozone from build

@@ -23,6 +23,7 @@
import static org.rocksdb.RocksDB.listColumnFamilies;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import java.io.Closeable;
import java.io.File;
import java.nio.ByteBuffer;
@@ -56,7 +57,6 @@
import org.apache.hadoop.hdds.utils.db.managed.ManagedTransactionLogIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteBatch;
import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions;
import org.apache.ozone.rocksdiff.RocksDiffUtils;
import org.apache.ratis.util.MemoizedSupplier;
import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.ColumnFamilyDescriptor;
@@ -883,36 +883,36 @@ private int getLastLevel() throws RocksDatabaseException {
*/
public void deleteFilesNotMatchingPrefix(TablePrefixInfo prefixInfo) throws RocksDatabaseException {
try (UncheckedAutoCloseable ignored = acquire()) {
Map<String, String[]> sstFileRangeMap = new HashMap<>();
for (LiveFileMetaData liveFileMetaData : getSstFileList()) {
String sstFileColumnFamily = StringUtils.bytes2String(liveFileMetaData.columnFamilyName());
int lastLevel = getLastLevel();

// The RocksDB #deleteFile API can only delete SST files at the last level.
// Files at lower levels are not deleted (only the last file of level 0 can
// be), and such attempts only log a warning in the RocksDB manifest.
// Perform the level check here instead, to avoid failed delete attempts
// for lower-level files.
if (liveFileMetaData.level() != lastLevel || lastLevel == 0) {
continue;
}

String prefixForColumnFamily = prefixInfo.getTablePrefix(sstFileColumnFamily);
String firstDbKey = StringUtils.bytes2String(liveFileMetaData.smallestKey());
String lastDbKey = StringUtils.bytes2String(liveFileMetaData.largestKey());
boolean isKeyWithPrefixPresent = RocksDiffUtils.isKeyWithPrefixPresent(
prefixForColumnFamily, firstDbKey, lastDbKey);
if (!isKeyWithPrefixPresent) {
LOG.info("Deleting sst file: {} with start key: {} and end key: {} "
+ "corresponding to column family {} from db: {}. "
+ "Prefix for the column family: {}.",
liveFileMetaData.fileName(),
firstDbKey, lastDbKey,
StringUtils.bytes2String(liveFileMetaData.columnFamilyName()),
db.get().getName(),
prefixForColumnFamily);
db.deleteFile(liveFileMetaData);
sstFileRangeMap.compute(sstFileColumnFamily, (key, value) -> {
if (value == null) {
return new String[]{firstDbKey, lastDbKey};
}
value[0] = firstDbKey.compareTo(value[0]) < 0 ? firstDbKey : value[0];
value[1] = lastDbKey.compareTo(value[1]) > 0 ? lastDbKey : value[1];
return value;
});
}
for (String tableName : prefixInfo.getTableNames()) {
String prefixForColumnFamily = prefixInfo.getTablePrefix(tableName);
ColumnFamilyHandle ch = getColumnFamilyHandle(tableName);
String[] sstFileRange = sstFileRangeMap.get(tableName);
// Guard against tables with no configured prefix and tables for which no
// last-level SST file was recorded in the loop above.
if (ch == null || sstFileRange == null || prefixForColumnFamily == null || prefixForColumnFamily.isEmpty()) {
continue;
}
String smallestDBKey = sstFileRange[0];
String largestDBKey = sstFileRange[1];
String nextLargestDBKey = StringUtils.getLexicographicallyHigherString(prefixForColumnFamily);
LOG.info("Deleting sst files in range [{}, {}) and [{}, {}) corresponding to column family {} from db: {}. " +
"Prefix for the column family: {}.",
smallestDBKey, prefixForColumnFamily, nextLargestDBKey, largestDBKey,
tableName, db.get().getName(), prefixForColumnFamily);
db.deleteFile(ch, ImmutableList.of(StringUtils.string2Bytes(smallestDBKey),
StringUtils.string2Bytes(prefixForColumnFamily), StringUtils.string2Bytes(nextLargestDBKey),
StringUtils.string2Bytes(largestDBKey)));
}
}
}
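As a reading aid for the range-based deletion above, a minimal, hypothetical sketch of the range arithmetic it relies on: keys sharing a table prefix form the contiguous interval [prefix, nextHigher(prefix)), so within a column family whose SST keys span [smallest, largest], everything that does NOT carry the prefix is covered by the two half-open ranges [smallest, prefix) and [nextHigher(prefix), largest). The helper below is illustrative only and is assumed, not verified, to mirror StringUtils.getLexicographicallyHigherString.

// Hypothetical sketch of the range math behind deleteFilesNotMatchingPrefix.
final class PrefixRangeSketch {
  // Smallest string greater than every string starting with 'prefix',
  // e.g. "vol1/" -> "vol10" (increment the last character).
  static String nextHigher(String prefix) {
    char[] chars = prefix.toCharArray();
    chars[chars.length - 1]++;
    return new String(chars);
  }

  // Half-open ranges whose union covers every key in [smallest, largest]
  // that does not start with 'prefix'.
  static String[][] rangesOutsidePrefix(String smallest, String largest, String prefix) {
    return new String[][] {
        {smallest, prefix},            // keys strictly below the prefix
        {nextHigher(prefix), largest}  // keys at or above the next prefix
    };
  }

  public static void main(String[] args) {
    for (String[] r : rangesOutsidePrefix("a0", "z9", "vol1/")) {
      System.out.println("[" + r[0] + ", " + r[1] + ")");
    }
  }
}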
@@ -100,13 +100,13 @@ Answer<Integer> newAnswer(String name, byte... b) {
public void testForEachRemaining() throws Exception {
when(rocksIteratorMock.isValid())
.thenReturn(true, true, true, true, true, true, true, false);
when(rocksIteratorMock.key(any()))
when(rocksIteratorMock.key(any(ByteBuffer.class)))
.then(newAnswerInt("key1", 0x00))
.then(newAnswerInt("key2", 0x00))
.then(newAnswerInt("key3", 0x01))
.then(newAnswerInt("key4", 0x02))
.thenThrow(new NoSuchElementException());
when(rocksIteratorMock.value(any()))
when(rocksIteratorMock.value(any(ByteBuffer.class)))
.then(newAnswerInt("val1", 0x7f))
.then(newAnswerInt("val2", 0x7f))
.then(newAnswerInt("val3", 0x7e))
@@ -152,8 +152,8 @@ public void testNextCallsIsValidThenGetsTheValueAndStepsToNext()
}

verifier.verify(rocksIteratorMock).isValid();
verifier.verify(rocksIteratorMock).key(any());
verifier.verify(rocksIteratorMock).value(any());
verifier.verify(rocksIteratorMock).key(any(ByteBuffer.class));
verifier.verify(rocksIteratorMock).value(any(ByteBuffer.class));
verifier.verify(rocksIteratorMock).next();

CodecTestUtil.gc();
@@ -192,9 +192,9 @@ public void testSeekToLastSeeks() throws Exception {
@Test
public void testSeekReturnsTheActualKey() throws Exception {
when(rocksIteratorMock.isValid()).thenReturn(true);
when(rocksIteratorMock.key(any()))
when(rocksIteratorMock.key(any(ByteBuffer.class)))
.then(newAnswerInt("key1", 0x00));
when(rocksIteratorMock.value(any()))
when(rocksIteratorMock.value(any(ByteBuffer.class)))
.then(newAnswerInt("val1", 0x7f));

try (RDBStoreCodecBufferIterator i = newIterator();
@@ -208,8 +208,8 @@ public void testSeekReturnsTheActualKey() throws Exception {
verifier.verify(rocksIteratorMock, times(1))
.seek(any(ByteBuffer.class));
verifier.verify(rocksIteratorMock, times(1)).isValid();
verifier.verify(rocksIteratorMock, times(1)).key(any());
verifier.verify(rocksIteratorMock, times(1)).value(any());
verifier.verify(rocksIteratorMock, times(1)).key(any(ByteBuffer.class));
verifier.verify(rocksIteratorMock, times(1)).value(any(ByteBuffer.class));
assertArrayEquals(new byte[]{0x00}, val.getKey().getArray());
assertArrayEquals(new byte[]{0x7f}, val.getValue().getArray());
}
@@ -220,7 +220,7 @@ public void testSeekReturnsTheActualKey() throws Exception {
@Test
public void testGettingTheKeyIfIteratorIsValid() throws Exception {
when(rocksIteratorMock.isValid()).thenReturn(true);
when(rocksIteratorMock.key(any()))
when(rocksIteratorMock.key(any(ByteBuffer.class)))
.then(newAnswerInt("key1", 0x00));

byte[] key = null;
@@ -233,7 +233,7 @@ public void testGettingTheKeyIfIteratorIsValid() throws Exception {
InOrder verifier = inOrder(rocksIteratorMock);

verifier.verify(rocksIteratorMock, times(1)).isValid();
verifier.verify(rocksIteratorMock, times(1)).key(any());
verifier.verify(rocksIteratorMock, times(1)).key(any(ByteBuffer.class));
assertArrayEquals(new byte[]{0x00}, key);

CodecTestUtil.gc();
@@ -242,9 +242,9 @@ public void testGettingTheKeyIfIteratorIsValid() throws Exception {
@Test
public void testGettingTheValueIfIteratorIsValid() throws Exception {
when(rocksIteratorMock.isValid()).thenReturn(true);
when(rocksIteratorMock.key(any()))
when(rocksIteratorMock.key(any(ByteBuffer.class)))
.then(newAnswerInt("key1", 0x00));
when(rocksIteratorMock.value(any()))
when(rocksIteratorMock.value(any(ByteBuffer.class)))
.then(newAnswerInt("val1", 0x7f));

byte[] key = null;
@@ -260,7 +260,7 @@ public void testGettingTheValueIfIteratorIsValid() throws Exception {
InOrder verifier = inOrder(rocksIteratorMock);

verifier.verify(rocksIteratorMock, times(1)).isValid();
verifier.verify(rocksIteratorMock, times(1)).key(any());
verifier.verify(rocksIteratorMock, times(1)).key(any(ByteBuffer.class));
assertArrayEquals(new byte[]{0x00}, key);
assertArrayEquals(new byte[]{0x7f}, value);

Expand All @@ -272,7 +272,7 @@ public void testRemovingFromDBActuallyDeletesFromTable() throws Exception {
final byte[] testKey = new byte[10];
ThreadLocalRandom.current().nextBytes(testKey);
when(rocksIteratorMock.isValid()).thenReturn(true);
when(rocksIteratorMock.key(any()))
when(rocksIteratorMock.key(any(ByteBuffer.class)))
.then(newAnswer("key1", testKey));

try (RDBStoreCodecBufferIterator i = newIterator(null)) {
@@ -320,7 +320,7 @@ public void testNullPrefixedIterator() throws Exception {
when(rocksIteratorMock.isValid()).thenReturn(true);
assertTrue(i.hasNext());
verify(rocksIteratorMock, times(1)).isValid();
verify(rocksIteratorMock, times(0)).key(any());
verify(rocksIteratorMock, times(0)).key(any(ByteBuffer.class));

i.seekToLast();
verify(rocksIteratorMock, times(1)).seekToLast();
@@ -343,11 +343,11 @@ public void testNormalPrefixedIterator() throws Exception {
clearInvocations(rocksIteratorMock);

when(rocksIteratorMock.isValid()).thenReturn(true);
when(rocksIteratorMock.key(any()))
when(rocksIteratorMock.key(any(ByteBuffer.class)))
.then(newAnswer("key1", prefixBytes));
assertTrue(i.hasNext());
verify(rocksIteratorMock, times(1)).isValid();
verify(rocksIteratorMock, times(1)).key(any());
verify(rocksIteratorMock, times(1)).key(any(ByteBuffer.class));

Exception e =
assertThrows(Exception.class, () -> i.seekToLast(), "Prefixed iterator does not support seekToLast");
@@ -24,26 +24,32 @@
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.DBOptions;
import org.rocksdb.Logger;
import org.rocksdb.LoggerInterface;

/**
* Managed DBOptions.
*/
public class ManagedDBOptions extends DBOptions {

private final UncheckedAutoCloseable leakTracker = track(this);
private final AtomicReference<Logger> loggerRef = new AtomicReference<>();
private final AtomicReference<LoggerInterface> loggerRef = new AtomicReference<>();

@Override
public DBOptions setLogger(Logger logger) {
IOUtils.close(LOG, loggerRef.getAndSet(logger));
public DBOptions setLogger(LoggerInterface logger) {
LoggerInterface oldLogger = loggerRef.getAndSet(logger);
if (oldLogger instanceof AutoCloseable) {
IOUtils.close(LOG, ((AutoCloseable)oldLogger));
}
return super.setLogger(logger);
}

@Override
public void close() {
try {
IOUtils.close(LOG, loggerRef.getAndSet(null));
LoggerInterface oldLogger = loggerRef.getAndSet(null);
if (oldLogger instanceof AutoCloseable) {
IOUtils.close(LOG, ((AutoCloseable)oldLogger));
}
super.close();
} finally {
leakTracker.close();
@@ -0,0 +1,46 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hdds.utils.db.managed;

import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track;

import org.apache.ratis.util.UncheckedAutoCloseable;
import org.rocksdb.ParsedEntryInfo;

/**
* ManagedParsedEntry is a subclass of ParsedEntryInfo that ensures proper
* tracking and closure of resources to prevent resource leakage. This class
* leverages an internal leak tracker to monitor lifecycle events and ensures
* that native resources are released correctly when the object is closed.
*
* It overrides the {@code close} method to integrate the cleanup logic for
* resource tracking, delegating the resource closure to its parent class, and
* subsequently ensuring the associated leak tracker is closed as well.
*/
public class ManagedParsedEntry extends ParsedEntryInfo {
private final UncheckedAutoCloseable leakTracker = track(this);

@Override
public void close() {
try {
super.close();
} finally {
leakTracker.close();
}
}
}
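A brief usage sketch of the leak-tracking pattern described in the class comment above; the no-arg constructor is the implicit one from the class as written, and the call that would actually populate the entry is deliberately left abstract rather than guessed.

// Illustrative only: confine the managed object to try-with-resources so the
// leak tracker registered in the field initializer is always released.
try (ManagedParsedEntry entry = new ManagedParsedEntry()) {
  // hand 'entry' to whichever RocksDB call fills in a ParsedEntryInfo
}  // close() frees the native handle first, then closes the leak tracker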
@@ -17,12 +17,11 @@

package org.apache.hadoop.hdds.utils.db.managed;

import java.io.File;
import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.commons.io.FilenameUtils;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
@@ -115,19 +114,20 @@ public static ManagedRocksDB openWithLatestOptions(
* Delete SST files contained in the given key ranges from RocksDB using the
* RocksDB#deleteFilesInRanges API.
* @param fileToBeDeleted File to be deleted.
* @param columnFamilyHandle column family handle.
* @param ranges list of ranges to be deleted.
* @throws RocksDatabaseException if the underlying db throws an exception.
*/
public void deleteFile(LiveFileMetaData fileToBeDeleted) throws RocksDatabaseException {
String sstFileName = fileToBeDeleted.fileName();
File file = new File(fileToBeDeleted.path(), fileToBeDeleted.fileName());
public void deleteFile(ColumnFamilyHandle columnFamilyHandle, List<byte[]> ranges) throws RocksDatabaseException {
String columnFamilyName = null;
try {
get().deleteFile(sstFileName);
columnFamilyName = StringUtils.bytes2String(columnFamilyHandle.getName());
get().deleteFilesInRanges(columnFamilyHandle, ranges, false);
} catch (RocksDBException e) {
throw new RocksDatabaseException("Failed to delete " + file, e);
throw new RocksDatabaseException("Failed to delete files in ranges corresponding to columnFamily: "
+ columnFamilyName, e);
}
ManagedRocksObjectUtils.waitForFileDelete(file, Duration.ofSeconds(60));
}

public static Map<String, LiveFileMetaData> getLiveMetadataForSSTFiles(RocksDB db) {
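To close the loop on the reworked deleteFile wrapper above, a hedged sketch of the expected call shape: the ranges argument is a flat list of boundary keys read as consecutive [begin, end) pairs, and RocksDB's DeleteFilesInRange is documented to drop only SST files that lie entirely inside a range, which is why deleteFilesNotMatchingPrefix builds exactly two ranges per column family. Key literals and local variable names here are made up for illustration.

// Two ranges, [smallest, prefix) and [nextAfterPrefix, largest), passed as a
// flat four-element list; files fully contained in either range are deleted.
List<byte[]> ranges = ImmutableList.of(
    StringUtils.string2Bytes("a0"),      // smallest key seen in the column family
    StringUtils.string2Bytes("vol1/"),   // table prefix: keys carrying it are kept
    StringUtils.string2Bytes("vol10"),   // lexicographically next string after the prefix
    StringUtils.string2Bytes("z9"));     // largest key seen in the column family
managedRocksDB.deleteFile(columnFamilyHandle, ranges);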