diff --git a/ft/tests/CMakeLists.txt b/ft/tests/CMakeLists.txt
index 270ec9766..73930dda6 100644
--- a/ft/tests/CMakeLists.txt
+++ b/ft/tests/CMakeLists.txt
@@ -123,22 +123,4 @@ if(BUILD_TESTING OR BUILD_FT_TESTS)
get_filename_component(test_basename "${test}" NAME)
add_ft_test_aux(test-${test_basename} test-upgrade-recovery-logs ${test})
endforeach(test)
-
- ## give some tests, that time out normally, 1 hour to complete
- set(long_tests
- ft/ftloader-test-extractor-3a
- ft/log-test7
- ft/recovery-bad-last-entry
- ft/subblock-test-compression
- ft/upgrade_test_simple
- )
- set_tests_properties(${long_tests} PROPERTIES TIMEOUT 3600)
- ## some take even longer, with valgrind
- set(extra_long_tests
- ft/benchmark-test
- ft/benchmark-test_256
- ft/is_empty
- ft/subblock-test-checksum
- )
- set_tests_properties(${extra_long_tests} PROPERTIES TIMEOUT 7200)
endif(BUILD_TESTING OR BUILD_FT_TESTS)
diff --git a/locktree/lock_request.cc b/locktree/lock_request.cc
index 19ec146a3..64e53821b 100644
--- a/locktree/lock_request.cc
+++ b/locktree/lock_request.cc
@@ -93,10 +93,6 @@ void lock_request::destroy(void) {
toku_cond_destroy(&m_wait_cond);
}
-void lock_request::clearmem(char c) {
- memset(this, c, sizeof(* this));
-}
-
// set the lock request parameters. this API allows a lock request to be reused.
void lock_request::set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT *right_key, lock_request::type lock_type, bool big_txn, void *extra) {
invariant(m_state != state::PENDING);
@@ -321,16 +317,20 @@ int lock_request::retry(void) {
m_txnid, m_left_key, m_right_key, &conflicts, m_big_txn);
}
- // if the acquisition succeeded then remove ourselves from the
- // set of lock requests, complete, and signal the waiting thread.
- if (r == 0) {
+ // if the acquisition succeeded, or if we are out of locks,
+ // then remove ourselves from the set of lock requests, complete
+ // the lock request, and signal the waiting threads.
+ if (r == 0 || r == TOKUDB_OUT_OF_LOCKS) {
remove_from_lock_requests();
complete(r);
if (m_retry_test_callback)
m_retry_test_callback(); // test callback
toku_cond_broadcast(&m_wait_cond);
- } else {
+ } else if (r == DB_LOCK_NOTGRANTED) {
+ // get the conflicting txnid and remain pending
m_conflicting_txnid = conflicts.get(0);
+ } else {
+ invariant(0);
}
conflicts.destroy();
diff --git a/locktree/lock_request.h b/locktree/lock_request.h
index 36c3fd260..76ac953ba 100644
--- a/locktree/lock_request.h
+++ b/locktree/lock_request.h
@@ -89,7 +89,6 @@ class lock_request {
// effect: Destroys a lock request.
void destroy(void);
- void clearmem(char c);
// effect: Resets the lock request parameters, allowing it to be reused.
// requires: Lock request was already created at some point
diff --git a/locktree/tests/lock_request_create_set.cc b/locktree/tests/lock_request_create_set.cc
index 8ae685b98..cb0d9aa7d 100644
--- a/locktree/tests/lock_request_create_set.cc
+++ b/locktree/tests/lock_request_create_set.cc
@@ -41,7 +41,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
namespace toku {
// create and set the object's internals, destroy should not crash.
-void lock_request_unit_test::test_create_destroy(void) {
+void lock_request_unit_test::run(void) {
lock_request request;
request.create();
@@ -66,7 +66,7 @@ void lock_request_unit_test::test_create_destroy(void) {
int main(void) {
toku::lock_request_unit_test test;
- test.test_create_destroy();
+ test.run();
return 0;
}
diff --git a/locktree/tests/lock_request_get_set_keys.cc b/locktree/tests/lock_request_get_set_keys.cc
index fd57b70f5..56b785dfe 100644
--- a/locktree/tests/lock_request_get_set_keys.cc
+++ b/locktree/tests/lock_request_get_set_keys.cc
@@ -43,7 +43,7 @@ namespace toku {
// make setting keys and getting them back works properly.
// at a high level, we want to make sure keys are copied
// when appropriate and plays nice with +/- infinity.
-void lock_request_unit_test::test_get_set_keys(void) {
+void lock_request_unit_test::run(void) {
lock_request request;
request.create();
@@ -82,7 +82,7 @@ void lock_request_unit_test::test_get_set_keys(void) {
int main(void) {
toku::lock_request_unit_test test;
- test.test_get_set_keys();
+ test.run();
return 0;
}
diff --git a/locktree/tests/lock_request_killed.cc b/locktree/tests/lock_request_killed.cc
index ec4644442..404550620 100644
--- a/locktree/tests/lock_request_killed.cc
+++ b/locktree/tests/lock_request_killed.cc
@@ -63,7 +63,7 @@ static int my_killed_callback(void) {
}
// make sure deadlocks are detected when a lock request starts
-void lock_request_unit_test::test_wait_time_callback(void) {
+void lock_request_unit_test::run(void) {
int r;
locktree lt;
@@ -118,7 +118,7 @@ void lock_request_unit_test::test_wait_time_callback(void) {
int main(void) {
toku::lock_request_unit_test test;
- test.test_wait_time_callback();
+ test.run();
return 0;
}
diff --git a/locktree/tests/lock_request_not_killed.cc b/locktree/tests/lock_request_not_killed.cc
index 647b4d3c4..9ec9a57fe 100644
--- a/locktree/tests/lock_request_not_killed.cc
+++ b/locktree/tests/lock_request_not_killed.cc
@@ -58,7 +58,7 @@ static int my_killed_callback(void) {
}
// make sure deadlocks are detected when a lock request starts
-void lock_request_unit_test::test_wait_time_callback(void) {
+void lock_request_unit_test::run(void) {
int r;
locktree lt;
@@ -112,7 +112,7 @@ void lock_request_unit_test::test_wait_time_callback(void) {
int main(void) {
toku::lock_request_unit_test test;
- test.test_wait_time_callback();
+ test.run();
return 0;
}
diff --git a/locktree/tests/lock_request_retry_out_of_locks.cc b/locktree/tests/lock_request_retry_out_of_locks.cc
new file mode 100644
index 000000000..d98417858
--- /dev/null
+++ b/locktree/tests/lock_request_retry_out_of_locks.cc
@@ -0,0 +1,117 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+
+// Verify that lock request retry returns TOKUDB_OUT_OF_LOCKS when
+// all of the locktree memory is used.
+
+#include "lock_request_unit_test.h"
+
+namespace toku {
+
+ static void locktree_release_lock(locktree *lt,
+ TXNID txn_id,
+ const DBT *left,
+ const DBT *right) {
+ range_buffer buffer;
+ buffer.create();
+ buffer.append(left, right);
+ lt->release_locks(txn_id, &buffer);
+ buffer.destroy();
+ }
+
+ void lock_request_unit_test::run(void) {
+ int r;
+
+ locktree_manager mgr;
+ mgr.create(nullptr, nullptr, nullptr, nullptr);
+
+ DICTIONARY_ID dict_id = {1};
+ locktree *lt = mgr.get_lt(dict_id, dbt_comparator, nullptr);
+
+ // set max lock memory small so that we can test the limit
+ // with just 2 locks
+ mgr.set_max_lock_memory(300);
+
+ // create a small key
+ DBT small_dbt;
+ int64_t small_key = 1;
+ toku_fill_dbt(&small_dbt, &small_key, sizeof small_key);
+ small_dbt.flags = DB_DBT_USERMEM;
+ const DBT *small_ptr = &small_dbt;
+
+ // create a large key
+ DBT large_dbt;
+ union { int64_t n; char c[64]; } large_key;
+ memset(&large_key, 0, sizeof large_key);
+ large_key.n = 2;
+ toku_fill_dbt(&large_dbt, &large_key, sizeof large_key);
+ large_dbt.flags = DB_DBT_USERMEM;
+ const DBT *large_dbt_ptr = &large_dbt;
+
+ TXNID txn_a = { 1 };
+ TXNID txn_b = { 2 };
+
+ // a locks small key
+ lock_request a;
+ a.create();
+ a.set(lt, txn_a, small_ptr, small_ptr, lock_request::type::WRITE, false);
+ r = a.start();
+ assert(r == 0);
+ assert(a.m_state == lock_request::state::COMPLETE);
+
+ // b tries to lock small key, fails since it is already locked
+ lock_request b;
+ b.create();
+ b.set(lt, txn_b, small_ptr, small_ptr, lock_request::type::WRITE, false);
+ r = b.start();
+ assert(r == DB_LOCK_NOTGRANTED);
+ assert(b.m_state == lock_request::state::PENDING);
+
+ // a locks large key. lock memory is now over the limit
+ lock_request c;
+ c.create();
+ c.set(lt, txn_a, large_dbt_ptr, large_dbt_ptr, lock_request::type::WRITE, false);
+ r = c.start();
+ assert(r == 0);
+ assert(c.m_state == lock_request::state::COMPLETE);
+
+ // a releases small key, lock memory is still over the limit
+ locktree_release_lock(lt, txn_a, small_ptr, small_ptr);
+
+ // retry all lock requests, should complete lock request
+ // b with a TOKUDB_OUT_OF_LOCKS result
+ lock_request::retry_all_lock_requests(lt);
+
+ assert(b.m_state == lock_request::state::COMPLETE);
+ assert(b.m_complete_r == TOKUDB_OUT_OF_LOCKS);
+
+ // b waits for small key, gets out of locks
+ r = b.wait(0);
+ assert(r == TOKUDB_OUT_OF_LOCKS);
+ assert(b.m_state == lock_request::state::COMPLETE);
+ assert(b.m_complete_r == TOKUDB_OUT_OF_LOCKS);
+
+ // a releases large key
+ locktree_release_lock(lt, txn_a, large_dbt_ptr, large_dbt_ptr);
+
+ // b locks small key, gets it
+ r = b.start();
+ assert(r == 0);
+
+ // b releases lock so we can exit cleanly
+ locktree_release_lock(lt, txn_b, small_ptr, small_ptr);
+
+ a.destroy();
+ b.destroy();
+
+ mgr.release_lt(lt);
+ mgr.destroy();
+ }
+
+} /* namespace toku */
+
+int main(void) {
+ toku::lock_request_unit_test test;
+ test.run();
+ return 0;
+}
diff --git a/locktree/tests/lock_request_start_deadlock.cc b/locktree/tests/lock_request_start_deadlock.cc
index 343becfc7..c13d5b085 100644
--- a/locktree/tests/lock_request_start_deadlock.cc
+++ b/locktree/tests/lock_request_start_deadlock.cc
@@ -41,7 +41,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
namespace toku {
// make sure deadlocks are detected when a lock request starts
-void lock_request_unit_test::test_start_deadlock(void) {
+void lock_request_unit_test::run(void) {
int r;
locktree lt;
@@ -114,7 +114,7 @@ void lock_request_unit_test::test_start_deadlock(void) {
int main(void) {
toku::lock_request_unit_test test;
- test.test_start_deadlock();
+ test.run();
return 0;
}
diff --git a/locktree/tests/lock_request_start_pending.cc b/locktree/tests/lock_request_start_pending.cc
index ce6510354..6c133b7a0 100644
--- a/locktree/tests/lock_request_start_pending.cc
+++ b/locktree/tests/lock_request_start_pending.cc
@@ -42,7 +42,7 @@ namespace toku {
// starting a lock request without immediate success should get
// stored in the lock request set as pending.
-void lock_request_unit_test::test_start_pending(void) {
+void lock_request_unit_test::run(void) {
int r;
locktree lt;
lock_request request;
@@ -100,7 +100,7 @@ void lock_request_unit_test::test_start_pending(void) {
int main(void) {
toku::lock_request_unit_test test;
- test.test_start_pending();
+ test.run();
return 0;
}
diff --git a/locktree/tests/lock_request_start_retry_race.cc b/locktree/tests/lock_request_start_retry_race.cc
index 83436a651..374bc8625 100644
--- a/locktree/tests/lock_request_start_retry_race.cc
+++ b/locktree/tests/lock_request_start_retry_race.cc
@@ -83,7 +83,6 @@ namespace toku {
}
request.destroy();
- request.clearmem(0xab);
toku_pthread_yield();
if ((i % 10) == 0)
diff --git a/locktree/tests/lock_request_start_retry_race_3.cc b/locktree/tests/lock_request_start_retry_race_3.cc
index 288cb0855..4c900765f 100644
--- a/locktree/tests/lock_request_start_retry_race_3.cc
+++ b/locktree/tests/lock_request_start_retry_race_3.cc
@@ -96,7 +96,6 @@ namespace toku {
}
request.destroy();
- request.clearmem(0xab);
toku_pthread_yield();
if ((i % 10) == 0)
diff --git a/locktree/tests/lock_request_start_retry_wait_race_2.cc b/locktree/tests/lock_request_start_retry_wait_race_2.cc
index cd3dc7b37..ce74dc3ab 100644
--- a/locktree/tests/lock_request_start_retry_wait_race_2.cc
+++ b/locktree/tests/lock_request_start_retry_wait_race_2.cc
@@ -98,7 +98,6 @@ namespace toku {
}
request.destroy();
- request.clearmem(0xab);
toku_pthread_yield();
if ((i % 10) == 0)
diff --git a/locktree/tests/lock_request_unit_test.h b/locktree/tests/lock_request_unit_test.h
index 81e6db25b..c5ad7aae4 100644
--- a/locktree/tests/lock_request_unit_test.h
+++ b/locktree/tests/lock_request_unit_test.h
@@ -47,23 +47,7 @@ namespace toku {
class lock_request_unit_test {
public:
- // create and set the object's internals, destroy should not crash.
- void test_create_destroy(void);
-
- // make setting keys and getting them back works properly.
- // at a high level, we want to make sure keys are copied
- // when appropriate and plays nice with +/- infinity.
- void test_get_set_keys(void);
-
- // starting a lock request without immediate success should get
- // stored in the lock request set as pending.
- void test_start_pending(void);
-
- // make sure deadlocks are detected when a lock request starts
- void test_start_deadlock(void);
-
- // test that the get_wait_time callback works
- void test_wait_time_callback(void);
+ void run(void);
private:
// releases a single range lock and retries all lock requests.
diff --git a/locktree/tests/lock_request_wait_out_of_locks.cc b/locktree/tests/lock_request_wait_out_of_locks.cc
new file mode 100644
index 000000000..1c3750065
--- /dev/null
+++ b/locktree/tests/lock_request_wait_out_of_locks.cc
@@ -0,0 +1,114 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+
+// Verify that lock request wait returns TOKUDB_OUT_OF_LOCKS when
+// all of the locktree memory is used.
+
+#include "lock_request_unit_test.h"
+
+namespace toku {
+
+ static void locktree_release_lock(locktree *lt,
+ TXNID txn_id,
+ const DBT *left,
+ const DBT *right) {
+ range_buffer buffer;
+ buffer.create();
+ buffer.append(left, right);
+ lt->release_locks(txn_id, &buffer);
+ buffer.destroy();
+ }
+
+ void lock_request_unit_test::run(void) {
+ int r;
+
+ locktree_manager mgr;
+ mgr.create(nullptr, nullptr, nullptr, nullptr);
+
+ DICTIONARY_ID dict_id = {1};
+ locktree *lt = mgr.get_lt(dict_id, dbt_comparator, nullptr);
+
+ // set max lock memory small so that we can test the limit
+ // with just 2 locks
+ mgr.set_max_lock_memory(300);
+
+ // create a small key
+ DBT small_dbt;
+ int64_t small_key = 1;
+ toku_fill_dbt(&small_dbt, &small_key, sizeof small_key);
+ small_dbt.flags = DB_DBT_USERMEM;
+ const DBT *small_dbt_ptr = &small_dbt;
+
+ // create a large key
+ DBT large_dbt;
+ union { int64_t n; char c[64]; } large_key;
+ memset(&large_key, 0, sizeof large_key);
+ large_key.n = 2;
+ toku_fill_dbt(&large_dbt, &large_key, sizeof large_key);
+ large_dbt.flags = DB_DBT_USERMEM;
+ const DBT *large_dbt_ptr = &large_dbt;
+
+ TXNID txn_a = { 1 };
+ TXNID txn_b = { 2 };
+
+ // a locks small key
+ lock_request a;
+ a.create();
+ a.set(lt, txn_a, small_dbt_ptr, small_dbt_ptr, lock_request::type::WRITE, false);
+ r = a.start();
+ assert(r == 0);
+ assert(a.m_state == lock_request::state::COMPLETE);
+
+ // b tries to lock small key, fails since the small key is already locked
+ lock_request b;
+ b.create();
+ b.set(lt, txn_b, small_dbt_ptr, small_dbt_ptr, lock_request::type::WRITE, false);
+ r = b.start();
+ assert(r == DB_LOCK_NOTGRANTED);
+ assert(b.m_state == lock_request::state::PENDING);
+
+ // a locks large key. this uses all of the lock memory
+ lock_request c;
+ c.create();
+ c.set(lt, txn_a, large_dbt_ptr, large_dbt_ptr, lock_request::type::WRITE, false);
+ r = c.start();
+ assert(r == 0);
+ assert(c.m_state == lock_request::state::COMPLETE);
+
+ // a releases small key. the lock memory is still over the limit
+ locktree_release_lock(lt, txn_a, small_dbt_ptr, small_dbt_ptr);
+
+ // b waits for small key, gets out of locks since lock memory is over the limit
+ assert(b.m_state == lock_request::state::PENDING);
+ r = b.wait(0);
+ assert(r == TOKUDB_OUT_OF_LOCKS);
+ assert(b.m_state == lock_request::state::COMPLETE);
+
+ // retry pending lock requests
+ lock_request::retry_all_lock_requests(lt);
+
+ // a releases large key
+ locktree_release_lock(lt, txn_a, large_dbt_ptr, large_dbt_ptr);
+
+ // b locks small key, gets it
+ assert(b.m_state == lock_request::state::COMPLETE);
+ r = b.start();
+ assert(r == 0);
+
+ // b releases small key so we can exit cleanly
+ locktree_release_lock(lt, txn_b, small_dbt_ptr, small_dbt_ptr);
+
+ a.destroy();
+ b.destroy();
+
+ mgr.release_lt(lt);
+ mgr.destroy();
+ }
+
+} /* namespace toku */
+
+int main(void) {
+ toku::lock_request_unit_test test;
+ test.run();
+ return 0;
+}
diff --git a/locktree/tests/lock_request_wait_time_callback.cc b/locktree/tests/lock_request_wait_time_callback.cc
index 1647cee1d..e1c9a1d95 100644
--- a/locktree/tests/lock_request_wait_time_callback.cc
+++ b/locktree/tests/lock_request_wait_time_callback.cc
@@ -43,7 +43,7 @@ namespace toku {
static const uint64_t my_lock_wait_time = 10 * 1000; // 10 sec
// make sure deadlocks are detected when a lock request starts
-void lock_request_unit_test::test_wait_time_callback(void) {
+void lock_request_unit_test::run(void) {
int r;
locktree lt;
@@ -90,7 +90,7 @@ void lock_request_unit_test::test_wait_time_callback(void) {
int main(void) {
toku::lock_request_unit_test test;
- test.test_wait_time_callback();
+ test.run();
return 0;
}
diff --git a/locktree/tests/test.h b/locktree/tests/test.h
index 921f2468b..8f051e708 100644
--- a/locktree/tests/test.h
+++ b/locktree/tests/test.h
@@ -93,8 +93,8 @@ namespace toku {
if (toku_dbt_is_infinite(key1) || toku_dbt_is_infinite(key2)) {
return toku_dbt_infinite_compare(key1, key2);
} else {
- invariant(key1->size == sizeof(int64_t));
- invariant(key2->size == sizeof(int64_t));
+ invariant(key1->size >= sizeof(int64_t));
+ invariant(key2->size >= sizeof(int64_t));
int64_t a = *(int64_t*) key1->data;
int64_t b = *(int64_t*) key2->data;
if (a < b) {
diff --git a/portability/tests/test-xid.cc b/portability/tests/test-xid.cc
index 9ee68906b..6143d3b58 100644
--- a/portability/tests/test-xid.cc
+++ b/portability/tests/test-xid.cc
@@ -54,7 +54,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
// since we implement the same thing here as in toku_os_gettid, this test
// is pretty pointless
-static int gettid(void) {
+static int test_gettid(void) {
#if defined(__NR_gettid)
return syscall(__NR_gettid);
#elif defined(SYS_gettid)
@@ -68,6 +68,6 @@ static int gettid(void) {
int main(void) {
assert(toku_os_getpid() == getpid());
- assert(toku_os_gettid() == gettid());
+ assert(toku_os_gettid() == test_gettid());
return 0;
}
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index c01a8f0d6..1a3d3e0d8 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -197,7 +197,6 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
endif ()
add_ydb_drd_test_aux(drd_tiny_${test} ${test} --num_seconds 5 --num_elements 150 --join_timeout 3000)
- set_tests_properties(ydb/drd_tiny_${test} PROPERTIES TIMEOUT 3600)
add_test(ydb/drd_mid_${test}/prepare ${test} --only_create --num_elements 10000)
setup_toku_test_properties(ydb/drd_mid_${test}/prepare drd_mid_${test})
@@ -205,7 +204,6 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
set_tests_properties(ydb/drd_mid_${test} PROPERTIES
DEPENDS ydb/drd_mid_${test}/prepare
REQUIRED_FILES "drd_mid_${test}.ctest-data"
- TIMEOUT 15000
)
add_test(ydb/drd_large_${test}/prepare ${test} --only_create --num_elements 150000)
@@ -214,7 +212,6 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
set_tests_properties(ydb/drd_large_${test} PROPERTIES
DEPENDS ydb/drd_large_${test}/prepare
REQUIRED_FILES "drd_large_${test}.ctest-data"
- TIMEOUT 30000
)
endif()
endforeach(src)
@@ -252,8 +249,7 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
setup_toku_test_properties(${testnamebase} "${envdirbase}")
set_tests_properties(${testnamebase} PROPERTIES
DEPENDS ${testnamebase}/copy
- REQUIRED_FILES "${envdir}"
- TIMEOUT 10800)
+ REQUIRED_FILES "${envdir}")
endforeach(size)
endif ()
endforeach(p_or_s)
@@ -433,61 +429,4 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
string(REGEX REPLACE ";" ";ydb/" tdb_tests_that_should_fail "${tdb_tests_that_should_fail}")
set_tests_properties(${tdb_tests_that_should_fail} PROPERTIES WILL_FAIL TRUE)
- ## give some tests, that time out normally, 1 hour to complete
- set(long_tests
- ydb/drd_test_groupcommit_count.tdb
- ydb/env-put-multiple.tdb
- ydb/filesize.tdb
- ydb/loader-cleanup-test0.tdb
- ydb/loader-cleanup-test0z.tdb
- ydb/manyfiles.tdb
- ydb/recover-loader-test.abortrecover
- ydb/recovery_fileops_stress.tdb
- ydb/root_fifo_1.tdb
- ydb/root_fifo_2.tdb
- ydb/root_fifo_31.tdb
- ydb/root_fifo_32.tdb
- ydb/shutdown-3344.tdb
- ydb/stat64-create-modify-times.tdb
- ydb/test1572.tdb
- ydb/test_abort4_19_0.tdb
- ydb/test_abort4_19_1.tdb
- ydb/test_abort5.tdb
- ydb/test_archive1.tdb
- ydb/test_logmax.tdb
- ydb/test_query.tdb
- ydb/test_txn_abort5.tdb
- ydb/test_txn_abort5a.tdb
- ydb/test_txn_abort6.tdb
- ydb/test_txn_nested2.tdb
- ydb/test_txn_nested4.tdb
- ydb/test_txn_nested5.tdb
- ydb/test_update_broadcast_stress.tdb
- )
- set_tests_properties(${long_tests} PROPERTIES TIMEOUT 3600)
- ## some take even longer, with valgrind
- set(extra_long_tests
- ydb/drd_test_4015.tdb
- ydb/hotindexer-with-queries.tdb
- ydb/hot-optimize-table-tests.tdb
- ydb/loader-cleanup-test2.tdb
- ydb/loader-cleanup-test2z.tdb
- ydb/loader-dup-test0.tdb
- ydb/loader-stress-del.nop.loader
- ydb/loader-stress-del.p.loader
- ydb/loader-stress-del.comp.loader
- ydb/test3039.tdb
- ydb/test_update_stress.tdb
- )
- set_tests_properties(${extra_long_tests} PROPERTIES TIMEOUT 7200)
- ## these really take a long time with valgrind
- set(phenomenally_long_tests
- ydb/checkpoint_stress.tdb
- ydb/loader-stress-test4.tdb
- ydb/loader-stress-test4z.tdb
- ydb/recover_stress.tdb
- ydb/test3529.tdb
- ydb/test_insert_unique.tdb
- )
- set_tests_properties(${phenomenally_long_tests} PROPERTIES TIMEOUT 14400)
endif(BUILD_TESTING OR BUILD_SRC_TESTS)
diff --git a/src/tests/put-wait-retry-out-of-locks.cc b/src/tests/put-wait-retry-out-of-locks.cc
new file mode 100644
index 000000000..24f3d1280
--- /dev/null
+++ b/src/tests/put-wait-retry-out-of-locks.cc
@@ -0,0 +1,164 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Verify that a DB put that is waiting on a previously locked key gets a TOKUDB_OUT_OF_LOCKS
+// result when another key is released AND the lock memory used is still over the limit.
+
+#include "test.h"
+#include "toku_pthread.h"
+
+static int put_small_key(DB *db, DB_TXN *txn) {
+ char k[8] = {};
+ DBT key = { .data = &k, .size = sizeof k};
+ DBT val = {};
+ int r = db->put(db, txn, &key, &val, 0);
+ return r;
+}
+
+static int put_large_key(DB *db, DB_TXN *txn) {
+ char k[200] = {};
+ DBT key = { .data = &k, .size = sizeof k};
+ DBT val = {};
+ int r = db->put(db, txn, &key, &val, 0);
+ return r;
+}
+
+struct test_c_args {
+ DB *db;
+ DB_TXN *txn;
+};
+
+static void *test_c(void *arg) {
+ struct test_c_args *a = (struct test_c_args *) arg;
+ int r = put_small_key(a->db, a->txn);
+ assert(r == TOKUDB_OUT_OF_LOCKS);
+ return arg;
+}
+
+int test_main(int argc, char * const argv[]) {
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup the test environment
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = nullptr;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+
+ // Set a small lock memory limit
+ const uint64_t lock_memory_wanted = 300;
+ r = db_env->set_lk_max_memory(db_env, lock_memory_wanted); assert(r == 0);
+ uint64_t lock_memory_limit;
+ r = db_env->get_lk_max_memory(db_env, &lock_memory_limit); assert(r == 0 && lock_memory_limit == lock_memory_wanted);
+
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = nullptr;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ r = db->open(db, nullptr, db_filename, nullptr, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ // create the txn's
+ DB_TXN *txn_a = nullptr;
+ r = db_env->txn_begin(db_env, nullptr, &txn_a, 0); assert(r == 0);
+
+ DB_TXN *txn_b = nullptr;
+ r = db_env->txn_begin(db_env, nullptr, &txn_b, 0); assert(r == 0);
+
+ DB_TXN *txn_c = nullptr;
+ r = db_env->txn_begin(db_env, nullptr, &txn_c, 0); assert(r == 0);
+
+ // Put a small key into the DB.
+ // Before: lock memory used is 0.
+ // After: lock memory used is under the limit.
+ r = put_small_key(db, txn_a);
+ assert(r == 0);
+
+ // Create a thread that will attempt to lock the same key as txn_a.
+ // Effect: this thread will be blocking on the lock request for this
+ // key
+ toku_pthread_t tid_c;
+ test_c_args a = { db, txn_c };
+ r = toku_pthread_create(toku_uninstrumented, &tid_c, nullptr, test_c, &a);
+ assert(r == 0);
+
+ // give thread c some time to get blocked
+ sleep(1);
+
+ // Put a large key into the DB, which should succeed.
+ // Before: lock memory used is under the limit
+ // After: lock memory used is over the limit due to the addition of the large key
+ r = put_large_key(db, txn_b);
+ assert(r == 0);
+
+ // abort txn a. this releases the lock on the small key, but lock memory
+ // is still over the limit, so the test_c put lock retry should get
+ // TOKUDB_OUT_OF_LOCKS
+ r = txn_a->abort(txn_a); assert(r == 0);
+
+ // cleanup
+ void *ret;
+ r = toku_pthread_join(tid_c, &ret); assert(r == 0);
+ r = txn_b->abort(txn_b); assert(r == 0);
+ r = txn_c->abort(txn_c); assert(r == 0);
+ r = db->close(db, 0); assert(r == 0); db = nullptr;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = nullptr;
+
+ return 0;
+}
diff --git a/src/tests/simple-put-out-of-locks.cc b/src/tests/simple-put-out-of-locks.cc
new file mode 100644
index 000000000..40165a0c8
--- /dev/null
+++ b/src/tests/simple-put-out-of-locks.cc
@@ -0,0 +1,125 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Verify that a DB put fails with a TOKUDB_OUT_OF_LOCKS result when the lock memory
+// use is over the limit.
+
+#include "test.h"
+#include "toku_pthread.h"
+
+static int put_small_key(DB *db, DB_TXN *txn) {
+ char k[8] = {};
+ DBT key = { .data = &k, .size = sizeof k};
+ DBT val = {};
+ int r = db->put(db, txn, &key, &val, 0);
+ return r;
+}
+
+static int put_large_key(DB *db, DB_TXN *txn) {
+ char k[200] = {};
+ DBT key = { .data = &k, .size = sizeof k};
+ DBT val = {};
+ int r = db->put(db, txn, &key, &val, 0);
+ return r;
+}
+
+int test_main(int argc, char * const argv[]) {
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup the test
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ // create the env
+ DB_ENV *db_env = nullptr;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+
+ // Setup a small lock memory limit
+ const uint64_t lock_memory_wanted = 300;
+ r = db_env->set_lk_max_memory(db_env, lock_memory_wanted); assert(r == 0);
+ uint64_t lock_memory_limit;
+ r = db_env->get_lk_max_memory(db_env, &lock_memory_limit); assert(r == 0 && lock_memory_limit == lock_memory_wanted);
+
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = nullptr;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ r = db->open(db, nullptr, db_filename, nullptr, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ DB_TXN *txn_a = nullptr;
+ r = db_env->txn_begin(db_env, nullptr, &txn_a, 0); assert(r == 0);
+
+ // Put a large key into the DB.
+ // Before: lock memory use is 0.
+ // After: lock memory is over the limit
+ r = put_large_key(db, txn_a);
+ assert(r == 0);
+
+ // Try to put a small key into the DB.
+ // Should get TOKUDB_OUT_OF_LOCKS since lock memory is over the limit.
+ r = put_small_key(db, txn_a);
+ assert(r == TOKUDB_OUT_OF_LOCKS);
+
+ // cleanup
+ r = txn_a->abort(txn_a); assert(r == 0);
+ r = db->close(db, 0); assert(r == 0); db = nullptr;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = nullptr;
+
+ return 0;
+}
diff --git a/src/tests/test_memcmp_magic.cc b/src/tests/test_memcmp_magic.cc
index 8b56e716a..21e131bc0 100644
--- a/src/tests/test_memcmp_magic.cc
+++ b/src/tests/test_memcmp_magic.cc
@@ -103,7 +103,7 @@ static void test_memcmp_magic_sort_order(void) {
DB_ENV *env;
r = db_env_create(&env, 0); CKERR(r);
r = env->set_default_bt_compare(env, comparison_function_unused); CKERR(r);
- r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL+DB_INIT_TXN, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL+DB_INIT_TXN, S_IRUSR+S_IWUSR); CKERR(r);
const int magic = 49;
diff --git a/src/tests/xa-bigtxn-discard-abort.cc b/src/tests/xa-bigtxn-discard-abort.cc
index 9f1b904df..4163d9918 100644
--- a/src/tests/xa-bigtxn-discard-abort.cc
+++ b/src/tests/xa-bigtxn-discard-abort.cc
@@ -62,7 +62,7 @@ static void populate_foo(DB_ENV *env, DB_TXN *txn) {
DB *db = nullptr;
r = db_create(&db, env, 0);
CKERR(r);
- r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, 0);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, S_IRWXU);
CKERR(r);
for (int i = 0; i < test_nrows; i++) {
@@ -81,7 +81,7 @@ static void check_foo(DB_ENV *env, DB_TXN *txn) {
DB *db;
r = db_create(&db, env, 0);
CKERR(r);
- r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, 0);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, S_IRWXU);
CKERR(r);
DBC *c = nullptr;
diff --git a/src/tests/xa-bigtxn-discard-commit.cc b/src/tests/xa-bigtxn-discard-commit.cc
index ecbfa18bd..733df036e 100644
--- a/src/tests/xa-bigtxn-discard-commit.cc
+++ b/src/tests/xa-bigtxn-discard-commit.cc
@@ -59,7 +59,7 @@ static void populate_foo(DB_ENV *env, DB_TXN *txn) {
DB *db = nullptr;
r = db_create(&db, env, 0);
CKERR(r);
- r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, 0);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, S_IRWXU);
CKERR(r);
for (int i = 0; i < test_nrows; i++) {
@@ -78,7 +78,7 @@ static void check_foo(DB_ENV *env, DB_TXN *txn) {
DB *db;
r = db_create(&db, env, 0);
CKERR(r);
- r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, 0);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, S_IRWXU);
CKERR(r);
DBC *c = nullptr;
diff --git a/util/tests/CMakeLists.txt b/util/tests/CMakeLists.txt
index 8d53dd89a..780bf8d66 100644
--- a/util/tests/CMakeLists.txt
+++ b/util/tests/CMakeLists.txt
@@ -17,8 +17,4 @@ if(BUILD_TESTING)
add_test(util/${test} ${test})
endforeach(test)
- set(long_tests
- util/helgrind_test_partitioned_counter
- )
- set_tests_properties(${long_tests} PROPERTIES TIMEOUT 3600)
endif(BUILD_TESTING)