Diffstat (limited to 'quantum/wear_leveling')
-rw-r--r-- | quantum/wear_leveling/tests/backing_mocks.cpp | 154
-rw-r--r-- | quantum/wear_leveling/tests/backing_mocks.hpp | 210
-rw-r--r-- | quantum/wear_leveling/tests/rules.mk | 66
-rw-r--r-- | quantum/wear_leveling/tests/testlist.mk | 6
-rw-r--r-- | quantum/wear_leveling/tests/wear_leveling_2byte.cpp | 228
-rw-r--r-- | quantum/wear_leveling/tests/wear_leveling_2byte_optimized_writes.cpp | 295
-rw-r--r-- | quantum/wear_leveling/tests/wear_leveling_4byte.cpp | 193
-rw-r--r-- | quantum/wear_leveling/tests/wear_leveling_8byte.cpp | 178
-rw-r--r-- | quantum/wear_leveling/tests/wear_leveling_general.cpp | 204
-rw-r--r-- | quantum/wear_leveling/wear_leveling.c | 768
-rw-r--r-- | quantum/wear_leveling/wear_leveling.h | 54
-rw-r--r-- | quantum/wear_leveling/wear_leveling_internal.h | 151
12 files changed, 2507 insertions, 0 deletions
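The mock backing store added below (backing_mocks.cpp/.hpp) is what drives all of these tests: each backing_store_* hook counts its invocations and can be made to fail on a chosen call via the set_*_callback functions. As a minimal usage sketch, assuming the WearLeveling2Byte fixture defined later in this diff and a hypothetical test name, a failure-path test could look like this (illustrative only, not part of the commit):

// Illustrative sketch: force every backing write to report failure and
// check that the wear-leveling layer actually attempted a backing write.
TEST_F(WearLeveling2Byte, ExampleSimulatedWriteFailure) {
    auto& inst = MockBackingStore::Instance();

    // Report failure for every invocation of backing_store_write().
    inst.set_write_callback([](std::uint64_t, std::uint32_t) { return false; });

    // 0x15 differs from the erased cache value, so at least one backing write is attempted.
    uint8_t value = 0x15;
    wear_leveling_write(0x02, &value, sizeof(value));
    EXPECT_GT(inst.write_invoke_count(), 0) << "Expected at least one attempted backing store write";
}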
diff --git a/quantum/wear_leveling/tests/backing_mocks.cpp b/quantum/wear_leveling/tests/backing_mocks.cpp
new file mode 100644
index 0000000000..1dbb26f8e7
--- /dev/null
+++ b/quantum/wear_leveling/tests/backing_mocks.cpp
@@ -0,0 +1,154 @@
+// Copyright 2022 Nick Brassel (@tzarc)
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "gtest/gtest.h"
+#include "gmock/gmock.h"
+#include "backing_mocks.hpp"
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Backing Store Mock implementation
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+void MockBackingStore::reset_instance() {
+    for (auto&& e : backing_storage)
+        e.reset();
+
+    locked = true;
+
+    backing_erasure_count = 0;
+    backing_max_write_count = 0;
+    backing_total_write_count = 0;
+
+    backing_init_invoke_count = 0;
+    backing_unlock_invoke_count = 0;
+    backing_erase_invoke_count = 0;
+    backing_write_invoke_count = 0;
+    backing_lock_invoke_count = 0;
+
+    init_success_callback = [](std::uint64_t) { return true; };
+    erase_success_callback = [](std::uint64_t) { return true; };
+    unlock_success_callback = [](std::uint64_t) { return true; };
+    write_success_callback = [](std::uint64_t, std::uint32_t) { return true; };
+    lock_success_callback = [](std::uint64_t) { return true; };
+
+    write_log.clear();
+}
+
+bool MockBackingStore::init(void) {
+    ++backing_init_invoke_count;
+
+    if (init_success_callback) {
+        return init_success_callback(backing_init_invoke_count);
+    }
+    return true;
+}
+
+bool MockBackingStore::unlock(void) {
+    ++backing_unlock_invoke_count;
+
+    EXPECT_TRUE(is_locked()) << "Attempted to unlock but was not locked";
+    locked = false;
+
+    if (unlock_success_callback) {
+        return unlock_success_callback(backing_unlock_invoke_count);
+    }
+    return true;
+}
+
+bool MockBackingStore::erase(void) {
+    ++backing_erase_invoke_count;
+
+    // Erase each slot
+    for (std::size_t i = 0; i < backing_storage.size(); ++i) {
+        // Drop out of erase early with failure if we need to
+        if (erase_success_callback && !erase_success_callback(backing_erase_invoke_count)) {
+            append_log(true);
+            return false;
+        }
+
+        backing_storage[i].erase();
+    }
+
+    // Keep track of the erase in the write log so that we can verify during tests
+    append_log(true);
+
+    ++backing_erasure_count;
+    return true;
+}
+
+bool MockBackingStore::write(uint32_t address, backing_store_int_t value) {
+    ++backing_write_invoke_count;
+
+    // precondition: value's buffer size already matches BACKING_STORE_WRITE_SIZE
+    EXPECT_TRUE(address % BACKING_STORE_WRITE_SIZE == 0) << "Supplied address was not aligned with the backing store integral size";
+    EXPECT_TRUE(address + BACKING_STORE_WRITE_SIZE <= WEAR_LEVELING_BACKING_SIZE) << "Address would result in out-of-bounds access";
+    EXPECT_FALSE(is_locked()) << "Write was attempted without being unlocked first";
+
+    // Drop out of write early with failure if we need to
+    if (write_success_callback && !write_success_callback(backing_write_invoke_count, address)) {
+        return false;
+    }
+
+    // Write the complement as we're simulating flash memory -- 0xFF means 0x00
+    std::size_t index = address / BACKING_STORE_WRITE_SIZE;
+    backing_storage[index].set(~value);
+
+    // Keep track of the write log so that we can verify during tests
+    append_log(address, value);
+
+    // Keep track of the total number of writes into the backing store
+    ++backing_total_write_count;
+
+    return true;
+}
+
+bool MockBackingStore::lock(void) {
+    ++backing_lock_invoke_count;
+
+    EXPECT_FALSE(is_locked()) << "Attempted to lock but was not unlocked";
+    locked = true;
+
+    if (lock_success_callback) {
+        return lock_success_callback(backing_lock_invoke_count);
+    }
+    return true;
+}
+
+bool MockBackingStore::read(uint32_t address, backing_store_int_t& value) const {
+    // precondition: value's buffer size already matches BACKING_STORE_WRITE_SIZE
+    EXPECT_TRUE(address % BACKING_STORE_WRITE_SIZE == 0) << "Supplied address was not aligned with the backing store integral size";
+    EXPECT_TRUE(address + BACKING_STORE_WRITE_SIZE <= WEAR_LEVELING_BACKING_SIZE) << "Address would result in out-of-bounds access";
+
+    // Read and take the complement as we're simulating flash memory -- 0xFF means 0x00
+    std::size_t index = address / BACKING_STORE_WRITE_SIZE;
+    value = ~backing_storage[index].get();
+
+    return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Backing Implementation
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+extern "C" bool backing_store_init(void) {
+    return MockBackingStore::Instance().init();
+}
+
+extern "C" bool backing_store_unlock(void) {
+    return MockBackingStore::Instance().unlock();
+}
+
+extern "C" bool backing_store_erase(void) {
+    return MockBackingStore::Instance().erase();
+}
+
+extern "C" bool backing_store_write(uint32_t address, backing_store_int_t value) {
+    return MockBackingStore::Instance().write(address, value);
+}
+
+extern "C" bool backing_store_lock(void) {
+    return MockBackingStore::Instance().lock();
+}
+
+extern "C" bool backing_store_read(uint32_t address, backing_store_int_t* value) {
+    return MockBackingStore::Instance().read(address, *value);
+}
diff --git a/quantum/wear_leveling/tests/backing_mocks.hpp b/quantum/wear_leveling/tests/backing_mocks.hpp
new file mode 100644
index 0000000000..e7af7895f3
--- /dev/null
+++ b/quantum/wear_leveling/tests/backing_mocks.hpp
@@ -0,0 +1,210 @@
+// Copyright 2022 Nick Brassel (@tzarc)
+// SPDX-License-Identifier: GPL-2.0-or-later
+#pragma once
+#include <algorithm>
+#include <array>
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <type_traits>
+#include <vector>
+
+extern "C" {
+#include "fnv.h"
+#include "wear_leveling.h"
+#include "wear_leveling_internal.h"
+};
+
+// Maximum number of mock write log entries to keep
+using MOCK_WRITE_LOG_MAX_ENTRIES = std::integral_constant<std::size_t, 1024>;
+// Complement to the backing store integral, for emulating flash erases of all bytes=0xFF
+using BACKING_STORE_INTEGRAL_COMPLEMENT = std::integral_constant<backing_store_int_t, ((backing_store_int_t)(~(backing_store_int_t)0))>;
+// Total number of elements stored in the backing arrays
+using BACKING_STORE_ELEMENT_COUNT = std::integral_constant<std::size_t, (WEAR_LEVELING_BACKING_SIZE / sizeof(backing_store_int_t))>;
+
+class MockBackingStoreElement {
+  private:
+    backing_store_int_t value;
+    std::size_t writes;
+    std::size_t erases;
+
+  public:
+    MockBackingStoreElement() : value(BACKING_STORE_INTEGRAL_COMPLEMENT::value), writes(0), erases(0) {}
+    void reset() {
+        erase();
+        writes = 0;
+        erases = 0;
+    }
+    void erase() {
+        if (!is_erased()) {
+            ++erases;
+        }
+        value = BACKING_STORE_INTEGRAL_COMPLEMENT::value;
+    }
+    backing_store_int_t get() const {
+        return value;
+    }
+    void set(const backing_store_int_t& v) {
+        EXPECT_TRUE(is_erased()) << "Attempted write at index which isn't empty.";
+        value = v;
+        ++writes;
+    }
+    std::size_t num_writes() const {
+        return writes;
+    }
+    std::size_t num_erases() const {
+        return erases;
+    }
+    bool is_erased() const {
+        return value == BACKING_STORE_INTEGRAL_COMPLEMENT::value;
+    }
+};
+
+struct MockBackingStoreLogEntry {
+    MockBackingStoreLogEntry(uint32_t address, backing_store_int_t value) : address(address), value(value), erased(false) {}
+    MockBackingStoreLogEntry(bool erased) : address(0), value(0), erased(erased) {}
+    uint32_t address = 0;            // The address of the operation
+    backing_store_int_t value = 0;   // The value of the operation
+    bool erased = false;             // Whether the entire backing store was erased
+};
+
+class MockBackingStore {
+  private:
+    MockBackingStore() {
+        reset_instance();
+    }
+
+    // Type containing each of the entries and the write counts
+    using storage_t = std::array<MockBackingStoreElement, BACKING_STORE_ELEMENT_COUNT::value>;
+
+    // Whether the backing store is locked
+    bool locked;
+    // The actual data stored in the emulated flash
+    storage_t backing_storage;
+    // The number of erase cycles that have occurred
+    std::uint64_t backing_erasure_count;
+    // The max number of writes to an element of the backing store
+    std::uint64_t backing_max_write_count;
+    // The total number of writes to all elements of the backing store
+    std::uint64_t backing_total_write_count;
+    // The write log for the backing store
+    std::vector<MockBackingStoreLogEntry> write_log;
+
+    // The number of times each API was invoked
+    std::uint64_t backing_init_invoke_count;
+    std::uint64_t backing_unlock_invoke_count;
+    std::uint64_t backing_erase_invoke_count;
+    std::uint64_t backing_write_invoke_count;
+    std::uint64_t backing_lock_invoke_count;
+
+    // Whether init should succeed
+    std::function<bool(std::uint64_t)> init_success_callback;
+    // Whether erase should succeed
+    std::function<bool(std::uint64_t)> erase_success_callback;
+    // Whether unlocks should succeed
+    std::function<bool(std::uint64_t)> unlock_success_callback;
+    // Whether writes should succeed
+    std::function<bool(std::uint64_t, std::uint32_t)> write_success_callback;
+    // Whether locks should succeed
+    std::function<bool(std::uint64_t)> lock_success_callback;
+
+    template <typename... Args>
+    void append_log(Args&&... args) {
+        if (write_log.size() < MOCK_WRITE_LOG_MAX_ENTRIES::value) {
+            write_log.emplace_back(std::forward<Args>(args)...);
+        }
+    }
+
+  public:
+    static MockBackingStore& Instance() {
+        static MockBackingStore instance;
+        return instance;
+    }
+
+    std::uint64_t erasure_count() const {
+        return backing_erasure_count;
+    }
+    std::uint64_t max_write_count() const {
+        return backing_max_write_count;
+    }
+    std::uint64_t total_write_count() const {
+        return backing_total_write_count;
+    }
+
+    // The number of times each API was invoked
+    std::uint64_t init_invoke_count() const {
+        return backing_init_invoke_count;
+    }
+    std::uint64_t unlock_invoke_count() const {
+        return backing_unlock_invoke_count;
+    }
+    std::uint64_t erase_invoke_count() const {
+        return backing_erase_invoke_count;
+    }
+    std::uint64_t write_invoke_count() const {
+        return backing_write_invoke_count;
+    }
+    std::uint64_t lock_invoke_count() const {
+        return backing_lock_invoke_count;
+    }
+
+    // Clear out the internal data for the next run
+    void reset_instance();
+
+    bool is_locked() const {
+        return locked;
+    }
+
+    // APIs for the backing store
+    bool init();
+    bool unlock();
+    bool erase();
+    bool write(std::uint32_t address, backing_store_int_t value);
+    bool lock();
+    bool read(std::uint32_t address, backing_store_int_t& value) const;
+
+    // Control over when init/writes/erases should succeed
+    void set_init_callback(std::function<bool(std::uint64_t)> callback) {
+        init_success_callback = callback;
+    }
+    void set_erase_callback(std::function<bool(std::uint64_t)> callback) {
+        erase_success_callback = callback;
+    }
+    void set_unlock_callback(std::function<bool(std::uint64_t)> callback) {
+        unlock_success_callback = callback;
+    }
+    void set_write_callback(std::function<bool(std::uint64_t, std::uint32_t)> callback) {
+        write_success_callback = callback;
+    }
+    void set_lock_callback(std::function<bool(std::uint64_t)> callback) {
+        lock_success_callback = callback;
+    }
+
+    auto storage_begin() const -> decltype(backing_storage.begin()) {
+        return backing_storage.begin();
+    }
+    auto storage_end() const -> decltype(backing_storage.end()) {
+        return backing_storage.end();
+    }
+
+    auto storage_begin() -> decltype(backing_storage.begin()) {
+        return backing_storage.begin();
+    }
+    auto storage_end() -> decltype(backing_storage.end()) {
+        return backing_storage.end();
+    }
+
+    auto log_begin() -> decltype(write_log.begin()) {
+        return write_log.begin();
+    }
+    auto log_end() -> decltype(write_log.end()) {
+        return write_log.end();
+    }
+
+    auto log_begin() const -> decltype(write_log.begin()) {
+        return write_log.begin();
+    }
+    auto log_end() const -> decltype(write_log.end()) {
+        return write_log.end();
+    }
+};
diff --git a/quantum/wear_leveling/tests/rules.mk b/quantum/wear_leveling/tests/rules.mk
new file mode 100644
index 0000000000..4d7a964049
--- /dev/null
+++ b/quantum/wear_leveling/tests/rules.mk
@@ -0,0 +1,66 @@
+wear_leveling_common_DEFS := \
+    -DWEAR_LEVELING_TESTS
+wear_leveling_common_SRC := \
+    $(LIB_PATH)/fnv/qmk_fnv_type_validation.c \
+    $(LIB_PATH)/fnv/hash_32a.c \
+    $(LIB_PATH)/fnv/hash_64a.c \
+    $(QUANTUM_PATH)/wear_leveling/wear_leveling.c \
+    $(QUANTUM_PATH)/wear_leveling/tests/backing_mocks.cpp
+wear_leveling_common_INC := \
+    $(LIB_PATH)/fnv \
+    $(QUANTUM_PATH)/wear_leveling
+
+wear_leveling_general_DEFS := \
+    $(wear_leveling_common_DEFS) \
+    -DBACKING_STORE_WRITE_SIZE=2 \
+    -DWEAR_LEVELING_BACKING_SIZE=48 \
+    -DWEAR_LEVELING_LOGICAL_SIZE=16
+wear_leveling_general_SRC := \
+    $(wear_leveling_common_SRC) \
+    $(QUANTUM_PATH)/wear_leveling/tests/wear_leveling_general.cpp
+wear_leveling_general_INC := \
+    $(wear_leveling_common_INC)
+
+wear_leveling_2byte_optimized_writes_DEFS := \
+    $(wear_leveling_common_DEFS) \
+    -DBACKING_STORE_WRITE_SIZE=2 \
+    -DWEAR_LEVELING_BACKING_SIZE=65536 \
+    -DWEAR_LEVELING_LOGICAL_SIZE=32768
+wear_leveling_2byte_optimized_writes_SRC := \
+    $(wear_leveling_common_SRC) \
+    $(QUANTUM_PATH)/wear_leveling/tests/wear_leveling_2byte_optimized_writes.cpp
+wear_leveling_2byte_optimized_writes_INC := \
+    $(wear_leveling_common_INC)
+
+wear_leveling_2byte_DEFS := \
+    $(wear_leveling_common_DEFS) \
+    -DBACKING_STORE_WRITE_SIZE=2 \
+    -DWEAR_LEVELING_BACKING_SIZE=48 \
+    -DWEAR_LEVELING_LOGICAL_SIZE=16
+wear_leveling_2byte_SRC := \
+    $(wear_leveling_common_SRC) \
+    $(QUANTUM_PATH)/wear_leveling/tests/wear_leveling_2byte.cpp
+wear_leveling_2byte_INC := \
+    $(wear_leveling_common_INC)
+
+wear_leveling_4byte_DEFS := \
+    $(wear_leveling_common_DEFS) \
+    -DBACKING_STORE_WRITE_SIZE=4 \
+    -DWEAR_LEVELING_BACKING_SIZE=48 \
+    -DWEAR_LEVELING_LOGICAL_SIZE=16
+wear_leveling_4byte_SRC := \
+    $(wear_leveling_common_SRC) \
+    $(QUANTUM_PATH)/wear_leveling/tests/wear_leveling_4byte.cpp
+wear_leveling_4byte_INC := \
+    $(wear_leveling_common_INC)
+
+wear_leveling_8byte_DEFS := \
+    $(wear_leveling_common_DEFS) \
+    -DBACKING_STORE_WRITE_SIZE=8 \
+    -DWEAR_LEVELING_BACKING_SIZE=48 \
+    -DWEAR_LEVELING_LOGICAL_SIZE=16
+wear_leveling_8byte_SRC := \
+    $(wear_leveling_common_SRC) \
+    $(QUANTUM_PATH)/wear_leveling/tests/wear_leveling_8byte.cpp
+wear_leveling_8byte_INC := \
+    $(wear_leveling_common_INC)
\ No newline at end of file
diff --git a/quantum/wear_leveling/tests/testlist.mk b/quantum/wear_leveling/tests/testlist.mk
new file mode 100644
index 0000000000..32cfc178b4
--- /dev/null
+++ b/quantum/wear_leveling/tests/testlist.mk
@@ -0,0 +1,6 @@
+TEST_LIST += \
+    wear_leveling_general \
+    wear_leveling_2byte_optimized_writes \
+    wear_leveling_2byte \
+    wear_leveling_4byte \
+    wear_leveling_8byte
diff --git a/quantum/wear_leveling/tests/wear_leveling_2byte.cpp b/quantum/wear_leveling/tests/wear_leveling_2byte.cpp
new file mode 100644
index 0000000000..b749c32b04
--- /dev/null
+++ b/quantum/wear_leveling/tests/wear_leveling_2byte.cpp
@@ -0,0 +1,228 @@
+// Copyright 2022 Nick Brassel (@tzarc)
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <numeric>
+#include "gtest/gtest.h"
+#include "gmock/gmock.h"
+#include "backing_mocks.hpp"
+
+class WearLeveling2Byte : public ::testing::Test {
+  protected:
+    void SetUp() override {
+        MockBackingStore::Instance().reset_instance();
+        wear_leveling_init();
+    }
+};
+
+static std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> verify_data;
+
+static wear_leveling_status_t test_write(const uint32_t address, const void* value, size_t length) {
+    memcpy(&verify_data[address], value, length);
+    return wear_leveling_write(address, value, length);
+}
+
+/**
+ * This test verifies that the first write after initialisation occurs after the FNV1a_64 hash location.
+ */
+TEST_F(WearLeveling2Byte, FirstWriteOccursAfterHash) {
+    auto& inst = MockBackingStore::Instance();
+    uint8_t test_value = 0x15;
+    test_write(0x02, &test_value, sizeof(test_value));
+    EXPECT_EQ(inst.log_begin()->address, WEAR_LEVELING_LOGICAL_SIZE + 8) << "Invalid first write address.";
+}
+
+/**
+ * This test verifies that the first write after initialisation occurs after the FNV1a_64 hash location, after an erase has occurred.
+ */
+TEST_F(WearLeveling2Byte, FirstWriteOccursAfterHash_AfterErase) {
+    auto& inst = MockBackingStore::Instance();
+    uint8_t test_value = 0x15;
+    wear_leveling_erase();
+    test_write(0x02, &test_value, sizeof(test_value));
+    EXPECT_EQ((inst.log_begin() + 1)->address, WEAR_LEVELING_LOGICAL_SIZE + 8) << "Invalid first write address.";
+}
+
+/**
+ * This test forces consolidation by writing enough to the write log that it overflows, consolidating the data into the
+ * base logical area.
+ */
+TEST_F(WearLeveling2Byte, ConsolidationOverflow) {
+    auto& inst = MockBackingStore::Instance();
+
+    // Generate a test block of data which forces OPTIMIZED_64 writes
+    std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> testvalue;
+
+    // Write the data
+    std::iota(testvalue.begin(), testvalue.end(), 0x20);
+    EXPECT_EQ(test_write(0, testvalue.data(), testvalue.size()), WEAR_LEVELING_CONSOLIDATED) << "Write returned incorrect status";
+    uint8_t dummy = 0x40;
+    EXPECT_EQ(test_write(0x04, &dummy, sizeof(dummy)), WEAR_LEVELING_SUCCESS) << "Write returned incorrect status";
+
+    // All writes are at address<64, so each logical byte written will generate 1 write log entry, thus 1 backing store write.
+    // Expected log:
+    //   [0..11]: optimised64, backing address 0x18, logical address 0x00
+    //   [12]: erase
+    //   [13..20]: consolidated data, backing address 0x00, logical address 0x00
+    //   [21..24]: FNV1a_64 result, backing address 0x10
+    //   [25]: optimised64, backing address 0x18, logical address 0x04
+    EXPECT_EQ(std::distance(inst.log_begin(), inst.log_end()), 26);
+
+    // Verify the backing store writes for the write log
+    std::size_t index;
+    write_log_entry_t e;
+    for (index = 0; index < 12; ++index) {
+        auto write_iter = inst.log_begin() + index;
+        EXPECT_EQ(write_iter->address, WEAR_LEVELING_LOGICAL_SIZE + 8 + (index * BACKING_STORE_WRITE_SIZE)) << "Invalid write log address";
+        e.raw16[0] = write_iter->value;
+        EXPECT_EQ(LOG_ENTRY_GET_TYPE(e), LOG_ENTRY_TYPE_OPTIMIZED_64) << "Invalid write log entry type";
+    }
+
+    // Verify the backing store erase
+    {
+        index = 12;
+        auto write_iter = inst.log_begin() + index;
+        e.raw16[0] = write_iter->value;
+        EXPECT_TRUE(write_iter->erased) << "Backing store erase did not occur as required";
+    }
+
+    // Verify the backing store writes for consolidation
+    for (index = 13; index < 21; ++index) {
+        auto write_iter = inst.log_begin() + index;
+        EXPECT_EQ(write_iter->address, (index - 13) * BACKING_STORE_WRITE_SIZE) << "Invalid write log entry address";
+    }
+
+    // Verify the FNV1a_64 write
+    {
+        EXPECT_EQ((inst.log_begin() + 21)->address, WEAR_LEVELING_LOGICAL_SIZE) << "Invalid write log address";
+        e.raw16[0] = (inst.log_begin() + 21)->value;
+        e.raw16[1] = (inst.log_begin() + 22)->value;
+        e.raw16[2] = (inst.log_begin() + 23)->value;
+        e.raw16[3] = (inst.log_begin() + 24)->value;
+        EXPECT_EQ(e.raw64, fnv_64a_buf(testvalue.data(), testvalue.size(), FNV1A_64_INIT)) << "Invalid checksum"; // Note that checksum is based on testvalue, as we overwrote one byte and need to consult the consolidated data, not the current
+    }
+
+    // Verify the final write
+    EXPECT_EQ((inst.log_begin() + 25)->address, WEAR_LEVELING_LOGICAL_SIZE + 8) << "Invalid write log address";
+
+    // Verify the data is what we expected
+    std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> readback;
+    EXPECT_EQ(wear_leveling_read(0, readback.data(), WEAR_LEVELING_LOGICAL_SIZE), WEAR_LEVELING_SUCCESS) << "Failed to read back the saved data";
+    EXPECT_TRUE(memcmp(readback.data(), verify_data.data(), WEAR_LEVELING_LOGICAL_SIZE) == 0) << "Readback did not match";
+
+    // Re-init and re-read, verifying the reload capability
+    EXPECT_NE(wear_leveling_init(), WEAR_LEVELING_FAILED) << "Re-initialisation failed";
+    EXPECT_EQ(wear_leveling_read(0, readback.data(), WEAR_LEVELING_LOGICAL_SIZE), WEAR_LEVELING_SUCCESS) << "Failed to read back the saved data";
+    EXPECT_TRUE(memcmp(readback.data(), verify_data.data(), WEAR_LEVELING_LOGICAL_SIZE) == 0) << "Readback did not match";
+}
+
+/**
+ * This test verifies multibyte readback gets canceled with an out-of-bounds address.
+ */
+TEST_F(WearLeveling2Byte, PlaybackReadbackMultibyte_OOB) {
+    auto& inst = MockBackingStore::Instance();
+    auto logstart = inst.storage_begin() + (WEAR_LEVELING_LOGICAL_SIZE / sizeof(backing_store_int_t));
+
+    // Invalid FNV1a_64 hash
+    (logstart + 0)->set(0);
+    (logstart + 1)->set(0);
+    (logstart + 2)->set(0);
+    (logstart + 3)->set(0);
+
+    // Set up a 2-byte logical write of [0x11,0x12] at logical offset 0x01
+    auto entry0 = LOG_ENTRY_MAKE_MULTIBYTE(0x01, 2);
+    entry0.raw8[3] = 0x11;
+    entry0.raw8[4] = 0x12;
+    (logstart + 4)->set(~entry0.raw16[0]);
+    (logstart + 5)->set(~entry0.raw16[1]);
+    (logstart + 6)->set(~entry0.raw16[2]);
+
+    // Set up a 2-byte logical write of [0x13,0x14] at logical offset 0x1000 (out of bounds)
+    auto entry1 = LOG_ENTRY_MAKE_MULTIBYTE(0x1000, 2);
+    entry1.raw8[3] = 0x13;
+    entry1.raw8[4] = 0x14;
+    (logstart + 7)->set(~entry1.raw16[0]);
+    (logstart + 8)->set(~entry1.raw16[1]);
+    (logstart + 9)->set(~entry1.raw16[2]);
+
+    // Set up a 2-byte logical write of [0x15,0x16] at logical offset 0x01
+    auto entry2 = LOG_ENTRY_MAKE_MULTIBYTE(0x01, 2);
+    entry2.raw8[3] = 0x15;
+    entry2.raw8[4] = 0x16;
+    (logstart + 10)->set(~entry2.raw16[0]);
+    (logstart + 11)->set(~entry2.raw16[1]);
+    (logstart + 12)->set(~entry2.raw16[2]);
+
+    EXPECT_EQ(inst.erasure_count(), 0) << "Invalid initial erase count";
+    EXPECT_EQ(wear_leveling_init(), WEAR_LEVELING_CONSOLIDATED) << "Readback should have failed and triggered consolidation";
+    EXPECT_EQ(inst.erasure_count(), 1) << "Invalid final erase count";
+
+    uint8_t buf[2];
+    wear_leveling_read(0x01, buf, sizeof(buf));
+    EXPECT_EQ(buf[0], 0x11) << "Readback should have maintained the previous pre-failure value from the write log";
+    EXPECT_EQ(buf[1], 0x12) << "Readback should have maintained the previous pre-failure value from the write log";
+}
+
+/**
+ * This test verifies optimized 64 readback gets canceled with an out-of-bounds address.
+ */
+TEST_F(WearLeveling2Byte, PlaybackReadbackOptimized64_OOB) {
+    auto& inst = MockBackingStore::Instance();
+    auto logstart = inst.storage_begin() + (WEAR_LEVELING_LOGICAL_SIZE / sizeof(backing_store_int_t));
+
+    // Invalid FNV1a_64 hash
+    (logstart + 0)->set(0);
+    (logstart + 1)->set(0);
+    (logstart + 2)->set(0);
+    (logstart + 3)->set(0);
+
+    // Set up a 1-byte logical write of 0x11 at logical offset 0x01
+    auto entry0 = LOG_ENTRY_MAKE_OPTIMIZED_64(0x01, 0x11);
+    (logstart + 4)->set(~entry0.raw16[0]);
+
+    // Set up a 1-byte logical write of 0x11 at logical offset 0x30 (out of bounds)
+    auto entry1 = LOG_ENTRY_MAKE_OPTIMIZED_64(0x30, 0x11);
+    (logstart + 5)->set(~entry1.raw16[0]);
+
+    // Set up a 1-byte logical write of 0x12 at logical offset 0x01
+    auto entry2 = LOG_ENTRY_MAKE_OPTIMIZED_64(0x01, 0x12);
+    (logstart + 6)->set(~entry2.raw16[0]);
+
+    EXPECT_EQ(inst.erasure_count(), 0) << "Invalid initial erase count";
+    EXPECT_EQ(wear_leveling_init(), WEAR_LEVELING_CONSOLIDATED) << "Readback should have failed and triggered consolidation";
+    EXPECT_EQ(inst.erasure_count(), 1) << "Invalid final erase count";
+    uint8_t tmp;
+    wear_leveling_read(0x01, &tmp, sizeof(tmp));
+    EXPECT_EQ(tmp, 0x11) << "Readback should have maintained the previous pre-failure value from the write log";
+}
+
+/**
+ * This test verifies word 0/1 readback gets canceled with an out-of-bounds address.
+ */
+TEST_F(WearLeveling2Byte, PlaybackReadbackWord01_OOB) {
+    auto& inst = MockBackingStore::Instance();
+    auto logstart = inst.storage_begin() + (WEAR_LEVELING_LOGICAL_SIZE / sizeof(backing_store_int_t));
+
+    // Invalid FNV1a_64 hash
+    (logstart + 0)->set(0);
+    (logstart + 1)->set(0);
+    (logstart + 2)->set(0);
+    (logstart + 3)->set(0);
+
+    // Set up a 1-byte logical write of 1 at logical offset 0x02
+    auto entry0 = LOG_ENTRY_MAKE_WORD_01(0x02, 1);
+    (logstart + 4)->set(~entry0.raw16[0]);
+
+    // Set up a 1-byte logical write of 1 at logical offset 0x1000 (out of bounds)
+    auto entry1 = LOG_ENTRY_MAKE_WORD_01(0x1000, 1);
+    (logstart + 5)->set(~entry1.raw16[0]);
+
+    // Set up a 1-byte logical write of 0 at logical offset 0x02
+    auto entry2 = LOG_ENTRY_MAKE_WORD_01(0x02, 0);
+    (logstart + 6)->set(~entry2.raw16[0]);
+
+    EXPECT_EQ(inst.erasure_count(), 0) << "Invalid initial erase count";
+    EXPECT_EQ(wear_leveling_init(), WEAR_LEVELING_CONSOLIDATED) << "Readback should have failed and triggered consolidation";
+    EXPECT_EQ(inst.erasure_count(), 1) << "Invalid final erase count";
+    uint8_t tmp;
+    wear_leveling_read(0x02, &tmp, sizeof(tmp));
+    EXPECT_EQ(tmp, 1) << "Readback should have maintained the previous pre-failure value from the write log";
+}
diff --git a/quantum/wear_leveling/tests/wear_leveling_2byte_optimized_writes.cpp b/quantum/wear_leveling/tests/wear_leveling_2byte_optimized_writes.cpp
new file mode 100644
index 0000000000..0b03113c89
--- /dev/null
+++ b/quantum/wear_leveling/tests/wear_leveling_2byte_optimized_writes.cpp
@@ -0,0 +1,295 @@
+// Copyright 2022 Nick Brassel (@tzarc)
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <numeric>
+#include "gtest/gtest.h"
+#include "gmock/gmock.h"
+#include "backing_mocks.hpp"
+
+class WearLeveling2ByteOptimizedWrites : public ::testing::Test {
+  protected:
+    void SetUp() override {
+        MockBackingStore::Instance().reset_instance();
+        wear_leveling_init();
+    }
+};
+
+static std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> verify_data;
+
+static wear_leveling_status_t test_write(const uint32_t address, const void* value, size_t length) {
+    memcpy(&verify_data[address], value, length);
+    return wear_leveling_write(address, value, length);
+}
+
+/**
+ * This test ensures the correct number of backing store writes occurs with a multibyte write, given the input buffer size.
+ */
+TEST_F(WearLeveling2ByteOptimizedWrites, MultibyteBackingStoreWriteCounts) {
+    auto& inst = MockBackingStore::Instance();
+
+    for (std::size_t length = 1; length <= 5; ++length) {
+        // Clear things out
+        std::fill(verify_data.begin(), verify_data.end(), 0);
+        inst.reset_instance();
+        wear_leveling_init();
+
+        // Generate a test block of data
+        std::vector<std::uint8_t> testvalue(length);
+        std::iota(testvalue.begin(), testvalue.end(), 0x20);
+
+        // Write the data
+        EXPECT_EQ(test_write(2000, testvalue.data(), testvalue.size()), WEAR_LEVELING_SUCCESS) << "Write failed with incorrect status";
+
+        std::size_t expected;
+        if (length > 3) {
+            expected = 4;
+        } else if (length > 1) {
+            expected = 3;
+        } else {
+            expected = 2;
+        }
+
+        // Check that we got the expected number of write log entries
+        EXPECT_EQ(std::distance(inst.log_begin(), inst.log_end()), expected);
+    }
+}
+
+/**
+ * This test runs through writing U16 values of `0` or `1` over the entire logical address range, to even addresses only.
+ * - Addresses <16384 will result in a single optimised backing write
+ * - Higher addresses will result in a multibyte write of 3 backing writes
+ */
+TEST_F(WearLeveling2ByteOptimizedWrites, WriteOneThenZeroToEvenAddresses) {
+    auto& inst = MockBackingStore::Instance();
+
+    // Only attempt writes for each address up to a limit that would NOT force a consolidated data write.
+    std::size_t writes_per_loop = (MOCK_WRITE_LOG_MAX_ENTRIES::value / 6) - 1; // Worst case is 6 writes for each pair of writes of 0/1
+    std::size_t final_address;
+    for (uint32_t address = 0; address < WEAR_LEVELING_LOGICAL_SIZE; address += (writes_per_loop * 2)) {
+        // Clear things out
+        std::fill(verify_data.begin(), verify_data.end(), 0);
+        inst.reset_instance();
+        wear_leveling_init();
+
+        // Loop through all the addresses in this range
+        std::size_t expected = 0;
+        for (uint32_t offset = 0; offset < (writes_per_loop * 2); offset += 2) {
+            // If we're about to exceed the limit of the logical store, skip the writes
+            if (address + offset + 2 > WEAR_LEVELING_LOGICAL_SIZE) {
+                break;
+            }
+
+            // The default erased value of the wear-leveling cache is zero, so we write a one first, then a zero, to ensure a backing store write occurs.
+            uint16_t val = 1;
+            EXPECT_EQ(test_write(address + offset, &val, sizeof(val)), WEAR_LEVELING_SUCCESS) << "Write failed with incorrect status";
+            val = 0;
+            EXPECT_EQ(test_write(address + offset, &val, sizeof(val)), WEAR_LEVELING_SUCCESS) << "Write failed with incorrect status";
+
+            std::size_t backing_store_writes_expected = 0;
+            if (address + offset < 16384) {
+                // A U16 value of 0/1 at an even address <16384 will result in 1 backing write each, so we need 2 backing writes for 2 logical writes
+                backing_store_writes_expected = 2;
+            } else {
+                // All other addresses result in a multibyte write (3 backing store writes) to write two local bytes of data
+                backing_store_writes_expected = 6;
+            }
+
+            // Keep track of the total number of expected writes to the backing store
+            expected += backing_store_writes_expected;
+
+            // Verify we're at the correct number of writes
+            EXPECT_EQ(std::distance(inst.log_begin(), inst.log_end()), expected) << "Write log doesn't match required number of backing store writes for address " << (address + offset);
+
+            // Verify that the write log entries we expect are actually present
+            std::size_t write_index = expected - backing_store_writes_expected;
+            auto write_iter = inst.log_begin() + write_index;
+            write_log_entry_t e;
+            if (address + offset < 16384) {
+                // A U16 value of 0/1 at an even address <16384 will result in 1 backing write each, so we need 2 backing writes for 2 logical writes
+                for (std::size_t i = 0; i < 2; ++i) {
+                    e.raw16[0] = write_iter->value;
+                    EXPECT_EQ(LOG_ENTRY_GET_TYPE(e), LOG_ENTRY_TYPE_WORD_01) << "Invalid write log entry type at " << (address + offset);
+                    ++write_iter;
+                }
+            } else {
+                // Multibyte write
+                e.raw16[0] = write_iter->value;
+                EXPECT_EQ(LOG_ENTRY_GET_TYPE(e), LOG_ENTRY_TYPE_MULTIBYTE) << "Invalid write log entry type at " << (address + offset);
+                EXPECT_EQ(LOG_ENTRY_MULTIBYTE_GET_LENGTH(e), 2) << "Invalid write log entry length at " << (address + offset);
+                ++write_iter;
+            }
+
+            // Keep track of the final address written, so we can verify the entire logical range was handled
+            final_address = address + offset;
+        }
+
+        // Verify the number of writes that occurred to the backing store
+        size_t backing_write_count = std::distance(inst.log_begin(), inst.log_end());
+        EXPECT_EQ(backing_write_count, expected) << "Invalid write count at address " << address;
+
+        // Verify the data is what we expected
+        std::array<std::uint8_t, WEAR_LEVELING_LOGICAL_SIZE> readback;
+        EXPECT_EQ(wear_leveling_read(0, readback.data(), WEAR_LEVELING_LOGICAL_SIZE), WEAR_LEVELING_SUCCESS) << "Failed to read back the saved data";
+        EXPECT_TRUE(memcmp(readback.data(), verify_data.data(), WEAR_LEVELING_LOGICAL_SIZE) == 0) << "Readback for address " << address << " did not match";
+
+        // Re-init and re-read, testing the reload capability
+        EXPECT_NE(wear_leveling_init(), WEAR_LEVELING_FAILED) << "Re-initialisation failed";
+        EXPECT_EQ(wear_leveling_read(0, readback.data(), WEAR_LEVELING_LOGICAL_SIZE), WEAR_LEVELING_SUCCESS) << "Failed to read back the saved data";
+        EXPECT_TRUE(memcmp(readback.data(), verify_data.data(), WEAR_LEVELING_LOGICAL_SIZE) == 0) << "Readback for address " << address << " did not match";
+    }
+
+    // Verify the full range of the logical area got written
+    EXPECT_EQ(final_address, WEAR_LEVELING_LOGICAL_SIZE - 2) << "Invalid final write address";
+}
+
+/**
+ * This test runs through writing U16 values of `0` or `1` over the entir