20#include "pw_allocator/block/detailed_block.h"
21#include "pw_allocator/block_allocator.h"
22#include "pw_allocator/bucket/fast_sorted.h"
23#include "pw_allocator/bucket/sorted.h"
25namespace pw::allocator {
28template <
typename OffsetType>
29using TlsfBlock = DetailedBlock<OffsetType, GenericFastSortedItem>;
82template <
typename BlockType = TlsfBlock<u
int32_t>,
83 size_t kMinSize = TlsfDefaults::kMinSize,
84 size_t kNumShelves = TlsfDefaults::kNumShelves>
90 static constexpr size_t kNumBucketsPerShelf = 16;
91 static constexpr size_t kBucketBits =
92 internal::CountRZero(kNumBucketsPerShelf);
93 using Shelf = std::array<BucketType, kNumBucketsPerShelf>;
95 static_assert(kMinSize >= kNumBucketsPerShelf,
96 "kMinSize must be at least 16.");
99 "kMinSize must be large enough to hold a FastSortedBucket item.");
100 static_assert((kMinSize & (kMinSize - 1)) == 0,
101 "kMinSize must be a power of two.");
103 static_assert(kNumShelves <= 32,
"kNumShelves cannot be larger than 32");
140 void UpdateBitmaps(
const TlsfIndices& indices,
bool empty);
142 uint32_t shelf_bitmap_ = 0;
143 std::array<uint16_t, kNumShelves> bucket_bitmaps_;
144 std::array<Shelf, kNumShelves> shelves_;
150template <
typename BlockType,
size_t kMinSize,
size_t kNumShelves>
152 size_t size = kMinSize;
153 size_t step = kMinSize / kNumBucketsPerShelf;
154 for (Shelf& shelf : shelves_) {
157 bucket.set_max_inner_size(size - 1);
163 BucketType& largest = shelves_[kNumShelves - 1][kNumBucketsPerShelf - 1];
166 bucket_bitmaps_.fill(0);
169template <
typename BlockType,
size_t kMinSize,
size_t kNumShelves>
173 if (layout.size() < small_bucket_.max_inner_size()) {
174 BlockType* block = small_bucket_.RemoveCompatible(layout);
175 if (block !=
nullptr) {
176 return BlockType::AllocFirst(std::move(block), layout);
181 for (
TlsfIndices indices = MapToIndices(layout.size());
182 FindNextAvailable(indices);
185 shelves_[indices.shelf][indices.bucket];
187 if (block !=
nullptr) {
188 UpdateBitmaps(indices, bucket.
empty());
189 return BlockType::AllocFirst(std::move(block), layout);
197template <
typename BlockType,
size_t kMinSize,
size_t kNumShelves>
200 if (block.InnerSize() <=
sizeof(
SortedItem)) {
201 std::ignore = small_bucket_.Remove(block);
204 TlsfIndices indices = MapToIndices(block.InnerSize());
206 shelves_[indices.shelf][indices.bucket];
207 if (large_bucket.
Remove(block)) {
208 UpdateBitmaps(indices, large_bucket.
empty());
212template <
typename BlockType,
size_t kMinSize,
size_t kNumShelves>
215 if (block.InnerSize() <=
sizeof(
SortedItem)) {
216 std::ignore = small_bucket_.Add(block);
219 TlsfIndices indices = MapToIndices(block.InnerSize());
221 shelves_[indices.shelf][indices.bucket];
222 std::ignore = large_bucket.
Add(block);
223 UpdateBitmaps(indices,
false);
226template <
typename BlockType,
size_t kMinSize,
size_t kNumShelves>
229 if (size <= kMinSize) {
234 auto shelf = internal::CountRZero(cpp20::bit_floor(size));
236 auto bucket =
static_cast<uint16_t
>((size >> (shelf - kBucketBits)) & 0xF);
239 shelf -= internal::CountRZero(kMinSize);
240 if (shelf >= kNumShelves) {
241 shelf = kNumShelves - 1;
242 bucket = kNumBucketsPerShelf - 1;
244 return TlsfIndices{.shelf =
static_cast<uint32_t
>(shelf), .bucket = bucket};
// Advances `indices` to the next (shelf, bucket) position at or after the
// given one whose bucket is known (via the bitmaps) to hold a free block.
// Two-level TLSF lookup: a 16-bit per-shelf bucket bitmap plus a 32-bit
// shelf bitmap (`shelf_bitmap_`), each searched with CountRZero to find the
// lowest set bit, i.e. the smallest adequate size class.
// NOTE(review): this listing is a documentation extraction; interior source
// lines (embedded numbering jumps 249->251->257->262, 267->274, and the
// trailing 278+) and the closing braces are missing, so the fragment below
// is incomplete as shown — verify against the real header before editing.
247template <
typename BlockType,
size_t kMinSize,
size_t kNumShelves>
248bool TlsfAllocator<BlockType, kMinSize, kNumShelves>::FindNextAvailable(
249    TlsfIndices& indices) {
// Bucket index ran off the end of the shelf; handling lines were dropped
// by the extraction (presumably wraps to the next shelf — TODO confirm).
251  if (indices.bucket == kNumBucketsPerShelf) {
// Past the last shelf: no suitable free block (body dropped by extraction).
257  if (indices.shelf >= kNumShelves) {
// Mask off buckets below `indices.bucket`, keeping only buckets on this
// shelf whose size class is >= the requested one.
262  uint16_t bucket_bitmap =
263      bucket_bitmaps_[indices.shelf] & (~uint32_t(0) << indices.bucket);
264  if (bucket_bitmap != 0) {
// Lowest set bit = smallest non-empty adequate bucket on this shelf.
267    indices.bucket = internal::CountRZero(bucket_bitmap);
// Nothing on this shelf: consult the shelf bitmap for strictly larger
// shelves (note the `+ 1` excludes the current shelf).
274  uint32_t shelf_bitmap = shelf_bitmap_ & (~uint32_t(0) << (indices.shelf + 1));
275  if (shelf_bitmap != 0) {
276    indices.shelf = internal::CountRZero(shelf_bitmap);
// Any bucket on a larger shelf is big enough, so take its lowest set bit.
277    indices.bucket = internal::CountRZero(bucket_bitmaps_[indices.shelf]);
// Records whether the bucket at `indices` became empty or non-empty,
// keeping the per-shelf bucket bitmap and the top-level shelf bitmap in
// sync: a shelf's bit in `shelf_bitmap_` is set iff any bucket bit on that
// shelf is set.
// NOTE(review): this listing is a documentation extraction; the `if
// (empty)` / `else` lines (orig. 289/291/293-294/298) and closing braces
// were dropped, so the clear (`&= ~`) and set (`|=`) statements below
// appear without their guarding branches — verify against the real header.
285template <
typename BlockType,
size_t kMinSize,
size_t kNumShelves>
286void TlsfAllocator<BlockType, kMinSize, kNumShelves>::UpdateBitmaps(
287    const TlsfIndices& indices,
bool empty) {
// Single-bit mask for this bucket within its shelf's 16-bit bitmap.
288  auto bucket_bitmap =
static_cast<uint16_t
>(1 << indices.bucket);
// Clear the bucket's bit (taken when `empty` — guard dropped by extraction).
290  bucket_bitmaps_[indices.shelf] &= ~bucket_bitmap;
// Set the bucket's bit (taken when not empty — guard dropped by extraction).
292  bucket_bitmaps_[indices.shelf] |= bucket_bitmap;
// Mirror shelf occupancy: clear the shelf's bit when no bucket on it has
// free blocks, otherwise set it.
295  uint32_t shelf_bitmap = uint32_t(1) << indices.shelf;
296  if (bucket_bitmaps_[indices.shelf] == 0) {
297    shelf_bitmap_ &= ~shelf_bitmap;
299  shelf_bitmap_ |= shelf_bitmap;
Definition: block_allocator.h:104
void Init(ByteSpan region)
Definition: block_allocator.h:281
Definition: fast_sorted.h:63
Definition: tlsf_allocator.h:85
BlockResult< BlockType > ChooseBlock(Layout layout) override
Definition: tlsf_allocator.h:171
TlsfAllocator(ByteSpan region)
Definition: tlsf_allocator.h:114
void ReserveBlock(BlockType &block) override
Definition: tlsf_allocator.h:198
constexpr TlsfAllocator()
Constexpr constructor. Callers must explicitly call Init.
Definition: tlsf_allocator.h:151
void RecycleBlock(BlockType &block) override
Definition: tlsf_allocator.h:213
constexpr void set_max_inner_size(size_t max_inner_size)
Definition: base.h:76
bool Remove(BlockType &block)
Definition: base.h:110
bool Add(BlockType &block)
Definition: base.h:87
constexpr bool empty() const
Returns whether this bucket contains any free blocks.
Definition: base.h:68
BlockType * RemoveCompatible(Layout layout)
Definition: base.h:121
Definition: fast_sorted.h:51
Definition: tlsf_allocator.h:34
static constexpr size_t kMinSize
Definition: tlsf_allocator.h:37
static constexpr size_t kNumShelves
Definition: tlsf_allocator.h:41
Pair used to index a bucket in a two dimensional array.
Definition: tlsf_allocator.h:45