C/C++ API Reference
Loading...
Searching...
No Matches
tlsf_allocator.h
1// Copyright 2024 The Pigweed Authors
2//
3// Licensed under the Apache License, Version 2.0 (the "License"); you may not
4// use this file except in compliance with the License. You may obtain a copy of
5// the License at
6//
7// https://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12// License for the specific language governing permissions and limitations under
13// the License.
14#pragma once
15
#include <array>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <tuple>
#include <utility>

#include "lib/stdcompat/bit.h"
#include "pw_allocator/block/detailed_block.h"
#include "pw_allocator/block_allocator.h"
#include "pw_allocator/bucket/fast_sorted.h"
#include "pw_allocator/bucket/sorted.h"
24
25namespace pw::allocator {
26
28
30template <typename OffsetType>
32
39 static constexpr size_t kMinSize = 64;
40
43 static constexpr size_t kNumShelves = 10;
44};
45
/// Pair used to index a bucket in a two-dimensional array of buckets:
/// `shelf` selects the row (size class doubling per shelf) and `bucket`
/// selects the column within that shelf.
struct TlsfIndices {
  size_t shelf;
  size_t bucket;
};
51
84template <typename BlockType = TlsfBlock<uint32_t>,
85 size_t kMinSize = TlsfDefaults::kMinSize,
86 size_t kNumShelves = TlsfDefaults::kNumShelves>
87class TlsfAllocator : public BlockAllocator<BlockType> {
88 private:
90
91 static constexpr size_t kNumBucketsPerShelf = 16;
92 static constexpr size_t kBucketBits =
93 internal::CountRZero(kNumBucketsPerShelf);
94
97 using Shelf = std::array<LargeBucket, kNumBucketsPerShelf>;
98
99 static_assert(kMinSize >= kNumBucketsPerShelf,
100 "kMinSize must be at least 16.");
101 static_assert(
102 kMinSize >= sizeof(GenericFastSortedItem),
103 "kMinSize must be large enough to hold a FastSortedBucket item.");
104 static_assert((kMinSize & (kMinSize - 1)) == 0,
105 "kMinSize must be a power of two.");
106
107 static_assert(kNumShelves <= 32, "kNumShelves cannot be larger than 32");
108
109 public:
111 constexpr TlsfAllocator();
112
118 explicit TlsfAllocator(ByteSpan region) : TlsfAllocator() {
119 Base::Init(region);
120 }
121
122 private:
124 size_t DoGetMaxAllocatable() override;
125
128
130 void ReserveBlock(BlockType& block) override;
131
133 void RecycleBlock(BlockType& block) override;
134
137 static TlsfIndices MapToIndices(size_t size);
138
143 bool FindNextAvailable(TlsfIndices& indices);
144
147 void UpdateBitmaps(const TlsfIndices& indices, bool empty);
148
149 uint32_t shelf_bitmap_ = 0;
150 std::array<uint16_t, kNumShelves> bucket_bitmaps_;
151 std::array<Shelf, kNumShelves> shelves_;
152 SmallBucket small_bucket_;
153};
154
156
157// Template method implementations.
158
159template <typename BlockType, size_t kMinSize, size_t kNumShelves>
161 size_t size = kMinSize;
162 size_t step = kMinSize / kNumBucketsPerShelf;
163 for (Shelf& shelf : shelves_) {
164 for (LargeBucket& bucket : shelf) {
165 size += step;
166 bucket.set_max_inner_size(size - 1);
167 }
168 step *= 2;
169 }
170
171 // The largest bucket is unbounded.
172 LargeBucket& largest = shelves_[kNumShelves - 1][kNumBucketsPerShelf - 1];
173 largest.set_max_inner_size(std::numeric_limits<size_t>::max());
174
175 bucket_bitmaps_.fill(0);
176}
177
178template <typename BlockType, size_t kMinSize, size_t kNumShelves>
180 size_t shelf_index =
181 shelf_bitmap_ == 0 ? 0 : (31 - internal::CountLZero(shelf_bitmap_));
182 uint16_t bucket_bitmap = bucket_bitmaps_[shelf_index];
183 size_t bucket_index =
184 bucket_bitmap == 0 ? 0 : (15 - internal::CountLZero(bucket_bitmap));
185 const LargeBucket& bucket = shelves_[shelf_index][bucket_index];
186 const BlockType* largest =
187 bucket.empty() ? small_bucket_.FindLargest() : bucket.FindLargest();
188 return largest == nullptr ? 0 : largest->InnerSize();
189}
190
191template <typename BlockType, size_t kMinSize, size_t kNumShelves>
194 // Check the small bucket.
195 if (layout.size() < small_bucket_.max_inner_size()) {
196 BlockType* block = small_bucket_.RemoveCompatible(layout);
197 if (block != nullptr) {
198 return BlockType::AllocFirst(std::move(block), layout);
199 }
200 }
201
202 // Check the buckets on the shelves.
203 for (TlsfIndices indices = MapToIndices(layout.size());
204 FindNextAvailable(indices);
205 indices.bucket++) {
206 LargeBucket& bucket = shelves_[indices.shelf][indices.bucket];
207 BlockType* block = bucket.RemoveCompatible(layout);
208 if (block != nullptr) {
209 UpdateBitmaps(indices, bucket.empty());
210 return BlockType::AllocFirst(std::move(block), layout);
211 }
212 }
213
214 // No sufficiently large block found.
215 return BlockResult<BlockType>(nullptr, Status::NotFound());
216}
217
218template <typename BlockType, size_t kMinSize, size_t kNumShelves>
220 BlockType& block) {
221 if (block.InnerSize() < sizeof(typename LargeBucket::ItemType)) {
222 std::ignore = small_bucket_.Remove(block);
223 return;
224 }
225 TlsfIndices indices = MapToIndices(block.InnerSize());
226 LargeBucket& large_bucket = shelves_[indices.shelf][indices.bucket];
227 if (large_bucket.Remove(block)) {
228 UpdateBitmaps(indices, large_bucket.empty());
229 }
230}
231
232template <typename BlockType, size_t kMinSize, size_t kNumShelves>
234 BlockType& block) {
235 if (block.InnerSize() < sizeof(typename LargeBucket::ItemType)) {
236 std::ignore = small_bucket_.Add(block);
237 return;
238 }
239 TlsfIndices indices = MapToIndices(block.InnerSize());
240 LargeBucket& large_bucket = shelves_[indices.shelf][indices.bucket];
241 std::ignore = large_bucket.Add(block);
242 UpdateBitmaps(indices, false);
243}
244
245template <typename BlockType, size_t kMinSize, size_t kNumShelves>
247 size_t size) {
248 if (size <= kMinSize) {
249 return TlsfIndices{.shelf = 0, .bucket = 0};
250 }
251
252 // Most significant bit set determines the shelf.
253 auto shelf = internal::CountRZero(cpp20::bit_floor(size));
254 // Each shelf has 16 buckets, so next 4 bits determine the bucket.
255 auto bucket = static_cast<uint16_t>((size >> (shelf - kBucketBits)) & 0xF);
256
257 // Adjust for minimum size, and clamp to the valid range.
258 shelf -= internal::CountRZero(kMinSize);
259 if (shelf >= kNumShelves) {
260 shelf = kNumShelves - 1;
261 bucket = kNumBucketsPerShelf - 1;
262 }
263 return TlsfIndices{.shelf = static_cast<uint32_t>(shelf), .bucket = bucket};
264}
265
266template <typename BlockType, size_t kMinSize, size_t kNumShelves>
267bool TlsfAllocator<BlockType, kMinSize, kNumShelves>::FindNextAvailable(
268 TlsfIndices& indices) {
269 // Are we past the end of a shelf? If so, move up a shelf.
270 if (indices.bucket == kNumBucketsPerShelf) {
271 indices.shelf++;
272 indices.bucket = 0;
273 }
274
275 // Have we passed the top shelf? If so, no larger blocks are available.
276 if (indices.shelf >= kNumShelves) {
277 return false;
278 }
279
280 // Use the bitmaps to find the next largest non-empty bucket.
281 uint16_t bucket_bitmap =
282 bucket_bitmaps_[indices.shelf] & (~uint32_t(0) << indices.bucket);
283 if (bucket_bitmap != 0) {
284 // There's at least one non-empty bucket on the current shelf whose
285 // blocks are at least as large as the requested size.
286 indices.bucket = internal::CountRZero(bucket_bitmap);
287 return true;
288 }
289
290 // The buckets for large enough blocks on this shelf are all empty.
291 // Move up to the first shelf with non-empty buckets and find the
292 // non-empty bucket with the smallest blocks.
293 uint32_t shelf_bitmap = shelf_bitmap_ & (~uint32_t(0) << (indices.shelf + 1));
294 if (shelf_bitmap != 0) {
295 indices.shelf = internal::CountRZero(shelf_bitmap);
296 indices.bucket = internal::CountRZero(bucket_bitmaps_[indices.shelf]);
297 return true;
298 }
299
300 // No larger blocks are available.
301 return false;
302}
303
304template <typename BlockType, size_t kMinSize, size_t kNumShelves>
305void TlsfAllocator<BlockType, kMinSize, kNumShelves>::UpdateBitmaps(
306 const TlsfIndices& indices, bool empty) {
307 auto bucket_bitmap = static_cast<uint16_t>(1 << indices.bucket);
308 if (empty) {
309 bucket_bitmaps_[indices.shelf] &= ~bucket_bitmap;
310 } else {
311 bucket_bitmaps_[indices.shelf] |= bucket_bitmap;
312 }
313
314 uint32_t shelf_bitmap = uint32_t(1) << indices.shelf;
315 if (bucket_bitmaps_[indices.shelf] == 0) {
316 shelf_bitmap_ &= ~shelf_bitmap;
317 } else {
318 shelf_bitmap_ |= shelf_bitmap;
319 }
320}
321
322} // namespace pw::allocator
Definition: block_allocator.h:106
void Init(ByteSpan region)
Definition: block_allocator.h:310
Definition: result.h:116
Definition: detailed_block.h:88
Definition: fast_sorted.h:65
Definition: fast_sorted.h:35
Definition: sorted.h:116
Definition: layout.h:58
Definition: tlsf_allocator.h:87
size_t DoGetMaxAllocatable() override
Definition: tlsf_allocator.h:179
BlockResult< BlockType > ChooseBlock(Layout layout) override
Definition: tlsf_allocator.h:193
TlsfAllocator(ByteSpan region)
Definition: tlsf_allocator.h:118
void ReserveBlock(BlockType &block) override
Definition: tlsf_allocator.h:219
constexpr TlsfAllocator()
Constexpr constructor. Callers must explicitly call Init.
Definition: tlsf_allocator.h:160
void RecycleBlock(BlockType &block) override
Definition: tlsf_allocator.h:233
constexpr void set_max_inner_size(size_t max_inner_size)
Definition: base.h:78
bool Remove(BlockType &block)
Definition: base.h:118
const BlockType * FindLargest() const
Definition: base.h:103
bool Add(BlockType &block)
Definition: base.h:89
constexpr bool empty() const
Returns whether this buckets contains any free blocks.
Definition: base.h:70
BlockType * RemoveCompatible(Layout layout)
Definition: base.h:129
constexpr U CountRZero(T t)
Definition: base.h:224
constexpr U CountLZero(T t)
Definition: base.h:233
Definition: fast_sorted.h:53
Definition: tlsf_allocator.h:36
static constexpr size_t kMinSize
Definition: tlsf_allocator.h:39
static constexpr size_t kNumShelves
Definition: tlsf_allocator.h:43
Pair used to index a bucket in a two dimensional array.
Definition: tlsf_allocator.h:47