Pigweed
C/C++ API Reference
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
Loading...
Searching...
No Matches
tlsf_allocator.h
// Copyright 2024 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#pragma once

#include <array>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <tuple>

#include "pw_allocator/block/detailed_block.h"
#include "pw_allocator/block_allocator.h"
#include "pw_allocator/bucket/fast_sorted.h"
#include "pw_allocator/bucket/sorted.h"
24
25namespace pw::allocator {
26
28template <typename OffsetType>
29using TlsfBlock = DetailedBlock<OffsetType, GenericFastSortedItem>;
30
/// Default parameters for a `TlsfAllocator`.
struct TlsfDefaults {
  /// Default smallest block inner size that is managed by the shelf/bucket
  /// scheme; smaller blocks go to the allocator's single "small" bucket.
  static constexpr size_t kMinSize = 64;

  /// Default number of shelves, i.e. power-of-two size ranges, each of which
  /// is subdivided into buckets.
  static constexpr size_t kNumShelves = 10;
};
43
/// Pair used to index a bucket in a two dimensional array.
struct TlsfIndices {
  size_t shelf;   // First-level index: which power-of-two size range.
  size_t bucket;  // Second-level index: which bucket within the shelf.
};
49
82template <typename BlockType = TlsfBlock<uint32_t>,
83 size_t kMinSize = TlsfDefaults::kMinSize,
84 size_t kNumShelves = TlsfDefaults::kNumShelves>
85class TlsfAllocator : public BlockAllocator<BlockType> {
86 private:
89
90 static constexpr size_t kNumBucketsPerShelf = 16;
91 static constexpr size_t kBucketBits =
92 internal::CountRZero(kNumBucketsPerShelf);
93 using Shelf = std::array<BucketType, kNumBucketsPerShelf>;
94
95 static_assert(kMinSize >= kNumBucketsPerShelf,
96 "kMinSize must be at least 16.");
97 static_assert(
98 kMinSize >= sizeof(GenericFastSortedItem),
99 "kMinSize must be large enough to hold a FastSortedBucket item.");
100 static_assert((kMinSize & (kMinSize - 1)) == 0,
101 "kMinSize must be a power of two.");
102
103 static_assert(kNumShelves <= 32, "kNumShelves cannot be larger than 32");
104
105 public:
107 constexpr TlsfAllocator();
108
114 explicit TlsfAllocator(ByteSpan region) : TlsfAllocator() {
115 Base::Init(region);
116 }
117
118 private:
121
123 void ReserveBlock(BlockType& block) override;
124
126 void RecycleBlock(BlockType& block) override;
127
130 static TlsfIndices MapToIndices(size_t size);
131
136 bool FindNextAvailable(TlsfIndices& indices);
137
140 void UpdateBitmaps(const TlsfIndices& indices, bool empty);
141
142 uint32_t shelf_bitmap_ = 0;
143 std::array<uint16_t, kNumShelves> bucket_bitmaps_;
144 std::array<Shelf, kNumShelves> shelves_;
145 ForwardSortedBucket<BlockType> small_bucket_;
146};
147
148// Template method implementations.
149
150template <typename BlockType, size_t kMinSize, size_t kNumShelves>
152 size_t size = kMinSize;
153 size_t step = kMinSize / kNumBucketsPerShelf;
154 for (Shelf& shelf : shelves_) {
155 for (BucketType& bucket : shelf) {
156 size += step;
157 bucket.set_max_inner_size(size - 1);
158 }
159 step *= 2;
160 }
161
162 // The largest bucket is unbounded.
163 BucketType& largest = shelves_[kNumShelves - 1][kNumBucketsPerShelf - 1];
164 largest.set_max_inner_size(std::numeric_limits<size_t>::max());
165
166 bucket_bitmaps_.fill(0);
167}
168
169template <typename BlockType, size_t kMinSize, size_t kNumShelves>
172 // Check the small bucket.
173 if (layout.size() < small_bucket_.max_inner_size()) {
174 BlockType* block = small_bucket_.RemoveCompatible(layout);
175 if (block != nullptr) {
176 return BlockType::AllocFirst(std::move(block), layout);
177 }
178 }
179
180 // Check the buckets on the shelves.
181 for (TlsfIndices indices = MapToIndices(layout.size());
182 FindNextAvailable(indices);
183 indices.bucket++) {
185 shelves_[indices.shelf][indices.bucket];
186 BlockType* block = bucket.RemoveCompatible(layout);
187 if (block != nullptr) {
188 UpdateBitmaps(indices, bucket.empty());
189 return BlockType::AllocFirst(std::move(block), layout);
190 }
191 }
192
193 // No sufficiently large block found.
194 return BlockResult<BlockType>(nullptr, Status::NotFound());
195}
196
197template <typename BlockType, size_t kMinSize, size_t kNumShelves>
199 BlockType& block) {
200 if (block.InnerSize() <= sizeof(SortedItem)) {
201 std::ignore = small_bucket_.Remove(block);
202 return;
203 }
204 TlsfIndices indices = MapToIndices(block.InnerSize());
205 FastSortedBucket<BlockType>& large_bucket =
206 shelves_[indices.shelf][indices.bucket];
207 if (large_bucket.Remove(block)) {
208 UpdateBitmaps(indices, large_bucket.empty());
209 }
210}
211
212template <typename BlockType, size_t kMinSize, size_t kNumShelves>
214 BlockType& block) {
215 if (block.InnerSize() <= sizeof(SortedItem)) {
216 std::ignore = small_bucket_.Add(block);
217 return;
218 }
219 TlsfIndices indices = MapToIndices(block.InnerSize());
220 FastSortedBucket<BlockType>& large_bucket =
221 shelves_[indices.shelf][indices.bucket];
222 std::ignore = large_bucket.Add(block);
223 UpdateBitmaps(indices, false);
224}
225
226template <typename BlockType, size_t kMinSize, size_t kNumShelves>
228 size_t size) {
229 if (size <= kMinSize) {
230 return TlsfIndices{.shelf = 0, .bucket = 0};
231 }
232
233 // Most significant bit set determines the shelf.
234 auto shelf = internal::CountRZero(cpp20::bit_floor(size));
235 // Each shelf has 16 buckets, so next 4 bits determine the bucket.
236 auto bucket = static_cast<uint16_t>((size >> (shelf - kBucketBits)) & 0xF);
237
238 // Adjust for minimum size, and clamp to the valid range.
239 shelf -= internal::CountRZero(kMinSize);
240 if (shelf >= kNumShelves) {
241 shelf = kNumShelves - 1;
242 bucket = kNumBucketsPerShelf - 1;
243 }
244 return TlsfIndices{.shelf = static_cast<uint32_t>(shelf), .bucket = bucket};
245}
246
247template <typename BlockType, size_t kMinSize, size_t kNumShelves>
248bool TlsfAllocator<BlockType, kMinSize, kNumShelves>::FindNextAvailable(
249 TlsfIndices& indices) {
250 // Are we past the end of a shelf? If so, move up a shelf.
251 if (indices.bucket == kNumBucketsPerShelf) {
252 indices.shelf++;
253 indices.bucket = 0;
254 }
255
256 // Have we passed the top shelf? If so, no larger blocks are available.
257 if (indices.shelf >= kNumShelves) {
258 return false;
259 }
260
261 // Use the bitmaps to find the next largest non-empty bucket.
262 uint16_t bucket_bitmap =
263 bucket_bitmaps_[indices.shelf] & (~uint32_t(0) << indices.bucket);
264 if (bucket_bitmap != 0) {
265 // There's at least one non-empty bucket on the current shelf whose
266 // blocks are at least as large as the requested size.
267 indices.bucket = internal::CountRZero(bucket_bitmap);
268 return true;
269 }
270
271 // The buckets for large enough blocks on this shelf are all empty.
272 // Move up to the first shelf with non-empty buckets and find the
273 // non-empty bucket with the smallest blocks.
274 uint32_t shelf_bitmap = shelf_bitmap_ & (~uint32_t(0) << (indices.shelf + 1));
275 if (shelf_bitmap != 0) {
276 indices.shelf = internal::CountRZero(shelf_bitmap);
277 indices.bucket = internal::CountRZero(bucket_bitmaps_[indices.shelf]);
278 return true;
279 }
280
281 // No larger blocks are available.
282 return false;
283}
284
285template <typename BlockType, size_t kMinSize, size_t kNumShelves>
286void TlsfAllocator<BlockType, kMinSize, kNumShelves>::UpdateBitmaps(
287 const TlsfIndices& indices, bool empty) {
288 auto bucket_bitmap = static_cast<uint16_t>(1 << indices.bucket);
289 if (empty) {
290 bucket_bitmaps_[indices.shelf] &= ~bucket_bitmap;
291 } else {
292 bucket_bitmaps_[indices.shelf] |= bucket_bitmap;
293 }
294
295 uint32_t shelf_bitmap = uint32_t(1) << indices.shelf;
296 if (bucket_bitmaps_[indices.shelf] == 0) {
297 shelf_bitmap_ &= ~shelf_bitmap;
298 } else {
299 shelf_bitmap_ |= shelf_bitmap;
300 }
301}
302
303} // namespace pw::allocator
Definition: block_allocator.h:104
void Init(ByteSpan region)
Definition: block_allocator.h:281
Definition: result.h:114
Definition: fast_sorted.h:63
Definition: sorted.h:103
Definition: layout.h:56
Definition: sorted.h:30
Definition: tlsf_allocator.h:85
BlockResult< BlockType > ChooseBlock(Layout layout) override
Definition: tlsf_allocator.h:171
TlsfAllocator(ByteSpan region)
Definition: tlsf_allocator.h:114
void ReserveBlock(BlockType &block) override
Definition: tlsf_allocator.h:198
constexpr TlsfAllocator()
Constexpr constructor. Callers must explicitly call Init.
Definition: tlsf_allocator.h:151
void RecycleBlock(BlockType &block) override
Definition: tlsf_allocator.h:213
constexpr void set_max_inner_size(size_t max_inner_size)
Definition: base.h:76
bool Remove(BlockType &block)
Definition: base.h:110
bool Add(BlockType &block)
Definition: base.h:87
constexpr bool empty() const
Returns whether this bucket contains any free blocks.
Definition: base.h:68
BlockType * RemoveCompatible(Layout layout)
Definition: base.h:121
Definition: fast_sorted.h:51
Definition: tlsf_allocator.h:34
static constexpr size_t kMinSize
Definition: tlsf_allocator.h:37
static constexpr size_t kNumShelves
Definition: tlsf_allocator.h:41
Pair used to index a bucket in a two dimensional array.
Definition: tlsf_allocator.h:45