Pigweed
C/C++ API Reference
dl_allocator.h
// Copyright 2024 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#pragma once

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <tuple>

#include "pw_allocator/block/detailed_block.h"
#include "pw_allocator/block_allocator.h"
#include "pw_allocator/bucket/fast_sorted.h"
#include "pw_allocator/bucket/unordered.h"
#include "pw_allocator/config.h"

namespace pw::allocator {

template <typename OffsetType>
using DlBlock = DetailedBlock<OffsetType, GenericFastSortedItem>;

template <typename BlockType = DlBlock<uintptr_t>>
class DlAllocator : public BlockAllocator<BlockType> {
 private:
  using Base = BlockAllocator<BlockType>;
  using FastBin = UnorderedBucket<BlockType>;
  using SmallBin = UnorderedBucket<BlockType>;
  using LargeBin = FastSortedBucket<BlockType>;

  static constexpr size_t kMinSize = 8;

  // Cache free blocks with sizes up to 80 bytes.
  static constexpr size_t kNumFastBins = 10;

  // The number of small bins must be a power of two.
  static constexpr size_t kNumSmallBins = 64;
  static_assert((kNumSmallBins & (kNumSmallBins - 1)) == 0);

  // The number of large bins is the sum of all powers of two smaller than the
  // number of small bins.
  static constexpr size_t kNumLargeBins = kNumSmallBins - 1;
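  // For example, with kNumSmallBins == 64 there are
  // 32 + 16 + 8 + 4 + 2 + 1 == 63 large bins.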

  // Bit maps are implemented as an array of `uintptr_t`s.
  static constexpr size_t kBitmapBits = std::numeric_limits<uintptr_t>::digits;
  static constexpr size_t kNumBitmaps =
      AlignUp(kNumSmallBins + kNumLargeBins, kBitmapBits) / kBitmapBits;

 public:
  /// Constexpr constructor. Callers must explicitly call `Init`.
  constexpr DlAllocator();

  DlAllocator(ByteSpan region) : DlAllocator() { Base::Init(region); }

  ~DlAllocator() override { Flush(); }

 private:
  BlockResult<BlockType> ChooseBlock(Layout layout) override;

  void ReserveBlock(BlockType& block) override;

  void RecycleBlock(BlockType& block) override;

  void Flush() override { ReleaseFastBins(); }

  void DeallocateBlock(BlockType*&& block) override;

  size_t GetIndex(size_t size);

  void ReleaseFastBins();

  bool FindNextAvailable(size_t& index);

  void UpdateBitmaps(size_t index, bool empty);

  std::array<FastBin, kNumFastBins> fast_bins_;
  std::array<SmallBin, kNumSmallBins> small_bins_;
  std::array<LargeBin, kNumLargeBins> large_bins_;
  std::array<uintptr_t, kNumBitmaps> bitmaps_;
};

// Template method implementations.

template <typename BlockType>
constexpr DlAllocator<BlockType>::DlAllocator() {
  size_t index = 0;
  size_t size = 0;
  size_t bin_size = kMinSize;
  size_t bins_in_round = kNumSmallBins;
  while (bins_in_round > 1) {
    for (size_t i = 0; i < bins_in_round; ++i) {
      size += bin_size;
      if (index < kNumFastBins) {
        fast_bins_[index].set_max_inner_size(size);
      }
      if (index < kNumSmallBins) {
        small_bins_[index].set_max_inner_size(size);
      } else {
        large_bins_[index - kNumSmallBins].set_max_inner_size(size);
      }
      ++index;
    }
    bin_size *= 8;
    bins_in_round /= 2;
  }
  bitmaps_.fill(0);
}

template <typename BlockType>
BlockResult<BlockType> DlAllocator<BlockType>::ChooseBlock(Layout layout) {
  layout = Layout(std::max(layout.size(), kMinSize), layout.alignment());
  size_t index = GetIndex(layout.size());

  if (index < kNumSmallBins) {
    // First, check if there's a chunk available in the fast bucket.
    if (index < kNumFastBins) {
      FastBin& fast_bin = fast_bins_[index];
      BlockType* block = fast_bin.RemoveCompatible(layout);
      if (block != nullptr) {
        return BlockResult<BlockType>(block);
      }
    }
    // If the corresponding small bucket is empty, release the fast bins to
    // consolidate chunks and maybe get the requested size.
    if (small_bins_[index].empty()) {
      ReleaseFastBins();
    }
  } else {
    // Always release fast bins on large requests.
    ReleaseFastBins();
  }

  // Check the small, fixed-size buckets.
  for (; FindNextAvailable(index) && index < kNumSmallBins; ++index) {
    SmallBin& small_bin = small_bins_[index];
    BlockType* block = small_bin.RemoveCompatible(layout);
    if (block != nullptr) {
      UpdateBitmaps(index, small_bin.empty());
      return BlockType::AllocFirst(std::move(block), layout);
    }
  }

  // Check the larger, sorted buckets.
  for (; FindNextAvailable(index); ++index) {
    LargeBin& large_bin = large_bins_[index - kNumSmallBins];
    BlockType* block = large_bin.RemoveCompatible(layout);
    if (block != nullptr) {
      UpdateBitmaps(index, large_bin.empty());
      return BlockType::AllocFirst(std::move(block), layout);
    }
  }

  // No sufficiently large block found.
  return BlockResult<BlockType>(nullptr, Status::NotFound());
}

template <typename BlockType>
void DlAllocator<BlockType>::ReserveBlock(BlockType& block) {
  size_t index = GetIndex(block.InnerSize());
  if (index < kNumSmallBins) {
    SmallBin& small_bin = small_bins_[index];
    std::ignore = small_bin.Remove(block);
    UpdateBitmaps(index, small_bin.empty());
  } else {
    LargeBin& large_bin = large_bins_[index - kNumSmallBins];
    std::ignore = large_bin.Remove(block);
    UpdateBitmaps(index, large_bin.empty());
  }
}

template <typename BlockType>
void DlAllocator<BlockType>::RecycleBlock(BlockType& block) {
  size_t index = GetIndex(block.InnerSize());
  if (index < kNumSmallBins) {
    std::ignore = small_bins_[index].Add(block);
  } else {
    std::ignore = large_bins_[index - kNumSmallBins].Add(block);
  }
  UpdateBitmaps(index, false);
}

template <typename BlockType>
void DlAllocator<BlockType>::DeallocateBlock(BlockType*&& block) {
  // Add small blocks to the fast bins without actually freeing them.
  size_t index = block->InnerSize() / kMinSize;
  if (index < kNumFastBins) {
    std::ignore = fast_bins_[index].Add(*block);
  } else {
    Base::DeallocateBlock(std::move(block));
  }
}

template <typename BlockType>
size_t DlAllocator<BlockType>::GetIndex(size_t size) {
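  // Bins are assigned in "rounds": the first round has kNumSmallBins (64)
  // bins spaced kMinSize (8) bytes apart, covering inner sizes up to 512
  // bytes; each later round multiplies the bin width by 8 and halves the bin
  // count. For example, a 24-byte block maps to index (24 - 8) / 8 = 2, and a
  // 600-byte block maps to index 64 + (600 - 8 - 512) / 64 = 65, i.e. the
  // second large bin.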
  size_t index = 0;
  size_t bin_size = kMinSize;
  size_t bins_in_round = kNumSmallBins;
  size = AlignUp(size, kMinSize) - kMinSize;
  while (bins_in_round > 1) {
    size_t round_size = bin_size * bins_in_round;
    if (size < round_size) {
      return index + (size / bin_size);
    }
    size -= round_size;
    index += bins_in_round;
    bin_size *= 8;
    bins_in_round /= 2;
  }
  return index;
}

template <typename BlockType>
void DlAllocator<BlockType>::ReleaseFastBins() {
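  // Return every cached block to the base allocator, which frees it for real
  // so that it can be consolidated with neighboring free blocks and recycled
  // into the small or large bins.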
  for (auto& fast_bin : fast_bins_) {
    while (!fast_bin.empty()) {
      BlockType* block = fast_bin.RemoveAny();
      if constexpr (Hardening::kIncludesDebugChecks) {
        PW_ASSERT(block != nullptr);
      }
      Base::DeallocateBlock(std::move(block));
    }
  }
}

template <typename BlockType>
bool DlAllocator<BlockType>::FindNextAvailable(size_t& index) {
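  // Each bit in `bitmaps_` marks whether the corresponding bin holds any free
  // blocks. Scan for the lowest-numbered non-empty bin at or after `index`,
  // updating `index` in place.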
  size_t bitmap_index = index / kBitmapBits;
  size_t bitmap_offset = index % kBitmapBits;
  uintptr_t bitmap = bitmaps_[bitmap_index] & (~uintptr_t(0) << bitmap_offset);
  while (bitmap == 0) {
    ++bitmap_index;
    if (bitmap_index >= kNumBitmaps) {
      return false;
    }
    bitmap = bitmaps_[bitmap_index];
  }
  auto bitmap_log2 = internal::CountRZero(bitmap);
  index = (bitmap_index * kBitmapBits) + bitmap_log2;
  return true;
}

template <typename BlockType>
void DlAllocator<BlockType>::UpdateBitmaps(size_t index, bool empty) {
  size_t bitmap_index = index / kBitmapBits;
  size_t bitmap_offset = index % kBitmapBits;
  uintptr_t bitmap = uintptr_t(1) << bitmap_offset;
  if (empty) {
    bitmaps_[bitmap_index] &= ~bitmap;
  } else {
    bitmaps_[bitmap_index] |= bitmap;
  }
}

}  // namespace pw::allocator
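
Usage example

A minimal sketch of how DlAllocator might be used; it is not part of the header above. It assumes the conventional include path "pw_allocator/dl_allocator.h" and the Allocate()/Deallocate() interface that DlAllocator inherits from pw::Allocator through BlockAllocator. The buffer size and names below are illustrative only.

#include <array>
#include <cstddef>

#include "pw_allocator/dl_allocator.h"

// Caller-owned memory region for the allocator to manage; it must outlive the
// allocator.
std::array<std::byte, 0x1000> buffer;

// The ByteSpan constructor calls Init() on the region, so the allocator is
// ready to use immediately. The empty angle brackets select the default
// DlBlock<uintptr_t> block type.
pw::allocator::DlAllocator<> allocator(buffer);

void Example() {
  // Request 64 bytes with 8-byte alignment; Allocate() returns nullptr when no
  // suitable block is available.
  void* ptr = allocator.Allocate(pw::allocator::Layout(64, 8));
  if (ptr != nullptr) {
    allocator.Deallocate(ptr);
  }
}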