1 /**
2  * Composable allocators
3  *
4  * This module uses a composing system - allocators implementing various
5  * strategies allocate memory in bulk from another backend allocator,
6  * "chained" in as a template alias or string parameter.
7  *
8  * Various allocation strategies allow for various capabilities - e.g.
9  * some strategies may not keep metadata required to free the memory of
10  * individual instances. Code should test the presence of primitives
11  * (methods in allocator mixin instances) accordingly.
12  *
13  * Most allocators have two parts: data and implementation. The split is
14  * done to allow all implementation layers to share the same "this"
15  * pointer. Each "Impl" part takes its "data" part as an alias.
16  *
17  * The underlying allocator (or its implementation instantiation) is
18  * passed in as an alias template parameter. This means that it has to be
19  * a symbol, the address of which is known in the scope of the allocator
20  * - thus, something scoped in the same class/struct, or a global variable.
21  *
22  * Allocator kinds:
23  *
 * * Homogeneous allocators, once instantiated, can only allocate values
 *   of the type specified in the template parameter. Attempting to
 *   allocate a different type will result in a compile-time error.
 *
 * * Heterogeneous allocators are not bound to one type. One instance can
 *   allocate values of multiple types.
30  *
31  * Allocator primitives:
32  *
33  * allocate
34  *   Return a pointer to a new instance.
35  *   The returned object is not initialized.
36  *   The only mandatory primitive.
37  *
38  * create
39  *   Allocate and initialize/construct a new instance of T, with the
40  *   supplied parameters.
41  *
42  * free
43  *   Free memory at the given pointer, assuming it was allocated by the
44  *   same allocator.
45  *
46  * destroy
47  *   Finalize and free the given pointer.
48  *
49  * allocateMany
50  *   Allocate an array of values, with the given size. Allocators which
51  *   support this primitive are referred to as "bulk allocators".
52  *
53  * freeMany
54  *   Free memory for the given array of values.
55  *
56  * resize
57  *   Resize an array of values. Reallocate if needed.
58  *
59  * freeAll
60  *   Free all memory allocated using the given allocator, at once.
61  *   Deallocates storage from underlying allocator, if applicable.
62  *
63  * clear
64  *   Mark all memory allocated by the top-layer allocator as free.
65  *   Does not deallocate memory from underlying allocator.
66  *
67  * References:
68  *   http://accu.org/content/conf2008/Alexandrescu-memory-allocation.screen.pdf
69  *
70  * License:
71  *   This Source Code Form is subject to the terms of
72  *   the Mozilla Public License, v. 2.0. If a copy of
73  *   the MPL was not distributed with this file, You
74  *   can obtain one at http://mozilla.org/MPL/2.0/.
75  *
76  * Authors:
77  *   Vladimir Panteleev <vladimir@thecybershadow.net>
78  */
79 
80 module ae.utils.alloc;
81 
82 import std.conv : emplace;
83 import std.traits : fullyQualifiedName;
84 
85 import ae.utils.meta : RefType, FromRefType, StorageType;
86 import ae.utils.meta.caps : haveFieldAliasBinding;
87 
/// Builds a code string declaring a forwarding alias for every member of
/// the given mixin/template member M. Serves as a stand-in for
/// "alias M this", which doesn't seem to work with mixins and templates.
static template mixAliasForward(alias M, string name = __traits(identifier, M))
{
	static string mixAliasForward()
	{
		import std.string : format;
		string code;
		foreach (memberName; __traits(allMembers, M))
			if (memberName.length)
				code ~= "alias %s.%s %s;\n".format(name, memberName, memberName);
		return code;
	}
}
102 
/// Instantiates a struct from a type containing a Data/Impl template pair.
///
/// The wrapper holds one copy of T.Data, instantiates T.Impl over that
/// field, and forwards all members of the implementation (and of T itself)
/// via generated aliases, producing a self-contained allocator value.
struct WrapParts(T)
{
	T.Data data;
	alias impl = T.Impl!data;
//	pragma(msg, __traits(identifier, impl));
//	pragma(msg, mixAliasForward!(impl, q{impl}));

	// Generate "alias impl.X X;" for every implementation member, plus
	// "alias T.X X;" for every member of T other than the Data/Impl pair
	// itself (the equivalent of "alias this" forwarding, which does not
	// work for these constructs).
	mixin({
		import std.string, std.algorithm, std.range;
		return
			chain(
				[__traits(allMembers, T.Impl!data)]
				.filter!(n => n.length)
				.map!(n => "alias %s.%s %s;\n".format("impl", n, n))
			,
				[__traits(allMembers, T)]
				.filter!(n => n.length)
				.filter!(n => n != "Impl")
				.filter!(n => n != "Data")
				.map!(n => "alias %s.%s %s;\n".format("T", n, n))
			)
			.join()
		;}()
	);
}
129 
/// Creates a template which, when instantiated, forwards its arguments to T
/// and uses WrapParts on the result.
///
/// Useful for passing a Data/Impl-style allocator where a plain
/// struct-producing template is expected.
template PartsWrapper(alias T)
{
	/// Eponymous inner template: applies the arguments, then wraps.
	template PartsWrapper(Args...)
	{
		alias PartsWrapper = WrapParts!(T!Args);
	}
}
139 
140 // TODO:
141 // - GROWFUN callable alias parameter instead of BLOCKSIZE?
142 // - Consolidate RegionAllocator and GrowingBufferAllocator
143 // - Add new primitive for bulk allocation which returns a range?
144 //   (to allow non-contiguous bulk allocation, but avoid
145 //   allocating an array of pointers to store the result)
146 // - Forbid allocating types with indirections when the base type
147 //   is not a pointer?
148 // - More thorough testing
149 
/// Common declarations for an allocator mixin.
/// The instantiation scope may declare ALLOCATOR_TYPE (the single type a
/// homogeneous allocator serves) and/or BASE_TYPE (the unit type used for
/// upstream/bulk allocations); for each one present, the corresponding
/// storage-type alias is derived here.
mixin template AllocatorCommon()
{
	static assert(haveFieldAliasBinding, "Your compiler doesn't support field alias template parameter binding, which is required for " ~ __MODULE__ ~ ".");

	alias ae.utils.meta.StorageType StorageType;

	// In-memory representation of ALLOCATOR_TYPE, if declared in context.
	static if (is(ALLOCATOR_TYPE))
		alias StorageType!ALLOCATOR_TYPE VALUE_TYPE;

	// In-memory representation of BASE_TYPE, if declared in context.
	static if (is(BASE_TYPE))
		alias StorageType!BASE_TYPE BASE_VALUE_TYPE;
}
163 
/// Default "create" implementation.
/// Obtains raw storage from the allocator's "allocate" primitive, then
/// constructs a T in place with the supplied arguments.
RefType!T create(T, A, Args...)(ref A a, Args args)
	if (is(typeof(a.allocate!T())))
{
	alias StorageType!T S;

	auto instance = a.allocate!T();
	auto storage = cast(void[])((cast(S*)instance)[0..1]);
	emplace!T(storage, args);
	return instance;
}
174 
/// Default "destroy" implementation.
/// Finalizes the value behind r, then releases its memory if the
/// allocator exposes a "free" primitive.
void destroy(R, A)(ref A a, R r)
//	TODO: contract
{
	// The druntime free function `clear` was renamed to `destroy`
	// (deprecated in 2.061 and since removed); qualify with `object.`
	// so the lookup does not recurse into this very function.
	object.destroy(r);
	static if (is(typeof(&a.free)))
		a.free(r);
}
182 
/// Creates T/R/V aliases from context, and checks ALLOCATOR_TYPE if appropriate.
///
/// Derivations (each only when not already in scope):
///   T - the user-facing type, recovered from R when only R is known;
///   R - the reference type used to hand out T (presumably T itself for
///       classes and T* for value types - see ae.utils.meta.RefType);
///   V - the storage type, i.e. T's in-memory representation.
mixin template AllocTypes()
{
	static if (is(R) && !is(T)) alias FromRefType!R T;
	static if (is(T) && !is(R)) alias RefType!T R;
	static if (is(T) && !is(V)) alias StorageType!T V;
	// Homogeneous allocators declare ALLOCATOR_TYPE; reject other types at compile time.
	static if (is(ALLOCATOR_TYPE)) static assert(is(ALLOCATOR_TYPE==T), "This allocator can " ~
		"only allocate instances of " ~ ALLOCATOR_TYPE.stringof ~ ", not " ~ T.stringof);
	static if (is(BASE_TYPE) && is(V))
	{
		// Number of BASE_TYPE units needed to hold one V (rounded up).
		enum ALLOC_SIZE = (V.sizeof + BASE_TYPE.sizeof-1) / BASE_TYPE.sizeof;
	}
}
196 
/// Allocator proxy which injects custom code after object creation.
/// Context of INIT_CODE:
///   p - newly-allocated value.
struct InitializingAllocatorProxy(string INIT_CODE, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	RefType!T allocate(T)()
	{
		// NB: the local must be named `p` - INIT_CODE is mixed in below
		// and refers to the freshly-allocated value by that name.
		auto p = ALLOCATOR.allocate!T();
		mixin(INIT_CODE);
		return p;
	}

	// TODO: Proxy other methods
}
213 
/// Allocator proxy which tallies the total number of bytes requested
/// through it before forwarding each request to the underlying allocator.
struct StatAllocatorProxy(alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	/// Running total of bytes requested via this proxy.
	size_t allocated;

	RefType!T allocate(T)()
	{
		enum requestSize = StorageType!T.sizeof;
		allocated += requestSize;
		return ALLOCATOR.allocate!T();
	}

	StorageType!T[] allocateMany(T)(size_t n)
	{
		enum unitSize = StorageType!T.sizeof;
		allocated += unitSize * n;
		return ALLOCATOR.allocateMany!T(n);
	}

	// TODO: Proxy other methods
}
235 
/// The internal unit allocation type of FreeListAllocator.
/// (Exposed to allow specializing underlying allocators on it.)
template FreeListNode(T)
{
	mixin AllocTypes;

	mixin template NodeContents()
	{
		V data;             /// Storage for one value.
		FreeListNode* next; /// Next free node
		/// Reinterpret a user-facing reference as its containing node.
		/// (Works because `data` sits at offset 0 in both layouts below.)
		static FreeListNode* fromRef(R r) { return cast(FreeListNode*)r; }
	}

	// Debug builds keep `data` and `next` side by side, so freed values
	// stay intact and inspectable; release builds overlap them, reusing
	// a freed node's storage for the free-list link.
	debug
		struct FreeListNode { mixin NodeContents; }
	else
		union  FreeListNode { mixin NodeContents; }
}
254 
255 
/// Homogeneous linked-list allocator.
/// Deletion is O(1); bulk allocation is not supported.
struct FreeListAllocator(ALLOCATOR_TYPE, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	alias FreeListNode!ALLOCATOR_TYPE Node;

	struct Data
	{
		Node* head = null; /// Head of the free-node list.
	}

	static template Impl(alias data)
	{
		RefType!T allocate(T)()
		{
			mixin AllocTypes;

			auto node = data.head;
			if (node)
			{
				// Reuse a previously-freed node.
				data.head = node.next;
				return cast(R)&node.data;
			}
			// Free list is empty - obtain a fresh node from the backend.
			auto fresh = ALLOCATOR.allocate!Node();
			return cast(R)&fresh.data;
		}

		void free(R)(R r)
		{
			// Push the node back onto the free list.
			auto node = Node.fromRef(r);
			node.next = data.head;
			data.head = node;
		}
	}
}
294 
/// Backend allocator which allocates from D's managed heap directly.
struct HeapAllocator
{
// static: // https://d.puremagic.com/issues/show_bug.cgi?id=12207
const:
	/// Allocate a single instance of T (default-initialized, not constructed).
	RefType!T allocate(T)()
	{
		return new T;
	}

	/// Allocate an array of n values.
	StorageType!T[] allocateMany(T)(size_t n)
	{
		return new StorageType!T[n];
	}

	/// Allocate and construct an instance of T with the given arguments.
	RefType!T create(T, A...)(A args)
	{
		return new T(args);
	}

	/// Resize an array; the GC reallocates as needed.
	V[] resize(V)(V[] v, size_t n)
	{
		v.length = n;
		return v;
	}

	/// Finalize and release a single instance.
	/// (The `delete` expression is deprecated and removed in current D;
	/// finalize explicitly, then return the memory to the GC.)
	void free(R)(R r)
	{
		import core.memory : GC;
		static if (is(R == class) || is(R == interface))
			object.destroy(r); // run the finalizer, as `delete` did
		else static if (is(typeof(*r)))
			object.destroy(*r); // run the destructor, as `delete` did
		GC.free(cast(void*)r);
	}
	alias free destroy;

	/// Release an array allocated with allocateMany.
	void freeMany(V)(V[] v)
	{
		import core.memory : GC;
		GC.free(v.ptr);
	}
}
332 
333 immutable HeapAllocator heapAllocator;
334 
/// Default "allocate" implementation for bulk allocators:
/// carve out a single instance as a one-element bulk allocation.
RefType!T allocate(T, A)(ref A a)
	if (is(typeof(&a.allocateMany!T)))
{
	auto one = a.allocateMany!T(1);
	return cast(RefType!T)one.ptr;
}
340 
/// Default "free" implementation for bulk allocators:
/// release a single instance as a one-element array.
void free(A, R)(ref A a, R r)
	if (is(typeof(&a.freeMany)))
{
	// Derive V (the storage type of the pointed-to value) from R.
	// Previously `V` was not declared in this scope at all, so any
	// instantiation of this function failed to compile.
	mixin AllocTypes;
	a.freeMany((cast(V*)r)[0..1]);
}
346 
/// Backend allocator using the Data type from ae.sys.data.
struct DataAllocator
{
	mixin AllocatorCommon;

	import ae.sys.data : SysData = Data;

	struct Data
	{
		// Singly-linked list node pairing each SysData allocation with a
		// link to the previously-allocated one.
		struct Node
		{
			Node* next;
			SysData data;
		}

		// Needed to make data referenced in Data instances reachable by the GC
		Node* root;
	}

	static template Impl(alias data)
	{
		import ae.sys.data : SysData = Data;

		StorageType!T[] allocateMany(T)(size_t n)
		{
			mixin AllocTypes;

			// Prepend a new node so the fresh SysData (and everything it
			// references) stays reachable until freeAll.
			data.root = new data.Node(data.root, SysData(V.sizeof * n));
			return cast(V[])data.root.data.mcontents;
		}

		void freeAll()
		{
			// Walk the node chain, releasing each SysData's contents and
			// unlinking the nodes as we go.
			while (data.root)
			{
				data.root.data.deleteContents();
				data.root = data.root.next;
			}
		}
	}
}
388 
/// Allocator proxy which registers each allocation as a GC scan range,
/// so pointers stored inside the allocated memory keep GC-owned objects alive.
struct GCRootAllocatorProxy(alias ALLOCATOR)
{
	mixin AllocatorCommon;

	import core.memory;

	StorageType!T[] allocateMany(T)(size_t n)
	{
		auto arr = ALLOCATOR.allocateMany!T(n);
		auto raw = cast(ubyte[])arr;
		GC.addRange(raw.ptr, raw.length);
		return arr;
	}

	void freeMany(V)(V[] v)
	{
		GC.removeRange(v.ptr);
		ALLOCATOR.freeMany(v);
	}
}
409 
/// Backend for direct OS page allocation.
struct PageAllocator
{
	version(Windows)
		import core.sys.windows.windows;
	else
	version(Posix)
		import core.sys.posix.sys.mman;

	/// Allocate n values directly from the OS (page-granular).
	StorageType!T[] allocateMany(T)(size_t n)
	{
		mixin AllocTypes;

		auto size = V.sizeof * n;

		version(Windows)
		{
			auto p = VirtualAlloc(null, size, MEM_COMMIT, PAGE_READWRITE);
		}
		else
		version(Posix)
		{
			auto p = mmap(null, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
			p = (p == MAP_FAILED) ? null : p;
		}

		if (!p && size)
		{
			// Previously a failed VirtualAlloc/mmap silently produced a
			// slice over a null pointer, deferring the crash to first use.
			import core.exception : onOutOfMemoryError;
			onOutOfMemoryError();
		}
		return (cast(V*)p)[0..n];
	}

	/// Return previously-allocated pages to the OS.
	void freeMany(V)(V[] v)
	{
		mixin AllocTypes;

		version(Windows)
			VirtualFree(v.ptr, 0, MEM_RELEASE);
		else
		version(Posix)
			munmap(v.ptr, v.length * V.sizeof);
	}
}
450 
/// Common code for pointer-bumping allocator implementations.
///
/// Context:
///   data.ptr - pointer to next free element
///   data.end - pointer to end of buffer
///   bufferExhausted - method called when ptr==end
///     (takes new size to allocate as parameter)
///   BLOCKSIZE - default parameter to bufferExhausted
mixin template PointerBumpCommon()
{
	/// Shared code for allocate / allocateMany.
	/// Context:
	///   data - alias to struct holding ptr and end
	///   Size - number of BASE_TYPE items to allocate
	///     (can be a constant or variable).
	private enum mixAllocateN =
	q{
		if (data.ptr + Size > data.end)
			bufferExhausted(Size > BLOCKSIZE ? Size : BLOCKSIZE);

		auto result = data.ptr[0..Size];
		data.ptr += Size;
	};

	RefType!T allocate(T)()
	{
		mixin AllocTypes;

		static if (ALLOC_SIZE == 1)
		{
			// Fast path for single-unit allocations.
			// (Was bare `ptr`/`end`, which are not in scope in any usage
			// context - the pointers live in `data`, as mixAllocateN assumes.)
			if (data.ptr == data.end)
				bufferExhausted(BLOCKSIZE);
			return cast(R)(data.ptr++);
		}
		else
		{
			enum Size = ALLOC_SIZE;
			mixin(mixAllocateN);
			return cast(R)result.ptr;
		}
	}

	StorageType!T[] allocateMany(T)(size_t n)
	{
		mixin AllocTypes;
		// Was `BASE.sizeof` - no symbol `BASE` exists in any context this
		// mixin is used in; the upstream unit type is BASE_TYPE.
		static assert(V.sizeof % BASE_TYPE.sizeof == 0, "Aligned/contiguous allocation impossible");
		auto Size = ALLOC_SIZE * n;
		mixin(mixAllocateN);
		return cast(V[])result;
	}
}
502 
/// Classic region.
/// Composes over another allocator, drawing storage in bulk
/// (a minimum of BLOCKSIZE units at a time).
/// No deletion, but is slightly faster than FreeListAllocator.
/// BASE_TYPE indicates the type used for upstream allocations.
/// It is not possible to bulk-allocate types smaller than BASE_TYPE,
/// or those the size of which is not divisible by BASE_TYPE's size.
/// (This restriction allows for allocations of single BASE_TYPE-sized items to be
/// a little faster.)
// TODO: support non-bulk allocators (without allocateMany support)?
struct RegionAllocator(BASE_TYPE=void*, size_t BLOCKSIZE=1024, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	struct Data
	{
		BASE_VALUE_TYPE* ptr=null, end=null; /// Bounds of the current block.
	}

	static template Impl(alias data)
	{
		/// Forget we ever allocated anything
		void reset() { data.ptr=data.end=null; }

		// Fetch a fresh block of `size` units from the backend allocator
		// and make it the current bump-allocation block.
		private void newBlock(size_t size)
		{
			BASE_VALUE_TYPE[] block = ALLOCATOR.allocateMany!BASE_TYPE(size);
			data.ptr = block.ptr;
			data.end = block.ptr + block.length;
		}

		alias newBlock bufferExhausted;
		mixin PointerBumpCommon;
	}
}
537 
/// Allocator proxy which keeps track of all allocations,
/// and implements freeAll by discarding them all at once
/// via the underlying allocator's freeMany.
struct TrackingAllocatorProxy(ALLOCATOR_TYPE, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	struct Data
	{
		VALUE_TYPE[][] blocks; /// Every live allocation. TODO: use linked list or something
	}

	static template Impl(alias data)
	{
		VALUE_TYPE[] allocateMany(T)(size_t n)
		{
			mixin AllocTypes;

			// Record the block so freeAll can find it later.
			auto block = ALLOCATOR.allocateMany!ALLOCATOR_TYPE(n);
			data.blocks ~= block;
			return block;
		}

		RefType!T allocate(T)()
		{
			mixin AllocTypes;

			return cast(R)allocateMany!T(1).ptr;
		}

		void freeAll()
		{
			// Hand every recorded block back to the backend, then forget them.
			foreach (block; data.blocks)
				ALLOCATOR.freeMany(block);
			data.blocks = null;
		}
	}
}
576 
/// Growing buffer bulk allocator.
/// Allows reusing the same buffer, which is grown and retained as needed.
/// Requires .resize support from underlying allocator.
/// Smaller buffers are discarded (neither freed nor reused).
struct GrowingBufferAllocator(BASE_TYPE=void*, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	struct Data
	{
		BASE_VALUE_TYPE* buf, ptr, end; /// Buffer start, bump pointer, buffer end.
	}

	static template Impl(alias data)
	{
		void bufferExhausted(size_t n)
		{
			import std.algorithm;
			// Grow to at least a page, at least double, and at least n units.
			auto grownSize = max(4096 / BASE_VALUE_TYPE.sizeof, (data.end - data.buf) * 2, n);
			auto offset = data.ptr - data.buf;
			auto grown = ALLOCATOR.resize(data.buf[0 .. data.end - data.buf], grownSize);
			data.buf = grown.ptr;
			data.end = grown.ptr + grown.length;
			data.ptr = grown.ptr + offset;
		}

		/// Mark all allocated memory as free, retaining the buffer.
		void clear()
		{
			data.ptr = data.buf;
		}

		enum BLOCKSIZE=0;
		mixin PointerBumpCommon;
	}
}
612 
/// Thrown when the buffer of an allocator is exhausted.
class BufferExhaustedException : Exception
{
	this()
	{
		super("Allocator buffer exhausted");
	}
}
615 
/// Homogeneous allocator which uses a given buffer.
/// Throws BufferExhaustedException if the buffer is exhausted.
struct BufferAllocator(BASE_TYPE=ubyte)
{
	mixin AllocatorCommon;

	struct Data
	{
		BASE_VALUE_TYPE* ptr=null, end=null; /// Bump pointer and buffer end.
	}

	static template Impl(alias data)
	{
		/// Point the allocator at the given buffer.
		void setBuffer(BASE_VALUE_TYPE[] buffer)
		{
			data.ptr = buffer.ptr;
			data.end = buffer.ptr + buffer.length;
		}

		this(BASE_VALUE_TYPE[] buf) { setBuffer(buf); }

		/// A fixed buffer cannot grow - running out of space is an error.
		static void bufferExhausted(size_t n)
		{
			throw new BufferExhaustedException();
		}

		enum BLOCKSIZE=0;
		mixin PointerBumpCommon;
	}
}
646 
/// Homogeneous allocator which uses a static buffer of a given size.
/// Throws BufferExhaustedException if the buffer is exhausted.
/// Needs to be manually initialized before use.
struct StaticBufferAllocator(size_t SIZE, BASE_TYPE=ubyte)
{
	mixin AllocatorCommon;

	struct Data
	{
		StorageType!BASE_TYPE[SIZE] buffer; /// In-struct storage.
		StorageType!BASE_TYPE* ptr; /// Next free position within the buffer.
		@property StorageType!BASE_TYPE* end() { return buffer.ptr + buffer.length; }
	}

	static template Impl(alias data)
	{
		/// Point the bump pointer at the start of the buffer.
		/// Must be called before the first allocation.
		void initialize()
		{
			data.ptr = data.buffer.ptr;
		}

		/// A static buffer cannot grow - running out of space is an error.
		void bufferExhausted(size_t n)
		{
			throw new BufferExhaustedException();
		}

		enum BLOCKSIZE=0;
		mixin PointerBumpCommon;

		alias initialize clear;
	}
}
679 
/// A bulk allocator which behaves like a StaticBufferAllocator initially,
/// but once the static buffer is exhausted, it switches to a fallback
/// bulk allocator.
/// Needs to be manually initialized before use.
/// ALLOCATOR is the fallback allocator.
struct HybridBufferAllocator(size_t SIZE, BASE_TYPE=ubyte, alias ALLOCATOR=heapAllocator)
{
	mixin AllocatorCommon;

	struct Data
	{
		BASE_VALUE_TYPE[SIZE] buffer; /// First-stage static storage.
		BASE_VALUE_TYPE* ptr, end;    /// Bump pointer and current block end.
	}

	static template Impl(alias data)
	{
		/// Aim the bump pointer at the static buffer.
		void initialize()
		{
			data.ptr = data.buffer.ptr;
			data.end = data.buffer.ptr + data.buffer.length;
		}

		/// Static buffer exhausted - switch to blocks from the fallback allocator.
		void bufferExhausted(size_t n)
		{
			auto block = ALLOCATOR.allocateMany!BASE_TYPE(n);
			data.ptr = block.ptr;
			data.end = block.ptr + block.length;
		}

		enum BLOCKSIZE = SIZE;
		mixin PointerBumpCommon;

		static if (is(typeof(&ALLOCATOR.clear)))
		{
			void clear()
			{
				// Still on the static buffer? Just rewind it;
				// otherwise, delegate to the fallback allocator.
				if (data.end == data.buffer.ptr + data.buffer.length)
					data.ptr = data.buffer.ptr;
				else
					ALLOCATOR.clear();
			}
		}
	}
}
725 
static if (haveFieldAliasBinding)
unittest
{
	// Minimal allocatable class exercising both default and
	// parameterized construction.
	static class C { int x=2; this() {} this(int p) { x = p; } }

	// Drives an allocator through the generic `create` primitive,
	// checking both constructor overloads. INIT is mixed-in setup code
	// (e.g. StaticBufferAllocator's mandatory initialize()).
	void testAllocator(A, string INIT="")()
	{
		A a;
		mixin(INIT);
		auto c1 = a.create!C();
		assert(c1.x == 2);

		auto c2 = a.create!C(5);
		assert(c2.x == 5);
	}

	testAllocator!(WrapParts!(FreeListAllocator!C))();
	testAllocator!(           HeapAllocator)();
	testAllocator!(WrapParts!(DataAllocator))();
	testAllocator!(           PageAllocator)();
	testAllocator!(WrapParts!(RegionAllocator!()))();
	testAllocator!(WrapParts!(GrowingBufferAllocator!()))();
	testAllocator!(WrapParts!(StaticBufferAllocator!1024), q{a.initialize();})();
	testAllocator!(WrapParts!(HybridBufferAllocator!1024))();
}