1 /**
2  * Composable allocators
3  *
4  * This module uses a composing system - allocators implementing various
5  * strategies allocate memory in bulk from another backend allocator,
6  * "chained" in as a template alias or string parameter.
7  *
8  * Various allocation strategies allow for various capabilities - e.g.
9  * some strategies may not keep metadata required to free the memory of
10  * individual instances. Code should test the presence of primitives
11  * (methods in allocator mixin instances) accordingly.
12  *
13  * Most allocators have two parts: data and implementation. The split is
14  * done to allow all implementation layers to share the same "this"
15  * pointer. Each "Impl" part takes its "data" part as an alias.
16  *
17  * The underlying allocator (or its implementation instantiation) is
18  * passed in as an alias template parameter. This means that it has to be
19  * a symbol, the address of which is known in the scope of the allocator
20  * - thus, something scoped in the same class/struct, or a global variable.
21  *
22  * Allocator kinds:
23  *
 * * Homogenous allocators, once instantiated, can allocate values
 *   only of the type specified in the template parameter. Attempting to
 *   allocate a different type will result in a compile-time error.
27  *
28  * * Heterogenous allocators are not bound by one type. One instance can
29  *   allocate values of multiple types.
30  *
31  * Allocator primitives:
32  *
33  * allocate
34  *   Return a pointer to a new instance.
35  *   The returned object is not initialized.
36  *   The only mandatory primitive.
37  *
38  * create
39  *   Allocate and initialize/construct a new instance of T, with the
40  *   supplied parameters.
41  *
42  * free
43  *   Free memory at the given pointer, assuming it was allocated by the
44  *   same allocator.
45  *
46  * destroy
47  *   Finalize and free the given pointer.
48  *
49  * allocateMany
50  *   Allocate an array of values, with the given size. Allocators which
51  *   support this primitive are referred to as "bulk allocators".
52  *
53  * freeMany
54  *   Free memory for the given array of values.
55  *
56  * resize
57  *   Resize an array of values. Reallocate if needed.
58  *
59  * freeAll
60  *   Free all memory allocated using the given allocator, at once.
61  *   Deallocates storage from underlying allocator, if applicable.
62  *
63  * clear
64  *   Mark all memory allocated by the top-layer allocator as free.
65  *   Does not deallocate memory from underlying allocator.
66  *
67  * References:
68  *   http://accu.org/content/conf2008/Alexandrescu-memory-allocation.screen.pdf
69  *
70  * License:
71  *   This Source Code Form is subject to the terms of
72  *   the Mozilla Public License, v. 2.0. If a copy of
73  *   the MPL was not distributed with this file, You
74  *   can obtain one at http://mozilla.org/MPL/2.0/.
75  *
76  * Authors:
77  *   Vladimir Panteleev <vladimir@thecybershadow.net>
78  */
79 
80 module ae.utils.alloc;
81 
82 import std.conv : emplace;
83 import std.traits : fullyQualifiedName;
84 
85 import ae.utils.meta : RefType, FromRefType, StorageType;
86 import ae.utils.meta.caps : haveFieldAliasBinding;
87 
/// Generates code to create forwarding aliases to the given mixin/template member.
/// Used as a replacement for "alias M this", which doesn't seem to work with mixins
/// and templates.
static template mixAliasForward(alias M, string name = __traits(identifier, M))
{
	static string mixAliasForward()
	{
		string code;
		// Emit one "alias name.member member;" line per named member.
		foreach (member; __traits(allMembers, M))
			if (member.length)
				code ~= "alias " ~ name ~ "." ~ member ~ " " ~ member ~ ";\n";
		return code;
	}
}
102 
/// Instantiates a struct from a type containing a Data/Impl template pair.
struct WrapParts(T)
{
	/// The allocator's state.
	T.Data data;
	/// The allocator's implementation, bound to our state.
	alias impl = T.Impl!data;

	// Generate forwarding aliases: first everything from the implementation,
	// then everything from T itself except the Data/Impl pair.
	mixin({
		string code;
		foreach (n; __traits(allMembers, T.Impl!data))
			if (n.length)
				code ~= "alias impl." ~ n ~ " " ~ n ~ ";\n";
		foreach (n; __traits(allMembers, T))
			if (n.length && n != "Impl" && n != "Data")
				code ~= "alias T." ~ n ~ " " ~ n ~ ";\n";
		return code;
	}());
}
129 
/// Creates a template which, when instantiated, forwards its arguments to T
/// and uses WrapParts on the result.
template PartsWrapper(alias T)
{
	// Eponymous inner template: takes T's template arguments verbatim,
	// instantiates T with them, and wraps the resulting Data/Impl pair
	// into a usable struct via WrapParts.
	template PartsWrapper(Args...)
	{
		alias PartsWrapper = WrapParts!(T!Args);
	}
}
139 
140 // TODO:
141 // - GROWFUN callable alias parameter instead of BLOCKSIZE?
142 // - Consolidate RegionAllocator and GrowingBufferAllocator
143 // - Add new primitive for bulk allocation which returns a range?
144 //   (to allow non-contiguous bulk allocation, but avoid
145 //   allocating an array of pointers to store the result)
146 // - Forbid allocating types with indirections when the base type
147 //   is not a pointer?
148 // - More thorough testing
149 
/// Common declarations for an allocator mixin
mixin template AllocatorCommon()
{
	static assert(haveFieldAliasBinding, "Your compiler doesn't support field alias template parameter binding, which is required for " ~ __MODULE__ ~ ".");

	/// Bring StorageType into the mixin's scope.
	alias StorageType = ae.utils.meta.StorageType;

	// Derived value-type aliases, defined only when the corresponding
	// type parameter exists in the instantiating context.
	static if (is(ALLOCATOR_TYPE))
		alias VALUE_TYPE = StorageType!ALLOCATOR_TYPE;

	static if (is(BASE_TYPE))
		alias BASE_VALUE_TYPE = StorageType!BASE_TYPE;
}
163 
/// Default "create" implementation.
/// Allocates an uninitialized T from allocator `a`, then constructs it
/// in-place with `args` via std.conv.emplace.
/// Available for any allocator providing the `allocate` primitive.
RefType!T create(T, A, Args...)(ref A a, Args args)
	if (is(typeof(a.allocate!T())))
{
	alias StorageType!T V;

	auto r = a.allocate!T();
	// Reinterpret the returned reference as one V-sized element of raw
	// storage and construct into it.
	// NOTE(review): assumes StorageType!T (from ae.utils.meta) covers the
	// full in-place storage of T, including class instances - confirm there.
	emplace!T(cast(void[])((cast(V*)r)[0..1]), args);
	return r;
}
174 
/// Default "destroy" implementation: finalize the object behind `r`,
/// then release its memory if the allocator supports `free`.
void destroy(R, A)(ref A a, R r)
//	TODO: contract
{
	// Presumably druntime's object.clear (renamed `destroy` in later D
	// versions): runs the finalizer without deallocating - TODO confirm
	// against the targeted compiler versions.
	clear(r);
	// Allocators which don't track individual allocations have no `free`;
	// for those, finalization is all we can do.
	static if (is(typeof(&a.free)))
		a.free(r);
}
182 
/// Creates T/R/V aliases from context, and checks ALLOCATOR_TYPE if appropriate.
mixin template AllocTypes()
{
	// Derive the missing aliases from whichever of T (value type) or
	// R (reference type) the instantiating context provides.
	static if (is(R) && !is(T)) alias FromRefType!R T;
	static if (is(T) && !is(R)) alias RefType!T R;
	static if (is(T) && !is(V)) alias StorageType!T V;
	// Homogenous allocators declare ALLOCATOR_TYPE; reject other types at
	// compile time. (Fix: join the message with explicit ~ - implicit
	// concatenation of adjacent string literals is deprecated in D.)
	static if (is(ALLOCATOR_TYPE)) static assert(is(ALLOCATOR_TYPE==T), "This allocator can " ~
		"only allocate instances of " ~ ALLOCATOR_TYPE.stringof ~ ", not " ~ T.stringof);
	static if (is(BASE_TYPE) && is(V))
	{
		// Number of BASE_TYPE-sized units needed to hold one V, rounded up.
		enum ALLOC_SIZE = (V.sizeof + BASE_TYPE.sizeof-1) / BASE_TYPE.sizeof;
	}
}
196 
/// Allocator proxy which injects custom code after object creation.
/// Context of INIT_CODE:
///   p - newly-allocated value.
struct InitializingAllocatorProxy(string INIT_CODE, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	/// Allocate via the underlying allocator, then run INIT_CODE,
	/// which sees the new value through the local `p`.
	RefType!T allocate(T)()
	{
		auto p = ALLOCATOR.allocate!T();
		mixin(INIT_CODE);
		return p;
	}

	// TODO: Proxy other methods
}
213 
/// Allocator proxy which keeps track how many allocations were made.
struct StatAllocatorProxy(alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	/// Running total of requested bytes (in storage-type units).
	size_t allocated;

	RefType!T allocate(T)()
	{
		// Count first, then forward to the underlying allocator.
		allocated += StorageType!T.sizeof;
		return ALLOCATOR.allocate!T();
	}

	StorageType!T[] allocateMany(T)(size_t n)
	{
		allocated += StorageType!T.sizeof * n;
		return ALLOCATOR.allocateMany!T(n);
	}

	// TODO: Proxy other methods
}
235 
/// The internal unit allocation type of FreeListAllocator.
/// (Exposed to allow specializing underlying allocators on it.)
template FreeListNode(T)
{
	mixin AllocTypes;

	// Members shared between the debug (struct) and release (union) layouts.
	mixin template NodeContents()
	{
		V data; /// Storage for the allocated value
		FreeListNode* next; /// Next free node
		/// Reinterpret a reference to an allocated value as its containing node.
		/// NOTE(review): valid only because `data` is the first member, so a
		/// node and its value share an address in both layouts.
		static FreeListNode* fromRef(R r) { return cast(FreeListNode*)r; }
	}

	// In debug builds `data` and `next` are kept separate (struct);
	// in release builds they overlap (union) - presumably to save space,
	// since a free node's value storage is dead anyway. TODO confirm intent.
	debug
		struct FreeListNode { mixin NodeContents; }
	else
		union  FreeListNode { mixin NodeContents; }
}
254 
255 
/// Homogenous linked list allocator.
/// Supports O(1) deletion.
/// Does not support bulk allocation.
struct FreeListAllocator(ALLOCATOR_TYPE, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	alias FreeListNode!ALLOCATOR_TYPE Node;

	struct Data
	{
		Node* head = null; /// First free node
	}

	static template Impl(alias data)
	{
		/// Pop a node off the free list, or obtain a fresh one from the
		/// underlying allocator when the list is empty.
		RefType!T allocate(T)()
		{
			mixin AllocTypes;

			auto node = data.head;
			if (node is null)
				node = ALLOCATOR.allocate!Node();
			else
				data.head = node.next;
			return cast(R)&node.data;
		}

		/// Push the node holding r back onto the free list.
		void free(R)(R r)
		{
			auto returned = Node.fromRef(r);
			returned.next = data.head;
			data.head = returned;
		}
	}
}
294 
/// Backend allocator. Allocates from D's managed heap directly.
struct HeapAllocator
{
// static: // https://d.puremagic.com/issues/show_bug.cgi?id=12207
const:
	/// Allocate a single default-initialized T.
	RefType!T allocate(T)()
	{
		return new T;
	}

	/// Allocate storage for n values of T.
	StorageType!T[] allocateMany(T)(size_t n)
	{
		return new StorageType!T[n];
	}

	/// Allocate and construct a T with the given arguments.
	RefType!T create(T, A...)(A args)
	{
		return new T(args);
	}

	/// Resize (possibly reallocating) via the GC array-length machinery.
	V[] resize(V)(V[] v, size_t n)
	{
		v.length = n;
		return v;
	}

	/// Free a single instance.
	/// NOTE(review): `delete` is deprecated in later D versions; kept for
	/// the compiler era this module targets.
	void free(R)(R r)
	{
		delete r;
	}
	alias free destroy; /// `delete` finalizes before freeing, so free == destroy here.

	/// Free an array allocated with allocateMany.
	void freeMany(V)(V[] v)
	{
		delete v;
	}
}
332 
333 immutable HeapAllocator heapAllocator;
334 
/// Default single-value "allocate" for bulk allocators:
/// allocate a one-element array and return a reference to its element.
RefType!T allocate(T, A)(ref A a)
	if (is(typeof(&a.allocateMany!T)))
{
	auto chunk = a.allocateMany!T(1);
	return cast(RefType!T)chunk.ptr;
}
340 
/// Default single-value "free" for bulk allocators:
/// frees the value as a one-element array via freeMany.
void free(A, R)(ref A a, R r)
	if (is(typeof(&a.freeMany)))
{
	// Fix: V was referenced but never defined in this scope
	// (instantiation could not compile); derive T/V from R as the
	// other default primitives do.
	mixin AllocTypes;
	a.freeMany((cast(V*)r)[0..1]);
}
346 
/// Backend allocator using the Data type from ae.sys.data.
struct DataAllocator
{
	mixin AllocatorCommon;

	import ae.sys.data : SysData = Data;

	struct Data
	{
		// Needed to make data referenced in Data instances reachable by the GC
		SysData[] datas; // TODO: use linked list or something
	}

	static template Impl(alias data)
	{
		import ae.sys.data : SysData = Data;

		/// Allocate a new SysData block sized for n values, record it so
		/// it stays GC-reachable, and hand out its mutable contents.
		StorageType!T[] allocateMany(T)(size_t n)
		{
			mixin AllocTypes;

			auto sysData = SysData(V.sizeof * n);
			data.datas ~= sysData;
			return cast(V[])sysData.mcontents;
		}

		/// Release every block allocated through this allocator at once.
		void freeAll()
		{
			foreach (sysData; data.datas)
				sysData.deleteContents();
			data.datas = null;
		}
	}
}
381 
/// Allocator proxy which registers each allocation as a GC-scanned range,
/// so that GC-managed data referenced from the allocated memory is kept alive.
struct GCRootAllocatorProxy(alias ALLOCATOR)
{
	mixin AllocatorCommon;

	import core.memory;

	StorageType!T[] allocateMany(T)(size_t n)
	{
		auto result = ALLOCATOR.allocateMany!T(n);
		// Register the allocation's full byte extent with the GC.
		auto bytes = cast(ubyte[])result;
		GC.addRange(bytes.ptr, bytes.length);
		return result;
	}

	void freeMany(V)(V[] v)
	{
		// Unregister before the memory is released.
		GC.removeRange(v.ptr);
		ALLOCATOR.freeMany(v);
	}
}
402 
/// Backend for direct OS page allocation.
struct PageAllocator
{
	version(Windows)
		import core.sys.windows.windows;
	else
	version(Posix)
		import core.sys.posix.sys.mman;

	/// Allocate storage for n values of T directly from the OS.
	/// Raises OutOfMemoryError if the OS refuses the allocation
	/// (previously, failure returned a slice over a null pointer,
	/// deferring the crash to first use).
	StorageType!T[] allocateMany(T)(size_t n)
	{
		mixin AllocTypes;

		auto size = V.sizeof * n;

		version(Windows)
		{
			auto p = VirtualAlloc(null, size, MEM_COMMIT, PAGE_READWRITE);
		}
		else
		version(Posix)
		{
			auto p = mmap(null, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
			// Normalize mmap's failure sentinel to null.
			p = (p == MAP_FAILED) ? null : p;
		}

		if (p is null)
		{
			import core.exception : onOutOfMemoryError;
			onOutOfMemoryError();
		}

		return (cast(V*)p)[0..n];
	}

	/// Return pages to the OS.
	void freeMany(V)(V[] v)
	{
		mixin AllocTypes;

		version(Windows)
			VirtualFree(v.ptr, 0, MEM_RELEASE); // MEM_RELEASE requires size 0
		else
		version(Posix)
			munmap(v.ptr, v.length * V.sizeof);
	}
}
443 
/// Common code for pointer-bumping allocator implementations.
///
/// Context:
///   data - struct holding the allocator state:
///     ptr - pointer to next free element
///     end - pointer to end of buffer
///   bufferExhausted - method called when ptr==end
///     (takes new size to allocate as parameter)
///   BLOCKSIZE - default parameter to bufferExhausted
mixin template PointerBumpCommon()
{
	/// Shared code for allocate / allocateMany.
	/// Context:
	///   data - alias to struct holding ptr and end
	///   Size - number of BASE_TYPE items to allocate
	///     (can be a constant or variable).
	private enum mixAllocateN =
	q{
		if (data.ptr + Size > data.end)
			bufferExhausted(Size > BLOCKSIZE ? Size : BLOCKSIZE);

		auto result = data.ptr[0..Size];
		data.ptr += Size;
	};

	RefType!T allocate(T)()
	{
		mixin AllocTypes;

		static if (ALLOC_SIZE == 1)
		{
			// Fast path for single-unit allocations.
			// Fix: the pointers live in `data` (as mixAllocateN and every
			// implementor's Data struct show); the bare `ptr`/`end` used
			// here previously did not resolve to anything.
			if (data.ptr == data.end)
				bufferExhausted(BLOCKSIZE);
			return cast(R)(data.ptr++);
		}
		else
		{
			enum Size = ALLOC_SIZE;
			mixin(mixAllocateN);
			return cast(R)result.ptr;
		}
	}

	StorageType!T[] allocateMany(T)(size_t n)
	{
		mixin AllocTypes;
		// Fix: `BASE` was undefined; ALLOC_SIZE is computed in units of
		// BASE_TYPE, so the divisibility check must use BASE_TYPE too.
		static assert(V.sizeof % BASE_TYPE.sizeof == 0, "Aligned/contiguous allocation impossible");
		auto Size = ALLOC_SIZE * n;
		mixin(mixAllocateN);
		return cast(V[])result;
	}
}
495 
/// Classic region.
/// Compose over another allocator to allocate values in bulk (minimum of BLOCKSIZE).
/// No deletion, but is slightly faster than FreeListAllocator.
/// BASE_TYPE indicates the type used for upstream allocations.
/// It is not possible to bulk-allocate types smaller than BASE_TYPE,
/// or those the size of which is not divisible by BASE_TYPE's size.
/// (This restriction allows for allocations of single BASE_TYPE-sized items to be
/// a little faster.)
// TODO: support non-bulk allocators (without allocateMany support)?
struct RegionAllocator(BASE_TYPE=void*, size_t BLOCKSIZE=1024, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	struct Data
	{
		BASE_VALUE_TYPE* ptr=null, end=null;
	}

	static template Impl(alias data)
	{
		/// Forget we ever allocated anything.
		void reset()
		{
			data.ptr = null;
			data.end = null;
		}

		/// Fetch a fresh block from the upstream allocator.
		/// `size` counts BASE_VALUE_TYPE units.
		private void newBlock(size_t size)
		{
			auto block = ALLOCATOR.allocateMany!BASE_TYPE(size);
			data.ptr = block.ptr;
			data.end = block.ptr + block.length;
		}

		alias bufferExhausted = newBlock;
		mixin PointerBumpCommon;
	}
}
530 
/// Allocator proxy which keeps track of all allocations,
/// and implements freeAll by discarding them all at once
/// via the underlying allocator's freeMany.
struct TrackingAllocatorProxy(ALLOCATOR_TYPE, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	struct Data
	{
		VALUE_TYPE[][] blocks; // TODO: use linked list or something
	}

	static template Impl(alias data)
	{
		/// Allocate a block from the underlying allocator and remember it.
		VALUE_TYPE[] allocateMany(T)(size_t n)
		{
			mixin AllocTypes; // also statically checks T == ALLOCATOR_TYPE

			VALUE_TYPE[] arr = ALLOCATOR.allocateMany!ALLOCATOR_TYPE(n);
			data.blocks ~= arr;
			return arr;
		}

		/// Single-value allocation, expressed via allocateMany.
		RefType!T allocate(T)()
		{
			mixin AllocTypes;

			return cast(R)(allocateMany!T(1).ptr);
		}

		/// Return every recorded block to the underlying allocator.
		void freeAll()
		{
			foreach (block; data.blocks)
				ALLOCATOR.freeMany(block);
			data.blocks = null;
		}
	}
}
569 
/// Growing buffer bulk allocator.
/// Allows reusing the same buffer, which is grown and retained as needed.
/// Requires .resize support from underlying allocator.
/// Smaller buffers are discarded (neither freed nor reused).
struct GrowingBufferAllocator(BASE_TYPE=void*, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	struct Data
	{
		BASE_VALUE_TYPE* buf, ptr, end; /// Buffer start, fill position, buffer end
	}

	static template Impl(alias data)
	{
		/// Called when the buffer cannot fit n more items: grow it
		/// (preserving contents and fill position) and fix up the pointers.
		void bufferExhausted(size_t n)
		{
			import std.algorithm;
			// Preserve the fill position across the reallocation.
			auto pos = data.ptr - data.buf;
			// Grow to at least one 4KB page and at least double the current
			// capacity. Fix: also grow to at least pos + n - previously a
			// single request larger than both bounds could leave less than
			// n items of room, and the caller (mixAllocateN) would then
			// bump the pointer past the end of the buffer.
			auto newSize = max(4096 / BASE_VALUE_TYPE.sizeof, (data.end-data.buf)*2, pos + n);
			auto arr = ALLOCATOR.resize(data.buf[0..data.end-data.buf], newSize);
			data.buf = arr.ptr;
			data.end = data.buf + arr.length;
			data.ptr = data.buf + pos;
		}

		/// Mark all allocated memory as free, retaining the buffer.
		void clear()
		{
			data.ptr = data.buf;
		}

		enum BLOCKSIZE=0;
		mixin PointerBumpCommon;
	}
}
605 
/// Thrown when the buffer of an allocator is exhausted.
class BufferExhaustedException : Exception
{
	this()
	{
		super("Allocator buffer exhausted");
	}
}
608 
/// Homogenous allocator which uses a given buffer.
/// Throws BufferExhaustedException if the buffer is exhausted.
struct BufferAllocator(BASE_TYPE=ubyte)
{
	mixin AllocatorCommon;

	struct Data
	{
		BASE_VALUE_TYPE* ptr=null, end=null; /// Next free element / end of buffer
	}

	static template Impl(alias data)
	{
		/// Point the allocator at the caller-supplied buffer.
		void setBuffer(BASE_VALUE_TYPE[] buf)
		{
			data.ptr = buf.ptr;
			data.end = data.ptr + buf.length;
		}

		this(BASE_VALUE_TYPE[] buf) { setBuffer(buf); }

		/// No fallback: a fixed buffer cannot grow.
		static void bufferExhausted(size_t n)
		{
			throw new BufferExhaustedException();
		}

		enum BLOCKSIZE=0; // any overflow, however small, throws
		mixin PointerBumpCommon;
	}
}
639 
/// Homogenous allocator which uses a static buffer of a given size.
/// Throws BufferExhaustedException if the buffer is exhausted.
/// Needs to be manually initialized before use.
struct StaticBufferAllocator(size_t SIZE, BASE_TYPE=ubyte)
{
	mixin AllocatorCommon;

	struct Data
	{
		StorageType!BASE_TYPE[SIZE] buffer; /// In-struct storage
		StorageType!BASE_TYPE* ptr; /// Next free element
		// The end pointer is derived from the fixed-size buffer,
		// so it needs no storage of its own.
		@property StorageType!BASE_TYPE* end() { return buffer.ptr + buffer.length; }
	}

	static template Impl(alias data)
	{
		/// Point the allocation pointer at the start of the buffer.
		/// Must be called before the first allocation.
		void initialize()
		{
			data.ptr = data.buffer.ptr;
		}

		/// No fallback: the static buffer cannot grow.
		void bufferExhausted(size_t n)
		{
			throw new BufferExhaustedException();
		}

		enum BLOCKSIZE=0; // any overflow throws
		mixin PointerBumpCommon;

		alias initialize clear; /// Clearing is just rewinding to the start.
	}
}
672 
/// A bulk allocator which behaves like a StaticBufferAllocator initially,
/// but once the static buffer is exhausted, it switches to a fallback
/// bulk allocator.
/// Needs to be manually initialized before use.
/// ALLOCATOR is the fallback allocator.
struct HybridBufferAllocator(size_t SIZE, BASE_TYPE=ubyte, alias ALLOCATOR=heapAllocator)
{
	mixin AllocatorCommon;

	struct Data
	{
		BASE_VALUE_TYPE[SIZE] buffer; /// The static buffer
		BASE_VALUE_TYPE* ptr, end; /// Next free element / end of current block
	}

	static template Impl(alias data)
	{
		/// Point ptr/end at the static buffer.
		/// Must be called before the first allocation.
		void initialize()
		{
			data.ptr = data.buffer.ptr;
			data.end = data.buffer.ptr + data.buffer.length;
		}

		/// Current block exhausted: switch to a block from the fallback allocator.
		void bufferExhausted(size_t n)
		{
			auto arr = ALLOCATOR.allocateMany!BASE_TYPE(n);
			data.ptr = arr.ptr;
			data.end = data.ptr + arr.length;
		}

		enum BLOCKSIZE = SIZE; // fallback blocks are at least as big as the static buffer
		mixin PointerBumpCommon;

		static if (is(typeof(&ALLOCATOR.clear)))
		{
			/// If still on the static buffer, just rewind; otherwise delegate
			/// to the fallback allocator's clear.
			/// NOTE(review): "end == static buffer's end" is used as the
			/// "still on static buffer" test; after switching to the fallback,
			/// clear() does not switch back to the static buffer.
			void clear()
			{
				if (data.end == data.buffer.ptr + data.buffer.length)
					data.ptr = data.buffer.ptr;
				else
					ALLOCATOR.clear();
			}
		}
	}
}
718 
static if (haveFieldAliasBinding)
unittest
{
	// Payload with both a default and a parameterized constructor,
	// so both create!C() overload paths are exercised.
	static class C { int x=2; this() {} this(int p) { x = p; } }

	// Instantiate allocator A, run INIT (e.g. buffer setup code that
	// sees the allocator as `a`), and verify that create() both
	// allocates and constructs correctly.
	void testAllocator(A, string INIT="")()
	{
		A a;
		mixin(INIT);
		auto c1 = a.create!C();
		assert(c1.x == 2);

		auto c2 = a.create!C(5);
		assert(c2.x == 5);
	}

	// Data/Impl-style allocators are wrapped with WrapParts;
	// self-contained ones are used directly.
	testAllocator!(WrapParts!(FreeListAllocator!C))();
	testAllocator!(           HeapAllocator)();
	testAllocator!(WrapParts!(DataAllocator))();
	testAllocator!(           PageAllocator)();
	testAllocator!(WrapParts!(RegionAllocator!()))();
	testAllocator!(WrapParts!(GrowingBufferAllocator!()))();
	testAllocator!(WrapParts!(StaticBufferAllocator!1024), q{a.initialize();})();
	testAllocator!(WrapParts!(HybridBufferAllocator!1024))();
}