1 /**
2  * Composable allocators
3  *
4  * This module uses a composing system - allocators implementing various
5  * strategies allocate memory in bulk from another backend allocator,
6  * "chained" in as a template alias or string parameter.
7  *
8  * Various allocation strategies allow for various capabilities - e.g.
9  * some strategies may not keep metadata required to free the memory of
10  * individual instances. Code should test the presence of primitives
11  * (methods in allocator mixin instances) accordingly.
12  *
13  * Most allocators have two parts: data and implementation. The split is
14  * done to allow all implementation layers to share the same "this"
15  * pointer. Each "Impl" part takes its "data" part as an alias.
16  *
17  * The underlying allocator (or its implementation instantiation) is
18  * passed in as an alias template parameter. This means that it has to be
19  * a symbol, the address of which is known in the scope of the allocator
20  * - thus, something scoped in the same class/struct, or a global variable.
21  *
22  * Allocator kinds:
23  *
 * * Homogeneous allocators, once instantiated, can only allocate values
 *   of the type specified in the template parameter. Attempting to
 *   allocate a different type will result in a compile-time error.
27  *
 * * Heterogeneous allocators are not bound by one type. One instance can
 *   allocate values of multiple types.
30  *
31  * Allocator primitives:
32  *
33  * allocate
34  *   Return a pointer to a new instance.
35  *   The returned object is not initialized.
36  *   The only mandatory primitive.
37  *
38  * create
39  *   Allocate and initialize/construct a new instance of T, with the
40  *   supplied parameters.
41  *
42  * free
43  *   Free memory at the given pointer, assuming it was allocated by the
44  *   same allocator.
45  *
46  * destroy
47  *   Finalize and free the given pointer.
48  *
49  * allocateMany
50  *   Allocate an array of values, with the given size. Allocators which
51  *   support this primitive are referred to as "bulk allocators".
52  *
53  * freeMany
54  *   Free memory for the given array of values.
55  *
56  * resize
57  *   Resize an array of values. Reallocate if needed.
58  *
59  * freeAll
60  *   Free all memory allocated using the given allocator, at once.
61  *   Deallocates storage from underlying allocator, if applicable.
62  *
63  * clear
64  *   Mark all memory allocated by the top-layer allocator as free.
65  *   Does not deallocate memory from underlying allocator.
66  *
67  * References:
68  *   http://accu.org/content/conf2008/Alexandrescu-memory-allocation.screen.pdf
69  *
70  * License:
71  *   This Source Code Form is subject to the terms of
72  *   the Mozilla Public License, v. 2.0. If a copy of
73  *   the MPL was not distributed with this file, You
74  *   can obtain one at http://mozilla.org/MPL/2.0/.
75  *
76  * Authors:
77  *   Vladimir Panteleev <ae@cy.md>
78  */
79 
80 deprecated module ae.utils.alloc;
81 deprecated:
82 
83 import std.conv : emplace;
84 import std.traits : fullyQualifiedName;
85 
86 import ae.utils.meta : RefType, FromRefType, StorageType;
87 import ae.utils.meta.caps : haveFieldAliasBinding;
88 
/// Generates code to create forwarding aliases to the given mixin/template member.
/// Used as a replacement for "alias M this", which doesn't seem to work with mixins
/// and templates.
static template mixAliasForward(alias M, string name = __traits(identifier, M))
{
	static string mixAliasForward()
	{
		import std.string, std.algorithm;
		// Emit one forwarding alias declaration per named member of M.
		string code;
		foreach (member; [__traits(allMembers, M)])
			if (member.length)
				code ~= format("alias %s.%s %s;\n", name, member, member);
		return code;
	}
}
103 
/// Instantiates a struct from a type containing a Data/Impl template pair.
/// The resulting struct holds a Data instance and exposes, at its own top
/// level, every member of the corresponding Impl instantiation (plus every
/// member of T other than the Data/Impl pair itself), so the pair can be
/// used as a single self-contained allocator value.
struct WrapParts(T)
{
	T.Data data;
	alias impl = T.Impl!data;
//	pragma(msg, __traits(identifier, impl));
//	pragma(msg, mixAliasForward!(impl, q{impl}));

	// Generate forwarding aliases as a string mixin:
	// one alias per member of Impl!data, then one per member of T
	// (excluding "Impl" and "Data", which are internal to the pattern).
	mixin({
		import std.string, std.algorithm, std.range;
		return
			chain(
				[__traits(allMembers, T.Impl!data)]
				.filter!(n => n.length)
				.map!(n => "alias %s.%s %s;\n".format("impl", n, n))
			,
				[__traits(allMembers, T)]
				.filter!(n => n.length)
				.filter!(n => n != "Impl")
				.filter!(n => n != "Data")
				.map!(n => "alias %s.%s %s;\n".format("T", n, n))
			)
			.join()
		;}()
	);
}
130 
/// Creates a template which, when instantiated, forwards its arguments to T
/// and uses WrapParts on the result.
/// This turns a Data/Impl-style allocator template into a template yielding
/// ready-to-use struct types.
template PartsWrapper(alias T)
{
	template PartsWrapper(Args...)
	{
		alias PartsWrapper = WrapParts!(T!Args);
	}
}
140 
141 // TODO:
142 // - GROWFUN callable alias parameter instead of BLOCKSIZE?
143 // - Consolidate RegionAllocator and GrowingBufferAllocator
144 // - Add new primitive for bulk allocation which returns a range?
145 //   (to allow non-contiguous bulk allocation, but avoid
146 //   allocating an array of pointers to store the result)
147 // - Forbid allocating types with indirections when the base type
148 //   is not a pointer?
149 // - More thorough testing
150 
/// Common declarations for an allocator mixin.
/// Derives storage-type aliases from whichever of ALLOCATOR_TYPE / BASE_TYPE
/// exist in the instantiation context.
mixin template AllocatorCommon()
{
	static assert(haveFieldAliasBinding, "Your compiler doesn't support field alias template parameter binding, which is required for " ~ __MODULE__ ~ ".");

	alias ae.utils.meta.StorageType StorageType;

	// For homogeneous allocators: in-memory representation of the single
	// type this allocator can allocate.
	static if (is(ALLOCATOR_TYPE))
		alias StorageType!ALLOCATOR_TYPE VALUE_TYPE;

	// For composed allocators: in-memory representation of the unit type
	// used for upstream (backend) allocations.
	static if (is(BASE_TYPE))
		alias StorageType!BASE_TYPE BASE_VALUE_TYPE;
}
164 
/// Default "create" implementation.
/// Allocates an instance of T using the allocator's "allocate" primitive,
/// then constructs it in-place with the supplied arguments.
RefType!T create(T, A, Args...)(ref A a, Args args)
	if (is(typeof(a.allocate!T())))
{
	alias StorageType!T V;

	auto r = a.allocate!T();
	// Reinterpret the returned reference as raw storage and construct there.
	emplace!T(cast(void[])((cast(V*)r)[0..1]), args);
	return r;
}
175 
/// Default "destroy" implementation.
/// Finalizes the instance, then frees its memory if the allocator
/// supports the "free" primitive.
void destroy(R, A)(ref A a, R r)
//	TODO: contract
{
	// NOTE(review): "clear" is the old D runtime name for object
	// finalization (later renamed to object.destroy) — confirm this
	// still resolves as intended on current compilers.
	clear(r);
	static if (is(typeof(&a.free)))
		a.free(r);
}
183 
/// Creates T/R/V aliases from context, and checks ALLOCATOR_TYPE if appropriate.
/// T is the user-facing type, R its reference type, and V its storage type;
/// whichever of T/R exists in the instantiation context is used to derive
/// the others.
mixin template AllocTypes()
{
	static if (is(R) && !is(T)) alias FromRefType!R T;
	static if (is(T) && !is(R)) alias RefType!T R;
	static if (is(T) && !is(V)) alias StorageType!T V;
	static if (is(ALLOCATOR_TYPE)) static assert(is(ALLOCATOR_TYPE==T), "This allocator can " ~
		"only allocate instances of " ~ ALLOCATOR_TYPE.stringof ~ ", not " ~ T.stringof);
	static if (is(BASE_TYPE) && is(V))
	{
		// Number of BASE_TYPE units needed to hold one V, rounded up.
		enum ALLOC_SIZE = (V.sizeof + BASE_TYPE.sizeof-1) / BASE_TYPE.sizeof;
	}
}
197 
/// Allocator proxy which injects custom code after object creation.
/// Context of INIT_CODE:
///   p - newly-allocated value.
struct InitializingAllocatorProxy(string INIT_CODE, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	/// Allocate via the underlying allocator, then run INIT_CODE on the result.
	RefType!T allocate(T)()
	{
		auto p = ALLOCATOR.allocate!T();
		mixin(INIT_CODE);
		return p;
	}

	// TODO: Proxy other methods
}
214 
/// Allocator proxy which keeps track how many allocations were made.
struct StatAllocatorProxy(alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	/// Total number of bytes requested through this proxy so far.
	size_t allocated;

	/// Allocate one T via the underlying allocator, counting its storage size.
	RefType!T allocate(T)()
	{
		allocated += StorageType!T.sizeof;
		return ALLOCATOR.allocate!T();
	}

	/// Allocate n values of T via the underlying allocator,
	/// counting their total storage size.
	StorageType!T[] allocateMany(T)(size_t n)
	{
		allocated += n * StorageType!T.sizeof;
		return ALLOCATOR.allocateMany!T(n);
	}

	// TODO: Proxy other methods
}
236 
/// The internal unit allocation type of FreeListAllocator.
/// (Exposed to allow specializing underlying allocators on it.)
/// In debug builds the value and the free-list link live side by side,
/// keeping freed data inspectable; in release builds they overlap in a
/// union, since a freed node's data is no longer meaningful.
template FreeListNode(T)
{
	mixin AllocTypes;

	mixin template NodeContents()
	{
		V data; /// Storage for the allocated value.
		FreeListNode* next; /// Next free node
		static FreeListNode* fromRef(R r) { return cast(FreeListNode*)r; }
	}

	debug
		struct FreeListNode { mixin NodeContents; }
	else
		union  FreeListNode { mixin NodeContents; }
}
255 
256 
/// Homogeneous linked list allocator.
/// Supports O(1) deletion.
/// Does not support bulk allocation.
struct FreeListAllocator(ALLOCATOR_TYPE, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	alias FreeListNode!ALLOCATOR_TYPE Node;

	struct Data
	{
		Node* head = null; /// First free node
	}

	static template Impl(alias data)
	{
		/// Pop a node off the free list, or obtain a fresh one from the
		/// underlying allocator if the list is empty.
		RefType!T allocate(T)()
		{
			mixin AllocTypes;

			if (data.head is null)
			{
				auto node = ALLOCATOR.allocate!Node();
				return cast(R)&node.data;
			}
			auto node = data.head;
			data.head = data.head.next;
			return cast(R)&node.data;
		}

		/// Push the node containing r onto the free list.
		/// Memory is never returned to the underlying allocator.
		void free(R)(R r)
		{
			auto node = Node.fromRef(r);
			node.next = data.head;
			data.head = node;
		}
	}
}
295 
/// Backend allocator which allocates from D's managed heap directly.
struct HeapAllocator
{
// static: // https://issues.dlang.org/show_bug.cgi?id=12207
const:
	/// Allocate a single instance of T via `new`.
	RefType!T allocate(T)()
	{
		return new T;
	}

	/// Allocate a GC-managed array of n values.
	StorageType!T[] allocateMany(T)(size_t n)
	{
		return new StorageType!T[n];
	}

	/// Allocate and construct an instance of T with the given arguments.
	RefType!T create(T, A...)(A args)
	{
		return new T(args);
	}

	/// Resize a GC-managed array (reallocating if necessary).
	V[] resize(V)(V[] v, size_t n)
	{
		v.length = n;
		return v;
	}

	/// Free the given reference.
	/// NOTE(review): `delete` is deprecated in D; kept as-is since this
	/// whole module is deprecated.
	void free(R)(R r)
	{
		delete r;
	}
	alias free destroy;

	/// Free the given array.
	void freeMany(V)(V[] v)
	{
		delete v;
	}
}
333 
/// Shared stateless instance of HeapAllocator, used as the default
/// backend by the allocators in this module.
immutable HeapAllocator heapAllocator;
335 
/// Default "allocate" implementation, for allocators which support only
/// bulk allocation: allocates a one-element array and returns a reference
/// to its single element.
RefType!T allocate(T, A)(ref A a)
	if (is(typeof(&a.allocateMany!T)))
{
	return cast(RefType!T)(a.allocateMany!T(1).ptr);
}
341 
/// Default "free" implementation, for allocators which support only bulk
/// deallocation: frees the value at r by freeing the single-element slice
/// containing it.
void free(A, R)(ref A a, R r)
	if (is(typeof(&a.freeMany)))
{
	// Fixed: V was previously used without ever being defined in this
	// scope, so any instantiation failed to compile. AllocTypes derives
	// T from R, and V (the storage type) from T.
	mixin AllocTypes;
	a.freeMany((cast(V*)r)[0..1]);
}
347 
/// Backend allocator using the Data type from ae.sys.data.
struct DataAllocator
{
	mixin AllocatorCommon;

	import ae.sys.data : SysData = Data;

	struct Data
	{
		struct Node
		{
			Node* next;
			SysData data;
		}

		// Needed to make data referenced in Data instances reachable by the GC
		Node* root;
	}

	static template Impl(alias data)
	{
		import ae.sys.data : SysData = Data;

		/// Allocate a new SysData block of the exact requested size and
		/// prepend it to the linked list of live allocations.
		StorageType!T[] allocateMany(T)(size_t n)
		{
			mixin AllocTypes;

			data.root = new data.Node(data.root, SysData(V.sizeof * n));
			return cast(V[])data.root.data.mcontents;
		}

		/// Eagerly release the contents of every block allocated so far.
		void freeAll()
		{
			while (data.root)
			{
				data.root.data.deleteContents();
				data.root = data.root.next;
			}
		}
	}
}
389 
/// Allocator proxy which registers each allocated block as a GC scanning
/// range, so that GC-managed objects referenced from such blocks are kept
/// alive even though the blocks themselves are not GC-allocated.
struct GCRootAllocatorProxy(alias ALLOCATOR)
{
	mixin AllocatorCommon;

	import core.memory;

	/// Allocate via the underlying allocator and register the resulting
	/// byte range with the GC.
	StorageType!T[] allocateMany(T)(size_t n)
	{
		auto result = ALLOCATOR.allocateMany!T(n);
		auto bytes = cast(ubyte[])result;
		GC.addRange(bytes.ptr, bytes.length);
		return result;
	}

	/// Unregister the range, then free it via the underlying allocator.
	void freeMany(V)(V[] v)
	{
		GC.removeRange(v.ptr);
		ALLOCATOR.freeMany(v);
	}
}
410 
/// Backend for direct OS page allocation.
struct PageAllocator
{
	version(Windows)
		import core.sys.windows.windows;
	else
	version(Posix)
		import core.sys.posix.sys.mman;

	/// Allocate n values of T in freshly-mapped pages.
	/// NOTE(review): on allocation failure, p is null and a null slice of
	/// length n is returned rather than an error being raised — confirm
	/// callers handle this.
	StorageType!T[] allocateMany(T)(size_t n)
	{
		mixin AllocTypes;

		auto size = V.sizeof * n;

		version(Windows)
		{
			auto p = VirtualAlloc(null, size, MEM_COMMIT, PAGE_READWRITE);
		}
		else
		version(Posix)
		{
			auto p = mmap(null, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
			p = (p == MAP_FAILED) ? null : p;
		}

		return (cast(V*)p)[0..n];
	}

	/// Return the pages backing v to the operating system.
	void freeMany(V)(V[] v)
	{
		mixin AllocTypes;

		version(Windows)
			VirtualFree(v.ptr, 0, MEM_RELEASE);
		else
		version(Posix)
			munmap(v.ptr, v.length * V.sizeof);
	}
}
451 
/// Common code for pointer-bumping allocator implementations.
///
/// Context:
///   data.ptr - pointer to next free element
///   data.end - pointer to end of buffer
///   bufferExhausted - method called when ptr==end
///     (takes new size to allocate as parameter)
///   BLOCKSIZE - default parameter to bufferExhausted
mixin template PointerBumpCommon()
{
	/// Shared code for allocate / allocateMany.
	/// Context:
	///   data - alias to struct holding ptr and end
	///   Size - number of BASE_TYPE items to allocate
	///     (can be a constant or variable).
	private enum mixAllocateN =
	q{
		if (data.ptr + Size > data.end)
			bufferExhausted(Size > BLOCKSIZE ? Size : BLOCKSIZE);

		auto result = data.ptr[0..Size];
		data.ptr += Size;
	};

	/// Allocate a single T by bumping the pointer.
	RefType!T allocate(T)()
	{
		mixin AllocTypes;

		static if (ALLOC_SIZE == 1)
		{
			// Fast path for single-unit allocations.
			// Fixed: previously referenced bare `ptr`/`end`, which exist in
			// no instantiation context — only `data.ptr`/`data.end` do.
			if (data.ptr==data.end)
				bufferExhausted(BLOCKSIZE);
			return cast(R)(data.ptr++);
		}
		else
		{
			enum Size = ALLOC_SIZE;
			mixin(mixAllocateN);
			return cast(R)result.ptr;
		}
	}

	/// Allocate n contiguous values of T by bumping the pointer.
	StorageType!T[] allocateMany(T)(size_t n)
	{
		mixin AllocTypes;
		// Fixed: the assert previously referenced the nonexistent symbol
		// `BASE`; the buffer's unit type in all instantiation contexts
		// is BASE_TYPE (consistent with ALLOC_SIZE in AllocTypes).
		static assert(V.sizeof % BASE_TYPE.sizeof == 0, "Aligned/contiguous allocation impossible");
		auto Size = ALLOC_SIZE * n;
		mixin(mixAllocateN);
		return cast(V[])result;
	}
}
503 
/// Classic region.
/// Compose over another allocator to allocate values in bulk (minimum of BLOCKSIZE).
/// No deletion, but is slightly faster than FreeListAllocator.
/// BASE_TYPE indicates the type used for upstream allocations.
/// It is not possible to bulk-allocate types smaller than BASE_TYPE,
/// or those the size of which is not divisible by BASE_TYPE's size.
/// (This restriction allows for allocations of single BASE_TYPE-sized items to be
/// a little faster.)
// TODO: support non-bulk allocators (without allocateMany support)?
struct RegionAllocator(BASE_TYPE=void*, size_t BLOCKSIZE=1024, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	struct Data
	{
		BASE_VALUE_TYPE* ptr=null, end=null;
	}

	static template Impl(alias data)
	{
		/// Forget we ever allocated anything
		/// (previously-obtained blocks are abandoned, not freed).
		void reset() { data.ptr=data.end=null; }

		/// Obtain a fresh block from the underlying allocator and make it
		/// the current bump buffer; the remainder of any previous block
		/// is abandoned.
		private void newBlock(size_t size) // size counts BASE_VALUE_TYPE
		{
			BASE_VALUE_TYPE[] arr = ALLOCATOR.allocateMany!BASE_TYPE(size);
			data.ptr = arr.ptr;
			data.end = data.ptr + arr.length;
		}

		alias newBlock bufferExhausted;
		mixin PointerBumpCommon;
	}
}
538 
/// Allocator proxy which keeps track of all allocations,
/// and implements freeAll by discarding them all at once
/// via the underlying allocator's freeMany.
struct TrackingAllocatorProxy(ALLOCATOR_TYPE, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	struct Data
	{
		VALUE_TYPE[][] blocks; // TODO: use linked list or something
	}

	static template Impl(alias data)
	{
		/// Allocate a block via the underlying allocator and record it
		/// for later bulk deallocation.
		VALUE_TYPE[] allocateMany(T)(size_t n)
		{
			mixin AllocTypes;

			VALUE_TYPE[] arr = ALLOCATOR.allocateMany!ALLOCATOR_TYPE(n);
			data.blocks ~= arr;
			return arr;
		}

		/// Single-value allocation, implemented as a one-element block.
		RefType!T allocate(T)()
		{
			mixin AllocTypes;

			return cast(R)(allocateMany!T(1).ptr);
		}

		/// Free every recorded block, then forget them all.
		void freeAll()
		{
			foreach (block; data.blocks)
				ALLOCATOR.freeMany(block);
			data.blocks = null;
		}
	}
}
577 
/// Growing buffer bulk allocator.
/// Allows reusing the same buffer, which is grown and retained as needed.
/// Requires .resize support from underlying allocator.
/// Smaller buffers are discarded (neither freed nor reused).
struct GrowingBufferAllocator(BASE_TYPE=void*, alias ALLOCATOR = heapAllocator)
{
	mixin AllocatorCommon;

	struct Data
	{
		BASE_VALUE_TYPE* buf, ptr, end;
	}

	static template Impl(alias data)
	{
		/// Grow the buffer to fit at least n more units, preserving
		/// existing contents and the current bump position.
		void bufferExhausted(size_t n)
		{
			import std.algorithm;
			// Grow geometrically: at least double, at least ~one page.
			auto newSize = max(4096 / BASE_VALUE_TYPE.sizeof, (data.end-data.buf)*2, n);
			auto pos = data.ptr - data.buf;
			auto arr = ALLOCATOR.resize(data.buf[0..data.end-data.buf], newSize);
			data.buf = arr.ptr;
			data.end = data.buf + arr.length;
			data.ptr = data.buf + pos;
		}

		/// Mark the whole buffer as free, retaining it for reuse.
		void clear()
		{
			data.ptr = data.buf;
		}

		// BLOCKSIZE=0: bufferExhausted computes its own growth policy,
		// so no minimum block size is imposed here.
		enum BLOCKSIZE=0;
		mixin PointerBumpCommon;
	}
}
613 
/// Thrown when the buffer of an allocator is exhausted.
class BufferExhaustedException : Exception
{
	/// Construct with the fixed diagnostic message.
	this()
	{
		super("Allocator buffer exhausted");
	}
}
616 
/// Homogeneous allocator which uses a given buffer.
/// Throws BufferExhaustedException if the buffer is exhausted.
struct BufferAllocator(BASE_TYPE=ubyte)
{
	mixin AllocatorCommon;

	struct Data
	{
		BASE_VALUE_TYPE* ptr=null, end=null;
	}

	static template Impl(alias data)
	{
		/// Use buf as the backing storage for subsequent allocations.
		void setBuffer(BASE_VALUE_TYPE[] buf)
		{
			data.ptr = buf.ptr;
			data.end = data.ptr + buf.length;
		}

		this(BASE_VALUE_TYPE[] buf) { setBuffer(buf); }

		/// This allocator cannot grow; any overflow is an error.
		static void bufferExhausted(size_t n)
		{
			throw new BufferExhaustedException();
		}

		enum BLOCKSIZE=0;
		mixin PointerBumpCommon;
	}
}
647 
/// Homogeneous allocator which uses a static buffer of a given size.
/// Throws BufferExhaustedException if the buffer is exhausted.
/// Needs to be manually initialized before use.
struct StaticBufferAllocator(size_t SIZE, BASE_TYPE=ubyte)
{
	mixin AllocatorCommon;

	struct Data
	{
		StorageType!BASE_TYPE[SIZE] buffer; /// In-struct backing storage.
		StorageType!BASE_TYPE* ptr; /// Next free position.
		@property StorageType!BASE_TYPE* end() { return buffer.ptr + buffer.length; }
	}

	static template Impl(alias data)
	{
		/// Point the bump pointer at the start of the buffer.
		/// Must be called before the first allocation.
		void initialize()
		{
			data.ptr = data.buffer.ptr;
		}

		/// This allocator cannot grow; any overflow is an error.
		void bufferExhausted(size_t n)
		{
			throw new BufferExhaustedException();
		}

		enum BLOCKSIZE=0;
		mixin PointerBumpCommon;

		alias initialize clear;
	}
}
680 
/// A bulk allocator which behaves like a StaticBufferAllocator initially,
/// but once the static buffer is exhausted, it switches to a fallback
/// bulk allocator.
/// Needs to be manually initialized before use.
/// ALLOCATOR is the fallback allocator.
struct HybridBufferAllocator(size_t SIZE, BASE_TYPE=ubyte, alias ALLOCATOR=heapAllocator)
{
	mixin AllocatorCommon;

	struct Data
	{
		BASE_VALUE_TYPE[SIZE] buffer;
		BASE_VALUE_TYPE* ptr, end;
	}

	static template Impl(alias data)
	{
		/// Point the bump pointer at the static buffer.
		/// Must be called before the first allocation.
		void initialize()
		{
			data.ptr = data.buffer.ptr;
			data.end = data.buffer.ptr + data.buffer.length;
		}

		/// Static buffer exhausted: switch to a block obtained from the
		/// fallback allocator.
		void bufferExhausted(size_t n)
		{
			auto arr = ALLOCATOR.allocateMany!BASE_TYPE(n);
			data.ptr = arr.ptr;
			data.end = data.ptr + arr.length;
		}

		enum BLOCKSIZE = SIZE;
		mixin PointerBumpCommon;

		static if (is(typeof(&ALLOCATOR.clear)))
		{
			/// Reset to the static buffer if we never left it;
			/// otherwise delegate clearing to the fallback allocator.
			void clear()
			{
				if (data.end == data.buffer.ptr + data.buffer.length)
					data.ptr = data.buffer.ptr;
				else
					ALLOCATOR.clear();
			}
		}
	}
}
726 
static if (haveFieldAliasBinding)
unittest
{
	// Payload type exercising both default and parameterized construction.
	static class C { int x=2; this() {} this(int p) { x = p; } }

	// Instantiates allocator A, runs INIT (e.g. manual initialization
	// code such as `a.initialize();`), and checks both create!C paths.
	void testAllocator(A, string INIT="")()
	{
		A a;
		mixin(INIT);
		auto c1 = a.create!C();
		assert(c1.x == 2);

		auto c2 = a.create!C(5);
		assert(c2.x == 5);
	}

	testAllocator!(WrapParts!(FreeListAllocator!C))();
	testAllocator!(           HeapAllocator)();
	testAllocator!(WrapParts!(DataAllocator))();
	testAllocator!(           PageAllocator)();
	testAllocator!(WrapParts!(RegionAllocator!()))();
	testAllocator!(WrapParts!(GrowingBufferAllocator!()))();
	testAllocator!(WrapParts!(StaticBufferAllocator!1024), q{a.initialize();})();
	testAllocator!(WrapParts!(HybridBufferAllocator!1024))();
}