/**
 * This module contains all functions related to an object's lifetime:
 * allocation, resizing, deallocation, and finalization.
 *
 * Copyright: Copyright Digital Mars 2000 - 2012.
 * License: Distributed under the
 *      $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost Software License 1.0).
 *    (See accompanying file LICENSE)
 * Authors: Walter Bright, Sean Kelly, Steven Schveighoffer
 * Source: $(DRUNTIMESRC rt/_lifetime.d)
 */

module rt.lifetime;

import core.attribute : weak;
import core.memory;
debug(PRINTF) import core.stdc.stdio;
static import rt.tlsgc;

alias BlkInfo = GC.BlkInfo;
alias BlkAttr = GC.BlkAttr;

private
{
    alias bool function(Object) CollectHandler;
    __gshared CollectHandler collectHandler = null;

    extern (C) void _d_monitordelete(Object h, bool det);

    enum : size_t
    {
        PAGESIZE = 4096,
        BIGLENGTHMASK = ~(PAGESIZE - 1),
        SMALLPAD = 1,
        MEDPAD = ushort.sizeof,
        LARGEPREFIX = 16, // 16 bytes padding at the front of the array
        LARGEPAD = LARGEPREFIX + 1,
        MAXSMALLSIZE = 256 - SMALLPAD,
        MAXMEDSIZE = (PAGESIZE / 2) - MEDPAD
    }
}

// Now-removed symbol, kept around for ABI.
// Some programs are dynamically linked, so it is best to err on the side of keeping symbols around for a while (especially extern(C) ones).
// https://github.com/dlang/druntime/pull/3361
deprecated extern (C) void lifetime_init()
{
}

/**
Allocate memory using the garbage collector

DMD uses this to allocate closures:
---
auto f(byte[24] x)
{
    return () => x; // `x` is on the stack, so it must be moved to the heap to keep it alive
}
---

Params:
    sz = number of bytes to allocate

Returns: pointer to `sz` bytes of freshly allocated, uninitialized memory, managed by the GC.
*/
extern (C) void* _d_allocmemory(size_t sz) @weak
{
    return GC.malloc(sz);
}

/**
Create a new class instance.

Allocates memory and sets fields to their initial value, but does not call a constructor.

---
new Object() // _d_newclass(typeid(Object))
---
Params:
    ci = `TypeInfo_Class` object, to provide instance size and initial bytes to copy

Returns: newly created object
*/
extern (C) Object _d_newclass(const ClassInfo ci) @weak
{
    import core.stdc.stdlib;
    import core.exception : onOutOfMemoryError;
    void* p;
    auto init = ci.initializer;

    debug(PRINTF) printf("_d_newclass(ci = %p, %s)\n", ci, cast(char *)ci.name);
    if (ci.m_flags & TypeInfo_Class.ClassFlags.isCOMclass)
    {   /* COM objects are not garbage collected, they are reference counted
         * using AddRef() and Release(). They get freed by C's free()
         * function called by Release() when Release()'s reference count goes
         * to zero.
         */
        p = malloc(init.length);
        if (!p)
            onOutOfMemoryError();
    }
    else
    {
        // TODO: should this be + 1 to avoid having pointers to the next block?
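        // A rough illustration (hypothetical classes, not from this module) of
        // how the flag checks below translate class properties into GC block
        // attributes:
        // ---
        // class A { int x; ~this() {} }       // typically FINALIZE | NO_SCAN
        // class B { Object o; }               // typically NONE (scanned, no finalizer)
        // extern (C++) class C { ~this() {} } // no FINALIZE; the GC cannot finalize it
        // ---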
105 BlkAttr attr = BlkAttr.NONE; 106 // extern(C++) classes don't have a classinfo pointer in their vtable so the GC can't finalize them 107 if (ci.m_flags & TypeInfo_Class.ClassFlags.hasDtor 108 && !(ci.m_flags & TypeInfo_Class.ClassFlags.isCPPclass)) 109 attr |= BlkAttr.FINALIZE; 110 if (ci.m_flags & TypeInfo_Class.ClassFlags.noPointers) 111 attr |= BlkAttr.NO_SCAN; 112 p = GC.malloc(init.length, attr, ci); 113 debug(PRINTF) printf(" p = %p\n", p); 114 } 115 116 debug(PRINTF) 117 { 118 printf("p = %p\n", p); 119 printf("ci = %p, ci.init.ptr = %p, len = %llu\n", ci, init.ptr, cast(ulong)init.length); 120 printf("vptr = %p\n", *cast(void**) init); 121 printf("vtbl[0] = %p\n", (*cast(void***) init)[0]); 122 printf("vtbl[1] = %p\n", (*cast(void***) init)[1]); 123 printf("init[0] = %x\n", (cast(uint*) init)[0]); 124 printf("init[1] = %x\n", (cast(uint*) init)[1]); 125 printf("init[2] = %x\n", (cast(uint*) init)[2]); 126 printf("init[3] = %x\n", (cast(uint*) init)[3]); 127 printf("init[4] = %x\n", (cast(uint*) init)[4]); 128 } 129 130 // initialize it 131 p[0 .. init.length] = init[]; 132 133 debug(PRINTF) printf("initialization done\n"); 134 return cast(Object) p; 135 } 136 137 138 /** 139 * 140 */ 141 extern (C) void _d_delinterface(void** p) 142 { 143 if (*p) 144 { 145 Interface* pi = **cast(Interface ***)*p; 146 Object o = cast(Object)(*p - pi.offset); 147 148 _d_delclass(&o); 149 *p = null; 150 } 151 } 152 153 154 // used for deletion 155 private extern (D) alias void function (Object) fp_t; 156 157 158 /** 159 * 160 */ 161 extern (C) void _d_delclass(Object* p) @weak 162 { 163 if (*p) 164 { 165 debug(PRINTF) printf("_d_delclass(%p)\n", *p); 166 167 ClassInfo **pc = cast(ClassInfo **)*p; 168 if (*pc) 169 { 170 ClassInfo c = **pc; 171 172 rt_finalize(cast(void*) *p); 173 174 if (c.deallocator) 175 { 176 fp_t fp = cast(fp_t)c.deallocator; 177 (*fp)(*p); // call deallocator 178 *p = null; 179 return; 180 } 181 } 182 else 183 { 184 rt_finalize(cast(void*) *p); 185 } 186 GC.free(cast(void*) *p); 187 *p = null; 188 } 189 } 190 191 // strip const/immutable/shared/inout from type info 192 inout(TypeInfo) unqualify(return scope inout(TypeInfo) cti) pure nothrow @nogc 193 { 194 TypeInfo ti = cast() cti; 195 while (ti) 196 { 197 // avoid dynamic type casts 198 auto tti = typeid(ti); 199 if (tti is typeid(TypeInfo_Const)) 200 ti = (cast(TypeInfo_Const)cast(void*)ti).base; 201 else if (tti is typeid(TypeInfo_Invariant)) 202 ti = (cast(TypeInfo_Invariant)cast(void*)ti).base; 203 else if (tti is typeid(TypeInfo_Shared)) 204 ti = (cast(TypeInfo_Shared)cast(void*)ti).base; 205 else if (tti is typeid(TypeInfo_Inout)) 206 ti = (cast(TypeInfo_Inout)cast(void*)ti).base; 207 else 208 break; 209 } 210 return ti; 211 } 212 213 // size used to store the TypeInfo at the end of an allocation for structs that have a destructor 214 size_t structTypeInfoSize(const TypeInfo ti) pure nothrow @nogc 215 { 216 if (ti && typeid(ti) is typeid(TypeInfo_Struct)) // avoid a complete dynamic type cast 217 { 218 auto sti = cast(TypeInfo_Struct)cast(void*)ti; 219 if (sti.xdtor) 220 return size_t.sizeof; 221 } 222 return 0; 223 } 224 225 /** dummy class used to lock for shared array appending */ 226 private class ArrayAllocLengthLock 227 {} 228 229 230 /** 231 Set the allocated length of the array block. This is called 232 any time an array is appended to or its length is set. 
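
  As a concrete illustration (assuming the GC's usual 16-byte minimum block): a
  `new int[2]` array lives in a 16-byte block whose final byte stores 8, the
  number of bytes currently in use, and appending a third `int` in place simply
  updates that byte to 12.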
233 234 The allocated block looks like this for blocks < PAGESIZE: 235 236 |elem0|elem1|elem2|...|elemN-1|emptyspace|N*elemsize| 237 238 239 The size of the allocated length at the end depends on the block size: 240 241 a block of 16 to 256 bytes has an 8-bit length. 242 243 a block with 512 to pagesize/2 bytes has a 16-bit length. 244 245 For blocks >= pagesize, the length is a size_t and is at the beginning of the 246 block. The reason we have to do this is because the block can extend into 247 more pages, so we cannot trust the block length if it sits at the end of the 248 block, because it might have just been extended. If we can prove in the 249 future that the block is unshared, we may be able to change this, but I'm not 250 sure it's important. 251 252 In order to do put the length at the front, we have to provide 16 bytes 253 buffer space in case the block has to be aligned properly. In x86, certain 254 SSE instructions will only work if the data is 16-byte aligned. In addition, 255 we need the sentinel byte to prevent accidental pointers to the next block. 256 Because of the extra overhead, we only do this for page size and above, where 257 the overhead is minimal compared to the block size. 258 259 So for those blocks, it looks like: 260 261 |N*elemsize|padding|elem0|elem1|...|elemN-1|emptyspace|sentinelbyte| 262 263 where elem0 starts 16 bytes after the first byte. 264 */ 265 bool __setArrayAllocLength(ref BlkInfo info, size_t newlength, bool isshared, const TypeInfo tinext, size_t oldlength = ~0) pure nothrow 266 { 267 import core.atomic; 268 269 size_t typeInfoSize = structTypeInfoSize(tinext); 270 271 if (info.size <= 256) 272 { 273 import core.checkedint; 274 275 bool overflow; 276 auto newlength_padded = addu(newlength, 277 addu(SMALLPAD, typeInfoSize, overflow), 278 overflow); 279 280 if (newlength_padded > info.size || overflow) 281 // new size does not fit inside block 282 return false; 283 284 auto length = cast(ubyte *)(info.base + info.size - typeInfoSize - SMALLPAD); 285 if (oldlength != ~0) 286 { 287 if (isshared) 288 { 289 return cas(cast(shared)length, cast(ubyte)oldlength, cast(ubyte)newlength); 290 } 291 else 292 { 293 if (*length == cast(ubyte)oldlength) 294 *length = cast(ubyte)newlength; 295 else 296 return false; 297 } 298 } 299 else 300 { 301 // setting the initial length, no cas needed 302 *length = cast(ubyte)newlength; 303 } 304 if (typeInfoSize) 305 { 306 auto typeInfo = cast(TypeInfo*)(info.base + info.size - size_t.sizeof); 307 *typeInfo = cast() tinext; 308 } 309 } 310 else if (info.size < PAGESIZE) 311 { 312 if (newlength + MEDPAD + typeInfoSize > info.size) 313 // new size does not fit inside block 314 return false; 315 auto length = cast(ushort *)(info.base + info.size - typeInfoSize - MEDPAD); 316 if (oldlength != ~0) 317 { 318 if (isshared) 319 { 320 return cas(cast(shared)length, cast(ushort)oldlength, cast(ushort)newlength); 321 } 322 else 323 { 324 if (*length == oldlength) 325 *length = cast(ushort)newlength; 326 else 327 return false; 328 } 329 } 330 else 331 { 332 // setting the initial length, no cas needed 333 *length = cast(ushort)newlength; 334 } 335 if (typeInfoSize) 336 { 337 auto typeInfo = cast(TypeInfo*)(info.base + info.size - size_t.sizeof); 338 *typeInfo = cast() tinext; 339 } 340 } 341 else 342 { 343 if (newlength + LARGEPAD > info.size) 344 // new size does not fit inside block 345 return false; 346 auto length = cast(size_t *)(info.base); 347 if (oldlength != ~0) 348 { 349 if (isshared) 350 { 351 return 
cas(cast(shared)length, cast(size_t)oldlength, cast(size_t)newlength); 352 } 353 else 354 { 355 if (*length == oldlength) 356 *length = newlength; 357 else 358 return false; 359 } 360 } 361 else 362 { 363 // setting the initial length, no cas needed 364 *length = newlength; 365 } 366 if (typeInfoSize) 367 { 368 auto typeInfo = cast(TypeInfo*)(info.base + size_t.sizeof); 369 *typeInfo = cast()tinext; 370 } 371 } 372 return true; // resize succeeded 373 } 374 375 /** 376 get the allocation size of the array for the given block (without padding or type info) 377 */ 378 private size_t __arrayAllocLength(ref BlkInfo info, const TypeInfo tinext) pure nothrow 379 { 380 if (info.size <= 256) 381 return *cast(ubyte *)(info.base + info.size - structTypeInfoSize(tinext) - SMALLPAD); 382 383 if (info.size < PAGESIZE) 384 return *cast(ushort *)(info.base + info.size - structTypeInfoSize(tinext) - MEDPAD); 385 386 return *cast(size_t *)(info.base); 387 } 388 389 /** 390 get the start of the array for the given block 391 */ 392 private void *__arrayStart(return scope BlkInfo info) nothrow pure 393 { 394 return info.base + ((info.size & BIGLENGTHMASK) ? LARGEPREFIX : 0); 395 } 396 397 /** 398 get the padding required to allocate size bytes. Note that the padding is 399 NOT included in the passed in size. Therefore, do NOT call this function 400 with the size of an allocated block. 401 */ 402 private size_t __arrayPad(size_t size, const TypeInfo tinext) nothrow pure @trusted 403 { 404 return size > MAXMEDSIZE ? LARGEPAD : ((size > MAXSMALLSIZE ? MEDPAD : SMALLPAD) + structTypeInfoSize(tinext)); 405 } 406 407 /** 408 clear padding that might not be zeroed by the GC (it assumes it is within the 409 requested size from the start, but it is actually at the end of the allocated block) 410 */ 411 private void __arrayClearPad(ref BlkInfo info, size_t arrsize, size_t padsize) nothrow pure 412 { 413 import core.stdc.string; 414 if (padsize > MEDPAD && !(info.attr & BlkAttr.NO_SCAN) && info.base) 415 { 416 if (info.size < PAGESIZE) 417 memset(info.base + arrsize, 0, padsize); 418 else 419 memset(info.base, 0, LARGEPREFIX); 420 } 421 } 422 423 /** 424 allocate an array memory block by applying the proper padding and 425 assigning block attributes if not inherited from the existing block 426 */ 427 private BlkInfo __arrayAlloc(size_t arrsize, const scope TypeInfo ti, const TypeInfo tinext) nothrow pure 428 { 429 import core.checkedint; 430 431 size_t typeInfoSize = structTypeInfoSize(tinext); 432 size_t padsize = arrsize > MAXMEDSIZE ? LARGEPAD : ((arrsize > MAXSMALLSIZE ? MEDPAD : SMALLPAD) + typeInfoSize); 433 434 bool overflow; 435 auto padded_size = addu(arrsize, padsize, overflow); 436 437 if (overflow) 438 return BlkInfo(); 439 440 uint attr = (!(tinext.flags & 1) ? 
BlkAttr.NO_SCAN : 0) | BlkAttr.APPENDABLE; 441 if (typeInfoSize) 442 attr |= BlkAttr.STRUCTFINAL | BlkAttr.FINALIZE; 443 444 auto bi = GC.qalloc(padded_size, attr, tinext); 445 __arrayClearPad(bi, arrsize, padsize); 446 return bi; 447 } 448 449 private BlkInfo __arrayAlloc(size_t arrsize, ref BlkInfo info, const scope TypeInfo ti, const TypeInfo tinext) 450 { 451 import core.checkedint; 452 453 if (!info.base) 454 return __arrayAlloc(arrsize, ti, tinext); 455 456 immutable padsize = __arrayPad(arrsize, tinext); 457 bool overflow; 458 auto padded_size = addu(arrsize, padsize, overflow); 459 if (overflow) 460 { 461 return BlkInfo(); 462 } 463 464 auto bi = GC.qalloc(padded_size, info.attr, tinext); 465 __arrayClearPad(bi, arrsize, padsize); 466 return bi; 467 } 468 469 /** 470 cache for the lookup of the block info 471 */ 472 private enum N_CACHE_BLOCKS=8; 473 474 // note this is TLS, so no need to sync. 475 BlkInfo *__blkcache_storage; 476 477 static if (N_CACHE_BLOCKS==1) 478 { 479 version=single_cache; 480 } 481 else 482 { 483 //version=simple_cache; // uncomment to test simple cache strategy 484 //version=random_cache; // uncomment to test random cache strategy 485 486 // ensure N_CACHE_BLOCKS is power of 2. 487 static assert(!((N_CACHE_BLOCKS - 1) & N_CACHE_BLOCKS)); 488 489 version (random_cache) 490 { 491 int __nextRndNum = 0; 492 } 493 int __nextBlkIdx; 494 } 495 496 @property BlkInfo *__blkcache() nothrow 497 { 498 if (!__blkcache_storage) 499 { 500 import core.stdc.stdlib; 501 import core.stdc.string; 502 // allocate the block cache for the first time 503 immutable size = BlkInfo.sizeof * N_CACHE_BLOCKS; 504 __blkcache_storage = cast(BlkInfo *)malloc(size); 505 memset(__blkcache_storage, 0, size); 506 } 507 return __blkcache_storage; 508 } 509 510 // called when thread is exiting. 511 static ~this() 512 { 513 // free the blkcache 514 if (__blkcache_storage) 515 { 516 import core.stdc.stdlib; 517 free(__blkcache_storage); 518 __blkcache_storage = null; 519 } 520 } 521 522 523 // we expect this to be called with the lock in place 524 void processGCMarks(BlkInfo* cache, scope rt.tlsgc.IsMarkedDg isMarked) nothrow 525 { 526 // called after the mark routine to eliminate block cache data when it 527 // might be ready to sweep 528 529 debug(PRINTF) printf("processing GC Marks, %x\n", cache); 530 if (cache) 531 { 532 debug(PRINTF) foreach (i; 0 .. N_CACHE_BLOCKS) 533 { 534 printf("cache entry %d has base ptr %x\tsize %d\tflags %x\n", i, cache[i].base, cache[i].size, cache[i].attr); 535 } 536 auto cache_end = cache + N_CACHE_BLOCKS; 537 for (;cache < cache_end; ++cache) 538 { 539 if (cache.base != null && !isMarked(cache.base)) 540 { 541 debug(PRINTF) printf("clearing cache entry at %x\n", cache.base); 542 cache.base = null; // clear that data. 543 } 544 } 545 } 546 } 547 548 unittest 549 { 550 // Bugzilla 10701 - segfault in GC 551 ubyte[] result; result.length = 4096; 552 GC.free(result.ptr); 553 GC.collect(); 554 } 555 556 /** 557 Get the cached block info of an interior pointer. Returns null if the 558 interior pointer's block is not cached. 559 560 NOTE: The base ptr in this struct can be cleared asynchronously by the GC, 561 so any use of the returned BlkInfo should copy it and then check the 562 base ptr of the copy before actually using it. 563 564 TODO: Change this function so the caller doesn't have to be aware of this 565 issue. 
Either return by value and expect the caller to always check 566 the base ptr as an indication of whether the struct is valid, or set 567 the BlkInfo as a side-effect and return a bool to indicate success. 568 */ 569 BlkInfo *__getBlkInfo(void *interior) nothrow 570 { 571 BlkInfo *ptr = __blkcache; 572 version (single_cache) 573 { 574 if (ptr.base && ptr.base <= interior && (interior - ptr.base) < ptr.size) 575 return ptr; 576 return null; // not in cache. 577 } 578 else version (simple_cache) 579 { 580 foreach (i; 0..N_CACHE_BLOCKS) 581 { 582 if (ptr.base && ptr.base <= interior && (interior - ptr.base) < ptr.size) 583 return ptr; 584 ptr++; 585 } 586 } 587 else 588 { 589 // try to do a smart lookup, using __nextBlkIdx as the "head" 590 auto curi = ptr + __nextBlkIdx; 591 for (auto i = curi; i >= ptr; --i) 592 { 593 if (i.base && i.base <= interior && cast(size_t)(interior - i.base) < i.size) 594 return i; 595 } 596 597 for (auto i = ptr + N_CACHE_BLOCKS - 1; i > curi; --i) 598 { 599 if (i.base && i.base <= interior && cast(size_t)(interior - i.base) < i.size) 600 return i; 601 } 602 } 603 return null; // not in cache. 604 } 605 606 void __insertBlkInfoCache(BlkInfo bi, BlkInfo *curpos) nothrow 607 { 608 version (single_cache) 609 { 610 *__blkcache = bi; 611 } 612 else 613 { 614 version (simple_cache) 615 { 616 if (curpos) 617 *curpos = bi; 618 else 619 { 620 // note, this is a super-simple algorithm that does not care about 621 // most recently used. It simply uses a round-robin technique to 622 // cache block info. This means that the ordering of the cache 623 // doesn't mean anything. Certain patterns of allocation may 624 // render the cache near-useless. 625 __blkcache[__nextBlkIdx] = bi; 626 __nextBlkIdx = (__nextBlkIdx+1) & (N_CACHE_BLOCKS - 1); 627 } 628 } 629 else version (random_cache) 630 { 631 // strategy: if the block currently is in the cache, move the 632 // current block index to the a random element and evict that 633 // element. 634 auto cache = __blkcache; 635 if (!curpos) 636 { 637 __nextBlkIdx = (__nextRndNum = 1664525 * __nextRndNum + 1013904223) & (N_CACHE_BLOCKS - 1); 638 curpos = cache + __nextBlkIdx; 639 } 640 else 641 { 642 __nextBlkIdx = curpos - cache; 643 } 644 *curpos = bi; 645 } 646 else 647 { 648 // 649 // strategy: If the block currently is in the cache, swap it with 650 // the head element. Otherwise, move the head element up by one, 651 // and insert it there. 652 // 653 auto cache = __blkcache; 654 if (!curpos) 655 { 656 __nextBlkIdx = (__nextBlkIdx+1) & (N_CACHE_BLOCKS - 1); 657 curpos = cache + __nextBlkIdx; 658 } 659 else if (curpos !is cache + __nextBlkIdx) 660 { 661 *curpos = cache[__nextBlkIdx]; 662 curpos = cache + __nextBlkIdx; 663 } 664 *curpos = bi; 665 } 666 } 667 } 668 669 /** 670 Shrink the "allocated" length of an array to be the exact size of the array. 671 672 It doesn't matter what the current allocated length of the array is, the 673 user is telling the runtime that he knows what he is doing. 674 675 Params: 676 ti = `TypeInfo` of array type 677 arr = array to shrink. Its `.length` is element length, not byte length, despite `void` type 678 */ 679 extern(C) void _d_arrayshrinkfit(const TypeInfo ti, void[] arr) /+nothrow+/ 680 { 681 // note, we do not care about shared. We are setting the length no matter 682 // what, so no lock is required. 
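    // Typical route into this function (sketch): `assumeSafeAppend` in object.d
    // lowers to `_d_arrayshrinkfit`, after which a later append may reuse the
    // block in place instead of reallocating:
    // ---
    // int[] a = [1, 2, 3, 4];
    // a = a[0 .. 2];
    // a.assumeSafeAppend(); // stored allocated length now matches the slice
    // a ~= 5;               // may extend in place
    // ---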
683 debug(PRINTF) printf("_d_arrayshrinkfit, elemsize = %d, arr.ptr = x%x arr.length = %d\n", ti.next.tsize, arr.ptr, arr.length); 684 auto tinext = unqualify(ti.next); 685 auto size = tinext.tsize; // array element size 686 auto cursize = arr.length * size; 687 auto isshared = typeid(ti) is typeid(TypeInfo_Shared); 688 auto bic = isshared ? null : __getBlkInfo(arr.ptr); 689 auto info = bic ? *bic : GC.query(arr.ptr); 690 if (info.base && (info.attr & BlkAttr.APPENDABLE)) 691 { 692 auto newsize = (arr.ptr - __arrayStart(info)) + cursize; 693 694 debug(PRINTF) printf("setting allocated size to %d\n", (arr.ptr - info.base) + cursize); 695 696 // destroy structs that become unused memory when array size is shrinked 697 if (typeid(tinext) is typeid(TypeInfo_Struct)) // avoid a complete dynamic type cast 698 { 699 auto sti = cast(TypeInfo_Struct)cast(void*)tinext; 700 if (sti.xdtor) 701 { 702 auto oldsize = __arrayAllocLength(info, tinext); 703 if (oldsize > cursize) 704 finalize_array(arr.ptr + cursize, oldsize - cursize, sti); 705 } 706 } 707 // Note: Since we "assume" the append is safe, it means it is not shared. 708 // Since it is not shared, we also know it won't throw (no lock). 709 if (!__setArrayAllocLength(info, newsize, false, tinext)) 710 { 711 import core.exception : onInvalidMemoryOperationError; 712 onInvalidMemoryOperationError(); 713 } 714 715 // cache the block if not already done. 716 if (!isshared && !bic) 717 __insertBlkInfoCache(info, null); 718 } 719 } 720 721 package bool hasPostblit(in TypeInfo ti) nothrow pure 722 { 723 return (&ti.postblit).funcptr !is &TypeInfo.postblit; 724 } 725 726 void __doPostblit(void *ptr, size_t len, const TypeInfo ti) 727 { 728 if (!hasPostblit(ti)) 729 return; 730 731 if (auto tis = cast(TypeInfo_Struct)ti) 732 { 733 // this is a struct, check the xpostblit member 734 auto pblit = tis.xpostblit; 735 if (!pblit) 736 // postblit not specified, no point in looping. 737 return; 738 739 // optimized for struct, call xpostblit directly for each element 740 immutable size = ti.tsize; 741 const eptr = ptr + len; 742 for (;ptr < eptr;ptr += size) 743 pblit(ptr); 744 } 745 else 746 { 747 // generic case, call the typeinfo's postblit function 748 immutable size = ti.tsize; 749 const eptr = ptr + len; 750 for (;ptr < eptr;ptr += size) 751 ti.postblit(ptr); 752 } 753 } 754 755 756 /** 757 Set the array capacity. 758 759 If the array capacity isn't currently large enough 760 to hold the requested capacity (in number of elements), then the array is 761 resized/reallocated to the appropriate size. 762 763 Pass in a requested capacity of 0 to get the current capacity. 764 765 Params: 766 ti = type info of element type 767 newcapacity = requested new capacity 768 p = pointer to array to set. Its `length` is left unchanged. 769 770 Returns: the number of elements that can actually be stored once the resizing is done 771 */ 772 extern(C) size_t _d_arraysetcapacity(const TypeInfo ti, size_t newcapacity, void[]* p) @weak 773 in 774 { 775 assert(ti); 776 assert(!(*p).length || (*p).ptr); 777 } 778 do 779 { 780 import core.stdc.string; 781 import core.exception : onOutOfMemoryError; 782 783 // step 1, get the block 784 auto isshared = typeid(ti) is typeid(TypeInfo_Shared); 785 auto bic = isshared ? null : __getBlkInfo((*p).ptr); 786 auto info = bic ? 
*bic : GC.query((*p).ptr); 787 auto tinext = unqualify(ti.next); 788 auto size = tinext.tsize; 789 version (D_InlineAsm_X86) 790 { 791 size_t reqsize = void; 792 793 asm 794 { 795 mov EAX, newcapacity; 796 mul EAX, size; 797 mov reqsize, EAX; 798 jnc Lcontinue; 799 } 800 } 801 else version (D_InlineAsm_X86_64) 802 { 803 size_t reqsize = void; 804 805 asm 806 { 807 mov RAX, newcapacity; 808 mul RAX, size; 809 mov reqsize, RAX; 810 jnc Lcontinue; 811 } 812 } 813 else 814 { 815 import core.checkedint : mulu; 816 817 bool overflow = false; 818 size_t reqsize = mulu(size, newcapacity, overflow); 819 if (!overflow) 820 goto Lcontinue; 821 } 822 Loverflow: 823 onOutOfMemoryError(); 824 assert(0); 825 Lcontinue: 826 827 // step 2, get the actual "allocated" size. If the allocated size does not 828 // match what we expect, then we will need to reallocate anyways. 829 830 // TODO: this probably isn't correct for shared arrays 831 size_t curallocsize = void; 832 size_t curcapacity = void; 833 size_t offset = void; 834 size_t arraypad = void; 835 if (info.base && (info.attr & BlkAttr.APPENDABLE)) 836 { 837 if (info.size <= 256) 838 { 839 arraypad = SMALLPAD + structTypeInfoSize(tinext); 840 curallocsize = *(cast(ubyte *)(info.base + info.size - arraypad)); 841 } 842 else if (info.size < PAGESIZE) 843 { 844 arraypad = MEDPAD + structTypeInfoSize(tinext); 845 curallocsize = *(cast(ushort *)(info.base + info.size - arraypad)); 846 } 847 else 848 { 849 curallocsize = *(cast(size_t *)(info.base)); 850 arraypad = LARGEPAD; 851 } 852 853 854 offset = (*p).ptr - __arrayStart(info); 855 if (offset + (*p).length * size != curallocsize) 856 { 857 curcapacity = 0; 858 } 859 else 860 { 861 // figure out the current capacity of the block from the point 862 // of view of the array. 863 curcapacity = info.size - offset - arraypad; 864 } 865 } 866 else 867 { 868 curallocsize = curcapacity = offset = 0; 869 } 870 debug(PRINTF) printf("_d_arraysetcapacity, p = x%d,%d, newcapacity=%d, info.size=%d, reqsize=%d, curallocsize=%d, curcapacity=%d, offset=%d\n", (*p).ptr, (*p).length, newcapacity, info.size, reqsize, curallocsize, curcapacity, offset); 871 872 if (curcapacity >= reqsize) 873 { 874 // no problems, the current allocated size is large enough. 875 return curcapacity / size; 876 } 877 878 // step 3, try to extend the array in place. 879 if (info.size >= PAGESIZE && curcapacity != 0) 880 { 881 auto extendsize = reqsize + offset + LARGEPAD - info.size; 882 auto u = GC.extend(info.base, extendsize, extendsize); 883 if (u) 884 { 885 // extend worked, save the new current allocated size 886 if (bic) 887 bic.size = u; // update cache 888 curcapacity = u - offset - LARGEPAD; 889 return curcapacity / size; 890 } 891 } 892 893 // step 4, if extending doesn't work, allocate a new array with at least the requested allocated size. 894 auto datasize = (*p).length * size; 895 // copy attributes from original block, or from the typeinfo if the 896 // original block doesn't exist. 897 info = __arrayAlloc(reqsize, info, ti, tinext); 898 if (info.base is null) 899 goto Loverflow; 900 // copy the data over. 901 // note that malloc will have initialized the data we did not request to 0. 902 auto tgt = __arrayStart(info); 903 memcpy(tgt, (*p).ptr, datasize); 904 905 // handle postblit 906 __doPostblit(tgt, datasize, tinext); 907 908 if (!(info.attr & BlkAttr.NO_SCAN)) 909 { 910 // need to memset the newly requested data, except for the data that 911 // malloc returned that we didn't request. 
912 void *endptr = tgt + reqsize; 913 void *begptr = tgt + datasize; 914 915 // sanity check 916 assert(endptr >= begptr); 917 memset(begptr, 0, endptr - begptr); 918 } 919 920 // set up the correct length 921 __setArrayAllocLength(info, datasize, isshared, tinext); 922 if (!isshared) 923 __insertBlkInfoCache(info, bic); 924 925 *p = (cast(void*)tgt)[0 .. (*p).length]; 926 927 // determine the padding. This has to be done manually because __arrayPad 928 // assumes you are not counting the pad size, and info.size does include 929 // the pad. 930 if (info.size <= 256) 931 arraypad = SMALLPAD + structTypeInfoSize(tinext); 932 else if (info.size < PAGESIZE) 933 arraypad = MEDPAD + structTypeInfoSize(tinext); 934 else 935 arraypad = LARGEPAD; 936 937 curcapacity = info.size - arraypad; 938 return curcapacity / size; 939 } 940 941 /** 942 Allocate an array with the garbage collector. 943 944 Has three variants: 945 - `_d_newarrayU` leave elements uninitialized 946 - `_d_newarrayT` initializes to 0 (e.g `new int[]`) 947 - `_d_newarrayiT` initializes based on initializer retrieved from TypeInfo (e.g `new float[]`) 948 949 Params: 950 ti = the type of the resulting array, (may also be the corresponding `array.ptr` type) 951 length = `.length` of resulting array 952 Returns: newly allocated array 953 */ 954 extern (C) void[] _d_newarrayU(const scope TypeInfo ti, size_t length) pure nothrow @weak 955 { 956 import core.exception : onOutOfMemoryError; 957 958 auto tinext = unqualify(ti.next); 959 auto size = tinext.tsize; 960 961 debug(PRINTF) printf("_d_newarrayU(length = x%x, size = %d)\n", length, size); 962 if (length == 0 || size == 0) 963 return null; 964 965 version (D_InlineAsm_X86) 966 { 967 asm pure nothrow @nogc 968 { 969 mov EAX,size ; 970 mul EAX,length ; 971 mov size,EAX ; 972 jnc Lcontinue ; 973 } 974 } 975 else version (D_InlineAsm_X86_64) 976 { 977 asm pure nothrow @nogc 978 { 979 mov RAX,size ; 980 mul RAX,length ; 981 mov size,RAX ; 982 jnc Lcontinue ; 983 } 984 } 985 else 986 { 987 import core.checkedint : mulu; 988 989 bool overflow = false; 990 size = mulu(size, length, overflow); 991 if (!overflow) 992 goto Lcontinue; 993 } 994 Loverflow: 995 onOutOfMemoryError(); 996 assert(0); 997 Lcontinue: 998 999 auto info = __arrayAlloc(size, ti, tinext); 1000 if (!info.base) 1001 goto Loverflow; 1002 debug(PRINTF) printf(" p = %p\n", info.base); 1003 // update the length of the array 1004 auto arrstart = __arrayStart(info); 1005 auto isshared = typeid(ti) is typeid(TypeInfo_Shared); 1006 __setArrayAllocLength(info, size, isshared, tinext); 1007 return arrstart[0..length]; 1008 } 1009 1010 /// ditto 1011 extern (C) void[] _d_newarrayT(const TypeInfo ti, size_t length) pure nothrow @weak 1012 { 1013 import core.stdc.string; 1014 1015 void[] result = _d_newarrayU(ti, length); 1016 auto tinext = unqualify(ti.next); 1017 auto size = tinext.tsize; 1018 1019 memset(result.ptr, 0, size * length); 1020 return result; 1021 } 1022 1023 /// ditto 1024 extern (C) void[] _d_newarrayiT(const TypeInfo ti, size_t length) pure nothrow @weak 1025 { 1026 import core.internal.traits : AliasSeq; 1027 1028 void[] result = _d_newarrayU(ti, length); 1029 auto tinext = unqualify(ti.next); 1030 auto size = tinext.tsize; 1031 1032 auto init = tinext.initializer(); 1033 1034 switch (init.length) 1035 { 1036 foreach (T; AliasSeq!(ubyte, ushort, uint, ulong)) 1037 { 1038 case T.sizeof: 1039 if (tinext.talign % T.alignof == 0) 1040 { 1041 (cast(T*)result.ptr)[0 .. 
                    size * length / T.sizeof] = *cast(T*)init.ptr;
                return result;
            }
            goto default;
    }

    default:
    {
        import core.stdc.string;
        immutable sz = init.length;
        for (size_t u = 0; u < size * length; u += sz)
            memcpy(result.ptr + u, init.ptr, sz);
        return result;
    }
    }
}


/*
 * Helper for creating multi-dimensional arrays
 */
private void[] _d_newarrayOpT(alias op)(const TypeInfo ti, size_t[] dims)
{
    debug(PRINTF) printf("_d_newarrayOpT(ndims = %d)\n", dims.length);
    if (dims.length == 0)
        return null;

    void[] foo(const TypeInfo ti, size_t[] dims)
    {
        auto tinext = unqualify(ti.next);
        auto dim = dims[0];

        debug(PRINTF) printf("foo(ti = %p, ti.next = %p, dim = %d, ndims = %d\n", ti, ti.next, dim, dims.length);
        if (dims.length == 1)
        {
            auto r = op(ti, dim);
            return *cast(void[]*)(&r);
        }

        auto allocsize = (void[]).sizeof * dim;
        auto info = __arrayAlloc(allocsize, ti, tinext);
        auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
        __setArrayAllocLength(info, allocsize, isshared, tinext);
        auto p = __arrayStart(info)[0 .. dim];

        foreach (i; 0..dim)
        {
            (cast(void[]*)p.ptr)[i] = foo(tinext, dims[1..$]);
        }
        return p;
    }

    auto result = foo(ti, dims);
    debug(PRINTF) printf("result = %llx\n", result.ptr);

    return result;
}


/**
Create a new multi-dimensional array

Has two variants:
- `_d_newarraymTX` which initializes to 0
- `_d_newarraymiTX` which initializes elements based on `TypeInfo`

---
void main()
{
    new int[][](10, 20);
    // _d_newarraymTX(typeid(int[][]), [10, 20]);

    new float[][][](10, 20, 30);
    // _d_newarraymiTX(typeid(float[][][]), [10, 20, 30]);
}
---

Params:
    ti = `TypeInfo` of the array type
    dims = array length values for each dimension

Returns:
    newly allocated array
*/
extern (C) void[] _d_newarraymTX(const TypeInfo ti, size_t[] dims) @weak
{
    debug(PRINTF) printf("_d_newarraymT(dims.length = %d)\n", dims.length);

    if (dims.length == 0)
        return null;
    else
    {
        return _d_newarrayOpT!(_d_newarrayT)(ti, dims);
    }
}

/// ditto
extern (C) void[] _d_newarraymiTX(const TypeInfo ti, size_t[] dims) @weak
{
    debug(PRINTF) printf("_d_newarraymiT(dims.length = %d)\n", dims.length);

    if (dims.length == 0)
        return null;
    else
    {
        return _d_newarrayOpT!(_d_newarrayiT)(ti, dims);
    }
}

/**
Allocate an uninitialized non-array item.

This is an optimization that avoids the array-specific overhead, such as `__arrayPad(size)`.

- `_d_newitemU` leaves the item uninitialized
- `_d_newitemT` zero initializes the item
- `_d_newitemiT` uses a non-zero initializer from `TypeInfo`

Used to allocate struct instances on the heap.
---
struct Sz {int x = 0;}
struct Si {int x = 3;}

void main()
{
    new Sz(); // _d_newitemT(typeid(Sz))
    new Si(); // _d_newitemiT(typeid(Si))
}
---

Params:
    _ti = `TypeInfo` of item to allocate
Returns:
    newly allocated item
*/
extern (C) void* _d_newitemU(scope const TypeInfo _ti) pure nothrow @weak
{
    auto ti = unqualify(_ti);
    auto flags = !(ti.flags & 1) ?
BlkAttr.NO_SCAN : 0; 1180 immutable tiSize = structTypeInfoSize(ti); 1181 immutable itemSize = ti.tsize; 1182 immutable size = itemSize + tiSize; 1183 if (tiSize) 1184 flags |= BlkAttr.STRUCTFINAL | BlkAttr.FINALIZE; 1185 1186 auto blkInf = GC.qalloc(size, flags, ti); 1187 auto p = blkInf.base; 1188 1189 if (tiSize) 1190 { 1191 // the GC might not have cleared the padding area in the block 1192 *cast(TypeInfo*)(p + (itemSize & ~(size_t.sizeof - 1))) = null; 1193 *cast(TypeInfo*)(p + blkInf.size - tiSize) = cast() ti; 1194 } 1195 1196 return p; 1197 } 1198 1199 /// ditto 1200 extern (C) void* _d_newitemT(const TypeInfo _ti) pure nothrow @weak 1201 { 1202 import core.stdc.string; 1203 auto p = _d_newitemU(_ti); 1204 memset(p, 0, _ti.tsize); 1205 return p; 1206 } 1207 1208 /// Same as above, for item with non-zero initializer. 1209 extern (C) void* _d_newitemiT(const TypeInfo _ti) pure nothrow @weak 1210 { 1211 import core.stdc.string; 1212 auto p = _d_newitemU(_ti); 1213 auto init = _ti.initializer(); 1214 assert(init.length <= _ti.tsize); 1215 memcpy(p, init.ptr, init.length); 1216 return p; 1217 } 1218 1219 debug(PRINTF) 1220 { 1221 extern(C) void printArrayCache() 1222 { 1223 auto ptr = __blkcache; 1224 printf("CACHE: \n"); 1225 foreach (i; 0 .. N_CACHE_BLOCKS) 1226 { 1227 printf(" %d\taddr:% .8x\tsize:% .10d\tflags:% .8x\n", i, ptr[i].base, ptr[i].size, ptr[i].attr); 1228 } 1229 } 1230 } 1231 1232 /** 1233 * 1234 */ 1235 extern (C) void _d_delmemory(void* *p) @weak 1236 { 1237 if (*p) 1238 { 1239 GC.free(*p); 1240 *p = null; 1241 } 1242 } 1243 1244 1245 /** 1246 * 1247 */ 1248 extern (C) void _d_callinterfacefinalizer(void *p) @weak 1249 { 1250 if (p) 1251 { 1252 Interface *pi = **cast(Interface ***)p; 1253 Object o = cast(Object)(p - pi.offset); 1254 rt_finalize(cast(void*)o); 1255 } 1256 } 1257 1258 1259 /** 1260 * 1261 */ 1262 extern (C) void _d_callfinalizer(void* p) @weak 1263 { 1264 rt_finalize( p ); 1265 } 1266 1267 1268 /** 1269 * 1270 */ 1271 extern (C) void rt_setCollectHandler(CollectHandler h) 1272 { 1273 collectHandler = h; 1274 } 1275 1276 1277 /** 1278 * 1279 */ 1280 extern (C) CollectHandler rt_getCollectHandler() 1281 { 1282 return collectHandler; 1283 } 1284 1285 1286 /** 1287 * 1288 */ 1289 extern (C) int rt_hasFinalizerInSegment(void* p, size_t size, uint attr, scope const(void)[] segment) nothrow 1290 { 1291 if (attr & BlkAttr.STRUCTFINAL) 1292 { 1293 if (attr & BlkAttr.APPENDABLE) 1294 return hasArrayFinalizerInSegment(p, size, segment); 1295 return hasStructFinalizerInSegment(p, size, segment); 1296 } 1297 1298 // otherwise class 1299 auto ppv = cast(void**) p; 1300 if (!p || !*ppv) 1301 return false; 1302 1303 auto c = *cast(ClassInfo*)*ppv; 1304 do 1305 { 1306 auto pf = c.destructor; 1307 if (cast(size_t)(pf - segment.ptr) < segment.length) return true; 1308 } 1309 while ((c = c.base) !is null); 1310 1311 return false; 1312 } 1313 1314 int hasStructFinalizerInSegment(void* p, size_t size, in void[] segment) nothrow 1315 { 1316 if (!p) 1317 return false; 1318 1319 auto ti = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof); 1320 return cast(size_t)(cast(void*)ti.xdtor - segment.ptr) < segment.length; 1321 } 1322 1323 int hasArrayFinalizerInSegment(void* p, size_t size, in void[] segment) nothrow 1324 { 1325 if (!p) 1326 return false; 1327 1328 TypeInfo_Struct si = void; 1329 if (size < PAGESIZE) 1330 si = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof); 1331 else 1332 si = *cast(TypeInfo_Struct*)(p + size_t.sizeof); 1333 1334 return 
cast(size_t)(cast(void*)si.xdtor - segment.ptr) < segment.length; 1335 } 1336 1337 // called by the GC 1338 void finalize_array2(void* p, size_t size) nothrow 1339 { 1340 debug(PRINTF) printf("rt_finalize_array2(p = %p)\n", p); 1341 1342 TypeInfo_Struct si = void; 1343 if (size <= 256) 1344 { 1345 si = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof); 1346 size = *cast(ubyte*)(p + size - size_t.sizeof - SMALLPAD); 1347 } 1348 else if (size < PAGESIZE) 1349 { 1350 si = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof); 1351 size = *cast(ushort*)(p + size - size_t.sizeof - MEDPAD); 1352 } 1353 else 1354 { 1355 si = *cast(TypeInfo_Struct*)(p + size_t.sizeof); 1356 size = *cast(size_t*)p; 1357 p += LARGEPREFIX; 1358 } 1359 1360 try 1361 { 1362 finalize_array(p, size, si); 1363 } 1364 catch (Exception e) 1365 { 1366 import core.exception : onFinalizeError; 1367 onFinalizeError(si, e); 1368 } 1369 } 1370 1371 void finalize_array(void* p, size_t size, const TypeInfo_Struct si) 1372 { 1373 // Due to the fact that the delete operator calls destructors 1374 // for arrays from the last element to the first, we maintain 1375 // compatibility here by doing the same. 1376 auto tsize = si.tsize; 1377 for (auto curP = p + size - tsize; curP >= p; curP -= tsize) 1378 { 1379 // call destructor 1380 si.destroy(curP); 1381 } 1382 } 1383 1384 // called by the GC 1385 void finalize_struct(void* p, size_t size) nothrow 1386 { 1387 debug(PRINTF) printf("finalize_struct(p = %p)\n", p); 1388 1389 auto ti = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof); 1390 try 1391 { 1392 ti.destroy(p); // call destructor 1393 } 1394 catch (Exception e) 1395 { 1396 import core.exception : onFinalizeError; 1397 onFinalizeError(ti, e); 1398 } 1399 } 1400 1401 /** 1402 * 1403 */ 1404 extern (C) void rt_finalize2(void* p, bool det = true, bool resetMemory = true) nothrow 1405 { 1406 debug(PRINTF) printf("rt_finalize2(p = %p)\n", p); 1407 1408 auto ppv = cast(void**) p; 1409 if (!p || !*ppv) 1410 return; 1411 1412 auto pc = cast(ClassInfo*) *ppv; 1413 try 1414 { 1415 if (det || collectHandler is null || collectHandler(cast(Object) p)) 1416 { 1417 auto c = *pc; 1418 do 1419 { 1420 if (c.destructor) 1421 (cast(fp_t) c.destructor)(cast(Object) p); // call destructor 1422 } 1423 while ((c = c.base) !is null); 1424 } 1425 1426 if (ppv[1]) // if monitor is not null 1427 _d_monitordelete(cast(Object) p, det); 1428 1429 if (resetMemory) 1430 { 1431 auto w = (*pc).initializer; 1432 p[0 .. w.length] = w[]; 1433 } 1434 } 1435 catch (Exception e) 1436 { 1437 import core.exception : onFinalizeError; 1438 onFinalizeError(*pc, e); 1439 } 1440 finally 1441 { 1442 *ppv = null; // zero vptr even if `resetMemory` is false 1443 } 1444 } 1445 1446 /// Backwards compatibility 1447 extern (C) void rt_finalize(void* p, bool det = true) nothrow 1448 { 1449 rt_finalize2(p, det, true); 1450 } 1451 1452 extern (C) void rt_finalizeFromGC(void* p, size_t size, uint attr) nothrow 1453 { 1454 // to verify: reset memory necessary? 1455 if (!(attr & BlkAttr.STRUCTFINAL)) 1456 rt_finalize2(p, false, false); // class 1457 else if (attr & BlkAttr.APPENDABLE) 1458 finalize_array2(p, size); // array of structs 1459 else 1460 finalize_struct(p, size); // struct 1461 } 1462 1463 1464 /** 1465 Resize a dynamic array by setting the `.length` property 1466 1467 Newly created elements are initialized to their default value. 
1468 1469 Has two variants: 1470 - `_d_arraysetlengthT` for arrays with elements that initialize to 0 1471 - `_d_arraysetlengthiT` for non-zero initializers retrieved from `TypeInfo` 1472 1473 --- 1474 void main() 1475 { 1476 int[] a = [1, 2]; 1477 a.length = 3; // gets lowered to `_d_arraysetlengthT(typeid(int[]), 3, &a)` 1478 } 1479 --- 1480 1481 Params: 1482 ti = `TypeInfo` of array 1483 newlength = new value for the array's `.length` 1484 p = pointer to array to update the `.length` of. 1485 While it's cast to `void[]`, its `.length` is still treated as element length. 1486 Returns: `*p` after being updated 1487 */ 1488 extern (C) void[] _d_arraysetlengthT(const TypeInfo ti, size_t newlength, void[]* p) @weak 1489 in 1490 { 1491 assert(ti); 1492 assert(!(*p).length || (*p).ptr); 1493 } 1494 do 1495 { 1496 import core.stdc.string; 1497 import core.exception : onOutOfMemoryError; 1498 1499 debug(PRINTF) 1500 { 1501 //printf("_d_arraysetlengthT(p = %p, sizeelem = %d, newlength = %d)\n", p, sizeelem, newlength); 1502 if (p) 1503 printf("\tp.ptr = %p, p.length = %d\n", (*p).ptr, (*p).length); 1504 } 1505 1506 if (newlength <= (*p).length) 1507 { 1508 *p = (*p)[0 .. newlength]; 1509 void* newdata = (*p).ptr; 1510 return newdata[0 .. newlength]; 1511 } 1512 auto tinext = unqualify(ti.next); 1513 size_t sizeelem = tinext.tsize; 1514 1515 /* Calculate: newsize = newlength * sizeelem 1516 */ 1517 bool overflow = false; 1518 version (D_InlineAsm_X86) 1519 { 1520 size_t newsize = void; 1521 1522 asm pure nothrow @nogc 1523 { 1524 mov EAX, newlength; 1525 mul EAX, sizeelem; 1526 mov newsize, EAX; 1527 setc overflow; 1528 } 1529 } 1530 else version (D_InlineAsm_X86_64) 1531 { 1532 size_t newsize = void; 1533 1534 asm pure nothrow @nogc 1535 { 1536 mov RAX, newlength; 1537 mul RAX, sizeelem; 1538 mov newsize, RAX; 1539 setc overflow; 1540 } 1541 } 1542 else 1543 { 1544 import core.checkedint : mulu; 1545 const size_t newsize = mulu(sizeelem, newlength, overflow); 1546 } 1547 if (overflow) 1548 { 1549 onOutOfMemoryError(); 1550 assert(0); 1551 } 1552 1553 debug(PRINTF) printf("newsize = %x, newlength = %x\n", newsize, newlength); 1554 1555 const isshared = typeid(ti) is typeid(TypeInfo_Shared); 1556 1557 if (!(*p).ptr) 1558 { 1559 // pointer was null, need to allocate 1560 auto info = __arrayAlloc(newsize, ti, tinext); 1561 if (info.base is null) 1562 { 1563 onOutOfMemoryError(); 1564 assert(0); 1565 } 1566 __setArrayAllocLength(info, newsize, isshared, tinext); 1567 if (!isshared) 1568 __insertBlkInfoCache(info, null); 1569 void* newdata = cast(byte *)__arrayStart(info); 1570 memset(newdata, 0, newsize); 1571 *p = newdata[0 .. newlength]; 1572 return *p; 1573 } 1574 1575 const size_t size = (*p).length * sizeelem; 1576 auto bic = isshared ? null : __getBlkInfo((*p).ptr); 1577 auto info = bic ? *bic : GC.query((*p).ptr); 1578 1579 /* Attempt to extend past the end of the existing array. 1580 * If not possible, allocate new space for entire array and copy. 1581 */ 1582 bool allocateAndCopy = false; 1583 void* newdata = (*p).ptr; 1584 if (info.base && (info.attr & BlkAttr.APPENDABLE)) 1585 { 1586 // calculate the extent of the array given the base. 
1587 const size_t offset = (*p).ptr - __arrayStart(info); 1588 if (info.size >= PAGESIZE) 1589 { 1590 // size of array is at the front of the block 1591 if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 1592 { 1593 // check to see if it failed because there is not 1594 // enough space 1595 if (*(cast(size_t*)info.base) == size + offset) 1596 { 1597 // not enough space, try extending 1598 auto extendsize = newsize + offset + LARGEPAD - info.size; 1599 auto u = GC.extend(info.base, extendsize, extendsize); 1600 if (u) 1601 { 1602 // extend worked, now try setting the length 1603 // again. 1604 info.size = u; 1605 if (__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 1606 { 1607 if (!isshared) 1608 __insertBlkInfoCache(info, bic); 1609 memset(newdata + size, 0, newsize - size); 1610 *p = newdata[0 .. newlength]; 1611 return *p; 1612 } 1613 } 1614 } 1615 1616 // couldn't do it, reallocate 1617 allocateAndCopy = true; 1618 } 1619 else if (!isshared && !bic) 1620 { 1621 // add this to the cache, it wasn't present previously. 1622 __insertBlkInfoCache(info, null); 1623 } 1624 } 1625 else if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 1626 { 1627 // could not resize in place 1628 allocateAndCopy = true; 1629 } 1630 else if (!isshared && !bic) 1631 { 1632 // add this to the cache, it wasn't present previously. 1633 __insertBlkInfoCache(info, null); 1634 } 1635 } 1636 else 1637 allocateAndCopy = true; 1638 1639 if (allocateAndCopy) 1640 { 1641 if (info.base) 1642 { 1643 if (bic) 1644 { 1645 // a chance that flags have changed since this was cached, we should fetch the most recent flags 1646 info.attr = GC.getAttr(info.base) | BlkAttr.APPENDABLE; 1647 } 1648 info = __arrayAlloc(newsize, info, ti, tinext); 1649 } 1650 else 1651 { 1652 info = __arrayAlloc(newsize, ti, tinext); 1653 } 1654 1655 if (info.base is null) 1656 { 1657 onOutOfMemoryError(); 1658 assert(0); 1659 } 1660 1661 __setArrayAllocLength(info, newsize, isshared, tinext); 1662 if (!isshared) 1663 __insertBlkInfoCache(info, bic); 1664 newdata = cast(byte *)__arrayStart(info); 1665 newdata[0 .. size] = (*p).ptr[0 .. size]; 1666 1667 /* Do postblit processing, as we are making a copy and the 1668 * original array may have references. 1669 * Note that this may throw. 1670 */ 1671 __doPostblit(newdata, size, tinext); 1672 } 1673 1674 // Zero the unused portion of the newly allocated space 1675 memset(newdata + size, 0, newsize - size); 1676 1677 *p = newdata[0 .. newlength]; 1678 return *p; 1679 } 1680 1681 /// ditto 1682 extern (C) void[] _d_arraysetlengthiT(const TypeInfo ti, size_t newlength, void[]* p) @weak 1683 in 1684 { 1685 assert(!(*p).length || (*p).ptr); 1686 } 1687 do 1688 { 1689 import core.stdc.string; 1690 import core.exception : onOutOfMemoryError; 1691 1692 debug(PRINTF) 1693 { 1694 //printf("_d_arraysetlengthiT(p = %p, sizeelem = %d, newlength = %d)\n", p, sizeelem, newlength); 1695 if (p) 1696 printf("\tp.ptr = %p, p.length = %d\n", (*p).ptr, (*p).length); 1697 } 1698 1699 if (newlength <= (*p).length) 1700 { 1701 *p = (*p)[0 .. newlength]; 1702 void* newdata = (*p).ptr; 1703 return newdata[0 .. 
newlength]; 1704 } 1705 auto tinext = unqualify(ti.next); 1706 size_t sizeelem = tinext.tsize; 1707 1708 /* Calculate: newsize = newlength * sizeelem 1709 */ 1710 bool overflow = false; 1711 version (D_InlineAsm_X86) 1712 { 1713 size_t newsize = void; 1714 1715 asm pure nothrow @nogc 1716 { 1717 mov EAX, newlength; 1718 mul EAX, sizeelem; 1719 mov newsize, EAX; 1720 setc overflow; 1721 } 1722 } 1723 else version (D_InlineAsm_X86_64) 1724 { 1725 size_t newsize = void; 1726 1727 asm pure nothrow @nogc 1728 { 1729 mov RAX, newlength; 1730 mul RAX, sizeelem; 1731 mov newsize, RAX; 1732 setc overflow; 1733 } 1734 } 1735 else 1736 { 1737 import core.checkedint : mulu; 1738 const size_t newsize = mulu(sizeelem, newlength, overflow); 1739 } 1740 if (overflow) 1741 { 1742 onOutOfMemoryError(); 1743 assert(0); 1744 } 1745 1746 debug(PRINTF) printf("newsize = %x, newlength = %x\n", newsize, newlength); 1747 1748 const isshared = typeid(ti) is typeid(TypeInfo_Shared); 1749 1750 static void doInitialize(void *start, void *end, const void[] initializer) 1751 { 1752 if (initializer.length == 1) 1753 { 1754 memset(start, *(cast(ubyte*)initializer.ptr), end - start); 1755 } 1756 else 1757 { 1758 auto q = initializer.ptr; 1759 immutable initsize = initializer.length; 1760 for (; start < end; start += initsize) 1761 { 1762 memcpy(start, q, initsize); 1763 } 1764 } 1765 } 1766 1767 if (!(*p).ptr) 1768 { 1769 // pointer was null, need to allocate 1770 auto info = __arrayAlloc(newsize, ti, tinext); 1771 if (info.base is null) 1772 { 1773 onOutOfMemoryError(); 1774 assert(0); 1775 } 1776 __setArrayAllocLength(info, newsize, isshared, tinext); 1777 if (!isshared) 1778 __insertBlkInfoCache(info, null); 1779 void* newdata = cast(byte *)__arrayStart(info); 1780 doInitialize(newdata, newdata + newsize, tinext.initializer); 1781 *p = newdata[0 .. newlength]; 1782 return *p; 1783 } 1784 1785 const size_t size = (*p).length * sizeelem; 1786 auto bic = isshared ? null : __getBlkInfo((*p).ptr); 1787 auto info = bic ? *bic : GC.query((*p).ptr); 1788 1789 /* Attempt to extend past the end of the existing array. 1790 * If not possible, allocate new space for entire array and copy. 1791 */ 1792 bool allocateAndCopy = false; 1793 void* newdata = (*p).ptr; 1794 1795 if (info.base && (info.attr & BlkAttr.APPENDABLE)) 1796 { 1797 // calculate the extent of the array given the base. 1798 const size_t offset = (*p).ptr - __arrayStart(info); 1799 if (info.size >= PAGESIZE) 1800 { 1801 // size of array is at the front of the block 1802 if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 1803 { 1804 // check to see if it failed because there is not 1805 // enough space 1806 if (*(cast(size_t*)info.base) == size + offset) 1807 { 1808 // not enough space, try extending 1809 auto extendsize = newsize + offset + LARGEPAD - info.size; 1810 auto u = GC.extend(info.base, extendsize, extendsize); 1811 if (u) 1812 { 1813 // extend worked, now try setting the length 1814 // again. 1815 info.size = u; 1816 if (__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 1817 { 1818 if (!isshared) 1819 __insertBlkInfoCache(info, bic); 1820 doInitialize(newdata + size, newdata + newsize, tinext.initializer); 1821 *p = newdata[0 .. newlength]; 1822 return *p; 1823 } 1824 } 1825 } 1826 1827 // couldn't do it, reallocate 1828 allocateAndCopy = true; 1829 } 1830 else if (!isshared && !bic) 1831 { 1832 // add this to the cache, it wasn't present previously. 
                __insertBlkInfoCache(info, null);
            }
        }
        else if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
        {
            // could not resize in place
            allocateAndCopy = true;
        }
        else if (!isshared && !bic)
        {
            // add this to the cache, it wasn't present previously.
            __insertBlkInfoCache(info, null);
        }
    }
    else
        allocateAndCopy = true;

    if (allocateAndCopy)
    {
        if (info.base)
        {
            if (bic)
            {
                // a chance that flags have changed since this was cached, we should fetch the most recent flags
                info.attr = GC.getAttr(info.base) | BlkAttr.APPENDABLE;
            }
            info = __arrayAlloc(newsize, info, ti, tinext);
        }
        else
        {
            info = __arrayAlloc(newsize, ti, tinext);
        }

        if (info.base is null)
        {
            onOutOfMemoryError();
            assert(0);
        }

        __setArrayAllocLength(info, newsize, isshared, tinext);
        if (!isshared)
            __insertBlkInfoCache(info, bic);
        newdata = cast(byte *)__arrayStart(info);
        newdata[0 .. size] = (*p).ptr[0 .. size];

        /* Do postblit processing, as we are making a copy and the
         * original array may have references.
         * Note that this may throw.
         */
        __doPostblit(newdata, size, tinext);
    }

    // Initialize the unused portion of the newly allocated space
    doInitialize(newdata + size, newdata + newsize, tinext.initializer);
    *p = newdata[0 .. newlength];
    return *p;
}


/**
Given an array that needs to grow to `newlength` elements of `size` bytes each,
compute a new allocation size in bytes, possibly reserving extra room to grow into.

Better version by Dave Fladebo:
This uses an inverse logarithmic algorithm to pre-allocate a bit more
space for larger arrays.
- Arrays smaller than PAGESIZE bytes are left as-is, so for the most
  common cases, memory allocation is 1 to 1. The small overhead added
  doesn't affect small array perf. (it's virtually the same as
  current).
- Larger arrays have some space pre-allocated.
- As the arrays grow, the relative pre-allocated space shrinks.
- The logarithmic algorithm allocates relatively more space for
  mid-size arrays, making it very fast for medium arrays (for
  mid-to-large arrays, this turns out to be quite a bit faster than the
  equivalent realloc() code in C, on Linux at least. Small arrays are
  just as fast as GCC).
- Perhaps most importantly, overall memory usage and stress on the GC
  is decreased significantly for demanding environments.

Params:
    newlength = requested new `.length`
    size = size of an array element, in bytes
Returns: new capacity for the array, in bytes
*/
size_t newCapacity(size_t newlength, size_t size)
{
    version (none)
    {
        size_t newcap = newlength * size;
    }
    else
    {
        size_t newcap = newlength * size;
        size_t newext = 0;

        if (newcap > PAGESIZE)
        {
            //double mult2 = 1.0 + (size / log10(pow(newcap * 2.0,2.0)));

            // redo above line using only integer math

            /*static int log2plus1(size_t c)
            {   int i;

                if (c == 0)
                    i = -1;
                else
                    for (i = 1; c >>= 1; i++)
                    {
                    }
                return i;
            }*/

            /* The following setting for mult sets how much bigger
             * the new size will be over what is actually needed.
             * 100 means the same size, more means proportionally more.
             * More means faster but more memory consumption.
             */
            //long mult = 100 + (1000L * size) / (6 * log2plus1(newcap));
            //long mult = 100 + (1000L * size) / log2plus1(newcap);
            import core.bitop;
            long mult = 100 + (1000L) / (bsr(newcap) + 1);

            // testing shows 1.02 for large arrays is about the point of diminishing return
            //
            // Commented out because the multiplier will never be < 102: for
            // 1000L / (bsr(x) + 1) to drop below 2, bsr(x) + 1 would have to
            // exceed 500, but the highest it can be is 64 (highest bit of a
            // 64-bit size_t set). We would need ~500-bit integers for 101 to
            // be achievable :)
            /*if (mult < 102)
                mult = 102;*/
            /*newext = cast(size_t)((newcap * mult) / 100);
            newext -= newext % size;*/
            // This version rounds up to the next element, and avoids using
            // mod.
            newext = cast(size_t)((newlength * mult + 99) / 100) * size;
            debug(PRINTF) printf("mult: %2.2f, alloc: %2.2f\n",mult/100.0,newext / cast(double)size);
        }
        newcap = newext > newcap ? newext : newcap;
        debug(PRINTF) printf("newcap = %d, newlength = %d, size = %d\n", newcap, newlength, size);
    }
    return newcap;
}


/**
Extend an array by n elements.

Caller must initialize those elements.

Params:
    ti = type info of array type (not element type)
    px = array to append to, cast to `byte[]` while keeping the same `.length`. Will be updated.
    n = number of elements to append
Returns: `px` after being appended to
*/
extern (C)
byte[] _d_arrayappendcTX(const TypeInfo ti, return scope ref byte[] px, size_t n) @weak
{
    import core.stdc.string;
    // This is a cut&paste job from _d_arrayappendT(). Should be refactored.

    // only optimize array append where ti is not a shared type
    auto tinext = unqualify(ti.next);
    auto sizeelem = tinext.tsize;              // array element size
    auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
    auto bic = isshared ? null : __getBlkInfo(px.ptr);
    auto info = bic ? *bic : GC.query(px.ptr);
    auto length = px.length;
    auto newlength = length + n;
    auto newsize = newlength * sizeelem;
    auto size = length * sizeelem;
    size_t newcap = void; // for scratch space

    // calculate the extent of the array given the base.
    size_t offset = cast(void*)px.ptr - __arrayStart(info);
    if (info.base && (info.attr & BlkAttr.APPENDABLE))
    {
        if (info.size >= PAGESIZE)
        {
            // size of array is at the front of the block
            if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset))
            {
                // check to see if it failed because there is not
                // enough space
                newcap = newCapacity(newlength, sizeelem);
                if (*(cast(size_t*)info.base) == size + offset)
                {
                    // not enough space, try extending
                    auto extendoffset = offset + LARGEPAD - info.size;
                    auto u = GC.extend(info.base, newsize + extendoffset, newcap + extendoffset);
                    if (u)
                    {
                        // extend worked, now try setting the length
                        // again.
2029 info.size = u; 2030 if (__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 2031 { 2032 if (!isshared) 2033 __insertBlkInfoCache(info, bic); 2034 goto L1; 2035 } 2036 } 2037 } 2038 2039 // couldn't do it, reallocate 2040 goto L2; 2041 } 2042 else if (!isshared && !bic) 2043 { 2044 __insertBlkInfoCache(info, null); 2045 } 2046 } 2047 else if (!__setArrayAllocLength(info, newsize + offset, isshared, tinext, size + offset)) 2048 { 2049 // could not resize in place 2050 newcap = newCapacity(newlength, sizeelem); 2051 goto L2; 2052 } 2053 else if (!isshared && !bic) 2054 { 2055 __insertBlkInfoCache(info, null); 2056 } 2057 } 2058 else 2059 { 2060 // not appendable or is null 2061 newcap = newCapacity(newlength, sizeelem); 2062 if (info.base) 2063 { 2064 L2: 2065 if (bic) 2066 { 2067 // a chance that flags have changed since this was cached, we should fetch the most recent flags 2068 info.attr = GC.getAttr(info.base) | BlkAttr.APPENDABLE; 2069 } 2070 info = __arrayAlloc(newcap, info, ti, tinext); 2071 } 2072 else 2073 { 2074 info = __arrayAlloc(newcap, ti, tinext); 2075 } 2076 __setArrayAllocLength(info, newsize, isshared, tinext); 2077 if (!isshared) 2078 __insertBlkInfoCache(info, bic); 2079 auto newdata = cast(byte *)__arrayStart(info); 2080 memcpy(newdata, px.ptr, length * sizeelem); 2081 // do postblit processing 2082 __doPostblit(newdata, length * sizeelem, tinext); 2083 (cast(void **)(&px))[1] = newdata; 2084 } 2085 2086 L1: 2087 *cast(size_t *)&px = newlength; 2088 return px; 2089 } 2090 2091 2092 /** 2093 Append `dchar` to `char[]`, converting UTF-32 to UTF-8 2094 2095 --- 2096 void main() 2097 { 2098 char[] s; 2099 s ~= 'α'; 2100 } 2101 --- 2102 2103 Params: 2104 x = array to append to cast to `byte[]`. Will be modified. 2105 c = `dchar` to append 2106 Returns: updated `x` cast to `void[]` 2107 */ 2108 extern (C) void[] _d_arrayappendcd(ref byte[] x, dchar c) @weak 2109 { 2110 // c could encode into from 1 to 4 characters 2111 char[4] buf = void; 2112 char[] appendthis; // passed to appendT 2113 if (c <= 0x7F) 2114 { 2115 buf.ptr[0] = cast(char)c; 2116 appendthis = buf[0..1]; 2117 } 2118 else if (c <= 0x7FF) 2119 { 2120 buf.ptr[0] = cast(char)(0xC0 | (c >> 6)); 2121 buf.ptr[1] = cast(char)(0x80 | (c & 0x3F)); 2122 appendthis = buf[0..2]; 2123 } 2124 else if (c <= 0xFFFF) 2125 { 2126 buf.ptr[0] = cast(char)(0xE0 | (c >> 12)); 2127 buf.ptr[1] = cast(char)(0x80 | ((c >> 6) & 0x3F)); 2128 buf.ptr[2] = cast(char)(0x80 | (c & 0x3F)); 2129 appendthis = buf[0..3]; 2130 } 2131 else if (c <= 0x10FFFF) 2132 { 2133 buf.ptr[0] = cast(char)(0xF0 | (c >> 18)); 2134 buf.ptr[1] = cast(char)(0x80 | ((c >> 12) & 0x3F)); 2135 buf.ptr[2] = cast(char)(0x80 | ((c >> 6) & 0x3F)); 2136 buf.ptr[3] = cast(char)(0x80 | (c & 0x3F)); 2137 appendthis = buf[0..4]; 2138 } 2139 else 2140 { 2141 import core.exception : onUnicodeError; 2142 onUnicodeError("Invalid UTF-8 sequence", 0); // invalid utf character 2143 } 2144 2145 // 2146 // TODO: This always assumes the array type is shared, because we do not 2147 // get a typeinfo from the compiler. Assuming shared is the safest option. 2148 // Once the compiler is fixed, the proper typeinfo should be forwarded. 
    //

    // Hack because _d_arrayappendT takes `x` as a reference
    auto xx = cast(shared(char)[])x;
    object._d_arrayappendT(xx, cast(shared(char)[])appendthis);
    x = cast(byte[])xx;
    return x;
}

unittest
{
    import core.exception : UnicodeException;

    /* Using inline try {} catch {} blocks fails to catch the UnicodeException
     * thrown.
     * https://issues.dlang.org/show_bug.cgi?id=16799
     */
    static void assertThrown(T : Throwable = Exception, E)(lazy E expr, string msg)
    {
        try
            expr;
        catch (T e)
        {
            assert(e.msg == msg);
            return;
        }
        assert(0); // expr was expected to throw
    }

    static void f()
    {
        string ret;
        int i = -1;
        ret ~= i;
    }

    assertThrown!UnicodeException(f(), "Invalid UTF-8 sequence");
}


/**
Append `dchar` to `wchar[]`, converting UTF-32 to UTF-16

---
void main()
{
    wchar[] s;
    s ~= 'α';
}
---

Params:
    x = array to append to, cast to `byte[]`. Will be modified.
    c = `dchar` to append

Returns: updated `x` cast to `void[]`
*/
extern (C) void[] _d_arrayappendwd(ref byte[] x, dchar c) @weak
{
    // c can encode to 1 or 2 UTF-16 code units
    wchar[2] buf = void;
    wchar[] appendthis; // passed to appendT
    if (c <= 0xFFFF)
    {
        buf.ptr[0] = cast(wchar) c;
        appendthis = buf[0..1];
    }
    else
    {
        buf.ptr[0] = cast(wchar) ((((c - 0x10000) >> 10) & 0x3FF) + 0xD800);
        buf.ptr[1] = cast(wchar) (((c - 0x10000) & 0x3FF) + 0xDC00);
        appendthis = buf[0..2];
    }

    //
    // TODO: This always assumes the array type is shared, because we do not
    // get a typeinfo from the compiler. Assuming shared is the safest option.
    // Once the compiler is fixed, the proper typeinfo should be forwarded.
    //

    auto xx = (cast(shared(wchar)*)x.ptr)[0 .. x.length];
    object._d_arrayappendT(xx, cast(shared(wchar)[])appendthis);
    x = (cast(byte*)xx.ptr)[0 .. xx.length];
    return x;
}

/**
Allocate an array literal

Rely on the caller to do the initialization of the array.

---
int[] getArr()
{
    return [10, 20];
    // auto res = cast(int*) _d_arrayliteralTX(typeid(int[]), 2);
    // res[0] = 10;
    // res[1] = 20;
    // return res[0..2];
}
---

Params:
    ti = `TypeInfo` of resulting array type
    length = `.length` of array literal

Returns: pointer to the allocated array, or `null` if `length` (or the element size) is zero
*/
extern (C)
void* _d_arrayliteralTX(const TypeInfo ti, size_t length) @weak
{
    auto tinext = unqualify(ti.next);
    auto sizeelem = tinext.tsize;              // array element size
    void* result;

    debug(PRINTF) printf("_d_arrayliteralTX(sizeelem = %d, length = %d)\n", sizeelem, length);
    if (length == 0 || sizeelem == 0)
        result = null;
    else
    {
        auto allocsize = length * sizeelem;
        auto info = __arrayAlloc(allocsize, ti, tinext);
        auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
        __setArrayAllocLength(info, allocsize, isshared, tinext);
        result = __arrayStart(info);
    }
    return result;
}


unittest
{
    int[] a;
    int[] b;
    int i;

    a = new int[3];
    a[0] = 1; a[1] = 2; a[2] = 3;
    b = a.dup;
    assert(b.length == 3);
    for (i = 0; i < 3; i++)
        assert(b[i] == i + 1);

    // test slice appending
    b = a[0..1];
    b ~= 4;
    for (i = 0; i < 3; i++)
        assert(a[i] == i + 1);

    // test reserving
    char[] arr = new char[4093];
    for (i = 0; i < arr.length; i++)
        arr[i] = cast(char)(i % 256);

    // note that these two commands used to cause corruption, which may not be
    // detected.
    arr.reserve(4094);
    auto arr2 = arr ~ "123";
    assert(arr2[0..arr.length] == arr);
    assert(arr2[arr.length..$] == "123");

    // test postblit on array concat, append, length, etc.
    static struct S
    {
        int x;
        int pad;
        this(this)
        {
            ++x;
        }
    }
    void testPostBlit(T)()
    {
        auto sarr = new T[1];
        debug(SENTINEL) {} else
            assert(sarr.capacity == 1);

        // length extend
        auto sarr2 = sarr;
        assert(sarr[0].x == 0);
        sarr2.length += 1;
        assert(sarr2[0].x == 1);
        assert(sarr[0].x == 0);

        // append
        T s;
        sarr2 = sarr;
        sarr2 ~= s;
        assert(sarr2[0].x == 1);
        assert(sarr2[1].x == 1);
        assert(sarr[0].x == 0);
        assert(s.x == 0);

        // concat
        sarr2 = sarr ~ sarr;
        assert(sarr2[0].x == 1);
        assert(sarr2[1].x == 1);
        assert(sarr[0].x == 0);

        // concat multiple (calls different method)
        sarr2 = sarr ~ sarr ~ sarr;
        assert(sarr2[0].x == 1);
        assert(sarr2[1].x == 1);
        assert(sarr2[2].x == 1);
        assert(sarr[0].x == 0);

        // reserve capacity
        sarr2 = sarr;
        sarr2.reserve(2);
        assert(sarr2[0].x == 1);
        assert(sarr[0].x == 0);
    }
    testPostBlit!(S)();
    testPostBlit!(const(S))();
}

// cannot define structs inside unit test block, or they become nested structs.
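// A nested struct would carry a hidden context pointer, which changes its size
// and forces the GC to scan it, breaking the NO_SCAN and sizeOf assertions in
// the test below.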
version (CoreUnittest)
{
    struct S1
    {
        int x = 5;
    }
    struct S2
    {
        int x;
        this(int x) { this.x = x; }
    }
    struct S3
    {
        int[4] x;
        this(int x) { this.x[] = x; }
    }
    struct S4
    {
        int* x;
    }
}

unittest
{
    auto s1 = new S1;
    assert(s1.x == 5);
    assert(GC.getAttr(s1) == BlkAttr.NO_SCAN);

    auto s2 = new S2(3);
    assert(s2.x == 3);
    assert(GC.getAttr(s2) == BlkAttr.NO_SCAN);

    auto s3 = new S3(1);
    assert(s3.x == [1, 1, 1, 1]);
    assert(GC.getAttr(s3) == BlkAttr.NO_SCAN);
    debug(SENTINEL) {} else
        assert(GC.sizeOf(s3) == 16);

    auto s4 = new S4;
    assert(s4.x == null);
    assert(GC.getAttr(s4) == 0);
}

unittest
{
    // Bugzilla 3454 - Inconsistent flag setting in GC.realloc()
    static void test(size_t multiplier)
    {
        auto p = GC.malloc(8 * multiplier, 0);
        assert(GC.getAttr(p) == 0);

        // no move, set attr
        p = GC.realloc(p, 8 * multiplier + 5, BlkAttr.NO_SCAN);
        assert(GC.getAttr(p) == BlkAttr.NO_SCAN);

        // shrink, copy attr
        p = GC.realloc(p, 2 * multiplier, 0);
        assert(GC.getAttr(p) == BlkAttr.NO_SCAN);

        // extend, copy attr
        p = GC.realloc(p, 8 * multiplier, 0);
        assert(GC.getAttr(p) == BlkAttr.NO_SCAN);
    }
    test(16);
    test(1024 * 1024);
}

unittest
{
    import core.exception;
    try
    {
        size_t x = size_t.max;
        byte[] big_buf = new byte[x];
    }
    catch (OutOfMemoryError)
    {
    }
}

unittest
{
    // bugzilla 13854
    auto arr = new ubyte[PAGESIZE]; // ensure page size
    auto info1 = GC.query(arr.ptr);
    assert(info1.base !is arr.ptr); // offset is required for page size or larger

    auto arr2 = arr[0..1];
    assert(arr2.capacity == 0); // cannot append
    arr2 ~= 0; // add a byte
    assert(arr2.ptr !is arr.ptr); // reallocated
    auto info2 = GC.query(arr2.ptr);
    assert(info2.base is arr2.ptr); // no offset, the capacity is small.

    // do the same via setting length
    arr2 = arr[0..1];
    assert(arr2.capacity == 0);
    arr2.length += 1;
    assert(arr2.ptr !is arr.ptr); // reallocated
    info2 = GC.query(arr2.ptr);
    assert(info2.base is arr2.ptr); // no offset, the capacity is small.

    // do the same for char[] since we need a type with an initializer to test certain runtime functions
    auto carr = new char[PAGESIZE];
    info1 = GC.query(carr.ptr);
    assert(info1.base !is carr.ptr); // offset is required for page size or larger

    auto carr2 = carr[0..1];
    assert(carr2.capacity == 0); // cannot append
    carr2 ~= 0; // add a byte
    assert(carr2.ptr !is carr.ptr); // reallocated
    info2 = GC.query(carr2.ptr);
    assert(info2.base is carr2.ptr); // no offset, the capacity is small.

    // do the same via setting length
    carr2 = carr[0..1];
    assert(carr2.capacity == 0);
    carr2.length += 1;
    assert(carr2.ptr !is carr.ptr); // reallocated
    info2 = GC.query(carr2.ptr);
    assert(info2.base is carr2.ptr); // no offset, the capacity is small.
}

unittest
{
    // bugzilla 13878
    auto arr = new ubyte[1];
    auto info = GC.query(arr.ptr);
    assert(info.attr & BlkAttr.NO_SCAN); // should be NO_SCAN
    arr ~= 0; // ensure array is inserted into cache
    debug(SENTINEL) {} else
        assert(arr.ptr is info.base);
    GC.clrAttr(arr.ptr, BlkAttr.NO_SCAN); // remove the attribute
    auto arr2 = arr[0..1];
    assert(arr2.capacity == 0); // cannot append
    arr2 ~= 0;
    assert(arr2.ptr !is arr.ptr);
    info = GC.query(arr2.ptr);
    assert(!(info.attr & BlkAttr.NO_SCAN)); // ensure attribute sticks

    // do the same via setting length
    arr = new ubyte[1];
    arr ~= 0; // ensure array is inserted into cache
    GC.clrAttr(arr.ptr, BlkAttr.NO_SCAN); // remove the attribute
    arr2 = arr[0..1];
    assert(arr2.capacity == 0);
    arr2.length += 1;
    assert(arr2.ptr !is arr.ptr); // reallocated
    info = GC.query(arr2.ptr);
    assert(!(info.attr & BlkAttr.NO_SCAN)); // ensure attribute sticks

    // do the same for char[] since we need a type with an initializer to test certain runtime functions
    auto carr = new char[1];
    info = GC.query(carr.ptr);
    assert(info.attr & BlkAttr.NO_SCAN); // should be NO_SCAN
    carr ~= 0; // ensure array is inserted into cache
    debug(SENTINEL) {} else
        assert(carr.ptr is info.base);
    GC.clrAttr(carr.ptr, BlkAttr.NO_SCAN); // remove the attribute
    auto carr2 = carr[0..1];
    assert(carr2.capacity == 0); // cannot append
    carr2 ~= 0;
    assert(carr2.ptr !is carr.ptr);
    info = GC.query(carr2.ptr);
    assert(!(info.attr & BlkAttr.NO_SCAN)); // ensure attribute sticks

    // do the same via setting length
    carr = new char[1];
    carr ~= 0; // ensure array is inserted into cache
    GC.clrAttr(carr.ptr, BlkAttr.NO_SCAN); // remove the attribute
    carr2 = carr[0..1];
    assert(carr2.capacity == 0);
    carr2.length += 1;
    assert(carr2.ptr !is carr.ptr); // reallocated
    info = GC.query(carr2.ptr);
    assert(!(info.attr & BlkAttr.NO_SCAN)); // ensure attribute sticks
}

// test struct finalizers
debug(SENTINEL) {} else
deprecated unittest
{
    __gshared int dtorCount;
    static struct S1
    {
        int x;

        ~this()
        {
            dtorCount++;
        }
    }

    dtorCount = 0;
    S1* s2 = new S1;
    GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]);
    assert(dtorCount == 1);
    GC.free(s2);

    dtorCount = 0;
    const(S1)* s3 = new const(S1);
    GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]);
    assert(dtorCount == 1);
    GC.free(cast(void*)s3);

    dtorCount = 0;
    shared(S1)* s4 = new shared(S1);
    GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]);
    assert(dtorCount == 1);
    GC.free(cast(void*)s4);

    dtorCount = 0;
    const(S1)[] carr1 = new const(S1)[5];
    BlkInfo blkinf1 = GC.query(carr1.ptr);
    GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]);
    assert(dtorCount == 5);
    GC.free(blkinf1.base);

    dtorCount = 0;
    S1[] arr2 = new S1[10];
    arr2.length = 6;
    arr2.assumeSafeAppend;
    assert(dtorCount == 4); // destructors run explicitly?
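    // (shrinking the slice from 10 to 6 elements and then calling
    // assumeSafeAppend destroys the 4 trimmed elements; the remaining 6 are
    // finalized below when the block's finalizers are run)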

    dtorCount = 0;
    BlkInfo blkinf = GC.query(arr2.ptr);
    GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]);
    assert(dtorCount == 6);
    GC.free(blkinf.base);

    // associative arrays
    import rt.aaA : entryDtor;
    // throw away all existing AA entries with dtor
    GC.runFinalizers((cast(char*)(&entryDtor))[0..1]);

    S1[int] aa1;
    aa1[0] = S1(0);
    aa1[1] = S1(1);
    dtorCount = 0;
    aa1 = null;
    GC.runFinalizers((cast(char*)(&entryDtor))[0..1]);
    assert(dtorCount == 2);

    int[S1] aa2;
    aa2[S1(0)] = 0;
    aa2[S1(1)] = 1;
    aa2[S1(2)] = 2;
    dtorCount = 0;
    aa2 = null;
    GC.runFinalizers((cast(char*)(&entryDtor))[0..1]);
    assert(dtorCount == 3);

    S1[2][int] aa3;
    aa3[0] = [S1(0), S1(2)];
    aa3[1] = [S1(1), S1(3)];
    dtorCount = 0;
    aa3 = null;
    GC.runFinalizers((cast(char*)(&entryDtor))[0..1]);
    assert(dtorCount == 4);
}

// test struct dtor handling not causing false pointers
unittest
{
    // for 64-bit, allocate a struct of size 40
    static struct S
    {
        size_t[4] data;
        S* ptr4;
    }
    auto p1 = new S;
    auto p2 = new S;
    p2.ptr4 = p1;

    // a struct with a dtor, of size 32; the dtor makes the allocation one
    // pointer larger (to store the TypeInfo used for finalization)
    static struct A
    {
        size_t[3] data;
        S* ptr3;

        ~this() {}
    }

    GC.free(p2);
    auto a = new A; // reuse same memory
    if (cast(void*)a is cast(void*)p2) // reuse not guaranteed
    {
        auto ptr = cast(S**)(a + 1);
        assert(*ptr != p1); // still same data as p2.ptr4?
    }

    // small array
    static struct SArr
    {
        void*[10] data;
    }
    auto arr1 = new SArr;
    arr1.data[] = p1;
    GC.free(arr1);

    // allocates 2*A.sizeof + (void*).sizeof (TypeInfo) + 1 (array length)
    auto arr2 = new A[2];
    if (cast(void*)arr1 is cast(void*)arr2.ptr) // reuse not guaranteed
    {
        auto ptr = cast(S**)(arr2.ptr + 2);
        assert(*ptr != p1); // still same data as p2.ptr4?
    }

    // large array
    static struct LArr
    {
        void*[1023] data;
    }
    auto larr1 = new LArr;
    larr1.data[] = p1;
    GC.free(larr1);

    auto larr2 = new S[255];
    if (cast(void*)larr1 is cast(void*)larr2.ptr - LARGEPREFIX) // reuse not guaranteed
    {
        auto ptr = cast(S**)larr1;
        assert(ptr[0] != p1); // 16-byte array header
        assert(ptr[1] != p1);
        version (D_LP64) {} else
        {
            assert(ptr[2] != p1);
            assert(ptr[3] != p1);
        }
    }
}

// test class finalizers exception handling
unittest
{
    bool test(E)()
    {
        import core.exception;
        static class C1
        {
            E exc;
            this(E exc) { this.exc = exc; }
            ~this() { throw exc; }
        }

        bool caught = false;
        C1 c = new C1(new E("test onFinalizeError"));
        try
        {
            GC.runFinalizers((cast(uint*)&C1.__dtor)[0..1]);
        }
        catch (FinalizeError err)
        {
            caught = true;
        }
        catch (E)
        {
        }
        GC.free(cast(void*)c);
        return caught;
    }

    assert( test!Exception);
    import core.exception : InvalidMemoryOperationError;
    assert(!test!InvalidMemoryOperationError);
}

// test struct finalizers exception handling
debug(SENTINEL) {} else
unittest
{
    bool test(E)()
    {
        import core.exception;
        static struct S1
        {
            E exc;
            ~this() { throw exc; }
        }

        bool caught = false;
        S1* s = new S1(new E("test onFinalizeError"));
        try
        {
            GC.runFinalizers((cast(char*)(typeid(S1).xdtor))[0..1]);
        }
        catch (FinalizeError err)
        {
            caught = true;
        }
        catch (E)
        {
        }
        GC.free(s);
        return caught;
    }

    assert( test!Exception);
    import core.exception : InvalidMemoryOperationError;
    assert(!test!InvalidMemoryOperationError);
}

// test bug 14126
unittest
{
    static struct S
    {
        S* thisptr;
        ~this() { assert(&this == thisptr); thisptr = null; }
    }

    S[] test14126 = new S[2048]; // make sure we allocate at least a PAGE
    foreach (ref s; test14126)
    {
        s.thisptr = &s;
    }
}
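
// The following test is a minimal sketch added for illustration (it is not part
// of the original test suite): appending a dchar to a char[]/wchar[] goes
// through _d_arrayappendcd/_d_arrayappendwd above, which encode the code point
// to UTF-8/UTF-16 before appending.
unittest
{
    char[] s;
    s ~= 'α';                     // U+03B1 encodes to two UTF-8 code units
    assert(s.length == 2);
    assert(s[0] == 0xCE && s[1] == 0xB1);

    wchar[] w;
    w ~= '\U0001F600';            // a non-BMP code point becomes a surrogate pair
    assert(w.length == 2);
    assert(w[0] == 0xD83D && w[1] == 0xDE00);
}

// Another illustrative sketch (not from the original file), assuming the
// documented behavior of `reserve`: appends that stay within the reserved
// capacity grow the array in place without moving it.
unittest
{
    int[] a;
    auto cap = a.reserve(50);     // returns the new capacity, at least 50
    assert(cap >= 50);
    auto p = a.ptr;
    foreach (i; 0 .. 50)
        a ~= i;
    assert(a.ptr is p);           // grew in place, no reallocation
}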