/**
 * The atomic module is intended to provide some basic support for lock-free
 * concurrent programming. Some common operations are defined, each of which
 * may be performed using the specified memory barrier or a less granular
 * barrier if the hardware does not support the version requested. This
 * model is based on a design by Alexander Terekhov as outlined in
 * $(LINK2 http://groups.google.com/groups?threadm=3E4820EE.6F408B25%40web.de,
 * this thread). Another useful reference for memory ordering on modern
 * architectures is $(LINK2 http://www.linuxjournal.com/article/8211, this
 * article by Paul McKenney).
 *
 * Copyright: Copyright (C) 2005-2006 Sean Kelly. All rights reserved.
 * License:   BSD style: $(LICENSE)
 * Authors:   Sean Kelly
 */
module tango.core.Atomic;


pragma(msg, "tango.core.Atomic is deprecated. Please use tango.core.sync.Atomic instead.");

deprecated:

////////////////////////////////////////////////////////////////////////////////
// Synchronization Options
////////////////////////////////////////////////////////////////////////////////


/**
 * Memory synchronization flag. If the supplied option is not available on the
 * current platform then a stronger method will be used instead.
 */
enum msync
{
    raw,    /// not sequenced
    hlb,    /// hoist-load barrier
    hsb,    /// hoist-store barrier
    slb,    /// sink-load barrier
    ssb,    /// sink-store barrier
    acq,    /// hoist-load + hoist-store barrier
    rel,    /// sink-load + sink-store barrier
    seq,    /// fully sequenced (acq + rel)
}


////////////////////////////////////////////////////////////////////////////////
// Internal Type Checking
////////////////////////////////////////////////////////////////////////////////


private
{
    version( TangoDoc ) {} else
    {
        import tango.core.Traits;


        template isValidAtomicType( T )
        {
            const bool isValidAtomicType = T.sizeof == byte.sizeof  ||
                                           T.sizeof == short.sizeof ||
                                           T.sizeof == int.sizeof   ||
                                           T.sizeof == long.sizeof;
        }


        template isValidNumericType( T )
        {
            const bool isValidNumericType = isIntegerType!( T ) ||
                                            isPointerType!( T );
        }


        template isHoistOp( msync ms )
        {
            const bool isHoistOp = ms == msync.hlb ||
                                   ms == msync.hsb ||
                                   ms == msync.acq ||
                                   ms == msync.seq;
        }


        template isSinkOp( msync ms )
        {
            const bool isSinkOp = ms == msync.slb ||
                                  ms == msync.ssb ||
                                  ms == msync.rel ||
                                  ms == msync.seq;
        }
    }
}


////////////////////////////////////////////////////////////////////////////////
// DDoc Documentation for Atomic Functions
////////////////////////////////////////////////////////////////////////////////


version( TangoDoc )
{
    ////////////////////////////////////////////////////////////////////////////
    // Atomic Load
    ////////////////////////////////////////////////////////////////////////////


    /**
     * Supported msync values:
     *  msync.raw,
     *  msync.hlb,
     *  msync.acq,
     *  msync.seq
     */
    template atomicLoad( msync ms, T )
    {
        /**
         * Refreshes the contents of 'val' from main memory. This operation is
         * both lock-free and atomic.
         *
         * Params:
         *  val = The value to load. This value must be properly aligned.
         *
         * Returns:
         *  The loaded value.
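         *
         * Example:
         * ---
         * // a minimal usage sketch; 'flag' is a hypothetical shared variable
         * int flag;
         * auto cur = atomicLoad!(msync.acq, int)( flag );
         * ---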
         */
        T atomicLoad( ref T val )
        {
            return val;
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store
    ////////////////////////////////////////////////////////////////////////////


    /**
     * Supported msync values:
     *  msync.raw,
     *  msync.ssb,
     *  msync.acq,
     *  msync.rel,
     *  msync.seq
     */
    template atomicStore( msync ms, T )
    {
        /**
         * Stores 'newval' to the memory referenced by 'val'. This operation
         * is both lock-free and atomic.
         *
         * Params:
         *  val    = The destination variable.
         *  newval = The value to store.
         */
        void atomicStore( ref T val, T newval )
        {

        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic StoreIf
    ////////////////////////////////////////////////////////////////////////////


    /**
     * Supported msync values:
     *  msync.raw,
     *  msync.ssb,
     *  msync.acq,
     *  msync.rel,
     *  msync.seq
     */
    template atomicStoreIf( msync ms, T )
    {
        /**
         * Stores 'newval' to the memory referenced by 'val' if val is equal to
         * 'equalTo'. This operation is both lock-free and atomic.
         *
         * Params:
         *  val     = The destination variable.
         *  newval  = The value to store.
         *  equalTo = The comparison value.
         *
         * Returns:
         *  true if the store occurred, false if not.
         */
        bool atomicStoreIf( ref T val, T newval, T equalTo )
        {
            return false;
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Increment
    ////////////////////////////////////////////////////////////////////////////


    /**
     * Supported msync values:
     *  msync.raw,
     *  msync.ssb,
     *  msync.acq,
     *  msync.rel,
     *  msync.seq
     */
    template atomicIncrement( msync ms, T )
    {
        /**
         * This operation is only legal for built-in value and pointer types,
         * and is equivalent to an atomic "val = val + 1" operation. This
         * function exists to facilitate use of the optimized increment
         * instructions provided by some architectures. If no such instruction
         * exists on the target platform then the operation will be performed
         * using more traditional means. This operation is both lock-free and
         * atomic.
         *
         * Params:
         *  val = The value to increment.
         *
         * Returns:
         *  The result of an atomicLoad of val immediately following the
         *  increment operation. This value is not required to be equal to the
         *  newly stored value. Thus, competing writes are allowed to occur
         *  between the increment and successive load operation.
         */
        T atomicIncrement( ref T val )
        {
            return val;
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Decrement
    ////////////////////////////////////////////////////////////////////////////


    /**
     * Supported msync values:
     *  msync.raw,
     *  msync.ssb,
     *  msync.acq,
     *  msync.rel,
     *  msync.seq
     */
    template atomicDecrement( msync ms, T )
    {
        /**
         * This operation is only legal for built-in value and pointer types,
         * and is equivalent to an atomic "val = val - 1" operation. This
         * function exists to facilitate use of the optimized decrement
         * instructions provided by some architectures.
         * If no such instruction exists on the target platform then the
         * operation will be performed using more traditional means. This
         * operation is both lock-free and atomic.
         *
         * Params:
         *  val = The value to decrement.
         *
         * Returns:
         *  The result of an atomicLoad of val immediately following the
         *  decrement operation. This value is not required to be equal to the
         *  newly stored value. Thus, competing writes are allowed to occur
         *  between the decrement and successive load operation.
         */
        T atomicDecrement( ref T val )
        {
            return val;
        }
    }
}


////////////////////////////////////////////////////////////////////////////////
// LDC Atomics Implementation
////////////////////////////////////////////////////////////////////////////////


else version( LDC )
{
    import ldc.intrinsics;

    private AtomicOrdering getOrdering(msync ms)
    {
        if (ms == msync.acq)
            return AtomicOrdering.Acquire;
        else if (ms == msync.rel)
            return AtomicOrdering.Release;
        else if (ms == msync.seq)
            return AtomicOrdering.SequentiallyConsistent;
        else if (ms == msync.raw)
            return AtomicOrdering.NotAtomic;
        else
            assert(0);
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Load
    ////////////////////////////////////////////////////////////////////////////


    template atomicLoad( msync ms = msync.seq, T )
    {
        T atomicLoad(ref T val)
        {
            AtomicOrdering ordering = getOrdering(ms == msync.acq ? msync.seq : ms);

            static if (isPointerType!(T))
            {
                return cast(T)llvm_atomic_load!(size_t)(cast(size_t*)&val, ordering);
            }
            else static if (is(T == bool))
            {
                return cast(T)llvm_atomic_load!(ubyte)(cast(ubyte*)&val, ordering);
            }
            else
            {
                return cast(T)llvm_atomic_load!(T)(cast(T*)&val, ordering);
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store
    ////////////////////////////////////////////////////////////////////////////


    template atomicStore( msync ms = msync.seq, T )
    {
        void atomicStore( ref T val, T newval )
        {
            AtomicOrdering ordering = getOrdering(ms == msync.rel ? msync.seq : ms);

            static if (isPointerType!(T))
            {
                llvm_atomic_store!(size_t)(cast(size_t)newval, cast(size_t*)&val, ordering);
            }
            else static if (is(T == bool))
            {
                llvm_atomic_store!(ubyte)(newval, cast(ubyte*)&val, ordering);
            }
            else
            {
                llvm_atomic_store!(T)(cast(T)newval, cast(T*)&val, ordering);
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store If
    ////////////////////////////////////////////////////////////////////////////


    template atomicStoreIf( msync ms = msync.seq, T )
    {
        bool atomicStoreIf( ref T val, T newval, T equalTo )
        {
            AtomicOrdering ordering = getOrdering(ms == msync.rel ? msync.seq : ms);

            T oldval = void;
            static if (isPointerType!(T))
            {
                oldval = cast(T)llvm_atomic_cmp_swap!(size_t)(cast(size_t*)&val, cast(size_t)equalTo, cast(size_t)newval, ordering);
            }
            else static if (is(T == bool))
            {
                // NOTE: the intrinsic returns the old byte value, so convert
                //       it to bool directly rather than inverting it
                oldval = llvm_atomic_cmp_swap!(ubyte)(cast(ubyte*)&val, equalTo?1:0, newval?1:0, ordering) != 0;
            }
            else
            {
                oldval = llvm_atomic_cmp_swap!(T)(&val, equalTo, newval, ordering);
            }
            return oldval == equalTo;
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Increment
    ////////////////////////////////////////////////////////////////////////////


    template atomicIncrement( msync ms = msync.seq, T )
    {
        //
        // NOTE: This operation is only valid for integer or pointer types
        //
        static assert( isValidNumericType!(T) );


        T atomicIncrement( ref T val )
        {
            static if (isPointerType!(T))
            {
                llvm_atomic_load_add!(size_t)(cast(size_t*)&val, 1);
            }
            else
            {
                llvm_atomic_load_add!(T)(&val, cast(T)1);
            }
            return val;
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Decrement
    ////////////////////////////////////////////////////////////////////////////


    template atomicDecrement( msync ms = msync.seq, T )
    {
        //
        // NOTE: This operation is only valid for integer or pointer types
        //
        static assert( isValidNumericType!(T) );


        T atomicDecrement( ref T val )
        {
            static if (isPointerType!(T))
            {
                llvm_atomic_load_sub!(size_t)(cast(size_t*)&val, 1);
            }
            else
            {
                llvm_atomic_load_sub!(T)(&val, cast(T)1);
            }
            return val;
        }
    }
}

////////////////////////////////////////////////////////////////////////////////
// x86 Atomic Function Implementation
////////////////////////////////////////////////////////////////////////////////


else version( D_InlineAsm_X86 )
{
    version( X86 )
    {
        version( BuildInfo )
        {
            pragma( msg, "tango.core.Atomic: using IA-32 inline asm" );
        }

        version(darwin){
            extern(C) bool OSAtomicCompareAndSwap64(long oldValue, long newValue, long *theValue);
            extern(C) bool OSAtomicCompareAndSwap64Barrier(long oldValue, long newValue, long *theValue);
        }
        version = Has64BitCAS;
        version = Has32BitOps;
    }
    version( X86_64 )
    {
        version( BuildInfo )
        {
            pragma( msg, "tango.core.Atomic: using AMD64 inline asm" );
        }

        version = Has64BitOps;
    }

    private
    {
        ////////////////////////////////////////////////////////////////////////
        // x86 Value Requirements
        ////////////////////////////////////////////////////////////////////////


        // NOTE: Strictly speaking, the x86 supports atomic operations on
        //       unaligned values. However, this is far slower than the
        //       common case, so such behavior should be prohibited.
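        //
        //       For example, a long at address 12 fails the alignment check
        //       below (12 % 8 == 4), while a long at address 16 passes.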
        template atomicValueIsProperlyAligned( T )
        {
            bool atomicValueIsProperlyAligned( size_t addr )
            {
                return addr % T.sizeof == 0;
            }
        }


        ////////////////////////////////////////////////////////////////////////
        // x86 Synchronization Requirements
        ////////////////////////////////////////////////////////////////////////


        // NOTE: While x86 loads have acquire semantics with respect to stores,
        //       it appears that independent loads may be reordered by some
        //       processors (notably the AMD64). This implies that the
        //       hoist-load barrier op requires an ordering instruction, which
        //       also extends this requirement to acquire ops (though
        //       hoist-store should not need one if support is added for this
        //       later). However, since no modern architectures will reorder
        //       dependent loads to occur before the load they depend on
        //       (except the Alpha), raw loads are actually a possible means
        //       of ordering specific sequences of loads in some instances.
        //       The original atomic<> implementation provides a 'ddhlb'
        //       ordering specifier for data-dependent loads to handle this
        //       situation, but as there are no plans to support the Alpha
        //       there is no reason to add that option here.
        //
        //       For reference, the old behavior (acquire semantics for loads)
        //       required a memory barrier if: ms == msync.seq || isSinkOp!(ms)
        template needsLoadBarrier( msync ms )
        {
            const bool needsLoadBarrier = ms != msync.raw;
        }


        // NOTE: x86 stores implicitly have release semantics so a membar is
        //       only necessary on acquires.
        template needsStoreBarrier( msync ms )
        {
            const bool needsStoreBarrier = ms == msync.seq || isHoistOp!(ms);
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Load
    ////////////////////////////////////////////////////////////////////////////


    template atomicLoad( msync ms = msync.seq, T )
    {
        T atomicLoad( ref T val )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof == byte.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 1 Byte Load
                ////////////////////////////////////////////////////////////////


                static if( needsLoadBarrier!(ms) )
                {
                    volatile asm
                    {
                        mov DL, 42;
                        mov AL, 42;
                        mov ECX, val;
                        lock;
                        cmpxchg [ECX], DL;
                    }
                }
                else
                {
                    volatile
                    {
                        return val;
                    }
                }
            }
            else static if( T.sizeof == short.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 2 Byte Load
                ////////////////////////////////////////////////////////////////

                static if( needsLoadBarrier!(ms) )
                {
                    volatile asm
                    {
                        mov DX, 42;
                        mov AX, 42;
                        mov ECX, val;
                        lock;
                        cmpxchg [ECX], DX;
                    }
                }
                else
                {
                    volatile
                    {
                        return val;
                    }
                }
            }
            else static if( T.sizeof == int.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 4 Byte Load
                ////////////////////////////////////////////////////////////////


                static if( needsLoadBarrier!(ms) )
                {
                    volatile asm
                    {
                        mov EDX, 42;
                        mov EAX, 42;
                        mov ECX, val;
                        lock;
                        cmpxchg [ECX], EDX;
                    }
                }
                else
                {
                    volatile
                    {
                        return val;
                    }
                }
            }
            else static if( T.sizeof == long.sizeof )
            {
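                // NOTE: A plain 8 byte load cannot be performed with a single
                //       32-bit mov, so this case is only implemented for
                //       64-bit targets below; an IA-32 emulation via
                //       cmpxchg8b would be possible but is not provided here.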
                ////////////////////////////////////////////////////////////////
                // 8 Byte Load
                ////////////////////////////////////////////////////////////////


                version( Has64BitOps )
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Load on 64-Bit Processor
                    ////////////////////////////////////////////////////////////


                    static if( needsLoadBarrier!(ms) )
                    {
                        // NOTE: lock is not a legal prefix for mov, so a
                        //       lock cmpxchg is used here as well, matching
                        //       the smaller load sizes
                        volatile asm
                        {
                            mov RDX, 42;
                            mov RAX, 42;
                            mov RCX, val;
                            lock;
                            cmpxchg [RCX], RDX;
                        }
                    }
                    else
                    {
                        volatile
                        {
                            return val;
                        }
                    }
                }
                else
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Load on 32-Bit Processor
                    ////////////////////////////////////////////////////////////


                    pragma( msg, "This operation is only available on 64-bit platforms." );
                    static assert( false );
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // Not a 1, 2, 4, or 8 Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store
    ////////////////////////////////////////////////////////////////////////////


    template atomicStore( msync ms = msync.seq, T )
    {
        void atomicStore( ref T val, T newval )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof == byte.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 1 Byte Store
                ////////////////////////////////////////////////////////////////


                static if( needsStoreBarrier!(ms) )
                {
                    volatile asm
                    {
                        mov EAX, val;
                        mov DL, newval;
                        lock;
                        xchg [EAX], DL;
                    }
                }
                else
                {
                    volatile asm
                    {
                        mov EAX, val;
                        mov DL, newval;
                        mov [EAX], DL;
                    }
                }
            }
            else static if( T.sizeof == short.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 2 Byte Store
                ////////////////////////////////////////////////////////////////


                static if( needsStoreBarrier!(ms) )
                {
                    volatile asm
                    {
                        mov EAX, val;
                        mov DX, newval;
                        lock;
                        xchg [EAX], DX;
                    }
                }
                else
                {
                    volatile asm
                    {
                        mov EAX, val;
                        mov DX, newval;
                        mov [EAX], DX;
                    }
                }
            }
            else static if( T.sizeof == int.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 4 Byte Store
                ////////////////////////////////////////////////////////////////


                static if( needsStoreBarrier!(ms) )
                {
                    volatile asm
                    {
                        mov EAX, val;
                        mov EDX, newval;
                        lock;
                        xchg [EAX], EDX;
                    }
                }
                else
                {
                    volatile asm
                    {
                        mov EAX, val;
                        mov EDX, newval;
                        mov [EAX], EDX;
                    }
                }
            }
            else static if( T.sizeof == long.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 8 Byte Store
                ////////////////////////////////////////////////////////////////


                version( Has64BitOps )
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Store on 64-Bit Processor
                    ////////////////////////////////////////////////////////////


                    static if( needsStoreBarrier!(ms) )
                    {
                        volatile asm
                        {
                            mov RAX, val;
                            mov RDX, newval;
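                            // NOTE: xchg with a memory operand implicitly
                            //       asserts LOCK, so the prefix below is
                            //       redundant but harmless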
                            lock;
                            xchg [RAX], RDX;
                        }
                    }
                    else
                    {
                        volatile asm
                        {
                            mov RAX, val;
                            mov RDX, newval;
                            mov [RAX], RDX;
                        }
                    }
                }
                else
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Store on 32-Bit Processor
                    ////////////////////////////////////////////////////////////


                    pragma( msg, "This operation is only available on 64-bit platforms." );
                    static assert( false );
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // Not a 1, 2, 4, or 8 Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store If
    ////////////////////////////////////////////////////////////////////////////


    template atomicStoreIf( msync ms = msync.seq, T )
    {
        bool atomicStoreIf( ref T val, T newval, T equalTo )
        in
        {
            // NOTE: 32 bit x86 systems support 8 byte CAS, which only requires
            //       4 byte alignment, so use size_t as the align type here.
            static if( T.sizeof > size_t.sizeof )
                assert( atomicValueIsProperlyAligned!(size_t)( cast(size_t) &val ) );
            else
                assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof == byte.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 1 Byte StoreIf
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov DL, newval;
                    mov AL, equalTo;
                    mov ECX, val;
                    lock; // lock always needed to make this op atomic
                    cmpxchg [ECX], DL;
                    setz AL;
                }
            }
            else static if( T.sizeof == short.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 2 Byte StoreIf
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov DX, newval;
                    mov AX, equalTo;
                    mov ECX, val;
                    lock; // lock always needed to make this op atomic
                    cmpxchg [ECX], DX;
                    setz AL;
                }
            }
            else static if( T.sizeof == int.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 4 Byte StoreIf
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov EDX, newval;
                    mov EAX, equalTo;
                    mov ECX, val;
                    lock; // lock always needed to make this op atomic
                    cmpxchg [ECX], EDX;
                    setz AL;
                }
            }
            else static if( T.sizeof == long.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 8 Byte StoreIf
                ////////////////////////////////////////////////////////////////


                version( Has64BitOps )
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte StoreIf on 64-Bit Processor
                    ////////////////////////////////////////////////////////////


                    volatile asm
                    {
                        mov RDX, newval;
                        mov RAX, equalTo;
                        mov RCX, val;
                        lock; // lock always needed to make this op atomic
                        cmpxchg [RCX], RDX;
                        setz AL;
                    }
                }
                else version( Has64BitCAS )
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte StoreIf on 32-Bit Processor
                    ////////////////////////////////////////////////////////////
                    version(darwin){
                        static if(ms==msync.raw){
                            return OSAtomicCompareAndSwap64(cast(long)equalTo, cast(long)newval, cast(long*)&val);
                        } else {
                            return OSAtomicCompareAndSwap64Barrier(cast(long)equalTo, cast(long)newval, cast(long*)&val);
                        }
                    } else {
                        volatile asm
                        {
                            push EDI;
                            push EBX;
                            lea EDI, newval;
                            mov EBX, [EDI];
                            mov ECX, 4[EDI];
                            lea EDI, equalTo;
                            mov EAX, [EDI];
                            mov EDX, 4[EDI];
                            mov EDI, val;
                            lock; // lock always needed to make this op atomic
                            cmpxchg8b [EDI];
                            setz AL;
                            pop EBX;
                            pop EDI;
                        }
                    }
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // Not a 1, 2, 4, or 8 Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Increment
    ////////////////////////////////////////////////////////////////////////////


    template atomicIncrement( msync ms = msync.seq, T )
    {
        //
        // NOTE: This operation is only valid for integer or pointer types
        //
        static assert( isValidNumericType!(T) );


        T atomicIncrement( ref T val )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof == byte.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 1 Byte Increment
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov EAX, val;
                    lock; // lock always needed to make this op atomic
                    inc byte ptr [EAX];
                    mov AL, [EAX];
                }
            }
            else static if( T.sizeof == short.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 2 Byte Increment
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov EAX, val;
                    lock; // lock always needed to make this op atomic
                    inc short ptr [EAX];
                    mov AX, [EAX];
                }
            }
            else static if( T.sizeof == int.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 4 Byte Increment
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov EAX, val;
                    lock; // lock always needed to make this op atomic
                    inc int ptr [EAX];
                    mov EAX, [EAX];
                }
            }
            else static if( T.sizeof == long.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 8 Byte Increment
                ////////////////////////////////////////////////////////////////


                version( Has64BitOps )
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Increment on 64-Bit Processor
                    ////////////////////////////////////////////////////////////


                    volatile asm
                    {
                        mov RAX, val;
                        lock; // lock always needed to make this op atomic
                        inc qword ptr [RAX];
                        mov RAX, [RAX];
                    }
                }
                else
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Increment on 32-Bit Processor
                    ////////////////////////////////////////////////////////////


                    pragma( msg, "This operation is only available on 64-bit platforms." );
                    static assert( false );
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // Not a 1, 2, 4, or 8 Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Decrement
    ////////////////////////////////////////////////////////////////////////////


    template atomicDecrement( msync ms = msync.seq, T )
    {
        //
        // NOTE: This operation is only valid for integer or pointer types
        //
        static assert( isValidNumericType!(T) );


        T atomicDecrement( ref T val )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof == byte.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 1 Byte Decrement
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov EAX, val;
                    lock; // lock always needed to make this op atomic
                    dec byte ptr [EAX];
                    mov AL, [EAX];
                }
            }
            else static if( T.sizeof == short.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 2 Byte Decrement
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov EAX, val;
                    lock; // lock always needed to make this op atomic
                    dec short ptr [EAX];
                    mov AX, [EAX];
                }
            }
            else static if( T.sizeof == int.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 4 Byte Decrement
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov EAX, val;
                    lock; // lock always needed to make this op atomic
                    dec int ptr [EAX];
                    mov EAX, [EAX];
                }
            }
            else static if( T.sizeof == long.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 8 Byte Decrement
                ////////////////////////////////////////////////////////////////


                version( Has64BitOps )
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Decrement on 64-Bit Processor
                    ////////////////////////////////////////////////////////////


                    volatile asm
                    {
                        mov RAX, val;
                        lock; // lock always needed to make this op atomic
                        dec qword ptr [RAX];
                        mov RAX, [RAX];
                    }
                }
                else
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Decrement on 32-Bit Processor
                    ////////////////////////////////////////////////////////////


                    pragma( msg, "This operation is only available on 64-bit platforms." );
                    static assert( false );
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // Not a 1, 2, 4, or 8 Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }
}
else
{
    version( BuildInfo )
    {
        pragma( msg, "tango.core.Atomic: using synchronized ops" );
    }

    private
    {
        ////////////////////////////////////////////////////////////////////////
        // Default Value Requirements
        ////////////////////////////////////////////////////////////////////////


        template atomicValueIsProperlyAligned( T )
        {
            bool atomicValueIsProperlyAligned( size_t addr )
            {
                return addr % T.sizeof == 0;
            }
        }


        ////////////////////////////////////////////////////////////////////////
        // Default Synchronization Requirements
        ////////////////////////////////////////////////////////////////////////


        template needsLoadBarrier( msync ms )
        {
            const bool needsLoadBarrier = ms != msync.raw;
        }


        template needsStoreBarrier( msync ms )
        {
            const bool needsStoreBarrier = ms != msync.raw;
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Load
    ////////////////////////////////////////////////////////////////////////////


    template atomicLoad( msync ms = msync.seq, T )
    {
        T atomicLoad( ref T val )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof <= (void*).sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // <= (void*).sizeof Byte Load
                ////////////////////////////////////////////////////////////////


                static if( needsLoadBarrier!(ms) )
                {
                    synchronized
                    {
                        return val;
                    }
                }
                else
                {
                    volatile
                    {
                        return val;
                    }
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // > (void*).sizeof Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store
    ////////////////////////////////////////////////////////////////////////////


    template atomicStore( msync ms = msync.seq, T )
    {
        void atomicStore( ref T val, T newval )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof <= (void*).sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // <= (void*).sizeof Byte Store
                ////////////////////////////////////////////////////////////////


                static if( needsStoreBarrier!(ms) )
                {
                    synchronized
                    {
                        val = newval;
                    }
                }
                else
                {
                    volatile
                    {
                        val = newval;
                    }
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // > (void*).sizeof Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store If
    ////////////////////////////////////////////////////////////////////////////


    template atomicStoreIf( msync ms = msync.seq, T )
    {
        bool atomicStoreIf( ref T val, T newval, T equalTo )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof <= (void*).sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // <= (void*).sizeof Byte StoreIf
                ////////////////////////////////////////////////////////////////


                synchronized
                {
                    if( val == equalTo )
                    {
                        val = newval;
                        return true;
                    }
                    return false;
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // > (void*).sizeof Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Increment
    ////////////////////////////////////////////////////////////////////////////


    template atomicIncrement( msync ms = msync.seq, T )
    {
        //
        // NOTE: This operation is only valid for integer or pointer types
        //
        static assert( isValidNumericType!(T) );


        T atomicIncrement( ref T val )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof <= (void*).sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // <= (void*).sizeof Byte Increment
                ////////////////////////////////////////////////////////////////


                synchronized
                {
                    return ++val;
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // > (void*).sizeof Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Decrement
    ////////////////////////////////////////////////////////////////////////////


    template atomicDecrement( msync ms = msync.seq, T )
    {
        //
        // NOTE: This operation is only valid for integer or pointer types
        //
        static assert( isValidNumericType!(T) );


        T atomicDecrement( ref T val )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof <= (void*).sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // <= (void*).sizeof Byte Decrement
                ////////////////////////////////////////////////////////////////


                synchronized
                {
                    return --val;
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // > (void*).sizeof Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }
}


////////////////////////////////////////////////////////////////////////////////
// Atomic
////////////////////////////////////////////////////////////////////////////////


/**
 * This struct represents a value which will be subject to competing access.
 * All accesses to this value will be synchronized with main memory, and
 * various memory barriers may be employed for instruction ordering. Any
 * primitive type of size equal to or smaller than the memory bus size is
 * allowed, so 32-bit machines may use values with size <= int.sizeof and
 * 64-bit machines may use values with size <= long.sizeof. The one exception
 * to this rule is that architectures that support DCAS will allow double-wide
 * storeIf operations. The 32-bit x86 architecture, for example, supports
 * 64-bit storeIf operations.
 */
struct Atomic( T )
{
    ////////////////////////////////////////////////////////////////////////////
    // Atomic Load
    ////////////////////////////////////////////////////////////////////////////


    template load( msync ms = msync.seq )
    {
        static assert( ms == msync.raw || ms == msync.hlb ||
                       ms == msync.acq || ms == msync.seq,
                       "ms must be one of: msync.raw, msync.hlb, msync.acq, msync.seq" );

        /**
         * Refreshes the contents of this value from main memory. This
         * operation is both lock-free and atomic.
         *
         * Returns:
         *  The loaded value.
         */
        T load()
        {
            return atomicLoad!(ms,T)( m_val );
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store
    ////////////////////////////////////////////////////////////////////////////


    template store( msync ms = msync.seq )
    {
        static assert( ms == msync.raw || ms == msync.ssb ||
                       ms == msync.acq || ms == msync.rel ||
                       ms == msync.seq,
                       "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );

        /**
         * Stores 'newval' to the memory referenced by this value. This
         * operation is both lock-free and atomic.
         *
         * Params:
         *  newval = The value to store.
         */
        void store( T newval )
        {
            atomicStore!(ms,T)( m_val, newval );
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic StoreIf
    ////////////////////////////////////////////////////////////////////////////


    template storeIf( msync ms = msync.seq )
    {
        static assert( ms == msync.raw || ms == msync.ssb ||
                       ms == msync.acq || ms == msync.rel ||
                       ms == msync.seq,
                       "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );

        /**
         * Stores 'newval' to the memory referenced by this value if val is
         * equal to 'equalTo'. This operation is both lock-free and atomic.
         *
         * Params:
         *  newval  = The value to store.
         *  equalTo = The comparison value.
         *
         * Returns:
         *  true if the store occurred, false if not.
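         *
         * Example:
         * ---
         * // a minimal sketch: claim a slot only if it is still unset
         * Atomic!(int) slot;
         * bool claimed = slot.storeIf!(msync.seq)( 42, 0 );
         * ---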
         */
        bool storeIf( T newval, T equalTo )
        {
            return atomicStoreIf!(ms,T)( m_val, newval, equalTo );
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Numeric Functions
    ////////////////////////////////////////////////////////////////////////////

    version( TangoDoc )
    {
        /**
         * The following additional functions are available for integer types.
         */
        ////////////////////////////////////////////////////////////////////////
        // Atomic Increment
        ////////////////////////////////////////////////////////////////////////


        template increment( msync ms = msync.seq )
        {
            /**
             * This operation is only legal for built-in value and pointer
             * types, and is equivalent to an atomic "val = val + 1" operation.
             * This function exists to facilitate use of the optimized
             * increment instructions provided by some architectures. If no
             * such instruction exists on the target platform then the
             * operation will be performed using more traditional means. This
             * operation is both lock-free and atomic.
             *
             * Returns:
             *  The result of an atomicLoad of val immediately following the
             *  increment operation. This value is not required to be equal to
             *  the newly stored value. Thus, competing writes are allowed to
             *  occur between the increment and successive load operation.
             */
            T increment()
            {
                return m_val;
            }
        }


        ////////////////////////////////////////////////////////////////////////
        // Atomic Decrement
        ////////////////////////////////////////////////////////////////////////


        template decrement( msync ms = msync.seq )
        {
            /**
             * This operation is only legal for built-in value and pointer
             * types, and is equivalent to an atomic "val = val - 1" operation.
             * This function exists to facilitate use of the optimized
             * decrement instructions provided by some architectures. If no
             * such instruction exists on the target platform then the
             * operation will be performed using more traditional means. This
             * operation is both lock-free and atomic.
             *
             * Returns:
             *  The result of an atomicLoad of val immediately following the
             *  decrement operation. This value is not required to be equal to
             *  the newly stored value. Thus, competing writes are allowed to
             *  occur between the decrement and successive load operation.
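             *
             * Example:
             * ---
             * // a minimal sketch of a shared countdown
             * Atomic!(int) remaining;
             * remaining.store( 10 );
             * auto left = remaining.decrement();
             * ---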
             */
            T decrement()
            {
                return m_val;
            }
        }
    }
    else
    {
        static if( isValidNumericType!(T) )
        {
            ////////////////////////////////////////////////////////////////////
            // Atomic Increment
            ////////////////////////////////////////////////////////////////////


            template increment( msync ms = msync.seq )
            {
                static assert( ms == msync.raw || ms == msync.ssb ||
                               ms == msync.acq || ms == msync.rel ||
                               ms == msync.seq,
                               "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );
                T increment()
                {
                    return atomicIncrement!(ms,T)( m_val );
                }
            }


            ////////////////////////////////////////////////////////////////////
            // Atomic Decrement
            ////////////////////////////////////////////////////////////////////


            template decrement( msync ms = msync.seq )
            {
                static assert( ms == msync.raw || ms == msync.ssb ||
                               ms == msync.acq || ms == msync.rel ||
                               ms == msync.seq,
                               "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );
                T decrement()
                {
                    return atomicDecrement!(ms,T)( m_val );
                }
            }
        }
    }

private:
    T m_val;
}


////////////////////////////////////////////////////////////////////////////////
// Support Code for Unit Tests
////////////////////////////////////////////////////////////////////////////////


private
{
    version( TangoDoc ) {} else
    {
        template testLoad( msync ms, T )
        {
            void testLoad( T val = T.init + 1 )
            {
                T base;
                Atomic!(T) atom;

                assert( atom.load!(ms)() == base );
                base = val;
                atom.m_val = val;
                assert( atom.load!(ms)() == base );
            }
        }


        template testStore( msync ms, T )
        {
            void testStore( T val = T.init + 1 )
            {
                T base;
                Atomic!(T) atom;

                assert( atom.m_val == base );
                base = val;
                atom.store!(ms)( base );
                assert( atom.m_val == base );
            }
        }


        template testStoreIf( msync ms, T )
        {
            void testStoreIf( T val = T.init + 1 )
            {
                T base;
                Atomic!(T) atom;

                assert( atom.m_val == base );
                base = val;
                atom.storeIf!(ms)( base, val );
                assert( atom.m_val != base );
                atom.storeIf!(ms)( base, T.init );
                assert( atom.m_val == base );
            }
        }


        template testIncrement( msync ms, T )
        {
            void testIncrement( T val = T.init + 1 )
            {
                T base = val;
                T incr = val;
                Atomic!(T) atom;

                atom.m_val = val;
                assert( atom.m_val == base && incr == base );
                base = cast(T)( base + 1 );
                incr = atom.increment!(ms)();
                assert( atom.m_val == base && incr == base );
            }
        }


        template testDecrement( msync ms, T )
        {
            void testDecrement( T val = T.init + 1 )
            {
                T base = val;
                T decr = val;
                Atomic!(T) atom;

                atom.m_val = val;
                assert( atom.m_val == base && decr == base );
                base = cast(T)( base - 1 );
                decr = atom.decrement!(ms)();
                assert( atom.m_val == base && decr == base );
            }
        }


        template testType( T )
        {
            void testType( T val = T.init + 1 )
            {
                testLoad!(msync.raw, T)( val );
                testLoad!(msync.hlb, T)( val );
                testLoad!(msync.acq, T)( val );
                testLoad!(msync.seq, T)( val );

                testStore!(msync.raw, T)( val );
                testStore!(msync.ssb, T)( val );
                testStore!(msync.acq, T)( val );
                testStore!(msync.rel, T)( val );
                testStore!(msync.seq, T)( val );

                testStoreIf!(msync.raw, T)( val );
                testStoreIf!(msync.ssb, T)( val );
                testStoreIf!(msync.acq, T)( val );
                testStoreIf!(msync.rel, T)( val );
                testStoreIf!(msync.seq, T)( val );

                static if( isValidNumericType!(T) )
                {
                    testIncrement!(msync.raw, T)( val );
                    testIncrement!(msync.ssb, T)( val );
                    testIncrement!(msync.acq, T)( val );
                    testIncrement!(msync.rel, T)( val );
                    testIncrement!(msync.seq, T)( val );

                    testDecrement!(msync.raw, T)( val );
                    testDecrement!(msync.ssb, T)( val );
                    testDecrement!(msync.acq, T)( val );
                    testDecrement!(msync.rel, T)( val );
                    testDecrement!(msync.seq, T)( val );
                }
            }
        }
    }
}


////////////////////////////////////////////////////////////////////////////////
// Unit Tests
////////////////////////////////////////////////////////////////////////////////


debug( UnitTest )
{
    unittest
    {
        testType!(bool)();

        testType!(byte)();
        testType!(ubyte)();

        testType!(short)();
        testType!(ushort)();

        testType!(int)();
        testType!(uint)();

        version( Has64BitOps )
        {
            testType!(long)();
            testType!(ulong)();
        }
        else version( Has64BitCAS )
        {
            testStoreIf!(msync.raw, long)();
            testStoreIf!(msync.ssb, long)();
            testStoreIf!(msync.acq, long)();
            testStoreIf!(msync.rel, long)();
            testStoreIf!(msync.seq, long)();

            testStoreIf!(msync.raw, ulong)();
            testStoreIf!(msync.ssb, ulong)();
            testStoreIf!(msync.acq, ulong)();
            testStoreIf!(msync.rel, ulong)();
            testStoreIf!(msync.seq, ulong)();
        }
    }
}


////////////////////////////////////////////////////////////////////////////////
// Manual Smoke Test
////////////////////////////////////////////////////////////////////////////////


debug(Atomic)
{
    void main()
    {
        Atomic!(int) i;

        i.store (1);
        i.increment;
        i.decrement;
        auto x = i.load;
        i.store (2);

        x = atomicLoad (x);
    }
}
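

////////////////////////////////////////////////////////////////////////////////
// Usage Sketch
////////////////////////////////////////////////////////////////////////////////


// A minimal sketch (not part of the original API) of how storeIf can build a
// simple spinlock. The names SpinLockExample, lock, and unlock are
// hypothetical and exist only for illustration.
debug( AtomicExample )
{
    struct SpinLockExample
    {
        Atomic!(int) flag;

        void lock()
        {
            // spin until the flag is atomically swung from 0 to 1
            while( !flag.storeIf!(msync.acq)( 1, 0 ) ) {}
        }

        void unlock()
        {
            // publish all writes made inside the critical section
            flag.store!(msync.rel)( 0 );
        }
    }
}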