5042 stop using deprecated atomic functions

          --- old/usr/src/uts/i86pc/vm/hat_i86.c
          +++ new/usr/src/uts/i86pc/vm/hat_i86.c
------ 936 lines elided ------
 937  937  #if defined(__i386)
 938  938          size = (uintptr_t)ekernelheap - segmap_start;
 939  939  #elif defined(__amd64)
 940  940          size = segmapsize;
 941  941  #endif
 942  942          hat_kmap_init((uintptr_t)segmap_start, size);
 943  943  }
 944  944  
 945  945  /*
 946  946   * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
 947      - * are 32 bit, so for safety we must use cas64() to install these.
      947 + * are 32 bit, so for safety we must use atomic_cas_64() to install these.
 948  948   */
 949  949  #ifdef __i386
 950  950  static void
 951  951  reload_pae32(hat_t *hat, cpu_t *cpu)
 952  952  {
 953  953          x86pte_t *src;
 954  954          x86pte_t *dest;
 955  955          x86pte_t pte;
 956  956          int i;
 957  957  
------ 2 lines elided ------
 960  960           * cpu's range of the vlp_page and point cr3 at them.
 961  961           */
 962  962          ASSERT(mmu.pae_hat);
 963  963          src = hat->hat_vlp_ptes;
 964  964          dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
 965  965          for (i = 0; i < VLP_NUM_PTES; ++i) {
 966  966                  for (;;) {
 967  967                          pte = dest[i];
 968  968                          if (pte == src[i])
 969  969                                  break;
 970      -                        if (cas64(dest + i, pte, src[i]) != src[i])
      970 +                        if (atomic_cas_64(dest + i, pte, src[i]) != src[i])
 971  971                                  break;
 972  972                  }
 973  973          }
 974  974  }
 975  975  #endif
 976  976  
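The retry loop above is the idiom this change standardizes on: on 32-bit
PAE a plain 64-bit store can tear, so the new PTE value is published with
a compare-and-swap and re-read on failure. Below is a minimal sketch of
that idiom using only the atomic(3C) interface from <sys/atomic.h>;
install_pte64() is a hypothetical helper written for illustration, not
part of this change.

    #include <sys/types.h>
    #include <sys/atomic.h>

    /*
     * Publish a 64-bit PTE value without tearing on 32-bit PAE:
     * re-read the slot and retry until the desired value is already
     * present or our compare-and-swap wins.
     */
    static void
    install_pte64(volatile uint64_t *slot, uint64_t newval)
    {
            uint64_t old;

            for (;;) {
                    old = *slot;
                    if (old == newval)
                            break;          /* already installed */
                    if (atomic_cas_64(slot, old, newval) == old)
                            break;          /* our CAS won */
                    /* lost a race; re-read and try again */
            }
    }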
 977  977  /*
 978  978   * Switch to a new active hat, maintaining bit masks to track active CPUs.
 979  979   *
 980  980   * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
------ 1000 lines elided ------
1981 1981                  if (mmu.max_level == 2)
1982 1982                          reload_cr3();
1983 1983          } else {
1984 1984                  reload_cr3();
1985 1985          }
1986 1986  }
1987 1987  
1988 1988  #define TLB_CPU_HALTED  (01ul)
1989 1989  #define TLB_INVAL_ALL   (02ul)
1990 1990  #define CAS_TLB_INFO(cpu, old, new)     \
1991      -        caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
     1991 +        atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
1992 1992  
1993 1993  /*
1994 1994   * Record that a CPU is going idle
1995 1995   */
1996 1996  void
1997 1997  tlb_going_idle(void)
1998 1998  {
1999      -        atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
     1999 +        atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
2000 2000  }
2001 2001  
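These hunks all operate on the same lock-free per-CPU word:
tlb_going_idle() above posts TLB_CPU_HALTED with atomic_or_ulong(), and
the CAS_TLB_INFO() macro (now atomic_cas_ulong()) is how those flags get
claimed exactly once. A minimal sketch of that pattern follows; it is
not the elided tlb_service() body, and the demo_* names are illustrative
only.

    #include <sys/types.h>
    #include <sys/atomic.h>

    #define DEMO_TLB_CPU_HALTED     (01ul)  /* mirrors TLB_CPU_HALTED */
    #define DEMO_TLB_INVAL_ALL      (02ul)  /* mirrors TLB_INVAL_ALL */

    static volatile ulong_t demo_tlb_info;

    static void
    demo_flush_all(void)
    {
            /* stand-in for a full TLB flush, e.g. reload_cr3() */
    }

    /* Going idle: advertise halted state without a lock. */
    static void
    demo_going_idle(void)
    {
            atomic_or_ulong(&demo_tlb_info, DEMO_TLB_CPU_HALTED);
    }

    /*
     * Waking up: atomically swap the flag word to 0 so the flags are
     * consumed exactly once, then service a flush that was deferred
     * while the CPU was halted.
     */
    static void
    demo_tlb_wakeup(void)
    {
            ulong_t old;

            for (;;) {
                    old = demo_tlb_info;
                    if (atomic_cas_ulong(&demo_tlb_info, old, 0) == old)
                            break;          /* flags claimed */
            }
            if (old & DEMO_TLB_INVAL_ALL)
                    demo_flush_all();
    }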
2002 2002  /*
2003 2003   * Service a delayed TLB flush if coming out of being idle.
2004 2004   * It will be called from cpu idle notification with interrupt disabled.
2005 2005   */
2006 2006  void
2007 2007  tlb_service(void)
2008 2008  {
2009 2009          ulong_t tlb_info;
------ 2454 lines elided ------