/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/systm.h>
#include <sys/membar.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/platform_module.h>
#include <sys/cpuvar.h>
#include <sys/cpu_module.h>
#include <sys/cmp.h>
#include <sys/dumphdr.h>

#include <sys/cpu_sgnblk_defs.h>
static cpuset_t cpu_idle_set;
static kmutex_t cpu_idle_lock;
typedef const char *fn_t;

/*
 * Flags used to idle, resume, and stop CPUs in the kernel
 * rather than through the PROM routines.
 */
static int kern_idle[NCPU];		/* kernel's idle loop */
static int cpus_are_paused;
extern void debug_flush_windows(void);

/*
 * Initialize the idlestop mutex.
 */
void
idlestop_init(void)
{
	mutex_init(&cpu_idle_lock, NULL, MUTEX_SPIN, (void *)ipltospl(PIL_15));
}

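/*
 * Cross-call handler: park the calling CPU in a tight kernel idle
 * loop until resume_other_cpus() clears its kern_idle[] flag.  The
 * cpu_m.in_prom flag is raised so idle_other_cpus() can tell that
 * this CPU has checked in; while spinning, the CPU assists any
 * crash dump in progress via dumpsys_helper_nw().
 */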
static void
cpu_idle_self(void)
{
	uint_t s;
	label_t save;

	s = spl8();
	debug_flush_windows();

	CPU->cpu_m.in_prom = 1;
	membar_stld();

	save = curthread->t_pcb;
	(void) setjmp(&curthread->t_pcb);

	kern_idle[CPU->cpu_id] = 1;
	while (kern_idle[CPU->cpu_id])
		dumpsys_helper_nw();

	CPU->cpu_m.in_prom = 0;
	membar_stld();

	curthread->t_pcb = save;
	splx(s);
}

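/*
 * Idle all other ready CPUs by cross-trapping them into
 * cpu_idle_self() and spinning until each one has checked in by
 * setting cpu_m.in_prom.  On success, cpu_idle_lock is deliberately
 * left held; the matching resume_other_cpus() call asserts this and
 * releases it.  The expected calling pattern is:
 *
 *	idle_other_cpus();
 *	... work that requires the other CPUs to be quiet ...
 *	resume_other_cpus();
 */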
void
idle_other_cpus(void)
{
	int i, cpuid, ntries;
	int failed = 0;

	if (ncpus == 1)
		return;

	mutex_enter(&cpu_idle_lock);

	cpuid = CPU->cpu_id;
	ASSERT(cpuid < NCPU);

	cpu_idle_set = cpu_ready_set;
	CPUSET_DEL(cpu_idle_set, cpuid);

	if (CPUSET_ISNULL(cpu_idle_set))
		return;

	xt_some(cpu_idle_set, (xcfunc_t *)idle_stop_xcall,
	    (uint64_t)cpu_idle_self, NULL);

	for (i = 0; i < NCPU; i++) {
		if (!CPU_IN_SET(cpu_idle_set, i))
			continue;

		ntries = 0x10000;
		while (!cpu[i]->cpu_m.in_prom && ntries) {
			DELAY(50);
			ntries--;
		}

		/*
		 * A cpu failing to idle is an error condition, since
		 * we can't be sure anymore of its state.
		 */
		if (!cpu[i]->cpu_m.in_prom) {
			cmn_err(CE_WARN, "cpuid 0x%x failed to idle", i);
			failed++;
		}
	}

	if (failed) {
		mutex_exit(&cpu_idle_lock);
		cmn_err(CE_PANIC, "idle_other_cpus: not all cpus idled");
	}
}

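/*
 * Bring the CPUs idled by idle_other_cpus() back out of the kernel
 * idle loop: clear each CPU's kern_idle[] flag, wait for it to drop
 * cpu_m.in_prom, and release cpu_idle_lock.  Panics if any CPU fails
 * to resume, since interrupts may have been directed at it.
 */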
void
resume_other_cpus(void)
{
	int i, ntries;
	int cpuid = CPU->cpu_id;
	boolean_t failed = B_FALSE;

	if (ncpus == 1)
		return;

	ASSERT(cpuid < NCPU);
	ASSERT(MUTEX_HELD(&cpu_idle_lock));

	for (i = 0; i < NCPU; i++) {
		if (!CPU_IN_SET(cpu_idle_set, i))
			continue;

		kern_idle[i] = 0;
		membar_stld();
	}

	for (i = 0; i < NCPU; i++) {
		if (!CPU_IN_SET(cpu_idle_set, i))
			continue;

		ntries = 0x10000;
		while (cpu[i]->cpu_m.in_prom && ntries) {
			DELAY(50);
			ntries--;
		}

		/*
		 * A cpu failing to resume is an error condition, since
		 * intrs may have been directed there.
		 */
		if (cpu[i]->cpu_m.in_prom) {
			cmn_err(CE_WARN, "cpuid 0x%x failed to resume", i);
			continue;
		}
		CPUSET_DEL(cpu_idle_set, i);
	}

	failed = !CPUSET_ISNULL(cpu_idle_set);

	mutex_exit(&cpu_idle_lock);

	/*
	 * B_TRUE if any cpu failed to resume.
	 */
	if (failed)
		cmn_err(CE_PANIC, "resume_other_cpus: not all cpus resumed");
}

/*
 * Stop all other CPUs before halting or rebooting.  We pause the
 * CPUs instead of sending a cross call; the cpus_are_paused flag
 * makes repeated calls harmless.
 */
void
stop_other_cpus(void)
{
	mutex_enter(&cpu_lock);
	if (cpus_are_paused) {
		mutex_exit(&cpu_lock);
		return;
	}

	if (ncpus > 1)
		intr_redist_all_cpus_shutdown();

	pause_cpus(NULL);
	cpus_are_paused = 1;

	mutex_exit(&cpu_lock);
}

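/*
 * Upper bound, in microseconds, on how long mp_cpu_quiesce() will
 * wait for a CPU to quiesce (one minute).
 */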
int cpu_quiesce_microsecond_sanity_limit = 60 * 1000000;

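/*
 * Quiesce a CPU that the caller has already marked CPU_QUIESCED:
 * take it out of the ready set so it can no longer participate in
 * x-calls or x-traps, then wait for any active interrupts to drain
 * and for the CPU to settle into its idle (or startup) thread.
 * Panics if the CPU has not quiesced within the sanity limit.
 */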
void
mp_cpu_quiesce(cpu_t *cp0)
{
	volatile cpu_t	*cp = (volatile cpu_t *)cp0;
	int		i, sanity_limit = cpu_quiesce_microsecond_sanity_limit;
	int		cpuid = cp->cpu_id;
	int		found_intr = 1;
	static fn_t	f = "mp_cpu_quiesce";

	ASSERT(CPU->cpu_id != cpuid);
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cp->cpu_flags & CPU_QUIESCED);

	/*
	 * Declare the CPU as no longer being READY to process interrupts
	 * and wait for them to stop.  A CPU that is not READY can no
	 * longer participate in x-calls or x-traps.
	 */
	cp->cpu_flags &= ~CPU_READY;
	CPUSET_DEL(cpu_ready_set, cpuid);
	membar_sync();

	for (i = 0; i < sanity_limit; i++) {
		if (cp->cpu_intr_actv == 0 &&
		    (cp->cpu_thread == cp->cpu_idle_thread ||
		    cp->cpu_thread == cp->cpu_startup_thread)) {
			found_intr = 0;
			break;
		}
		DELAY(1);
	}

	if (found_intr) {
		if (cp->cpu_intr_actv) {
			cmn_err(CE_PANIC, "%s: cpu_intr_actv != 0", f);
		} else if (cp->cpu_thread != cp->cpu_idle_thread &&
		    cp->cpu_thread != cp->cpu_startup_thread) {
			cmn_err(CE_PANIC, "%s: CPU %d is not quiesced",
			    f, cpuid);
		}
	}
}

/*
 * Start CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_start(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Platforms that use CPU signatures require the signature
	 * block update to indicate that this CPU is in the OS now.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cp->cpu_id);

	cmp_error_resteer(cp->cpu_id);

	return (0);			/* nothing special to do on this arch */
}

/*
 * Stop CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_stop(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	cmp_error_resteer(cp->cpu_id);

	/*
	 * Platforms that use CPU signatures require the signature
	 * block update to indicate that this CPU is offlined now.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_OFFLINE, SIGSUBST_NULL, cp->cpu_id);

	return (0);			/* nothing special to do on this arch */
}

/*
 * Power on CPU.
 */
int
mp_cpu_poweron(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	if (&plat_cpu_poweron)
		return (plat_cpu_poweron(cp));	/* platform-dependent hook */

	return (ENOTSUP);
}

/*
 * Power off CPU.
 */
int
mp_cpu_poweroff(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	if (&plat_cpu_poweroff)
		return (plat_cpu_poweroff(cp));	/* platform-dependent hook */

	return (ENOTSUP);
}

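/*
 * Take the indicated CPU into the faulted state; defers to the
 * common cpu_faulted_enter().
 */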
void
mp_cpu_faulted_enter(struct cpu *cp)
{
	cpu_faulted_enter(cp);
}

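/*
 * Bring the indicated CPU back out of the faulted state; defers to
 * the common cpu_faulted_exit().
 */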
void
mp_cpu_faulted_exit(struct cpu *cp)
{
	cpu_faulted_exit(cp);
}