5042 stop using deprecated atomic functions


1201  * ran for and update the statistic for its PIL.
1202  */
1203 void
1204 cpu_intr_swtch_enter(kthread_id_t t)
1205 {
1206         uint64_t        interval;
1207         uint64_t        start;
1208         cpu_t           *cpu;
1209 
1210         ASSERT((t->t_flag & T_INTR_THREAD) != 0);
1211         ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);
1212 
1213         /*
1214          * We could be here with a zero timestamp. This could happen if:
1215          * an interrupt thread which no longer has a pinned thread underneath
1216          * it (i.e. it blocked at some point in its past) has finished running
1217          * its handler. intr_thread() updated the interrupt statistic for its
1218          * PIL and zeroed its timestamp. Since there was no pinned thread to
1219          * return to, swtch() gets called and we end up here.
1220          *
1221          * Note that we use atomic ops below (cas64 and atomic_add_64), which
1222          * we don't use in the functions above, because we're not called
1223          * with interrupts blocked, but the epilog/prolog functions are.
1224          */
1225         if (t->t_intr_start) {
1226                 do {
1227                         start = t->t_intr_start;
1228                         interval = tsc_read() - start;
1229                 } while (cas64(&t->t_intr_start, start, 0) != start);
1230                 cpu = CPU;
1231                 cpu->cpu_m.intrstat[t->t_pil][0] += interval;
1232 
1233                 atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
1234                     interval);
1235         } else
1236                 ASSERT(t->t_intr == NULL);
1237 }
1238 
1239 /*
1240  * An interrupt thread is returning from swtch(). Place a starting timestamp
1241  * in its thread structure.
1242  */
1243 void
1244 cpu_intr_swtch_exit(kthread_id_t t)
1245 {
1246         uint64_t ts;
1247 
1248         ASSERT((t->t_flag & T_INTR_THREAD) != 0);
1249         ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);
1250 
1251         do {
1252                 ts = t->t_intr_start;
1253         } while (cas64(&t->t_intr_start, ts, tsc_read()) != ts);
1254 }
1255 
1256 /*
1257  * Dispatch a hilevel interrupt (one above LOCK_LEVEL)
1258  */
1259 /*ARGSUSED*/
1260 static void
1261 dispatch_hilevel(uint_t vector, uint_t arg2)
1262 {
1263         sti();
1264         av_dispatch_autovect(vector);
1265         cli();
1266 }
1267 
1268 /*
1269  * Dispatch a soft interrupt
1270  */
1271 /*ARGSUSED*/
1272 static void
1273 dispatch_softint(uint_t oldpil, uint_t arg2)
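The listing above is the pre-change code; the only functional difference in the updated listing that follows is the rename of the deprecated cas64() to atomic_cas_64() (plus the comment rewrapped to match). Both take (target, expected value, new value) and return the value actually found in the target, so the retry loops themselves are untouched. Below is a minimal stand-alone sketch of the same read-then-compare-and-swap pattern against the atomic_ops(3C) interface in <atomic.h>; the names read_and_clear and intr_start are illustrative, not from this source file:

#include <atomic.h>
#include <inttypes.h>
#include <stdio.h>

/* Illustrative stand-in for t->t_intr_start. */
static volatile uint64_t intr_start = 12345;

/*
 * Atomically fetch and zero a 64-bit timestamp.  atomic_cas_64()
 * returns the value it found in *tsp, and the store of 0 took effect
 * only if that value matched the expected one, so we retry until no
 * other thread has raced in a new value between our load and the cas.
 */
static uint64_t
read_and_clear(volatile uint64_t *tsp)
{
	uint64_t old;

	do {
		old = *tsp;
	} while (atomic_cas_64(tsp, old, 0) != old);

	return (old);
}

int
main(void)
{
	(void) printf("cleared %" PRIu64 "\n", read_and_clear(&intr_start));
	return (0);
}

cpu_intr_swtch_exit() uses the same loop shape with tsc_read() as the replacement value instead of 0. The updated listing: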
1201  * ran for and update the statistic for its PIL.
1202  */
1203 void
1204 cpu_intr_swtch_enter(kthread_id_t t)
1205 {
1206         uint64_t        interval;
1207         uint64_t        start;
1208         cpu_t           *cpu;
1209 
1210         ASSERT((t->t_flag & T_INTR_THREAD) != 0);
1211         ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);
1212 
1213         /*
1214          * We could be here with a zero timestamp. This could happen if:
1215          * an interrupt thread which no longer has a pinned thread underneath
1216          * it (i.e. it blocked at some point in its past) has finished running
1217          * its handler. intr_thread() updated the interrupt statistic for its
1218          * PIL and zeroed its timestamp. Since there was no pinned thread to
1219          * return to, swtch() gets called and we end up here.
1220          *
1221          * Note that we use atomic ops below (atomic_cas_64 and
1222          * atomic_add_64), which we don't use in the functions above,
1223          * because we're not called with interrupts blocked, but the
1224          * epilog/prolog functions are.
1225          */
1226         if (t->t_intr_start) {
1227                 do {
1228                         start = t->t_intr_start;
1229                         interval = tsc_read() - start;
1230                 } while (atomic_cas_64(&t->t_intr_start, start, 0) != start);
1231                 cpu = CPU;
1232                 cpu->cpu_m.intrstat[t->t_pil][0] += interval;
1233 
1234                 atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
1235                     interval);
1236         } else
1237                 ASSERT(t->t_intr == NULL);
1238 }
1239 
1240 /*
1241  * An interrupt thread is returning from swtch(). Place a starting timestamp
1242  * in its thread structure.
1243  */
1244 void
1245 cpu_intr_swtch_exit(kthread_id_t t)
1246 {
1247         uint64_t ts;
1248 
1249         ASSERT((t->t_flag & T_INTR_THREAD) != 0);
1250         ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);
1251 
1252         do {
1253                 ts = t->t_intr_start;
1254         } while (atomic_cas_64(&t->t_intr_start, ts, tsc_read()) != ts);
1255 }
1256 
1257 /*
1258  * Dispatch a hilevel interrupt (one above LOCK_LEVEL)
1259  */
1260 /*ARGSUSED*/
1261 static void
1262 dispatch_hilevel(uint_t vector, uint_t arg2)
1263 {
1264         sti();
1265         av_dispatch_autovect(vector);
1266         cli();
1267 }
1268 
1269 /*
1270  * Dispatch a soft interrupt
1271  */
1272 /*ARGSUSED*/
1273 static void
1274 dispatch_softint(uint_t oldpil, uint_t arg2)
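Note that the atomic_add_64() call is untouched by this diff: it is already the current atomic_ops(3C) name, not one of the deprecated aliases. For completeness, a small user-level sketch of the same lock-free accumulation pattern used for the cpu_intracct accounting; the counter name and thread count are illustrative:

#include <atomic.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>

#define	NTHREADS	4
#define	NADDS		100000

/* Illustrative stand-in for cpu_intracct[]. */
static volatile uint64_t intracct;

static void *
worker(void *arg)
{
	int i;

	/* Lock-free accounting: no mutex around the hot-path add. */
	for (i = 0; i < NADDS; i++)
		atomic_add_64(&intracct, 1);

	return (arg);
}

int
main(void)
{
	pthread_t t[NTHREADS];
	int i;

	for (i = 0; i < NTHREADS; i++)
		(void) pthread_create(&t[i], NULL, worker, NULL);
	for (i = 0; i < NTHREADS; i++)
		(void) pthread_join(t[i], NULL);

	/* Prints 400000: every add landed despite the contention. */
	(void) printf("%" PRIu64 "\n", intracct);
	return (0);
}

atomic_add_64() takes a signed int64_t delta, so the same call also covers decrements; the kernel code relies on that interface being safe against concurrent updaters without having to block interrupts, which is exactly the constraint the block comment in cpu_intr_swtch_enter() describes.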