5042 stop using deprecated atomic functions

          --- old/usr/src/uts/i86pc/os/intr.c
          +++ new/usr/src/uts/i86pc/os/intr.c
(1210 lines elided)
1211 1211          ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);
1212 1212  
1213 1213          /*
1214 1214           * We could be here with a zero timestamp. This could happen if:
1215 1215           * an interrupt thread which no longer has a pinned thread underneath
1216 1216           * it (i.e. it blocked at some point in its past) has finished running
1217 1217           * its handler. intr_thread() updated the interrupt statistic for its
1218 1218           * PIL and zeroed its timestamp. Since there was no pinned thread to
1219 1219           * return to, swtch() gets called and we end up here.
1220 1220           *
1221      -         * Note that we use atomic ops below (cas64 and atomic_add_64), which
1222      -         * we don't use in the functions above, because we're not called
1223      -         * with interrupts blocked, but the epilog/prolog functions are.
     1221 +         * Note that we use atomic ops below (atomic_cas_64 and
     1222 +         * atomic_add_64), which we don't use in the functions above,
     1223 +         * because we're not called with interrupts blocked, but the
     1224 +         * epilog/prolog functions are.
1224 1225           */
1225 1226          if (t->t_intr_start) {
1226 1227                  do {
1227 1228                          start = t->t_intr_start;
1228 1229                          interval = tsc_read() - start;
1229      -                } while (cas64(&t->t_intr_start, start, 0) != start);
     1230 +                } while (atomic_cas_64(&t->t_intr_start, start, 0) != start);
1230 1231                  cpu = CPU;
1231 1232                  cpu->cpu_m.intrstat[t->t_pil][0] += interval;
1232 1233  
1233 1234                  atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
1234 1235                      interval);
1235 1236          } else
1236 1237                  ASSERT(t->t_intr == NULL);
1237 1238  }
1238 1239  
1239 1240  /*
(3 lines elided)
1243 1244  void
1244 1245  cpu_intr_swtch_exit(kthread_id_t t)
1245 1246  {
1246 1247          uint64_t ts;
1247 1248  
1248 1249          ASSERT((t->t_flag & T_INTR_THREAD) != 0);
1249 1250          ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);
1250 1251  
1251 1252          do {
1252 1253                  ts = t->t_intr_start;
1253      -        } while (cas64(&t->t_intr_start, ts, tsc_read()) != ts);
     1254 +        } while (atomic_cas_64(&t->t_intr_start, ts, tsc_read()) != ts);
1254 1255  }
1255 1256  
1256 1257  /*
1257 1258   * Dispatch a hilevel interrupt (one above LOCK_LEVEL)
1258 1259   */
1259 1260  /*ARGSUSED*/
1260 1261  static void
1261 1262  dispatch_hilevel(uint_t vector, uint_t arg2)
1262 1263  {
1263 1264          sti();
(360 lines elided)
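
Both hunks rely on the same lock-free idiom: read the shared 64-bit timestamp, compute the new value, and publish it with a compare-and-swap, retrying if another CPU changed the word in between. The sketch below is a minimal userland illustration of that idiom, assuming an illumos build environment where <atomic.h> declares atomic_cas_64() and atomic_add_64(); intr_start, total_time, and read_clock() are hypothetical stand-ins for t->t_intr_start, cpu_intracct, and tsc_read().

#include <atomic.h>
#include <stdio.h>
#include <inttypes.h>

static volatile uint64_t intr_start;	/* stand-in for t->t_intr_start */
static volatile uint64_t total_time;	/* stand-in for cpu_intracct */

/* Hypothetical monotonic time source standing in for tsc_read(). */
static uint64_t
read_clock(void)
{
	static uint64_t fake = 1000;
	return (fake += 7);
}

/*
 * Atomically claim the interval since intr_start and zero the
 * timestamp. If another thread stores a new timestamp between the
 * load and the CAS, atomic_cas_64() returns that newer value instead
 * of `start' and the loop retries.
 */
static uint64_t
claim_interval(void)
{
	uint64_t start, interval;

	do {
		start = intr_start;
		interval = read_clock() - start;
	} while (atomic_cas_64(&intr_start, start, 0) != start);

	/* Accumulate without a lock, as the patch does for cpu_intracct. */
	atomic_add_64(&total_time, interval);
	return (interval);
}

int
main(void)
{
	intr_start = read_clock();
	(void) printf("interval = %" PRIu64 "\n", claim_interval());
	return (0);
}

Note that cas64() and atomic_cas_64() perform the same compare-and-swap; the change only moves to the non-deprecated names from <sys/atomic.h>, so no behavioral difference is intended.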