5042 stop using deprecated atomic functions
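Both cas64() and its replacement atomic_cas_64() (declared in <sys/atomic.h>) take (target, cmp, newval), store newval into *target only if it currently holds cmp, and return the previous value, so each call site below is a one-for-one rename. As a minimal sketch of the retry pattern these call sites rely on (illustrative only; fetch_and_clear() is a hypothetical name, not part of the patch):

#include <sys/atomic.h>

/*
 * Illustrative sketch, not part of the patch: atomically read and zero
 * a 64-bit timestamp. atomic_cas_64() stores 0 only if *tsp still holds
 * the value we read, and returns the old value, so the loop retries
 * until no update raced with us.
 */
static uint64_t
fetch_and_clear(volatile uint64_t *tsp)
{
        uint64_t old;

        do {
                old = *tsp;
        } while (atomic_cas_64(tsp, old, 0) != old);

        return (old);
}

The same loop with a nonzero new value, as in cpu_intr_swtch_exit() below, is effectively an atomic 64-bit swap; illumos also provides atomic_swap_64() for that, but the change shown here is a pure rename.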


--- old (deprecated cas64())

        ASSERT((t->t_flag & T_INTR_THREAD) != 0);
        ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

        /*
         * We could be here with a zero timestamp. This could happen if:
         * an interrupt thread which no longer has a pinned thread underneath
         * it (i.e. it blocked at some point in its past) has finished running
         * its handler. intr_thread() updated the interrupt statistic for its
         * PIL and zeroed its timestamp. Since there was no pinned thread to
         * return to, swtch() gets called and we end up here.
         *
         * It can also happen if an interrupt thread in intr_thread() calls
         * preempt. It will have already taken care of updating stats. In
         * this event, the interrupt thread will be runnable.
         */
        if (t->t_intr_start) {
                do {
                        start = t->t_intr_start;
                        interval = CLOCK_TICK_COUNTER() - start;
                } while (cas64(&t->t_intr_start, start, 0) != start);
                cpu = CPU;
                if (cpu->cpu_m.divisor > 1)
                        interval *= cpu->cpu_m.divisor;
                cpu->cpu_m.intrstat[t->t_pil][0] += interval;

                atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
                    interval);
        } else
                ASSERT(t->t_intr == NULL || t->t_state == TS_RUN);
}


/*
 * An interrupt thread is returning from swtch(). Place a starting timestamp
 * in its thread structure.
 */
void
cpu_intr_swtch_exit(kthread_id_t t)
{
        uint64_t ts;

        ASSERT((t->t_flag & T_INTR_THREAD) != 0);
        ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

        do {
                ts = t->t_intr_start;
        } while (cas64(&t->t_intr_start, ts, CLOCK_TICK_COUNTER()) != ts);
}


int
blacklist(int cmd, const char *scheme, nvlist_t *fmri, const char *class)
{
        if (&plat_blacklist)
                return (plat_blacklist(cmd, scheme, fmri, class));

        return (ENOTSUP);
}

int
kdi_pread(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
{
        extern void kdi_flush_caches(void);
        size_t nread = 0;
        uint32_t word;
        int slop, i;

+++ new (atomic_cas_64())

        ASSERT((t->t_flag & T_INTR_THREAD) != 0);
        ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

        /*
         * We could be here with a zero timestamp. This could happen if:
         * an interrupt thread which no longer has a pinned thread underneath
         * it (i.e. it blocked at some point in its past) has finished running
         * its handler. intr_thread() updated the interrupt statistic for its
         * PIL and zeroed its timestamp. Since there was no pinned thread to
         * return to, swtch() gets called and we end up here.
         *
         * It can also happen if an interrupt thread in intr_thread() calls
         * preempt. It will have already taken care of updating stats. In
         * this event, the interrupt thread will be runnable.
         */
        if (t->t_intr_start) {
                do {
                        start = t->t_intr_start;
                        interval = CLOCK_TICK_COUNTER() - start;
                } while (atomic_cas_64(&t->t_intr_start, start, 0) != start);
                cpu = CPU;
                if (cpu->cpu_m.divisor > 1)
                        interval *= cpu->cpu_m.divisor;
                cpu->cpu_m.intrstat[t->t_pil][0] += interval;

                atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
                    interval);
        } else
                ASSERT(t->t_intr == NULL || t->t_state == TS_RUN);
}


/*
 * An interrupt thread is returning from swtch(). Place a starting timestamp
 * in its thread structure.
 */
void
cpu_intr_swtch_exit(kthread_id_t t)
{
        uint64_t ts;

        ASSERT((t->t_flag & T_INTR_THREAD) != 0);
        ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

        do {
                ts = t->t_intr_start;
        } while (atomic_cas_64(&t->t_intr_start, ts, CLOCK_TICK_COUNTER()) !=
            ts);
}


int
blacklist(int cmd, const char *scheme, nvlist_t *fmri, const char *class)
{
        if (&plat_blacklist)
                return (plat_blacklist(cmd, scheme, fmri, class));

        return (ENOTSUP);
}

int
kdi_pread(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
{
        extern void kdi_flush_caches(void);
        size_t nread = 0;
        uint32_t word;
        int slop, i;

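A note on blacklist() above: the `if (&plat_blacklist)` test works because plat_blacklist() is a weak symbol (the usual illumos #pragma weak idiom, declared elsewhere in the full source), so the reference resolves to NULL unless the platform module defines it. A minimal standalone sketch of that guard, assuming GCC/Clang weak-function syntax; hook() and call_hook() are hypothetical names, not from the patch:

#include <errno.h>

/* Weak reference: resolves to NULL unless some linked object defines hook(). */
extern int hook(int cmd) __attribute__((weak));

int
call_hook(int cmd)
{
        if (&hook)              /* did a platform supply hook()? */
                return (hook(cmd));

        return (ENOTSUP);       /* no implementation; same fallback as above */
}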