Print this page
5042 stop using deprecated atomic functions

*** 49,73 **** /* * Implementation for cross-processor calls via interprocessor interrupts * * This implementation uses a message passing architecture to allow multiple * concurrent cross calls to be in flight at any given time. We use the cmpxchg ! * instruction, aka casptr(), to implement simple efficient work queues for ! * message passing between CPUs with almost no need for regular locking. ! * See xc_extract() and xc_insert() below. * * The general idea is that initiating a cross call means putting a message * on a target(s) CPU's work queue. Any synchronization is handled by passing * the message back and forth between initiator and target(s). * * Every CPU has xc_work_cnt, which indicates it has messages to process. * This value is incremented as message traffic is initiated and decremented * with every message that finishes all processing. * * The code needs no mfence or other membar_*() calls. The uses of ! * casptr(), cas32() and atomic_dec_32() for the message passing are ! * implemented with LOCK prefix instructions which are equivalent to mfence. * * One interesting aspect of this implementation is that it allows 2 or more * CPUs to initiate cross calls to intersecting sets of CPUs at the same time. * The cross call processing by the CPUs will happen in any order with only * a guarantee, for xc_call() and xc_sync(), that an initiator won't return --- 49,74 ---- /* * Implementation for cross-processor calls via interprocessor interrupts * * This implementation uses a message passing architecture to allow multiple * concurrent cross calls to be in flight at any given time. We use the cmpxchg ! * instruction, aka atomic_cas_ptr(), to implement simple efficient work ! * queues for message passing between CPUs with almost no need for regular ! * locking. See xc_extract() and xc_insert() below. * * The general idea is that initiating a cross call means putting a message * on a target(s) CPU's work queue. 
Any synchronization is handled by passing * the message back and forth between initiator and target(s). * * Every CPU has xc_work_cnt, which indicates it has messages to process. * This value is incremented as message traffic is initiated and decremented * with every message that finishes all processing. * * The code needs no mfence or other membar_*() calls. The uses of ! * atomic_cas_ptr(), atomic_cas_32() and atomic_dec_32() for the message ! * passing are implemented with LOCK prefix instructions which are ! * equivalent to mfence. * * One interesting aspect of this implementation is that it allows 2 or more * CPUs to initiate cross calls to intersecting sets of CPUs at the same time. * The cross call processing by the CPUs will happen in any order with only * a guarantee, for xc_call() and xc_sync(), that an initiator won't return
*** 142,152 **** xc_increment(struct machcpu *mcpu) { int old; do { old = mcpu->xc_work_cnt; ! } while (cas32((uint32_t *)&mcpu->xc_work_cnt, old, old + 1) != old); return (old); } /* * Put a message into a queue. The insertion is atomic no matter --- 143,153 ---- xc_increment(struct machcpu *mcpu) { int old; do { old = mcpu->xc_work_cnt; ! } while (atomic_cas_32(&mcpu->xc_work_cnt, old, old + 1) != old); return (old); } /* * Put a message into a queue. The insertion is atomic no matter
*** 166,176 **** queue == &cpu[msg->xc_master]->cpu_m.xc_free); do { old_head = (xc_msg_t *)*(volatile xc_msg_t **)queue; msg->xc_next = old_head; ! } while (casptr(queue, old_head, msg) != old_head); } /* * Extract a message from a queue. The extraction is atomic only * when just one thread does extractions from the queue. --- 167,177 ---- queue == &cpu[msg->xc_master]->cpu_m.xc_free); do { old_head = (xc_msg_t *)*(volatile xc_msg_t **)queue; msg->xc_next = old_head; ! } while (atomic_cas_ptr(queue, old_head, msg) != old_head); } /* * Extract a message from a queue. The extraction is atomic only * when just one thread does extractions from the queue.
*** 183,193 **** do { old_head = (xc_msg_t *)*(volatile xc_msg_t **)queue; if (old_head == NULL) return (old_head); ! } while (casptr(queue, old_head, old_head->xc_next) != old_head); old_head->xc_next = NULL; return (old_head); } /* --- 184,195 ---- do { old_head = (xc_msg_t *)*(volatile xc_msg_t **)queue; if (old_head == NULL) return (old_head); ! } while (atomic_cas_ptr(queue, old_head, old_head->xc_next) != ! old_head); old_head->xc_next = NULL; return (old_head); } /*
*** 606,616 **** continue; (void) xc_increment(&cpup->cpu_m); XC_BT_SET(xc_priority_set, c); send_dirint(c, XC_HI_PIL); for (i = 0; i < 10; ++i) { ! (void) casptr(&cpup->cpu_m.xc_msgbox, cpup->cpu_m.xc_msgbox, cpup->cpu_m.xc_msgbox); } } } --- 608,618 ---- continue; (void) xc_increment(&cpup->cpu_m); XC_BT_SET(xc_priority_set, c); send_dirint(c, XC_HI_PIL); for (i = 0; i < 10; ++i) { ! (void) atomic_cas_ptr(&cpup->cpu_m.xc_msgbox, cpup->cpu_m.xc_msgbox, cpup->cpu_m.xc_msgbox); } } }