patch delete-swapped_lock
patch remove-dead-disp-code
patch remove-useless-var2
patch remove-load-flag
patch remove-on-swapq-flag
patch remove-dont-swap-flag

--- old/usr/src/uts/common/disp/disp.c
+++ new/usr/src/uts/common/disp/disp.c
[... 88 lines elided ...]
  89   89  static void     generic_enq_thread(cpu_t *, int);
  90   90  void            (*disp_enq_thread)(cpu_t *, int) = generic_enq_thread;
  91   91  
  92   92  pri_t   kpreemptpri;            /* priority where kernel preemption applies */
  93   93  pri_t   upreemptpri = 0;        /* priority where normal preemption applies */
  94   94  pri_t   intr_pri;               /* interrupt thread priority base level */
  95   95  
  96   96  #define KPQPRI  -1              /* pri where cpu affinity is dropped for kpq */
  97   97  pri_t   kpqpri = KPQPRI;        /* can be set in /etc/system */
  98   98  disp_t  cpu0_disp;              /* boot CPU's dispatch queue */
  99      -disp_lock_t     swapped_lock;   /* lock swapped threads and swap queue */
 100   99  int     nswapped;               /* total number of swapped threads */
 101      -void    disp_swapped_enq(kthread_t *tp);
 102  100  static void     disp_swapped_setrun(kthread_t *tp);
 103  101  static void     cpu_resched(cpu_t *cp, pri_t tpri);
 104  102  
 105  103  /*
 106  104   * If this is set, only interrupt threads will cause kernel preemptions.
 107  105   * This is done by changing the value of kpreemptpri.  kpreemptpri
 108  106   * will either be the max sysclass pri + 1 or the min interrupt pri.
 109  107   */
 110  108  int     only_intr_kpreempt;
 111  109  
[... 657 lines elided ...]
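
The only_intr_kpreempt comment above says kernel preemption is restricted by moving kpreemptpri between "max sysclass pri + 1" and "the min interrupt pri". A minimal sketch of that relationship follows, reusing the globals declared in this hunk (only_intr_kpreempt, kpreemptpri, intr_pri) plus maxclsyspri; the function name and the exact init-time logic are illustrative assumptions, not the patched source.

        /*
         * Illustrative sketch (assumed, not the actual init code):
         * when only_intr_kpreempt is set, kpreemptpri is raised to
         * the interrupt thread base priority, so only interrupt
         * threads cause kernel preemption.
         */
        static void
        kpreemptpri_sketch(void)
        {
                kpreemptpri = maxclsyspri + 1;  /* max sysclass pri + 1 */
                if (only_intr_kpreempt)
                        kpreemptpri = intr_pri; /* min interrupt pri */
        }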
 769  767                  }
 770  768                  TRACE_1(TR_FAC_DISP, TR_DISP_END,
 771  769                      "disp_end:tid %p", tp);
 772  770                  return (tp);
 773  771          }
 774  772  
 775  773          dq = &dp->disp_q[pri];
 776  774          tp = dq->dq_first;
 777  775  
 778  776          ASSERT(tp != NULL);
 779      -        ASSERT(tp->t_schedflag & TS_LOAD);      /* thread must be swapped in */
 780  777  
 781  778          DTRACE_SCHED2(dequeue, kthread_t *, tp, disp_t *, dp);
 782  779  
 783  780          /*
 784  781           * Found it so remove it from queue.
 785  782           */
 786  783          dp->disp_nrunnable--;
 787  784          dq->dq_sruncnt--;
 788  785          if ((dq->dq_first = tp->t_link) == NULL) {
 789  786                  ulong_t *dqactmap = dp->disp_qactmap;
[... 18 lines elided ...]
 808  805  
 809  806                          ipri = bt_gethighbit(dqactmap, maxrunword);
 810  807                          dp->disp_maxrunpri = ipri;
 811  808                          if (ipri < dp->disp_max_unbound_pri)
 812  809                                  dp->disp_max_unbound_pri = ipri;
 813  810                  }
 814  811          } else {
 815  812                  tp->t_link = NULL;
 816  813          }
 817  814  
 818      -        /*
 819      -         * Set TS_DONT_SWAP flag to prevent another processor from swapping
 820      -         * out this thread before we have a chance to run it.
 821      -         * While running, it is protected against swapping by t_lock.
 822      -         */
 823      -        tp->t_schedflag |= TS_DONT_SWAP;
 824  815          cpup->cpu_dispthread = tp;              /* protected by spl only */
 825  816          cpup->cpu_dispatch_pri = pri;
 826  817          ASSERT(pri == DISP_PRIO(tp));
 827  818          thread_onproc(tp, cpup);                /* set t_state to TS_ONPROC */
 828  819          disp_lock_exit_high(&dp->disp_lock);    /* drop run queue lock */
 829  820  
 830  821          ASSERT(tp != NULL);
 831  822          TRACE_1(TR_FAC_DISP, TR_DISP_END,
 832  823              "disp_end:tid %p", tp);
 833  824  
[... 350 lines elided ...]
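
The dequeue path above updates disp_qactmap and disp_maxrunpri when a priority level empties. A minimal sketch of that bitmap bookkeeping, using the illumos bitmap helpers BT_CLEAR() and bt_gethighbit() that the hunk itself calls; the helper name and the simplified shape are assumptions for illustration.

        /*
         * Sketch (assumed helper, condensed from the dequeue path):
         * when the queue at 'pri' goes empty, clear its bit in
         * disp_qactmap and, if it was the highest runnable priority,
         * rescan the bitmap downward for the new maximum.
         */
        static void
        disp_pri_empty_sketch(disp_t *dp, pri_t pri)
        {
                BT_CLEAR(dp->disp_qactmap, pri);
                if (dp->disp_maxrunpri == pri) {
                        int maxrunword = pri >> BT_ULSHIFT;

                        dp->disp_maxrunpri = bt_gethighbit(dp->disp_qactmap,
                            maxrunword);
                        if (dp->disp_maxrunpri < dp->disp_max_unbound_pri)
                                dp->disp_max_unbound_pri = dp->disp_maxrunpri;
                }
        }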
1184 1175          disp_t          *dp;
1185 1176          cpu_t           *cp;
1186 1177          pri_t           tpri;
1187 1178          int             bound;
1188 1179          boolean_t       self;
1189 1180  
1190 1181          ASSERT(THREAD_LOCK_HELD(tp));
1191 1182          ASSERT((tp->t_schedflag & TS_ALLSTART) == 0);
1192 1183          ASSERT(!thread_on_queue(tp));   /* make sure tp isn't on a runq */
1193 1184  
1194      -        /*
1195      -         * If thread is "swapped" or on the swap queue don't
1196      -         * queue it, but wake sched.
1197      -         */
1198      -        if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) {
1199      -                disp_swapped_setrun(tp);
1200      -                return;
1201      -        }
1202      -
1203 1185          self = (tp == curthread);
1204 1186  
1205 1187          if (tp->t_bound_cpu || tp->t_weakbound_cpu)
1206 1188                  bound = 1;
1207 1189          else
1208 1190                  bound = 0;
1209 1191  
1210 1192          tpri = DISP_PRIO(tp);
1211 1193          if (ncpus == 1)
1212 1194                  cp = tp->t_cpu;
[... 160 lines elided ...]
1373 1355          disp_t          *dp;
1374 1356          dispq_t         *dq;
1375 1357          cpu_t           *cp;
1376 1358          pri_t           tpri;
1377 1359          int             bound;
1378 1360  
1379 1361          ASSERT(THREAD_LOCK_HELD(tp));
1380 1362          ASSERT((tp->t_schedflag & TS_ALLSTART) == 0);
1381 1363          ASSERT(!thread_on_queue(tp));   /* make sure tp isn't on a runq */
1382 1364  
1383      -        /*
1384      -         * If thread is "swapped" or on the swap queue don't
1385      -         * queue it, but wake sched.
1386      -         */
1387      -        if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) {
1388      -                disp_swapped_setrun(tp);
1389      -                return;
1390      -        }
1391      -
1392 1365          if (tp->t_bound_cpu || tp->t_weakbound_cpu)
1393 1366                  bound = 1;
1394 1367          else
1395 1368                  bound = 0;
1396 1369  
1397 1370          tpri = DISP_PRIO(tp);
1398 1371          if (ncpus == 1)
1399 1372                  cp = tp->t_cpu;
1400 1373          else if (!bound) {
1401 1374                  if (tpri >= kpqpri) {
[... 208 lines elided ...]
1610 1583          kthread_t       *rp;
1611 1584          kthread_t       *trp;
1612 1585          kthread_t       **ptp;
1613 1586          int             tpri;
1614 1587  
1615 1588          ASSERT(THREAD_LOCK_HELD(tp));
1616 1589  
1617 1590          if (tp->t_state != TS_RUN)
1618 1591                  return (0);
1619 1592  
1620      -        /*
1621      -         * The thread is "swapped" or is on the swap queue and
1622      -         * hence no longer on the run queue, so return true.
1623      -         */
1624      -        if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD)
1625      -                return (1);
1626      -
1627 1593          tpri = DISP_PRIO(tp);
1628 1594          dp = tp->t_disp_queue;
1629 1595          ASSERT(tpri < dp->disp_npri);
1630 1596          dq = &dp->disp_q[tpri];
1631 1597          ptp = &dq->dq_first;
1632 1598          rp = *ptp;
1633 1599          trp = NULL;
1634 1600  
1635 1601          ASSERT(dq->dq_last == NULL || dq->dq_last->t_link == NULL);
1636 1602  
[... 33 lines elided ...]
1670 1636                          if (ipri < dp->disp_max_unbound_pri)
1671 1637                                  dp->disp_max_unbound_pri = ipri;
1672 1638                          dp->disp_maxrunpri = ipri;
1673 1639                  }
1674 1640          }
1675 1641          tp->t_link = NULL;
1676 1642          THREAD_TRANSITION(tp);          /* put in intermediate state */
1677 1643          return (1);
1678 1644  }
1679 1645  
1680      -
1681      -/*
1682      - * dq_sruninc and dq_srundec are public functions for
1683      - * incrementing/decrementing the sruncnts when a thread on
1684      - * a dispatcher queue is made schedulable/unschedulable by
1685      - * resetting the TS_LOAD flag.
1686      - *
1687      - * The caller MUST have the thread lock and therefore the dispatcher
1688      - * queue lock so that the operation which changes
1689      - * the flag, the operation that checks the status of the thread to
1690      - * determine if it's on a disp queue AND the call to this function
1691      - * are one atomic operation with respect to interrupts.
1692      - */
1693      -
1694      -/*
1695      - * Called by sched AFTER TS_LOAD flag is set on a swapped, runnable thread.
1696      - */
1697      -void
1698      -dq_sruninc(kthread_t *t)
1699      -{
1700      -        ASSERT(t->t_state == TS_RUN);
1701      -        ASSERT(t->t_schedflag & TS_LOAD);
1702      -
1703      -        THREAD_TRANSITION(t);
1704      -        setfrontdq(t);
1705      -}
1706      -
1707      -/*
1708      - * See comment on calling conventions above.
1709      - * Called by sched BEFORE TS_LOAD flag is cleared on a runnable thread.
1710      - */
1711      -void
1712      -dq_srundec(kthread_t *t)
1713      -{
1714      -        ASSERT(t->t_schedflag & TS_LOAD);
1715      -
1716      -        (void) dispdeq(t);
1717      -        disp_swapped_enq(t);
1718      -}
1719      -
1720      -/*
1721      - * Change the dispatcher lock of thread to the "swapped_lock"
1722      - * and return with thread lock still held.
1723      - *
1724      - * Called with thread_lock held, in transition state, and at high spl.
1725      - */
1726      -void
1727      -disp_swapped_enq(kthread_t *tp)
1728      -{
1729      -        ASSERT(THREAD_LOCK_HELD(tp));
1730      -        ASSERT(tp->t_schedflag & TS_LOAD);
1731      -
1732      -        switch (tp->t_state) {
1733      -        case TS_RUN:
1734      -                disp_lock_enter_high(&swapped_lock);
1735      -                THREAD_SWAP(tp, &swapped_lock); /* set TS_RUN state and lock */
1736      -                break;
1737      -        case TS_ONPROC:
1738      -                disp_lock_enter_high(&swapped_lock);
1739      -                THREAD_TRANSITION(tp);
1740      -                wake_sched_sec = 1;             /* tell clock to wake sched */
1741      -                THREAD_SWAP(tp, &swapped_lock); /* set TS_RUN state and lock */
1742      -                break;
1743      -        default:
1744      -                panic("disp_swapped: tp: %p bad t_state", (void *)tp);
1745      -        }
1746      -}
1747      -
1748      -/*
1749      - * This routine is called by setbackdq/setfrontdq if the thread is
1750      - * not loaded or loaded and on the swap queue.
1751      - *
1752      - * Thread state TS_SLEEP implies that a swapped thread
1753      - * has been woken up and needs to be swapped in by the swapper.
1754      - *
1755      - * Thread state TS_RUN, it implies that the priority of a swapped
1756      - * thread is being increased by scheduling class (e.g. ts_update).
1757      - */
1758      -static void
1759      -disp_swapped_setrun(kthread_t *tp)
1760      -{
1761      -        ASSERT(THREAD_LOCK_HELD(tp));
1762      -        ASSERT((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD);
1763      -
1764      -        switch (tp->t_state) {
1765      -        case TS_SLEEP:
1766      -                disp_lock_enter_high(&swapped_lock);
1767      -                /*
1768      -                 * Wakeup sched immediately (i.e., next tick) if the
1769      -                 * thread priority is above maxclsyspri.
1770      -                 */
1771      -                if (DISP_PRIO(tp) > maxclsyspri)
1772      -                        wake_sched = 1;
1773      -                else
1774      -                        wake_sched_sec = 1;
1775      -                THREAD_RUN(tp, &swapped_lock); /* set TS_RUN state and lock */
1776      -                break;
1777      -        case TS_RUN:                            /* called from ts_update */
1778      -                break;
1779      -        default:
1780      -                panic("disp_swapped_setrun: tp: %p bad t_state", (void *)tp);
1781      -        }
1782      -}
1783      -
1784 1646  /*
1785 1647   *      Make a thread give up its processor.  Find the processor on
1786 1648   *      which this thread is executing, and have that processor
1787 1649   *      preempt.
1788 1650   *
1789 1651   *      We allow System Duty Cycle (SDC) threads to be preempted even if
1790 1652   *      they are running at kernel priorities.  To implement this, we always
1791 1653   *      set cpu_kprunrun; this ensures preempt() will be called.  Since SDC
1792 1654   *      calls cpu_surrender() very often, we only preempt if there is anyone
1793 1655   *      competing with us.
[... 353 lines elided ...]
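
The cpu_surrender() comment above notes that SDC threads always set cpu_kprunrun but are only actually preempted "if there is anyone competing with us". A minimal sketch of that competition test follows; the helper name is an assumption, and the real routine also consults partition-wide state, not just the CPU's own queue.

        /*
         * Sketch (assumed helper): an SDC thread is competing only if
         * some runnable thread on its CPU has priority at or above its
         * own; otherwise surrendering the CPU would accomplish nothing.
         */
        static int
        sdc_competing_sketch(kthread_t *tp, cpu_t *cp)
        {
                pri_t max_pri = cp->cpu_disp->disp_maxrunpri;

                return (DISP_PRIO(tp) <= max_pri);
        }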
2147 2009  void
2148 2010  disp_adjust_unbound_pri(kthread_t *tp)
2149 2011  {
2150 2012          disp_t *dp;
2151 2013          pri_t tpri;
2152 2014  
2153 2015          ASSERT(THREAD_LOCK_HELD(tp));
2154 2016  
2155 2017          /*
2156 2018           * Don't do anything if the thread is not bound, or
2157      -         * currently not runnable or swapped out.
     2019 +         * currently not runnable.
2158 2020           */
2159 2021          if (tp->t_bound_cpu == NULL ||
2160      -            tp->t_state != TS_RUN ||
2161      -            tp->t_schedflag & TS_ON_SWAPQ)
     2022 +            tp->t_state != TS_RUN)
2162 2023                  return;
2163 2024  
2164 2025          tpri = DISP_PRIO(tp);
2165 2026          dp = tp->t_bound_cpu->cpu_disp;
2166 2027          ASSERT(tpri >= 0 && tpri < dp->disp_npri);
2167 2028          if (tpri > dp->disp_max_unbound_pri)
2168 2029                  dp->disp_max_unbound_pri = tpri;
2169 2030  }
2170 2031  
2171 2032  /*
[... 174 lines elided ...]
2346 2207  #else /* DEBUG */
2347 2208          (void) dispdeq(tp);                     /* drops disp_lock */
2348 2209  #endif /* DEBUG */
2349 2210  
2350 2211          /*
 2351 2212           * Reset the disp_queue steal time - we do not know what the smallest
2352 2213           * value across the queue is.
2353 2214           */
2354 2215          dp->disp_steal = 0;
2355 2216  
2356      -        tp->t_schedflag |= TS_DONT_SWAP;
2357      -
2358 2217          /*
2359 2218           * Setup thread to run on the current CPU.
2360 2219           */
2361 2220          tp->t_disp_queue = cp->cpu_disp;
2362 2221  
2363 2222          cp->cpu_dispthread = tp;                /* protected by spl only */
2364 2223          cp->cpu_dispatch_pri = pri;
2365 2224  
2366 2225          /*
2367 2226           * There can be a memory synchronization race between disp_getbest()
[... 334 lines elided ...]