Print this page
patch delete-t_stime
patch remove-swapenq-flag
patch remove-dont-swap-flag
patch remove-swapinout-class-ops

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/disp/fss.c
          +++ new/usr/src/uts/common/disp/fss.c
↓ open down ↓ 205 lines elided ↑ open up ↑
 206  206  static void     fss_exitclass(void *);
 207  207  static int      fss_canexit(kthread_t *, cred_t *);
 208  208  static int      fss_fork(kthread_t *, kthread_t *, void *);
 209  209  static void     fss_forkret(kthread_t *, kthread_t *);
 210  210  static void     fss_parmsget(kthread_t *, void *);
 211  211  static int      fss_parmsset(kthread_t *, void *, id_t, cred_t *);
 212  212  static void     fss_stop(kthread_t *, int, int);
 213  213  static void     fss_exit(kthread_t *);
 214  214  static void     fss_active(kthread_t *);
 215  215  static void     fss_inactive(kthread_t *);
 216      -static pri_t    fss_swapin(kthread_t *, int);
 217      -static pri_t    fss_swapout(kthread_t *, int);
 218  216  static void     fss_trapret(kthread_t *);
 219  217  static void     fss_preempt(kthread_t *);
 220  218  static void     fss_setrun(kthread_t *);
 221  219  static void     fss_sleep(kthread_t *);
 222  220  static void     fss_tick(kthread_t *);
 223  221  static void     fss_wakeup(kthread_t *);
 224  222  static int      fss_donice(kthread_t *, cred_t *, int, int *);
 225  223  static int      fss_doprio(kthread_t *, cred_t *, int, int *);
 226  224  static pri_t    fss_globpri(kthread_t *);
 227  225  static void     fss_yield(kthread_t *);
↓ open down ↓ 16 lines elided ↑ open up ↑
 244  242          fss_exitclass,
 245  243          fss_canexit,
 246  244          fss_fork,
 247  245          fss_forkret,
 248  246          fss_parmsget,
 249  247          fss_parmsset,
 250  248          fss_stop,
 251  249          fss_exit,
 252  250          fss_active,
 253  251          fss_inactive,
 254      -        fss_swapin,
 255      -        fss_swapout,
 256  252          fss_trapret,
 257  253          fss_preempt,
 258  254          fss_setrun,
 259  255          fss_sleep,
 260  256          fss_tick,
 261  257          fss_wakeup,
 262  258          fss_donice,
 263  259          fss_globpri,
 264  260          fss_nullsys,    /* set_process_group */
 265  261          fss_yield,
↓ open down ↓ 1556 lines elided ↑ open up ↑
1822 1818                  thread_unlock(t);
1823 1819          }
1824 1820  }
1825 1821  
1826 1822  static void
1827 1823  fss_nullsys()
1828 1824  {
1829 1825  }
1830 1826  
1831 1827  /*
1832      - * fss_swapin() returns -1 if the thread is loaded or is not eligible to be
1833      - * swapped in. Otherwise, it returns the thread's effective priority based
 1834      - * on swapout time and size of process (0 <= epri <= SHRT_MAX).
1835      - */
1836      -/*ARGSUSED*/
1837      -static pri_t
1838      -fss_swapin(kthread_t *t, int flags)
1839      -{
1840      -        fssproc_t *fssproc = FSSPROC(t);
1841      -        long epri = -1;
1842      -        proc_t *pp = ttoproc(t);
1843      -
1844      -        ASSERT(THREAD_LOCK_HELD(t));
1845      -
1846      -        if (t->t_state == TS_RUN && (t->t_schedflag & TS_LOAD) == 0) {
1847      -                time_t swapout_time;
1848      -
1849      -                swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
1850      -                if (INHERITED(t) || (fssproc->fss_flags & FSSKPRI)) {
1851      -                        epri = (long)DISP_PRIO(t) + swapout_time;
1852      -                } else {
1853      -                        /*
1854      -                         * Threads which have been out for a long time,
1855      -                         * have high user mode priority and are associated
1856      -                         * with a small address space are more deserving.
1857      -                         */
1858      -                        epri = fssproc->fss_umdpri;
1859      -                        ASSERT(epri >= 0 && epri <= fss_maxumdpri);
1860      -                        epri += swapout_time - pp->p_swrss / nz(maxpgio)/2;
1861      -                }
1862      -                /*
1863      -                 * Scale epri so that SHRT_MAX / 2 represents zero priority.
1864      -                 */
1865      -                epri += SHRT_MAX / 2;
1866      -                if (epri < 0)
1867      -                        epri = 0;
1868      -                else if (epri > SHRT_MAX)
1869      -                        epri = SHRT_MAX;
1870      -        }
1871      -        return ((pri_t)epri);
1872      -}
1873      -
1874      -/*
1875      - * fss_swapout() returns -1 if the thread isn't loaded or is not eligible to
1876      - * be swapped out. Otherwise, it returns the thread's effective priority
1877      - * based on if the swapper is in softswap or hardswap mode.
1878      - */
1879      -static pri_t
1880      -fss_swapout(kthread_t *t, int flags)
1881      -{
1882      -        fssproc_t *fssproc = FSSPROC(t);
1883      -        long epri = -1;
1884      -        proc_t *pp = ttoproc(t);
1885      -        time_t swapin_time;
1886      -
1887      -        ASSERT(THREAD_LOCK_HELD(t));
1888      -
1889      -        if (INHERITED(t) ||
1890      -            (fssproc->fss_flags & FSSKPRI) ||
1891      -            (t->t_proc_flag & TP_LWPEXIT) ||
1892      -            (t->t_state & (TS_ZOMB|TS_FREE|TS_STOPPED|TS_ONPROC|TS_WAIT)) ||
1893      -            !(t->t_schedflag & TS_LOAD) ||
1894      -            !(SWAP_OK(t)))
1895      -                return (-1);
1896      -
1897      -        ASSERT(t->t_state & (TS_SLEEP | TS_RUN));
1898      -
1899      -        swapin_time = (ddi_get_lbolt() - t->t_stime) / hz;
1900      -
1901      -        if (flags == SOFTSWAP) {
1902      -                if (t->t_state == TS_SLEEP && swapin_time > maxslp) {
1903      -                        epri = 0;
1904      -                } else {
1905      -                        return ((pri_t)epri);
1906      -                }
1907      -        } else {
1908      -                pri_t pri;
1909      -
1910      -                if ((t->t_state == TS_SLEEP && swapin_time > fss_minslp) ||
1911      -                    (t->t_state == TS_RUN && swapin_time > fss_minrun)) {
1912      -                        pri = fss_maxumdpri;
1913      -                        epri = swapin_time -
1914      -                            (rm_asrss(pp->p_as) / nz(maxpgio)/2) - (long)pri;
1915      -                } else {
1916      -                        return ((pri_t)epri);
1917      -                }
1918      -        }
1919      -
1920      -        /*
1921      -         * Scale epri so that SHRT_MAX / 2 represents zero priority.
1922      -         */
1923      -        epri += SHRT_MAX / 2;
1924      -        if (epri < 0)
1925      -                epri = 0;
1926      -        else if (epri > SHRT_MAX)
1927      -                epri = SHRT_MAX;
1928      -
1929      -        return ((pri_t)epri);
1930      -}
1931      -
1932      -/*
1933 1828   * If thread is currently at a kernel mode priority (has slept) and is
1934 1829   * returning to the userland we assign it the appropriate user mode priority
1935 1830   * and time quantum here.  If we're lowering the thread's priority below that
1936 1831   * of other runnable threads then we will set runrun via cpu_surrender() to
1937 1832   * cause preemption.
1938 1833   */
1939 1834  static void
1940 1835  fss_trapret(kthread_t *t)
1941 1836  {
1942 1837          fssproc_t *fssproc = FSSPROC(t);
↓ open down ↓ 10 lines elided ↑ open up ↑
1953 1848                   * If thread has blocked in the kernel
1954 1849                   */
1955 1850                  THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
1956 1851                  cp->cpu_dispatch_pri = DISP_PRIO(t);
1957 1852                  ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
1958 1853                  fssproc->fss_flags &= ~FSSKPRI;
1959 1854  
1960 1855                  if (DISP_MUST_SURRENDER(t))
1961 1856                          cpu_surrender(t);
1962 1857          }
1963      -
1964      -        /*
1965      -         * Swapout lwp if the swapper is waiting for this thread to reach
1966      -         * a safe point.
1967      -         */
1968      -        if (t->t_schedflag & TS_SWAPENQ) {
1969      -                thread_unlock(t);
1970      -                swapout_lwp(ttolwp(t));
1971      -                thread_lock(t);
1972      -        }
1973 1858  }
1974 1859  
1975 1860  /*
1976 1861   * Arrange for thread to be placed in appropriate location on dispatcher queue.
1977 1862   * This is called with the current thread in TS_ONPROC and locked.
1978 1863   */
1979 1864  static void
1980 1865  fss_preempt(kthread_t *t)
1981 1866  {
1982 1867          fssproc_t *fssproc = FSSPROC(t);
↓ open down ↓ 24 lines elided ↑ open up ↑
2007 1892           */
2008 1893          if (CPUCAPS_ON()) {
2009 1894                  (void) cpucaps_charge(t, &fssproc->fss_caps,
2010 1895                      CPUCAPS_CHARGE_ENFORCE);
2011 1896  
2012 1897                  if (!(fssproc->fss_flags & FSSKPRI) && CPUCAPS_ENFORCE(t))
2013 1898                          return;
2014 1899          }
2015 1900  
2016 1901          /*
2017      -         * If preempted in user-land mark the thread as swappable because it
2018      -         * cannot be holding any kernel locks.
2019      -         */
2020      -        ASSERT(t->t_schedflag & TS_DONT_SWAP);
2021      -        if (lwp != NULL && lwp->lwp_state == LWP_USER)
2022      -                t->t_schedflag &= ~TS_DONT_SWAP;
2023      -
2024      -        /*
2025 1902           * Check to see if we're doing "preemption control" here.  If
2026 1903           * we are, and if the user has requested that this thread not
2027 1904           * be preempted, and if preemptions haven't been put off for
2028 1905           * too long, let the preemption happen here but try to make
2029 1906           * sure the thread is rescheduled as soon as possible.  We do
2030 1907           * this by putting it on the front of the highest priority run
2031 1908           * queue in the FSS class.  If the preemption has been put off
2032 1909           * for too long, clear the "nopreempt" bit and let the thread
2033 1910           * be preempted.
2034 1911           */
↓ open down ↓ 3 lines elided ↑ open up ↑
2038 1915                          if (!(fssproc->fss_flags & FSSKPRI)) {
2039 1916                                  /*
2040 1917                                   * If not already remembered, remember current
2041 1918                                   * priority for restoration in fss_yield().
2042 1919                                   */
2043 1920                                  if (!(fssproc->fss_flags & FSSRESTORE)) {
2044 1921                                          fssproc->fss_scpri = t->t_pri;
2045 1922                                          fssproc->fss_flags |= FSSRESTORE;
2046 1923                                  }
2047 1924                                  THREAD_CHANGE_PRI(t, fss_maxumdpri);
2048      -                                t->t_schedflag |= TS_DONT_SWAP;
2049 1925                          }
2050 1926                          schedctl_set_yield(t, 1);
2051 1927                          setfrontdq(t);
2052 1928                          return;
2053 1929                  } else {
2054 1930                          if (fssproc->fss_flags & FSSRESTORE) {
2055 1931                                  THREAD_CHANGE_PRI(t, fssproc->fss_scpri);
2056 1932                                  fssproc->fss_flags &= ~FSSRESTORE;
2057 1933                          }
2058 1934                          schedctl_set_nopreempt(t, 0);
↓ open down ↓ 85 lines elided ↑ open up ↑
2144 2020                   * The thread has done a THREAD_KPRI_REQUEST(), slept, then
 2145 2021                   * done THREAD_KPRI_RELEASE() (so now t_kpri_req is 0 again),
2146 2022                   * then slept again all without finishing the current system
2147 2023                   * call so trapret won't have cleared FSSKPRI
2148 2024                   */
2149 2025                  fssproc->fss_flags &= ~FSSKPRI;
2150 2026                  THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2151 2027                  if (DISP_MUST_SURRENDER(curthread))
2152 2028                          cpu_surrender(t);
2153 2029          }
2154      -        t->t_stime = ddi_get_lbolt();   /* time stamp for the swapper */
2155 2030  }
2156 2031  
2157 2032  /*
 2158 2033   * A tick interrupt has occurred on a running thread. Check to see if our
2159      - * time slice has expired.  We must also clear the TS_DONT_SWAP flag in
2160      - * t_schedflag if the thread is eligible to be swapped out.
     2034 + * time slice has expired.
2161 2035   */
2162 2036  static void
2163 2037  fss_tick(kthread_t *t)
2164 2038  {
2165 2039          fssproc_t *fssproc;
2166 2040          fssproj_t *fssproj;
2167 2041          klwp_t *lwp;
2168 2042          boolean_t call_cpu_surrender = B_FALSE;
2169 2043          boolean_t cpucaps_enforce = B_FALSE;
2170 2044  
↓ open down ↓ 57 lines elided ↑ open up ↑
2228 2102                          new_pri = fssproc->fss_umdpri;
2229 2103                          ASSERT(new_pri >= 0 && new_pri <= fss_maxglobpri);
2230 2104  
2231 2105                          /*
2232 2106                           * When the priority of a thread is changed, it may
2233 2107                           * be necessary to adjust its position on a sleep queue
2234 2108                           * or dispatch queue. The function thread_change_pri
2235 2109                           * accomplishes this.
2236 2110                           */
2237 2111                          if (thread_change_pri(t, new_pri, 0)) {
2238      -                                if ((t->t_schedflag & TS_LOAD) &&
2239      -                                    (lwp = t->t_lwp) &&
2240      -                                    lwp->lwp_state == LWP_USER)
2241      -                                        t->t_schedflag &= ~TS_DONT_SWAP;
2242 2112                                  fssproc->fss_timeleft = fss_quantum;
2243 2113                          } else {
2244 2114                                  call_cpu_surrender = B_TRUE;
2245 2115                          }
2246 2116                  } else if (t->t_state == TS_ONPROC &&
2247 2117                      t->t_pri < t->t_disp_queue->disp_maxrunpri) {
2248 2118                          /*
2249 2119                           * If there is a higher-priority thread which is
2250 2120                           * waiting for a processor, then thread surrenders
2251 2121                           * the processor.
↓ open down ↓ 43 lines elided ↑ open up ↑
2295 2165  static void
2296 2166  fss_wakeup(kthread_t *t)
2297 2167  {
2298 2168          fssproc_t *fssproc;
2299 2169  
2300 2170          ASSERT(THREAD_LOCK_HELD(t));
2301 2171          ASSERT(t->t_state == TS_SLEEP);
2302 2172  
2303 2173          fss_active(t);
2304 2174  
2305      -        t->t_stime = ddi_get_lbolt();           /* time stamp for the swapper */
2306 2175          fssproc = FSSPROC(t);
2307 2176          fssproc->fss_flags &= ~FSSBACKQ;
2308 2177  
2309 2178          if (fssproc->fss_flags & FSSKPRI) {
2310 2179                  /*
2311 2180                   * If we already have a kernel priority assigned, then we
2312 2181                   * just use it.
2313 2182                   */
2314 2183                  setbackdq(t);
2315 2184          } else if (t->t_kpri_req) {
↓ open down ↓ 364 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX