patch delete-t_stime
patch remove-swapenq-flag
patch remove-dont-swap-flag
patch remove-swapinout-class-ops


 196 static int      fss_getclinfo(void *);
 197 static int      fss_parmsin(void *);
 198 static int      fss_parmsout(void *, pc_vaparms_t *);
 199 static int      fss_vaparmsin(void *, pc_vaparms_t *);
 200 static int      fss_vaparmsout(void *, pc_vaparms_t *);
 201 static int      fss_getclpri(pcpri_t *);
 202 static int      fss_alloc(void **, int);
 203 static void     fss_free(void *);
 204 
 205 static int      fss_enterclass(kthread_t *, id_t, void *, cred_t *, void *);
 206 static void     fss_exitclass(void *);
 207 static int      fss_canexit(kthread_t *, cred_t *);
 208 static int      fss_fork(kthread_t *, kthread_t *, void *);
 209 static void     fss_forkret(kthread_t *, kthread_t *);
 210 static void     fss_parmsget(kthread_t *, void *);
 211 static int      fss_parmsset(kthread_t *, void *, id_t, cred_t *);
 212 static void     fss_stop(kthread_t *, int, int);
 213 static void     fss_exit(kthread_t *);
 214 static void     fss_active(kthread_t *);
 215 static void     fss_inactive(kthread_t *);
 216 static pri_t    fss_swapin(kthread_t *, int);
 217 static pri_t    fss_swapout(kthread_t *, int);
 218 static void     fss_trapret(kthread_t *);
 219 static void     fss_preempt(kthread_t *);
 220 static void     fss_setrun(kthread_t *);
 221 static void     fss_sleep(kthread_t *);
 222 static void     fss_tick(kthread_t *);
 223 static void     fss_wakeup(kthread_t *);
 224 static int      fss_donice(kthread_t *, cred_t *, int, int *);
 225 static int      fss_doprio(kthread_t *, cred_t *, int, int *);
 226 static pri_t    fss_globpri(kthread_t *);
 227 static void     fss_yield(kthread_t *);
 228 static void     fss_nullsys();
 229 
 230 static struct classfuncs fss_classfuncs = {
 231         /* class functions */
 232         fss_admin,
 233         fss_getclinfo,
 234         fss_parmsin,
 235         fss_parmsout,
 236         fss_vaparmsin,
 237         fss_vaparmsout,
 238         fss_getclpri,
 239         fss_alloc,
 240         fss_free,
 241 
 242         /* thread functions */
 243         fss_enterclass,
 244         fss_exitclass,
 245         fss_canexit,
 246         fss_fork,
 247         fss_forkret,
 248         fss_parmsget,
 249         fss_parmsset,
 250         fss_stop,
 251         fss_exit,
 252         fss_active,
 253         fss_inactive,
 254         fss_swapin,
 255         fss_swapout,
 256         fss_trapret,
 257         fss_preempt,
 258         fss_setrun,
 259         fss_sleep,
 260         fss_tick,
 261         fss_wakeup,
 262         fss_donice,
 263         fss_globpri,
 264         fss_nullsys,    /* set_process_group */
 265         fss_yield,
 266         fss_doprio,
 267 };
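
The vector above is how the dispatcher reaches the class-specific entry points; once the swapin/swapout slots are removed by these patches, the corresponding dispatch macros lose their only callers. A minimal sketch of the dispatch pattern, modeled on the usual <sys/class.h> CL_* macros (treat the exact definitions as assumptions, not quotations):

/*
 * Sketch, not verbatim <sys/class.h>: each CL_* macro indirects through
 * the thread's class-operations vector, e.g. fss_classfuncs above.
 */
#define CL_TICK(t)              (*(t)->t_clfuncs->cl_tick)(t)
#define CL_SWAPIN(t, flags)     (*(t)->t_clfuncs->cl_swapin)(t, flags)
#define CL_SWAPOUT(t, flags)    (*(t)->t_clfuncs->cl_swapout)(t, flags)
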
 268 
 269 int
 270 _init()
 271 {
 272         return (mod_install(&modlinkage));
 273 }
 274 
 275 int

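_init() registers the class module; for completeness, a sketch of the customary companion entry points (scheduling classes normally refuse to unload, so _fini() is assumed to return EBUSY here):

int
_fini()
{
        /* Assumed: a registered scheduling class cannot be unloaded. */
        return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
        return (mod_info(&modlinkage, modinfop));
}
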

1812          * calculate how much CPU time it used since it was charged last time.
1813          *
1814          * CPU caps are not enforced on exiting processes - it is usually
1815          * desirable to exit as soon as possible to free resources.
1816          */
1817         if (CPUCAPS_ON()) {
1818                 thread_lock(t);
1819                 fssproc = FSSPROC(t);
1820                 (void) cpucaps_charge(t, &fssproc->fss_caps,
1821                     CPUCAPS_CHARGE_ONLY);
1822                 thread_unlock(t);
1823         }
1824 }
1825 
1826 static void
1827 fss_nullsys()
1828 {
1829 }
1830 
1831 /*
1832  * fss_swapin() returns -1 if the thread is loaded or is not eligible to be
1833  * swapped in. Otherwise, it returns the thread's effective priority based
 1834  * on swapout time and size of process (0 <= epri <= SHRT_MAX).
1835  */
1836 /*ARGSUSED*/
1837 static pri_t
1838 fss_swapin(kthread_t *t, int flags)
1839 {
1840         fssproc_t *fssproc = FSSPROC(t);
1841         long epri = -1;
1842         proc_t *pp = ttoproc(t);
1843 
1844         ASSERT(THREAD_LOCK_HELD(t));
1845 
1846         if (t->t_state == TS_RUN && (t->t_schedflag & TS_LOAD) == 0) {
1847                 time_t swapout_time;
1848 
1849                 swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
1850                 if (INHERITED(t) || (fssproc->fss_flags & FSSKPRI)) {
1851                         epri = (long)DISP_PRIO(t) + swapout_time;
1852                 } else {
1853                         /*
 1854                          * Threads which have been out for a long time,
 1855                          * have high user mode priority, and are associated
 1856                          * with a small address space are more deserving.
1857                          */
1858                         epri = fssproc->fss_umdpri;
1859                         ASSERT(epri >= 0 && epri <= fss_maxumdpri);
1860                         epri += swapout_time - pp->p_swrss / nz(maxpgio)/2;
1861                 }
1862                 /*
1863                  * Scale epri so that SHRT_MAX / 2 represents zero priority.
1864                  */
1865                 epri += SHRT_MAX / 2;
1866                 if (epri < 0)
1867                         epri = 0;
1868                 else if (epri > SHRT_MAX)
1869                         epri = SHRT_MAX;
1870         }
1871         return ((pri_t)epri);
1872 }
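
The priority returned here is only meaningful relative to other candidates; a hypothetical sketch of how a swapper-style caller might use the class hook to pick the most deserving swapped-out thread (the candidate iteration is invented for illustration):

/* Hypothetical caller: the highest CL_SWAPIN() value wins. */
kthread_t *tp, *pick = NULL;
pri_t epri, best = -1;

for (tp = swapped_list; tp != NULL; tp = tp->t_next) {  /* invented list */
        thread_lock(tp);
        epri = CL_SWAPIN(tp, 0);        /* -1 means not eligible */
        thread_unlock(tp);
        if (epri > best) {
                best = epri;
                pick = tp;
        }
}
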
1873 
1874 /*
1875  * fss_swapout() returns -1 if the thread isn't loaded or is not eligible to
1876  * be swapped out. Otherwise, it returns the thread's effective priority
1877  * based on if the swapper is in softswap or hardswap mode.
1878  */
1879 static pri_t
1880 fss_swapout(kthread_t *t, int flags)
1881 {
1882         fssproc_t *fssproc = FSSPROC(t);
1883         long epri = -1;
1884         proc_t *pp = ttoproc(t);
1885         time_t swapin_time;
1886 
1887         ASSERT(THREAD_LOCK_HELD(t));
1888 
1889         if (INHERITED(t) ||
1890             (fssproc->fss_flags & FSSKPRI) ||
1891             (t->t_proc_flag & TP_LWPEXIT) ||
1892             (t->t_state & (TS_ZOMB|TS_FREE|TS_STOPPED|TS_ONPROC|TS_WAIT)) ||
1893             !(t->t_schedflag & TS_LOAD) ||
1894             !(SWAP_OK(t)))
1895                 return (-1);
1896 
1897         ASSERT(t->t_state & (TS_SLEEP | TS_RUN));
1898 
1899         swapin_time = (ddi_get_lbolt() - t->t_stime) / hz;
1900 
1901         if (flags == SOFTSWAP) {
1902                 if (t->t_state == TS_SLEEP && swapin_time > maxslp) {
1903                         epri = 0;
1904                 } else {
1905                         return ((pri_t)epri);
1906                 }
1907         } else {
1908                 pri_t pri;
1909 
1910                 if ((t->t_state == TS_SLEEP && swapin_time > fss_minslp) ||
1911                     (t->t_state == TS_RUN && swapin_time > fss_minrun)) {
1912                         pri = fss_maxumdpri;
1913                         epri = swapin_time -
1914                             (rm_asrss(pp->p_as) / nz(maxpgio)/2) - (long)pri;
1915                 } else {
1916                         return ((pri_t)epri);
1917                 }
1918         }
1919 
1920         /*
1921          * Scale epri so that SHRT_MAX / 2 represents zero priority.
1922          */
1923         epri += SHRT_MAX / 2;
1924         if (epri < 0)
1925                 epri = 0;
1926         else if (epri > SHRT_MAX)
1927                 epri = SHRT_MAX;
1928 
1929         return ((pri_t)epri);
1930 }
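
To make the hardswap arithmetic concrete, a worked sketch with invented sample values (fss_maxumdpri of 59, 120 seconds since swap-in, a 2000-page resident set, maxpgio of 40):

/* Worked example of the hardswap branch above; all numbers are invented. */
long epri;
long swapin_time = 120;                 /* seconds since swap-in           */
long rss = 2000;                        /* rm_asrss(pp->p_as), in pages    */
long pri = 59;                          /* fss_maxumdpri (sample value)    */

epri = swapin_time - (rss / nz(maxpgio) / 2) - pri;
                                        /* 120 - (2000/40/2) - 59 = 36     */
epri += SHRT_MAX / 2;                   /* center: 36 + 16383 = 16419      */
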
1931 
1932 /*
1933  * If thread is currently at a kernel mode priority (has slept) and is
1934  * returning to the userland we assign it the appropriate user mode priority
1935  * and time quantum here.  If we're lowering the thread's priority below that
1936  * of other runnable threads then we will set runrun via cpu_surrender() to
1937  * cause preemption.
1938  */
1939 static void
1940 fss_trapret(kthread_t *t)
1941 {
1942         fssproc_t *fssproc = FSSPROC(t);
1943         cpu_t *cp = CPU;
1944 
1945         ASSERT(THREAD_LOCK_HELD(t));
1946         ASSERT(t == curthread);
1947         ASSERT(cp->cpu_dispthread == t);
1948         ASSERT(t->t_state == TS_ONPROC);
1949 
1950         t->t_kpri_req = 0;
1951         if (fssproc->fss_flags & FSSKPRI) {
1952                 /*
1953                  * If thread has blocked in the kernel
1954                  */
1955                 THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
1956                 cp->cpu_dispatch_pri = DISP_PRIO(t);
1957                 ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
1958                 fssproc->fss_flags &= ~FSSKPRI;
1959 
1960                 if (DISP_MUST_SURRENDER(t))
1961                         cpu_surrender(t);
1962         }
1963 
1964         /*
1965          * Swapout lwp if the swapper is waiting for this thread to reach
1966          * a safe point.
1967          */
1968         if (t->t_schedflag & TS_SWAPENQ) {
1969                 thread_unlock(t);
1970                 swapout_lwp(ttolwp(t));
1971                 thread_lock(t);
1972         }
1973 }
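
The FSSKPRI state undone here is driven by t_kpri_req, which kernel consumers raise around priority-sensitive critical sections; a hedged sketch of the usual request/release pairing (the lock is hypothetical; the macros are the standard t_kpri_req counters):

/*
 * Sketch: request a kernel-mode priority boost for a critical section,
 * then release it; fss_trapret() later clears t_kpri_req and FSSKPRI.
 */
THREAD_KPRI_REQUEST();                  /* curthread->t_kpri_req++ */
rw_enter(&some_hot_lock, RW_WRITER);    /* hypothetical contended lock */
/* ... critical section ... */
rw_exit(&some_hot_lock);
THREAD_KPRI_RELEASE();                  /* curthread->t_kpri_req-- */
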
1974 
1975 /*
1976  * Arrange for thread to be placed in appropriate location on dispatcher queue.
1977  * This is called with the current thread in TS_ONPROC and locked.
1978  */
1979 static void
1980 fss_preempt(kthread_t *t)
1981 {
1982         fssproc_t *fssproc = FSSPROC(t);
1983         klwp_t *lwp;
1984         uint_t flags;
1985 
1986         ASSERT(t == curthread);
1987         ASSERT(THREAD_LOCK_HELD(curthread));
1988         ASSERT(t->t_state == TS_ONPROC);
1989 
1990         /*
1991          * If preempted in the kernel, make sure the thread has a kernel
1992          * priority if needed.


1997                 THREAD_CHANGE_PRI(t, minclsyspri);
1998                 ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
1999                 t->t_trapret = 1;    /* so that fss_trapret will run */
2000                 aston(t);
2001         }
2002 
2003         /*
2004          * This thread may be placed on wait queue by CPU Caps. In this case we
2005          * do not need to do anything until it is removed from the wait queue.
 2006          * Do not enforce CPU caps on threads running at a kernel priority.
2007          */
2008         if (CPUCAPS_ON()) {
2009                 (void) cpucaps_charge(t, &fssproc->fss_caps,
2010                     CPUCAPS_CHARGE_ENFORCE);
2011 
2012                 if (!(fssproc->fss_flags & FSSKPRI) && CPUCAPS_ENFORCE(t))
2013                         return;
2014         }
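
The early return above relies on the cap-enforcement contract; a sketch of the semantics assumed here (not the cpucaps implementation itself):

/*
 * Assumed contract: when the project or zone is over its cap,
 * CPUCAPS_ENFORCE(t) parks t on the cap's wait queue and returns
 * B_TRUE, so the caller must not also place t on a dispatch queue.
 */
if (CPUCAPS_ENFORCE(t))
        return;         /* t waits on the cap wait queue until released */
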
2015 
2016         /*
 2017          * If preempted in user-land, mark the thread as swappable because it
2018          * cannot be holding any kernel locks.
2019          */
2020         ASSERT(t->t_schedflag & TS_DONT_SWAP);
2021         if (lwp != NULL && lwp->lwp_state == LWP_USER)
2022                 t->t_schedflag &= ~TS_DONT_SWAP;
2023 
2024         /*
2025          * Check to see if we're doing "preemption control" here.  If
2026          * we are, and if the user has requested that this thread not
2027          * be preempted, and if preemptions haven't been put off for
2028          * too long, let the preemption happen here but try to make
2029          * sure the thread is rescheduled as soon as possible.  We do
2030          * this by putting it on the front of the highest priority run
2031          * queue in the FSS class.  If the preemption has been put off
2032          * for too long, clear the "nopreempt" bit and let the thread
2033          * be preempted.
2034          */
2035         if (t->t_schedctl && schedctl_get_nopreempt(t)) {
2036                 if (fssproc->fss_timeleft > -SC_MAX_TICKS) {
2037                         DTRACE_SCHED1(schedctl__nopreempt, kthread_t *, t);
2038                         if (!(fssproc->fss_flags & FSSKPRI)) {
2039                                 /*
2040                                  * If not already remembered, remember current
2041                                  * priority for restoration in fss_yield().
2042                                  */
2043                                 if (!(fssproc->fss_flags & FSSRESTORE)) {
2044                                         fssproc->fss_scpri = t->t_pri;
2045                                         fssproc->fss_flags |= FSSRESTORE;
2046                                 }
2047                                 THREAD_CHANGE_PRI(t, fss_maxumdpri);
2048                                 t->t_schedflag |= TS_DONT_SWAP;
2049                         }
2050                         schedctl_set_yield(t, 1);
2051                         setfrontdq(t);
2052                         return;
2053                 } else {
2054                         if (fssproc->fss_flags & FSSRESTORE) {
2055                                 THREAD_CHANGE_PRI(t, fssproc->fss_scpri);
2056                                 fssproc->fss_flags &= ~FSSRESTORE;
2057                         }
2058                         schedctl_set_nopreempt(t, 0);
2059                         DTRACE_SCHED1(schedctl__preempt, kthread_t *, t);
2060                         /*
2061                          * Fall through and be preempted below.
2062                          */
2063                 }
2064         }
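
The nopreempt flag tested above is set from userland through the schedctl interface; a hedged userland sketch of the usual bracket (standard <schedctl.h> calls; the critical section itself is hypothetical):

#include <schedctl.h>

static schedctl_t *sc;

void
worker_init(void)
{
        sc = schedctl_init();   /* map the shared scheduler-control page */
}

void
short_critical_section(void)
{
        schedctl_start(sc);     /* ask the kernel not to preempt us */
        /* ... hold a hot user-level lock briefly ... */
        schedctl_stop(sc);      /* drop the request; yields if preemption
                                   was deferred on our behalf */
}
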
2065 
2066         flags = fssproc->fss_flags & (FSSBACKQ | FSSKPRI);
2067 
2068         if (flags == FSSBACKQ) {


2134          * for trapret processing as the thread leaves the system call so it
2135          * will drop back to normal priority range.
2136          */
2137         if (t->t_kpri_req) {
2138                 THREAD_CHANGE_PRI(t, minclsyspri);
2139                 fssproc->fss_flags |= FSSKPRI;
2140                 t->t_trapret = 1;    /* so that fss_trapret will run */
2141                 aston(t);
2142         } else if (fssproc->fss_flags & FSSKPRI) {
2143                 /*
2144                  * The thread has done a THREAD_KPRI_REQUEST(), slept, then
 2145                  * done THREAD_KPRI_RELEASE() (so now t_kpri_req is 0 again),
 2146                  * then slept again, all without finishing the current system
 2147                  * call, so trapret won't have cleared FSSKPRI.
2148                  */
2149                 fssproc->fss_flags &= ~FSSKPRI;
2150                 THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2151                 if (DISP_MUST_SURRENDER(curthread))
2152                         cpu_surrender(t);
2153         }
2154         t->t_stime = ddi_get_lbolt();        /* time stamp for the swapper */
2155 }
2156 
2157 /*
 2158  * A tick interrupt has occurred on a running thread. Check to see if our
2159  * time slice has expired.  We must also clear the TS_DONT_SWAP flag in
2160  * t_schedflag if the thread is eligible to be swapped out.
2161  */
2162 static void
2163 fss_tick(kthread_t *t)
2164 {
2165         fssproc_t *fssproc;
2166         fssproj_t *fssproj;
2167         klwp_t *lwp;
2168         boolean_t call_cpu_surrender = B_FALSE;
2169         boolean_t cpucaps_enforce = B_FALSE;
2170 
2171         ASSERT(MUTEX_HELD(&(ttoproc(t))->p_lock));
2172 
2173         /*
2174          * It's safe to access fsspset and fssproj structures because we're
2175          * holding our p_lock here.
2176          */
2177         thread_lock(t);
2178         fssproc = FSSPROC(t);
2179         fssproj = FSSPROC2FSSPROJ(fssproc);
2180         if (fssproj != NULL) {


2218                                         DTRACE_SCHED1(schedctl__nopreempt,
2219                                             kthread_t *, t);
2220                                         schedctl_set_yield(t, 1);
2221                                         thread_unlock_nopreempt(t);
2222                                         return;
2223                                 }
2224                         }
2225                         fssproc->fss_flags &= ~FSSRESTORE;
2226 
2227                         fss_newpri(fssproc);
2228                         new_pri = fssproc->fss_umdpri;
2229                         ASSERT(new_pri >= 0 && new_pri <= fss_maxglobpri);
2230 
2231                         /*
2232                          * When the priority of a thread is changed, it may
2233                          * be necessary to adjust its position on a sleep queue
2234                          * or dispatch queue. The function thread_change_pri
2235                          * accomplishes this.
2236                          */
2237                         if (thread_change_pri(t, new_pri, 0)) {
2238                                 if ((t->t_schedflag & TS_LOAD) &&
2239                                     (lwp = t->t_lwp) &&
2240                                     lwp->lwp_state == LWP_USER)
2241                                         t->t_schedflag &= ~TS_DONT_SWAP;
2242                                 fssproc->fss_timeleft = fss_quantum;
2243                         } else {
2244                                 call_cpu_surrender = B_TRUE;
2245                         }
2246                 } else if (t->t_state == TS_ONPROC &&
2247                     t->t_pri < t->t_disp_queue->disp_maxrunpri) {
2248                         /*
2249                          * If there is a higher-priority thread which is
 2250                          * waiting for a processor, then the thread surrenders
2251                          * the processor.
2252                          */
2253                         call_cpu_surrender = B_TRUE;
2254                 }
2255         }
2256 
2257         if (cpucaps_enforce && 2 * fssproc->fss_timeleft > fss_quantum) {
2258                 /*
2259                  * The thread used more than half of its quantum, so assume that
2260                  * it used the whole quantum.
2261                  *


2285         thread_unlock_nopreempt(t);     /* clock thread can't be preempted */
2286 }
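
The elided body of fss_tick() charges the tick against the thread's quantum before the expiry handling shown above; a simplified sketch of that accounting (not the verbatim source):

/* Simplified sketch of the elided quantum accounting in fss_tick(). */
pri_t new_pri;

fssproc->fss_timeleft--;
if (fssproc->fss_timeleft <= 0) {
        fss_newpri(fssproc);            /* recompute fss_umdpri from usage */
        new_pri = fssproc->fss_umdpri;
        (void) thread_change_pri(t, new_pri, 0);  /* requeue at new_pri */
}
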
2287 
2288 /*
2289  * Processes waking up go to the back of their queue.  We don't need to assign
 2290  * a time quantum here because the thread is still at a kernel mode priority and
2291  * the time slicing is not done for threads running in the kernel after
2292  * sleeping.  The proper time quantum will be assigned by fss_trapret before the
2293  * thread returns to user mode.
2294  */
2295 static void
2296 fss_wakeup(kthread_t *t)
2297 {
2298         fssproc_t *fssproc;
2299 
2300         ASSERT(THREAD_LOCK_HELD(t));
2301         ASSERT(t->t_state == TS_SLEEP);
2302 
2303         fss_active(t);
2304 
2305         t->t_stime = ddi_get_lbolt();                /* time stamp for the swapper */
2306         fssproc = FSSPROC(t);
2307         fssproc->fss_flags &= ~FSSBACKQ;
2308 
2309         if (fssproc->fss_flags & FSSKPRI) {
2310                 /*
2311                  * If we already have a kernel priority assigned, then we
2312                  * just use it.
2313                  */
2314                 setbackdq(t);
2315         } else if (t->t_kpri_req) {
2316                 /*
2317                  * Give thread a priority boost if we were asked.
2318                  */
2319                 fssproc->fss_flags |= FSSKPRI;
2320                 THREAD_CHANGE_PRI(t, minclsyspri);
2321                 setbackdq(t);
2322                 t->t_trapret = 1;    /* so that fss_trapret will run */
2323                 aston(t);
2324         } else {
2325                 /*




 196 static int      fss_getclinfo(void *);
 197 static int      fss_parmsin(void *);
 198 static int      fss_parmsout(void *, pc_vaparms_t *);
 199 static int      fss_vaparmsin(void *, pc_vaparms_t *);
 200 static int      fss_vaparmsout(void *, pc_vaparms_t *);
 201 static int      fss_getclpri(pcpri_t *);
 202 static int      fss_alloc(void **, int);
 203 static void     fss_free(void *);
 204 
 205 static int      fss_enterclass(kthread_t *, id_t, void *, cred_t *, void *);
 206 static void     fss_exitclass(void *);
 207 static int      fss_canexit(kthread_t *, cred_t *);
 208 static int      fss_fork(kthread_t *, kthread_t *, void *);
 209 static void     fss_forkret(kthread_t *, kthread_t *);
 210 static void     fss_parmsget(kthread_t *, void *);
 211 static int      fss_parmsset(kthread_t *, void *, id_t, cred_t *);
 212 static void     fss_stop(kthread_t *, int, int);
 213 static void     fss_exit(kthread_t *);
 214 static void     fss_active(kthread_t *);
 215 static void     fss_inactive(kthread_t *);
 216 static void     fss_trapret(kthread_t *);
 217 static void     fss_preempt(kthread_t *);
 218 static void     fss_setrun(kthread_t *);
 219 static void     fss_sleep(kthread_t *);
 220 static void     fss_tick(kthread_t *);
 221 static void     fss_wakeup(kthread_t *);
 222 static int      fss_donice(kthread_t *, cred_t *, int, int *);
 223 static int      fss_doprio(kthread_t *, cred_t *, int, int *);
 224 static pri_t    fss_globpri(kthread_t *);
 225 static void     fss_yield(kthread_t *);
 226 static void     fss_nullsys();
 227 
 228 static struct classfuncs fss_classfuncs = {
 229         /* class functions */
 230         fss_admin,
 231         fss_getclinfo,
 232         fss_parmsin,
 233         fss_parmsout,
 234         fss_vaparmsin,
 235         fss_vaparmsout,
 236         fss_getclpri,
 237         fss_alloc,
 238         fss_free,
 239 
 240         /* thread functions */
 241         fss_enterclass,
 242         fss_exitclass,
 243         fss_canexit,
 244         fss_fork,
 245         fss_forkret,
 246         fss_parmsget,
 247         fss_parmsset,
 248         fss_stop,
 249         fss_exit,
 250         fss_active,
 251         fss_inactive,
 252         fss_trapret,
 253         fss_preempt,
 254         fss_setrun,
 255         fss_sleep,
 256         fss_tick,
 257         fss_wakeup,
 258         fss_donice,
 259         fss_globpri,
 260         fss_nullsys,    /* set_process_group */
 261         fss_yield,
 262         fss_doprio,
 263 };
 264 
 265 int
 266 _init()
 267 {
 268         return (mod_install(&modlinkage));
 269 }
 270 
 271 int


1808          * calculate how much CPU time it used since it was charged last time.
1809          *
1810          * CPU caps are not enforced on exiting processes - it is usually
1811          * desirable to exit as soon as possible to free resources.
1812          */
1813         if (CPUCAPS_ON()) {
1814                 thread_lock(t);
1815                 fssproc = FSSPROC(t);
1816                 (void) cpucaps_charge(t, &fssproc->fss_caps,
1817                     CPUCAPS_CHARGE_ONLY);
1818                 thread_unlock(t);
1819         }
1820 }
1821 
1822 static void
1823 fss_nullsys()
1824 {
1825 }
1826 
 1827 /*
 1828  * If thread is currently at a kernel mode priority (has slept) and is
1829  * returning to the userland we assign it the appropriate user mode priority
1830  * and time quantum here.  If we're lowering the thread's priority below that
1831  * of other runnable threads then we will set runrun via cpu_surrender() to
1832  * cause preemption.
1833  */
1834 static void
1835 fss_trapret(kthread_t *t)
1836 {
1837         fssproc_t *fssproc = FSSPROC(t);
1838         cpu_t *cp = CPU;
1839 
1840         ASSERT(THREAD_LOCK_HELD(t));
1841         ASSERT(t == curthread);
1842         ASSERT(cp->cpu_dispthread == t);
1843         ASSERT(t->t_state == TS_ONPROC);
1844 
1845         t->t_kpri_req = 0;
1846         if (fssproc->fss_flags & FSSKPRI) {
1847                 /*
1848                  * If thread has blocked in the kernel
1849                  */
1850                 THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
1851                 cp->cpu_dispatch_pri = DISP_PRIO(t);
1852                 ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
1853                 fssproc->fss_flags &= ~FSSKPRI;
1854 
1855                 if (DISP_MUST_SURRENDER(t))
1856                         cpu_surrender(t);
 1857         }
 1858 }
1859 
1860 /*
1861  * Arrange for thread to be placed in appropriate location on dispatcher queue.
1862  * This is called with the current thread in TS_ONPROC and locked.
1863  */
1864 static void
1865 fss_preempt(kthread_t *t)
1866 {
1867         fssproc_t *fssproc = FSSPROC(t);
1868         klwp_t *lwp;
1869         uint_t flags;
1870 
1871         ASSERT(t == curthread);
1872         ASSERT(THREAD_LOCK_HELD(curthread));
1873         ASSERT(t->t_state == TS_ONPROC);
1874 
1875         /*
1876          * If preempted in the kernel, make sure the thread has a kernel
1877          * priority if needed.


1882                 THREAD_CHANGE_PRI(t, minclsyspri);
1883                 ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
1884                 t->t_trapret = 1;    /* so that fss_trapret will run */
1885                 aston(t);
1886         }
1887 
1888         /*
1889          * This thread may be placed on wait queue by CPU Caps. In this case we
1890          * do not need to do anything until it is removed from the wait queue.
1891          * Do not enforce CPU caps on threads running at a kernel priority
1892          */
1893         if (CPUCAPS_ON()) {
1894                 (void) cpucaps_charge(t, &fssproc->fss_caps,
1895                     CPUCAPS_CHARGE_ENFORCE);
1896 
1897                 if (!(fssproc->fss_flags & FSSKPRI) && CPUCAPS_ENFORCE(t))
1898                         return;
1899         }
1900 
 1901         /*
 1902          * Check to see if we're doing "preemption control" here.  If
1903          * we are, and if the user has requested that this thread not
1904          * be preempted, and if preemptions haven't been put off for
1905          * too long, let the preemption happen here but try to make
1906          * sure the thread is rescheduled as soon as possible.  We do
1907          * this by putting it on the front of the highest priority run
1908          * queue in the FSS class.  If the preemption has been put off
1909          * for too long, clear the "nopreempt" bit and let the thread
1910          * be preempted.
1911          */
1912         if (t->t_schedctl && schedctl_get_nopreempt(t)) {
1913                 if (fssproc->fss_timeleft > -SC_MAX_TICKS) {
1914                         DTRACE_SCHED1(schedctl__nopreempt, kthread_t *, t);
1915                         if (!(fssproc->fss_flags & FSSKPRI)) {
1916                                 /*
1917                                  * If not already remembered, remember current
1918                                  * priority for restoration in fss_yield().
1919                                  */
1920                                 if (!(fssproc->fss_flags & FSSRESTORE)) {
1921                                         fssproc->fss_scpri = t->t_pri;
1922                                         fssproc->fss_flags |= FSSRESTORE;
1923                                 }
 1924                                 THREAD_CHANGE_PRI(t, fss_maxumdpri);
 1925                         }
1926                         schedctl_set_yield(t, 1);
1927                         setfrontdq(t);
1928                         return;
1929                 } else {
1930                         if (fssproc->fss_flags & FSSRESTORE) {
1931                                 THREAD_CHANGE_PRI(t, fssproc->fss_scpri);
1932                                 fssproc->fss_flags &= ~FSSRESTORE;
1933                         }
1934                         schedctl_set_nopreempt(t, 0);
1935                         DTRACE_SCHED1(schedctl__preempt, kthread_t *, t);
1936                         /*
1937                          * Fall through and be preempted below.
1938                          */
1939                 }
1940         }
1941 
1942         flags = fssproc->fss_flags & (FSSBACKQ | FSSKPRI);
1943 
1944         if (flags == FSSBACKQ) {


2010          * for trapret processing as the thread leaves the system call so it
2011          * will drop back to normal priority range.
2012          */
2013         if (t->t_kpri_req) {
2014                 THREAD_CHANGE_PRI(t, minclsyspri);
2015                 fssproc->fss_flags |= FSSKPRI;
2016                 t->t_trapret = 1;    /* so that fss_trapret will run */
2017                 aston(t);
2018         } else if (fssproc->fss_flags & FSSKPRI) {
2019                 /*
2020                  * The thread has done a THREAD_KPRI_REQUEST(), slept, then
 2021                  * done THREAD_KPRI_RELEASE() (so now t_kpri_req is 0 again),
 2022                  * then slept again, all without finishing the current system
 2023                  * call, so trapret won't have cleared FSSKPRI.
2024                  */
2025                 fssproc->fss_flags &= ~FSSKPRI;
2026                 THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2027                 if (DISP_MUST_SURRENDER(curthread))
2028                         cpu_surrender(t);
 2029         }
 2030 }
2031 
2032 /*
 2033  * A tick interrupt has occurred on a running thread. Check to see if our
 2034  * time slice has expired.
 2035  */
2036 static void
2037 fss_tick(kthread_t *t)
2038 {
2039         fssproc_t *fssproc;
2040         fssproj_t *fssproj;
2041         klwp_t *lwp;
2042         boolean_t call_cpu_surrender = B_FALSE;
2043         boolean_t cpucaps_enforce = B_FALSE;
2044 
2045         ASSERT(MUTEX_HELD(&(ttoproc(t))->p_lock));
2046 
2047         /*
2048          * It's safe to access fsspset and fssproj structures because we're
2049          * holding our p_lock here.
2050          */
2051         thread_lock(t);
2052         fssproc = FSSPROC(t);
2053         fssproj = FSSPROC2FSSPROJ(fssproc);
2054         if (fssproj != NULL) {


2092                                         DTRACE_SCHED1(schedctl__nopreempt,
2093                                             kthread_t *, t);
2094                                         schedctl_set_yield(t, 1);
2095                                         thread_unlock_nopreempt(t);
2096                                         return;
2097                                 }
2098                         }
2099                         fssproc->fss_flags &= ~FSSRESTORE;
2100 
2101                         fss_newpri(fssproc);
2102                         new_pri = fssproc->fss_umdpri;
2103                         ASSERT(new_pri >= 0 && new_pri <= fss_maxglobpri);
2104 
2105                         /*
2106                          * When the priority of a thread is changed, it may
2107                          * be necessary to adjust its position on a sleep queue
2108                          * or dispatch queue. The function thread_change_pri
2109                          * accomplishes this.
2110                          */
 2111                         if (thread_change_pri(t, new_pri, 0)) {
 2112                                 fssproc->fss_timeleft = fss_quantum;
2113                         } else {
2114                                 call_cpu_surrender = B_TRUE;
2115                         }
2116                 } else if (t->t_state == TS_ONPROC &&
2117                     t->t_pri < t->t_disp_queue->disp_maxrunpri) {
2118                         /*
2119                          * If there is a higher-priority thread which is
 2120                          * waiting for a processor, then the thread surrenders
2121                          * the processor.
2122                          */
2123                         call_cpu_surrender = B_TRUE;
2124                 }
2125         }
2126 
2127         if (cpucaps_enforce && 2 * fssproc->fss_timeleft > fss_quantum) {
2128                 /*
2129                  * The thread used more than half of its quantum, so assume that
2130                  * it used the whole quantum.
2131                  *


2155         thread_unlock_nopreempt(t);     /* clock thread can't be preempted */
2156 }
2157 
2158 /*
2159  * Processes waking up go to the back of their queue.  We don't need to assign
 2160  * a time quantum here because the thread is still at a kernel mode priority and
2161  * the time slicing is not done for threads running in the kernel after
2162  * sleeping.  The proper time quantum will be assigned by fss_trapret before the
2163  * thread returns to user mode.
2164  */
2165 static void
2166 fss_wakeup(kthread_t *t)
2167 {
2168         fssproc_t *fssproc;
2169 
2170         ASSERT(THREAD_LOCK_HELD(t));
2171         ASSERT(t->t_state == TS_SLEEP);
2172 
2173         fss_active(t);
 2174 
 2175         fssproc = FSSPROC(t);
2176         fssproc->fss_flags &= ~FSSBACKQ;
2177 
2178         if (fssproc->fss_flags & FSSKPRI) {
2179                 /*
2180                  * If we already have a kernel priority assigned, then we
2181                  * just use it.
2182                  */
2183                 setbackdq(t);
2184         } else if (t->t_kpri_req) {
2185                 /*
2186                  * Give thread a priority boost if we were asked.
2187                  */
2188                 fssproc->fss_flags |= FSSKPRI;
2189                 THREAD_CHANGE_PRI(t, minclsyspri);
2190                 setbackdq(t);
2191                 t->t_trapret = 1;    /* so that fss_trapret will run */
2192                 aston(t);
2193         } else {
2194                 /*