Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/os/task.c
+++ new/usr/src/uts/common/os/task.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/atomic.h>
26 26 #include <sys/callb.h>
27 27 #include <sys/cmn_err.h>
28 28 #include <sys/exacct.h>
29 29 #include <sys/id_space.h>
30 30 #include <sys/kmem.h>
31 31 #include <sys/kstat.h>
32 32 #include <sys/modhash.h>
33 33 #include <sys/mutex.h>
34 34 #include <sys/proc.h>
35 35 #include <sys/project.h>
36 36 #include <sys/rctl.h>
37 37 #include <sys/systm.h>
38 38 #include <sys/task.h>
39 39 #include <sys/time.h>
40 40 #include <sys/types.h>
41 41 #include <sys/zone.h>
42 42 #include <sys/cpuvar.h>
43 43 #include <sys/fss.h>
44 44 #include <sys/class.h>
45 45 #include <sys/project.h>
46 46
47 47 /*
48 48 * Tasks
49 49 *
50 50 * A task is a collection of processes, associated with a common project ID
51 51 * and related by a common initial parent. The task primarily represents a
52 52 * natural process sequence with known resource usage, although it can also be
53 53 * viewed as a convenient grouping of processes for signal delivery, processor
54 54 * binding, and administrative operations.
55 55 *
56 56 * Membership and observership
57 57 * We can conceive of situations where processes outside of the task may wish
58 58 * to examine the resource usage of the task. Similarly, a number of the
59 59 * administrative operations on a task can be performed by processes who are
60 60 * not members of the task. Accordingly, we must design a locking strategy
61 61 * where observers of the task, who wish to examine or operate on the task,
62 62 * and members of task, who can perform the mentioned operations, as well as
63 63 * leave the task, see a consistent and correct representation of the task at
64 64 * all times.
65 65 *
66 66 * Locking
67 67 * Because the task membership is a new relation between processes, its
68 68 * locking becomes an additional responsibility of the pidlock/p_lock locking
69 69 * sequence; however, tasks closely resemble sessions and the session locking
70 70 * model is mostly appropriate for the interaction of tasks, processes, and
71 71 * procfs.
72 72 *
73 73 * kmutex_t task_hash_lock
74 74 * task_hash_lock is a global lock protecting the contents of the task
75 75 * ID-to-task pointer hash. Holders of task_hash_lock must not attempt to
76 76 * acquire pidlock or p_lock.
77 77 * uint_t tk_hold_count
78 78 * tk_hold_count, the number of members and observers of the current task,
79 79 * must be manipulated atomically.
80 80 * proc_t *tk_memb_list
81 81 * proc_t *p_tasknext
82 82 * proc_t *p_taskprev
83 83 * The task's membership list is protected by pidlock, and is therefore
84 84 * always acquired before any of its members' p_lock mutexes. The p_task
85 85 * member of the proc structure is protected by pidlock or p_lock for
86 86 * reading, and by both pidlock and p_lock for modification, as is done for
87 87 * p_sessp. The key point is that only the process can modify its p_task,
88 88 * and not any entity on the system. (/proc will use prlock() to prevent
89 89 * the process from leaving, as opposed to pidlock.)
90 90 * kmutex_t tk_usage_lock
91 91 * tk_usage_lock is a per-task lock protecting the contents of the task
92 92 * usage structure and tk_nlwps counter for the task.max-lwps resource
93 93 * control.
94 94 */
95 95
96 96 int task_hash_size = 256;
97 97 static kmutex_t task_hash_lock;
98 98 static mod_hash_t *task_hash;
99 99
100 100 static id_space_t *taskid_space; /* global taskid space */
101 101 static kmem_cache_t *task_cache; /* kmem cache for task structures */
102 102
103 103 rctl_hndl_t rc_task_lwps;
104 104 rctl_hndl_t rc_task_nprocs;
105 105 rctl_hndl_t rc_task_cpu_time;
106 106
107 107 /*
108 108 * Resource usage is committed using task queues; if taskq_dispatch() fails
109 109 * due to resource constraints, the task is placed on a list for background
110 110 * processing by the task_commit_thread() backup thread.
111 111 */
112 112 static kmutex_t task_commit_lock; /* protects list pointers and cv */
113 113 static kcondvar_t task_commit_cv; /* wakeup task_commit_thread */
114 114 static task_t *task_commit_head = NULL;
115 115 static task_t *task_commit_tail = NULL;
116 116 kthread_t *task_commit_thread;
117 117
118 118 static void task_commit();
119 119 static kstat_t *task_kstat_create(task_t *, zone_t *);
120 120 static void task_kstat_delete(task_t *);
121 121
122 122 /*
 123 123  * static rctl_qty_t task_lwps_usage(rctl_t *r, proc_t *p)
124 124 *
125 125 * Overview
 126 126  *   task_lwps_usage() is the usage operation for the resource control
 127 127  *   associated with the number of LWPs in a task.
128 128 *
129 129 * Return values
130 130 * The number of LWPs in the given task is returned.
131 131 *
132 132 * Caller's context
133 133 * The p->p_lock must be held across the call.
134 134 */
135 135 /*ARGSUSED*/
136 136 static rctl_qty_t
137 137 task_lwps_usage(rctl_t *r, proc_t *p)
138 138 {
139 139 task_t *t;
140 140 rctl_qty_t nlwps;
141 141
142 142 ASSERT(MUTEX_HELD(&p->p_lock));
143 143
144 144 t = p->p_task;
145 145 mutex_enter(&p->p_zone->zone_nlwps_lock);
146 146 nlwps = t->tk_nlwps;
147 147 mutex_exit(&p->p_zone->zone_nlwps_lock);
148 148
149 149 return (nlwps);
150 150 }
151 151
152 152 /*
 153 153  * static int task_lwps_test(rctl_t *, proc_t *, rctl_entity_p_t *,
 154 154  *   rctl_val_t *, rctl_qty_t incr, uint_t flags)
 155 155  *
 156 156  * Overview
 157 157  *   task_lwps_test() is the test-if-valid-increment for the resource control
 158 158  *   for the number of LWPs in a task.
159 159 *
160 160 * Return values
161 161 * 0 if the threshold limit was not passed, 1 if the limit was passed.
162 162 *
163 163 * Caller's context
164 164 * p->p_lock must be held across the call.
165 165 */
166 166 /*ARGSUSED*/
167 167 static int
168 168 task_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
169 169 rctl_qty_t incr,
170 170 uint_t flags)
171 171 {
172 172 rctl_qty_t nlwps;
173 173
174 174 ASSERT(MUTEX_HELD(&p->p_lock));
175 175 ASSERT(e->rcep_t == RCENTITY_TASK);
176 176 if (e->rcep_p.task == NULL)
177 177 return (0);
178 178
179 179 ASSERT(MUTEX_HELD(&(e->rcep_p.task->tk_zone->zone_nlwps_lock)));
180 180 nlwps = e->rcep_p.task->tk_nlwps;
181 181
182 182 if (nlwps + incr > rcntl->rcv_value)
183 183 return (1);
184 184
185 185 return (0);
186 186 }
187 187
188 188 /*ARGSUSED*/
189 189 static int
190 190 task_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e, rctl_qty_t nv) {
191 191
192 192 ASSERT(MUTEX_HELD(&p->p_lock));
193 193 ASSERT(e->rcep_t == RCENTITY_TASK);
194 194 if (e->rcep_p.task == NULL)
195 195 return (0);
196 196
197 197 e->rcep_p.task->tk_nlwps_ctl = nv;
198 198 return (0);
199 199 }
200 200
201 201 /*ARGSUSED*/
202 202 static rctl_qty_t
203 203 task_nprocs_usage(rctl_t *r, proc_t *p)
204 204 {
205 205 task_t *t;
206 206 rctl_qty_t nprocs;
207 207
208 208 ASSERT(MUTEX_HELD(&p->p_lock));
209 209
210 210 t = p->p_task;
211 211 mutex_enter(&p->p_zone->zone_nlwps_lock);
212 212 nprocs = t->tk_nprocs;
213 213 mutex_exit(&p->p_zone->zone_nlwps_lock);
214 214
215 215 return (nprocs);
216 216 }
217 217
218 218 /*ARGSUSED*/
219 219 static int
220 220 task_nprocs_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
221 221 rctl_qty_t incr, uint_t flags)
222 222 {
223 223 rctl_qty_t nprocs;
224 224
225 225 ASSERT(MUTEX_HELD(&p->p_lock));
226 226 ASSERT(e->rcep_t == RCENTITY_TASK);
227 227 if (e->rcep_p.task == NULL)
228 228 return (0);
229 229
230 230 ASSERT(MUTEX_HELD(&(e->rcep_p.task->tk_zone->zone_nlwps_lock)));
231 231 nprocs = e->rcep_p.task->tk_nprocs;
232 232
233 233 if (nprocs + incr > rcntl->rcv_value)
234 234 return (1);
235 235
236 236 return (0);
237 237 }
238 238
239 239 /*ARGSUSED*/
240 240 static int
241 241 task_nprocs_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
242 242 rctl_qty_t nv) {
243 243
244 244 ASSERT(MUTEX_HELD(&p->p_lock));
245 245 ASSERT(e->rcep_t == RCENTITY_TASK);
246 246 if (e->rcep_p.task == NULL)
247 247 return (0);
248 248
249 249 e->rcep_p.task->tk_nprocs_ctl = nv;
250 250 return (0);
251 251 }
252 252
253 253 /*
 254 254  * static rctl_qty_t task_cpu_time_usage(rctl_t *r, proc_t *p)
 255 255  *
 256 256  * Overview
 257 257  *   task_cpu_time_usage() is the usage operation for the resource control
 258 258  *   associated with the total accrued CPU seconds for a task.
259 259 *
260 260 * Return values
261 261 * The number of CPU seconds consumed by the task is returned.
262 262 *
263 263 * Caller's context
264 264 * The given task must be held across the call.
265 265 */
266 266 /*ARGSUSED*/
267 267 static rctl_qty_t
268 268 task_cpu_time_usage(rctl_t *r, proc_t *p)
269 269 {
270 270 task_t *t = p->p_task;
271 271
272 272 ASSERT(MUTEX_HELD(&p->p_lock));
273 273 return (t->tk_cpu_time);
274 274 }
275 275
276 276 /*
277 277 * int task_cpu_time_incr(task_t *t, rctl_qty_t incr)
278 278 *
279 279 * Overview
280 280 * task_cpu_time_incr() increments the amount of CPU time used
281 281 * by this task.
282 282 *
283 283 * Return values
284 284 * 1 if a second or more time is accumulated
285 285 * 0 otherwise
286 286 *
287 287 * Caller's context
288 288 * This is called by the clock tick accounting function to charge
289 289 * CPU time to a task.
290 290 */
291 291 rctl_qty_t
292 292 task_cpu_time_incr(task_t *t, rctl_qty_t incr)
293 293 {
294 294 rctl_qty_t ret = 0;
295 295
296 296 mutex_enter(&t->tk_cpu_time_lock);
297 297 t->tk_cpu_ticks += incr;
298 298 if (t->tk_cpu_ticks >= hz) {
299 299 t->tk_cpu_time += t->tk_cpu_ticks / hz;
300 300 t->tk_cpu_ticks = t->tk_cpu_ticks % hz;
301 301 ret = t->tk_cpu_time;
302 302 }
303 303 mutex_exit(&t->tk_cpu_time_lock);
304 304
305 305 return (ret);
306 306 }
307 307
308 308 /*
 309 309  * static int task_cpu_time_test(rctl_t *, proc_t *, rctl_entity_p_t *,
 310 310  *   struct rctl_val *, rctl_qty_t incr, uint_t flags)
 311 311  *
 312 312  * Overview
 313 313  *   task_cpu_time_test() is the test-if-valid-increment for the resource
 314 314  *   control for the total accrued CPU seconds for a task.
315 315 *
316 316 * Return values
317 317 * 0 if the threshold limit was not passed, 1 if the limit was passed.
318 318 *
319 319 * Caller's context
320 320 * The given task must be held across the call.
321 321 */
322 322 /*ARGSUSED*/
323 323 static int
324 324 task_cpu_time_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
325 325 struct rctl_val *rcntl, rctl_qty_t incr, uint_t flags)
326 326 {
327 327 ASSERT(MUTEX_HELD(&p->p_lock));
328 328 ASSERT(e->rcep_t == RCENTITY_TASK);
329 329 if (e->rcep_p.task == NULL)
330 330 return (0);
331 331
332 332 if (incr >= rcntl->rcv_value)
333 333 return (1);
334 334
335 335 return (0);
336 336 }
337 337
338 338 static task_t *
339 339 task_find(taskid_t id, zoneid_t zoneid)
340 340 {
341 341 task_t *tk;
342 342
343 343 ASSERT(MUTEX_HELD(&task_hash_lock));
344 344
345 345 if (mod_hash_find(task_hash, (mod_hash_key_t)(uintptr_t)id,
346 346 (mod_hash_val_t *)&tk) == MH_ERR_NOTFOUND ||
347 347 (zoneid != ALL_ZONES && zoneid != tk->tk_zone->zone_id))
348 348 return (NULL);
349 349
350 350 return (tk);
351 351 }
352 352
353 353 /*
354 354 * task_hold_by_id(), task_hold_by_id_zone()
355 355 *
356 356 * Overview
357 357 * task_hold_by_id() is used to take a reference on a task by its task id,
358 358 * supporting the various system call interfaces for obtaining resource data,
359 359 * delivering signals, and so forth.
360 360 *
361 361 * Return values
362 362 * Returns a pointer to the task_t with taskid_t id. The task is returned
363 363 * with its hold count incremented by one. Returns NULL if there
364 364 * is no task with the requested id.
365 365 *
↓ open down ↓ |
365 lines elided |
↑ open up ↑ |
366 366 * Caller's context
367 367 * Caller must not be holding task_hash_lock. No restrictions on context.
368 368 */
369 369 task_t *
370 370 task_hold_by_id_zone(taskid_t id, zoneid_t zoneid)
371 371 {
372 372 task_t *tk;
373 373
374 374 mutex_enter(&task_hash_lock);
375 375 if ((tk = task_find(id, zoneid)) != NULL)
376 - atomic_add_32(&tk->tk_hold_count, 1);
376 + atomic_inc_32(&tk->tk_hold_count);
377 377 mutex_exit(&task_hash_lock);
378 378
379 379 return (tk);
380 380 }
381 381
382 382 task_t *
383 383 task_hold_by_id(taskid_t id)
384 384 {
385 385 zoneid_t zoneid;
386 386
387 387 if (INGLOBALZONE(curproc))
388 388 zoneid = ALL_ZONES;
389 389 else
390 390 zoneid = getzoneid();
391 391 return (task_hold_by_id_zone(id, zoneid));
392 392 }
393 393
394 394 /*
395 395 * void task_hold(task_t *)
396 396 *
397 397 * Overview
398 398 * task_hold() is used to take an additional reference to the given task.
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
399 399 *
400 400 * Return values
401 401 * None.
402 402 *
403 403 * Caller's context
404 404 * No restriction on context.
405 405 */
406 406 void
407 407 task_hold(task_t *tk)
408 408 {
409 - atomic_add_32(&tk->tk_hold_count, 1);
409 + atomic_inc_32(&tk->tk_hold_count);
410 410 }
411 411
412 412 /*
413 413 * void task_rele(task_t *)
414 414 *
415 415 * Overview
416 416 * task_rele() relinquishes a reference on the given task, which was acquired
417 417 * via task_hold() or task_hold_by_id(). If this is the last member or
418 418 * observer of the task, dispatch it for commitment via the accounting
419 419 * subsystem.
420 420 *
421 421 * Return values
422 422 * None.
423 423 *
424 424 * Caller's context
425 425 * Caller must not be holding the task_hash_lock.
426 426 */
427 427 void
428 428 task_rele(task_t *tk)
429 429 {
430 430 mutex_enter(&task_hash_lock);
 431     - 	if (atomic_add_32_nv(&tk->tk_hold_count, -1) > 0) {
     431 + 	if (atomic_dec_32_nv(&tk->tk_hold_count) > 0) {
432 432 mutex_exit(&task_hash_lock);
433 433 return;
434 434 }
435 435
436 436 ASSERT(tk->tk_nprocs == 0);
437 437
438 438 mutex_enter(&tk->tk_zone->zone_nlwps_lock);
439 439 tk->tk_proj->kpj_ntasks--;
440 440 mutex_exit(&tk->tk_zone->zone_nlwps_lock);
441 441
442 442 task_kstat_delete(tk);
443 443
444 444 if (mod_hash_destroy(task_hash,
445 445 (mod_hash_key_t)(uintptr_t)tk->tk_tkid) != 0)
446 446 panic("unable to delete task %d", tk->tk_tkid);
447 447 mutex_exit(&task_hash_lock);
448 448
449 449 /*
450 450 * At this point, there are no members or observers of the task, so we
451 451 * can safely send it on for commitment to the accounting subsystem.
452 452 * The task will be destroyed in task_end() subsequent to commitment.
453 453 * Since we may be called with pidlock held, taskq_dispatch() cannot
454 454 * sleep. Commitment is handled by a backup thread in case dispatching
455 455 * the task fails.
456 456 */
457 457 if (taskq_dispatch(exacct_queue, exacct_commit_task, tk,
458 458 TQ_NOSLEEP | TQ_NOQUEUE) == NULL) {
459 459 mutex_enter(&task_commit_lock);
460 460 if (task_commit_head == NULL) {
461 461 task_commit_head = task_commit_tail = tk;
462 462 } else {
463 463 task_commit_tail->tk_commit_next = tk;
464 464 task_commit_tail = tk;
465 465 }
466 466 cv_signal(&task_commit_cv);
467 467 mutex_exit(&task_commit_lock);
468 468 }
469 469 }
470 470
471 471 /*
472 472 * task_t *task_create(projid_t, zone *)
473 473 *
474 474 * Overview
475 475 * A process constructing a new task calls task_create() to construct and
476 476 * preinitialize the task for the appropriate destination project. Only one
477 477 * task, the primordial task0, is not created with task_create().
478 478 *
479 479 * Return values
480 480 * None.
481 481 *
482 482 * Caller's context
483 483 * Caller's context should be safe for KM_SLEEP allocations.
484 484 * The caller should appropriately bump the kpj_ntasks counter on the
485 485 * project that contains this task.
486 486 */
487 487 task_t *
488 488 task_create(projid_t projid, zone_t *zone)
489 489 {
490 490 task_t *tk = kmem_cache_alloc(task_cache, KM_SLEEP);
491 491 task_t *ancestor_tk;
492 492 taskid_t tkid;
493 493 task_usage_t *tu = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
494 494 mod_hash_hndl_t hndl;
495 495 rctl_set_t *set = rctl_set_create();
496 496 rctl_alloc_gp_t *gp;
497 497 rctl_entity_p_t e;
498 498
499 499 bzero(tk, sizeof (task_t));
500 500
501 501 tk->tk_tkid = tkid = id_alloc(taskid_space);
502 502 tk->tk_nlwps = 0;
503 503 tk->tk_nlwps_ctl = INT_MAX;
504 504 tk->tk_nprocs = 0;
505 505 tk->tk_nprocs_ctl = INT_MAX;
506 506 tk->tk_usage = tu;
507 507 tk->tk_inherited = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
508 508 tk->tk_proj = project_hold_by_id(projid, zone, PROJECT_HOLD_INSERT);
509 509 tk->tk_flags = TASK_NORMAL;
510 510 tk->tk_commit_next = NULL;
511 511
512 512 /*
513 513 * Copy ancestor task's resource controls.
514 514 */
515 515 zone_task_hold(zone);
516 516 mutex_enter(&curproc->p_lock);
517 517 ancestor_tk = curproc->p_task;
518 518 task_hold(ancestor_tk);
519 519 tk->tk_zone = zone;
520 520 mutex_exit(&curproc->p_lock);
521 521
522 522 for (;;) {
523 523 gp = rctl_set_dup_prealloc(ancestor_tk->tk_rctls);
524 524
525 525 mutex_enter(&ancestor_tk->tk_rctls->rcs_lock);
526 526 if (rctl_set_dup_ready(ancestor_tk->tk_rctls, gp))
527 527 break;
528 528
529 529 mutex_exit(&ancestor_tk->tk_rctls->rcs_lock);
530 530
531 531 rctl_prealloc_destroy(gp);
532 532 }
533 533
534 534 /*
535 535 * At this point, curproc does not have the appropriate linkage
536 536 * through the task to the project. So, rctl_set_dup should only
537 537 * copy the rctls, and leave the callbacks for later.
538 538 */
539 539 e.rcep_p.task = tk;
540 540 e.rcep_t = RCENTITY_TASK;
541 541 tk->tk_rctls = rctl_set_dup(ancestor_tk->tk_rctls, curproc, curproc, &e,
542 542 set, gp, RCD_DUP);
543 543 mutex_exit(&ancestor_tk->tk_rctls->rcs_lock);
544 544
545 545 rctl_prealloc_destroy(gp);
546 546
547 547 /*
548 548 * Record the ancestor task's ID for use by extended accounting.
549 549 */
550 550 tu->tu_anctaskid = ancestor_tk->tk_tkid;
551 551 task_rele(ancestor_tk);
552 552
553 553 /*
554 554 * Put new task structure in the hash table.
555 555 */
556 556 (void) mod_hash_reserve(task_hash, &hndl);
557 557 mutex_enter(&task_hash_lock);
558 558 ASSERT(task_find(tkid, zone->zone_id) == NULL);
559 559 if (mod_hash_insert_reserve(task_hash, (mod_hash_key_t)(uintptr_t)tkid,
560 560 (mod_hash_val_t *)tk, hndl) != 0) {
561 561 mod_hash_cancel(task_hash, &hndl);
562 562 panic("unable to insert task %d(%p)", tkid, (void *)tk);
563 563 }
564 564 mutex_exit(&task_hash_lock);
565 565
566 566 tk->tk_nprocs_kstat = task_kstat_create(tk, zone);
567 567 return (tk);
568 568 }
569 569
570 570 /*
571 571 * void task_attach(task_t *, proc_t *)
572 572 *
573 573 * Overview
574 574 * task_attach() is used to attach a process to a task; this operation is only
575 575 * performed as a result of a fork() or settaskid() system call. The proc_t's
576 576 * p_tasknext and p_taskprev fields will be set such that the proc_t is a
577 577 * member of the doubly-linked list of proc_t's that make up the task.
578 578 *
579 579 * Return values
580 580 * None.
581 581 *
582 582 * Caller's context
583 583 * pidlock and p->p_lock must be held on entry.
584 584 */
585 585 void
586 586 task_attach(task_t *tk, proc_t *p)
587 587 {
588 588 proc_t *first, *prev;
589 589 ASSERT(tk != NULL);
590 590 ASSERT(p != NULL);
591 591 ASSERT(MUTEX_HELD(&pidlock));
592 592 ASSERT(MUTEX_HELD(&p->p_lock));
593 593
594 594 if (tk->tk_memb_list == NULL) {
595 595 p->p_tasknext = p;
596 596 p->p_taskprev = p;
597 597 } else {
598 598 first = tk->tk_memb_list;
599 599 prev = first->p_taskprev;
600 600 first->p_taskprev = p;
601 601 p->p_tasknext = first;
602 602 p->p_taskprev = prev;
603 603 prev->p_tasknext = p;
604 604 }
605 605 tk->tk_memb_list = p;
606 606 task_hold(tk);
607 607 p->p_task = tk;
608 608 }
609 609
610 610 /*
611 611 * task_begin()
612 612 *
613 613 * Overview
614 614 * A process constructing a new task calls task_begin() to initialize the
615 615 * task, by attaching itself as a member.
616 616 *
617 617 * Return values
618 618 * None.
619 619 *
620 620 * Caller's context
621 621 * pidlock and p_lock must be held across the call to task_begin().
622 622 */
623 623 void
624 624 task_begin(task_t *tk, proc_t *p)
625 625 {
626 626 timestruc_t ts;
627 627 task_usage_t *tu;
628 628 rctl_entity_p_t e;
629 629
630 630 ASSERT(MUTEX_HELD(&pidlock));
631 631 ASSERT(MUTEX_HELD(&p->p_lock));
632 632
633 633 mutex_enter(&tk->tk_usage_lock);
634 634 tu = tk->tk_usage;
635 635 gethrestime(&ts);
636 636 tu->tu_startsec = (uint64_t)ts.tv_sec;
637 637 tu->tu_startnsec = (uint64_t)ts.tv_nsec;
638 638 mutex_exit(&tk->tk_usage_lock);
639 639
640 640 /*
641 641 * Join process to the task as a member.
642 642 */
643 643 task_attach(tk, p);
644 644
645 645 /*
646 646 * Now that the linkage from process to task is complete, do the
647 647 * required callback for the task rctl set.
648 648 */
649 649 e.rcep_p.task = tk;
650 650 e.rcep_t = RCENTITY_TASK;
651 651 (void) rctl_set_dup(NULL, NULL, p, &e, tk->tk_rctls, NULL,
652 652 RCD_CALLBACK);
653 653 }
654 654
655 655 /*
656 656 * void task_detach(proc_t *)
657 657 *
658 658 * Overview
659 659 * task_detach() removes the specified process from its task. task_detach
660 660 * sets the process's task membership to NULL, in anticipation of a final exit
661 661 * or of joining a new task. Because task_rele() requires a context safe for
662 662 * KM_SLEEP allocations, a task_detach() is followed by a subsequent
663 663 * task_rele() once appropriate context is available.
664 664 *
665 665 * Because task_detach() involves relinquishing the process's membership in
666 666 * the project, any observational rctls the process may have had on the task
667 667 * or project are destroyed.
668 668 *
669 669 * Return values
670 670 * None.
671 671 *
672 672 * Caller's context
673 673 * pidlock and p_lock held across task_detach().
674 674 */
675 675 void
676 676 task_detach(proc_t *p)
677 677 {
678 678 task_t *tk = p->p_task;
679 679
680 680 ASSERT(MUTEX_HELD(&pidlock));
681 681 ASSERT(MUTEX_HELD(&p->p_lock));
682 682 ASSERT(p->p_task != NULL);
683 683 ASSERT(tk->tk_memb_list != NULL);
684 684
685 685 if (tk->tk_memb_list == p)
686 686 tk->tk_memb_list = p->p_tasknext;
687 687 if (tk->tk_memb_list == p)
688 688 tk->tk_memb_list = NULL;
689 689 p->p_taskprev->p_tasknext = p->p_tasknext;
690 690 p->p_tasknext->p_taskprev = p->p_taskprev;
691 691
692 692 rctl_set_tearoff(p->p_task->tk_rctls, p);
693 693 rctl_set_tearoff(p->p_task->tk_proj->kpj_rctls, p);
694 694
695 695 p->p_task = NULL;
696 696 p->p_tasknext = p->p_taskprev = NULL;
697 697 }
698 698
699 699 /*
700 700 * task_change(task_t *, proc_t *)
701 701 *
702 702 * Overview
703 703 * task_change() removes the specified process from its current task. The
704 704 * process is then attached to the specified task. This routine is called
705 705 * from settaskid() when process is being moved to a new task.
706 706 *
707 707 * Return values
708 708 * None.
709 709 *
710 710 * Caller's context
711 711 * pidlock and p_lock held across task_change()
712 712 */
713 713 void
714 714 task_change(task_t *newtk, proc_t *p)
715 715 {
716 716 task_t *oldtk = p->p_task;
717 717
718 718 ASSERT(MUTEX_HELD(&pidlock));
719 719 ASSERT(MUTEX_HELD(&p->p_lock));
720 720 ASSERT(oldtk != NULL);
721 721 ASSERT(oldtk->tk_memb_list != NULL);
722 722
723 723 mutex_enter(&oldtk->tk_zone->zone_nlwps_lock);
724 724 oldtk->tk_nlwps -= p->p_lwpcnt;
725 725 oldtk->tk_nprocs--;
726 726 mutex_exit(&oldtk->tk_zone->zone_nlwps_lock);
727 727
728 728 mutex_enter(&newtk->tk_zone->zone_nlwps_lock);
729 729 newtk->tk_nlwps += p->p_lwpcnt;
730 730 newtk->tk_nprocs++;
731 731 mutex_exit(&newtk->tk_zone->zone_nlwps_lock);
732 732
733 733 task_detach(p);
734 734 task_begin(newtk, p);
735 735 exacct_move_mstate(p, oldtk, newtk);
736 736 }
737 737
738 738 /*
739 739 * task_end()
740 740 *
741 741 * Overview
742 742 * task_end() contains the actions executed once the final member of
743 743 * a task has released the task, and all actions connected with the task, such
744 744 * as committing an accounting record to a file, are completed. It is called
745 745 * by the known last consumer of the task information. Additionally,
746 746 * task_end() must never refer to any process in the system.
747 747 *
748 748 * Return values
749 749 * None.
750 750 *
751 751 * Caller's context
752 752 * No restrictions on context, beyond that given above.
753 753 */
754 754 void
755 755 task_end(task_t *tk)
756 756 {
757 757 ASSERT(tk->tk_hold_count == 0);
758 758
759 759 project_rele(tk->tk_proj);
760 760 kmem_free(tk->tk_usage, sizeof (task_usage_t));
761 761 kmem_free(tk->tk_inherited, sizeof (task_usage_t));
762 762 if (tk->tk_prevusage != NULL)
763 763 kmem_free(tk->tk_prevusage, sizeof (task_usage_t));
764 764 if (tk->tk_zoneusage != NULL)
765 765 kmem_free(tk->tk_zoneusage, sizeof (task_usage_t));
766 766 rctl_set_free(tk->tk_rctls);
767 767 id_free(taskid_space, tk->tk_tkid);
768 768 zone_task_rele(tk->tk_zone);
769 769 kmem_cache_free(task_cache, tk);
770 770 }
771 771
772 772 static void
773 773 changeproj(proc_t *p, kproject_t *kpj, zone_t *zone, void *projbuf,
774 774 void *zonebuf)
775 775 {
776 776 kproject_t *oldkpj;
777 777 kthread_t *t;
778 778
779 779 ASSERT(MUTEX_HELD(&pidlock));
780 780 ASSERT(MUTEX_HELD(&p->p_lock));
781 781
782 782 if ((t = p->p_tlist) != NULL) {
783 783 do {
784 784 (void) project_hold(kpj);
785 785
786 786 thread_lock(t);
787 787 oldkpj = ttoproj(t);
788 788
789 789 /*
790 790 * Kick this thread so that he doesn't sit
791 791 * on a wrong wait queue.
792 792 */
793 793 if (ISWAITING(t))
794 794 setrun_locked(t);
795 795
796 796 /*
797 797 * The thread wants to go on the project wait queue, but
798 798 * the waitq is changing.
799 799 */
800 800 if (t->t_schedflag & TS_PROJWAITQ)
801 801 t->t_schedflag &= ~ TS_PROJWAITQ;
802 802
803 803 t->t_proj = kpj;
804 804 t->t_pre_sys = 1; /* For cred update */
805 805 thread_unlock(t);
806 806 fss_changeproj(t, kpj, zone, projbuf, zonebuf);
807 807
808 808 project_rele(oldkpj);
809 809 } while ((t = t->t_forw) != p->p_tlist);
810 810 }
811 811 }
812 812
813 813 /*
814 814 * task_join()
815 815 *
816 816 * Overview
817 817 * task_join() contains the actions that must be executed when the first
818 818 * member (curproc) of a newly created task joins it. It may never fail.
819 819 *
820 820 * The caller must make sure holdlwps() is called so that all other lwps are
821 821 * stopped prior to calling this function.
822 822 *
823 823 * NB: It returns with curproc->p_lock held.
824 824 *
825 825 * Return values
826 826 * Pointer to the old task.
827 827 *
828 828 * Caller's context
829 829 * cpu_lock must be held entering the function. It will acquire pidlock,
830 830 * p_crlock and p_lock during execution.
831 831 */
832 832 task_t *
833 833 task_join(task_t *tk, uint_t flags)
834 834 {
835 835 proc_t *p = ttoproc(curthread);
836 836 task_t *prev_tk;
837 837 void *projbuf, *zonebuf;
838 838 zone_t *zone = tk->tk_zone;
839 839 projid_t projid = tk->tk_proj->kpj_id;
840 840 cred_t *oldcr;
841 841
842 842 /*
843 843 * We can't know for sure if holdlwps() was called, but we can check to
844 844 * ensure we're single-threaded.
845 845 */
846 846 ASSERT(curthread == p->p_agenttp || p->p_lwprcnt == 1);
847 847
848 848 /*
849 849 * Changing the credential is always hard because we cannot
850 850 * allocate memory when holding locks but we don't know whether
851 851 * we need to change it. We first get a reference to the current
852 852 * cred if we need to change it. Then we create a credential
853 853 * with an updated project id. Finally we install it, first
854 854 * releasing the reference we had on the p_cred at the time we
855 855 * acquired the lock the first time and later we release the
856 856 * reference to p_cred at the time we acquired the lock the
857 857 * second time.
858 858 */
859 859 mutex_enter(&p->p_crlock);
860 860 if (crgetprojid(p->p_cred) == projid)
861 861 oldcr = NULL;
862 862 else
863 863 crhold(oldcr = p->p_cred);
864 864 mutex_exit(&p->p_crlock);
865 865
866 866 if (oldcr != NULL) {
867 867 cred_t *newcr = crdup(oldcr);
868 868 crsetprojid(newcr, projid);
869 869 crfree(oldcr);
870 870
871 871 mutex_enter(&p->p_crlock);
872 872 oldcr = p->p_cred;
873 873 p->p_cred = newcr;
874 874 mutex_exit(&p->p_crlock);
875 875 crfree(oldcr);
876 876 }
877 877
878 878 /*
879 879 * Make sure that the number of processor sets is constant
880 880 * across this operation.
881 881 */
882 882 ASSERT(MUTEX_HELD(&cpu_lock));
883 883
884 884 projbuf = fss_allocbuf(FSS_NPSET_BUF, FSS_ALLOC_PROJ);
885 885 zonebuf = fss_allocbuf(FSS_NPSET_BUF, FSS_ALLOC_ZONE);
886 886
887 887 mutex_enter(&pidlock);
888 888 mutex_enter(&p->p_lock);
889 889
890 890 prev_tk = p->p_task;
891 891 task_change(tk, p);
892 892
893 893 /*
894 894 * Now move threads one by one to their new project.
895 895 */
896 896 changeproj(p, tk->tk_proj, zone, projbuf, zonebuf);
897 897 if (flags & TASK_FINAL)
898 898 p->p_task->tk_flags |= TASK_FINAL;
899 899
900 900 mutex_exit(&pidlock);
901 901
902 902 fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
903 903 fss_freebuf(projbuf, FSS_ALLOC_PROJ);
904 904 return (prev_tk);
905 905 }
906 906
907 907 /*
908 908 * rctl ops vectors
909 909 */
910 910 static rctl_ops_t task_lwps_ops = {
911 911 rcop_no_action,
912 912 task_lwps_usage,
913 913 task_lwps_set,
914 914 task_lwps_test
915 915 };
916 916
917 917 static rctl_ops_t task_procs_ops = {
918 918 rcop_no_action,
919 919 task_nprocs_usage,
920 920 task_nprocs_set,
921 921 task_nprocs_test
922 922 };
923 923
924 924 static rctl_ops_t task_cpu_time_ops = {
925 925 rcop_no_action,
926 926 task_cpu_time_usage,
927 927 rcop_no_set,
928 928 task_cpu_time_test
929 929 };
930 930
931 931 /*ARGSUSED*/
932 932 /*
933 933 * void task_init(void)
934 934 *
935 935 * Overview
936 936 * task_init() initializes task-related hashes, caches, and the task id
937 937 * space. Additionally, task_init() establishes p0 as a member of task0.
938 938 * Called by main().
939 939 *
940 940 * Return values
941 941 * None.
942 942 *
943 943 * Caller's context
944 944 * task_init() must be called prior to MP startup.
945 945 */
946 946 void
947 947 task_init(void)
948 948 {
949 949 proc_t *p = &p0;
950 950 mod_hash_hndl_t hndl;
951 951 rctl_set_t *set;
952 952 rctl_alloc_gp_t *gp;
953 953 rctl_entity_p_t e;
954 954
955 955 /*
956 956 * Initialize task_cache and taskid_space.
957 957 */
958 958 task_cache = kmem_cache_create("task_cache", sizeof (task_t),
959 959 0, NULL, NULL, NULL, NULL, NULL, 0);
960 960 taskid_space = id_space_create("taskid_space", 0, MAX_TASKID);
961 961
962 962 /*
963 963 * Initialize task hash table.
964 964 */
965 965 task_hash = mod_hash_create_idhash("task_hash", task_hash_size,
966 966 mod_hash_null_valdtor);
967 967
968 968 /*
969 969 * Initialize task-based rctls.
970 970 */
971 971 rc_task_lwps = rctl_register("task.max-lwps", RCENTITY_TASK,
972 972 RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX,
973 973 &task_lwps_ops);
974 974 rc_task_nprocs = rctl_register("task.max-processes", RCENTITY_TASK,
975 975 RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX,
976 976 &task_procs_ops);
977 977 rc_task_cpu_time = rctl_register("task.max-cpu-time", RCENTITY_TASK,
978 978 RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_DENY_NEVER |
979 979 RCTL_GLOBAL_CPU_TIME | RCTL_GLOBAL_INFINITE |
980 980 RCTL_GLOBAL_UNOBSERVABLE | RCTL_GLOBAL_SECONDS, UINT64_MAX,
981 981 UINT64_MAX, &task_cpu_time_ops);
982 982
983 983 /*
984 984 * Create task0 and place p0 in it as a member.
985 985 */
986 986 task0p = kmem_cache_alloc(task_cache, KM_SLEEP);
987 987 bzero(task0p, sizeof (task_t));
988 988
/* Allocate task0's ID and charge it to project 0 in the global zone. */
989 989 task0p->tk_tkid = id_alloc(taskid_space);
990 990 task0p->tk_usage = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
991 991 task0p->tk_inherited = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
992 992 task0p->tk_proj = project_hold_by_id(0, &zone0,
993 993 PROJECT_HOLD_INSERT);
994 994 task0p->tk_flags = TASK_NORMAL;
/* Direct assignment is safe: we run before MP startup (see block comment). */
995 995 task0p->tk_nlwps = p->p_lwpcnt;
996 996 task0p->tk_nprocs = 1;
997 997 task0p->tk_zone = global_zone;
998 998 task0p->tk_commit_next = NULL;
999 999
1000 1000 set = rctl_set_create();
1001 1001 gp = rctl_set_init_prealloc(RCENTITY_TASK);
1002 1002 mutex_enter(&curproc->p_lock);
1003 1003 e.rcep_p.task = task0p;
1004 1004 e.rcep_t = RCENTITY_TASK;
1005 1005 task0p->tk_rctls = rctl_set_init(RCENTITY_TASK, curproc, &e, set, gp);
1006 1006 mutex_exit(&curproc->p_lock);
1007 1007 rctl_prealloc_destroy(gp);
1008 1008
/* Publish task0 in the task hash, keyed by its task ID. */
1009 1009 (void) mod_hash_reserve(task_hash, &hndl);
1010 1010 mutex_enter(&task_hash_lock);
1011 1011 ASSERT(task_find(task0p->tk_tkid, GLOBAL_ZONEID) == NULL);
1012 1012 if (mod_hash_insert_reserve(task_hash,
1013 1013 (mod_hash_key_t)(uintptr_t)task0p->tk_tkid,
1014 1014 (mod_hash_val_t *)task0p, hndl) != 0) {
1015 1015 mod_hash_cancel(task_hash, &hndl);
1016 1016 panic("unable to insert task %d(%p)", task0p->tk_tkid,
1017 1017 (void *)task0p);
1018 1018 }
1019 1019 mutex_exit(&task_hash_lock);
1020 1020
1021 1021 task0p->tk_memb_list = p;
1022 1022
1023 1023 task0p->tk_nprocs_kstat = task_kstat_create(task0p, task0p->tk_zone);
1024 1024
1025 1025 /*
1026 1026 * Initialize task pointers for p0, including doubly linked list of task
1027 1027 * members.
1028 1028 */
1029 1029 p->p_task = task0p;
1030 1030 p->p_taskprev = p->p_tasknext = p;
/* Take a hold on task0 on behalf of its new member, p0. */
1031 1031 task_hold(task0p);
1032 1032 }
1033 1033
1034 1034 static int
1035 1035 task_nprocs_kstat_update(kstat_t *ksp, int rw)
1036 1036 {
1037 1037 task_t *tk = ksp->ks_private;
1038 1038 task_kstat_t *ktk = ksp->ks_data;
1039 1039
1040 1040 if (rw == KSTAT_WRITE)
1041 1041 return (EACCES);
1042 1042
1043 1043 ktk->ktk_usage.value.ui64 = tk->tk_nprocs;
1044 1044 ktk->ktk_value.value.ui64 = tk->tk_nprocs_ctl;
1045 1045 return (0);
1046 1046 }
1047 1047
1048 1048 static kstat_t *
1049 1049 task_kstat_create(task_t *tk, zone_t *zone)
1050 1050 {
1051 1051 kstat_t *ksp;
1052 1052 task_kstat_t *ktk;
1053 1053 char *zonename = zone->zone_name;
1054 1054
1055 1055 ksp = rctl_kstat_create_task(tk, "nprocs", KSTAT_TYPE_NAMED,
1056 1056 sizeof (task_kstat_t) / sizeof (kstat_named_t),
1057 1057 KSTAT_FLAG_VIRTUAL);
1058 1058
1059 1059 if (ksp == NULL)
1060 1060 return (NULL);
1061 1061
1062 1062 ktk = ksp->ks_data = kmem_alloc(sizeof (task_kstat_t), KM_SLEEP);
1063 1063 ksp->ks_data_size += strlen(zonename) + 1;
1064 1064 kstat_named_init(&ktk->ktk_zonename, "zonename", KSTAT_DATA_STRING);
1065 1065 kstat_named_setstr(&ktk->ktk_zonename, zonename);
1066 1066 kstat_named_init(&ktk->ktk_usage, "usage", KSTAT_DATA_UINT64);
1067 1067 kstat_named_init(&ktk->ktk_value, "value", KSTAT_DATA_UINT64);
1068 1068 ksp->ks_update = task_nprocs_kstat_update;
1069 1069 ksp->ks_private = tk;
1070 1070 kstat_install(ksp);
1071 1071
1072 1072 return (ksp);
1073 1073 }
1074 1074
1075 1075 static void
1076 1076 task_kstat_delete(task_t *tk)
1077 1077 {
1078 1078 void *data;
1079 1079
1080 1080 if (tk->tk_nprocs_kstat != NULL) {
1081 1081 data = tk->tk_nprocs_kstat->ks_data;
1082 1082 kstat_delete(tk->tk_nprocs_kstat);
1083 1083 kmem_free(data, sizeof (task_kstat_t));
1084 1084 tk->tk_nprocs_kstat = NULL;
1085 1085 }
1086 1086 }
1087 1087
1088 1088 void
1089 1089 task_commit_thread_init()
1090 1090 {
1091 1091 mutex_init(&task_commit_lock, NULL, MUTEX_DEFAULT, NULL);
1092 1092 cv_init(&task_commit_cv, NULL, CV_DEFAULT, NULL);
1093 1093 task_commit_thread = thread_create(NULL, 0, task_commit, NULL, 0,
1094 1094 &p0, TS_RUN, minclsyspri);
1095 1095 }
1096 1096
1097 1097 /*
1098 1098 * Backup thread to commit task resource usage when taskq_dispatch() fails.
1099 1099 */
1100 1100 static void
1101 1101 task_commit()
1102 1102 {
1103 1103 callb_cpr_t cprinfo;
1104 1104
1105 1105 CALLB_CPR_INIT(&cprinfo, &task_commit_lock, callb_generic_cpr,
1106 1106 "task_commit_thread");
1107 1107
1108 1108 mutex_enter(&task_commit_lock);
1109 1109
1110 1110 for (;;) {
1111 1111 while (task_commit_head == NULL) {
1112 1112 CALLB_CPR_SAFE_BEGIN(&cprinfo);
1113 1113 cv_wait(&task_commit_cv, &task_commit_lock);
1114 1114 CALLB_CPR_SAFE_END(&cprinfo, &task_commit_lock);
1115 1115 }
1116 1116 while (task_commit_head != NULL) {
1117 1117 task_t *tk;
1118 1118
1119 1119 tk = task_commit_head;
1120 1120 task_commit_head = task_commit_head->tk_commit_next;
1121 1121 if (task_commit_head == NULL)
1122 1122 task_commit_tail = NULL;
1123 1123 mutex_exit(&task_commit_lock);
1124 1124 exacct_commit_task(tk);
1125 1125 mutex_enter(&task_commit_lock);
1126 1126 }
1127 1127 }
1128 1128 }
↓ open down ↓ |
709 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX