179 if (avl_numnodes(&as->a_wpage) == 0)
180 return (0);
181
182 /*
183 * as->a_wpage can only be changed while the process is totally stopped.
184 * Don't grab p_lock here. Holding p_lock while grabbing the address
185 * space lock leads to deadlocks with the clock thread. Note that if an
186 * as_fault() is servicing a fault to a watched page on behalf of an
187 * XHAT provider, watchpoint will be temporarily cleared (and wp_prot
188 * will be set to wp_oprot). Since this is done while holding as writer
189 * lock, we need to grab as lock (reader lock is good enough).
190 *
191 * p_maplock prevents simultaneous execution of this function. Under
192 * normal circumstances, holdwatch() will stop all other threads, so the
193 * lock isn't really needed. But there may be multiple threads within
194 * stop() when SWATCHOK is set, so we need to handle multiple threads
195 * at once. See holdwatch() for the details of this dance.
196 */
197
198 mutex_enter(&p->p_maplock);
199 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
200
201 tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
202 if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
203 pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);
204
205 for (; pwp != NULL && pwp->wp_vaddr < eaddr;
206 pwp = AVL_NEXT(&as->a_wpage, pwp)) {
207
208 /*
209 * If the requested protection has not been
210 * removed, we need not remap this page.
211 */
212 prot = pwp->wp_prot;
213 if (kernel || (prot & PROT_USER))
214 if (prot & prot_rw)
215 continue;
216 /*
217 * If the requested access does not exist in the page's
218 * original protections, we need not remap this page.
219 * If the page does not exist yet, we can't test it.
220 */
221 if ((prot = pwp->wp_oprot) != 0) {
222 if (!(kernel || (prot & PROT_USER)))
223 continue;
224 if (!(prot & prot_rw))
225 continue;
226 }
227
228 if (mapin) {
229 /*
230 * Before mapping the page in, ensure that
231 * all other lwps are held in the kernel.
232 */
233 if (p->p_mapcnt == 0) {
234 /*
235 * Release as lock while in holdwatch()
236 * in case other threads need to grab it.
237 */
238 AS_LOCK_EXIT(as, &as->a_lock);
239 mutex_exit(&p->p_maplock);
240 if (holdwatch() != 0) {
241 /*
242 * We stopped in holdwatch().
243 * Start all over again because the
244 * watched page list may have changed.
245 */
246 goto startover;
247 }
248 mutex_enter(&p->p_maplock);
249 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
250 }
251 p->p_mapcnt++;
252 }
253
254 addr = pwp->wp_vaddr;
255 rv++;
256
257 prot = pwp->wp_prot;
258 if (mapin) {
259 if (kernel)
260 pwp->wp_kmap[xrw]++;
261 else
262 pwp->wp_umap[xrw]++;
263 pwp->wp_flags |= WP_NOWATCH;
264 if (pwp->wp_kmap[X] + pwp->wp_umap[X])
265 /* cannot have exec-only protection */
266 prot |= PROT_READ|PROT_EXEC;
267 if (pwp->wp_kmap[R] + pwp->wp_umap[R])
268 prot |= PROT_READ;
269 if (pwp->wp_kmap[W] + pwp->wp_umap[W])
289 /* cannot have exec-only protection */
290 prot |= PROT_READ|PROT_EXEC;
291 if (pwp->wp_kmap[R] + pwp->wp_umap[R])
292 prot |= PROT_READ;
293 if (pwp->wp_kmap[W] + pwp->wp_umap[W])
294 /* cannot have write-only protection */
295 prot |= PROT_READ|PROT_WRITE;
296 #if 0 /* damned broken mmu feature! */
297 if (sum(pwp->wp_umap) == 0)
298 prot &= ~PROT_USER;
299 #endif
300 }
301 }
302
303
304 if (pwp->wp_oprot != 0) { /* if page exists */
305 struct seg *seg;
306 uint_t oprot;
307 int err, retrycnt = 0;
308
309 AS_LOCK_EXIT(as, &as->a_lock);
310 AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
311 retry:
312 seg = as_segat(as, addr);
313 ASSERT(seg != NULL);
314 SEGOP_GETPROT(seg, addr, 0, &oprot);
315 if (prot != oprot) {
316 err = SEGOP_SETPROT(seg, addr, PAGESIZE, prot);
317 if (err == IE_RETRY) {
318 ASSERT(retrycnt == 0);
319 retrycnt++;
320 goto retry;
321 }
322 }
323 AS_LOCK_EXIT(as, &as->a_lock);
324 } else
325 AS_LOCK_EXIT(as, &as->a_lock);
326
327 /*
328 * When all pages are mapped back to their normal state,
329 * continue the other lwps.
330 */
331 if (!mapin) {
332 ASSERT(p->p_mapcnt > 0);
333 p->p_mapcnt--;
334 if (p->p_mapcnt == 0) {
335 mutex_exit(&p->p_maplock);
336 mutex_enter(&p->p_lock);
337 continuelwps(p);
338 mutex_exit(&p->p_lock);
339 mutex_enter(&p->p_maplock);
340 }
341 }
342
343 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
344 }
345
346 AS_LOCK_EXIT(as, &as->a_lock);
347 mutex_exit(&p->p_maplock);
348
349 return (rv);
350 }
351
352 /*
353 * Restore the original page protections on an address range.
354 * If 'kernel' is non-zero, just do it for the kernel.
355 * pr_mappage() returns non-zero if it actually changed anything.
356 *
357 * pr_mappage() and pr_unmappage() must be executed in matched pairs,
358 * but pairs may be nested within other pairs. The reference counts
359 * sort it all out. See pr_do_mappage(), above.
360 */
361 static int
362 pr_mappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
363 {
364 return (pr_do_mappage(addr, size, 1, rw, kernel));
365 }
366
377
378 /*
379 * Function called by an lwp after it resumes from stop().
380 */
381 void
382 setallwatch(void)
383 {
384 proc_t *p = curproc;
385 struct as *as = curproc->p_as;
386 struct watched_page *pwp, *next;
387 struct seg *seg;
388 caddr_t vaddr;
389 uint_t prot;
390 int err, retrycnt;
391
392 if (p->p_wprot == NULL)
393 return;
394
395 ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));
396
397 AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
398
399 pwp = p->p_wprot;
400 while (pwp != NULL) {
401
402 vaddr = pwp->wp_vaddr;
403 retrycnt = 0;
404 retry:
405 ASSERT(pwp->wp_flags & WP_SETPROT);
406 if ((seg = as_segat(as, vaddr)) != NULL &&
407 !(pwp->wp_flags & WP_NOWATCH)) {
408 prot = pwp->wp_prot;
409 err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
410 if (err == IE_RETRY) {
411 ASSERT(retrycnt == 0);
412 retrycnt++;
413 goto retry;
414 }
415 }
416
417 next = pwp->wp_list;
418
419 if (pwp->wp_read + pwp->wp_write + pwp->wp_exec == 0) {
420 /*
421 * No watched areas remain in this page.
422 * Free the watched_page structure.
423 */
424 avl_remove(&as->a_wpage, pwp);
425 kmem_free(pwp, sizeof (struct watched_page));
426 } else {
427 pwp->wp_flags &= ~WP_SETPROT;
428 }
429
430 pwp = next;
431 }
432 p->p_wprot = NULL;
433
434 AS_LOCK_EXIT(as, &as->a_lock);
435 }
436
437
438
/* Must be called with as lock held */
int
pr_is_watchpage_as(caddr_t addr, enum seg_rw rw, struct as *as)
{
	register struct watched_page *pwp;
	struct watched_page tpw;
	uint_t prot;
	int rv = 0;

	/* Only read, write, or execute accesses can trip a watchpoint. */
	switch (rw) {
	case S_READ:
	case S_WRITE:
	case S_EXEC:
		break;
	default:
		return (0);
	}

	/*
	 * NOTE(review): the watched-page lookup that should set rv appears
	 * to be missing here — pwp, tpw and prot are declared but never
	 * used, and rv is never assigned, so this function always returns
	 * 0. The body between the switch and this return looks truncated;
	 * verify against the upstream source before relying on it.
	 */
	return (rv);
}
492
493
494 /*
495 * trap() calls here to determine if a fault is in a watched page.
496 * We return nonzero if this is true and the load/store would fail.
497 */
498 int
499 pr_is_watchpage(caddr_t addr, enum seg_rw rw)
500 {
501 struct as *as = curproc->p_as;
502 int rv;
503
504 if ((as == &kas) || avl_numnodes(&as->a_wpage) == 0)
505 return (0);
506
507 /* Grab the lock because of XHAT (see comment in pr_mappage()) */
508 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
509 rv = pr_is_watchpage_as(addr, rw, as);
510 AS_LOCK_EXIT(as, &as->a_lock);
511
512 return (rv);
513 }
514
515
516
517 /*
518 * trap() calls here to determine if a fault is a watchpoint.
519 */
520 int
521 pr_is_watchpoint(caddr_t *paddr, int *pta, size_t size, size_t *plen,
522 enum seg_rw rw)
523 {
524 proc_t *p = curproc;
525 caddr_t addr = *paddr;
526 caddr_t eaddr = addr + size;
527 register struct watched_area *pwa;
528 struct watched_area twa;
529 int rv = 0;
530 int ta = 0;
|
179 if (avl_numnodes(&as->a_wpage) == 0)
180 return (0);
181
182 /*
183 * as->a_wpage can only be changed while the process is totally stopped.
184 * Don't grab p_lock here. Holding p_lock while grabbing the address
185 * space lock leads to deadlocks with the clock thread. Note that if an
186 * as_fault() is servicing a fault to a watched page on behalf of an
187 * XHAT provider, watchpoint will be temporarily cleared (and wp_prot
188 * will be set to wp_oprot). Since this is done while holding as writer
189 * lock, we need to grab as lock (reader lock is good enough).
190 *
191 * p_maplock prevents simultaneous execution of this function. Under
192 * normal circumstances, holdwatch() will stop all other threads, so the
193 * lock isn't really needed. But there may be multiple threads within
194 * stop() when SWATCHOK is set, so we need to handle multiple threads
195 * at once. See holdwatch() for the details of this dance.
196 */
197
198 mutex_enter(&p->p_maplock);
199 AS_LOCK_ENTER(as, RW_READER);
200
201 tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
202 if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
203 pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);
204
205 for (; pwp != NULL && pwp->wp_vaddr < eaddr;
206 pwp = AVL_NEXT(&as->a_wpage, pwp)) {
207
208 /*
209 * If the requested protection has not been
210 * removed, we need not remap this page.
211 */
212 prot = pwp->wp_prot;
213 if (kernel || (prot & PROT_USER))
214 if (prot & prot_rw)
215 continue;
216 /*
217 * If the requested access does not exist in the page's
218 * original protections, we need not remap this page.
219 * If the page does not exist yet, we can't test it.
220 */
221 if ((prot = pwp->wp_oprot) != 0) {
222 if (!(kernel || (prot & PROT_USER)))
223 continue;
224 if (!(prot & prot_rw))
225 continue;
226 }
227
228 if (mapin) {
229 /*
230 * Before mapping the page in, ensure that
231 * all other lwps are held in the kernel.
232 */
233 if (p->p_mapcnt == 0) {
234 /*
235 * Release as lock while in holdwatch()
236 * in case other threads need to grab it.
237 */
238 AS_LOCK_EXIT(as);
239 mutex_exit(&p->p_maplock);
240 if (holdwatch() != 0) {
241 /*
242 * We stopped in holdwatch().
243 * Start all over again because the
244 * watched page list may have changed.
245 */
246 goto startover;
247 }
248 mutex_enter(&p->p_maplock);
249 AS_LOCK_ENTER(as, RW_READER);
250 }
251 p->p_mapcnt++;
252 }
253
254 addr = pwp->wp_vaddr;
255 rv++;
256
257 prot = pwp->wp_prot;
258 if (mapin) {
259 if (kernel)
260 pwp->wp_kmap[xrw]++;
261 else
262 pwp->wp_umap[xrw]++;
263 pwp->wp_flags |= WP_NOWATCH;
264 if (pwp->wp_kmap[X] + pwp->wp_umap[X])
265 /* cannot have exec-only protection */
266 prot |= PROT_READ|PROT_EXEC;
267 if (pwp->wp_kmap[R] + pwp->wp_umap[R])
268 prot |= PROT_READ;
269 if (pwp->wp_kmap[W] + pwp->wp_umap[W])
289 /* cannot have exec-only protection */
290 prot |= PROT_READ|PROT_EXEC;
291 if (pwp->wp_kmap[R] + pwp->wp_umap[R])
292 prot |= PROT_READ;
293 if (pwp->wp_kmap[W] + pwp->wp_umap[W])
294 /* cannot have write-only protection */
295 prot |= PROT_READ|PROT_WRITE;
296 #if 0 /* damned broken mmu feature! */
297 if (sum(pwp->wp_umap) == 0)
298 prot &= ~PROT_USER;
299 #endif
300 }
301 }
302
303
304 if (pwp->wp_oprot != 0) { /* if page exists */
305 struct seg *seg;
306 uint_t oprot;
307 int err, retrycnt = 0;
308
309 AS_LOCK_EXIT(as);
310 AS_LOCK_ENTER(as, RW_WRITER);
311 retry:
312 seg = as_segat(as, addr);
313 ASSERT(seg != NULL);
314 SEGOP_GETPROT(seg, addr, 0, &oprot);
315 if (prot != oprot) {
316 err = SEGOP_SETPROT(seg, addr, PAGESIZE, prot);
317 if (err == IE_RETRY) {
318 ASSERT(retrycnt == 0);
319 retrycnt++;
320 goto retry;
321 }
322 }
323 AS_LOCK_EXIT(as);
324 } else
325 AS_LOCK_EXIT(as);
326
327 /*
328 * When all pages are mapped back to their normal state,
329 * continue the other lwps.
330 */
331 if (!mapin) {
332 ASSERT(p->p_mapcnt > 0);
333 p->p_mapcnt--;
334 if (p->p_mapcnt == 0) {
335 mutex_exit(&p->p_maplock);
336 mutex_enter(&p->p_lock);
337 continuelwps(p);
338 mutex_exit(&p->p_lock);
339 mutex_enter(&p->p_maplock);
340 }
341 }
342
343 AS_LOCK_ENTER(as, RW_READER);
344 }
345
346 AS_LOCK_EXIT(as);
347 mutex_exit(&p->p_maplock);
348
349 return (rv);
350 }
351
352 /*
353 * Restore the original page protections on an address range.
354 * If 'kernel' is non-zero, just do it for the kernel.
355 * pr_mappage() returns non-zero if it actually changed anything.
356 *
357 * pr_mappage() and pr_unmappage() must be executed in matched pairs,
358 * but pairs may be nested within other pairs. The reference counts
359 * sort it all out. See pr_do_mappage(), above.
360 */
361 static int
362 pr_mappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
363 {
364 return (pr_do_mappage(addr, size, 1, rw, kernel));
365 }
366
377
378 /*
379 * Function called by an lwp after it resumes from stop().
380 */
381 void
382 setallwatch(void)
383 {
384 proc_t *p = curproc;
385 struct as *as = curproc->p_as;
386 struct watched_page *pwp, *next;
387 struct seg *seg;
388 caddr_t vaddr;
389 uint_t prot;
390 int err, retrycnt;
391
392 if (p->p_wprot == NULL)
393 return;
394
395 ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));
396
397 AS_LOCK_ENTER(as, RW_WRITER);
398
399 pwp = p->p_wprot;
400 while (pwp != NULL) {
401
402 vaddr = pwp->wp_vaddr;
403 retrycnt = 0;
404 retry:
405 ASSERT(pwp->wp_flags & WP_SETPROT);
406 if ((seg = as_segat(as, vaddr)) != NULL &&
407 !(pwp->wp_flags & WP_NOWATCH)) {
408 prot = pwp->wp_prot;
409 err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
410 if (err == IE_RETRY) {
411 ASSERT(retrycnt == 0);
412 retrycnt++;
413 goto retry;
414 }
415 }
416
417 next = pwp->wp_list;
418
419 if (pwp->wp_read + pwp->wp_write + pwp->wp_exec == 0) {
420 /*
421 * No watched areas remain in this page.
422 * Free the watched_page structure.
423 */
424 avl_remove(&as->a_wpage, pwp);
425 kmem_free(pwp, sizeof (struct watched_page));
426 } else {
427 pwp->wp_flags &= ~WP_SETPROT;
428 }
429
430 pwp = next;
431 }
432 p->p_wprot = NULL;
433
434 AS_LOCK_EXIT(as);
435 }
436
437
438
/* Must be called with as lock held */
int
pr_is_watchpage_as(caddr_t addr, enum seg_rw rw, struct as *as)
{
	register struct watched_page *pwp;
	struct watched_page tpw;
	uint_t prot;
	int rv = 0;

	/* Only read, write, or execute accesses can trip a watchpoint. */
	switch (rw) {
	case S_READ:
	case S_WRITE:
	case S_EXEC:
		break;
	default:
		return (0);
	}

	/*
	 * NOTE(review): the watched-page lookup that should set rv appears
	 * to be missing here — pwp, tpw and prot are declared but never
	 * used, and rv is never assigned, so this function always returns
	 * 0. The body between the switch and this return looks truncated;
	 * verify against the upstream source before relying on it.
	 */
	return (rv);
}
492
493
494 /*
495 * trap() calls here to determine if a fault is in a watched page.
496 * We return nonzero if this is true and the load/store would fail.
497 */
498 int
499 pr_is_watchpage(caddr_t addr, enum seg_rw rw)
500 {
501 struct as *as = curproc->p_as;
502 int rv;
503
504 if ((as == &kas) || avl_numnodes(&as->a_wpage) == 0)
505 return (0);
506
507 /* Grab the lock because of XHAT (see comment in pr_mappage()) */
508 AS_LOCK_ENTER(as, RW_READER);
509 rv = pr_is_watchpage_as(addr, rw, as);
510 AS_LOCK_EXIT(as);
511
512 return (rv);
513 }
514
515
516
517 /*
518 * trap() calls here to determine if a fault is a watchpoint.
519 */
520 int
521 pr_is_watchpoint(caddr_t *paddr, int *pta, size_t size, size_t *plen,
522 enum seg_rw rw)
523 {
524 proc_t *p = curproc;
525 caddr_t addr = *paddr;
526 caddr_t eaddr = addr + size;
527 register struct watched_area *pwa;
528 struct watched_area twa;
529 int rv = 0;
530 int ta = 0;
|