Print this page
patch lower-case-segops
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/os/watchpoint.c
+++ new/usr/src/uts/common/os/watchpoint.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 22 /*
23 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 -#pragma ident "%Z%%M% %I% %E% SMI"
28 -
29 27 #include <sys/types.h>
30 28 #include <sys/t_lock.h>
31 29 #include <sys/param.h>
32 30 #include <sys/cred.h>
33 31 #include <sys/debug.h>
34 32 #include <sys/inline.h>
35 33 #include <sys/kmem.h>
36 34 #include <sys/proc.h>
37 35 #include <sys/regset.h>
38 36 #include <sys/sysmacros.h>
39 37 #include <sys/systm.h>
40 38 #include <sys/prsystm.h>
41 39 #include <sys/buf.h>
42 40 #include <sys/signal.h>
43 41 #include <sys/user.h>
44 42 #include <sys/cpuvar.h>
45 43
46 44 #include <sys/fault.h>
47 45 #include <sys/syscall.h>
48 46 #include <sys/procfs.h>
49 47 #include <sys/cmn_err.h>
50 48 #include <sys/stack.h>
51 49 #include <sys/watchpoint.h>
52 50 #include <sys/copyops.h>
53 51 #include <sys/schedctl.h>
54 52
55 53 #include <sys/mman.h>
56 54 #include <vm/as.h>
57 55 #include <vm/seg.h>
58 56
/*
 * Copy ops vector for watchpoints.
 *
 * Forward declarations for the watchpoint-aware replacements of the
 * standard user-space copy routines; the watch_copyops vector below is
 * populated with them in the slot order of struct copyops.
 */
static int watch_copyin(const void *, void *, size_t);
static int watch_xcopyin(const void *, void *, size_t);
static int watch_copyout(const void *, void *, size_t);
static int watch_xcopyout(const void *, void *, size_t);
static int watch_copyinstr(const char *, char *, size_t, size_t *);
static int watch_copyoutstr(const char *, char *, size_t, size_t *);
static int watch_fuword8(const void *, uint8_t *);
static int watch_fuword16(const void *, uint16_t *);
static int watch_fuword32(const void *, uint32_t *);
static int watch_suword8(void *, uint8_t);
static int watch_suword16(void *, uint16_t);
static int watch_suword32(void *, uint32_t);
static int watch_physio(int (*)(struct buf *), struct buf *,
	dev_t, int, void (*)(struct buf *), struct uio *);
#ifdef _LP64
static int watch_fuword64(const void *, uint64_t *);
static int watch_suword64(void *, uint64_t);
#endif

struct copyops watch_copyops = {
	watch_copyin,
	watch_xcopyin,
	watch_copyout,
	watch_xcopyout,
	watch_copyinstr,
	watch_copyoutstr,
	watch_fuword8,
	watch_fuword16,
	watch_fuword32,
#ifdef _LP64
	watch_fuword64,
#else
	NULL,		/* no 64-bit fuword on 32-bit kernels */
#endif
	watch_suword8,
	watch_suword16,
	watch_suword32,
#ifdef _LP64
	watch_suword64,
#else
	NULL,		/* no 64-bit suword on 32-bit kernels */
#endif
	watch_physio
};
106 104
107 105 /*
108 106 * Map the 'rw' argument to a protection flag.
109 107 */
110 108 static int
111 109 rw_to_prot(enum seg_rw rw)
112 110 {
113 111 switch (rw) {
114 112 case S_EXEC:
115 113 return (PROT_EXEC);
116 114 case S_READ:
117 115 return (PROT_READ);
118 116 case S_WRITE:
119 117 return (PROT_WRITE);
120 118 default:
121 119 return (PROT_NONE); /* can't happen */
122 120 }
123 121 }
124 122
125 123 /*
126 124 * Map the 'rw' argument to an index into an array of exec/write/read things.
127 125 * The index follows the precedence order: exec .. write .. read
128 126 */
129 127 static int
130 128 rw_to_index(enum seg_rw rw)
131 129 {
132 130 switch (rw) {
133 131 default: /* default case "can't happen" */
134 132 case S_EXEC:
135 133 return (0);
136 134 case S_WRITE:
137 135 return (1);
138 136 case S_READ:
139 137 return (2);
140 138 }
141 139 }
142 140
/*
 * Map an index back to a seg_rw.
 * Inverse of rw_to_index(); slot 3 is a spare that also reads as S_READ
 * (used for the extra S_READ lwp_watch slot -- see do_watch_step()).
 */
static enum seg_rw S_rw[4] = {
	S_EXEC,
	S_WRITE,
	S_READ,
	S_READ,
};

/* indices into the wp_kmap[]/wp_umap[] reference-count arrays */
#define	X	0
#define	W	1
#define	R	2
/* total references held on a map array */
#define	sum(a)	(a[X] + a[W] + a[R])
157 155
/*
 * Common code for pr_mappage() and pr_unmappage().
 *
 * Walk the process's watched-page AVL tree over [addr, addr + size) and,
 * for mapin != 0, temporarily restore enough protection for the 'rw'
 * access to proceed (bumping the per-page wp_kmap/wp_umap reference
 * counts); for mapin == 0, drop those references and re-establish the
 * watchpoint protections once the counts reach zero.
 *
 * 'kernel' non-zero means the access is on behalf of the kernel, so a
 * page lacking PROT_USER is still eligible.
 *
 * Returns the number of watched pages whose mappings were adjusted
 * (0 if nothing needed to change).
 */
static int
pr_do_mappage(caddr_t addr, size_t size, int mapin, enum seg_rw rw, int kernel)
{
	proc_t *p = curproc;
	struct as *as = p->p_as;
	char *eaddr = addr + size;
	int prot_rw = rw_to_prot(rw);
	int xrw = rw_to_index(rw);
	int rv = 0;
	struct watched_page *pwp;
	struct watched_page tpw;
	avl_index_t where;
	uint_t prot;

	ASSERT(as != &kas);

startover:
	ASSERT(rv == 0);
	if (avl_numnodes(&as->a_wpage) == 0)
		return (0);

	/*
	 * as->a_wpage can only be changed while the process is totally stopped.
	 * Don't grab p_lock here.  Holding p_lock while grabbing the address
	 * space lock leads to deadlocks with the clock thread.  Note that if an
	 * as_fault() is servicing a fault to a watched page on behalf of an
	 * XHAT provider, watchpoint will be temporarily cleared (and wp_prot
	 * will be set to wp_oprot).  Since this is done while holding as writer
	 * lock, we need to grab as lock (reader lock is good enough).
	 *
	 * p_maplock prevents simultaneous execution of this function.  Under
	 * normal circumstances, holdwatch() will stop all other threads, so the
	 * lock isn't really needed.  But there may be multiple threads within
	 * stop() when SWATCHOK is set, so we need to handle multiple threads
	 * at once.  See holdwatch() for the details of this dance.
	 */

	mutex_enter(&p->p_maplock);
	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);

	/* find the first watched page at or after the start of the range */
	tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
		pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);

	for (; pwp != NULL && pwp->wp_vaddr < eaddr;
	    pwp = AVL_NEXT(&as->a_wpage, pwp)) {

		/*
		 * If the requested protection has not been
		 * removed, we need not remap this page.
		 */
		prot = pwp->wp_prot;
		if (kernel || (prot & PROT_USER))
			if (prot & prot_rw)
				continue;
		/*
		 * If the requested access does not exist in the page's
		 * original protections, we need not remap this page.
		 * If the page does not exist yet, we can't test it.
		 */
		if ((prot = pwp->wp_oprot) != 0) {
			if (!(kernel || (prot & PROT_USER)))
				continue;
			if (!(prot & prot_rw))
				continue;
		}

		if (mapin) {
			/*
			 * Before mapping the page in, ensure that
			 * all other lwps are held in the kernel.
			 */
			if (p->p_mapcnt == 0) {
				/*
				 * Release as lock while in holdwatch()
				 * in case other threads need to grab it.
				 */
				AS_LOCK_EXIT(as, &as->a_lock);
				mutex_exit(&p->p_maplock);
				if (holdwatch() != 0) {
					/*
					 * We stopped in holdwatch().
					 * Start all over again because the
					 * watched page list may have changed.
					 */
					goto startover;
				}
				mutex_enter(&p->p_maplock);
				AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
			}
			p->p_mapcnt++;
		}

		addr = pwp->wp_vaddr;
		rv++;

		/*
		 * Recompute the protections this page should carry from
		 * the reference counts that remain after this operation.
		 */
		prot = pwp->wp_prot;
		if (mapin) {
			if (kernel)
				pwp->wp_kmap[xrw]++;
			else
				pwp->wp_umap[xrw]++;
			pwp->wp_flags |= WP_NOWATCH;
			if (pwp->wp_kmap[X] + pwp->wp_umap[X])
				/* cannot have exec-only protection */
				prot |= PROT_READ|PROT_EXEC;
			if (pwp->wp_kmap[R] + pwp->wp_umap[R])
				prot |= PROT_READ;
			if (pwp->wp_kmap[W] + pwp->wp_umap[W])
				/* cannot have write-only protection */
				prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
			if (sum(pwp->wp_umap) == 0)
				prot &= ~PROT_USER;
#endif
		} else {
			ASSERT(pwp->wp_flags & WP_NOWATCH);
			if (kernel) {
				ASSERT(pwp->wp_kmap[xrw] != 0);
				--pwp->wp_kmap[xrw];
			} else {
				ASSERT(pwp->wp_umap[xrw] != 0);
				--pwp->wp_umap[xrw];
			}
			if (sum(pwp->wp_kmap) + sum(pwp->wp_umap) == 0)
				pwp->wp_flags &= ~WP_NOWATCH;
			else {
				if (pwp->wp_kmap[X] + pwp->wp_umap[X])
					/* cannot have exec-only protection */
					prot |= PROT_READ|PROT_EXEC;
				if (pwp->wp_kmap[R] + pwp->wp_umap[R])
					prot |= PROT_READ;
				if (pwp->wp_kmap[W] + pwp->wp_umap[W])
					/* cannot have write-only protection */
					prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
				if (sum(pwp->wp_umap) == 0)
					prot &= ~PROT_USER;
#endif
			}
		}


		if (pwp->wp_oprot != 0) {	/* if page exists */
			struct seg *seg;
			uint_t oprot;
			int err, retrycnt = 0;

			/*
			 * Changing protections requires the as writer lock;
			 * drop the reader lock and reacquire as writer.
			 */
			AS_LOCK_EXIT(as, &as->a_lock);
			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
		retry:
			seg = as_segat(as, addr);
			ASSERT(seg != NULL);
			(void) segop_getprot(seg, addr, 0, &oprot);
			if (prot != oprot) {
				err = segop_setprot(seg, addr, PAGESIZE, prot);
				if (err == IE_RETRY) {
					ASSERT(retrycnt == 0);
					retrycnt++;
					goto retry;
				}
			}
			AS_LOCK_EXIT(as, &as->a_lock);
		} else
			AS_LOCK_EXIT(as, &as->a_lock);

		/*
		 * When all pages are mapped back to their normal state,
		 * continue the other lwps.
		 */
		if (!mapin) {
			ASSERT(p->p_mapcnt > 0);
			p->p_mapcnt--;
			if (p->p_mapcnt == 0) {
				mutex_exit(&p->p_maplock);
				mutex_enter(&p->p_lock);
				continuelwps(p);
				mutex_exit(&p->p_lock);
				mutex_enter(&p->p_maplock);
			}
		}

		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	}

	AS_LOCK_EXIT(as, &as->a_lock);
	mutex_exit(&p->p_maplock);

	return (rv);
}
351 349
/*
 * Restore the original page protections on an address range.
 * If 'kernel' is non-zero, just do it for the kernel.
 * pr_mappage() returns non-zero if it actually changed anything.
 *
 * pr_mappage() and pr_unmappage() must be executed in matched pairs,
 * but pairs may be nested within other pairs.  The reference counts
 * sort it all out.  See pr_do_mappage(), above.
 */
static int
pr_mappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
{
	return (pr_do_mappage(addr, size, 1, rw, kernel));
}
366 364
/*
 * Set the modified page protections on a watched page.
 * Inverse of pr_mappage().
 * Needs to be called only if pr_mappage() returned non-zero.
 */
static void
pr_unmappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
{
	(void) pr_do_mappage(addr, size, 0, rw, kernel);
}
377 375
/*
 * Function called by an lwp after it resumes from stop().
 *
 * Reapply the deferred watchpoint protections queued on p->p_wprot
 * (pages flagged WP_SETPROT), and free any watched_page structure
 * that no longer covers a watched area.
 */
void
setallwatch(void)
{
	proc_t *p = curproc;
	struct as *as = curproc->p_as;
	struct watched_page *pwp, *next;
	struct seg *seg;
	caddr_t vaddr;
	uint_t prot;
	int err, retrycnt;

	if (p->p_wprot == NULL)
		return;

	ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));

	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);

	pwp = p->p_wprot;
	while (pwp != NULL) {

		vaddr = pwp->wp_vaddr;
		retrycnt = 0;
	retry:
		ASSERT(pwp->wp_flags & WP_SETPROT);
		/*
		 * Skip pages with no segment, and pages currently mapped
		 * in for access (WP_NOWATCH).
		 */
		if ((seg = as_segat(as, vaddr)) != NULL &&
		    !(pwp->wp_flags & WP_NOWATCH)) {
			prot = pwp->wp_prot;
			err = segop_setprot(seg, vaddr, PAGESIZE, prot);
			if (err == IE_RETRY) {
				/* segop_setprot() may ask us to retry once */
				ASSERT(retrycnt == 0);
				retrycnt++;
				goto retry;
			}
		}

		next = pwp->wp_list;

		if (pwp->wp_read + pwp->wp_write + pwp->wp_exec == 0) {
			/*
			 * No watched areas remain in this page.
			 * Free the watched_page structure.
			 */
			avl_remove(&as->a_wpage, pwp);
			kmem_free(pwp, sizeof (struct watched_page));
		} else {
			pwp->wp_flags &= ~WP_SETPROT;
		}

		pwp = next;
	}
	p->p_wprot = NULL;

	AS_LOCK_EXIT(as, &as->a_lock);
}
436 434
437 435
438 436
439 437 /* Must be called with as lock held */
440 438 int
441 439 pr_is_watchpage_as(caddr_t addr, enum seg_rw rw, struct as *as)
442 440 {
443 441 register struct watched_page *pwp;
444 442 struct watched_page tpw;
445 443 uint_t prot;
446 444 int rv = 0;
447 445
448 446 switch (rw) {
449 447 case S_READ:
450 448 case S_WRITE:
451 449 case S_EXEC:
452 450 break;
453 451 default:
454 452 return (0);
455 453 }
456 454
457 455 /*
458 456 * as->a_wpage can only be modified while the process is totally
459 457 * stopped. We need, and should use, no locks here.
460 458 */
461 459 if (as != &kas && avl_numnodes(&as->a_wpage) != 0) {
462 460 tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
463 461 pwp = avl_find(&as->a_wpage, &tpw, NULL);
464 462 if (pwp != NULL) {
465 463 ASSERT(addr >= pwp->wp_vaddr &&
466 464 addr < pwp->wp_vaddr + PAGESIZE);
467 465 if (pwp->wp_oprot != 0) {
468 466 prot = pwp->wp_prot;
469 467 switch (rw) {
470 468 case S_READ:
471 469 rv = ((prot & (PROT_USER|PROT_READ))
472 470 != (PROT_USER|PROT_READ));
473 471 break;
474 472 case S_WRITE:
475 473 rv = ((prot & (PROT_USER|PROT_WRITE))
476 474 != (PROT_USER|PROT_WRITE));
477 475 break;
478 476 case S_EXEC:
479 477 rv = ((prot & (PROT_USER|PROT_EXEC))
480 478 != (PROT_USER|PROT_EXEC));
481 479 break;
482 480 default:
483 481 /* can't happen! */
484 482 break;
485 483 }
486 484 }
487 485 }
488 486 }
489 487
490 488 return (rv);
491 489 }
492 490
493 491
494 492 /*
495 493 * trap() calls here to determine if a fault is in a watched page.
496 494 * We return nonzero if this is true and the load/store would fail.
497 495 */
498 496 int
499 497 pr_is_watchpage(caddr_t addr, enum seg_rw rw)
500 498 {
501 499 struct as *as = curproc->p_as;
502 500 int rv;
503 501
504 502 if ((as == &kas) || avl_numnodes(&as->a_wpage) == 0)
505 503 return (0);
506 504
507 505 /* Grab the lock because of XHAT (see comment in pr_mappage()) */
508 506 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
509 507 rv = pr_is_watchpage_as(addr, rw, as);
510 508 AS_LOCK_EXIT(as, &as->a_lock);
511 509
512 510 return (rv);
513 511 }
514 512
515 513
516 514
/*
 * trap() calls here to determine if a fault is a watchpoint.
 *
 * On entry *paddr is the faulting address and 'size' the access size.
 * Returns 0 if no watched area triggers, else TRAP_RWATCH/TRAP_WWATCH/
 * TRAP_XWATCH.  On a hit, *paddr is advanced to the start of the watched
 * area (if the fault began before it), *pta is set if the area has the
 * WA_TRAPAFTER flag, and *plen (if non-NULL) gets the remaining length
 * of the watched area from *paddr.
 */
int
pr_is_watchpoint(caddr_t *paddr, int *pta, size_t size, size_t *plen,
	enum seg_rw rw)
{
	proc_t *p = curproc;
	caddr_t addr = *paddr;
	caddr_t eaddr = addr + size;
	register struct watched_area *pwa;
	struct watched_area twa;
	int rv = 0;
	int ta = 0;
	size_t len = 0;

	switch (rw) {
	case S_READ:
	case S_WRITE:
	case S_EXEC:
		break;
	default:
		*pta = 0;
		return (0);
	}

	/*
	 * p->p_warea is protected by p->p_lock.
	 */
	mutex_enter(&p->p_lock);

	/* BEGIN CSTYLED */
	/*
	 * This loop is somewhat complicated because the fault region can span
	 * multiple watched areas.  For example:
	 *
	 *            addr              eaddr
	 * 		+-----------------+
	 * 		| fault region    |
	 * 	+-------+--------+----+---+------------+
	 * 	| prot not right |    | prot correct   |
	 * 	+----------------+    +----------------+
	 *    wa_vaddr	      wa_eaddr
	 *    		      wa_vaddr		wa_eaddr
	 *
	 * We start at the area greater than or equal to the starting address.
	 * As long as some portion of the fault region overlaps the current
	 * area, we continue checking permissions until we find an appropriate
	 * match.
	 */
	/* END CSTYLED */
	twa.wa_vaddr = addr;
	twa.wa_eaddr = eaddr;

	for (pwa = pr_find_watched_area(p, &twa, NULL);
	    pwa != NULL && eaddr > pwa->wa_vaddr && addr < pwa->wa_eaddr;
	    pwa = AVL_NEXT(&p->p_warea, pwa)) {

		switch (rw) {
		case S_READ:
			if (pwa->wa_flags & WA_READ)
				rv = TRAP_RWATCH;
			break;
		case S_WRITE:
			if (pwa->wa_flags & WA_WRITE)
				rv = TRAP_WWATCH;
			break;
		case S_EXEC:
			if (pwa->wa_flags & WA_EXEC)
				rv = TRAP_XWATCH;
			break;
		default:
			/* can't happen */
			break;
		}

		/*
		 * If this watched area matched the access, narrow the
		 * result to it and stop; otherwise move on to the next
		 * overlapping watched area.
		 */
		if (rv != 0) {
			if (addr < pwa->wa_vaddr)
				addr = pwa->wa_vaddr;
			len = pwa->wa_eaddr - addr;
			if (pwa->wa_flags & WA_TRAPAFTER)
				ta = 1;
			break;
		}
	}

	mutex_exit(&p->p_lock);

	*paddr = addr;
	*pta = ta;
	if (plen != NULL)
		*plen = len;
	return (rv);
}
615 613
/*
 * Set up to perform a single-step at user level for the
 * case of a trapafter watchpoint.  Called from trap().
 *
 * Records the watched access in the lwp_watch[] slot selected by the
 * access type, maps the page in via pr_mappage() and arranges the
 * single-step with prstep().  Safe to call again for the same access
 * before the step completes (only records the watchcode then).
 */
void
do_watch_step(caddr_t vaddr, size_t sz, enum seg_rw rw,
	int watchcode, greg_t pc)
{
	register klwp_t *lwp = ttolwp(curthread);
	struct lwp_watch *pw = &lwp->lwp_watch[rw_to_index(rw)];

	/*
	 * Check to see if we are already performing this special
	 * watchpoint single-step.  We must not do pr_mappage() twice.
	 */

	/* special check for two read traps on the same instruction */
	if (rw == S_READ && pw->wpaddr != NULL &&
	    !(pw->wpaddr <= vaddr && vaddr < pw->wpaddr + pw->wpsize)) {
		ASSERT(lwp->lwp_watchtrap != 0);
		pw++;	/* use the extra S_READ struct */
	}

	if (pw->wpaddr != NULL) {
		/* already stepping over this access; just note the trap */
		ASSERT(lwp->lwp_watchtrap != 0);
		ASSERT(pw->wpaddr <= vaddr && vaddr < pw->wpaddr + pw->wpsize);
		if (pw->wpcode == 0) {
			pw->wpcode = watchcode;
			pw->wppc = pc;
		}
	} else {
		int mapped = pr_mappage(vaddr, sz, rw, 0);
		prstep(lwp, 1);
		lwp->lwp_watchtrap = 1;
		pw->wpaddr = vaddr;
		pw->wpsize = sz;
		pw->wpcode = watchcode;
		pw->wpmapped = mapped;
		pw->wppc = pc;
	}
}
657 655
/*
 * Undo the effects of do_watch_step().
 * Called from trap() after the single-step is finished.
 * Also called from issig_forreal() and stop() with a NULL
 * argument to avoid having these things set more than once.
 *
 * Returns FLTWATCH if any recorded watchpoint trap was pending
 * (filling in *sip when supplied), else 0.
 */
int
undo_watch_step(k_siginfo_t *sip)
{
	register klwp_t *lwp = ttolwp(curthread);
	int fault = 0;

	if (lwp->lwp_watchtrap) {
		struct lwp_watch *pw = lwp->lwp_watch;
		int i;

		/* all four lwp_watch slots, including the extra S_READ one */
		for (i = 0; i < 4; i++, pw++) {
			if (pw->wpaddr == NULL)
				continue;
			if (pw->wpmapped)
				pr_unmappage(pw->wpaddr, pw->wpsize, S_rw[i],
				    0);
			if (pw->wpcode != 0) {
				if (sip != NULL) {
					sip->si_signo = SIGTRAP;
					sip->si_code = pw->wpcode;
					sip->si_addr = pw->wpaddr;
					sip->si_trapafter = 1;
					sip->si_pc = (caddr_t)pw->wppc;
				}
				fault = FLTWATCH;
				pw->wpcode = 0;
			}
			pw->wpaddr = NULL;
			pw->wpsize = 0;
			pw->wpmapped = 0;
		}
		lwp->lwp_watchtrap = 0;
	}

	return (fault);
}
700 698
/*
 * Handle a watchpoint that occurs while doing copyin()
 * or copyout() in a system call.
 * Return non-zero if the fault or signal is cleared
 * by a debugger while the lwp is stopped.
 */
static int
sys_watchpoint(caddr_t addr, int watchcode, int ta)
{
	extern greg_t getuserpc(void);	/* XXX header file */
	k_sigset_t smask;
	register proc_t *p = ttoproc(curthread);
	register klwp_t *lwp = ttolwp(curthread);
	register sigqueue_t *sqp;
	int rval;

	/* assert no locks are held */
	/* ASSERT(curthread->t_nlocks == 0); */

	/* build the SIGTRAP siginfo describing the watched access */
	sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = watchcode;
	sqp->sq_info.si_addr = addr;
	sqp->sq_info.si_trapafter = ta;
	sqp->sq_info.si_pc = (caddr_t)getuserpc();

	mutex_enter(&p->p_lock);

	/* this will be tested and cleared by the caller */
	lwp->lwp_sysabort = 0;

	/* if FLTWATCH is being traced, stop for the debugger first */
	if (prismember(&p->p_fltmask, FLTWATCH)) {
		lwp->lwp_curflt = (uchar_t)FLTWATCH;
		lwp->lwp_siginfo = sqp->sq_info;
		stop(PR_FAULTED, FLTWATCH);
		if (lwp->lwp_curflt == 0) {
			/* the debugger cleared the fault */
			mutex_exit(&p->p_lock);
			kmem_free(sqp, sizeof (sigqueue_t));
			return (1);
		}
		lwp->lwp_curflt = 0;
	}

	/*
	 * post the SIGTRAP signal.
	 * Block all other signals so we only stop showing SIGTRAP.
	 */
	if (signal_is_blocked(curthread, SIGTRAP) ||
	    sigismember(&p->p_ignore, SIGTRAP)) {
		/* SIGTRAP is blocked or ignored, forget the rest. */
		mutex_exit(&p->p_lock);
		kmem_free(sqp, sizeof (sigqueue_t));
		return (0);
	}
	sigdelq(p, curthread, SIGTRAP);
	sigaddqa(p, curthread, sqp);	/* sqp is now owned by the queue */
	schedctl_finish_sigblock(curthread);
	smask = curthread->t_hold;
	sigfillset(&curthread->t_hold);
	sigdiffset(&curthread->t_hold, &cantmask);
	sigdelset(&curthread->t_hold, SIGTRAP);
	mutex_exit(&p->p_lock);

	rval = ((ISSIG_FAST(curthread, lwp, p, FORREAL))? 0 : 1);

	/* restore the original signal mask */
	mutex_enter(&p->p_lock);
	curthread->t_hold = smask;
	mutex_exit(&p->p_lock);

	return (rval);
}
773 771
774 772 /*
775 773 * Wrappers for the copyin()/copyout() functions to deal
776 774 * with watchpoints that fire while in system calls.
777 775 */
778 776
779 777 static int
780 778 watch_xcopyin(const void *uaddr, void *kaddr, size_t count)
781 779 {
782 780 klwp_t *lwp = ttolwp(curthread);
783 781 caddr_t watch_uaddr = (caddr_t)uaddr;
784 782 caddr_t watch_kaddr = (caddr_t)kaddr;
785 783 int error = 0;
786 784 label_t ljb;
787 785 size_t part;
788 786 int mapped;
789 787
790 788 while (count && error == 0) {
791 789 int watchcode;
792 790 caddr_t vaddr;
793 791 size_t len;
794 792 int ta;
795 793
796 794 if ((part = PAGESIZE -
797 795 (((uintptr_t)uaddr) & PAGEOFFSET)) > count)
798 796 part = count;
799 797
800 798 if (!pr_is_watchpage(watch_uaddr, S_READ))
801 799 watchcode = 0;
802 800 else {
803 801 vaddr = watch_uaddr;
804 802 watchcode = pr_is_watchpoint(&vaddr, &ta,
805 803 part, &len, S_READ);
806 804 if (watchcode && ta == 0)
807 805 part = vaddr - watch_uaddr;
808 806 }
809 807
810 808 /*
811 809 * Copy the initial part, up to a watched address, if any.
812 810 */
813 811 if (part != 0) {
814 812 mapped = pr_mappage(watch_uaddr, part, S_READ, 1);
815 813 if (on_fault(&ljb))
816 814 error = EFAULT;
817 815 else
818 816 copyin_noerr(watch_uaddr, watch_kaddr, part);
819 817 no_fault();
820 818 if (mapped)
821 819 pr_unmappage(watch_uaddr, part, S_READ, 1);
822 820 watch_uaddr += part;
823 821 watch_kaddr += part;
824 822 count -= part;
825 823 }
826 824 /*
827 825 * If trapafter was specified, then copy through the
828 826 * watched area before taking the watchpoint trap.
829 827 */
830 828 while (count && watchcode && ta && len > part && error == 0) {
831 829 len -= part;
832 830 if ((part = PAGESIZE) > count)
833 831 part = count;
834 832 if (part > len)
835 833 part = len;
836 834 mapped = pr_mappage(watch_uaddr, part, S_READ, 1);
837 835 if (on_fault(&ljb))
838 836 error = EFAULT;
839 837 else
840 838 copyin_noerr(watch_uaddr, watch_kaddr, part);
841 839 no_fault();
842 840 if (mapped)
843 841 pr_unmappage(watch_uaddr, part, S_READ, 1);
844 842 watch_uaddr += part;
845 843 watch_kaddr += part;
846 844 count -= part;
847 845 }
848 846
849 847 error:
850 848 /* if we hit a watched address, do the watchpoint logic */
851 849 if (watchcode &&
852 850 (!sys_watchpoint(vaddr, watchcode, ta) ||
853 851 lwp->lwp_sysabort)) {
854 852 lwp->lwp_sysabort = 0;
855 853 error = EFAULT;
856 854 break;
857 855 }
858 856 }
859 857
860 858 return (error);
861 859 }
862 860
863 861 static int
864 862 watch_copyin(const void *kaddr, void *uaddr, size_t count)
865 863 {
866 864 return (watch_xcopyin(kaddr, uaddr, count) ? -1 : 0);
867 865 }
868 866
869 867
870 868 static int
871 869 watch_xcopyout(const void *kaddr, void *uaddr, size_t count)
872 870 {
873 871 klwp_t *lwp = ttolwp(curthread);
874 872 caddr_t watch_uaddr = (caddr_t)uaddr;
875 873 caddr_t watch_kaddr = (caddr_t)kaddr;
876 874 int error = 0;
877 875 label_t ljb;
878 876
879 877 while (count && error == 0) {
880 878 int watchcode;
881 879 caddr_t vaddr;
882 880 size_t part;
883 881 size_t len;
884 882 int ta;
885 883 int mapped;
886 884
887 885 if ((part = PAGESIZE -
888 886 (((uintptr_t)uaddr) & PAGEOFFSET)) > count)
889 887 part = count;
890 888
891 889 if (!pr_is_watchpage(watch_uaddr, S_WRITE))
892 890 watchcode = 0;
893 891 else {
894 892 vaddr = watch_uaddr;
895 893 watchcode = pr_is_watchpoint(&vaddr, &ta,
896 894 part, &len, S_WRITE);
897 895 if (watchcode) {
898 896 if (ta == 0)
899 897 part = vaddr - watch_uaddr;
900 898 else {
901 899 len += vaddr - watch_uaddr;
902 900 if (part > len)
903 901 part = len;
904 902 }
905 903 }
906 904 }
907 905
908 906 /*
909 907 * Copy the initial part, up to a watched address, if any.
910 908 */
911 909 if (part != 0) {
912 910 mapped = pr_mappage(watch_uaddr, part, S_WRITE, 1);
913 911 if (on_fault(&ljb))
914 912 error = EFAULT;
915 913 else
916 914 copyout_noerr(watch_kaddr, watch_uaddr, part);
917 915 no_fault();
918 916 if (mapped)
919 917 pr_unmappage(watch_uaddr, part, S_WRITE, 1);
920 918 watch_uaddr += part;
921 919 watch_kaddr += part;
922 920 count -= part;
923 921 }
924 922
925 923 /*
926 924 * If trapafter was specified, then copy through the
927 925 * watched area before taking the watchpoint trap.
928 926 */
929 927 while (count && watchcode && ta && len > part && error == 0) {
930 928 len -= part;
931 929 if ((part = PAGESIZE) > count)
932 930 part = count;
933 931 if (part > len)
934 932 part = len;
935 933 mapped = pr_mappage(watch_uaddr, part, S_WRITE, 1);
936 934 if (on_fault(&ljb))
937 935 error = EFAULT;
938 936 else
939 937 copyout_noerr(watch_kaddr, watch_uaddr, part);
940 938 no_fault();
941 939 if (mapped)
942 940 pr_unmappage(watch_uaddr, part, S_WRITE, 1);
943 941 watch_uaddr += part;
944 942 watch_kaddr += part;
945 943 count -= part;
946 944 }
947 945
948 946 /* if we hit a watched address, do the watchpoint logic */
949 947 if (watchcode &&
950 948 (!sys_watchpoint(vaddr, watchcode, ta) ||
951 949 lwp->lwp_sysabort)) {
952 950 lwp->lwp_sysabort = 0;
953 951 error = EFAULT;
954 952 break;
955 953 }
956 954 }
957 955
958 956 return (error);
959 957 }
960 958
/*
 * copyops-vector entry: copyout() semantics (kernel source, user
 * destination) with watchpoint handling; returns 0 on success, -1 on
 * failure as copyout() does.
 */
static int
watch_copyout(const void *kaddr, void *uaddr, size_t count)
{
	return (watch_xcopyout(kaddr, uaddr, count) ? -1 : 0);
}
966 964
/*
 * Watchpoint-aware copyinstr(): copy a NUL-terminated string of at most
 * 'maxlength' bytes from user address 'uaddr' to kernel address 'kaddr',
 * honoring watched pages/areas.  Returns 0, EFAULT, or ENAMETOOLONG;
 * on anything but EFAULT, *lencopied (if non-NULL) is set to the number
 * of bytes consumed.  Note that unlike watch_xcopyin(), 'uaddr' itself
 * advances here, so the per-page arithmetic below is based on it.
 */
static int
watch_copyinstr(
	const char *uaddr,
	char *kaddr,
	size_t maxlength,
	size_t *lencopied)
{
	klwp_t *lwp = ttolwp(curthread);
	size_t resid;
	int error = 0;
	label_t ljb;

	if ((resid = maxlength) == 0)
		return (ENAMETOOLONG);

	while (resid && error == 0) {
		int watchcode;
		caddr_t vaddr;
		size_t part;
		size_t len;
		size_t size;
		int ta;
		int mapped;

		/* bound the chunk to the page containing uaddr */
		if ((part = PAGESIZE -
		    (((uintptr_t)uaddr) & PAGEOFFSET)) > resid)
			part = resid;

		if (!pr_is_watchpage((caddr_t)uaddr, S_READ))
			watchcode = 0;
		else {
			vaddr = (caddr_t)uaddr;
			watchcode = pr_is_watchpoint(&vaddr, &ta,
			    part, &len, S_READ);
			if (watchcode) {
				if (ta == 0)
					part = vaddr - uaddr;
				else {
					len += vaddr - uaddr;
					if (part > len)
						part = len;
				}
			}
		}

		/*
		 * Copy the initial part, up to a watched address, if any.
		 */
		if (part != 0) {
			mapped = pr_mappage((caddr_t)uaddr, part, S_READ, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyinstr_noerr(uaddr, kaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)uaddr, part, S_READ, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			/* running out of 'part' isn't "too long" yet */
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
			if (error != 0 || (watchcode &&
			    (uaddr < vaddr || kaddr[-1] == '\0')))
				break;	/* didn't reach the watched area */
		}

		/*
		 * If trapafter was specified, then copy through the
		 * watched area before taking the watchpoint trap.
		 */
		while (resid && watchcode && ta && len > part && error == 0 &&
		    size == part && kaddr[-1] != '\0') {
			len -= part;
			if ((part = PAGESIZE) > resid)
				part = resid;
			if (part > len)
				part = len;
			mapped = pr_mappage((caddr_t)uaddr, part, S_READ, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyinstr_noerr(uaddr, kaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)uaddr, part, S_READ, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
		}

		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			error = EFAULT;
			break;
		}

		/* short copy or terminating NUL means we are done */
		if (error == 0 && part != 0 &&
		    (size < part || kaddr[-1] == '\0'))
			break;
	}

	if (error != EFAULT && lencopied)
		*lencopied = maxlength - resid;
	return (error);
}
1080 1078
/*
 * Watchpoint-aware version of copyoutstr():  copy the null-terminated
 * kernel string at kaddr out to user address uaddr, at most maxlength
 * bytes, honoring any watched areas established on the process.  The
 * copy proceeds at most a page at a time and stops short of each
 * watched address so the watchpoint trap can be delivered (or, when
 * trapafter was specified, after copying through the watched area).
 *
 * Returns 0, EFAULT (bad address or aborted watchpoint), or
 * ENAMETOOLONG (buffer exhausted before the terminating null).
 * Unless EFAULT is returned, *lencopied (if non-NULL) is set to the
 * number of bytes transferred (maxlength - residual).
 */
static int
watch_copyoutstr(
	const char *kaddr,
	char *uaddr,
	size_t maxlength,
	size_t *lencopied)
{
	klwp_t *lwp = ttolwp(curthread);
	size_t resid;		/* bytes still to be copied */
	int error = 0;
	label_t ljb;		/* jump buffer for on_fault() */

	if ((resid = maxlength) == 0)
		return (ENAMETOOLONG);

	while (resid && error == 0) {
		int watchcode;
		caddr_t vaddr;		/* first watched address, if any */
		size_t part;		/* bytes to copy this pass */
		size_t len;		/* length of the watched area */
		size_t size;		/* bytes actually copied this pass */
		int ta;			/* nonzero: trapafter watchpoint */
		int mapped;

		/* Copy at most to the end of the current user page. */
		if ((part = PAGESIZE -
		    (((uintptr_t)uaddr) & PAGEOFFSET)) > resid)
			part = resid;

		if (!pr_is_watchpage(uaddr, S_WRITE)) {
			watchcode = 0;
		} else {
			vaddr = uaddr;
			watchcode = pr_is_watchpoint(&vaddr, &ta,
			    part, &len, S_WRITE);
			/*
			 * A trap-before watchpoint truncates this pass
			 * so the copy stops right at the watched address.
			 */
			if (watchcode && ta == 0)
				part = vaddr - uaddr;
		}

		/*
		 * Copy the initial part, up to a watched address, if any.
		 */
		if (part != 0) {
			mapped = pr_mappage(uaddr, part, S_WRITE, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyoutstr_noerr(kaddr, uaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage(uaddr, part, S_WRITE, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			/*
			 * Filling this chunk is only an overflow if the
			 * whole buffer is exhausted too.
			 */
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
			if (error != 0 || (watchcode &&
			    (uaddr < vaddr || kaddr[-1] == '\0')))
				break;	/* didn't reach the watched area */
		}

		/*
		 * If trapafter was specified, then copy through the
		 * watched area before taking the watchpoint trap.
		 */
		while (resid && watchcode && ta && len > part && error == 0 &&
		    size == part && kaddr[-1] != '\0') {
			len -= part;
			if ((part = PAGESIZE) > resid)
				part = resid;
			if (part > len)
				part = len;
			mapped = pr_mappage(uaddr, part, S_WRITE, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyoutstr_noerr(kaddr, uaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage(uaddr, part, S_WRITE, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
		}

		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			error = EFAULT;
			break;
		}

		/* Stop once the terminating null has been copied. */
		if (error == 0 && part != 0 &&
		    (size < part || kaddr[-1] == '\0'))
			break;
	}

	if (error != EFAULT && lencopied)
		*lencopied = maxlength - resid;
	return (error);
}
1187 1185
1188 1186 typedef int (*fuword_func)(const void *, void *);
1189 1187
/*
 * Generic form of watch_fuword8(), watch_fuword16(), etc.
 * Fetch 'size' bytes from user address 'addr' into *dst via 'func'
 * (one of the fuwordN_noerr() primitives), honoring watchpoints.
 * Returns 0 on success, -1 on a fault or an aborted watchpoint.
 */
static int
watch_fuword(const void *addr, void *dst, fuword_func func, size_t size)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;		/* jump buffer for on_fault() */

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, size, NULL, S_READ);
		/*
		 * Do the fetch unless the address lies in a trap-before
		 * watchpoint (watchcode != 0 && ta == 0).
		 */
		if (watchcode == 0 || ta != 0) {
			mapped = pr_mappage((caddr_t)addr, size, S_READ, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				(*func)(addr, dst);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, size, S_READ, 1);
		}
		/* Deliver the watchpoint trap; give up if aborted. */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		/* Loop again only after a trap-before watchpoint. */
		if (watchcode == 0 || ta != 0)
			break;
	}

	return (rv);
}
1231 1229
1232 1230 static int
1233 1231 watch_fuword8(const void *addr, uint8_t *dst)
1234 1232 {
1235 1233 return (watch_fuword(addr, dst, (fuword_func)fuword8_noerr,
1236 1234 sizeof (*dst)));
1237 1235 }
1238 1236
1239 1237 static int
1240 1238 watch_fuword16(const void *addr, uint16_t *dst)
1241 1239 {
1242 1240 return (watch_fuword(addr, dst, (fuword_func)fuword16_noerr,
1243 1241 sizeof (*dst)));
1244 1242 }
1245 1243
1246 1244 static int
1247 1245 watch_fuword32(const void *addr, uint32_t *dst)
1248 1246 {
1249 1247 return (watch_fuword(addr, dst, (fuword_func)fuword32_noerr,
1250 1248 sizeof (*dst)));
1251 1249 }
1252 1250
1253 1251 #ifdef _LP64
1254 1252 static int
1255 1253 watch_fuword64(const void *addr, uint64_t *dst)
1256 1254 {
1257 1255 return (watch_fuword(addr, dst, (fuword_func)fuword64_noerr,
1258 1256 sizeof (*dst)));
1259 1257 }
1260 1258 #endif
1261 1259
1262 1260
/*
 * Watchpoint-aware store of an 8-bit value at user address 'addr'.
 * Returns 0 on success, -1 on a fault or an aborted watchpoint.
 */
static int
watch_suword8(void *addr, uint8_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;			/* nonzero: trapafter watchpoint */
	label_t ljb;		/* jump buffer for on_fault() */

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
		    S_WRITE);
		/*
		 * Do the store unless the address lies in a trap-before
		 * watchpoint (watchcode != 0 && ta == 0).
		 */
		if (watchcode == 0 || ta != 0) {
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword8_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		/* Deliver the watchpoint trap; give up if aborted. */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		/* Loop again only after a trap-before watchpoint. */
		if (watchcode == 0 || ta != 0)
			break;
	}

	return (rv);
}
1304 1302
1305 1303 static int
1306 1304 watch_suword16(void *addr, uint16_t value)
1307 1305 {
1308 1306 klwp_t *lwp = ttolwp(curthread);
1309 1307 int watchcode;
1310 1308 caddr_t vaddr;
1311 1309 int mapped;
1312 1310 int rv = 0;
1313 1311 int ta;
1314 1312 label_t ljb;
1315 1313
1316 1314 for (;;) {
1317 1315
1318 1316 vaddr = (caddr_t)addr;
1319 1317 watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
1320 1318 S_WRITE);
1321 1319 if (watchcode == 0 || ta != 0) {
1322 1320 mapped = pr_mappage((caddr_t)addr, sizeof (value),
1323 1321 S_WRITE, 1);
1324 1322 if (on_fault(&ljb))
1325 1323 rv = -1;
1326 1324 else
1327 1325 suword16_noerr(addr, value);
1328 1326 no_fault();
1329 1327 if (mapped)
1330 1328 pr_unmappage((caddr_t)addr, sizeof (value),
1331 1329 S_WRITE, 1);
1332 1330 }
1333 1331 if (watchcode &&
1334 1332 (!sys_watchpoint(vaddr, watchcode, ta) ||
1335 1333 lwp->lwp_sysabort)) {
1336 1334 lwp->lwp_sysabort = 0;
1337 1335 rv = -1;
1338 1336 break;
1339 1337 }
1340 1338 if (watchcode == 0 || ta != 0)
1341 1339 break;
1342 1340 }
1343 1341
1344 1342 return (rv);
1345 1343 }
1346 1344
1347 1345 static int
1348 1346 watch_suword32(void *addr, uint32_t value)
1349 1347 {
1350 1348 klwp_t *lwp = ttolwp(curthread);
1351 1349 int watchcode;
1352 1350 caddr_t vaddr;
1353 1351 int mapped;
1354 1352 int rv = 0;
1355 1353 int ta;
1356 1354 label_t ljb;
1357 1355
1358 1356 for (;;) {
1359 1357
1360 1358 vaddr = (caddr_t)addr;
1361 1359 watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
1362 1360 S_WRITE);
1363 1361 if (watchcode == 0 || ta != 0) {
1364 1362 mapped = pr_mappage((caddr_t)addr, sizeof (value),
1365 1363 S_WRITE, 1);
1366 1364 if (on_fault(&ljb))
1367 1365 rv = -1;
1368 1366 else
1369 1367 suword32_noerr(addr, value);
1370 1368 no_fault();
1371 1369 if (mapped)
1372 1370 pr_unmappage((caddr_t)addr, sizeof (value),
1373 1371 S_WRITE, 1);
1374 1372 }
1375 1373 if (watchcode &&
1376 1374 (!sys_watchpoint(vaddr, watchcode, ta) ||
1377 1375 lwp->lwp_sysabort)) {
1378 1376 lwp->lwp_sysabort = 0;
1379 1377 rv = -1;
1380 1378 break;
1381 1379 }
1382 1380 if (watchcode == 0 || ta != 0)
1383 1381 break;
1384 1382 }
1385 1383
1386 1384 return (rv);
1387 1385 }
1388 1386
#ifdef _LP64
/*
 * Watchpoint-aware store of a 64-bit value at user address 'addr'.
 * Returns 0 on success, -1 on a fault or an aborted watchpoint.
 */
static int
watch_suword64(void *addr, uint64_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;			/* nonzero: trapafter watchpoint */
	label_t ljb;		/* jump buffer for on_fault() */

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
		    S_WRITE);
		/*
		 * Do the store unless the address lies in a trap-before
		 * watchpoint (watchcode != 0 && ta == 0).
		 */
		if (watchcode == 0 || ta != 0) {
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword64_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		/* Deliver the watchpoint trap; give up if aborted. */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		/* Loop again only after a trap-before watchpoint. */
		if (watchcode == 0 || ta != 0)
			break;
	}

	return (rv);
}
#endif /* _LP64 */
1432 1430
1433 1431 /*
1434 1432 * Check for watched addresses in the given address space.
1435 1433 * Return 1 if this is true, otherwise 0.
1436 1434 */
1437 1435 static int
1438 1436 pr_is_watched(caddr_t base, size_t len, int rw)
1439 1437 {
1440 1438 caddr_t saddr = (caddr_t)((uintptr_t)base & (uintptr_t)PAGEMASK);
1441 1439 caddr_t eaddr = base + len;
1442 1440 caddr_t paddr;
1443 1441
1444 1442 for (paddr = saddr; paddr < eaddr; paddr += PAGESIZE) {
1445 1443 if (pr_is_watchpage(paddr, rw))
1446 1444 return (1);
1447 1445 }
1448 1446
1449 1447 return (0);
1450 1448 }
1451 1449
/*
 * Wrapper for the physio() function.
 * Splits one uio operation with multiple iovecs into uio operations
 * with only one iovec each, to do the watchpoint handling separately
 * for each iovec.
 */
static int
watch_physio(int (*strat)(struct buf *), struct buf *bp, dev_t dev,
    int rw, void (*mincnt)(struct buf *), struct uio *uio)
{
	struct uio auio;	/* single-iovec uio for default_physio() */
	struct iovec *iov;
	caddr_t base;
	size_t len;
	int seg_rw;
	int error = 0;

	/* Kernel-space I/O cannot touch user watchpoints. */
	if (uio->uio_segflg == UIO_SYSSPACE)
		return (default_physio(strat, bp, dev, rw, mincnt, uio));

	/* A device read writes user memory, and vice versa. */
	seg_rw = (rw == B_READ) ? S_WRITE : S_READ;

	while (uio->uio_iovcnt > 0) {
		if (uio->uio_resid == 0) {
			/*
			 * Make sure to return the uio structure with the
			 * same values as default_physio() does.
			 */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}

		iov = uio->uio_iov;
		len = MIN(iov->iov_len, uio->uio_resid);

		/* Build a one-iovec uio describing just this segment. */
		auio.uio_iovcnt = 1;
		auio.uio_iov = iov;
		auio.uio_resid = len;
		auio.uio_loffset = uio->uio_loffset;
		auio.uio_llimit = uio->uio_llimit;
		auio.uio_fmode = uio->uio_fmode;
		auio.uio_extflg = uio->uio_extflg;
		auio.uio_segflg = uio->uio_segflg;

		base = iov->iov_base;

		if (!pr_is_watched(base, len, seg_rw)) {
			/*
			 * The given memory references don't cover a
			 * watched page.
			 */
			error = default_physio(strat, bp, dev, rw, mincnt,
			    &auio);

			/* Update uio with values from auio. */
			len -= auio.uio_resid;
			uio->uio_resid -= len;
			uio->uio_loffset += len;

			/*
			 * Return if an error occurred or not all data
			 * was copied.
			 */
			if (auio.uio_resid || error)
				break;
			uio->uio_iov++;
			uio->uio_iovcnt--;
		} else {
			int mapped, watchcode, ta;
			caddr_t vaddr = base;
			klwp_t *lwp = ttolwp(curthread);

			watchcode = pr_is_watchpoint(&vaddr, &ta, len,
			    NULL, seg_rw);

			if (watchcode == 0 || ta != 0) {
				/*
				 * Do the io if the given memory references
				 * don't cover a watched area (watchcode=0)
				 * or if WA_TRAPAFTER was specified.
				 */
				mapped = pr_mappage(base, len, seg_rw, 1);
				error = default_physio(strat, bp, dev, rw,
				    mincnt, &auio);
				if (mapped)
					pr_unmappage(base, len, seg_rw, 1);

				len -= auio.uio_resid;
				uio->uio_resid -= len;
				uio->uio_loffset += len;
			}

			/*
			 * If we hit a watched address, do the watchpoint logic.
			 */
			if (watchcode &&
			    (!sys_watchpoint(vaddr, watchcode, ta) ||
			    lwp->lwp_sysabort)) {
				lwp->lwp_sysabort = 0;
				return (EFAULT);
			}

			/*
			 * Check for errors from default_physio().
			 */
			if (watchcode == 0 || ta != 0) {
				if (auio.uio_resid || error)
					break;
				uio->uio_iov++;
				uio->uio_iovcnt--;
			}
		}
	}

	return (error);
}
1568 1566
1569 1567 int
1570 1568 wa_compare(const void *a, const void *b)
1571 1569 {
1572 1570 const watched_area_t *pa = a;
1573 1571 const watched_area_t *pb = b;
1574 1572
1575 1573 if (pa->wa_vaddr < pb->wa_vaddr)
1576 1574 return (-1);
1577 1575 else if (pa->wa_vaddr > pb->wa_vaddr)
1578 1576 return (1);
1579 1577 else
1580 1578 return (0);
1581 1579 }
1582 1580
1583 1581 int
1584 1582 wp_compare(const void *a, const void *b)
1585 1583 {
1586 1584 const watched_page_t *pa = a;
1587 1585 const watched_page_t *pb = b;
1588 1586
1589 1587 if (pa->wp_vaddr < pb->wp_vaddr)
1590 1588 return (-1);
1591 1589 else if (pa->wp_vaddr > pb->wp_vaddr)
1592 1590 return (1);
1593 1591 else
1594 1592 return (0);
1595 1593 }
1596 1594
1597 1595 /*
1598 1596 * Given an address range, finds the first watched area which overlaps some or
1599 1597 * all of the range.
1600 1598 */
1601 1599 watched_area_t *
1602 1600 pr_find_watched_area(proc_t *p, watched_area_t *pwa, avl_index_t *where)
1603 1601 {
1604 1602 caddr_t vaddr = pwa->wa_vaddr;
1605 1603 caddr_t eaddr = pwa->wa_eaddr;
1606 1604 watched_area_t *wap;
1607 1605 avl_index_t real_where;
1608 1606
1609 1607 /* First, check if there is an exact match. */
1610 1608 wap = avl_find(&p->p_warea, pwa, &real_where);
1611 1609
1612 1610
1613 1611 /* Check to see if we overlap with the previous area. */
1614 1612 if (wap == NULL) {
1615 1613 wap = avl_nearest(&p->p_warea, real_where, AVL_BEFORE);
1616 1614 if (wap != NULL &&
1617 1615 (vaddr >= wap->wa_eaddr || eaddr <= wap->wa_vaddr))
1618 1616 wap = NULL;
1619 1617 }
1620 1618
1621 1619 /* Try the next area. */
1622 1620 if (wap == NULL) {
1623 1621 wap = avl_nearest(&p->p_warea, real_where, AVL_AFTER);
1624 1622 if (wap != NULL &&
1625 1623 (vaddr >= wap->wa_eaddr || eaddr <= wap->wa_vaddr))
1626 1624 wap = NULL;
1627 1625 }
1628 1626
1629 1627 if (where)
1630 1628 *where = real_where;
1631 1629
1632 1630 return (wap);
1633 1631 }
1634 1632
/*
 * Turn on watchpoint handling for thread t:  mark the thread as
 * watched and route its user-memory accesses through the watchpoint
 * copyops vector.
 */
void
watch_enable(kthread_id_t t)
{
	t->t_proc_flag |= TP_WATCHPT;
	install_copyops(t, &watch_copyops);
}
1641 1639
/*
 * Undo watch_enable():  clear the watchpoint flag and restore the
 * thread's default copyops.
 */
void
watch_disable(kthread_id_t t)
{
	t->t_proc_flag &= ~TP_WATCHPT;
	remove_copyops(t);
}
1648 1646
1649 1647 int
1650 1648 copyin_nowatch(const void *uaddr, void *kaddr, size_t len)
1651 1649 {
1652 1650 int watched, ret;
1653 1651
1654 1652 watched = watch_disable_addr(uaddr, len, S_READ);
1655 1653 ret = copyin(uaddr, kaddr, len);
1656 1654 if (watched)
1657 1655 watch_enable_addr(uaddr, len, S_READ);
1658 1656
1659 1657 return (ret);
1660 1658 }
1661 1659
1662 1660 int
1663 1661 copyout_nowatch(const void *kaddr, void *uaddr, size_t len)
1664 1662 {
1665 1663 int watched, ret;
1666 1664
1667 1665 watched = watch_disable_addr(uaddr, len, S_WRITE);
1668 1666 ret = copyout(kaddr, uaddr, len);
1669 1667 if (watched)
1670 1668 watch_enable_addr(uaddr, len, S_WRITE);
1671 1669
1672 1670 return (ret);
1673 1671 }
1674 1672
1675 1673 #ifdef _LP64
1676 1674 int
1677 1675 fuword64_nowatch(const void *addr, uint64_t *value)
1678 1676 {
1679 1677 int watched, ret;
1680 1678
1681 1679 watched = watch_disable_addr(addr, sizeof (*value), S_READ);
1682 1680 ret = fuword64(addr, value);
1683 1681 if (watched)
1684 1682 watch_enable_addr(addr, sizeof (*value), S_READ);
1685 1683
1686 1684 return (ret);
1687 1685 }
1688 1686 #endif
1689 1687
1690 1688 int
1691 1689 fuword32_nowatch(const void *addr, uint32_t *value)
1692 1690 {
1693 1691 int watched, ret;
1694 1692
1695 1693 watched = watch_disable_addr(addr, sizeof (*value), S_READ);
1696 1694 ret = fuword32(addr, value);
1697 1695 if (watched)
1698 1696 watch_enable_addr(addr, sizeof (*value), S_READ);
1699 1697
1700 1698 return (ret);
1701 1699 }
1702 1700
1703 1701 #ifdef _LP64
1704 1702 int
1705 1703 suword64_nowatch(void *addr, uint64_t value)
1706 1704 {
1707 1705 int watched, ret;
1708 1706
1709 1707 watched = watch_disable_addr(addr, sizeof (value), S_WRITE);
1710 1708 ret = suword64(addr, value);
1711 1709 if (watched)
1712 1710 watch_enable_addr(addr, sizeof (value), S_WRITE);
1713 1711
1714 1712 return (ret);
1715 1713 }
1716 1714 #endif
1717 1715
1718 1716 int
1719 1717 suword32_nowatch(void *addr, uint32_t value)
1720 1718 {
1721 1719 int watched, ret;
1722 1720
1723 1721 watched = watch_disable_addr(addr, sizeof (value), S_WRITE);
1724 1722 ret = suword32(addr, value);
1725 1723 if (watched)
1726 1724 watch_enable_addr(addr, sizeof (value), S_WRITE);
1727 1725
1728 1726 return (ret);
1729 1727 }
1730 1728
1731 1729 int
1732 1730 watch_disable_addr(const void *addr, size_t len, enum seg_rw rw)
1733 1731 {
1734 1732 if (pr_watch_active(curproc))
1735 1733 return (pr_mappage((caddr_t)addr, len, rw, 1));
1736 1734 return (0);
1737 1735 }
1738 1736
1739 1737 void
1740 1738 watch_enable_addr(const void *addr, size_t len, enum seg_rw rw)
1741 1739 {
1742 1740 if (pr_watch_active(curproc))
1743 1741 pr_unmappage((caddr_t)addr, len, rw, 1);
1744 1742 }
↓ open down ↓ |
1325 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX