9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
27 /* All Rights Reserved */
28
29 #pragma ident "%Z%%M% %I% %E% SMI"
30
31 #include <sys/atomic.h>
32 #include <sys/errno.h>
33 #include <sys/stat.h>
34 #include <sys/modctl.h>
35 #include <sys/conf.h>
36 #include <sys/systm.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/cpuvar.h>
40 #include <sys/kmem.h>
41 #include <sys/strsubr.h>
42 #include <sys/sysmacros.h>
43 #include <sys/frame.h>
44 #include <sys/stack.h>
45 #include <sys/proc.h>
46 #include <sys/priv.h>
47 #include <sys/policy.h>
48 #include <sys/ontrap.h>
49 #include <sys/vmsystm.h>
50 #include <sys/prsystm.h>
55 #include <vm/seg_vn.h>
56 #include <vm/seg_spt.h>
57 #include <vm/seg_kmem.h>
58
59 extern struct seg_ops segdev_ops; /* needs a header file */
60 extern struct seg_ops segspt_shmops; /* needs a header file */
61
62 static int
63 page_valid(struct seg *seg, caddr_t addr)
64 {
65 struct segvn_data *svd;
66 vnode_t *vp;
67 vattr_t vattr;
68
69 /*
70 * Fail if the page doesn't map to a page in the underlying
71 * mapped file, if an underlying mapped file exists.
72 */
73 vattr.va_mask = AT_SIZE;
74 if (seg->s_ops == &segvn_ops &&
75 SEGOP_GETVP(seg, addr, &vp) == 0 &&
76 vp != NULL && vp->v_type == VREG &&
77 VOP_GETATTR(vp, &vattr, 0, CRED(), NULL) == 0) {
78 u_offset_t size = roundup(vattr.va_size, (u_offset_t)PAGESIZE);
79 u_offset_t offset = SEGOP_GETOFFSET(seg, addr);
80
81 if (offset >= size)
82 return (0);
83 }
84
85 /*
86 * Fail if this is an ISM shared segment and the address is
87 * not within the real size of the spt segment that backs it.
88 */
89 if (seg->s_ops == &segspt_shmops &&
90 addr >= seg->s_base + spt_realsize(seg))
91 return (0);
92
93 /*
94 * Fail if the segment is mapped from /dev/null.
95 * The key is that the mapping comes from segdev and the
96 * type is neither MAP_SHARED nor MAP_PRIVATE.
97 */
98 if (seg->s_ops == &segdev_ops &&
99 ((SEGOP_GETTYPE(seg, addr) & (MAP_SHARED | MAP_PRIVATE)) == 0))
100 return (0);
101
102 /*
103 * Fail if the page is a MAP_NORESERVE page that has
104 * not actually materialized.
105 * We cheat by knowing that segvn is the only segment
106 * driver that supports MAP_NORESERVE.
107 */
108 if (seg->s_ops == &segvn_ops &&
109 (svd = (struct segvn_data *)seg->s_data) != NULL &&
110 (svd->vp == NULL || svd->vp->v_type != VREG) &&
111 (svd->flags & MAP_NORESERVE)) {
112 /*
113 * Guilty knowledge here. We know that
114 * segvn_incore returns more than just the
115 * low-order bit that indicates the page is
116 * actually in memory. If any bits are set,
117 * then there is backing store for the page.
118 */
119 char incore = 0;
120 (void) SEGOP_INCORE(seg, addr, PAGESIZE, &incore);
121 if (incore == 0)
122 return (0);
123 }
124 return (1);
125 }
126
127 /*
128 * Map address "addr" in address space "as" into a kernel virtual address.
129 * The memory is guaranteed to be resident and locked down.
130 */
131 static caddr_t
132 mapin(struct as *as, caddr_t addr, int writing)
133 {
134 page_t *pp;
135 caddr_t kaddr;
136 pfn_t pfnum;
137
138 /*
139 * NB: Because of past mistakes, we have bits being returned
140 * by getpfnum that are actually the page type bits of the pte.
192 uint_t prot;
193 uint_t prot_rw = writing ? PROT_WRITE : PROT_READ;
194 int protchanged;
195 on_trap_data_t otd;
196 int retrycnt;
197 struct as *as = p->p_as;
198 enum seg_rw rw;
199
200 /*
201 * Locate segment containing address of interest.
202 */
203 page = (caddr_t)(uintptr_t)((uintptr_t)addr & PAGEMASK);
204 retrycnt = 0;
205 AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
206 retry:
207 if ((seg = as_segat(as, page)) == NULL ||
208 !page_valid(seg, page)) {
209 AS_LOCK_EXIT(as, &as->a_lock);
210 return (ENXIO);
211 }
212 SEGOP_GETPROT(seg, page, 0, &prot);
213
214 protchanged = 0;
215 if ((prot & prot_rw) == 0) {
216 protchanged = 1;
217 err = SEGOP_SETPROT(seg, page, PAGESIZE, prot | prot_rw);
218
219 if (err == IE_RETRY) {
220 protchanged = 0;
221 ASSERT(retrycnt == 0);
222 retrycnt++;
223 goto retry;
224 }
225
226 if (err != 0) {
227 AS_LOCK_EXIT(as, &as->a_lock);
228 return (ENXIO);
229 }
230 }
231
232 /*
233 * segvn may do a copy-on-write for F_SOFTLOCK/S_READ case to break
234 * sharing to avoid a copy on write of a softlocked page by another
235 * thread. But since we locked the address space as a writer no other
236 * thread can cause a copy on write. S_READ_NOCOW is passed as the
237 * access type to tell segvn that it's ok not to do a copy-on-write
238 * for this SOFTLOCK fault.
239 */
240 if (writing)
241 rw = S_WRITE;
242 else if (seg->s_ops == &segvn_ops)
243 rw = S_READ_NOCOW;
244 else
245 rw = S_READ;
246
247 if (SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTLOCK, rw)) {
248 if (protchanged)
249 (void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);
250 AS_LOCK_EXIT(as, &as->a_lock);
251 return (ENXIO);
252 }
253 CPU_STATS_ADD_K(vm, softlock, 1);
254
255 /*
256 * Make sure we're not trying to read or write off the end of the page.
257 */
258 ASSERT(len <= page + PAGESIZE - addr);
259
260 /*
261 * Map in the locked page, copy to our local buffer,
262 * then map the page out and unlock it.
263 */
264 vaddr = mapin(as, addr, writing);
265
266 /*
267 * Since we are copying memory on behalf of the user process,
268 * protect against memory error correction faults.
269 */
286 else
287 bcopy(vaddr, buf, len);
288 }
289 } else {
290 error = EIO;
291 }
292 no_trap();
293
	/*
	 * If we're writing to an executable page, we may need to synchronize
	 * the I$ with the modifications we made through the D$.
	 */
298 if (writing && (prot & PROT_EXEC))
299 sync_icache(vaddr, (uint_t)len);
300
301 mapout(as, addr, vaddr, writing);
302
303 if (rw == S_READ_NOCOW)
304 rw = S_READ;
305
306 (void) SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTUNLOCK, rw);
307
308 if (protchanged)
309 (void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);
310
311 AS_LOCK_EXIT(as, &as->a_lock);
312
313 return (error);
314 }
315
/*
 * Copy "len" bytes from user address "a" in process "p" into the
 * kernel buffer "buf".  Thin wrapper that calls urw() with
 * writing == 0.  Returns 0 on success or an errno value
 * (urw() returns ENXIO/EIO on failure).
 */
int
uread(proc_t *p, void *buf, size_t len, uintptr_t a)
{
	return (urw(p, 0, buf, len, a));
}
321
/*
 * Copy "len" bytes from the kernel buffer "buf" to user address "a"
 * in process "p".  Thin wrapper that calls urw() with writing == 1.
 * Returns 0 on success or an errno value (urw() returns ENXIO/EIO
 * on failure).
 */
int
uwrite(proc_t *p, void *buf, size_t len, uintptr_t a)
{
	return (urw(p, 1, buf, len, a));
}
|
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
27 /* All Rights Reserved */
28
29 #include <sys/atomic.h>
30 #include <sys/errno.h>
31 #include <sys/stat.h>
32 #include <sys/modctl.h>
33 #include <sys/conf.h>
34 #include <sys/systm.h>
35 #include <sys/ddi.h>
36 #include <sys/sunddi.h>
37 #include <sys/cpuvar.h>
38 #include <sys/kmem.h>
39 #include <sys/strsubr.h>
40 #include <sys/sysmacros.h>
41 #include <sys/frame.h>
42 #include <sys/stack.h>
43 #include <sys/proc.h>
44 #include <sys/priv.h>
45 #include <sys/policy.h>
46 #include <sys/ontrap.h>
47 #include <sys/vmsystm.h>
48 #include <sys/prsystm.h>
53 #include <vm/seg_vn.h>
54 #include <vm/seg_spt.h>
55 #include <vm/seg_kmem.h>
56
57 extern struct seg_ops segdev_ops; /* needs a header file */
58 extern struct seg_ops segspt_shmops; /* needs a header file */
59
60 static int
61 page_valid(struct seg *seg, caddr_t addr)
62 {
63 struct segvn_data *svd;
64 vnode_t *vp;
65 vattr_t vattr;
66
67 /*
68 * Fail if the page doesn't map to a page in the underlying
69 * mapped file, if an underlying mapped file exists.
70 */
71 vattr.va_mask = AT_SIZE;
72 if (seg->s_ops == &segvn_ops &&
73 segop_getvp(seg, addr, &vp) == 0 &&
74 vp != NULL && vp->v_type == VREG &&
75 VOP_GETATTR(vp, &vattr, 0, CRED(), NULL) == 0) {
76 u_offset_t size = roundup(vattr.va_size, (u_offset_t)PAGESIZE);
77 u_offset_t offset = segop_getoffset(seg, addr);
78
79 if (offset >= size)
80 return (0);
81 }
82
83 /*
84 * Fail if this is an ISM shared segment and the address is
85 * not within the real size of the spt segment that backs it.
86 */
87 if (seg->s_ops == &segspt_shmops &&
88 addr >= seg->s_base + spt_realsize(seg))
89 return (0);
90
91 /*
92 * Fail if the segment is mapped from /dev/null.
93 * The key is that the mapping comes from segdev and the
94 * type is neither MAP_SHARED nor MAP_PRIVATE.
95 */
96 if (seg->s_ops == &segdev_ops &&
97 ((segop_gettype(seg, addr) & (MAP_SHARED | MAP_PRIVATE)) == 0))
98 return (0);
99
100 /*
101 * Fail if the page is a MAP_NORESERVE page that has
102 * not actually materialized.
103 * We cheat by knowing that segvn is the only segment
104 * driver that supports MAP_NORESERVE.
105 */
106 if (seg->s_ops == &segvn_ops &&
107 (svd = (struct segvn_data *)seg->s_data) != NULL &&
108 (svd->vp == NULL || svd->vp->v_type != VREG) &&
109 (svd->flags & MAP_NORESERVE)) {
110 /*
111 * Guilty knowledge here. We know that
112 * segvn_incore returns more than just the
113 * low-order bit that indicates the page is
114 * actually in memory. If any bits are set,
115 * then there is backing store for the page.
116 */
117 char incore = 0;
118 (void) segop_incore(seg, addr, PAGESIZE, &incore);
119 if (incore == 0)
120 return (0);
121 }
122 return (1);
123 }
124
125 /*
126 * Map address "addr" in address space "as" into a kernel virtual address.
127 * The memory is guaranteed to be resident and locked down.
128 */
129 static caddr_t
130 mapin(struct as *as, caddr_t addr, int writing)
131 {
132 page_t *pp;
133 caddr_t kaddr;
134 pfn_t pfnum;
135
136 /*
137 * NB: Because of past mistakes, we have bits being returned
138 * by getpfnum that are actually the page type bits of the pte.
190 uint_t prot;
191 uint_t prot_rw = writing ? PROT_WRITE : PROT_READ;
192 int protchanged;
193 on_trap_data_t otd;
194 int retrycnt;
195 struct as *as = p->p_as;
196 enum seg_rw rw;
197
198 /*
199 * Locate segment containing address of interest.
200 */
201 page = (caddr_t)(uintptr_t)((uintptr_t)addr & PAGEMASK);
202 retrycnt = 0;
203 AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
204 retry:
205 if ((seg = as_segat(as, page)) == NULL ||
206 !page_valid(seg, page)) {
207 AS_LOCK_EXIT(as, &as->a_lock);
208 return (ENXIO);
209 }
210 (void) segop_getprot(seg, page, 0, &prot);
211
212 protchanged = 0;
213 if ((prot & prot_rw) == 0) {
214 protchanged = 1;
215 err = segop_setprot(seg, page, PAGESIZE, prot | prot_rw);
216
217 if (err == IE_RETRY) {
218 protchanged = 0;
219 ASSERT(retrycnt == 0);
220 retrycnt++;
221 goto retry;
222 }
223
224 if (err != 0) {
225 AS_LOCK_EXIT(as, &as->a_lock);
226 return (ENXIO);
227 }
228 }
229
230 /*
231 * segvn may do a copy-on-write for F_SOFTLOCK/S_READ case to break
232 * sharing to avoid a copy on write of a softlocked page by another
233 * thread. But since we locked the address space as a writer no other
234 * thread can cause a copy on write. S_READ_NOCOW is passed as the
235 * access type to tell segvn that it's ok not to do a copy-on-write
236 * for this SOFTLOCK fault.
237 */
238 if (writing)
239 rw = S_WRITE;
240 else if (seg->s_ops == &segvn_ops)
241 rw = S_READ_NOCOW;
242 else
243 rw = S_READ;
244
245 if (segop_fault(as->a_hat, seg, page, PAGESIZE, F_SOFTLOCK, rw)) {
246 if (protchanged)
247 (void) segop_setprot(seg, page, PAGESIZE, prot);
248 AS_LOCK_EXIT(as, &as->a_lock);
249 return (ENXIO);
250 }
251 CPU_STATS_ADD_K(vm, softlock, 1);
252
253 /*
254 * Make sure we're not trying to read or write off the end of the page.
255 */
256 ASSERT(len <= page + PAGESIZE - addr);
257
258 /*
259 * Map in the locked page, copy to our local buffer,
260 * then map the page out and unlock it.
261 */
262 vaddr = mapin(as, addr, writing);
263
264 /*
265 * Since we are copying memory on behalf of the user process,
266 * protect against memory error correction faults.
267 */
284 else
285 bcopy(vaddr, buf, len);
286 }
287 } else {
288 error = EIO;
289 }
290 no_trap();
291
	/*
	 * If we're writing to an executable page, we may need to synchronize
	 * the I$ with the modifications we made through the D$.
	 */
296 if (writing && (prot & PROT_EXEC))
297 sync_icache(vaddr, (uint_t)len);
298
299 mapout(as, addr, vaddr, writing);
300
301 if (rw == S_READ_NOCOW)
302 rw = S_READ;
303
304 (void) segop_fault(as->a_hat, seg, page, PAGESIZE, F_SOFTUNLOCK, rw);
305
306 if (protchanged)
307 (void) segop_setprot(seg, page, PAGESIZE, prot);
308
309 AS_LOCK_EXIT(as, &as->a_lock);
310
311 return (error);
312 }
313
/*
 * Copy "len" bytes from user address "a" in process "p" into the
 * kernel buffer "buf".  Thin wrapper that calls urw() with
 * writing == 0.  Returns 0 on success or an errno value
 * (urw() returns ENXIO/EIO on failure).
 */
int
uread(proc_t *p, void *buf, size_t len, uintptr_t a)
{
	return (urw(p, 0, buf, len, a));
}
319
/*
 * Copy "len" bytes from the kernel buffer "buf" to user address "a"
 * in process "p".  Thin wrapper that calls urw() with writing == 1.
 * Returns 0 on success or an errno value (urw() returns ENXIO/EIO
 * on failure).
 */
int
uwrite(proc_t *p, void *buf, size_t len, uintptr_t a)
{
	return (urw(p, 1, buf, len, a));
}
|