6149 use NULL capable segop as a shorthand for no-capabilities
--- old/usr/src/uts/common/vm/seg_map.c
+++ new/usr/src/uts/common/vm/seg_map.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
27 27 /* All Rights Reserved */
28 28
29 29 /*
30 30 * Portions of this source code were derived from Berkeley 4.3 BSD
31 31 * under license from the Regents of the University of California.
32 32 */
33 33
34 34 /*
35 35 * VM - generic vnode mapping segment.
36 36 *
37 37 * The segmap driver is used only by the kernel to get faster (than seg_vn)
38 38 * mappings [lower routine overhead; more persistent cache] to random
39 39 * vnode/offsets. Note that the kernel may (and does) use seg_vn as well.
40 40 */
41 41
42 42 #include <sys/types.h>
43 43 #include <sys/t_lock.h>
44 44 #include <sys/param.h>
45 45 #include <sys/sysmacros.h>
46 46 #include <sys/buf.h>
47 47 #include <sys/systm.h>
48 48 #include <sys/vnode.h>
49 49 #include <sys/mman.h>
50 50 #include <sys/errno.h>
51 51 #include <sys/cred.h>
52 52 #include <sys/kmem.h>
53 53 #include <sys/vtrace.h>
54 54 #include <sys/cmn_err.h>
55 55 #include <sys/debug.h>
56 56 #include <sys/thread.h>
57 57 #include <sys/dumphdr.h>
58 58 #include <sys/bitmap.h>
59 59 #include <sys/lgrp.h>
60 60
61 61 #include <vm/seg_kmem.h>
62 62 #include <vm/hat.h>
63 63 #include <vm/as.h>
64 64 #include <vm/seg.h>
65 65 #include <vm/seg_kpm.h>
66 66 #include <vm/seg_map.h>
67 67 #include <vm/page.h>
68 68 #include <vm/pvn.h>
69 69 #include <vm/rm.h>
70 70
71 71 /*
72 72 * Private seg op routines.
73 73 */
74 74 static void segmap_free(struct seg *seg);
75 75 faultcode_t segmap_fault(struct hat *hat, struct seg *seg, caddr_t addr,
76 76 size_t len, enum fault_type type, enum seg_rw rw);
77 77 static faultcode_t segmap_faulta(struct seg *seg, caddr_t addr);
78 78 static int segmap_checkprot(struct seg *seg, caddr_t addr, size_t len,
79 79 uint_t prot);
80 80 static int segmap_kluster(struct seg *seg, caddr_t addr, ssize_t);
81 81 static int segmap_getprot(struct seg *seg, caddr_t addr, size_t len,
82 82 uint_t *protv);
83 83 static u_offset_t segmap_getoffset(struct seg *seg, caddr_t addr);
84 84 static int segmap_gettype(struct seg *seg, caddr_t addr);
85 85 static int segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
86 86 static void segmap_dump(struct seg *seg);
87 87 static int segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
88 88 struct page ***ppp, enum lock_type type,
89 89 enum seg_rw rw);
90 90 static void segmap_badop(void);
91 91 static int segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
92 -static int segmap_capable(struct seg *seg, segcapability_t capability);
93 92
94 93 /* segkpm support */
95 94 static caddr_t segmap_pagecreate_kpm(struct seg *, vnode_t *, u_offset_t,
96 95 struct smap *, enum seg_rw);
97 96 struct smap *get_smap_kpm(caddr_t, page_t **);
98 97
99 98 #define SEGMAP_BADOP(t) (t(*)())segmap_badop
100 99
101 100 static struct seg_ops segmap_ops = {
102 101 .dup = SEGMAP_BADOP(int),
103 102 .unmap = SEGMAP_BADOP(int),
104 103 .free = segmap_free,
105 104 .fault = segmap_fault,
106 105 .faulta = segmap_faulta,
107 106 .setprot = SEGMAP_BADOP(int),
108 107 .checkprot = segmap_checkprot,
109 108 .kluster = segmap_kluster,
110 109 .swapout = SEGMAP_BADOP(size_t),
111 110 .sync = SEGMAP_BADOP(int),
112 111 .incore = SEGMAP_BADOP(size_t),
113 112 .lockop = SEGMAP_BADOP(int),
114 113 .getprot = segmap_getprot,
115 114 .getoffset = segmap_getoffset,
116 115 .gettype = segmap_gettype,
117 116 .getvp = segmap_getvp,
118 117 .advise = SEGMAP_BADOP(int),
119 118 .dump = segmap_dump,
120 119 .pagelock = segmap_pagelock,
121 120 .setpagesize = SEGMAP_BADOP(int),
122 121 .getmemid = segmap_getmemid,
123 - .capable = segmap_capable,
124 122 };
125 123
126 124 /*
127 125 * Private segmap routines.
128 126 */
129 127 static void segmap_unlock(struct hat *hat, struct seg *seg, caddr_t addr,
130 128 size_t len, enum seg_rw rw, struct smap *smp);
131 129 static void segmap_smapadd(struct smap *smp);
132 130 static struct smap *segmap_hashin(struct smap *smp, struct vnode *vp,
133 131 u_offset_t off, int hashid);
134 132 static void segmap_hashout(struct smap *smp);
135 133
136 134
137 135 /*
138 136 * Statistics for segmap operations.
139 137 *
140 138 * No explicit locking to protect these stats.
141 139 */
142 140 struct segmapcnt segmapcnt = {
143 141 { "fault", KSTAT_DATA_ULONG },
144 142 { "faulta", KSTAT_DATA_ULONG },
145 143 { "getmap", KSTAT_DATA_ULONG },
146 144 { "get_use", KSTAT_DATA_ULONG },
147 145 { "get_reclaim", KSTAT_DATA_ULONG },
148 146 { "get_reuse", KSTAT_DATA_ULONG },
149 147 { "get_unused", KSTAT_DATA_ULONG },
150 148 { "get_nofree", KSTAT_DATA_ULONG },
151 149 { "rel_async", KSTAT_DATA_ULONG },
152 150 { "rel_write", KSTAT_DATA_ULONG },
153 151 { "rel_free", KSTAT_DATA_ULONG },
154 152 { "rel_abort", KSTAT_DATA_ULONG },
155 153 { "rel_dontneed", KSTAT_DATA_ULONG },
156 154 { "release", KSTAT_DATA_ULONG },
157 155 { "pagecreate", KSTAT_DATA_ULONG },
158 156 { "free_notfree", KSTAT_DATA_ULONG },
159 157 { "free_dirty", KSTAT_DATA_ULONG },
160 158 { "free", KSTAT_DATA_ULONG },
161 159 { "stolen", KSTAT_DATA_ULONG },
162 160 { "get_nomtx", KSTAT_DATA_ULONG }
163 161 };
164 162
165 163 kstat_named_t *segmapcnt_ptr = (kstat_named_t *)&segmapcnt;
166 164 uint_t segmapcnt_ndata = sizeof (segmapcnt) / sizeof (kstat_named_t);
167 165
168 166 /*
169 167 * Return number of map pages in segment.
170 168 */
171 169 #define MAP_PAGES(seg) ((seg)->s_size >> MAXBSHIFT)
172 170
173 171 /*
174 172 * Translate addr into smap number within segment.
175 173 */
176 174 #define MAP_PAGE(seg, addr) (((addr) - (seg)->s_base) >> MAXBSHIFT)
177 175
178 176 /*
179 177 * Translate addr in seg into struct smap pointer.
180 178 */
181 179 #define GET_SMAP(seg, addr) \
182 180 &(((struct segmap_data *)((seg)->s_data))->smd_sm[MAP_PAGE(seg, addr)])
183 181
184 182 /*
185 183 * Bit in map (16 bit bitmap).
186 184 */
187 185 #define SMAP_BIT_MASK(bitindex) (1 << ((bitindex) & 0xf))
188 186
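To make the slot and bitmap translation macros above concrete, here is a small illustrative sketch that is not part of this change, assuming the usual values MAXBSHIFT == 13 (8K MAXBSIZE) and PAGESHIFT == 12 (4K pages):

static ushort_t
smap_translate_sketch(struct seg *seg)
{
	caddr_t addr = seg->s_base + 0x5000;		/* 0x5000 past s_base */
	struct smap *smp = GET_SMAP(seg, addr);		/* slot 0x5000 >> 13 == 2 */
	size_t slotoff = (uintptr_t)addr & MAXBOFFSET;	/* 0x1000 within slot 2 */

	/* GET_SMAP is just the smd_sm[] element picked out by MAP_PAGE() */
	ASSERT(smp == &((struct segmap_data *)seg->s_data)->smd_sm[2]);
	return (SMAP_BIT_MASK(slotoff >> PAGESHIFT));	/* second page: 1 << 1 */
}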
189 187 static int smd_colormsk = 0;
190 188 static int smd_ncolor = 0;
191 189 static int smd_nfree = 0;
192 190 static int smd_freemsk = 0;
193 191 #ifdef DEBUG
194 192 static int *colors_used;
195 193 #endif
196 194 static struct smap *smd_smap;
197 195 static struct smaphash *smd_hash;
198 196 #ifdef SEGMAP_HASHSTATS
199 197 static unsigned int *smd_hash_len;
200 198 #endif
201 199 static struct smfree *smd_free;
202 200 static ulong_t smd_hashmsk = 0;
203 201
204 202 #define SEGMAP_MAXCOLOR 2
205 203 #define SEGMAP_CACHE_PAD 64
206 204
207 205 union segmap_cpu {
208 206 struct {
209 207 uint32_t scpu_free_ndx[SEGMAP_MAXCOLOR];
210 208 struct smap *scpu_last_smap;
211 209 ulong_t scpu_getmap;
212 210 ulong_t scpu_release;
213 211 ulong_t scpu_get_reclaim;
214 212 ulong_t scpu_fault;
215 213 ulong_t scpu_pagecreate;
216 214 ulong_t scpu_get_reuse;
217 215 } scpu;
218 216 char scpu_pad[SEGMAP_CACHE_PAD];
219 217 };
220 218 static union segmap_cpu *smd_cpu;
221 219
222 220 /*
223 221 * There are three locks in seg_map:
224 222 * - per freelist mutexes
225 223 * - per hashchain mutexes
226 224 * - per smap mutexes
227 225 *
228 226 * The lock ordering is to get the smap mutex to lock down the slot
229 227 * first then the hash lock (for hash in/out (vp, off) list) or the
230 228 * freelist lock to put the slot back on the free list.
231 229 *
232 230 * The hash search is done by only holding the hashchain lock, when a wanted
233 231 * slot is found, we drop the hashchain lock then lock the slot so there
234 232 * is no overlapping of hashchain and smap locks. After the slot is
235 233 * locked, we verify again if the slot is still what we are looking
236 234 * for.
237 235 *
238 236 * Allocation of a free slot is done by holding the freelist lock,
239 237 * then locking the smap slot at the head of the freelist. This is
240 238 * in reversed lock order so mutex_tryenter() is used.
241 239 *
242 240 * The smap lock protects all fields in smap structure except for
243 241 * the link fields for hash/free lists which are protected by
244 242 * hashchain and freelist locks.
245 243 */
246 244
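The reversed lock order mentioned above is why get_free_smp() further down relies on mutex_tryenter(). For illustration only (the real logic is in get_free_smp()), a minimal sketch of the pattern:

static struct smap *
freelist_alloc_sketch(struct sm_freeq *allocq)
{
	struct smap *smp;

	ASSERT(MUTEX_HELD(&allocq->smq_mtx));
	smp = allocq->smq_free;
	/*
	 * Holding a freelist queue mutex we may only *try* for the
	 * slot's sm_mtx; blocking here would invert the documented
	 * smap -> freelist ordering and could deadlock.
	 */
	if (smp != NULL && mutex_tryenter(SMAPMTX(smp)))
		return (smp);	/* caller unlinks it from the queue */
	return (NULL);		/* contention: skip this slot or retry */
}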
247 245 #define SHASHMTX(hashid) (&smd_hash[hashid].sh_mtx)
248 246
249 247 #define SMP2SMF(smp) (&smd_free[(smp - smd_smap) & smd_freemsk])
250 248 #define SMP2SMF_NDX(smp) (ushort_t)((smp - smd_smap) & smd_freemsk)
251 249
252 250 #define SMAPMTX(smp) (&smp->sm_mtx)
253 251
254 252 #define SMAP_HASHFUNC(vp, off, hashid) \
255 253 { \
256 254 hashid = ((((uintptr_t)(vp) >> 6) + ((uintptr_t)(vp) >> 3) + \
257 255 ((off) >> MAXBSHIFT)) & smd_hashmsk); \
258 256 }
259 257
260 258 /*
261 259 * The most frequently updated kstat counters are kept in the
262 260 * per cpu array to avoid hot cache blocks. The update function
263 261 * sums the cpu local counters to update the global counters.
264 262 */
265 263
266 264 /* ARGSUSED */
267 265 int
268 266 segmap_kstat_update(kstat_t *ksp, int rw)
269 267 {
270 268 int i;
271 269 ulong_t getmap, release, get_reclaim;
272 270 ulong_t fault, pagecreate, get_reuse;
273 271
274 272 if (rw == KSTAT_WRITE)
275 273 return (EACCES);
276 274 getmap = release = get_reclaim = (ulong_t)0;
277 275 fault = pagecreate = get_reuse = (ulong_t)0;
278 276 for (i = 0; i < max_ncpus; i++) {
279 277 getmap += smd_cpu[i].scpu.scpu_getmap;
280 278 release += smd_cpu[i].scpu.scpu_release;
281 279 get_reclaim += smd_cpu[i].scpu.scpu_get_reclaim;
282 280 fault += smd_cpu[i].scpu.scpu_fault;
283 281 pagecreate += smd_cpu[i].scpu.scpu_pagecreate;
284 282 get_reuse += smd_cpu[i].scpu.scpu_get_reuse;
285 283 }
286 284 segmapcnt.smp_getmap.value.ul = getmap;
287 285 segmapcnt.smp_release.value.ul = release;
288 286 segmapcnt.smp_get_reclaim.value.ul = get_reclaim;
289 287 segmapcnt.smp_fault.value.ul = fault;
290 288 segmapcnt.smp_pagecreate.value.ul = pagecreate;
291 289 segmapcnt.smp_get_reuse.value.ul = get_reuse;
292 290 return (0);
293 291 }
294 292
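The registration of this update routine is not shown in this diff. As a hedged sketch of how a named kstat backed by segmapcnt is typically wired up (the module/instance/class strings below are illustrative, not taken from this change):

	kstat_t *ksp;

	ksp = kstat_create("unix", 0, "segmap", "vm", KSTAT_TYPE_NAMED,
	    segmapcnt_ndata, KSTAT_FLAG_VIRTUAL);
	if (ksp != NULL) {
		ksp->ks_data = (void *)segmapcnt_ptr;	/* the named counters */
		ksp->ks_update = segmap_kstat_update;	/* fold in per-cpu data */
		kstat_install(ksp);
	}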
295 293 int
296 294 segmap_create(struct seg *seg, void *argsp)
297 295 {
298 296 struct segmap_data *smd;
299 297 struct smap *smp;
300 298 struct smfree *sm;
301 299 struct segmap_crargs *a = (struct segmap_crargs *)argsp;
302 300 struct smaphash *shashp;
303 301 union segmap_cpu *scpu;
304 302 long i, npages;
305 303 size_t hashsz;
306 304 uint_t nfreelist;
307 305 extern void prefetch_smap_w(void *);
308 306 extern int max_ncpus;
309 307
310 308 ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));
311 309
312 310 if (((uintptr_t)seg->s_base | seg->s_size) & MAXBOFFSET) {
313 311 panic("segkmap not MAXBSIZE aligned");
314 312 /*NOTREACHED*/
315 313 }
316 314
317 315 smd = kmem_zalloc(sizeof (struct segmap_data), KM_SLEEP);
318 316
319 317 seg->s_data = (void *)smd;
320 318 seg->s_ops = &segmap_ops;
321 319 smd->smd_prot = a->prot;
322 320
323 321 /*
324 322 * Scale the number of smap freelists to be
325 323 * proportional to max_ncpus * number of virtual colors.
326 324 * The caller can over-ride this scaling by providing
327 325 * a non-zero a->nfreelist argument.
328 326 */
329 327 nfreelist = a->nfreelist;
330 328 if (nfreelist == 0)
331 329 nfreelist = max_ncpus;
332 330 else if (nfreelist < 0 || nfreelist > 4 * max_ncpus) {
333 331 cmn_err(CE_WARN, "segmap_create: nfreelist out of range "
334 332 "%d, using %d", nfreelist, max_ncpus);
335 333 nfreelist = max_ncpus;
336 334 }
337 335 if (!ISP2(nfreelist)) {
338 336 /* round up nfreelist to the next power of two. */
339 337 nfreelist = 1 << (highbit(nfreelist));
340 338 }
341 339
342 340 /*
343 341 * Get the number of virtual colors - must be a power of 2.
344 342 */
345 343 if (a->shmsize)
346 344 smd_ncolor = a->shmsize >> MAXBSHIFT;
347 345 else
348 346 smd_ncolor = 1;
349 347 ASSERT((smd_ncolor & (smd_ncolor - 1)) == 0);
350 348 ASSERT(smd_ncolor <= SEGMAP_MAXCOLOR);
351 349 smd_colormsk = smd_ncolor - 1;
352 350 smd->smd_nfree = smd_nfree = smd_ncolor * nfreelist;
353 351 smd_freemsk = smd_nfree - 1;
354 352
355 353 /*
356 354 * Allocate and initialize the freelist headers.
357 355 * Note that sm_freeq[1] starts out as the release queue. This
358 356 * is known when the smap structures are initialized below.
359 357 */
360 358 smd_free = smd->smd_free =
361 359 kmem_zalloc(smd_nfree * sizeof (struct smfree), KM_SLEEP);
362 360 for (i = 0; i < smd_nfree; i++) {
363 361 sm = &smd->smd_free[i];
364 362 mutex_init(&sm->sm_freeq[0].smq_mtx, NULL, MUTEX_DEFAULT, NULL);
365 363 mutex_init(&sm->sm_freeq[1].smq_mtx, NULL, MUTEX_DEFAULT, NULL);
366 364 sm->sm_allocq = &sm->sm_freeq[0];
367 365 sm->sm_releq = &sm->sm_freeq[1];
368 366 }
369 367
370 368 /*
371 369 * Allocate and initialize the smap hash chain headers.
372 370 * Compute hash size rounding down to the next power of two.
373 371 */
374 372 npages = MAP_PAGES(seg);
375 373 smd->smd_npages = npages;
376 374 hashsz = npages / SMAP_HASHAVELEN;
377 375 hashsz = 1 << (highbit(hashsz)-1);
378 376 smd_hashmsk = hashsz - 1;
379 377 smd_hash = smd->smd_hash =
380 378 kmem_alloc(hashsz * sizeof (struct smaphash), KM_SLEEP);
381 379 #ifdef SEGMAP_HASHSTATS
382 380 smd_hash_len =
383 381 kmem_zalloc(hashsz * sizeof (unsigned int), KM_SLEEP);
384 382 #endif
385 383 for (i = 0, shashp = smd_hash; i < hashsz; i++, shashp++) {
386 384 shashp->sh_hash_list = NULL;
387 385 mutex_init(&shashp->sh_mtx, NULL, MUTEX_DEFAULT, NULL);
388 386 }
389 387
390 388 /*
391 389 * Allocate and initialize the smap structures.
392 390 * Link all slots onto the appropriate freelist.
393 391 * The smap array is large enough to affect boot time
394 392 * on large systems, so use memory prefetching and only
395 393 * go through the array 1 time. Inline an optimized version
396 394 * of segmap_smapadd to add structures to freelists with
397 395 * knowledge that no locks are needed here.
398 396 */
399 397 smd_smap = smd->smd_sm =
400 398 kmem_alloc(sizeof (struct smap) * npages, KM_SLEEP);
401 399
402 400 for (smp = &smd->smd_sm[MAP_PAGES(seg) - 1];
403 401 smp >= smd->smd_sm; smp--) {
404 402 struct smap *smpfreelist;
405 403 struct sm_freeq *releq;
406 404
407 405 prefetch_smap_w((char *)smp);
408 406
409 407 smp->sm_vp = NULL;
410 408 smp->sm_hash = NULL;
411 409 smp->sm_off = 0;
412 410 smp->sm_bitmap = 0;
413 411 smp->sm_refcnt = 0;
414 412 mutex_init(&smp->sm_mtx, NULL, MUTEX_DEFAULT, NULL);
415 413 smp->sm_free_ndx = SMP2SMF_NDX(smp);
416 414
417 415 sm = SMP2SMF(smp);
418 416 releq = sm->sm_releq;
419 417
420 418 smpfreelist = releq->smq_free;
421 419 if (smpfreelist == 0) {
422 420 releq->smq_free = smp->sm_next = smp->sm_prev = smp;
423 421 } else {
424 422 smp->sm_next = smpfreelist;
425 423 smp->sm_prev = smpfreelist->sm_prev;
426 424 smpfreelist->sm_prev = smp;
427 425 smp->sm_prev->sm_next = smp;
428 426 releq->smq_free = smp->sm_next;
429 427 }
430 428
431 429 /*
432 430 * sm_flag = 0 (no SM_QNDX_ZERO) implies smap on sm_freeq[1]
433 431 */
434 432 smp->sm_flags = 0;
435 433
436 434 #ifdef SEGKPM_SUPPORT
437 435 /*
438 436 * Due to the fragile prefetch loop no
439 437 * separate function is used here.
440 438 */
441 439 smp->sm_kpme_next = NULL;
442 440 smp->sm_kpme_prev = NULL;
443 441 smp->sm_kpme_page = NULL;
444 442 #endif
445 443 }
446 444
447 445 /*
448 446 * Allocate the per color indices that distribute allocation
449 447 * requests over the free lists. Each cpu will have a private
450 448 * rotor index to spread the allocations even across the available
451 449 * smap freelists. Init the scpu_last_smap field to the first
452 450 * smap element so there is no need to check for NULL.
453 451 */
454 452 smd_cpu =
455 453 kmem_zalloc(sizeof (union segmap_cpu) * max_ncpus, KM_SLEEP);
456 454 for (i = 0, scpu = smd_cpu; i < max_ncpus; i++, scpu++) {
457 455 int j;
458 456 for (j = 0; j < smd_ncolor; j++)
459 457 scpu->scpu.scpu_free_ndx[j] = j;
460 458 scpu->scpu.scpu_last_smap = smd_smap;
461 459 }
462 460
463 461 vpm_init();
464 462
465 463 #ifdef DEBUG
466 464 /*
467 465 * Keep track of which colors are used more often.
468 466 */
469 467 colors_used = kmem_zalloc(smd_nfree * sizeof (int), KM_SLEEP);
470 468 #endif /* DEBUG */
471 469
472 470 return (0);
473 471 }
474 472
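For context, and not part of this diff: segmap_create() is invoked once during boot, after segkmap has been attached to the kernel address space. A simplified sketch of that call site, using placeholder names segkmap_base and segkmap_size for the chosen window:

	struct segmap_crargs a;

	rw_enter(&kas.a_lock, RW_WRITER);
	if (seg_attach(&kas, segkmap_base, segkmap_size, segkmap) < 0)
		panic("cannot attach segkmap");

	a.prot = PROT_READ | PROT_WRITE;
	a.shmsize = 0;		/* shm_alignment on VAC machines */
	a.nfreelist = 0;	/* take the max_ncpus-based default */

	if (segmap_create(segkmap, (caddr_t)&a) != 0)
		panic("cannot create segkmap");
	rw_exit(&kas.a_lock);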
475 473 static void
476 474 segmap_free(seg)
477 475 struct seg *seg;
478 476 {
479 477 ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));
480 478 }
481 479
482 480 /*
483 481 * Do a F_SOFTUNLOCK call over the range requested.
484 482 * The range must have already been F_SOFTLOCK'ed.
485 483 */
486 484 static void
487 485 segmap_unlock(
488 486 struct hat *hat,
489 487 struct seg *seg,
490 488 caddr_t addr,
491 489 size_t len,
492 490 enum seg_rw rw,
493 491 struct smap *smp)
494 492 {
495 493 page_t *pp;
496 494 caddr_t adr;
497 495 u_offset_t off;
498 496 struct vnode *vp;
499 497 kmutex_t *smtx;
500 498
501 499 ASSERT(smp->sm_refcnt > 0);
502 500
503 501 #ifdef lint
504 502 seg = seg;
505 503 #endif
506 504
507 505 if (segmap_kpm && IS_KPM_ADDR(addr)) {
508 506
509 507 /*
510 508 * We're called only from segmap_fault and this was a
511 509 * NOP in case of a kpm based smap, so dangerous things
512 510 * must have happened in the meantime. Pages are prefaulted
513 511 * and locked in segmap_getmapflt and they will not be
514 512 * unlocked until segmap_release.
515 513 */
516 514 panic("segmap_unlock: called with kpm addr %p", (void *)addr);
517 515 /*NOTREACHED*/
518 516 }
519 517
520 518 vp = smp->sm_vp;
521 519 off = smp->sm_off + (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
522 520
523 521 hat_unlock(hat, addr, P2ROUNDUP(len, PAGESIZE));
524 522 for (adr = addr; adr < addr + len; adr += PAGESIZE, off += PAGESIZE) {
525 523 ushort_t bitmask;
526 524
527 525 /*
528 526 * Use page_find() instead of page_lookup() to
529 527 * find the page since we know that it has
530 528 * "shared" lock.
531 529 */
532 530 pp = page_find(vp, off);
533 531 if (pp == NULL) {
534 532 panic("segmap_unlock: page not found");
535 533 /*NOTREACHED*/
536 534 }
537 535
538 536 if (rw == S_WRITE) {
539 537 hat_setrefmod(pp);
540 538 } else if (rw != S_OTHER) {
541 539 TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
542 540 "segmap_fault:pp %p vp %p offset %llx", pp, vp, off);
543 541 hat_setref(pp);
544 542 }
545 543
546 544 /*
547 545 * Clear bitmap, if the bit corresponding to "off" is set,
548 546 * since the page and translation are being unlocked.
549 547 */
550 548 bitmask = SMAP_BIT_MASK((off - smp->sm_off) >> PAGESHIFT);
551 549
552 550 /*
553 551 * Large Files: Following assertion is to verify
554 552 * the correctness of the cast to (int) above.
555 553 */
556 554 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
557 555 smtx = SMAPMTX(smp);
558 556 mutex_enter(smtx);
559 557 if (smp->sm_bitmap & bitmask) {
560 558 smp->sm_bitmap &= ~bitmask;
561 559 }
562 560 mutex_exit(smtx);
563 561
564 562 page_unlock(pp);
565 563 }
566 564 }
567 565
568 566 #define MAXPPB (MAXBSIZE/4096) /* assumes minimum page size of 4k */
569 567
570 568 /*
571 569 * This routine is called via a machine specific fault handling
572 570 * routine. It is also called by software routines wishing to
573 571 * lock or unlock a range of addresses.
574 572 *
575 573 * Note that this routine expects a page-aligned "addr".
576 574 */
577 575 faultcode_t
578 576 segmap_fault(
579 577 struct hat *hat,
580 578 struct seg *seg,
581 579 caddr_t addr,
582 580 size_t len,
583 581 enum fault_type type,
584 582 enum seg_rw rw)
585 583 {
586 584 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
587 585 struct smap *smp;
588 586 page_t *pp, **ppp;
589 587 struct vnode *vp;
590 588 u_offset_t off;
591 589 page_t *pl[MAXPPB + 1];
592 590 uint_t prot;
593 591 u_offset_t addroff;
594 592 caddr_t adr;
595 593 int err;
596 594 u_offset_t sm_off;
597 595 int hat_flag;
598 596
599 597 if (segmap_kpm && IS_KPM_ADDR(addr)) {
600 598 int newpage;
601 599 kmutex_t *smtx;
602 600
603 601 /*
604 602 * Pages are successfully prefaulted and locked in
605 603 * segmap_getmapflt and can't be unlocked until
606 604 * segmap_release. No hat mappings have to be locked
607 605 * and they also can't be unlocked as long as the
608 606 * caller owns an active kpm addr.
609 607 */
610 608 #ifndef DEBUG
611 609 if (type != F_SOFTUNLOCK)
612 610 return (0);
613 611 #endif
614 612
615 613 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
616 614 panic("segmap_fault: smap not found "
617 615 "for addr %p", (void *)addr);
618 616 /*NOTREACHED*/
619 617 }
620 618
621 619 smtx = SMAPMTX(smp);
622 620 #ifdef DEBUG
623 621 newpage = smp->sm_flags & SM_KPM_NEWPAGE;
624 622 if (newpage) {
625 623 cmn_err(CE_WARN, "segmap_fault: newpage? smp %p",
626 624 (void *)smp);
627 625 }
628 626
629 627 if (type != F_SOFTUNLOCK) {
630 628 mutex_exit(smtx);
631 629 return (0);
632 630 }
633 631 #endif
634 632 mutex_exit(smtx);
635 633 vp = smp->sm_vp;
636 634 sm_off = smp->sm_off;
637 635
638 636 if (vp == NULL)
639 637 return (FC_MAKE_ERR(EIO));
640 638
641 639 ASSERT(smp->sm_refcnt > 0);
642 640
643 641 addroff = (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
644 642 if (addroff + len > MAXBSIZE)
645 643 panic("segmap_fault: endaddr %p exceeds MAXBSIZE chunk",
646 644 (void *)(addr + len));
647 645
648 646 off = sm_off + addroff;
649 647
650 648 pp = page_find(vp, off);
651 649
652 650 if (pp == NULL)
653 651 panic("segmap_fault: softunlock page not found");
654 652
655 653 /*
656 654 * Set ref bit also here in case of S_OTHER to avoid the
657 655 * overhead of supporting other cases than F_SOFTUNLOCK
658 656 * with segkpm. We can do this because the underlying
659 657 * pages are locked anyway.
660 658 */
661 659 if (rw == S_WRITE) {
662 660 hat_setrefmod(pp);
663 661 } else {
664 662 TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
665 663 "segmap_fault:pp %p vp %p offset %llx",
666 664 pp, vp, off);
667 665 hat_setref(pp);
668 666 }
669 667
670 668 return (0);
671 669 }
672 670
673 671 smd_cpu[CPU->cpu_seqid].scpu.scpu_fault++;
674 672 smp = GET_SMAP(seg, addr);
675 673 vp = smp->sm_vp;
676 674 sm_off = smp->sm_off;
677 675
678 676 if (vp == NULL)
679 677 return (FC_MAKE_ERR(EIO));
680 678
681 679 ASSERT(smp->sm_refcnt > 0);
682 680
683 681 addroff = (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
684 682 if (addroff + len > MAXBSIZE) {
685 683 panic("segmap_fault: endaddr %p "
686 684 "exceeds MAXBSIZE chunk", (void *)(addr + len));
687 685 /*NOTREACHED*/
688 686 }
689 687 off = sm_off + addroff;
690 688
691 689 /*
692 690 * First handle the easy stuff
693 691 */
694 692 if (type == F_SOFTUNLOCK) {
695 693 segmap_unlock(hat, seg, addr, len, rw, smp);
696 694 return (0);
697 695 }
698 696
699 697 TRACE_3(TR_FAC_VM, TR_SEGMAP_GETPAGE,
700 698 "segmap_getpage:seg %p addr %p vp %p", seg, addr, vp);
701 699 err = VOP_GETPAGE(vp, (offset_t)off, len, &prot, pl, MAXBSIZE,
702 700 seg, addr, rw, CRED(), NULL);
703 701
704 702 if (err)
705 703 return (FC_MAKE_ERR(err));
706 704
707 705 prot &= smd->smd_prot;
708 706
709 707 /*
710 708 * Handle all pages returned in the pl[] array.
711 709 * This loop is coded on the assumption that if
712 710 * there was no error from the VOP_GETPAGE routine,
713 711 * that the page list returned will contain all the
714 712 * needed pages for the vp from [off..off + len].
715 713 */
716 714 ppp = pl;
717 715 while ((pp = *ppp++) != NULL) {
718 716 u_offset_t poff;
719 717 ASSERT(pp->p_vnode == vp);
720 718 hat_flag = HAT_LOAD;
721 719
722 720 /*
723 721 * Verify that the pages returned are within the range
724 722 * of this segmap region. Note that it is theoretically
725 723 * possible for pages outside this range to be returned,
726 724 * but it is not very likely. If we cannot use the
727 725 * page here, just release it and go on to the next one.
728 726 */
729 727 if (pp->p_offset < sm_off ||
730 728 pp->p_offset >= sm_off + MAXBSIZE) {
731 729 (void) page_release(pp, 1);
732 730 continue;
733 731 }
734 732
735 733 ASSERT(hat == kas.a_hat);
736 734 poff = pp->p_offset;
737 735 adr = addr + (poff - off);
738 736 if (adr >= addr && adr < addr + len) {
739 737 hat_setref(pp);
740 738 TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
741 739 "segmap_fault:pp %p vp %p offset %llx",
742 740 pp, vp, poff);
743 741 if (type == F_SOFTLOCK)
744 742 hat_flag = HAT_LOAD_LOCK;
745 743 }
746 744
747 745 /*
748 746 * Deal with VMODSORT pages here. If we know this is a write
749 747 * do the setmod now and allow write protection.
750 748 * As long as it's modified or not S_OTHER, remove write
751 749 * protection. With S_OTHER it's up to the FS to deal with this.
752 750 */
753 751 if (IS_VMODSORT(vp)) {
754 752 if (rw == S_WRITE)
755 753 hat_setmod(pp);
756 754 else if (rw != S_OTHER && !hat_ismod(pp))
757 755 prot &= ~PROT_WRITE;
758 756 }
759 757
760 758 hat_memload(hat, adr, pp, prot, hat_flag);
761 759 if (hat_flag != HAT_LOAD_LOCK)
762 760 page_unlock(pp);
763 761 }
764 762 return (0);
765 763 }
766 764
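A hypothetical caller-side sketch of the F_SOFTLOCK/F_SOFTUNLOCK pairing handled above (not taken from any particular consumer): the range is locked down for the duration of the access and then released.

static int
window_copy_sketch(caddr_t win, const void *src, size_t len)
{
	if (segmap_fault(kas.a_hat, segkmap, win, len,
	    F_SOFTLOCK, S_WRITE) != 0)
		return (EIO);
	bcopy(src, win, len);			/* pages stay locked here */
	(void) segmap_fault(kas.a_hat, segkmap, win, len,
	    F_SOFTUNLOCK, S_WRITE);
	return (0);
}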
767 765 /*
768 766 * This routine is used to start I/O on pages asynchronously.
769 767 */
770 768 static faultcode_t
771 769 segmap_faulta(struct seg *seg, caddr_t addr)
772 770 {
773 771 struct smap *smp;
774 772 struct vnode *vp;
775 773 u_offset_t off;
776 774 int err;
777 775
778 776 if (segmap_kpm && IS_KPM_ADDR(addr)) {
779 777 int newpage;
780 778 kmutex_t *smtx;
781 779
782 780 /*
783 781 * Pages are successfully prefaulted and locked in
784 782 * segmap_getmapflt and can't be unlocked until
785 783 * segmap_release. No hat mappings have to be locked
786 784 * and they also can't be unlocked as long as the
787 785 * caller owns an active kpm addr.
788 786 */
789 787 #ifdef DEBUG
790 788 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
791 789 panic("segmap_faulta: smap not found "
792 790 "for addr %p", (void *)addr);
793 791 /*NOTREACHED*/
794 792 }
795 793
796 794 smtx = SMAPMTX(smp);
797 795 newpage = smp->sm_flags & SM_KPM_NEWPAGE;
798 796 mutex_exit(smtx);
799 797 if (newpage)
800 798 cmn_err(CE_WARN, "segmap_faulta: newpage? smp %p",
801 799 (void *)smp);
802 800 #endif
803 801 return (0);
804 802 }
805 803
806 804 segmapcnt.smp_faulta.value.ul++;
807 805 smp = GET_SMAP(seg, addr);
808 806
809 807 ASSERT(smp->sm_refcnt > 0);
810 808
811 809 vp = smp->sm_vp;
812 810 off = smp->sm_off;
813 811
814 812 if (vp == NULL) {
815 813 cmn_err(CE_WARN, "segmap_faulta - no vp");
816 814 return (FC_MAKE_ERR(EIO));
817 815 }
818 816
819 817 TRACE_3(TR_FAC_VM, TR_SEGMAP_GETPAGE,
820 818 "segmap_getpage:seg %p addr %p vp %p", seg, addr, vp);
821 819
822 820 err = VOP_GETPAGE(vp, (offset_t)(off + ((offset_t)((uintptr_t)addr
823 821 & MAXBOFFSET))), PAGESIZE, (uint_t *)NULL, (page_t **)NULL, 0,
824 822 seg, addr, S_READ, CRED(), NULL);
825 823
826 824 if (err)
827 825 return (FC_MAKE_ERR(err));
828 826 return (0);
829 827 }
830 828
831 829 /*ARGSUSED*/
832 830 static int
833 831 segmap_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
834 832 {
835 833 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
836 834
837 835 ASSERT(seg->s_as && RW_LOCK_HELD(&seg->s_as->a_lock));
838 836
839 837 /*
840 838 * Need not acquire the segment lock since
841 839 * "smd_prot" is a read-only field.
842 840 */
843 841 return (((smd->smd_prot & prot) != prot) ? EACCES : 0);
844 842 }
845 843
846 844 static int
847 845 segmap_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
848 846 {
849 847 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
850 848 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
851 849
852 850 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
853 851
854 852 if (pgno != 0) {
855 853 do {
856 854 protv[--pgno] = smd->smd_prot;
857 855 } while (pgno != 0);
858 856 }
859 857 return (0);
860 858 }
861 859
862 860 static u_offset_t
863 861 segmap_getoffset(struct seg *seg, caddr_t addr)
864 862 {
865 863 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
866 864
867 865 ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));
868 866
869 867 return ((u_offset_t)smd->smd_sm->sm_off + (addr - seg->s_base));
870 868 }
871 869
872 870 /*ARGSUSED*/
873 871 static int
874 872 segmap_gettype(struct seg *seg, caddr_t addr)
875 873 {
876 874 ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));
877 875
878 876 return (MAP_SHARED);
879 877 }
880 878
881 879 /*ARGSUSED*/
882 880 static int
883 881 segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
884 882 {
885 883 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
886 884
887 885 ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));
888 886
889 887 /* XXX - This doesn't make any sense */
890 888 *vpp = smd->smd_sm->sm_vp;
891 889 return (0);
892 890 }
893 891
894 892 /*
895 893 * Check to see if it makes sense to do kluster/read ahead to
896 894 * addr + delta relative to the mapping at addr. We assume here
897 895 * that delta is a signed PAGESIZE'd multiple (which can be negative).
898 896 *
899 897 * For segmap we always "approve" of this action from our standpoint.
900 898 */
901 899 /*ARGSUSED*/
902 900 static int
903 901 segmap_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
904 902 {
905 903 return (0);
906 904 }
907 905
908 906 static void
909 907 segmap_badop()
910 908 {
911 909 panic("segmap_badop");
912 910 /*NOTREACHED*/
913 911 }
914 912
915 913 /*
916 914 * Special private segmap operations
917 915 */
918 916
919 917 /*
920 918 * Add smap to the appropriate free list.
921 919 */
922 920 static void
923 921 segmap_smapadd(struct smap *smp)
924 922 {
925 923 struct smfree *sm;
926 924 struct smap *smpfreelist;
927 925 struct sm_freeq *releq;
928 926
929 927 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
930 928
931 929 if (smp->sm_refcnt != 0) {
932 930 panic("segmap_smapadd");
933 931 /*NOTREACHED*/
934 932 }
935 933
936 934 sm = &smd_free[smp->sm_free_ndx];
937 935 /*
938 936 * Add to the tail of the release queue
939 937 * Note that sm_releq and sm_allocq could toggle
940 938 * before we get the lock. This does not affect
941 939 * correctness as the 2 queues are only maintained
942 940 * to reduce lock pressure.
943 941 */
944 942 releq = sm->sm_releq;
945 943 if (releq == &sm->sm_freeq[0])
946 944 smp->sm_flags |= SM_QNDX_ZERO;
947 945 else
948 946 smp->sm_flags &= ~SM_QNDX_ZERO;
949 947 mutex_enter(&releq->smq_mtx);
950 948 smpfreelist = releq->smq_free;
951 949 if (smpfreelist == 0) {
952 950 int want;
953 951
954 952 releq->smq_free = smp->sm_next = smp->sm_prev = smp;
955 953 /*
956 954 * Both queue mutexes held to set sm_want;
957 955 * snapshot the value before dropping releq mutex.
958 956 * If sm_want appears after the releq mutex is dropped,
959 957 * then the smap just freed is already gone.
960 958 */
961 959 want = sm->sm_want;
962 960 mutex_exit(&releq->smq_mtx);
963 961 /*
964 962 * See if there was a waiter before dropping the releq mutex
965 963 * then recheck after obtaining sm_freeq[0] mutex as
966 964 * another thread may have already signaled.
967 965 */
968 966 if (want) {
969 967 mutex_enter(&sm->sm_freeq[0].smq_mtx);
970 968 if (sm->sm_want)
971 969 cv_signal(&sm->sm_free_cv);
972 970 mutex_exit(&sm->sm_freeq[0].smq_mtx);
973 971 }
974 972 } else {
975 973 smp->sm_next = smpfreelist;
976 974 smp->sm_prev = smpfreelist->sm_prev;
977 975 smpfreelist->sm_prev = smp;
978 976 smp->sm_prev->sm_next = smp;
979 977 mutex_exit(&releq->smq_mtx);
980 978 }
981 979 }
982 980
983 981
984 982 static struct smap *
985 983 segmap_hashin(struct smap *smp, struct vnode *vp, u_offset_t off, int hashid)
986 984 {
987 985 struct smap **hpp;
988 986 struct smap *tmp;
989 987 kmutex_t *hmtx;
990 988
991 989 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
992 990 ASSERT(smp->sm_vp == NULL);
993 991 ASSERT(smp->sm_hash == NULL);
994 992 ASSERT(smp->sm_prev == NULL);
995 993 ASSERT(smp->sm_next == NULL);
996 994 ASSERT(hashid >= 0 && hashid <= smd_hashmsk);
997 995
998 996 hmtx = SHASHMTX(hashid);
999 997
1000 998 mutex_enter(hmtx);
1001 999 /*
1002 1000 * First we need to verify that no one has created a smp
1003 1001 * with (vp,off) as its tag before us.
1004 1002 */
1005 1003 for (tmp = smd_hash[hashid].sh_hash_list;
1006 1004 tmp != NULL; tmp = tmp->sm_hash)
1007 1005 if (tmp->sm_vp == vp && tmp->sm_off == off)
1008 1006 break;
1009 1007
1010 1008 if (tmp == NULL) {
1011 1009 /*
1012 1010 * No one created one yet.
1013 1011 *
1014 1012 * Funniness here - we don't increment the ref count on the
1015 1013 * vnode * even though we have another pointer to it here.
1016 1014 * The reason for this is that we don't want the fact that
1017 1015 * a seg_map entry somewhere refers to a vnode to prevent the
1018 1016 * vnode * itself from going away. This is because this
1019 1017 * reference to the vnode is a "soft one". In the case where
1020 1018 * a mapping is being used by a rdwr [or directory routine?]
1021 1019 * there already has to be a non-zero ref count on the vnode.
1022 1020 * In the case where the vp has been freed and the smap
1023 1021 * structure is on the free list, there are no pages in memory
1024 1022 * that can refer to the vnode. Thus even if we reuse the same
1025 1023 * vnode/smap structure for a vnode which has the same
1026 1024 * address but represents a different object, we are ok.
1027 1025 */
1028 1026 smp->sm_vp = vp;
1029 1027 smp->sm_off = off;
1030 1028
1031 1029 hpp = &smd_hash[hashid].sh_hash_list;
1032 1030 smp->sm_hash = *hpp;
1033 1031 *hpp = smp;
1034 1032 #ifdef SEGMAP_HASHSTATS
1035 1033 smd_hash_len[hashid]++;
1036 1034 #endif
1037 1035 }
1038 1036 mutex_exit(hmtx);
1039 1037
1040 1038 return (tmp);
1041 1039 }
1042 1040
1043 1041 static void
1044 1042 segmap_hashout(struct smap *smp)
1045 1043 {
1046 1044 struct smap **hpp, *hp;
1047 1045 struct vnode *vp;
1048 1046 kmutex_t *mtx;
1049 1047 int hashid;
1050 1048 u_offset_t off;
1051 1049
1052 1050 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
1053 1051
1054 1052 vp = smp->sm_vp;
1055 1053 off = smp->sm_off;
1056 1054
1057 1055 SMAP_HASHFUNC(vp, off, hashid); /* macro assigns hashid */
1058 1056 mtx = SHASHMTX(hashid);
1059 1057 mutex_enter(mtx);
1060 1058
1061 1059 hpp = &smd_hash[hashid].sh_hash_list;
1062 1060 for (;;) {
1063 1061 hp = *hpp;
1064 1062 if (hp == NULL) {
1065 1063 panic("segmap_hashout");
1066 1064 /*NOTREACHED*/
1067 1065 }
1068 1066 if (hp == smp)
1069 1067 break;
1070 1068 hpp = &hp->sm_hash;
1071 1069 }
1072 1070
1073 1071 *hpp = smp->sm_hash;
1074 1072 smp->sm_hash = NULL;
1075 1073 #ifdef SEGMAP_HASHSTATS
1076 1074 smd_hash_len[hashid]--;
1077 1075 #endif
1078 1076 mutex_exit(mtx);
1079 1077
1080 1078 smp->sm_vp = NULL;
1081 1079 smp->sm_off = (u_offset_t)0;
1082 1080
1083 1081 }
1084 1082
1085 1083 /*
1086 1084 * Attempt to free unmodified, unmapped, and non locked segmap
1087 1085 * pages.
1088 1086 */
1089 1087 void
1090 1088 segmap_pagefree(struct vnode *vp, u_offset_t off)
1091 1089 {
1092 1090 u_offset_t pgoff;
1093 1091 page_t *pp;
1094 1092
1095 1093 for (pgoff = off; pgoff < off + MAXBSIZE; pgoff += PAGESIZE) {
1096 1094
1097 1095 if ((pp = page_lookup_nowait(vp, pgoff, SE_EXCL)) == NULL)
1098 1096 continue;
1099 1097
1100 1098 switch (page_release(pp, 1)) {
1101 1099 case PGREL_NOTREL:
1102 1100 segmapcnt.smp_free_notfree.value.ul++;
1103 1101 break;
1104 1102 case PGREL_MOD:
1105 1103 segmapcnt.smp_free_dirty.value.ul++;
1106 1104 break;
1107 1105 case PGREL_CLEAN:
1108 1106 segmapcnt.smp_free.value.ul++;
1109 1107 break;
1110 1108 }
1111 1109 }
1112 1110 }
1113 1111
1114 1112 /*
1115 1113 * Locks held on entry: smap lock
1116 1114 * Locks held on exit : smap lock.
1117 1115 */
1118 1116
1119 1117 static void
1120 1118 grab_smp(struct smap *smp, page_t *pp)
1121 1119 {
1122 1120 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
1123 1121 ASSERT(smp->sm_refcnt == 0);
1124 1122
1125 1123 if (smp->sm_vp != (struct vnode *)NULL) {
1126 1124 struct vnode *vp = smp->sm_vp;
1127 1125 u_offset_t off = smp->sm_off;
1128 1126 /*
1129 1127 * Destroy old vnode association and
1130 1128 * unload any hardware translations to
1131 1129 * the old object.
1132 1130 */
1133 1131 smd_cpu[CPU->cpu_seqid].scpu.scpu_get_reuse++;
1134 1132 segmap_hashout(smp);
1135 1133
1136 1134 /*
1137 1135 * This node is off freelist and hashlist,
1138 1136 * so there is no reason to drop/reacquire sm_mtx
1139 1137 * across calls to hat_unload.
1140 1138 */
1141 1139 if (segmap_kpm) {
1142 1140 caddr_t vaddr;
1143 1141 int hat_unload_needed = 0;
1144 1142
1145 1143 /*
1146 1144 * unload kpm mapping
1147 1145 */
1148 1146 if (pp != NULL) {
1149 1147 vaddr = hat_kpm_page2va(pp, 1);
1150 1148 hat_kpm_mapout(pp, GET_KPME(smp), vaddr);
1151 1149 page_unlock(pp);
1152 1150 }
1153 1151
1154 1152 /*
1155 1153 * Check if we have (also) the rare case of a
1156 1154 * non kpm mapping.
1157 1155 */
1158 1156 if (smp->sm_flags & SM_NOTKPM_RELEASED) {
1159 1157 hat_unload_needed = 1;
1160 1158 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
1161 1159 }
1162 1160
1163 1161 if (hat_unload_needed) {
1164 1162 hat_unload(kas.a_hat, segkmap->s_base +
1165 1163 ((smp - smd_smap) * MAXBSIZE),
1166 1164 MAXBSIZE, HAT_UNLOAD);
1167 1165 }
1168 1166
1169 1167 } else {
1170 1168 ASSERT(smp->sm_flags & SM_NOTKPM_RELEASED);
1171 1169 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
1172 1170 hat_unload(kas.a_hat, segkmap->s_base +
1173 1171 ((smp - smd_smap) * MAXBSIZE),
1174 1172 MAXBSIZE, HAT_UNLOAD);
1175 1173 }
1176 1174 segmap_pagefree(vp, off);
1177 1175 }
1178 1176 }
1179 1177
1180 1178 static struct smap *
1181 1179 get_free_smp(int free_ndx)
1182 1180 {
1183 1181 struct smfree *sm;
1184 1182 kmutex_t *smtx;
1185 1183 struct smap *smp, *first;
1186 1184 struct sm_freeq *allocq, *releq;
1187 1185 struct kpme *kpme;
1188 1186 page_t *pp = NULL;
1189 1187 int end_ndx, page_locked = 0;
1190 1188
1191 1189 end_ndx = free_ndx;
1192 1190 sm = &smd_free[free_ndx];
1193 1191
1194 1192 retry_queue:
1195 1193 allocq = sm->sm_allocq;
1196 1194 mutex_enter(&allocq->smq_mtx);
1197 1195
1198 1196 if ((smp = allocq->smq_free) == NULL) {
1199 1197
1200 1198 skip_queue:
1201 1199 /*
1202 1200 * The alloc list is empty or this queue is being skipped;
1203 1201 * first see if the allocq toggled.
1204 1202 */
1205 1203 if (sm->sm_allocq != allocq) {
1206 1204 /* queue changed */
1207 1205 mutex_exit(&allocq->smq_mtx);
1208 1206 goto retry_queue;
1209 1207 }
1210 1208 releq = sm->sm_releq;
1211 1209 if (!mutex_tryenter(&releq->smq_mtx)) {
1212 1210 /* cannot get releq; a free smp may be there now */
1213 1211 mutex_exit(&allocq->smq_mtx);
1214 1212
1215 1213 /*
1216 1214 * This loop could spin forever if this thread has
1217 1215 * higher priority than the thread that is holding
1218 1216 * releq->smq_mtx. In order to force the other thread
1219 1217 * to run, we'll lock/unlock the mutex which is safe
1220 1218 * since we just unlocked the allocq mutex.
1221 1219 */
1222 1220 mutex_enter(&releq->smq_mtx);
1223 1221 mutex_exit(&releq->smq_mtx);
1224 1222 goto retry_queue;
1225 1223 }
1226 1224 if (releq->smq_free == NULL) {
1227 1225 /*
1228 1226 * This freelist is empty.
1229 1227 * This should not happen unless clients
1230 1228 * are failing to release the segmap
1231 1229 * window after accessing the data.
1232 1230 * Before resorting to sleeping, try
1233 1231 * the next list of the same color.
1234 1232 */
1235 1233 free_ndx = (free_ndx + smd_ncolor) & smd_freemsk;
1236 1234 if (free_ndx != end_ndx) {
1237 1235 mutex_exit(&releq->smq_mtx);
1238 1236 mutex_exit(&allocq->smq_mtx);
1239 1237 sm = &smd_free[free_ndx];
1240 1238 goto retry_queue;
1241 1239 }
1242 1240 /*
1243 1241 * Tried all freelists of the same color once,
1244 1242 * wait on this list and hope something gets freed.
1245 1243 */
1246 1244 segmapcnt.smp_get_nofree.value.ul++;
1247 1245 sm->sm_want++;
1248 1246 mutex_exit(&sm->sm_freeq[1].smq_mtx);
1249 1247 cv_wait(&sm->sm_free_cv,
1250 1248 &sm->sm_freeq[0].smq_mtx);
1251 1249 sm->sm_want--;
1252 1250 mutex_exit(&sm->sm_freeq[0].smq_mtx);
1253 1251 sm = &smd_free[free_ndx];
1254 1252 goto retry_queue;
1255 1253 } else {
1256 1254 /*
1257 1255 * Something on the rele queue; flip the alloc
1258 1256 * and rele queues and retry.
1259 1257 */
1260 1258 sm->sm_allocq = releq;
1261 1259 sm->sm_releq = allocq;
1262 1260 mutex_exit(&allocq->smq_mtx);
1263 1261 mutex_exit(&releq->smq_mtx);
1264 1262 if (page_locked) {
1265 1263 delay(hz >> 2);
1266 1264 page_locked = 0;
1267 1265 }
1268 1266 goto retry_queue;
1269 1267 }
1270 1268 } else {
1271 1269 /*
1272 1270 * Fastpath the case we get the smap mutex
1273 1271 * on the first try.
1274 1272 */
1275 1273 first = smp;
1276 1274 next_smap:
1277 1275 smtx = SMAPMTX(smp);
1278 1276 if (!mutex_tryenter(smtx)) {
1279 1277 /*
1280 1278 * Another thread is trying to reclaim this slot.
1281 1279 * Skip to the next queue or smap.
1282 1280 */
1283 1281 if ((smp = smp->sm_next) == first) {
1284 1282 goto skip_queue;
1285 1283 } else {
1286 1284 goto next_smap;
1287 1285 }
1288 1286 } else {
1289 1287 /*
1290 1288 * if kpme exists, get shared lock on the page
1291 1289 */
1292 1290 if (segmap_kpm && smp->sm_vp != NULL) {
1293 1291
1294 1292 kpme = GET_KPME(smp);
1295 1293 pp = kpme->kpe_page;
1296 1294
1297 1295 if (pp != NULL) {
1298 1296 if (!page_trylock(pp, SE_SHARED)) {
1299 1297 smp = smp->sm_next;
1300 1298 mutex_exit(smtx);
1301 1299 page_locked = 1;
1302 1300
1303 1301 pp = NULL;
1304 1302
1305 1303 if (smp == first) {
1306 1304 goto skip_queue;
1307 1305 } else {
1308 1306 goto next_smap;
1309 1307 }
1310 1308 } else {
1311 1309 if (kpme->kpe_page == NULL) {
1312 1310 page_unlock(pp);
1313 1311 pp = NULL;
1314 1312 }
1315 1313 }
1316 1314 }
1317 1315 }
1318 1316
1319 1317 /*
1320 1318 * At this point, we've selected smp. Remove smp
1321 1319 * from its freelist. If smp is the first one in
1322 1320 * the freelist, update the head of the freelist.
1323 1321 */
1324 1322 if (first == smp) {
1325 1323 ASSERT(first == allocq->smq_free);
1326 1324 allocq->smq_free = smp->sm_next;
1327 1325 }
1328 1326
1329 1327 /*
1330 1328 * if the head of the freelist still points to smp,
1331 1329 * then there are no more free smaps in that list.
1332 1330 */
1333 1331 if (allocq->smq_free == smp)
1334 1332 /*
1335 1333 * Took the last one
1336 1334 */
1337 1335 allocq->smq_free = NULL;
1338 1336 else {
1339 1337 smp->sm_prev->sm_next = smp->sm_next;
1340 1338 smp->sm_next->sm_prev = smp->sm_prev;
1341 1339 }
1342 1340 mutex_exit(&allocq->smq_mtx);
1343 1341 smp->sm_prev = smp->sm_next = NULL;
1344 1342
1345 1343 /*
1346 1344 * if pp != NULL, pp must have been locked;
1347 1345 * grab_smp() unlocks pp.
1348 1346 */
1349 1347 ASSERT((pp == NULL) || PAGE_LOCKED(pp));
1350 1348 grab_smp(smp, pp);
1351 1349 /* return smp locked. */
1352 1350 ASSERT(SMAPMTX(smp) == smtx);
1353 1351 ASSERT(MUTEX_HELD(smtx));
1354 1352 return (smp);
1355 1353 }
1356 1354 }
1357 1355 }
1358 1356
1359 1357 /*
1360 1358 * Special public segmap operations
1361 1359 */
1362 1360
1363 1361 /*
1364 1362 * Create pages (without using VOP_GETPAGE) and load up translations to them.
1365 1363 * If softlock is TRUE, then set things up so that it looks like a call
1366 1364 * to segmap_fault with F_SOFTLOCK.
1367 1365 *
1368 1366 * Returns 1, if a page is created by calling page_create_va(), or 0 otherwise.
1369 1367 *
1370 1368 * All fields in the generic segment (struct seg) are considered to be
1371 1369 * read-only for "segmap" even though the kernel address space (kas) may
1372 1370 * not be locked, hence no lock is needed to access them.
1373 1371 */
1374 1372 int
1375 1373 segmap_pagecreate(struct seg *seg, caddr_t addr, size_t len, int softlock)
1376 1374 {
1377 1375 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
1378 1376 page_t *pp;
1379 1377 u_offset_t off;
1380 1378 struct smap *smp;
1381 1379 struct vnode *vp;
1382 1380 caddr_t eaddr;
1383 1381 int newpage = 0;
1384 1382 uint_t prot;
1385 1383 kmutex_t *smtx;
1386 1384 int hat_flag;
1387 1385
1388 1386 ASSERT(seg->s_as == &kas);
1389 1387
1390 1388 if (segmap_kpm && IS_KPM_ADDR(addr)) {
1391 1389 /*
1392 1390 * Pages are successfully prefaulted and locked in
1393 1391 * segmap_getmapflt and can't be unlocked until
1394 1392 * segmap_release. The SM_KPM_NEWPAGE flag is set
1395 1393 * in segmap_pagecreate_kpm when new pages are created.
1396 1394 * in segmap_pagecreate_kpm when new pages are created,
1397 1395 * and it is returned as the "newpage" indication here.
1398 1396 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
1399 1397 panic("segmap_pagecreate: smap not found "
1400 1398 "for addr %p", (void *)addr);
1401 1399 /*NOTREACHED*/
1402 1400 }
1403 1401
1404 1402 smtx = SMAPMTX(smp);
1405 1403 newpage = smp->sm_flags & SM_KPM_NEWPAGE;
1406 1404 smp->sm_flags &= ~SM_KPM_NEWPAGE;
1407 1405 mutex_exit(smtx);
1408 1406
1409 1407 return (newpage);
1410 1408 }
1411 1409
1412 1410 smd_cpu[CPU->cpu_seqid].scpu.scpu_pagecreate++;
1413 1411
1414 1412 eaddr = addr + len;
1415 1413 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1416 1414
1417 1415 smp = GET_SMAP(seg, addr);
1418 1416
1419 1417 /*
1420 1418 * We don't grab smp mutex here since we assume the smp
1421 1419 * has a refcnt set already which prevents the slot from
1422 1420 * changing its id.
1423 1421 */
1424 1422 ASSERT(smp->sm_refcnt > 0);
1425 1423
1426 1424 vp = smp->sm_vp;
1427 1425 off = smp->sm_off + ((u_offset_t)((uintptr_t)addr & MAXBOFFSET));
1428 1426 prot = smd->smd_prot;
1429 1427
1430 1428 for (; addr < eaddr; addr += PAGESIZE, off += PAGESIZE) {
1431 1429 hat_flag = HAT_LOAD;
1432 1430 pp = page_lookup(vp, off, SE_SHARED);
1433 1431 if (pp == NULL) {
1434 1432 ushort_t bitindex;
1435 1433
1436 1434 if ((pp = page_create_va(vp, off,
1437 1435 PAGESIZE, PG_WAIT, seg, addr)) == NULL) {
1438 1436 panic("segmap_pagecreate: page_create failed");
1439 1437 /*NOTREACHED*/
1440 1438 }
1441 1439 newpage = 1;
1442 1440 page_io_unlock(pp);
1443 1441
1444 1442 /*
1445 1443 * Since pages created here do not contain valid
1446 1444 * data until the caller writes into them, the
1447 1445 * "exclusive" lock will not be dropped to prevent
1448 1446 * other users from accessing the page. We also
1449 1447 * have to lock the translation to prevent a fault
1450 1448 * from occurring when the virtual address mapped by
1451 1449 * this page is written into. This is necessary to
1452 1450 * avoid a deadlock since we haven't dropped the
1453 1451 * "exclusive" lock.
1454 1452 */
1455 1453 bitindex = (ushort_t)((off - smp->sm_off) >> PAGESHIFT);
1456 1454
1457 1455 /*
1458 1456 * Large Files: The following assertion is to
1459 1457 * verify the cast above.
1460 1458 */
1461 1459 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
1462 1460 smtx = SMAPMTX(smp);
1463 1461 mutex_enter(smtx);
1464 1462 smp->sm_bitmap |= SMAP_BIT_MASK(bitindex);
1465 1463 mutex_exit(smtx);
1466 1464
1467 1465 hat_flag = HAT_LOAD_LOCK;
1468 1466 } else if (softlock) {
1469 1467 hat_flag = HAT_LOAD_LOCK;
1470 1468 }
1471 1469
1472 1470 if (IS_VMODSORT(pp->p_vnode) && (prot & PROT_WRITE))
1473 1471 hat_setmod(pp);
1474 1472
1475 1473 hat_memload(kas.a_hat, addr, pp, prot, hat_flag);
1476 1474
1477 1475 if (hat_flag != HAT_LOAD_LOCK)
1478 1476 page_unlock(pp);
1479 1477
1480 1478 TRACE_5(TR_FAC_VM, TR_SEGMAP_PAGECREATE,
1481 1479 "segmap_pagecreate:seg %p addr %p pp %p vp %p offset %llx",
1482 1480 seg, addr, pp, vp, off);
1483 1481 }
1484 1482
1485 1483 return (newpage);
1486 1484 }
1487 1485
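For context, segmap_pagecreate()/segmap_pageunlock() are used by filesystem write paths that will overwrite whole pages, so the read through VOP_GETPAGE can be skipped. A simplified, hedged sketch of that pattern (assuming off is MAXBSIZE aligned, n stays within one window, and the usual headers such as <sys/uio.h> are available; the release-flag policy here is illustrative):

static int
write_whole_block_sketch(struct vnode *vp, u_offset_t off, size_t n,
    struct uio *uio)
{
	caddr_t base;
	int error;

	base = segmap_getmapflt(segkmap, vp, off, n, 0, S_WRITE);
	(void) segmap_pagecreate(segkmap, base, n, 0);	/* no VOP_GETPAGE */
	error = uiomove(base, n, UIO_WRITE, uio);
	segmap_pageunlock(segkmap, base, n, S_WRITE);	/* drop page/hat locks */
	if (error == 0)
		error = segmap_release(segkmap, base, SM_WRITE);
	else
		(void) segmap_release(segkmap, base, SM_INVAL);
	return (error);
}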
1488 1486 void
1489 1487 segmap_pageunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
1490 1488 {
1491 1489 struct smap *smp;
1492 1490 ushort_t bitmask;
1493 1491 page_t *pp;
1494 1492 struct vnode *vp;
1495 1493 u_offset_t off;
1496 1494 caddr_t eaddr;
1497 1495 kmutex_t *smtx;
1498 1496
1499 1497 ASSERT(seg->s_as == &kas);
1500 1498
1501 1499 eaddr = addr + len;
1502 1500 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1503 1501
1504 1502 if (segmap_kpm && IS_KPM_ADDR(addr)) {
1505 1503 /*
1506 1504 * Pages are successfully prefaulted and locked in
1507 1505 * segmap_getmapflt and can't be unlocked until
1508 1506 * segmap_release, so no pages or hat mappings have
1509 1507 * to be unlocked at this point.
1510 1508 */
1511 1509 #ifdef DEBUG
1512 1510 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
1513 1511 panic("segmap_pageunlock: smap not found "
1514 1512 "for addr %p", (void *)addr);
1515 1513 /*NOTREACHED*/
1516 1514 }
1517 1515
1518 1516 ASSERT(smp->sm_refcnt > 0);
1519 1517 mutex_exit(SMAPMTX(smp));
1520 1518 #endif
1521 1519 return;
1522 1520 }
1523 1521
1524 1522 smp = GET_SMAP(seg, addr);
1525 1523 smtx = SMAPMTX(smp);
1526 1524
1527 1525 ASSERT(smp->sm_refcnt > 0);
1528 1526
1529 1527 vp = smp->sm_vp;
1530 1528 off = smp->sm_off + ((u_offset_t)((uintptr_t)addr & MAXBOFFSET));
1531 1529
1532 1530 for (; addr < eaddr; addr += PAGESIZE, off += PAGESIZE) {
1533 1531 bitmask = SMAP_BIT_MASK((int)(off - smp->sm_off) >> PAGESHIFT);
1534 1532
1535 1533 /*
1536 1534 * Large Files: Following assertion is to verify
1537 1535 * the correctness of the cast to (int) above.
1538 1536 */
1539 1537 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
1540 1538
1541 1539 /*
1542 1540 * If the bit corresponding to "off" is set,
1543 1541 * clear this bit in the bitmap, unlock translations,
1544 1542 * and release the "exclusive" lock on the page.
1545 1543 */
1546 1544 if (smp->sm_bitmap & bitmask) {
1547 1545 mutex_enter(smtx);
1548 1546 smp->sm_bitmap &= ~bitmask;
1549 1547 mutex_exit(smtx);
1550 1548
1551 1549 hat_unlock(kas.a_hat, addr, PAGESIZE);
1552 1550
1553 1551 /*
1554 1552 * Use page_find() instead of page_lookup() to
1555 1553 * find the page since we know that it has
1556 1554 * "exclusive" lock.
1557 1555 */
1558 1556 pp = page_find(vp, off);
1559 1557 if (pp == NULL) {
1560 1558 panic("segmap_pageunlock: page not found");
1561 1559 /*NOTREACHED*/
1562 1560 }
1563 1561 if (rw == S_WRITE) {
1564 1562 hat_setrefmod(pp);
1565 1563 } else if (rw != S_OTHER) {
1566 1564 hat_setref(pp);
1567 1565 }
1568 1566
1569 1567 page_unlock(pp);
1570 1568 }
1571 1569 }
1572 1570 }
1573 1571
1574 1572 caddr_t
1575 1573 segmap_getmap(struct seg *seg, struct vnode *vp, u_offset_t off)
1576 1574 {
1577 1575 return (segmap_getmapflt(seg, vp, off, MAXBSIZE, 0, S_OTHER));
1578 1576 }
1579 1577
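A hedged sketch of the common consumer pattern for segmap_getmap()/segmap_release() on the read side (illustrative only; n must stay within the MAXBSIZE window containing off, and <sys/uio.h> is assumed):

static int
read_block_sketch(struct vnode *vp, u_offset_t off, size_t n, struct uio *uio)
{
	caddr_t base;
	int error;

	base = segmap_getmap(segkmap, vp, off & (offset_t)MAXBMASK);
	error = uiomove(base + (off & MAXBOFFSET), n, UIO_READ, uio);
	(void) segmap_release(segkmap, base, 0);	/* 0: no forced write-back */
	return (error);
}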
1580 1578 /*
1581 1579 * This is the magic virtual address that offset 0 of an ELF
1582 1580 * file gets mapped to in user space. This is used to pick
1583 1581 * the vac color on the freelist.
1584 1582 */
1585 1583 #define ELF_OFFZERO_VA (0x10000)
1586 1584 /*
1587 1585 * segmap_getmap allocates a MAXBSIZE big slot to map the vnode vp
1588 1586 * in the range <off, off + len). off doesn't need to be MAXBSIZE aligned.
1589 1587 * The return address is always MAXBSIZE aligned.
1590 1588 *
1591 1589 * If forcefault is nonzero and the MMU translations haven't yet been created,
1592 1590 * segmap_getmap will call segmap_fault(..., F_INVAL, rw) to create them.
1593 1591 */
1594 1592 caddr_t
1595 1593 segmap_getmapflt(
1596 1594 struct seg *seg,
1597 1595 struct vnode *vp,
1598 1596 u_offset_t off,
1599 1597 size_t len,
1600 1598 int forcefault,
1601 1599 enum seg_rw rw)
1602 1600 {
1603 1601 struct smap *smp, *nsmp;
1604 1602 extern struct vnode *common_specvp();
1605 1603 caddr_t baseaddr; /* MAXBSIZE aligned */
1606 1604 u_offset_t baseoff;
1607 1605 int newslot;
1608 1606 caddr_t vaddr;
1609 1607 int color, hashid;
1610 1608 kmutex_t *hashmtx, *smapmtx;
1611 1609 struct smfree *sm;
1612 1610 page_t *pp;
1613 1611 struct kpme *kpme;
1614 1612 uint_t prot;
1615 1613 caddr_t base;
1616 1614 page_t *pl[MAXPPB + 1];
1617 1615 int error;
1618 1616 int is_kpm = 1;
1619 1617
1620 1618 ASSERT(seg->s_as == &kas);
1621 1619 ASSERT(seg == segkmap);
1622 1620
1623 1621 baseoff = off & (offset_t)MAXBMASK;
1624 1622 if (off + len > baseoff + MAXBSIZE) {
1625 1623 panic("segmap_getmap bad len");
1626 1624 /*NOTREACHED*/
1627 1625 }
1628 1626
1629 1627 /*
1630 1628 * If this is a block device we have to be sure to use the
1631 1629 * "common" block device vnode for the mapping.
1632 1630 */
1633 1631 if (vp->v_type == VBLK)
1634 1632 vp = common_specvp(vp);
1635 1633
1636 1634 smd_cpu[CPU->cpu_seqid].scpu.scpu_getmap++;
1637 1635
1638 1636 if (segmap_kpm == 0 ||
1639 1637 (forcefault == SM_PAGECREATE && rw != S_WRITE)) {
1640 1638 is_kpm = 0;
1641 1639 }
1642 1640
1643 1641 SMAP_HASHFUNC(vp, off, hashid); /* macro assigns hashid */
1644 1642 hashmtx = SHASHMTX(hashid);
1645 1643
1646 1644 retry_hash:
1647 1645 mutex_enter(hashmtx);
1648 1646 for (smp = smd_hash[hashid].sh_hash_list;
1649 1647 smp != NULL; smp = smp->sm_hash)
1650 1648 if (smp->sm_vp == vp && smp->sm_off == baseoff)
1651 1649 break;
1652 1650 mutex_exit(hashmtx);
1653 1651
1654 1652 vrfy_smp:
1655 1653 if (smp != NULL) {
1656 1654
1657 1655 ASSERT(vp->v_count != 0);
1658 1656
1659 1657 /*
1660 1658 * Get smap lock and recheck its tag. The hash lock
1661 1659 * is dropped since the hash is based on (vp, off)
1662 1660 * and (vp, off) won't change when we have smap mtx.
1663 1661 */
1664 1662 smapmtx = SMAPMTX(smp);
1665 1663 mutex_enter(smapmtx);
1666 1664 if (smp->sm_vp != vp || smp->sm_off != baseoff) {
1667 1665 mutex_exit(smapmtx);
1668 1666 goto retry_hash;
1669 1667 }
1670 1668
1671 1669 if (smp->sm_refcnt == 0) {
1672 1670
1673 1671 smd_cpu[CPU->cpu_seqid].scpu.scpu_get_reclaim++;
1674 1672
1675 1673 /*
1676 1674 * Could still be on the free list. However, this
1677 1675 * could also be an smp that is transitioning from
1678 1676 * the free list when we have too much contention
1679 1677 * for the smapmtx's. In this case, we have an
1680 1678 * unlocked smp that is not on the free list any
1681 1679 * longer, but still has a 0 refcnt. The only way
1682 1680 * to be sure is to check the freelist pointers.
1683 1681 * Since we now have the smapmtx, we are guaranteed
1684 1682 * that the (vp, off) won't change, so we are safe
1685 1683 * to reclaim it. get_free_smp() knows that this
1686 1684 * can happen, and it will check the refcnt.
1687 1685 */
1688 1686
1689 1687 if ((smp->sm_next != NULL)) {
1690 1688 struct sm_freeq *freeq;
1691 1689
1692 1690 ASSERT(smp->sm_prev != NULL);
1693 1691 sm = &smd_free[smp->sm_free_ndx];
1694 1692
1695 1693 if (smp->sm_flags & SM_QNDX_ZERO)
1696 1694 freeq = &sm->sm_freeq[0];
1697 1695 else
1698 1696 freeq = &sm->sm_freeq[1];
1699 1697
1700 1698 mutex_enter(&freeq->smq_mtx);
1701 1699 if (freeq->smq_free != smp) {
1702 1700 /*
1703 1701 * fastpath normal case
1704 1702 */
1705 1703 smp->sm_prev->sm_next = smp->sm_next;
1706 1704 smp->sm_next->sm_prev = smp->sm_prev;
1707 1705 } else if (smp == smp->sm_next) {
1708 1706 /*
1709 1707 * Taking the last smap on freelist
1710 1708 */
1711 1709 freeq->smq_free = NULL;
1712 1710 } else {
1713 1711 /*
1714 1712 * Reclaiming 1st smap on list
1715 1713 */
1716 1714 freeq->smq_free = smp->sm_next;
1717 1715 smp->sm_prev->sm_next = smp->sm_next;
1718 1716 smp->sm_next->sm_prev = smp->sm_prev;
1719 1717 }
1720 1718 mutex_exit(&freeq->smq_mtx);
1721 1719 smp->sm_prev = smp->sm_next = NULL;
1722 1720 } else {
1723 1721 ASSERT(smp->sm_prev == NULL);
1724 1722 segmapcnt.smp_stolen.value.ul++;
1725 1723 }
1726 1724
1727 1725 } else {
1728 1726 segmapcnt.smp_get_use.value.ul++;
1729 1727 }
1730 1728 smp->sm_refcnt++; /* another user */
1731 1729
1732 1730 /*
1733 1731 * We don't invoke segmap_fault via TLB miss, so we set ref
1734 1732 * and mod bits in advance. For S_OTHER we set them in
1735 1733 * segmap_fault F_SOFTUNLOCK.
1736 1734 */
1737 1735 if (is_kpm) {
1738 1736 if (rw == S_WRITE) {
1739 1737 smp->sm_flags |= SM_WRITE_DATA;
1740 1738 } else if (rw == S_READ) {
1741 1739 smp->sm_flags |= SM_READ_DATA;
1742 1740 }
1743 1741 }
1744 1742 mutex_exit(smapmtx);
1745 1743
1746 1744 newslot = 0;
1747 1745 } else {
1748 1746
1749 1747 uint32_t free_ndx, *free_ndxp;
1750 1748 union segmap_cpu *scpu;
1751 1749
1752 1750 /*
1753 1751 * On a PAC machine or a machine with anti-alias
1754 1752 * hardware, smd_colormsk will be zero.
1755 1753 *
1756 1754 * On a VAC machine, pick color by offset in the file
1757 1755 * so we won't get VAC conflicts on elf files.
1758 1756 * On data files, color does not matter but we
1759 1757 * don't know what kind of file it is so we always
1760 1758 * pick color by offset. This causes the color
1761 1759 * corresponding to file offset zero to be used more
1762 1760 * heavily.
1763 1761 */
1764 1762 color = (baseoff >> MAXBSHIFT) & smd_colormsk;
1765 1763 scpu = smd_cpu+CPU->cpu_seqid;
1766 1764 free_ndxp = &scpu->scpu.scpu_free_ndx[color];
1767 1765 free_ndx = (*free_ndxp += smd_ncolor) & smd_freemsk;
1768 1766 #ifdef DEBUG
1769 1767 colors_used[free_ndx]++;
1770 1768 #endif /* DEBUG */
1771 1769
1772 1770 /*
1773 1771 * Get a locked smp slot from the free list.
1774 1772 */
1775 1773 smp = get_free_smp(free_ndx);
1776 1774 smapmtx = SMAPMTX(smp);
1777 1775
1778 1776 ASSERT(smp->sm_vp == NULL);
1779 1777
1780 1778 if ((nsmp = segmap_hashin(smp, vp, baseoff, hashid)) != NULL) {
1781 1779 /*
1782 1780 * Failed to hash in; an smap for (vp, off) now exists.
1783 1781 * Return the smp we just allocated to the free list.
1784 1782 */
1785 1783 segmap_smapadd(smp);
1786 1784 mutex_exit(smapmtx);
1787 1785
1788 1786 smp = nsmp;
1789 1787 goto vrfy_smp;
1790 1788 }
1791 1789 smp->sm_refcnt++; /* another user */
1792 1790
1793 1791 /*
1794 1792 * We don't invoke segmap_fault via TLB miss, so we set ref
1795 1793 * and mod bits in advance. For S_OTHER we set them in
1796 1794 * segmap_fault F_SOFTUNLOCK.
1797 1795 */
1798 1796 if (is_kpm) {
1799 1797 if (rw == S_WRITE) {
1800 1798 smp->sm_flags |= SM_WRITE_DATA;
1801 1799 } else if (rw == S_READ) {
1802 1800 smp->sm_flags |= SM_READ_DATA;
1803 1801 }
1804 1802 }
1805 1803 mutex_exit(smapmtx);
1806 1804
1807 1805 newslot = 1;
1808 1806 }
1809 1807
1810 1808 if (!is_kpm)
1811 1809 goto use_segmap_range;
1812 1810
1813 1811 /*
1814 1812 * Use segkpm
1815 1813 */
1816 1814 /* Lint directive required until 6746211 is fixed */
1817 1815 /*CONSTCOND*/
1818 1816 ASSERT(PAGESIZE == MAXBSIZE);
1819 1817
1820 1818 /*
1821 1819 * remember the last smp faulted on this cpu.
1822 1820 */
1823 1821 (smd_cpu+CPU->cpu_seqid)->scpu.scpu_last_smap = smp;
1824 1822
1825 1823 if (forcefault == SM_PAGECREATE) {
1826 1824 baseaddr = segmap_pagecreate_kpm(seg, vp, baseoff, smp, rw);
1827 1825 return (baseaddr);
1828 1826 }
1829 1827
1830 1828 if (newslot == 0 &&
1831 1829 (pp = GET_KPME(smp)->kpe_page) != NULL) {
1832 1830
1833 1831 /* fastpath */
1834 1832 switch (rw) {
1835 1833 case S_READ:
1836 1834 case S_WRITE:
1837 1835 if (page_trylock(pp, SE_SHARED)) {
1838 1836 if (PP_ISFREE(pp) ||
1839 1837 !(pp->p_vnode == vp &&
1840 1838 pp->p_offset == baseoff)) {
1841 1839 page_unlock(pp);
1842 1840 pp = page_lookup(vp, baseoff,
1843 1841 SE_SHARED);
1844 1842 }
1845 1843 } else {
1846 1844 pp = page_lookup(vp, baseoff, SE_SHARED);
1847 1845 }
1848 1846
1849 1847 if (pp == NULL) {
1850 1848 ASSERT(GET_KPME(smp)->kpe_page == NULL);
1851 1849 break;
1852 1850 }
1853 1851
1854 1852 if (rw == S_WRITE &&
1855 1853 hat_page_getattr(pp, P_MOD | P_REF) !=
1856 1854 (P_MOD | P_REF)) {
1857 1855 page_unlock(pp);
1858 1856 break;
1859 1857 }
1860 1858
1861 1859 /*
1862 1860 * We have the p_selock as reader, grab_smp
1863 1861 * can't hit us, we have bumped the smap
1864 1862 * refcnt and hat_pageunload needs the
1865 1863 * p_selock exclusive.
1866 1864 */
1867 1865 kpme = GET_KPME(smp);
1868 1866 if (kpme->kpe_page == pp) {
1869 1867 baseaddr = hat_kpm_page2va(pp, 0);
1870 1868 } else if (kpme->kpe_page == NULL) {
1871 1869 baseaddr = hat_kpm_mapin(pp, kpme);
1872 1870 } else {
1873 1871 panic("segmap_getmapflt: stale "
1874 1872 "kpme page, kpme %p", (void *)kpme);
1875 1873 /*NOTREACHED*/
1876 1874 }
1877 1875
1878 1876 /*
1879 1877 * We don't invoke segmap_fault via TLB miss,
1880 1878 * so we set ref and mod bits in advance.
1881 1879 * For S_OTHER we set them in segmap_fault
1882 1880 * F_SOFTUNLOCK.
1883 1881 */
1884 1882 if (rw == S_READ && !hat_isref(pp))
1885 1883 hat_setref(pp);
1886 1884
1887 1885 return (baseaddr);
1888 1886 default:
1889 1887 break;
1890 1888 }
1891 1889 }
1892 1890
1893 1891 base = segkpm_create_va(baseoff);
1894 1892 error = VOP_GETPAGE(vp, (offset_t)baseoff, len, &prot, pl, MAXBSIZE,
1895 1893 seg, base, rw, CRED(), NULL);
1896 1894
1897 1895 pp = pl[0];
1898 1896 if (error || pp == NULL) {
1899 1897 /*
1900 1898 * Use segmap address slot and let segmap_fault deal
1901 1899 * with the error cases. There is no error return
1902 1900 * possible here.
1903 1901 */
1904 1902 goto use_segmap_range;
1905 1903 }
1906 1904
1907 1905 ASSERT(pl[1] == NULL);
1908 1906
1909 1907 /*
1910 1908 * When prot is not returned w/ PROT_ALL the returned pages
1911 1909 * are not backed by fs blocks. For most of the segmap users
1912 1910 * this is no problem, they don't write to the pages in the
1913 1911 * same request and therefore don't rely on a following
1914 1912 * trap driven segmap_fault. With SM_LOCKPROTO users it
1915 1913 * is more secure to use segkmap addresses to allow
1916 1914 * protection segmap_fault's.
1917 1915 */
1918 1916 if (prot != PROT_ALL && forcefault == SM_LOCKPROTO) {
1919 1917 /*
1920 1918 * Use segmap address slot and let segmap_fault
1921 1919 * do the error return.
1922 1920 */
1923 1921 ASSERT(rw != S_WRITE);
1924 1922 ASSERT(PAGE_LOCKED(pp));
1925 1923 page_unlock(pp);
1926 1924 forcefault = 0;
1927 1925 goto use_segmap_range;
1928 1926 }
1929 1927
1930 1928 /*
1931 1929 * We have the p_selock as reader, grab_smp can't hit us, we
1932 1930 * have bumped the smap refcnt and hat_pageunload needs the
1933 1931 * p_selock exclusive.
1934 1932 */
1935 1933 kpme = GET_KPME(smp);
1936 1934 if (kpme->kpe_page == pp) {
1937 1935 baseaddr = hat_kpm_page2va(pp, 0);
1938 1936 } else if (kpme->kpe_page == NULL) {
1939 1937 baseaddr = hat_kpm_mapin(pp, kpme);
1940 1938 } else {
1941 1939 panic("segmap_getmapflt: stale kpme page after "
1942 1940 "VOP_GETPAGE, kpme %p", (void *)kpme);
1943 1941 /*NOTREACHED*/
1944 1942 }
1945 1943
1946 1944 smd_cpu[CPU->cpu_seqid].scpu.scpu_fault++;
1947 1945
1948 1946 return (baseaddr);
1949 1947
1950 1948
1951 1949 use_segmap_range:
1952 1950 baseaddr = seg->s_base + ((smp - smd_smap) * MAXBSIZE);
1953 1951 TRACE_4(TR_FAC_VM, TR_SEGMAP_GETMAP,
1954 1952 "segmap_getmap:seg %p addr %p vp %p offset %llx",
1955 1953 seg, baseaddr, vp, baseoff);
1956 1954
1957 1955 /*
1958 1956 * Prefault the translations
1959 1957 */
1960 1958 vaddr = baseaddr + (off - baseoff);
1961 1959 if (forcefault && (newslot || !hat_probe(kas.a_hat, vaddr))) {
1962 1960
1963 1961 caddr_t pgaddr = (caddr_t)((uintptr_t)vaddr &
1964 1962 (uintptr_t)PAGEMASK);
1965 1963
1966 1964 (void) segmap_fault(kas.a_hat, seg, pgaddr,
1967 1965 (vaddr + len - pgaddr + PAGESIZE - 1) & (uintptr_t)PAGEMASK,
1968 1966 F_INVAL, rw);
1969 1967 }
1970 1968
1971 1969 return (baseaddr);
1972 1970 }
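
Editor's note: the VAC color selection commented inside segmap_getmapflt() above can be hard to picture from the kernel code alone. Below is a small stand-alone worked example of the same arithmetic: the color is derived from the MAXBSIZE-aligned file offset, and a per-CPU rotor for that color is then advanced by the number of colors, so consecutive requests of the same color are spread across the free lists. The constants here (an 8K MAXBSIZE, 4 colors, 16 free lists) and names (NCOLORS, rotor) are illustrative stand-ins only; the real values come from the platform hat layer when the segment is created.

    #include <stdio.h>

    #define MAXBSHIFT       13                      /* MAXBSIZE == 8K in this example */
    #define NCOLORS         4                       /* stands in for smd_ncolor */
    #define COLORMSK        (NCOLORS - 1)           /* stands in for smd_colormsk */
    #define NFREELISTS      16
    #define FREEMSK         (NFREELISTS - 1)        /* stands in for smd_freemsk */

    int
    main(void)
    {
            unsigned rotor[NCOLORS] = { 0 };        /* per-"cpu" scpu_free_ndx[] */
            unsigned free_ndx;
            long long off;
            int color;

            for (off = 0; off < 6 * 8192; off += 8192) {
                    color = (int)((off >> MAXBSHIFT) & COLORMSK);

                    /* advance this color's rotor, then mask it into range */
                    rotor[color] += NCOLORS;
                    free_ndx = rotor[color] & FREEMSK;
                    printf("off %-6lld -> color %d, free list %u\n",
                        off, color, free_ndx);
            }
            return (0);
    }
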
1973 1971
1974 1972 int
1975 1973 segmap_release(struct seg *seg, caddr_t addr, uint_t flags)
1976 1974 {
1977 1975 struct smap *smp;
1978 1976 int error;
1979 1977 int bflags = 0;
1980 1978 struct vnode *vp;
1981 1979 u_offset_t offset;
1982 1980 kmutex_t *smtx;
1983 1981 int is_kpm = 0;
1984 1982 page_t *pp;
1985 1983
1986 1984 if (segmap_kpm && IS_KPM_ADDR(addr)) {
1987 1985
1988 1986 if (((uintptr_t)addr & MAXBOFFSET) != 0) {
1989 1987 panic("segmap_release: addr %p not "
1990 1988 "MAXBSIZE aligned", (void *)addr);
1991 1989 /*NOTREACHED*/
1992 1990 }
1993 1991
1994 1992 if ((smp = get_smap_kpm(addr, &pp)) == NULL) {
1995 1993 panic("segmap_release: smap not found "
1996 1994 "for addr %p", (void *)addr);
1997 1995 /*NOTREACHED*/
1998 1996 }
1999 1997
2000 1998 TRACE_3(TR_FAC_VM, TR_SEGMAP_RELMAP,
2001 1999 "segmap_relmap:seg %p addr %p smp %p",
2002 2000 seg, addr, smp);
2003 2001
2004 2002 smtx = SMAPMTX(smp);
2005 2003
2006 2004 /*
2007 2005 * For compatibility reasons segmap_pagecreate_kpm sets this
2008 2006 * flag to allow a following segmap_pagecreate to return
2009 2007 * this as "newpage" flag. When segmap_pagecreate is not
2010 2008 * called at all we clear it now.
2011 2009 */
2012 2010 smp->sm_flags &= ~SM_KPM_NEWPAGE;
2013 2011 is_kpm = 1;
2014 2012 if (smp->sm_flags & SM_WRITE_DATA) {
2015 2013 hat_setrefmod(pp);
2016 2014 } else if (smp->sm_flags & SM_READ_DATA) {
2017 2015 hat_setref(pp);
2018 2016 }
2019 2017 } else {
2020 2018 if (addr < seg->s_base || addr >= seg->s_base + seg->s_size ||
2021 2019 ((uintptr_t)addr & MAXBOFFSET) != 0) {
2022 2020 panic("segmap_release: bad addr %p", (void *)addr);
2023 2021 /*NOTREACHED*/
2024 2022 }
2025 2023 smp = GET_SMAP(seg, addr);
2026 2024
2027 2025 TRACE_3(TR_FAC_VM, TR_SEGMAP_RELMAP,
2028 2026 "segmap_relmap:seg %p addr %p smp %p",
2029 2027 seg, addr, smp);
2030 2028
2031 2029 smtx = SMAPMTX(smp);
2032 2030 mutex_enter(smtx);
2033 2031 smp->sm_flags |= SM_NOTKPM_RELEASED;
2034 2032 }
2035 2033
2036 2034 ASSERT(smp->sm_refcnt > 0);
2037 2035
2038 2036 /*
2039 2037 * Need to call VOP_PUTPAGE() if any flags (except SM_DONTNEED)
2040 2038 * are set.
2041 2039 */
2042 2040 if ((flags & ~SM_DONTNEED) != 0) {
2043 2041 if (flags & SM_WRITE)
2044 2042 segmapcnt.smp_rel_write.value.ul++;
2045 2043 if (flags & SM_ASYNC) {
2046 2044 bflags |= B_ASYNC;
2047 2045 segmapcnt.smp_rel_async.value.ul++;
2048 2046 }
2049 2047 if (flags & SM_INVAL) {
2050 2048 bflags |= B_INVAL;
2051 2049 segmapcnt.smp_rel_abort.value.ul++;
2052 2050 }
2053 2051 if (flags & SM_DESTROY) {
2054 2052 bflags |= (B_INVAL|B_TRUNC);
2055 2053 segmapcnt.smp_rel_abort.value.ul++;
2056 2054 }
2057 2055 if (smp->sm_refcnt == 1) {
2058 2056 /*
2059 2057 * We only bother doing the FREE and DONTNEED flags
2060 2058 * if no one else is still referencing this mapping.
2061 2059 */
2062 2060 if (flags & SM_FREE) {
2063 2061 bflags |= B_FREE;
2064 2062 segmapcnt.smp_rel_free.value.ul++;
2065 2063 }
2066 2064 if (flags & SM_DONTNEED) {
2067 2065 bflags |= B_DONTNEED;
2068 2066 segmapcnt.smp_rel_dontneed.value.ul++;
2069 2067 }
2070 2068 }
2071 2069 } else {
2072 2070 smd_cpu[CPU->cpu_seqid].scpu.scpu_release++;
2073 2071 }
2074 2072
2075 2073 vp = smp->sm_vp;
2076 2074 offset = smp->sm_off;
2077 2075
2078 2076 if (--smp->sm_refcnt == 0) {
2079 2077
2080 2078 smp->sm_flags &= ~(SM_WRITE_DATA | SM_READ_DATA);
2081 2079
2082 2080 if (flags & (SM_INVAL|SM_DESTROY)) {
2083 2081 segmap_hashout(smp); /* remove map info */
2084 2082 if (is_kpm) {
2085 2083 hat_kpm_mapout(pp, GET_KPME(smp), addr);
2086 2084 if (smp->sm_flags & SM_NOTKPM_RELEASED) {
2087 2085 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
2088 2086 hat_unload(kas.a_hat, segkmap->s_base +
2089 2087 ((smp - smd_smap) * MAXBSIZE),
2090 2088 MAXBSIZE, HAT_UNLOAD);
2091 2089 }
2092 2090
2093 2091 } else {
2094 2092 if (segmap_kpm)
2095 2093 segkpm_mapout_validkpme(GET_KPME(smp));
2096 2094
2097 2095 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
2098 2096 hat_unload(kas.a_hat, addr, MAXBSIZE,
2099 2097 HAT_UNLOAD);
2100 2098 }
2101 2099 }
2102 2100 segmap_smapadd(smp); /* add to free list */
2103 2101 }
2104 2102
2105 2103 mutex_exit(smtx);
2106 2104
2107 2105 if (is_kpm)
2108 2106 page_unlock(pp);
2109 2107 /*
2110 2108 * Now invoke VOP_PUTPAGE() if any flags (except SM_DONTNEED)
2111 2109 * are set.
2112 2110 */
2113 2111 if ((flags & ~SM_DONTNEED) != 0) {
2114 2112 error = VOP_PUTPAGE(vp, offset, MAXBSIZE,
2115 2113 bflags, CRED(), NULL);
2116 2114 } else {
2117 2115 error = 0;
2118 2116 }
2119 2117
2120 2118 return (error);
2121 2119 }
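
Editor's note: for context on how the getmap/release pair above is normally driven, here is a schematic of the classic file system read path through segkmap. It is a sketch only, not any particular file system's code: EOF clamping, the vnode rwlock and fault retries are omitted, and whether SM_DONTNEED (versus SM_FREE or 0) is the right release hint depends on the caller's caching policy.

    #include <sys/types.h>
    #include <sys/param.h>
    #include <sys/sysmacros.h>
    #include <sys/uio.h>
    #include <sys/vnode.h>
    #include <vm/seg.h>
    #include <vm/seg_map.h>

    extern struct seg *segkmap;     /* the kernel's generic vnode mapping segment */

    /*
     * Copy the byte range described by uio out of vp, one MAXBSIZE window
     * at a time: map the window, uiomove() out of it, release the slot.
     */
    static int
    sketch_read(struct vnode *vp, struct uio *uio)
    {
            int error = 0;

            while (error == 0 && uio->uio_resid > 0) {
                    u_offset_t off = uio->uio_loffset;
                    size_t mapon = (size_t)(off & (u_offset_t)MAXBOFFSET);
                    size_t n = MIN(MAXBSIZE - mapon, (size_t)uio->uio_resid);
                    caddr_t base;

                    /* forcefault != 0: prefault the translations for the range */
                    base = segmap_getmapflt(segkmap, vp,
                        off & (u_offset_t)MAXBMASK, n, 1, S_READ);
                    error = uiomove(base + mapon, n, UIO_READ, uio);
                    (void) segmap_release(segkmap, base,
                        error ? 0 : SM_DONTNEED);
            }
            return (error);
    }
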
2122 2120
2123 2121 /*
2124 2122 * Dump the pages belonging to this segmap segment.
2125 2123 */
2126 2124 static void
2127 2125 segmap_dump(struct seg *seg)
2128 2126 {
2129 2127 struct segmap_data *smd;
2130 2128 struct smap *smp, *smp_end;
2131 2129 page_t *pp;
2132 2130 pfn_t pfn;
2133 2131 u_offset_t off;
2134 2132 caddr_t addr;
2135 2133
2136 2134 smd = (struct segmap_data *)seg->s_data;
2137 2135 addr = seg->s_base;
2138 2136 for (smp = smd->smd_sm, smp_end = smp + smd->smd_npages;
2139 2137 smp < smp_end; smp++) {
2140 2138
2141 2139 if (smp->sm_refcnt) {
2142 2140 for (off = 0; off < MAXBSIZE; off += PAGESIZE) {
2143 2141 int we_own_it = 0;
2144 2142
2145 2143 /*
2146 2144 * If pp == NULL, the page either does
2147 2145 * not exist or is exclusively locked.
2148 2146 * So determine if it exists before
2149 2147 * searching for it.
2150 2148 */
2151 2149 if ((pp = page_lookup_nowait(smp->sm_vp,
2152 2150 smp->sm_off + off, SE_SHARED)))
2153 2151 we_own_it = 1;
2154 2152 else
2155 2153 pp = page_exists(smp->sm_vp,
2156 2154 smp->sm_off + off);
2157 2155
2158 2156 if (pp) {
2159 2157 pfn = page_pptonum(pp);
2160 2158 dump_addpage(seg->s_as,
2161 2159 addr + off, pfn);
2162 2160 if (we_own_it)
2163 2161 page_unlock(pp);
2164 2162 }
2165 2163 dump_timeleft = dump_timeout;
2166 2164 }
2167 2165 }
2168 2166 addr += MAXBSIZE;
2169 2167 }
2170 2168 }
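
Editor's note: the lookup policy in segmap_dump() above (try a non-blocking shared lock; if the page is busy, fall back to a bare existence check so the dump path never blocks) is a pattern worth calling out. A minimal user-level analogue, using pthread primitives and hypothetical names (entry_t, record_pfn) rather than the kernel page interfaces, might look like this; the lock is assumed to be initialized elsewhere.

    #include <pthread.h>
    #include <stdio.h>

    typedef struct entry {
            pthread_rwlock_t e_lock;        /* plays the role of p_selock */
            int              e_present;     /* does the page exist? */
            unsigned long    e_pfn;         /* datum to record in the dump */
    } entry_t;

    static void
    record_pfn(entry_t *e)
    {
            if (pthread_rwlock_tryrdlock(&e->e_lock) == 0) {
                    /* got the shared lock without blocking */
                    if (e->e_present)
                            printf("pfn %lu\n", e->e_pfn);
                    pthread_rwlock_unlock(&e->e_lock);
            } else if (e->e_present) {
                    /* busy: record it anyway, but never block and never
                     * unlock what we did not lock */
                    printf("pfn %lu\n", e->e_pfn);
            }
    }
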
2171 2169
2172 2170 /*ARGSUSED*/
2173 2171 static int
2174 2172 segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
2175 2173 struct page ***ppp, enum lock_type type, enum seg_rw rw)
2176 2174 {
2177 2175 return (ENOTSUP);
2178 2176 }
2179 2177
2180 2178 static int
2181 2179 segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2182 2180 {
2183 2181 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
2184 2182
2185 2183 memidp->val[0] = (uintptr_t)smd->smd_sm->sm_vp;
2186 2184 memidp->val[1] = smd->smd_sm->sm_off + (uintptr_t)(addr - seg->s_base);
2187 - return (0);
2188 -}
2189 -
2190 -/*ARGSUSED*/
2191 -static int
2192 -segmap_capable(struct seg *seg, segcapability_t capability)
2193 -{
2194 2185 return (0);
2195 2186 }
2196 2187
2197 2188
2198 2189 #ifdef SEGKPM_SUPPORT
2199 2190
2200 2191 /*
2201 2192 * segkpm support routines
2202 2193 */
2203 2194
2204 2195 static caddr_t
2205 2196 segmap_pagecreate_kpm(struct seg *seg, vnode_t *vp, u_offset_t off,
2206 2197 struct smap *smp, enum seg_rw rw)
2207 2198 {
2208 2199 caddr_t base;
2209 2200 page_t *pp;
2210 2201 int newpage = 0;
2211 2202 struct kpme *kpme;
2212 2203
2213 2204 ASSERT(smp->sm_refcnt > 0);
2214 2205
2215 2206 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
2216 2207 kmutex_t *smtx;
2217 2208
2218 2209 base = segkpm_create_va(off);
2219 2210
2220 2211 if ((pp = page_create_va(vp, off, PAGESIZE, PG_WAIT,
2221 2212 seg, base)) == NULL) {
2222 2213 panic("segmap_pagecreate_kpm: "
2223 2214 "page_create failed");
2224 2215 /*NOTREACHED*/
2225 2216 }
2226 2217
2227 2218 newpage = 1;
2228 2219 page_io_unlock(pp);
2229 2220 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
2230 2221
2231 2222 /*
2232 2223 * Mark this here until the following segmap_pagecreate
2233 2224 * or segmap_release.
2234 2225 */
2235 2226 smtx = SMAPMTX(smp);
2236 2227 mutex_enter(smtx);
2237 2228 smp->sm_flags |= SM_KPM_NEWPAGE;
2238 2229 mutex_exit(smtx);
2239 2230 }
2240 2231
2241 2232 kpme = GET_KPME(smp);
2242 2233 if (!newpage && kpme->kpe_page == pp)
2243 2234 base = hat_kpm_page2va(pp, 0);
2244 2235 else
2245 2236 base = hat_kpm_mapin(pp, kpme);
2246 2237
2247 2238 /*
2248 2239 * FS code may decide not to call segmap_pagecreate and we
2249 2240 * don't invoke segmap_fault via TLB miss, so we have to set
2250 2241 * ref and mod bits in advance.
2251 2242 */
2252 2243 if (rw == S_WRITE) {
2253 2244 hat_setrefmod(pp);
2254 2245 } else {
2255 2246 ASSERT(rw == S_READ);
2256 2247 hat_setref(pp);
2257 2248 }
2258 2249
2259 2250 smd_cpu[CPU->cpu_seqid].scpu.scpu_pagecreate++;
2260 2251
2261 2252 return (base);
2262 2253 }
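
Editor's note: the kpme handling at the end of this function (and twice in segmap_getmapflt() above) follows the same three-way decision: reuse the existing address when the entry already names this page, map in only when the entry is empty, and treat anything else as a fatal stale entry. A minimal sketch of that decision, with hypothetical names (kpm_entry_t, map_page) standing in for the real hat_kpm_* interfaces:

    #include <assert.h>
    #include <stddef.h>

    typedef struct kpm_entry {
            const void *ke_page;    /* page currently mapped here, or NULL */
            void       *ke_va;      /* virtual address of that mapping */
    } kpm_entry_t;

    /* hypothetical: establish a mapping for page and cache it in ke */
    extern void *map_page(const void *page, kpm_entry_t *ke);

    static void *
    get_mapping(const void *page, kpm_entry_t *ke)
    {
            if (ke->ke_page == page)
                    return (ke->ke_va);     /* already mapped: reuse it */
            assert(ke->ke_page == NULL);    /* a different page here is a bug */
            return (map_page(page, ke));    /* empty entry: map it in */
    }
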
2263 2254
2264 2255 /*
2265 2256 * Find the smap structure corresponding to the
2266 2257 * KPM addr and return it locked.
2267 2258 */
2268 2259 struct smap *
2269 2260 get_smap_kpm(caddr_t addr, page_t **ppp)
2270 2261 {
2271 2262 struct smap *smp;
2272 2263 struct vnode *vp;
2273 2264 u_offset_t offset;
2274 2265 caddr_t baseaddr = (caddr_t)((uintptr_t)addr & MAXBMASK);
2275 2266 int hashid;
2276 2267 kmutex_t *hashmtx;
2277 2268 page_t *pp;
2278 2269 union segmap_cpu *scpu;
2279 2270
2280 2271 pp = hat_kpm_vaddr2page(baseaddr);
2281 2272
2282 2273 ASSERT(pp && !PP_ISFREE(pp));
2283 2274 ASSERT(PAGE_LOCKED(pp));
2284 2275 ASSERT(((uintptr_t)pp->p_offset & MAXBOFFSET) == 0);
2285 2276
2286 2277 vp = pp->p_vnode;
2287 2278 offset = pp->p_offset;
2288 2279 ASSERT(vp != NULL);
2289 2280
2290 2281 /*
2291 2282 * Assume the last smap used on this cpu is the one needed.
2292 2283 */
2293 2284 scpu = smd_cpu+CPU->cpu_seqid;
2294 2285 smp = scpu->scpu.scpu_last_smap;
2295 2286 mutex_enter(&smp->sm_mtx);
2296 2287 if (smp->sm_vp == vp && smp->sm_off == offset) {
2297 2288 ASSERT(smp->sm_refcnt > 0);
2298 2289 } else {
2299 2290 /*
2300 2291 * Assumption wrong, find the smap on the hash chain.
2301 2292 */
2302 2293 mutex_exit(&smp->sm_mtx);
2303 2294 SMAP_HASHFUNC(vp, offset, hashid); /* macro assigns hashid */
2304 2295 hashmtx = SHASHMTX(hashid);
2305 2296
2306 2297 mutex_enter(hashmtx);
2307 2298 smp = smd_hash[hashid].sh_hash_list;
2308 2299 for (; smp != NULL; smp = smp->sm_hash) {
2309 2300 if (smp->sm_vp == vp && smp->sm_off == offset)
2310 2301 break;
2311 2302 }
2312 2303 mutex_exit(hashmtx);
2313 2304 if (smp) {
2314 2305 mutex_enter(&smp->sm_mtx);
2315 2306 ASSERT(smp->sm_vp == vp && smp->sm_off == offset);
2316 2307 }
2317 2308 }
2318 2309
2319 2310 if (ppp)
2320 2311 *ppp = smp ? pp : NULL;
2321 2312
2322 2313 return (smp);
2323 2314 }
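
Editor's note: get_smap_kpm() above illustrates a common two-level lookup: first guess the per-CPU "last smap used" and validate its (vp, offset) tag under the slot's own mutex; only on a miss walk the hash chain, revalidating again because the hash mutex is dropped before the slot mutex is taken (the same dance segmap_getmapflt() does). A user-level sketch of the pattern, with hypothetical names throughout (slot_t, hash_lookup_and_lock); the per-CPU cache is approximated by a single static pointer:

    #include <pthread.h>
    #include <stddef.h>

    typedef struct slot {
            const void      *s_vp;          /* identity tag */
            long long        s_off;         /* identity tag */
            pthread_mutex_t  s_mtx;         /* per-slot lock */
    } slot_t;

    static slot_t *last_slot;               /* stands in for scpu_last_smap */

    /* hypothetical slow path: walk the hash, return the slot locked or NULL */
    extern slot_t *hash_lookup_and_lock(const void *vp, long long off);

    static slot_t *
    fast_lookup(const void *vp, long long off)
    {
            slot_t *sp = last_slot;

            if (sp != NULL) {
                    pthread_mutex_lock(&sp->s_mtx);
                    if (sp->s_vp == vp && sp->s_off == off)
                            return (sp);    /* cache hit, returned locked */
                    pthread_mutex_unlock(&sp->s_mtx);
            }
            return (hash_lookup_and_lock(vp, off));     /* miss: full lookup */
    }
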
2324 2315
2325 2316 #else /* SEGKPM_SUPPORT */
2326 2317
2327 2318 /* segkpm stubs */
2328 2319
2329 2320 /*ARGSUSED*/
2330 2321 static caddr_t
2331 2322 segmap_pagecreate_kpm(struct seg *seg, vnode_t *vp, u_offset_t off,
2332 2323 struct smap *smp, enum seg_rw rw)
2333 2324 {
2334 2325 return (NULL);
2335 2326 }
2336 2327
2337 2328 /*ARGSUSED*/
2338 2329 struct smap *
2339 2330 get_smap_kpm(caddr_t addr, page_t **ppp)
2340 2331 {
2341 2332 return (NULL);
2342 2333 }
2343 2334
2344 2335 #endif /* SEGKPM_SUPPORT */
141 lines elided