[mq]: core-v2
--- old/usr/src/uts/i86pc/vm/vm_machdep.c
+++ new/usr/src/uts/i86pc/vm/vm_machdep.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 25 * Copyright (c) 2010, Intel Corporation.
26 26 * All rights reserved.
27 27 */
28 28
29 29 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
30 30 /* All Rights Reserved */
31 31
32 32 /*
33 33 * Portions of this source code were derived from Berkeley 4.3 BSD
34 34 * under license from the Regents of the University of California.
35 35 */
36 36
37 37 /*
38 38 * UNIX machine dependent virtual memory support.
39 39 */
40 40
41 41 #include <sys/types.h>
42 42 #include <sys/param.h>
43 43 #include <sys/systm.h>
44 44 #include <sys/user.h>
45 45 #include <sys/proc.h>
46 46 #include <sys/kmem.h>
47 47 #include <sys/vmem.h>
48 48 #include <sys/buf.h>
49 49 #include <sys/cpuvar.h>
50 50 #include <sys/lgrp.h>
51 51 #include <sys/disp.h>
52 52 #include <sys/vm.h>
53 53 #include <sys/mman.h>
54 54 #include <sys/vnode.h>
55 55 #include <sys/cred.h>
56 56 #include <sys/exec.h>
57 57 #include <sys/exechdr.h>
58 58 #include <sys/debug.h>
59 59 #include <sys/vmsystm.h>
60 60 #include <sys/swap.h>
61 61 #include <sys/dumphdr.h>
62 62
63 63 #include <vm/hat.h>
64 64 #include <vm/as.h>
65 65 #include <vm/seg.h>
66 66 #include <vm/seg_kp.h>
67 67 #include <vm/seg_vn.h>
68 68 #include <vm/page.h>
69 69 #include <vm/seg_kmem.h>
70 70 #include <vm/seg_kpm.h>
71 71 #include <vm/vm_dep.h>
72 72
73 73 #include <sys/cpu.h>
74 74 #include <sys/vm_machparam.h>
75 75 #include <sys/memlist.h>
76 76 #include <sys/bootconf.h> /* XXX the memlist stuff belongs in memlist_plat.h */
77 77 #include <vm/hat_i86.h>
78 78 #include <sys/x86_archext.h>
79 79 #include <sys/elf_386.h>
80 80 #include <sys/cmn_err.h>
81 81 #include <sys/archsystm.h>
82 82 #include <sys/machsystm.h>
83 83
84 84 #include <sys/vtrace.h>
85 85 #include <sys/ddidmareq.h>
86 86 #include <sys/promif.h>
87 87 #include <sys/memnode.h>
88 88 #include <sys/stack.h>
89 89 #include <util/qsort.h>
90 90 #include <sys/taskq.h>
91 91
92 92 #ifdef __xpv
93 93
94 94 #include <sys/hypervisor.h>
95 95 #include <sys/xen_mmu.h>
96 96 #include <sys/balloon_impl.h>
97 97
98 98 /*
  99  99  * domain 0 pages usable for DMA are pre-allocated and kept in
100 100 * distinct lists, ordered by increasing mfn.
101 101 */
102 102 static kmutex_t io_pool_lock;
103 103 static kmutex_t contig_list_lock;
104 104 static page_t *io_pool_4g; /* pool for 32 bit dma limited devices */
105 105 static page_t *io_pool_16m; /* pool for 24 bit dma limited legacy devices */
106 106 static long io_pool_cnt;
107 107 static long io_pool_cnt_max = 0;
108 108 #define DEFAULT_IO_POOL_MIN 128
109 109 static long io_pool_cnt_min = DEFAULT_IO_POOL_MIN;
110 110 static long io_pool_cnt_lowater = 0;
111 111 static long io_pool_shrink_attempts; /* how many times did we try to shrink */
112 112 static long io_pool_shrinks; /* how many times did we really shrink */
113 113 static long io_pool_grows; /* how many times did we grow */
114 114 static mfn_t start_mfn = 1;
 115 115 static caddr_t io_pool_kva;	/* used to alloc pages when needed */
116 116
117 117 static int create_contig_pfnlist(uint_t);
118 118
119 119 /*
120 120 * percentage of phys mem to hold in the i/o pool
121 121 */
122 122 #define DEFAULT_IO_POOL_PCT 2
123 123 static long io_pool_physmem_pct = DEFAULT_IO_POOL_PCT;
124 124 static void page_io_pool_sub(page_t **, page_t *, page_t *);
125 125 int ioalloc_dbg = 0;
126 126
127 127 #endif /* __xpv */
128 128
129 129 uint_t vac_colors = 1;
130 130
131 131 int largepagesupport = 0;
132 132 extern uint_t page_create_new;
133 133 extern uint_t page_create_exists;
134 134 extern uint_t page_create_putbacks;
135 135 /*
136 136 * Allow users to disable the kernel's use of SSE.
137 137 */
138 138 extern int use_sse_pagecopy, use_sse_pagezero;
139 139
140 140 /*
 141 141  * combined memory ranges from mnode and memranges[] to manage a single
142 142 * mnode/mtype dimension in the page lists.
143 143 */
144 144 typedef struct {
145 145 pfn_t mnr_pfnlo;
146 146 pfn_t mnr_pfnhi;
147 147 int mnr_mnode;
148 148 int mnr_memrange; /* index into memranges[] */
149 149 int mnr_next; /* next lower PA mnoderange */
150 150 int mnr_exists;
151 151 /* maintain page list stats */
152 152 pgcnt_t mnr_mt_clpgcnt; /* cache list cnt */
153 153 pgcnt_t mnr_mt_flpgcnt[MMU_PAGE_SIZES]; /* free list cnt per szc */
154 154 pgcnt_t mnr_mt_totcnt; /* sum of cache and free lists */
155 155 #ifdef DEBUG
156 156 struct mnr_mts { /* mnode/mtype szc stats */
157 157 pgcnt_t mnr_mts_pgcnt;
158 158 int mnr_mts_colors;
159 159 pgcnt_t *mnr_mtsc_pgcnt;
160 160 } *mnr_mts;
161 161 #endif
162 162 } mnoderange_t;
163 163
164 164 #define MEMRANGEHI(mtype) \
165 165 ((mtype > 0) ? memranges[mtype - 1] - 1: physmax)
166 166 #define MEMRANGELO(mtype) (memranges[mtype])
167 167
168 168 #define MTYPE_FREEMEM(mt) (mnoderanges[mt].mnr_mt_totcnt)
169 169
170 170 /*
 171 171  * As the PC architecture evolved, memory was clumped into several
172 172 * ranges for various historical I/O devices to do DMA.
173 173 * < 16Meg - ISA bus
174 174 * < 2Gig - ???
175 175 * < 4Gig - PCI bus or drivers that don't understand PAE mode
176 176 *
177 177 * These are listed in reverse order, so that we can skip over unused
178 178 * ranges on machines with small memories.
179 179 *
180 180 * For now under the Hypervisor, we'll only ever have one memrange.
181 181 */
182 182 #define PFN_4GIG 0x100000
183 183 #define PFN_16MEG 0x1000
184 184 /* Indices into the memory range (arch_memranges) array. */
185 185 #define MRI_4G 0
186 186 #define MRI_2G 1
187 187 #define MRI_16M 2
188 188 #define MRI_0 3
189 189 static pfn_t arch_memranges[NUM_MEM_RANGES] = {
190 190 PFN_4GIG, /* pfn range for 4G and above */
191 191 0x80000, /* pfn range for 2G-4G */
192 192 PFN_16MEG, /* pfn range for 16M-2G */
193 193 0x00000, /* pfn range for 0-16M */
194 194 };
195 195 pfn_t *memranges = &arch_memranges[0];
196 196 int nranges = NUM_MEM_RANGES;
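/*
 * For example, with the default arch_memranges above,
 * MEMRANGELO(MRI_16M) == memranges[2] == 0x1000 and
 * MEMRANGEHI(MRI_16M) == memranges[1] - 1 == 0x7ffff, so the MRI_16M
 * mtypes cover pfns for physical addresses [16M, 2G). MRI_4G is index 0,
 * so MEMRANGEHI(MRI_4G) == physmax.
 */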
197 197
198 198 /*
199 199 * This combines mem_node_config and memranges into one data
200 200 * structure to be used for page list management.
201 201 */
202 202 mnoderange_t *mnoderanges;
203 203 int mnoderangecnt;
204 204 int mtype4g;
205 205 int mtype16m;
206 206 int mtypetop; /* index of highest pfn'ed mnoderange */
207 207
208 208 /*
209 209 * 4g memory management variables for systems with more than 4g of memory:
210 210 *
211 211 * physical memory below 4g is required for 32bit dma devices and, currently,
212 212 * for kmem memory. On systems with more than 4g of memory, the pool of memory
213 213 * below 4g can be depleted without any paging activity given that there is
214 214 * likely to be sufficient memory above 4g.
215 215 *
216 216 * physmax4g is set true if the largest pfn is over 4g. The rest of the
217 217 * 4g memory management code is enabled only when physmax4g is true.
218 218 *
219 219 * maxmem4g is the count of the maximum number of pages on the page lists
 220 220  * with physical addresses below 4g. It can be a lot less than 4g given that
221 221 * BIOS may reserve large chunks of space below 4g for hot plug pci devices,
222 222 * agp aperture etc.
223 223 *
224 224 * freemem4g maintains the count of the number of available pages on the
225 225 * page lists with physical addresses below 4g.
226 226 *
227 227 * DESFREE4G specifies the desired amount of below 4g memory. It defaults to
228 228 * 6% (desfree4gshift = 4) of maxmem4g.
229 229 *
230 230 * RESTRICT4G_ALLOC returns true if freemem4g falls below DESFREE4G
231 231 * and the amount of physical memory above 4g is greater than freemem4g.
232 232 * In this case, page_get_* routines will restrict below 4g allocations
233 233 * for requests that don't specifically require it.
234 234 */
235 235
236 236 #define DESFREE4G (maxmem4g >> desfree4gshift)
237 237
238 238 #define RESTRICT4G_ALLOC \
239 239 (physmax4g && (freemem4g < DESFREE4G) && ((freemem4g << 1) < freemem))
240 240
241 241 static pgcnt_t maxmem4g;
242 242 static pgcnt_t freemem4g;
243 243 static int physmax4g;
244 244 static int desfree4gshift = 4; /* maxmem4g shift to derive DESFREE4G */
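/*
 * For example, with the default desfree4gshift of 4, DESFREE4G is
 * maxmem4g >> 4, i.e. 1/16th (~6.25%) of the below-4g pages.
 * RESTRICT4G_ALLOC then only triggers while, in addition, less than half
 * of all free memory is below 4g (freemem4g << 1 < freemem).
 */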
245 245
246 246 /*
247 247 * 16m memory management:
248 248 *
249 249 * reserve some amount of physical memory below 16m for legacy devices.
250 250 *
 251 251  * RESTRICT16M_ALLOC returns true if there are sufficient free pages above
252 252 * 16m or if the 16m pool drops below DESFREE16M.
253 253 *
254 254 * In this case, general page allocations via page_get_{free,cache}list
255 255 * routines will be restricted from allocating from the 16m pool. Allocations
256 256 * that require specific pfn ranges (page_get_anylist) and PG_PANIC allocations
257 257 * are not restricted.
258 258 */
259 259
260 260 #define FREEMEM16M MTYPE_FREEMEM(mtype16m)
261 261 #define DESFREE16M desfree16m
262 262 #define RESTRICT16M_ALLOC(freemem, pgcnt, flags) \
263 263 ((freemem != 0) && ((flags & PG_PANIC) == 0) && \
264 264 ((freemem >= (FREEMEM16M)) || \
265 265 (FREEMEM16M < (DESFREE16M + pgcnt))))
266 266
267 267 static pgcnt_t desfree16m = 0x380;
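/*
 * 0x380 is 896 pages, or 3.5MB with 4K pages, i.e. roughly a fifth of
 * the 16M range is kept in reserve for legacy DMA.
 */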
268 268
269 269 /*
270 270 * This can be patched via /etc/system to allow old non-PAE aware device
271 271 * drivers to use kmem_alloc'd memory on 32 bit systems with > 4Gig RAM.
272 272 */
273 273 int restricted_kmemalloc = 0;
274 274
275 275 #ifdef VM_STATS
276 276 struct {
277 277 ulong_t pga_alloc;
278 278 ulong_t pga_notfullrange;
279 279 ulong_t pga_nulldmaattr;
280 280 ulong_t pga_allocok;
281 281 ulong_t pga_allocfailed;
282 282 ulong_t pgma_alloc;
283 283 ulong_t pgma_allocok;
284 284 ulong_t pgma_allocfailed;
285 285 ulong_t pgma_allocempty;
286 286 } pga_vmstats;
287 287 #endif
288 288
289 289 uint_t mmu_page_sizes;
290 290
291 291 /* How many page sizes the users can see */
292 292 uint_t mmu_exported_page_sizes;
293 293
294 294 /* page sizes that legacy applications can see */
295 295 uint_t mmu_legacy_page_sizes;
296 296
297 297 /*
298 298 * Number of pages in 1 GB. Don't enable automatic large pages if we have
299 299 * fewer than this many pages.
300 300 */
301 301 pgcnt_t shm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
302 302 pgcnt_t privm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
303 303
304 304 /*
305 305 * Maximum and default segment size tunables for user private
306 306 * and shared anon memory, and user text and initialized data.
307 307 * These can be patched via /etc/system to allow large pages
308 308 * to be used for mapping application private and shared anon memory.
309 309 */
310 310 size_t mcntl0_lpsize = MMU_PAGESIZE;
311 311 size_t max_uheap_lpsize = MMU_PAGESIZE;
312 312 size_t default_uheap_lpsize = MMU_PAGESIZE;
313 313 size_t max_ustack_lpsize = MMU_PAGESIZE;
314 314 size_t default_ustack_lpsize = MMU_PAGESIZE;
315 315 size_t max_privmap_lpsize = MMU_PAGESIZE;
316 316 size_t max_uidata_lpsize = MMU_PAGESIZE;
317 317 size_t max_utext_lpsize = MMU_PAGESIZE;
318 318 size_t max_shm_lpsize = MMU_PAGESIZE;
319 319
320 320
321 321 /*
322 322 * initialized by page_coloring_init().
323 323 */
324 324 uint_t page_colors;
325 325 uint_t page_colors_mask;
326 326 uint_t page_coloring_shift;
327 327 int cpu_page_colors;
328 328 static uint_t l2_colors;
329 329
330 330 /*
331 331 * Page freelists and cachelists are dynamically allocated once mnoderangecnt
332 332 * and page_colors are calculated from the l2 cache n-way set size. Within a
333 333 * mnode range, the page freelist and cachelist are hashed into bins based on
334 334 * color. This makes it easier to search for a page within a specific memory
335 335 * range.
336 336 */
337 337 #define PAGE_COLORS_MIN 16
338 338
339 339 page_t ****page_freelists;
340 340 page_t ***page_cachelists;
341 341
342 342
343 343 /*
344 344 * Used by page layer to know about page sizes
345 345 */
346 346 hw_pagesize_t hw_page_array[MAX_NUM_LEVEL + 1];
347 347
348 348 kmutex_t *fpc_mutex[NPC_MUTEX];
349 349 kmutex_t *cpc_mutex[NPC_MUTEX];
350 350
351 351 /* Lock to protect mnoderanges array for memory DR operations. */
352 352 static kmutex_t mnoderange_lock;
353 353
354 354 /*
355 355 * Only let one thread at a time try to coalesce large pages, to
356 356 * prevent them from working against each other.
357 357 */
358 358 static kmutex_t contig_lock;
359 359 #define CONTIG_LOCK() mutex_enter(&contig_lock);
360 360 #define CONTIG_UNLOCK() mutex_exit(&contig_lock);
361 361
362 362 #define PFN_16M (mmu_btop((uint64_t)0x1000000))
363 363
364 364 /*
365 365 * Return the optimum page size for a given mapping
366 366 */
367 367 /*ARGSUSED*/
368 368 size_t
369 369 map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len, int memcntl)
370 370 {
371 371 level_t l = 0;
372 372 size_t pgsz = MMU_PAGESIZE;
373 373 size_t max_lpsize;
374 374 uint_t mszc;
375 375
376 376 ASSERT(maptype != MAPPGSZ_VA);
377 377
378 378 if (maptype != MAPPGSZ_ISM && physmem < privm_lpg_min_physmem) {
379 379 return (MMU_PAGESIZE);
380 380 }
381 381
382 382 switch (maptype) {
383 383 case MAPPGSZ_HEAP:
384 384 case MAPPGSZ_STK:
385 385 max_lpsize = memcntl ? mcntl0_lpsize : (maptype ==
386 386 MAPPGSZ_HEAP ? max_uheap_lpsize : max_ustack_lpsize);
387 387 if (max_lpsize == MMU_PAGESIZE) {
388 388 return (MMU_PAGESIZE);
389 389 }
390 390 if (len == 0) {
391 391 len = (maptype == MAPPGSZ_HEAP) ? p->p_brkbase +
392 392 p->p_brksize - p->p_bssbase : p->p_stksize;
393 393 }
394 394 len = (maptype == MAPPGSZ_HEAP) ? MAX(len,
395 395 default_uheap_lpsize) : MAX(len, default_ustack_lpsize);
396 396
397 397 /*
 398 398 		 * use the page size that best fits len
399 399 */
400 400 for (l = mmu.umax_page_level; l > 0; --l) {
401 401 if (LEVEL_SIZE(l) > max_lpsize || len < LEVEL_SIZE(l)) {
402 402 continue;
403 403 } else {
404 404 pgsz = LEVEL_SIZE(l);
405 405 }
406 406 break;
407 407 }
408 408
409 409 mszc = (maptype == MAPPGSZ_HEAP ? p->p_brkpageszc :
410 410 p->p_stkpageszc);
411 411 if (addr == 0 && (pgsz < hw_page_array[mszc].hp_size)) {
412 412 pgsz = hw_page_array[mszc].hp_size;
413 413 }
414 414 return (pgsz);
415 415
416 416 case MAPPGSZ_ISM:
417 417 for (l = mmu.umax_page_level; l > 0; --l) {
418 418 if (len >= LEVEL_SIZE(l))
419 419 return (LEVEL_SIZE(l));
420 420 }
421 421 return (LEVEL_SIZE(0));
422 422 }
423 423 return (pgsz);
424 424 }
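/*
 * For example, an ISM request with len == 5M and umax_page_level == 1
 * (LEVEL_SIZE(1) is typically 2M on x86) satisfies len >= LEVEL_SIZE(1),
 * so the MAPPGSZ_ISM case above returns 2M instead of falling back to
 * LEVEL_SIZE(0).
 */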
425 425
426 426 static uint_t
427 427 map_szcvec(caddr_t addr, size_t size, uintptr_t off, size_t max_lpsize,
428 428 size_t min_physmem)
429 429 {
430 430 caddr_t eaddr = addr + size;
431 431 uint_t szcvec = 0;
432 432 caddr_t raddr;
433 433 caddr_t readdr;
434 434 size_t pgsz;
435 435 int i;
436 436
437 437 if (physmem < min_physmem || max_lpsize <= MMU_PAGESIZE) {
438 438 return (0);
439 439 }
440 440
441 441 for (i = mmu_exported_page_sizes - 1; i > 0; i--) {
442 442 pgsz = page_get_pagesize(i);
443 443 if (pgsz > max_lpsize) {
444 444 continue;
445 445 }
446 446 raddr = (caddr_t)P2ROUNDUP((uintptr_t)addr, pgsz);
447 447 readdr = (caddr_t)P2ALIGN((uintptr_t)eaddr, pgsz);
448 448 if (raddr < addr || raddr >= readdr) {
449 449 continue;
450 450 }
451 451 if (P2PHASE((uintptr_t)addr ^ off, pgsz)) {
452 452 continue;
453 453 }
454 454 /*
455 455 * Set szcvec to the remaining page sizes.
456 456 */
457 457 szcvec = ((1 << (i + 1)) - 1) & ~1;
458 458 break;
459 459 }
460 460 return (szcvec);
461 461 }
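/*
 * For example, if the largest page size that passes the checks above is
 * size code i == 2, then szcvec == ((1 << 3) - 1) & ~1 == 0x6: size codes
 * 1 and 2 stay set and the base page size bit is always cleared.
 */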
462 462
463 463 /*
464 464 * Return a bit vector of large page size codes that
465 465 * can be used to map [addr, addr + len) region.
466 466 */
467 467 /*ARGSUSED*/
468 468 uint_t
469 469 map_pgszcvec(caddr_t addr, size_t size, uintptr_t off, int flags, int type,
470 470 int memcntl)
471 471 {
472 472 size_t max_lpsize = mcntl0_lpsize;
473 473
474 474 if (mmu.max_page_level == 0)
475 475 return (0);
476 476
477 477 if (flags & MAP_TEXT) {
478 478 if (!memcntl)
479 479 max_lpsize = max_utext_lpsize;
480 480 return (map_szcvec(addr, size, off, max_lpsize,
481 481 shm_lpg_min_physmem));
482 482
483 483 } else if (flags & MAP_INITDATA) {
484 484 if (!memcntl)
485 485 max_lpsize = max_uidata_lpsize;
486 486 return (map_szcvec(addr, size, off, max_lpsize,
487 487 privm_lpg_min_physmem));
488 488
489 489 } else if (type == MAPPGSZC_SHM) {
490 490 if (!memcntl)
491 491 max_lpsize = max_shm_lpsize;
492 492 return (map_szcvec(addr, size, off, max_lpsize,
493 493 shm_lpg_min_physmem));
494 494
495 495 } else if (type == MAPPGSZC_HEAP) {
496 496 if (!memcntl)
497 497 max_lpsize = max_uheap_lpsize;
498 498 return (map_szcvec(addr, size, off, max_lpsize,
499 499 privm_lpg_min_physmem));
500 500
501 501 } else if (type == MAPPGSZC_STACK) {
502 502 if (!memcntl)
503 503 max_lpsize = max_ustack_lpsize;
504 504 return (map_szcvec(addr, size, off, max_lpsize,
505 505 privm_lpg_min_physmem));
506 506
507 507 } else {
508 508 if (!memcntl)
509 509 max_lpsize = max_privmap_lpsize;
510 510 return (map_szcvec(addr, size, off, max_lpsize,
511 511 privm_lpg_min_physmem));
512 512 }
513 513 }
514 514
515 515 /*
516 516 * Handle a pagefault.
517 517 */
518 518 faultcode_t
519 519 pagefault(
520 520 caddr_t addr,
521 521 enum fault_type type,
522 522 enum seg_rw rw,
523 523 int iskernel)
524 524 {
525 525 struct as *as;
526 526 struct hat *hat;
527 527 struct proc *p;
528 528 kthread_t *t;
529 529 faultcode_t res;
530 530 caddr_t base;
531 531 size_t len;
532 532 int err;
533 533 int mapped_red;
534 534 uintptr_t ea;
535 535
536 536 ASSERT_STACK_ALIGNED();
537 537
538 538 if (INVALID_VADDR(addr))
539 539 return (FC_NOMAP);
540 540
541 541 mapped_red = segkp_map_red();
542 542
543 543 if (iskernel) {
544 544 as = &kas;
545 545 hat = as->a_hat;
546 546 } else {
547 547 t = curthread;
548 548 p = ttoproc(t);
549 549 as = p->p_as;
550 550 hat = as->a_hat;
551 551 }
552 552
553 553 /*
554 554 * Dispatch pagefault.
555 555 */
556 556 res = as_fault(hat, as, addr, 1, type, rw);
557 557
558 558 /*
559 559 * If this isn't a potential unmapped hole in the user's
560 560 * UNIX data or stack segments, just return status info.
561 561 */
562 562 if (res != FC_NOMAP || iskernel)
563 563 goto out;
564 564
565 565 /*
 566 566 	 * Check to see if we happened to fault on a currently unmapped
567 567 * part of the UNIX data or stack segments. If so, create a zfod
568 568 * mapping there and then try calling the fault routine again.
569 569 */
570 570 base = p->p_brkbase;
571 571 len = p->p_brksize;
572 572
573 573 if (addr < base || addr >= base + len) { /* data seg? */
574 574 base = (caddr_t)p->p_usrstack - p->p_stksize;
575 575 len = p->p_stksize;
576 576 if (addr < base || addr >= p->p_usrstack) { /* stack seg? */
577 577 /* not in either UNIX data or stack segments */
578 578 res = FC_NOMAP;
579 579 goto out;
580 580 }
581 581 }
582 582
583 583 /*
 584 584 	 * the rest of this function implements 3.X/4.X/5.X compatibility.
 585 585 	 * This code is probably not needed anymore.
586 586 */
587 587 if (p->p_model == DATAMODEL_ILP32) {
588 588
589 589 /* expand the gap to the page boundaries on each side */
590 590 ea = P2ROUNDUP((uintptr_t)base + len, MMU_PAGESIZE);
591 591 base = (caddr_t)P2ALIGN((uintptr_t)base, MMU_PAGESIZE);
592 592 len = ea - (uintptr_t)base;
593 593
594 594 as_rangelock(as);
595 595 if (as_gap(as, MMU_PAGESIZE, &base, &len, AH_CONTAIN, addr) ==
596 596 0) {
597 597 err = as_map(as, base, len, segvn_create, zfod_argsp);
598 598 as_rangeunlock(as);
599 599 if (err) {
600 600 res = FC_MAKE_ERR(err);
601 601 goto out;
602 602 }
603 603 } else {
604 604 /*
605 605 * This page is already mapped by another thread after
606 606 * we returned from as_fault() above. We just fall
 607 607 			 * through to as_fault() below.
608 608 */
609 609 as_rangeunlock(as);
610 610 }
611 611
612 612 res = as_fault(hat, as, addr, 1, F_INVAL, rw);
613 613 }
614 614
615 615 out:
616 616 if (mapped_red)
617 617 segkp_unmap_red();
618 618
619 619 return (res);
620 620 }
621 621
622 622 void
623 623 map_addr(caddr_t *addrp, size_t len, offset_t off, int vacalign, uint_t flags)
624 624 {
625 625 struct proc *p = curproc;
626 626 caddr_t userlimit = (flags & _MAP_LOW32) ?
627 627 (caddr_t)_userlimit32 : p->p_as->a_userlimit;
628 628
629 629 map_addr_proc(addrp, len, off, vacalign, userlimit, curproc, flags);
630 630 }
631 631
632 632 /*ARGSUSED*/
633 633 int
634 634 map_addr_vacalign_check(caddr_t addr, u_offset_t off)
635 635 {
636 636 return (0);
637 637 }
638 638
639 639 /*
640 640 * map_addr_proc() is the routine called when the system is to
641 641 * choose an address for the user. We will pick an address
642 642 * range which is the highest available below userlimit.
643 643 *
644 644 * Every mapping will have a redzone of a single page on either side of
645 645 * the request. This is done to leave one page unmapped between segments.
646 646 * This is not required, but it's useful for the user because if their
647 647 * program strays across a segment boundary, it will catch a fault
 648 648  * immediately, making debugging a little easier. Currently the redzone
649 649 * is mandatory.
650 650 *
651 651 * addrp is a value/result parameter.
652 652 * On input it is a hint from the user to be used in a completely
653 653 * machine dependent fashion. We decide to completely ignore this hint.
654 654 * If MAP_ALIGN was specified, addrp contains the minimal alignment, which
655 655 * must be some "power of two" multiple of pagesize.
656 656 *
657 657 * On output it is NULL if no address can be found in the current
 658 658  * process's address space, or else an address that is currently
659 659 * not mapped for len bytes with a page of red zone on either side.
660 660 *
 661 661  * vacalign is not needed on x86 (it's for virtually addressed caches)
662 662 */
663 663 /*ARGSUSED*/
664 664 void
665 665 map_addr_proc(
666 666 caddr_t *addrp,
667 667 size_t len,
668 668 offset_t off,
669 669 int vacalign,
670 670 caddr_t userlimit,
671 671 struct proc *p,
672 672 uint_t flags)
673 673 {
674 674 struct as *as = p->p_as;
675 675 caddr_t addr;
676 676 caddr_t base;
677 677 size_t slen;
678 678 size_t align_amount;
679 679
680 680 ASSERT32(userlimit == as->a_userlimit);
681 681
682 682 base = p->p_brkbase;
683 683 #if defined(__amd64)
684 684 /*
685 685 * XX64 Yes, this needs more work.
686 686 */
687 687 if (p->p_model == DATAMODEL_NATIVE) {
688 688 if (userlimit < as->a_userlimit) {
689 689 /*
690 690 * This happens when a program wants to map
691 691 * something in a range that's accessible to a
692 692 * program in a smaller address space. For example,
693 693 * a 64-bit program calling mmap32(2) to guarantee
694 694 * that the returned address is below 4Gbytes.
695 695 */
696 696 ASSERT((uintptr_t)userlimit < ADDRESS_C(0xffffffff));
697 697
698 698 if (userlimit > base)
699 699 slen = userlimit - base;
700 700 else {
701 701 *addrp = NULL;
702 702 return;
703 703 }
704 704 } else {
705 705 /*
706 706 * XX64 This layout is probably wrong .. but in
707 707 * the event we make the amd64 address space look
708 708 * like sparcv9 i.e. with the stack -above- the
709 709 * heap, this bit of code might even be correct.
710 710 */
711 711 slen = p->p_usrstack - base -
712 712 ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK);
713 713 }
714 714 } else
715 715 #endif
716 716 slen = userlimit - base;
717 717
718 718 /* Make len be a multiple of PAGESIZE */
719 719 len = (len + PAGEOFFSET) & PAGEMASK;
720 720
721 721 /*
722 722 * figure out what the alignment should be
723 723 *
724 724 * XX64 -- is there an ELF_AMD64_MAXPGSZ or is it the same????
725 725 */
726 726 if (len <= ELF_386_MAXPGSZ) {
727 727 /*
728 728 * Align virtual addresses to ensure that ELF shared libraries
729 729 * are mapped with the appropriate alignment constraints by
730 730 * the run-time linker.
731 731 */
732 732 align_amount = ELF_386_MAXPGSZ;
733 733 } else {
734 734 /*
735 735 * For 32-bit processes, only those which have specified
736 736 * MAP_ALIGN and an addr will be aligned on a larger page size.
737 737 * Not doing so can potentially waste up to 1G of process
738 738 * address space.
739 739 */
740 740 int lvl = (p->p_model == DATAMODEL_ILP32) ? 1 :
741 741 mmu.umax_page_level;
742 742
743 743 while (lvl && len < LEVEL_SIZE(lvl))
744 744 --lvl;
745 745
746 746 align_amount = LEVEL_SIZE(lvl);
747 747 }
748 748 if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount))
749 749 align_amount = (uintptr_t)*addrp;
750 750
751 751 ASSERT(ISP2(align_amount));
752 752 ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
753 753
754 754 off = off & (align_amount - 1);
755 755 /*
756 756 * Look for a large enough hole starting below userlimit.
757 757 * After finding it, use the upper part.
758 758 */
759 759 if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
760 760 PAGESIZE, off) == 0) {
761 761 caddr_t as_addr;
762 762
763 763 /*
764 764 * addr is the highest possible address to use since we have
765 765 * a PAGESIZE redzone at the beginning and end.
766 766 */
767 767 addr = base + slen - (PAGESIZE + len);
768 768 as_addr = addr;
769 769 /*
770 770 * Round address DOWN to the alignment amount and
771 771 * add the offset in.
772 772 * If addr is greater than as_addr, len would not be large
773 773 * enough to include the redzone, so we must adjust down
774 774 * by the alignment amount.
775 775 */
776 776 addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1)));
777 777 addr += (uintptr_t)off;
778 778 if (addr > as_addr) {
779 779 addr -= align_amount;
780 780 }
781 781
782 782 ASSERT(addr > base);
783 783 ASSERT(addr + len < base + slen);
784 784 ASSERT(((uintptr_t)addr & (align_amount - 1)) ==
785 785 ((uintptr_t)(off)));
786 786 *addrp = addr;
787 787 } else {
788 788 *addrp = NULL; /* no more virtual space */
789 789 }
790 790 }
791 791
792 792 int valid_va_range_aligned_wraparound;
793 793
794 794 /*
795 795 * Determine whether [*basep, *basep + *lenp) contains a mappable range of
796 796 * addresses at least "minlen" long, where the base of the range is at "off"
797 797 * phase from an "align" boundary and there is space for a "redzone"-sized
798 798 * redzone on either side of the range. On success, 1 is returned and *basep
799 799 * and *lenp are adjusted to describe the acceptable range (including
800 800 * the redzone). On failure, 0 is returned.
801 801 */
802 802 /*ARGSUSED3*/
803 803 int
804 804 valid_va_range_aligned(caddr_t *basep, size_t *lenp, size_t minlen, int dir,
805 805 size_t align, size_t redzone, size_t off)
806 806 {
807 807 uintptr_t hi, lo;
808 808 size_t tot_len;
809 809
810 810 ASSERT(align == 0 ? off == 0 : off < align);
811 811 ASSERT(ISP2(align));
812 812 ASSERT(align == 0 || align >= PAGESIZE);
813 813
814 814 lo = (uintptr_t)*basep;
815 815 hi = lo + *lenp;
816 816 tot_len = minlen + 2 * redzone; /* need at least this much space */
817 817
818 818 /*
819 819 * If hi rolled over the top, try cutting back.
820 820 */
821 821 if (hi < lo) {
822 822 *lenp = 0UL - lo - 1UL;
823 823 /* See if this really happens. If so, then we figure out why */
824 824 valid_va_range_aligned_wraparound++;
825 825 hi = lo + *lenp;
826 826 }
827 827 if (*lenp < tot_len) {
828 828 return (0);
829 829 }
830 830
831 831 #if defined(__amd64)
832 832 /*
833 833 * Deal with a possible hole in the address range between
834 834 * hole_start and hole_end that should never be mapped.
835 835 */
836 836 if (lo < hole_start) {
837 837 if (hi > hole_start) {
838 838 if (hi < hole_end) {
839 839 hi = hole_start;
840 840 } else {
841 841 /* lo < hole_start && hi >= hole_end */
842 842 if (dir == AH_LO) {
843 843 /*
844 844 * prefer lowest range
845 845 */
846 846 if (hole_start - lo >= tot_len)
847 847 hi = hole_start;
848 848 else if (hi - hole_end >= tot_len)
849 849 lo = hole_end;
850 850 else
851 851 return (0);
852 852 } else {
853 853 /*
854 854 * prefer highest range
855 855 */
856 856 if (hi - hole_end >= tot_len)
857 857 lo = hole_end;
858 858 else if (hole_start - lo >= tot_len)
859 859 hi = hole_start;
860 860 else
861 861 return (0);
862 862 }
863 863 }
864 864 }
865 865 } else {
866 866 /* lo >= hole_start */
867 867 if (hi < hole_end)
868 868 return (0);
869 869 if (lo < hole_end)
870 870 lo = hole_end;
871 871 }
872 872 #endif
873 873
874 874 if (hi - lo < tot_len)
875 875 return (0);
876 876
877 877 if (align > 1) {
878 878 uintptr_t tlo = lo + redzone;
879 879 uintptr_t thi = hi - redzone;
880 880 tlo = (uintptr_t)P2PHASEUP(tlo, align, off);
881 881 if (tlo < lo + redzone) {
882 882 return (0);
883 883 }
884 884 if (thi < tlo || thi - tlo < minlen) {
885 885 return (0);
886 886 }
887 887 }
888 888
889 889 *basep = (caddr_t)lo;
890 890 *lenp = hi - lo;
891 891 return (1);
892 892 }
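/*
 * For example, a request with minlen == 64K, redzone == 4K, align == 64K
 * and off == 0 needs tot_len == 72K of hole; the aligned start is
 * P2PHASEUP(lo + 4K, 64K, 0) and the range only passes if at least 64K
 * remains below hi - 4K.
 */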
893 893
894 894 /*
895 895 * Determine whether [*basep, *basep + *lenp) contains a mappable range of
896 896 * addresses at least "minlen" long. On success, 1 is returned and *basep
897 897 * and *lenp are adjusted to describe the acceptable range. On failure, 0
898 898 * is returned.
899 899 */
900 900 int
901 901 valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
902 902 {
903 903 return (valid_va_range_aligned(basep, lenp, minlen, dir, 0, 0, 0));
904 904 }
905 905
906 906 /*
907 907 * Determine whether [addr, addr+len] are valid user addresses.
908 908 */
909 909 /*ARGSUSED*/
910 910 int
911 911 valid_usr_range(caddr_t addr, size_t len, uint_t prot, struct as *as,
912 912 caddr_t userlimit)
913 913 {
914 914 caddr_t eaddr = addr + len;
915 915
916 916 if (eaddr <= addr || addr >= userlimit || eaddr > userlimit)
917 917 return (RANGE_BADADDR);
918 918
919 919 #if defined(__amd64)
920 920 /*
921 921 * Check for the VA hole
922 922 */
923 923 if (eaddr > (caddr_t)hole_start && addr < (caddr_t)hole_end)
924 924 return (RANGE_BADADDR);
925 925 #endif
926 926
927 927 return (RANGE_OKAY);
928 928 }
929 929
930 930 /*
931 931 * Return 1 if the page frame is onboard memory, else 0.
932 932 */
933 933 int
934 934 pf_is_memory(pfn_t pf)
935 935 {
936 936 if (pfn_is_foreign(pf))
937 937 return (0);
938 938 return (address_in_memlist(phys_install, pfn_to_pa(pf), 1));
939 939 }
940 940
941 941 /*
942 942 * return the memrange containing pfn
943 943 */
944 944 int
945 945 memrange_num(pfn_t pfn)
946 946 {
947 947 int n;
948 948
949 949 for (n = 0; n < nranges - 1; ++n) {
950 950 if (pfn >= memranges[n])
951 951 break;
952 952 }
953 953 return (n);
954 954 }
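/*
 * For example, pfn 0x90000 (2.25G) is below memranges[MRI_4G] but at or
 * above memranges[MRI_2G], so the loop stops at n == MRI_2G.
 */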
955 955
956 956 /*
957 957 * return the mnoderange containing pfn
958 958 */
959 959 /*ARGSUSED*/
960 960 int
961 961 pfn_2_mtype(pfn_t pfn)
962 962 {
963 963 #if defined(__xpv)
964 964 return (0);
965 965 #else
966 966 int n;
967 967
968 968 /* Always start from highest pfn and work our way down */
969 969 for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
970 970 if (pfn >= mnoderanges[n].mnr_pfnlo) {
971 971 break;
972 972 }
973 973 }
974 974 return (n);
975 975 #endif
976 976 }
977 977
978 978 #if !defined(__xpv)
979 979 /*
980 980 * is_contigpage_free:
981 981 * returns a page list of contiguous pages. It minimally has to return
982 982 * minctg pages. Caller determines minctg based on the scatter-gather
983 983 * list length.
984 984 *
985 985 * pfnp is set to the next page frame to search on return.
986 986 */
987 987 static page_t *
988 988 is_contigpage_free(
989 989 pfn_t *pfnp,
990 990 pgcnt_t *pgcnt,
991 991 pgcnt_t minctg,
992 992 uint64_t pfnseg,
993 993 int iolock)
994 994 {
995 995 int i = 0;
996 996 pfn_t pfn = *pfnp;
997 997 page_t *pp;
998 998 page_t *plist = NULL;
999 999
1000 1000 /*
1001 1001 * fail if pfn + minctg crosses a segment boundary.
1002 1002 * Adjust for next starting pfn to begin at segment boundary.
1003 1003 */
1004 1004
1005 1005 if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg)) {
1006 1006 *pfnp = roundup(*pfnp, pfnseg + 1);
1007 1007 return (NULL);
1008 1008 }
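	/*
	 * For example, a 16M dma_attr_seg gives pfnseg == 0xfff; a request
	 * for minctg == 4 pages at *pfnp == 0xffe wraps within the mask
	 * ((0xffe + 3) & 0xfff == 0x1 < 0xffe), so the search resumes at
	 * the next segment boundary, pfn 0x1000.
	 */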
1009 1009
1010 1010 do {
1011 1011 retry:
1012 1012 pp = page_numtopp_nolock(pfn + i);
1013 - if ((pp == NULL) || IS_DUMP_PAGE(pp) ||
1014 - (page_trylock(pp, SE_EXCL) == 0)) {
1013 + if ((pp == NULL) || (page_trylock(pp, SE_EXCL) == 0)) {
1015 1014 (*pfnp)++;
1016 1015 break;
1017 1016 }
1018 1017 if (page_pptonum(pp) != pfn + i) {
1019 1018 page_unlock(pp);
1020 1019 goto retry;
1021 1020 }
1022 1021
1023 1022 if (!(PP_ISFREE(pp))) {
1024 1023 page_unlock(pp);
1025 1024 (*pfnp)++;
1026 1025 break;
1027 1026 }
1028 1027
1029 1028 if (!PP_ISAGED(pp)) {
1030 1029 page_list_sub(pp, PG_CACHE_LIST);
1031 1030 page_hashout(pp, (kmutex_t *)NULL);
1032 1031 } else {
1033 1032 page_list_sub(pp, PG_FREE_LIST);
1034 1033 }
1035 1034
1036 1035 if (iolock)
1037 1036 page_io_lock(pp);
1038 1037 page_list_concat(&plist, &pp);
1039 1038
1040 1039 /*
1041 1040 * exit loop when pgcnt satisfied or segment boundary reached.
1042 1041 */
1043 1042
1044 1043 } while ((++i < *pgcnt) && ((pfn + i) & pfnseg));
1045 1044
1046 1045 *pfnp += i; /* set to next pfn to search */
1047 1046
1048 1047 if (i >= minctg) {
1049 1048 *pgcnt -= i;
1050 1049 return (plist);
1051 1050 }
1052 1051
1053 1052 /*
1054 1053 * failure: minctg not satisfied.
1055 1054 *
1056 1055 * if next request crosses segment boundary, set next pfn
1057 1056 * to search from the segment boundary.
1058 1057 */
1059 1058 if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg))
1060 1059 *pfnp = roundup(*pfnp, pfnseg + 1);
1061 1060
1062 1061 /* clean up any pages already allocated */
1063 1062
1064 1063 while (plist) {
1065 1064 pp = plist;
1066 1065 page_sub(&plist, pp);
1067 1066 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
1068 1067 if (iolock)
1069 1068 page_io_unlock(pp);
1070 1069 page_unlock(pp);
1071 1070 }
1072 1071
1073 1072 return (NULL);
1074 1073 }
1075 1074 #endif /* !__xpv */
1076 1075
1077 1076 /*
1078 1077  * verify that pages being returned from the allocator have the correct
1078 1077  * DMA attributes
1079 1078 */
1080 1079 #ifndef DEBUG
1081 1080 #define check_dma(a, b, c) (void)(0)
1082 1081 #else
1083 1082 static void
1084 1083 check_dma(ddi_dma_attr_t *dma_attr, page_t *pp, int cnt)
1085 1084 {
1086 1085 if (dma_attr == NULL)
1087 1086 return;
1088 1087
1089 1088 while (cnt-- > 0) {
1090 1089 if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) <
1091 1090 dma_attr->dma_attr_addr_lo)
1092 1091 panic("PFN (pp=%p) below dma_attr_addr_lo", (void *)pp);
1093 1092 if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) >=
1094 1093 dma_attr->dma_attr_addr_hi)
1095 1094 panic("PFN (pp=%p) above dma_attr_addr_hi", (void *)pp);
1096 1095 pp = pp->p_next;
1097 1096 }
1098 1097 }
1099 1098 #endif
1100 1099
1101 1100 #if !defined(__xpv)
1102 1101 static page_t *
1103 1102 page_get_contigpage(pgcnt_t *pgcnt, ddi_dma_attr_t *mattr, int iolock)
1104 1103 {
1105 1104 pfn_t pfn;
1106 1105 int sgllen;
1107 1106 uint64_t pfnseg;
1108 1107 pgcnt_t minctg;
1109 1108 page_t *pplist = NULL, *plist;
1110 1109 uint64_t lo, hi;
1111 1110 pgcnt_t pfnalign = 0;
1112 1111 static pfn_t startpfn;
1113 1112 static pgcnt_t lastctgcnt;
1114 1113 uintptr_t align;
1115 1114
1116 1115 CONTIG_LOCK();
1117 1116
1118 1117 if (mattr) {
1119 1118 lo = mmu_btop((mattr->dma_attr_addr_lo + MMU_PAGEOFFSET));
1120 1119 hi = mmu_btop(mattr->dma_attr_addr_hi);
1121 1120 if (hi >= physmax)
1122 1121 hi = physmax - 1;
1123 1122 sgllen = mattr->dma_attr_sgllen;
1124 1123 pfnseg = mmu_btop(mattr->dma_attr_seg);
1125 1124
1126 1125 align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
1127 1126 if (align > MMU_PAGESIZE)
1128 1127 pfnalign = mmu_btop(align);
1129 1128
1130 1129 /*
1131 1130 		 * in order to satisfy the request, we must minimally
1132 1131 * acquire minctg contiguous pages
1133 1132 */
1134 1133 minctg = howmany(*pgcnt, sgllen);
1135 1134
1136 1135 ASSERT(hi >= lo);
1137 1136
1138 1137 /*
1139 1138 * start from where last searched if the minctg >= lastctgcnt
1140 1139 */
1141 1140 if (minctg < lastctgcnt || startpfn < lo || startpfn > hi)
1142 1141 startpfn = lo;
1143 1142 } else {
1144 1143 hi = physmax - 1;
1145 1144 lo = 0;
1146 1145 sgllen = 1;
1147 1146 pfnseg = mmu.highest_pfn;
1148 1147 minctg = *pgcnt;
1149 1148
1150 1149 if (minctg < lastctgcnt)
1151 1150 startpfn = lo;
1152 1151 }
1153 1152 lastctgcnt = minctg;
1154 1153
1155 1154 ASSERT(pfnseg + 1 >= (uint64_t)minctg);
1156 1155
1157 1156 /* conserve 16m memory - start search above 16m when possible */
1158 1157 if (hi > PFN_16M && startpfn < PFN_16M)
1159 1158 startpfn = PFN_16M;
1160 1159
1161 1160 pfn = startpfn;
1162 1161 if (pfnalign)
1163 1162 pfn = P2ROUNDUP(pfn, pfnalign);
1164 1163
1165 1164 while (pfn + minctg - 1 <= hi) {
1166 1165
1167 1166 plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
1168 1167 if (plist) {
1169 1168 page_list_concat(&pplist, &plist);
1170 1169 sgllen--;
1171 1170 /*
1172 1171 * return when contig pages no longer needed
1173 1172 */
1174 1173 if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
1175 1174 startpfn = pfn;
1176 1175 CONTIG_UNLOCK();
1177 1176 check_dma(mattr, pplist, *pgcnt);
1178 1177 return (pplist);
1179 1178 }
1180 1179 minctg = howmany(*pgcnt, sgllen);
1181 1180 }
1182 1181 if (pfnalign)
1183 1182 pfn = P2ROUNDUP(pfn, pfnalign);
1184 1183 }
1185 1184
1186 1185 /* cannot find contig pages in specified range */
1187 1186 if (startpfn == lo) {
1188 1187 CONTIG_UNLOCK();
1189 1188 return (NULL);
1190 1189 }
1191 1190
1192 1191 /* did not start with lo previously */
1193 1192 pfn = lo;
1194 1193 if (pfnalign)
1195 1194 pfn = P2ROUNDUP(pfn, pfnalign);
1196 1195
1197 1196 /* allow search to go above startpfn */
1198 1197 while (pfn < startpfn) {
1199 1198
1200 1199 plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
1201 1200 if (plist != NULL) {
1202 1201
1203 1202 page_list_concat(&pplist, &plist);
1204 1203 sgllen--;
1205 1204
1206 1205 /*
1207 1206 * return when contig pages no longer needed
1208 1207 */
1209 1208 if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
1210 1209 startpfn = pfn;
1211 1210 CONTIG_UNLOCK();
1212 1211 check_dma(mattr, pplist, *pgcnt);
1213 1212 return (pplist);
1214 1213 }
1215 1214 minctg = howmany(*pgcnt, sgllen);
1216 1215 }
1217 1216 if (pfnalign)
1218 1217 pfn = P2ROUNDUP(pfn, pfnalign);
1219 1218 }
1220 1219 CONTIG_UNLOCK();
1221 1220 return (NULL);
1222 1221 }
1223 1222 #endif /* !__xpv */
1224 1223
1225 1224 /*
1226 1225 * mnode_range_cnt() calculates the number of memory ranges for mnode and
1227 1226 * memranges[]. Used to determine the size of page lists and mnoderanges.
1228 1227 */
1229 1228 int
1230 1229 mnode_range_cnt(int mnode)
1231 1230 {
1232 1231 #if defined(__xpv)
1233 1232 ASSERT(mnode == 0);
1234 1233 return (1);
1235 1234 #else /* __xpv */
1236 1235 int mri;
1237 1236 int mnrcnt = 0;
1238 1237
1239 1238 if (mem_node_config[mnode].exists != 0) {
1240 1239 mri = nranges - 1;
1241 1240
1242 1241 		/* find the highest memranges index that overlaps the mnode range */
1243 1242
1244 1243 while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1245 1244 mri--;
1246 1245
1247 1246 /*
1248 1247 * increment mnode range counter when memranges or mnode
1249 1248 * boundary is reached.
1250 1249 */
1251 1250 while (mri >= 0 &&
1252 1251 mem_node_config[mnode].physmax >= MEMRANGELO(mri)) {
1253 1252 mnrcnt++;
1254 1253 if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1255 1254 mri--;
1256 1255 else
1257 1256 break;
1258 1257 }
1259 1258 }
1260 1259 ASSERT(mnrcnt <= MAX_MNODE_MRANGES);
1261 1260 return (mnrcnt);
1262 1261 #endif /* __xpv */
1263 1262 }
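/*
 * For example, a single mnode spanning pfns 0 through 8G on a flat
 * (non-NUMA) machine overlaps MRI_0, MRI_16M, MRI_2G and MRI_4G, so
 * mnode_range_cnt() returns 4.
 */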
1264 1263
1265 1264 /*
1266 1265 * mnode_range_setup() initializes mnoderanges.
1267 1266 */
1268 1267 void
1269 1268 mnode_range_setup(mnoderange_t *mnoderanges)
1270 1269 {
1271 1270 mnoderange_t *mp = mnoderanges;
1272 1271 int mnode, mri;
1273 1272 int mindex = 0; /* current index into mnoderanges array */
1274 1273 int i, j;
1275 1274 pfn_t hipfn;
1276 1275 int last, hi;
1277 1276
1278 1277 for (mnode = 0; mnode < max_mem_nodes; mnode++) {
1279 1278 if (mem_node_config[mnode].exists == 0)
1280 1279 continue;
1281 1280
1282 1281 mri = nranges - 1;
1283 1282
1284 1283 while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1285 1284 mri--;
1286 1285
1287 1286 while (mri >= 0 && mem_node_config[mnode].physmax >=
1288 1287 MEMRANGELO(mri)) {
1289 1288 mnoderanges->mnr_pfnlo = MAX(MEMRANGELO(mri),
1290 1289 mem_node_config[mnode].physbase);
1291 1290 mnoderanges->mnr_pfnhi = MIN(MEMRANGEHI(mri),
1292 1291 mem_node_config[mnode].physmax);
1293 1292 mnoderanges->mnr_mnode = mnode;
1294 1293 mnoderanges->mnr_memrange = mri;
1295 1294 mnoderanges->mnr_exists = 1;
1296 1295 mnoderanges++;
1297 1296 mindex++;
1298 1297 if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1299 1298 mri--;
1300 1299 else
1301 1300 break;
1302 1301 }
1303 1302 }
1304 1303
1305 1304 /*
1306 1305 * For now do a simple sort of the mnoderanges array to fill in
1307 1306 * the mnr_next fields. Since mindex is expected to be relatively
1308 1307 * small, using a simple O(N^2) algorithm.
1309 1308 */
1310 1309 for (i = 0; i < mindex; i++) {
1311 1310 if (mp[i].mnr_pfnlo == 0) /* find lowest */
1312 1311 break;
1313 1312 }
1314 1313 ASSERT(i < mindex);
1315 1314 last = i;
1316 1315 mtype16m = last;
1317 1316 mp[last].mnr_next = -1;
1318 1317 for (i = 0; i < mindex - 1; i++) {
1319 1318 hipfn = (pfn_t)(-1);
1320 1319 hi = -1;
1321 1320 /* find next highest mnode range */
1322 1321 for (j = 0; j < mindex; j++) {
1323 1322 if (mp[j].mnr_pfnlo > mp[last].mnr_pfnlo &&
1324 1323 mp[j].mnr_pfnlo < hipfn) {
1325 1324 hipfn = mp[j].mnr_pfnlo;
1326 1325 hi = j;
1327 1326 }
1328 1327 }
1329 1328 mp[hi].mnr_next = last;
1330 1329 last = hi;
1331 1330 }
1332 1331 mtypetop = last;
1333 1332 }
1334 1333
1335 1334 #ifndef __xpv
1336 1335 /*
1337 1336 * Update mnoderanges for memory hot-add DR operations.
1338 1337 */
1339 1338 static void
1340 1339 mnode_range_add(int mnode)
1341 1340 {
1342 1341 int *prev;
1343 1342 int n, mri;
1344 1343 pfn_t start, end;
1345 1344 extern void membar_sync(void);
1346 1345
1347 1346 ASSERT(0 <= mnode && mnode < max_mem_nodes);
1348 1347 ASSERT(mem_node_config[mnode].exists);
1349 1348 start = mem_node_config[mnode].physbase;
1350 1349 end = mem_node_config[mnode].physmax;
1351 1350 ASSERT(start <= end);
1352 1351 mutex_enter(&mnoderange_lock);
1353 1352
1354 1353 #ifdef DEBUG
1355 1354 /* Check whether it interleaves with other memory nodes. */
1356 1355 for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
1357 1356 ASSERT(mnoderanges[n].mnr_exists);
1358 1357 if (mnoderanges[n].mnr_mnode == mnode)
1359 1358 continue;
1360 1359 ASSERT(start > mnoderanges[n].mnr_pfnhi ||
1361 1360 end < mnoderanges[n].mnr_pfnlo);
1362 1361 }
1363 1362 #endif /* DEBUG */
1364 1363
1365 1364 mri = nranges - 1;
1366 1365 while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1367 1366 mri--;
1368 1367 while (mri >= 0 && mem_node_config[mnode].physmax >= MEMRANGELO(mri)) {
1369 1368 /* Check whether mtype already exists. */
1370 1369 for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
1371 1370 if (mnoderanges[n].mnr_mnode == mnode &&
1372 1371 mnoderanges[n].mnr_memrange == mri) {
1373 1372 mnoderanges[n].mnr_pfnlo = MAX(MEMRANGELO(mri),
1374 1373 start);
1375 1374 mnoderanges[n].mnr_pfnhi = MIN(MEMRANGEHI(mri),
1376 1375 end);
1377 1376 break;
1378 1377 }
1379 1378 }
1380 1379
1381 1380 /* Add a new entry if it doesn't exist yet. */
1382 1381 if (n == -1) {
1383 1382 /* Try to find an unused entry in mnoderanges array. */
1384 1383 for (n = 0; n < mnoderangecnt; n++) {
1385 1384 if (mnoderanges[n].mnr_exists == 0)
1386 1385 break;
1387 1386 }
1388 1387 ASSERT(n < mnoderangecnt);
1389 1388 mnoderanges[n].mnr_pfnlo = MAX(MEMRANGELO(mri), start);
1390 1389 mnoderanges[n].mnr_pfnhi = MIN(MEMRANGEHI(mri), end);
1391 1390 mnoderanges[n].mnr_mnode = mnode;
1392 1391 mnoderanges[n].mnr_memrange = mri;
1393 1392 mnoderanges[n].mnr_exists = 1;
1394 1393 /* Page 0 should always be present. */
1395 1394 for (prev = &mtypetop;
1396 1395 mnoderanges[*prev].mnr_pfnlo > start;
1397 1396 prev = &mnoderanges[*prev].mnr_next) {
1398 1397 ASSERT(mnoderanges[*prev].mnr_next >= 0);
1399 1398 ASSERT(mnoderanges[*prev].mnr_pfnlo > end);
1400 1399 }
1401 1400 mnoderanges[n].mnr_next = *prev;
1402 1401 membar_sync();
1403 1402 *prev = n;
1404 1403 }
1405 1404
1406 1405 if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1407 1406 mri--;
1408 1407 else
1409 1408 break;
1410 1409 }
1411 1410
1412 1411 mutex_exit(&mnoderange_lock);
1413 1412 }
1414 1413
1415 1414 /*
1416 1415 * Update mnoderanges for memory hot-removal DR operations.
1417 1416 */
1418 1417 static void
1419 1418 mnode_range_del(int mnode)
1420 1419 {
1421 1420 _NOTE(ARGUNUSED(mnode));
1422 1421 ASSERT(0 <= mnode && mnode < max_mem_nodes);
1423 1422 /* TODO: support deletion operation. */
1424 1423 ASSERT(0);
1425 1424 }
1426 1425
1427 1426 void
1428 1427 plat_slice_add(pfn_t start, pfn_t end)
1429 1428 {
1430 1429 mem_node_add_slice(start, end);
1431 1430 if (plat_dr_enabled()) {
1432 1431 mnode_range_add(PFN_2_MEM_NODE(start));
1433 1432 }
1434 1433 }
1435 1434
1436 1435 void
1437 1436 plat_slice_del(pfn_t start, pfn_t end)
1438 1437 {
1439 1438 ASSERT(PFN_2_MEM_NODE(start) == PFN_2_MEM_NODE(end));
1440 1439 ASSERT(plat_dr_enabled());
1441 1440 mnode_range_del(PFN_2_MEM_NODE(start));
1442 1441 mem_node_del_slice(start, end);
1443 1442 }
1444 1443 #endif /* __xpv */
1445 1444
1446 1445 /*ARGSUSED*/
1447 1446 int
1448 1447 mtype_init(vnode_t *vp, caddr_t vaddr, uint_t *flags, size_t pgsz)
1449 1448 {
1450 1449 int mtype = mtypetop;
1451 1450
1452 1451 #if !defined(__xpv)
1453 1452 #if defined(__i386)
1454 1453 /*
1455 1454 * set the mtype range
1456 1455 * - kmem requests need to be below 4g if restricted_kmemalloc is set.
1457 1456 * - for non kmem requests, set range to above 4g if memory below 4g
1458 1457 * runs low.
1459 1458 */
1460 1459 if (restricted_kmemalloc && VN_ISKAS(vp) &&
1461 1460 (caddr_t)(vaddr) >= kernelheap &&
1462 1461 (caddr_t)(vaddr) < ekernelheap) {
1463 1462 ASSERT(physmax4g);
1464 1463 mtype = mtype4g;
1465 1464 if (RESTRICT16M_ALLOC(freemem4g - btop(pgsz),
1466 1465 btop(pgsz), *flags)) {
1467 1466 *flags |= PGI_MT_RANGE16M;
1468 1467 } else {
1469 1468 VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1470 1469 VM_STAT_COND_ADD((*flags & PG_PANIC),
1471 1470 vmm_vmstats.pgpanicalloc);
1472 1471 *flags |= PGI_MT_RANGE0;
1473 1472 }
1474 1473 return (mtype);
1475 1474 }
1476 1475 #endif /* __i386 */
1477 1476
1478 1477 if (RESTRICT4G_ALLOC) {
1479 1478 VM_STAT_ADD(vmm_vmstats.restrict4gcnt);
1480 1479 /* here only for > 4g systems */
1481 1480 *flags |= PGI_MT_RANGE4G;
1482 1481 } else if (RESTRICT16M_ALLOC(freemem, btop(pgsz), *flags)) {
1483 1482 *flags |= PGI_MT_RANGE16M;
1484 1483 } else {
1485 1484 VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1486 1485 VM_STAT_COND_ADD((*flags & PG_PANIC), vmm_vmstats.pgpanicalloc);
1487 1486 *flags |= PGI_MT_RANGE0;
1488 1487 }
1489 1488 #endif /* !__xpv */
1490 1489 return (mtype);
1491 1490 }
1492 1491
1493 1492
1494 1493 /* mtype init for page_get_replacement_page */
1495 1494 /*ARGSUSED*/
1496 1495 int
1497 1496 mtype_pgr_init(int *flags, page_t *pp, int mnode, pgcnt_t pgcnt)
1498 1497 {
1499 1498 int mtype = mtypetop;
1500 1499 #if !defined(__xpv)
1501 1500 if (RESTRICT16M_ALLOC(freemem, pgcnt, *flags)) {
1502 1501 *flags |= PGI_MT_RANGE16M;
1503 1502 } else {
1504 1503 VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1505 1504 *flags |= PGI_MT_RANGE0;
1506 1505 }
1507 1506 #endif
1508 1507 return (mtype);
1509 1508 }
1510 1509
1511 1510 /*
1512 1511 * Determine if the mnode range specified in mtype contains memory belonging
1513 1512 * to memory node mnode. If flags & PGI_MT_RANGE is set then mtype contains
1514 1513 * the range from high pfn to 0, 16m or 4g.
1515 1514 *
1516 1515 * Return first mnode range type index found otherwise return -1 if none found.
1517 1516 */
1518 1517 int
1519 1518 mtype_func(int mnode, int mtype, uint_t flags)
1520 1519 {
1521 1520 if (flags & PGI_MT_RANGE) {
1522 1521 int mnr_lim = MRI_0;
1523 1522
1524 1523 if (flags & PGI_MT_NEXT) {
1525 1524 mtype = mnoderanges[mtype].mnr_next;
1526 1525 }
1527 1526 if (flags & PGI_MT_RANGE4G)
1528 1527 mnr_lim = MRI_4G; /* exclude 0-4g range */
1529 1528 else if (flags & PGI_MT_RANGE16M)
1530 1529 mnr_lim = MRI_16M; /* exclude 0-16m range */
1531 1530 while (mtype != -1 &&
1532 1531 mnoderanges[mtype].mnr_memrange <= mnr_lim) {
1533 1532 if (mnoderanges[mtype].mnr_mnode == mnode)
1534 1533 return (mtype);
1535 1534 mtype = mnoderanges[mtype].mnr_next;
1536 1535 }
1537 1536 } else if (mnoderanges[mtype].mnr_mnode == mnode) {
1538 1537 return (mtype);
1539 1538 }
1540 1539 return (-1);
1541 1540 }
1542 1541
1543 1542 /*
1544 1543 * Update the page list max counts with the pfn range specified by the
1545 1544 * input parameters.
1546 1545 */
1547 1546 void
1548 1547 mtype_modify_max(pfn_t startpfn, long cnt)
1549 1548 {
1550 1549 int mtype;
1551 1550 pgcnt_t inc;
1552 1551 spgcnt_t scnt = (spgcnt_t)(cnt);
1553 1552 pgcnt_t acnt = ABS(scnt);
1554 1553 pfn_t endpfn = startpfn + acnt;
1555 1554 pfn_t pfn, lo;
1556 1555
1557 1556 if (!physmax4g)
1558 1557 return;
1559 1558
1560 1559 mtype = mtypetop;
1561 1560 for (pfn = endpfn; pfn > startpfn; ) {
1562 1561 ASSERT(mtype != -1);
1563 1562 lo = mnoderanges[mtype].mnr_pfnlo;
1564 1563 if (pfn > lo) {
1565 1564 if (startpfn >= lo) {
1566 1565 inc = pfn - startpfn;
1567 1566 } else {
1568 1567 inc = pfn - lo;
1569 1568 }
1570 1569 if (mnoderanges[mtype].mnr_memrange != MRI_4G) {
1571 1570 if (scnt > 0)
1572 1571 maxmem4g += inc;
1573 1572 else
1574 1573 maxmem4g -= inc;
1575 1574 }
1576 1575 pfn -= inc;
1577 1576 }
1578 1577 mtype = mnoderanges[mtype].mnr_next;
1579 1578 }
1580 1579 }
1581 1580
1582 1581 int
1583 1582 mtype_2_mrange(int mtype)
1584 1583 {
1585 1584 return (mnoderanges[mtype].mnr_memrange);
1586 1585 }
1587 1586
1588 1587 void
1589 1588 mnodetype_2_pfn(int mnode, int mtype, pfn_t *pfnlo, pfn_t *pfnhi)
1590 1589 {
1591 1590 _NOTE(ARGUNUSED(mnode));
1592 1591 ASSERT(mnoderanges[mtype].mnr_mnode == mnode);
1593 1592 *pfnlo = mnoderanges[mtype].mnr_pfnlo;
1594 1593 *pfnhi = mnoderanges[mtype].mnr_pfnhi;
1595 1594 }
1596 1595
1597 1596 size_t
1598 1597 plcnt_sz(size_t ctrs_sz)
1599 1598 {
1600 1599 #ifdef DEBUG
1601 1600 int szc, colors;
1602 1601
1603 1602 ctrs_sz += mnoderangecnt * sizeof (struct mnr_mts) * mmu_page_sizes;
1604 1603 for (szc = 0; szc < mmu_page_sizes; szc++) {
1605 1604 colors = page_get_pagecolors(szc);
1606 1605 ctrs_sz += mnoderangecnt * sizeof (pgcnt_t) * colors;
1607 1606 }
1608 1607 #endif
1609 1608 return (ctrs_sz);
1610 1609 }
1611 1610
1612 1611 caddr_t
1613 1612 plcnt_init(caddr_t addr)
1614 1613 {
1615 1614 #ifdef DEBUG
1616 1615 int mt, szc, colors;
1617 1616
1618 1617 for (mt = 0; mt < mnoderangecnt; mt++) {
1619 1618 mnoderanges[mt].mnr_mts = (struct mnr_mts *)addr;
1620 1619 addr += (sizeof (struct mnr_mts) * mmu_page_sizes);
1621 1620 for (szc = 0; szc < mmu_page_sizes; szc++) {
1622 1621 colors = page_get_pagecolors(szc);
1623 1622 mnoderanges[mt].mnr_mts[szc].mnr_mts_colors = colors;
1624 1623 mnoderanges[mt].mnr_mts[szc].mnr_mtsc_pgcnt =
1625 1624 (pgcnt_t *)addr;
1626 1625 addr += (sizeof (pgcnt_t) * colors);
1627 1626 }
1628 1627 }
1629 1628 #endif
1630 1629 return (addr);
1631 1630 }
1632 1631
1633 1632 void
1634 1633 plcnt_inc_dec(page_t *pp, int mtype, int szc, long cnt, int flags)
1635 1634 {
1636 1635 _NOTE(ARGUNUSED(pp));
1637 1636 #ifdef DEBUG
1638 1637 int bin = PP_2_BIN(pp);
1639 1638
1640 1639 atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mts_pgcnt, cnt);
1641 1640 atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mtsc_pgcnt[bin],
1642 1641 cnt);
1643 1642 #endif
1644 1643 ASSERT(mtype == PP_2_MTYPE(pp));
1645 1644 if (physmax4g && mnoderanges[mtype].mnr_memrange != MRI_4G)
1646 1645 atomic_add_long(&freemem4g, cnt);
1647 1646 if (flags & PG_CACHE_LIST)
1648 1647 atomic_add_long(&mnoderanges[mtype].mnr_mt_clpgcnt, cnt);
1649 1648 else
1650 1649 atomic_add_long(&mnoderanges[mtype].mnr_mt_flpgcnt[szc], cnt);
1651 1650 atomic_add_long(&mnoderanges[mtype].mnr_mt_totcnt, cnt);
1652 1651 }
1653 1652
1654 1653 /*
1655 1654 * Returns the free page count for mnode
1656 1655 */
1657 1656 int
1658 1657 mnode_pgcnt(int mnode)
1659 1658 {
1660 1659 int mtype = mtypetop;
1661 1660 int flags = PGI_MT_RANGE0;
1662 1661 pgcnt_t pgcnt = 0;
1663 1662
1664 1663 mtype = mtype_func(mnode, mtype, flags);
1665 1664
1666 1665 while (mtype != -1) {
1667 1666 pgcnt += MTYPE_FREEMEM(mtype);
1668 1667 mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT);
1669 1668 }
1670 1669 return (pgcnt);
1671 1670 }
1672 1671
1673 1672 /*
1674 1673 * Initialize page coloring variables based on the l2 cache parameters.
1675 1674 * Calculate and return memory needed for page coloring data structures.
1676 1675 */
1677 1676 size_t
1678 1677 page_coloring_init(uint_t l2_sz, int l2_linesz, int l2_assoc)
1679 1678 {
1680 1679 _NOTE(ARGUNUSED(l2_linesz));
1681 1680 size_t colorsz = 0;
1682 1681 int i;
1683 1682 int colors;
1684 1683
1685 1684 #if defined(__xpv)
1686 1685 /*
1687 1686 * Hypervisor domains currently don't have any concept of NUMA.
1688 1687 * Hence we'll act like there is only 1 memrange.
1689 1688 */
1690 1689 i = memrange_num(1);
1691 1690 #else /* !__xpv */
1692 1691 /*
1693 1692 	 * Reduce the memory range lists if we don't have large amounts
1694 1693 * of memory. This avoids searching known empty free lists.
1695 1694 * To support memory DR operations, we need to keep memory ranges
1696 1695 * for possible memory hot-add operations.
1697 1696 */
1698 1697 if (plat_dr_physmax > physmax)
1699 1698 i = memrange_num(plat_dr_physmax);
1700 1699 else
1701 1700 i = memrange_num(physmax);
1702 1701 #if defined(__i386)
1703 1702 if (i > MRI_4G)
1704 1703 restricted_kmemalloc = 0;
1705 1704 #endif
1706 1705 /* physmax greater than 4g */
1707 1706 if (i == MRI_4G)
1708 1707 physmax4g = 1;
1709 1708 #endif /* !__xpv */
1710 1709 memranges += i;
1711 1710 nranges -= i;
1712 1711
1713 1712 ASSERT(mmu_page_sizes <= MMU_PAGE_SIZES);
1714 1713
1715 1714 ASSERT(ISP2(l2_linesz));
1716 1715 ASSERT(l2_sz > MMU_PAGESIZE);
1717 1716
1718 1717 /* l2_assoc is 0 for fully associative l2 cache */
1719 1718 if (l2_assoc)
1720 1719 l2_colors = MAX(1, l2_sz / (l2_assoc * MMU_PAGESIZE));
1721 1720 else
1722 1721 l2_colors = 1;
1723 1722
1724 1723 ASSERT(ISP2(l2_colors));
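	/*
	 * For example, a 2MB 8-way set associative l2 with 4K pages gives
	 * l2_colors == 2M / (8 * 4K) == 64, and page_colors below becomes
	 * MAX(64, PAGE_COLORS_MIN) == 64.
	 */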
1725 1724
1726 1725 /* for scalability, configure at least PAGE_COLORS_MIN color bins */
1727 1726 page_colors = MAX(l2_colors, PAGE_COLORS_MIN);
1728 1727
1729 1728 /*
1730 1729 * cpu_page_colors is non-zero when a page color may be spread across
1731 1730 * multiple bins.
1732 1731 */
1733 1732 if (l2_colors < page_colors)
1734 1733 cpu_page_colors = l2_colors;
1735 1734
1736 1735 ASSERT(ISP2(page_colors));
1737 1736
1738 1737 page_colors_mask = page_colors - 1;
1739 1738
1740 1739 ASSERT(ISP2(CPUSETSIZE()));
1741 1740 page_coloring_shift = lowbit(CPUSETSIZE());
1742 1741
1743 1742 /* initialize number of colors per page size */
1744 1743 for (i = 0; i <= mmu.max_page_level; i++) {
1745 1744 hw_page_array[i].hp_size = LEVEL_SIZE(i);
1746 1745 hw_page_array[i].hp_shift = LEVEL_SHIFT(i);
1747 1746 hw_page_array[i].hp_pgcnt = LEVEL_SIZE(i) >> LEVEL_SHIFT(0);
1748 1747 hw_page_array[i].hp_colors = (page_colors_mask >>
1749 1748 (hw_page_array[i].hp_shift - hw_page_array[0].hp_shift))
1750 1749 + 1;
1751 1750 colorequivszc[i] = 0;
1752 1751 }
1753 1752
1754 1753 /*
1755 1754 * The value of cpu_page_colors determines if additional color bins
1756 1755 * need to be checked for a particular color in the page_get routines.
1757 1756 */
1758 1757 if (cpu_page_colors != 0) {
1759 1758
1760 1759 int a = lowbit(page_colors) - lowbit(cpu_page_colors);
1761 1760 ASSERT(a > 0);
1762 1761 ASSERT(a < 16);
1763 1762
1764 1763 for (i = 0; i <= mmu.max_page_level; i++) {
1765 1764 if ((colors = hw_page_array[i].hp_colors) <= 1) {
1766 1765 colorequivszc[i] = 0;
1767 1766 continue;
1768 1767 }
1769 1768 while ((colors >> a) == 0)
1770 1769 a--;
1771 1770 ASSERT(a >= 0);
1772 1771
1773 1772 			/* higher 4 bits encode the color equiv mask */
1774 1773 colorequivszc[i] = (a << 4);
1775 1774 }
1776 1775 }
1777 1776
1778 1777 /* factor in colorequiv to check additional 'equivalent' bins. */
1779 1778 if (colorequiv > 1) {
1780 1779
1781 1780 int a = lowbit(colorequiv) - 1;
1782 1781 if (a > 15)
1783 1782 a = 15;
1784 1783
1785 1784 for (i = 0; i <= mmu.max_page_level; i++) {
1786 1785 if ((colors = hw_page_array[i].hp_colors) <= 1) {
1787 1786 continue;
1788 1787 }
1789 1788 while ((colors >> a) == 0)
1790 1789 a--;
1791 1790 if ((a << 4) > colorequivszc[i]) {
1792 1791 colorequivszc[i] = (a << 4);
1793 1792 }
1794 1793 }
1795 1794 }
1796 1795
1797 1796 /* size for mnoderanges */
1798 1797 for (mnoderangecnt = 0, i = 0; i < max_mem_nodes; i++)
1799 1798 mnoderangecnt += mnode_range_cnt(i);
1800 1799 if (plat_dr_support_memory()) {
1801 1800 /*
1802 1801 * Reserve enough space for memory DR operations.
1803 1802 		 * Two extra mnoderanges for possible fragmentation,
1804 1803 * one for the 2G boundary and the other for the 4G boundary.
1805 1804 * We don't expect a memory board crossing the 16M boundary
1806 1805 * for memory hot-add operations on x86 platforms.
1807 1806 */
1808 1807 mnoderangecnt += 2 + max_mem_nodes - lgrp_plat_node_cnt;
1809 1808 }
1810 1809 colorsz = mnoderangecnt * sizeof (mnoderange_t);
1811 1810
1812 1811 /* size for fpc_mutex and cpc_mutex */
1813 1812 colorsz += (2 * max_mem_nodes * sizeof (kmutex_t) * NPC_MUTEX);
1814 1813
1815 1814 /* size of page_freelists */
1816 1815 colorsz += mnoderangecnt * sizeof (page_t ***);
1817 1816 colorsz += mnoderangecnt * mmu_page_sizes * sizeof (page_t **);
1818 1817
1819 1818 for (i = 0; i < mmu_page_sizes; i++) {
1820 1819 colors = page_get_pagecolors(i);
1821 1820 colorsz += mnoderangecnt * colors * sizeof (page_t *);
1822 1821 }
1823 1822
1824 1823 /* size of page_cachelists */
1825 1824 colorsz += mnoderangecnt * sizeof (page_t **);
1826 1825 colorsz += mnoderangecnt * page_colors * sizeof (page_t *);
1827 1826
1828 1827 return (colorsz);
1829 1828 }
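
As a worked example of the color arithmetic above, here is a minimal stand-alone sketch (user-space C, not the kernel code) that computes l2_colors and page_colors for a hypothetical 2MB, 8-way L2 cache with 4KB pages; the PAGE_COLORS_MIN value is an assumption for illustration.

    #include <stdio.h>
    #include <stddef.h>

    #define MMU_PAGESIZE    4096
    #define PAGE_COLORS_MIN 16      /* assumed value, for illustration */
    #define MAX(a, b)       ((a) > (b) ? (a) : (b))

    int
    main(void)
    {
            size_t l2_sz = 2 * 1024 * 1024; /* 2MB L2 */
            int l2_assoc = 8;               /* 8-way; 0 means fully assoc */

            /* same computation as page_coloring_init() above */
            int l2_colors = l2_assoc ?
                MAX(1, l2_sz / (l2_assoc * MMU_PAGESIZE)) : 1;
            int page_colors = MAX(l2_colors, PAGE_COLORS_MIN);

            printf("l2_colors %d, page_colors %d, mask 0x%x\n",
                l2_colors, page_colors, page_colors - 1);
            /* prints: l2_colors 64, page_colors 64, mask 0x3f */
            return (0);
    }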
1830 1829
1831 1830 /*
1832 1831  * Called once at startup to configure the page_coloring data structures and
1833 1832  * do the first page_free()/page_freelist_add().
1834 1833 */
1835 1834 void
1836 1835 page_coloring_setup(caddr_t pcmemaddr)
1837 1836 {
1838 1837 int i;
1839 1838 int j;
1840 1839 int k;
1841 1840 caddr_t addr;
1842 1841 int colors;
1843 1842
1844 1843 /*
1845 1844 * do page coloring setup
1846 1845 */
1847 1846 addr = pcmemaddr;
1848 1847
1849 1848 mnoderanges = (mnoderange_t *)addr;
1850 1849 addr += (mnoderangecnt * sizeof (mnoderange_t));
1851 1850
1852 1851 mnode_range_setup(mnoderanges);
1853 1852
1854 1853 if (physmax4g)
1855 1854 mtype4g = pfn_2_mtype(0xfffff);
1856 1855
1857 1856 for (k = 0; k < NPC_MUTEX; k++) {
1858 1857 fpc_mutex[k] = (kmutex_t *)addr;
1859 1858 addr += (max_mem_nodes * sizeof (kmutex_t));
1860 1859 }
1861 1860 for (k = 0; k < NPC_MUTEX; k++) {
1862 1861 cpc_mutex[k] = (kmutex_t *)addr;
1863 1862 addr += (max_mem_nodes * sizeof (kmutex_t));
1864 1863 }
1865 1864 page_freelists = (page_t ****)addr;
1866 1865 addr += (mnoderangecnt * sizeof (page_t ***));
1867 1866
1868 1867 page_cachelists = (page_t ***)addr;
1869 1868 addr += (mnoderangecnt * sizeof (page_t **));
1870 1869
1871 1870 for (i = 0; i < mnoderangecnt; i++) {
1872 1871 page_freelists[i] = (page_t ***)addr;
1873 1872 addr += (mmu_page_sizes * sizeof (page_t **));
1874 1873
1875 1874 for (j = 0; j < mmu_page_sizes; j++) {
1876 1875 colors = page_get_pagecolors(j);
1877 1876 page_freelists[i][j] = (page_t **)addr;
1878 1877 addr += (colors * sizeof (page_t *));
1879 1878 }
1880 1879 page_cachelists[i] = (page_t **)addr;
1881 1880 addr += (page_colors * sizeof (page_t *));
1882 1881 }
1883 1882 }
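
The setup routine above carves a single pre-sized allocation (pcmemaddr, sized earlier by page_coloring_init()) into several arrays by advancing a cursor. A minimal sketch of that carving pattern, with illustrative names and malloc standing in for the boot-time allocation:

    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
            int nranges = 4, nlocks = 8;
            size_t sz = nranges * sizeof (int *) + nlocks * sizeof (long);
            char *addr = malloc(sz);        /* one allocation... */
            char *base = addr;

            int **ranges = (int **)addr;    /* ...first carve */
            addr += nranges * sizeof (int *);

            long *locks = (long *)addr;     /* ...second carve */
            addr += nlocks * sizeof (long);

            printf("%zu bytes carved into %d pointers and %d locks\n",
                sz, nranges, nlocks);
            (void) ranges;
            (void) locks;
            free(base);
            return (0);
    }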
1884 1883
1885 1884 #if defined(__xpv)
1886 1885 /*
1887 1886 * Give back 10% of the io_pool pages to the free list.
1888 1887 * Don't shrink the pool below some absolute minimum.
1889 1888 */
1890 1889 static void
1891 1890 page_io_pool_shrink()
1892 1891 {
1893 1892 int retcnt;
1894 1893 page_t *pp, *pp_first, *pp_last, **curpool;
1895 1894 mfn_t mfn;
1896 1895 int bothpools = 0;
1897 1896
1898 1897 mutex_enter(&io_pool_lock);
1899 1898 io_pool_shrink_attempts++; /* should be a kstat? */
1900 1899 retcnt = io_pool_cnt / 10;
1901 1900 if (io_pool_cnt - retcnt < io_pool_cnt_min)
1902 1901 retcnt = io_pool_cnt - io_pool_cnt_min;
1903 1902 if (retcnt <= 0)
1904 1903 goto done;
1905 1904 io_pool_shrinks++; /* should be a kstat? */
1906 1905 curpool = &io_pool_4g;
1907 1906 domore:
1908 1907 /*
1909 1908 * Loop through taking pages from the end of the list
1910 1909 	 * (highest mfns) until the amount to return is reached.
1911 1910 */
1912 1911 for (pp = *curpool; pp && retcnt > 0; ) {
1913 1912 pp_first = pp_last = pp->p_prev;
1914 1913 if (pp_first == *curpool)
1915 1914 break;
1916 1915 retcnt--;
1917 1916 io_pool_cnt--;
1918 1917 page_io_pool_sub(curpool, pp_first, pp_last);
1919 1918 if ((mfn = pfn_to_mfn(pp->p_pagenum)) < start_mfn)
1920 1919 start_mfn = mfn;
1921 1920 page_free(pp_first, 1);
1922 1921 pp = *curpool;
1923 1922 }
1924 1923 if (retcnt != 0 && !bothpools) {
1925 1924 /*
1926 1925 		 * If not enough were found in the less constrained pool,
1927 1926 		 * try the more constrained one.
1928 1927 */
1929 1928 curpool = &io_pool_16m;
1930 1929 bothpools = 1;
1931 1930 goto domore;
1932 1931 }
1933 1932 done:
1934 1933 mutex_exit(&io_pool_lock);
1935 1934 }
1936 1935
1937 1936 #endif /* __xpv */
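
To make the shrink accounting concrete: with 100 pages in the pool and a minimum of 95, the 10% target of 10 pages is clamped to 5. A minimal sketch of just that clamping, with assumed numbers:

    #include <stdio.h>

    int
    main(void)
    {
            int io_pool_cnt = 100, io_pool_cnt_min = 95;
            int retcnt = io_pool_cnt / 10;  /* want to return 10 */

            if (io_pool_cnt - retcnt < io_pool_cnt_min)
                    retcnt = io_pool_cnt - io_pool_cnt_min; /* clamp to 5 */
            if (retcnt > 0)
                    printf("returning %d of %d pages\n",
                        retcnt, io_pool_cnt);
            return (0);
    }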
1938 1937
1939 1938 uint_t
1940 1939 page_create_update_flags_x86(uint_t flags)
1941 1940 {
1942 1941 #if defined(__xpv)
1943 1942 /*
1944 1943 	 * Check whether this is an urgent allocation and free pages are depleted.
1945 1944 */
1946 1945 if (!(flags & PG_WAIT) && freemem < desfree)
1947 1946 page_io_pool_shrink();
1948 1947 #else /* !__xpv */
1949 1948 /*
1950 1949 * page_create_get_something may call this because 4g memory may be
1951 1950 * depleted. Set flags to allow for relocation of base page below
1952 1951 * 4g if necessary.
1953 1952 */
1954 1953 if (physmax4g)
1955 1954 flags |= (PGI_PGCPSZC0 | PGI_PGCPHIPRI);
1956 1955 #endif /* __xpv */
1957 1956 return (flags);
1958 1957 }
1959 1958
1960 1959 /*ARGSUSED*/
1961 1960 int
1962 1961 bp_color(struct buf *bp)
1963 1962 {
1964 1963 return (0);
1965 1964 }
1966 1965
1967 1966 #if defined(__xpv)
1968 1967
1969 1968 /*
1970 1969 * Take pages out of an io_pool
1971 1970 */
1972 1971 static void
1973 1972 page_io_pool_sub(page_t **poolp, page_t *pp_first, page_t *pp_last)
1974 1973 {
1975 1974 if (*poolp == pp_first) {
1976 1975 *poolp = pp_last->p_next;
1977 1976 if (*poolp == pp_first)
1978 1977 *poolp = NULL;
1979 1978 }
1980 1979 pp_first->p_prev->p_next = pp_last->p_next;
1981 1980 pp_last->p_next->p_prev = pp_first->p_prev;
1982 1981 pp_first->p_prev = pp_last;
1983 1982 pp_last->p_next = pp_first;
1984 1983 }
1985 1984
1986 1985 /*
1987 1986 * Put a page on the io_pool list. The list is ordered by increasing MFN.
1988 1987 */
1989 1988 static void
1990 1989 page_io_pool_add(page_t **poolp, page_t *pp)
1991 1990 {
1992 1991 page_t *look;
1993 1992 mfn_t mfn = mfn_list[pp->p_pagenum];
1994 1993
1995 1994 if (*poolp == NULL) {
1996 1995 *poolp = pp;
1997 1996 pp->p_next = pp;
1998 1997 pp->p_prev = pp;
1999 1998 return;
2000 1999 }
2001 2000
2002 2001 /*
2003 2002 	 * Since we try to take pages from the high end of the pool,
2004 2003 	 * chances are good that the pages to be put on the list will
2005 2004 	 * go at or near the end of the list, so start at the end and
2006 2005 * work backwards.
2007 2006 */
2008 2007 look = (*poolp)->p_prev;
2009 2008 while (mfn < mfn_list[look->p_pagenum]) {
2010 2009 look = look->p_prev;
2011 2010 if (look == (*poolp)->p_prev)
2012 2011 break; /* backed all the way to front of list */
2013 2012 }
2014 2013
2015 2014 /* insert after look */
2016 2015 pp->p_prev = look;
2017 2016 pp->p_next = look->p_next;
2018 2017 pp->p_next->p_prev = pp;
2019 2018 look->p_next = pp;
2020 2019 if (mfn < mfn_list[(*poolp)->p_pagenum]) {
2021 2020 /*
2022 2021 			 * We inserted a new first list element;
2023 2022 			 * adjust the pool pointer to the newly inserted element.
2024 2023 */
2025 2024 *poolp = pp;
2026 2025 }
2027 2026 }
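
The insertion above maintains a circular, doubly linked list in ascending MFN order, scanning backwards from the tail since new pages usually belong near the end. A self-contained sketch of the same technique (generic names; the mfn field stands in for mfn_list[pp->p_pagenum]):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct node {
            struct node *next, *prev;
            unsigned long mfn;
    } node_t;

    /* Insert np into the circular list *headp, kept in ascending order. */
    static void
    ordered_insert(node_t **headp, node_t *np)
    {
            node_t *look;

            if (*headp == NULL) {
                    *headp = np->next = np->prev = np;
                    return;
            }
            look = (*headp)->prev;          /* start at the tail */
            while (np->mfn < look->mfn) {
                    look = look->prev;
                    if (look == (*headp)->prev)
                            break;          /* wrapped all the way around */
            }
            np->prev = look;                /* insert after look */
            np->next = look->next;
            np->next->prev = np;
            look->next = np;
            if (np->mfn < (*headp)->mfn)
                    *headp = np;            /* new smallest element */
    }

    int
    main(void)
    {
            unsigned long vals[] = { 5, 2, 9 };
            node_t *head = NULL, *p;
            int i;

            for (i = 0; i < 3; i++) {
                    node_t *np = calloc(1, sizeof (*np));
                    np->mfn = vals[i];
                    ordered_insert(&head, np);
            }
            p = head;
            do {
                    printf("%lu ", p->mfn); /* prints: 2 5 9 */
                    p = p->next;
            } while (p != head);
            printf("\n");
            return (0);
    }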
2028 2027
2029 2028 /*
2030 2029 * Add a page to the io_pool. Setting the force flag will force the page
2031 2030 * into the io_pool no matter what.
2032 2031 */
2033 2032 static void
2034 2033 add_page_to_pool(page_t *pp, int force)
2035 2034 {
2036 2035 page_t *highest;
2037 2036 page_t *freep = NULL;
2038 2037
2039 2038 mutex_enter(&io_pool_lock);
2040 2039 /*
2041 2040 * Always keep the scarce low memory pages
2042 2041 */
2043 2042 if (mfn_list[pp->p_pagenum] < PFN_16MEG) {
2044 2043 ++io_pool_cnt;
2045 2044 page_io_pool_add(&io_pool_16m, pp);
2046 2045 goto done;
2047 2046 }
2048 2047 if (io_pool_cnt < io_pool_cnt_max || force || io_pool_4g == NULL) {
2049 2048 ++io_pool_cnt;
2050 2049 page_io_pool_add(&io_pool_4g, pp);
2051 2050 } else {
2052 2051 highest = io_pool_4g->p_prev;
2053 2052 if (mfn_list[pp->p_pagenum] < mfn_list[highest->p_pagenum]) {
2054 2053 page_io_pool_sub(&io_pool_4g, highest, highest);
2055 2054 page_io_pool_add(&io_pool_4g, pp);
2056 2055 freep = highest;
2057 2056 } else {
2058 2057 freep = pp;
2059 2058 }
2060 2059 }
2061 2060 done:
2062 2061 mutex_exit(&io_pool_lock);
2063 2062 if (freep)
2064 2063 page_free(freep, 1);
2065 2064 }
2066 2065
2067 2066
2068 2067 int contig_pfn_cnt; /* no of pfns in the contig pfn list */
2069 2068 int contig_pfn_max; /* capacity of the contig pfn list */
2070 2069 int next_alloc_pfn; /* next position in list to start a contig search */
2071 2070 int contig_pfnlist_updates; /* pfn list update count */
2072 2071 int contig_pfnlist_builds; /* how many times have we (re)built list */
2073 2072 int contig_pfnlist_buildfailed; /* how many times has list build failed */
2074 2073 int create_contig_pending; /* nonzero means taskq creating contig list */
2075 2074 pfn_t *contig_pfn_list = NULL; /* list of contig pfns in ascending mfn order */
2076 2075
2077 2076 /*
2078 2077 * Function to use in sorting a list of pfns by their underlying mfns.
2079 2078 */
2080 2079 static int
2081 2080 mfn_compare(const void *pfnp1, const void *pfnp2)
2082 2081 {
2083 2082 mfn_t mfn1 = mfn_list[*(pfn_t *)pfnp1];
2084 2083 mfn_t mfn2 = mfn_list[*(pfn_t *)pfnp2];
2085 2084
2086 2085 if (mfn1 > mfn2)
2087 2086 return (1);
2088 2087 if (mfn1 < mfn2)
2089 2088 return (-1);
2090 2089 return (0);
2091 2090 }
2092 2091
2093 2092 /*
2094 2093 * Compact the contig_pfn_list by tossing all the non-contiguous
2095 2094 * elements from the list.
2096 2095 */
2097 2096 static void
2098 2097 compact_contig_pfn_list(void)
2099 2098 {
2100 2099 pfn_t pfn, lapfn, prev_lapfn;
2101 2100 mfn_t mfn;
2102 2101 int i, newcnt = 0;
2103 2102
2104 2103 prev_lapfn = 0;
2105 2104 for (i = 0; i < contig_pfn_cnt - 1; i++) {
2106 2105 pfn = contig_pfn_list[i];
2107 2106 lapfn = contig_pfn_list[i + 1];
2108 2107 mfn = mfn_list[pfn];
2109 2108 /*
2110 2109 * See if next pfn is for a contig mfn
2111 2110 */
2112 2111 if (mfn_list[lapfn] != mfn + 1)
2113 2112 continue;
2114 2113 /*
2115 2114 * pfn and lookahead are both put in list
2116 2115 * unless pfn is the previous lookahead.
2117 2116 */
2118 2117 if (pfn != prev_lapfn)
2119 2118 contig_pfn_list[newcnt++] = pfn;
2120 2119 contig_pfn_list[newcnt++] = lapfn;
2121 2120 prev_lapfn = lapfn;
2122 2121 }
2123 2122 for (i = newcnt; i < contig_pfn_cnt; i++)
2124 2123 contig_pfn_list[i] = 0;
2125 2124 contig_pfn_cnt = newcnt;
2126 2125 }
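
A worked example of the compaction: with map[] standing in for mfn_list and a list of indexes into it, only entries whose mapped values form consecutive runs survive. A stand-alone sketch (not the kernel code); the in-place overwrite is safe because newcnt never gets ahead of the read position:

    #include <stdio.h>

    int
    main(void)
    {
            /* map[] stands in for mfn_list; list[] holds indexes into it */
            unsigned long map[] = { 10, 11, 12, 20, 30, 31 };
            int list[] = { 0, 1, 2, 3, 4, 5 };
            int cnt = 6, newcnt = 0, prev_la = -1, i;

            for (i = 0; i < cnt - 1; i++) {
                    int pfn = list[i], lapfn = list[i + 1];

                    if (map[lapfn] != map[pfn] + 1)
                            continue;       /* not a contiguous pair */
                    if (pfn != prev_la)
                            list[newcnt++] = pfn;
                    list[newcnt++] = lapfn;
                    prev_la = lapfn;
            }
            cnt = newcnt;
            for (i = 0; i < cnt; i++)
                    printf("%d ", list[i]); /* prints: 0 1 2 4 5 */
            printf("\n");
            return (0);
    }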
2127 2126
2128 2127 /*ARGSUSED*/
2129 2128 static void
2130 2129 call_create_contiglist(void *arg)
2131 2130 {
2132 2131 (void) create_contig_pfnlist(PG_WAIT);
2133 2132 }
2134 2133
2135 2134 /*
2136 2135 * Create list of freelist pfns that have underlying
2137 2136 * contiguous mfns. The list is kept in ascending mfn order.
2138 2137  * Returns 1 if the list was created, else 0.
2139 2138 */
2140 2139 static int
2141 2140 create_contig_pfnlist(uint_t flags)
2142 2141 {
2143 2142 pfn_t pfn;
2144 2143 page_t *pp;
2145 2144 int ret = 1;
2146 2145
2147 2146 mutex_enter(&contig_list_lock);
2148 2147 if (contig_pfn_list != NULL)
2149 2148 goto out;
2150 2149 contig_pfn_max = freemem + (freemem / 10);
2151 2150 contig_pfn_list = kmem_zalloc(contig_pfn_max * sizeof (pfn_t),
2152 2151 (flags & PG_WAIT) ? KM_SLEEP : KM_NOSLEEP);
2153 2152 if (contig_pfn_list == NULL) {
2154 2153 /*
2155 2154 * If we could not create the contig list (because
2156 2155 		 * we could not sleep for memory), dispatch a taskq that
2157 2156 		 * can sleep to get the memory.
2158 2157 */
2159 2158 if (!create_contig_pending) {
2160 2159 if (taskq_dispatch(system_taskq, call_create_contiglist,
2161 2160 NULL, TQ_NOSLEEP) != NULL)
2162 2161 create_contig_pending = 1;
2163 2162 }
2164 2163 contig_pfnlist_buildfailed++; /* count list build failures */
2165 2164 ret = 0;
2166 2165 goto out;
2167 2166 }
2168 2167 create_contig_pending = 0;
2169 2168 ASSERT(contig_pfn_cnt == 0);
2170 2169 for (pfn = 0; pfn < mfn_count; pfn++) {
2171 2170 pp = page_numtopp_nolock(pfn);
2172 2171 if (pp == NULL || !PP_ISFREE(pp))
2173 2172 continue;
2174 2173 contig_pfn_list[contig_pfn_cnt] = pfn;
2175 2174 if (++contig_pfn_cnt == contig_pfn_max)
2176 2175 break;
2177 2176 }
2178 2177 /*
2179 2178 * Sanity check the new list.
2180 2179 */
2181 2180 if (contig_pfn_cnt < 2) { /* no contig pfns */
2182 2181 contig_pfn_cnt = 0;
2183 2182 contig_pfnlist_buildfailed++;
2184 2183 kmem_free(contig_pfn_list, contig_pfn_max * sizeof (pfn_t));
2185 2184 contig_pfn_list = NULL;
2186 2185 contig_pfn_max = 0;
2187 2186 ret = 0;
2188 2187 goto out;
2189 2188 }
2190 2189 qsort(contig_pfn_list, contig_pfn_cnt, sizeof (pfn_t), mfn_compare);
2191 2190 compact_contig_pfn_list();
2192 2191 /*
2193 2192 * Make sure next search of the newly created contiguous pfn
2194 2193 * list starts at the beginning of the list.
2195 2194 */
2196 2195 next_alloc_pfn = 0;
2197 2196 contig_pfnlist_builds++; /* count list builds */
2198 2197 out:
2199 2198 mutex_exit(&contig_list_lock);
2200 2199 return (ret);
2201 2200 }
2202 2201
2203 2202
2204 2203 /*
2205 2204 * Toss the current contig pfnlist. Someone is about to do a massive
2206 2205  * update to the pfn<->mfn mappings, so we have them destroy the list and
2207 2206  * hold the lock until they are done with their update.
2208 2207 */
2209 2208 void
2210 2209 clear_and_lock_contig_pfnlist()
2211 2210 {
2212 2211 pfn_t *listp = NULL;
2213 2212 size_t listsize;
2214 2213
2215 2214 mutex_enter(&contig_list_lock);
2216 2215 if (contig_pfn_list != NULL) {
2217 2216 listp = contig_pfn_list;
2218 2217 listsize = contig_pfn_max * sizeof (pfn_t);
2219 2218 contig_pfn_list = NULL;
2220 2219 contig_pfn_max = contig_pfn_cnt = 0;
2221 2220 }
2222 2221 if (listp != NULL)
2223 2222 kmem_free(listp, listsize);
2224 2223 }
2225 2224
2226 2225 /*
2227 2226 * Unlock the contig_pfn_list. The next attempted use of it will cause
2228 2227 * it to be re-created.
2229 2228 */
2230 2229 void
2231 2230 unlock_contig_pfnlist()
2232 2231 {
2233 2232 mutex_exit(&contig_list_lock);
2234 2233 }
2235 2234
2236 2235 /*
2237 2236 * Update the contiguous pfn list in response to a pfn <-> mfn reassignment
2238 2237 */
2239 2238 void
2240 2239 update_contig_pfnlist(pfn_t pfn, mfn_t oldmfn, mfn_t newmfn)
2241 2240 {
2242 2241 int probe_hi, probe_lo, probe_pos, insert_after, insert_point;
2243 2242 pfn_t probe_pfn;
2244 2243 mfn_t probe_mfn;
2245 2244 int drop_lock = 0;
2246 2245
2247 2246 if (mutex_owner(&contig_list_lock) != curthread) {
2248 2247 drop_lock = 1;
2249 2248 mutex_enter(&contig_list_lock);
2250 2249 }
2251 2250 if (contig_pfn_list == NULL)
2252 2251 goto done;
2253 2252 contig_pfnlist_updates++;
2254 2253 /*
2255 2254 * Find the pfn in the current list. Use a binary chop to locate it.
2256 2255 */
2257 2256 probe_hi = contig_pfn_cnt - 1;
2258 2257 probe_lo = 0;
2259 2258 probe_pos = (probe_hi + probe_lo) / 2;
2260 2259 while ((probe_pfn = contig_pfn_list[probe_pos]) != pfn) {
2261 2260 if (probe_pos == probe_lo) { /* pfn not in list */
2262 2261 probe_pos = -1;
2263 2262 break;
2264 2263 }
2265 2264 if (pfn_to_mfn(probe_pfn) <= oldmfn)
2266 2265 probe_lo = probe_pos;
2267 2266 else
2268 2267 probe_hi = probe_pos;
2269 2268 probe_pos = (probe_hi + probe_lo) / 2;
2270 2269 }
2271 2270 if (probe_pos >= 0) {
2272 2271 /*
2273 2272 * Remove pfn from list and ensure next alloc
2274 2273 * position stays in bounds.
2275 2274 */
2276 2275 if (--contig_pfn_cnt <= next_alloc_pfn)
2277 2276 next_alloc_pfn = 0;
2278 2277 if (contig_pfn_cnt < 2) { /* no contig pfns */
2279 2278 contig_pfn_cnt = 0;
2280 2279 kmem_free(contig_pfn_list,
2281 2280 contig_pfn_max * sizeof (pfn_t));
2282 2281 contig_pfn_list = NULL;
2283 2282 contig_pfn_max = 0;
2284 2283 goto done;
2285 2284 }
2286 2285 ovbcopy(&contig_pfn_list[probe_pos + 1],
2287 2286 &contig_pfn_list[probe_pos],
2288 2287 (contig_pfn_cnt - probe_pos) * sizeof (pfn_t));
2289 2288 }
2290 2289 if (newmfn == MFN_INVALID)
2291 2290 goto done;
2292 2291 /*
2293 2292 * Check if new mfn has adjacent mfns in the list
2294 2293 */
2295 2294 probe_hi = contig_pfn_cnt - 1;
2296 2295 probe_lo = 0;
2297 2296 insert_after = -2;
2298 2297 do {
2299 2298 probe_pos = (probe_hi + probe_lo) / 2;
2300 2299 probe_mfn = pfn_to_mfn(contig_pfn_list[probe_pos]);
2301 2300 if (newmfn == probe_mfn + 1)
2302 2301 insert_after = probe_pos;
2303 2302 else if (newmfn == probe_mfn - 1)
2304 2303 insert_after = probe_pos - 1;
2305 2304 if (probe_pos == probe_lo)
2306 2305 break;
2307 2306 if (probe_mfn <= newmfn)
2308 2307 probe_lo = probe_pos;
2309 2308 else
2310 2309 probe_hi = probe_pos;
2311 2310 } while (insert_after == -2);
2312 2311 /*
2313 2312 	 * If there is space in the list and there are adjacent mfns,
2314 2313 	 * insert the pfn into its proper place in the list.
2315 2314 */
2316 2315 if (insert_after != -2 && contig_pfn_cnt + 1 <= contig_pfn_max) {
2317 2316 insert_point = insert_after + 1;
2318 2317 ovbcopy(&contig_pfn_list[insert_point],
2319 2318 &contig_pfn_list[insert_point + 1],
2320 2319 (contig_pfn_cnt - insert_point) * sizeof (pfn_t));
2321 2320 contig_pfn_list[insert_point] = pfn;
2322 2321 contig_pfn_cnt++;
2323 2322 }
2324 2323 done:
2325 2324 if (drop_lock)
2326 2325 mutex_exit(&contig_list_lock);
2327 2326 }
2328 2327
2329 2328 /*
2330 2329 * Called to (re-)populate the io_pool from the free page lists.
2331 2330 */
2332 2331 long
2333 2332 populate_io_pool(void)
2334 2333 {
2335 2334 pfn_t pfn;
2336 2335 mfn_t mfn, max_mfn;
2337 2336 page_t *pp;
2338 2337
2339 2338 /*
2340 2339 * Figure out the bounds of the pool on first invocation.
2341 2340 * We use a percentage of memory for the io pool size.
2342 2341 	 * We allow that to shrink, but not to less than a fixed minimum.
2343 2342 */
2344 2343 if (io_pool_cnt_max == 0) {
2345 2344 io_pool_cnt_max = physmem / (100 / io_pool_physmem_pct);
2346 2345 io_pool_cnt_lowater = io_pool_cnt_max;
2347 2346 /*
2348 2347 * This is the first time in populate_io_pool, grab a va to use
2349 2348 * when we need to allocate pages.
2350 2349 */
2351 2350 io_pool_kva = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
2352 2351 }
2353 2352 /*
2354 2353 * If we are out of pages in the pool, then grow the size of the pool
2355 2354 */
2356 2355 if (io_pool_cnt == 0) {
2357 2356 /*
2358 2357 * Grow the max size of the io pool by 5%, but never more than
2359 2358 * 25% of physical memory.
2360 2359 */
2361 2360 if (io_pool_cnt_max < physmem / 4)
2362 2361 io_pool_cnt_max += io_pool_cnt_max / 20;
2363 2362 }
2364 2363 io_pool_grows++; /* should be a kstat? */
2365 2364
2366 2365 /*
2367 2366 * Get highest mfn on this platform, but limit to the 32 bit DMA max.
2368 2367 */
2369 2368 (void) mfn_to_pfn(start_mfn);
2370 2369 max_mfn = MIN(cached_max_mfn, PFN_4GIG);
2371 2370 for (mfn = start_mfn; mfn < max_mfn; start_mfn = ++mfn) {
2372 2371 pfn = mfn_to_pfn(mfn);
2373 2372 if (pfn & PFN_IS_FOREIGN_MFN)
2374 2373 continue;
2375 2374 /*
2376 2375 * try to allocate it from free pages
2377 2376 */
2378 2377 pp = page_numtopp_alloc(pfn);
2379 2378 if (pp == NULL)
2380 2379 continue;
2381 2380 PP_CLRFREE(pp);
2382 2381 add_page_to_pool(pp, 1);
2383 2382 if (io_pool_cnt >= io_pool_cnt_max)
2384 2383 break;
2385 2384 }
2386 2385
2387 2386 return (io_pool_cnt);
2388 2387 }
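
A worked example of the pool sizing above, with assumed numbers (1M pages of physical memory, a 2% initial pool). Note the integer division: the percentage is applied as physmem / (100 / pct), so it is only exact for values of pct that divide 100 evenly.

    #include <stdio.h>

    int
    main(void)
    {
            long physmem = 1024 * 1024;     /* pages; assumed machine size */
            int pct = 2;                    /* io_pool_physmem_pct, assumed */
            long max = physmem / (100 / pct);       /* 2% -> 20971 pages */

            /* one refill-time growth step: +5%, capped at 25% of physmem */
            if (max < physmem / 4)
                    max += max / 20;

            printf("initial %ld, after one growth %ld, cap %ld\n",
                physmem / (100 / pct), max, physmem / 4);
            return (0);
    }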
2389 2388
2390 2389 /*
2391 2390 * Destroy a page that was being used for DMA I/O. It may or
2392 2391 * may not actually go back to the io_pool.
2393 2392 */
2394 2393 void
2395 2394 page_destroy_io(page_t *pp)
2396 2395 {
2397 2396 mfn_t mfn = mfn_list[pp->p_pagenum];
2398 2397
2399 2398 /*
2400 2399 	 * When the page was alloc'd, a reservation was made; release it now.
2401 2400 */
2402 2401 page_unresv(1);
2403 2402 /*
2404 2403 * Unload translations, if any, then hash out the
2405 2404 * page to erase its identity.
2406 2405 */
2407 2406 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
2408 2407 page_hashout(pp, NULL);
2409 2408
2410 2409 /*
2411 2410 * If the page came from the free lists, just put it back to them.
2412 2411 * DomU pages always go on the free lists as well.
2413 2412 */
2414 2413 if (!DOMAIN_IS_INITDOMAIN(xen_info) || mfn >= PFN_4GIG) {
2415 2414 page_free(pp, 1);
2416 2415 return;
2417 2416 }
2418 2417
2419 2418 add_page_to_pool(pp, 0);
2420 2419 }
2421 2420
2422 2421
2423 2422 long contig_searches; /* count of times contig pages requested */
2424 2423 long contig_search_restarts; /* count of contig ranges tried */
2425 2424 long contig_search_failed; /* count of contig alloc failures */
2426 2425
2427 2426 /*
2428 2427 * Free partial page list
2429 2428 */
2430 2429 static void
2431 2430 free_partial_list(page_t **pplist)
2432 2431 {
2433 2432 page_t *pp;
2434 2433
2435 2434 while (*pplist != NULL) {
2436 2435 pp = *pplist;
2437 2436 page_io_pool_sub(pplist, pp, pp);
2438 2437 page_free(pp, 1);
2439 2438 }
2440 2439 }
2441 2440
2442 2441 /*
2443 2442 * Look thru the contiguous pfns that are not part of the io_pool for
2444 2443 * contiguous free pages. Return a list of the found pages or NULL.
2445 2444 */
2446 2445 page_t *
2447 2446 find_contig_free(uint_t npages, uint_t flags, uint64_t pfnseg,
2448 2447 pgcnt_t pfnalign)
2449 2448 {
2450 2449 page_t *pp, *plist = NULL;
2451 2450 mfn_t mfn, prev_mfn, start_mfn;
2452 2451 pfn_t pfn;
2453 2452 int pages_needed, pages_requested;
2454 2453 int search_start;
2455 2454
2456 2455 /*
2457 2456 * create the contig pfn list if not already done
2458 2457 */
2459 2458 retry:
2460 2459 mutex_enter(&contig_list_lock);
2461 2460 if (contig_pfn_list == NULL) {
2462 2461 mutex_exit(&contig_list_lock);
2463 2462 if (!create_contig_pfnlist(flags)) {
2464 2463 return (NULL);
2465 2464 }
2466 2465 goto retry;
2467 2466 }
2468 2467 contig_searches++;
2469 2468 /*
2470 2469 * Search contiguous pfn list for physically contiguous pages not in
2471 2470 * the io_pool. Start the search where the last search left off.
2472 2471 */
2473 2472 pages_requested = pages_needed = npages;
2474 2473 search_start = next_alloc_pfn;
2475 2474 start_mfn = prev_mfn = 0;
2476 2475 while (pages_needed) {
2477 2476 pfn = contig_pfn_list[next_alloc_pfn];
2478 2477 mfn = pfn_to_mfn(pfn);
2479 2478 /*
2480 2479 		 * Check that this mfn is either the first one or contiguous
2481 2480 		 * with the previous one, that the corresponding page is free,
2482 2481 		 * and that the mfn range does not cross a segment boundary.
2483 2482 */
2484 2483 if ((prev_mfn == 0 || mfn == prev_mfn + 1) &&
2485 2484 (pp = page_numtopp_alloc(pfn)) != NULL &&
2486 2485 !((mfn & pfnseg) < (start_mfn & pfnseg))) {
2487 2486 PP_CLRFREE(pp);
2488 2487 page_io_pool_add(&plist, pp);
2489 2488 pages_needed--;
2490 2489 if (prev_mfn == 0) {
2491 2490 if (pfnalign &&
2492 2491 mfn != P2ROUNDUP(mfn, pfnalign)) {
2493 2492 /*
2494 2493 * not properly aligned
2495 2494 */
2496 2495 contig_search_restarts++;
2497 2496 free_partial_list(&plist);
2498 2497 pages_needed = pages_requested;
2499 2498 start_mfn = prev_mfn = 0;
2500 2499 goto skip;
2501 2500 }
2502 2501 start_mfn = mfn;
2503 2502 }
2504 2503 prev_mfn = mfn;
2505 2504 } else {
2506 2505 contig_search_restarts++;
2507 2506 free_partial_list(&plist);
2508 2507 pages_needed = pages_requested;
2509 2508 start_mfn = prev_mfn = 0;
2510 2509 }
2511 2510 skip:
2512 2511 if (++next_alloc_pfn == contig_pfn_cnt)
2513 2512 next_alloc_pfn = 0;
2514 2513 if (next_alloc_pfn == search_start)
2515 2514 break; /* all pfns searched */
2516 2515 }
2517 2516 mutex_exit(&contig_list_lock);
2518 2517 if (pages_needed) {
2519 2518 contig_search_failed++;
2520 2519 /*
2521 2520 * Failed to find enough contig pages.
2522 2521 		 * Free the partial page list.
2523 2522 */
2524 2523 free_partial_list(&plist);
2525 2524 }
2526 2525 return (plist);
2527 2526 }
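
The search loop above accumulates a run of consecutive MFNs, throwing away the partial run whenever it hits a gap; note that, as in the code, the page that breaks a run is not reused to start the next one. A stand-alone sketch of that circular scan over a toy array (prev == 0 serves as the "no run" sentinel, as above):

    #include <stdio.h>

    int
    main(void)
    {
            unsigned long mfns[] = { 3, 4, 7, 10, 11, 12 };
            int cnt = 6, need = 3, got = 0;
            int next = 0, start = next;
            unsigned long prev = 0, first = 0;

            while (got < need) {
                    unsigned long m = mfns[next];

                    if (prev == 0 || m == prev + 1) {
                            if (prev == 0)
                                    first = m;      /* run starts here */
                            got++;
                            prev = m;
                    } else {
                            got = 0;        /* gap: drop the partial run */
                            prev = 0;
                    }
                    if (++next == cnt)
                            next = 0;       /* wrap, like the kernel scan */
                    if (next == start)
                            break;          /* searched everything */
            }
            if (got == need)
                    printf("run of %d starting at mfn %lu\n", need, first);
            /* prints: run of 3 starting at mfn 10 */
            return (0);
    }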
2528 2527
2529 2528 /*
2530 2529 * Search the reserved io pool pages for a page range with the
2531 2530 * desired characteristics.
2532 2531 */
2533 2532 page_t *
2534 2533 page_io_pool_alloc(ddi_dma_attr_t *mattr, int contig, pgcnt_t minctg)
2535 2534 {
2536 2535 page_t *pp_first, *pp_last;
2537 2536 page_t *pp, **poolp;
2538 2537 pgcnt_t nwanted, pfnalign;
2539 2538 uint64_t pfnseg;
2540 2539 mfn_t mfn, tmfn, hi_mfn, lo_mfn;
2541 2540 int align, attempt = 0;
2542 2541
2543 2542 if (minctg == 1)
2544 2543 contig = 0;
2545 2544 lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
2546 2545 hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
2547 2546 pfnseg = mmu_btop(mattr->dma_attr_seg);
2548 2547 align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
2549 2548 if (align > MMU_PAGESIZE)
2550 2549 pfnalign = mmu_btop(align);
2551 2550 else
2552 2551 pfnalign = 0;
2553 2552
2554 2553 try_again:
2555 2554 /*
2556 2555 * See if we want pages for a legacy device
2557 2556 */
2558 2557 if (hi_mfn < PFN_16MEG)
2559 2558 poolp = &io_pool_16m;
2560 2559 else
2561 2560 poolp = &io_pool_4g;
2562 2561 try_smaller:
2563 2562 /*
2564 2563 * Take pages from I/O pool. We'll use pages from the highest
2565 2564 * MFN range possible.
2566 2565 */
2567 2566 pp_first = pp_last = NULL;
2568 2567 mutex_enter(&io_pool_lock);
2569 2568 nwanted = minctg;
2570 2569 for (pp = *poolp; pp && nwanted > 0; ) {
2571 2570 pp = pp->p_prev;
2572 2571
2573 2572 /*
2574 2573 * skip pages above allowable range
2575 2574 */
2576 2575 mfn = mfn_list[pp->p_pagenum];
2577 2576 if (hi_mfn < mfn)
2578 2577 goto skip;
2579 2578
2580 2579 /*
2581 2580 * stop at pages below allowable range
2582 2581 */
2583 2582 if (lo_mfn > mfn)
2584 2583 break;
2585 2584 restart:
2586 2585 if (pp_last == NULL) {
2587 2586 /*
2588 2587 * Check alignment
2589 2588 */
2590 2589 tmfn = mfn - (minctg - 1);
2591 2590 if (pfnalign && tmfn != P2ROUNDUP(tmfn, pfnalign))
2592 2591 goto skip; /* not properly aligned */
2593 2592 /*
2594 2593 * Check segment
2595 2594 */
2596 2595 if ((mfn & pfnseg) < (tmfn & pfnseg))
2597 2596 goto skip; /* crosses seg boundary */
2598 2597 /*
2599 2598 * Start building page list
2600 2599 */
2601 2600 pp_first = pp_last = pp;
2602 2601 nwanted--;
2603 2602 } else {
2604 2603 /*
2605 2604 * check physical contiguity if required
2606 2605 */
2607 2606 if (contig &&
2608 2607 mfn_list[pp_first->p_pagenum] != mfn + 1) {
2609 2608 /*
2610 2609 * not a contiguous page, restart list.
2611 2610 */
2612 2611 pp_last = NULL;
2613 2612 nwanted = minctg;
2614 2613 goto restart;
2615 2614 } else { /* add page to list */
2616 2615 pp_first = pp;
2617 2616 nwanted--;
2618 2617 }
2619 2618 }
2620 2619 skip:
2621 2620 if (pp == *poolp)
2622 2621 break;
2623 2622 }
2624 2623
2625 2624 /*
2626 2625 	 * If we didn't find memory, try the more constrained pool, then
2627 2626 * sweep free pages into the DMA pool and try again.
2628 2627 */
2629 2628 if (nwanted != 0) {
2630 2629 mutex_exit(&io_pool_lock);
2631 2630 /*
2632 2631 * If we were looking in the less constrained pool and
2633 2632 * didn't find pages, try the more constrained pool.
2634 2633 */
2635 2634 if (poolp == &io_pool_4g) {
2636 2635 poolp = &io_pool_16m;
2637 2636 goto try_smaller;
2638 2637 }
2639 2638 kmem_reap();
2640 2639 if (++attempt < 4) {
2641 2640 /*
2642 2641 * Grab some more io_pool pages
2643 2642 */
2644 2643 (void) populate_io_pool();
2645 2644 goto try_again; /* go around and retry */
2646 2645 }
2647 2646 return (NULL);
2648 2647 }
2649 2648 /*
2650 2649 * Found the pages, now snip them from the list
2651 2650 */
2652 2651 page_io_pool_sub(poolp, pp_first, pp_last);
2653 2652 io_pool_cnt -= minctg;
2654 2653 /*
2655 2654 * reset low water mark
2656 2655 */
2657 2656 if (io_pool_cnt < io_pool_cnt_lowater)
2658 2657 io_pool_cnt_lowater = io_pool_cnt;
2659 2658 mutex_exit(&io_pool_lock);
2660 2659 return (pp_first);
2661 2660 }
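
A worked example of the alignment and segment checks above, using a hypothetical 16-page DMA segment (pfnseg == 0xf), a 4-page alignment, and a 4-page candidate run ending at mfn 0x12; both tests fail for this run. P2ROUNDUP is the usual power-of-two round-up macro.

    #include <stdio.h>

    #define P2ROUNDUP(x, align)     (-(-(x) & -(align)))

    int
    main(void)
    {
            unsigned long pfnseg = 0xf;     /* 16-page segment (assumed) */
            unsigned long pfnalign = 4;     /* 4-page alignment (assumed) */
            unsigned long minctg = 4;
            unsigned long mfn = 0x12;       /* last page of candidate run */
            unsigned long tmfn = mfn - (minctg - 1);        /* first: 0xf */

            if (pfnalign != 0 && tmfn != P2ROUNDUP(tmfn, pfnalign))
                    printf("0x%lx not aligned to %lu pages\n",
                        tmfn, pfnalign);
            if ((mfn & pfnseg) < (tmfn & pfnseg))
                    printf("run 0x%lx..0x%lx crosses a segment boundary\n",
                        tmfn, mfn);
            return (0);
    }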
2662 2661
2663 2662 page_t *
2664 2663 page_swap_with_hypervisor(struct vnode *vp, u_offset_t off, caddr_t vaddr,
2665 2664 ddi_dma_attr_t *mattr, uint_t flags, pgcnt_t minctg)
2666 2665 {
2667 2666 uint_t kflags;
2668 2667 int order, extra, extpages, i, contig, nbits, extents;
2669 2668 page_t *pp, *expp, *pp_first, **pplist = NULL;
2670 2669 mfn_t *mfnlist = NULL;
2671 2670
2672 2671 contig = flags & PG_PHYSCONTIG;
2673 2672 if (minctg == 1)
2674 2673 contig = 0;
2675 2674 flags &= ~PG_PHYSCONTIG;
2676 2675 kflags = flags & PG_WAIT ? KM_SLEEP : KM_NOSLEEP;
2677 2676 /*
2678 2677 	 * The hypervisor will allocate extents; if we want contig
2679 2678 	 * pages, the extent must be >= minctg.
2680 2679 */
2681 2680 if (contig) {
2682 2681 order = highbit(minctg) - 1;
2683 2682 if (minctg & ((1 << order) - 1))
2684 2683 order++;
2685 2684 extpages = 1 << order;
2686 2685 } else {
2687 2686 order = 0;
2688 2687 extpages = minctg;
2689 2688 }
2690 2689 if (extpages > minctg) {
2691 2690 extra = extpages - minctg;
2692 2691 if (!page_resv(extra, kflags))
2693 2692 return (NULL);
2694 2693 }
2695 2694 pp_first = NULL;
2696 2695 pplist = kmem_alloc(extpages * sizeof (page_t *), kflags);
2697 2696 if (pplist == NULL)
2698 2697 goto balloon_fail;
2699 2698 mfnlist = kmem_alloc(extpages * sizeof (mfn_t), kflags);
2700 2699 if (mfnlist == NULL)
2701 2700 goto balloon_fail;
2702 2701 pp = page_create_va(vp, off, minctg * PAGESIZE, flags, &kvseg, vaddr);
2703 2702 if (pp == NULL)
2704 2703 goto balloon_fail;
2705 2704 pp_first = pp;
2706 2705 if (extpages > minctg) {
2707 2706 /*
2708 2707 * fill out the rest of extent pages to swap
2709 2708 * with the hypervisor
2710 2709 */
2711 2710 for (i = 0; i < extra; i++) {
2712 2711 expp = page_create_va(vp,
2713 2712 (u_offset_t)(uintptr_t)io_pool_kva,
2714 2713 PAGESIZE, flags, &kvseg, io_pool_kva);
2715 2714 if (expp == NULL)
2716 2715 goto balloon_fail;
2717 2716 (void) hat_pageunload(expp, HAT_FORCE_PGUNLOAD);
2718 2717 page_io_unlock(expp);
2719 2718 page_hashout(expp, NULL);
2720 2719 page_io_lock(expp);
2721 2720 /*
2722 2721 * add page to end of list
2723 2722 */
2724 2723 expp->p_prev = pp_first->p_prev;
2725 2724 expp->p_next = pp_first;
2726 2725 expp->p_prev->p_next = expp;
2727 2726 pp_first->p_prev = expp;
2728 2727 }
2729 2728
2730 2729 }
2731 2730 for (i = 0; i < extpages; i++) {
2732 2731 pplist[i] = pp;
2733 2732 pp = pp->p_next;
2734 2733 }
2735 2734 nbits = highbit(mattr->dma_attr_addr_hi);
2736 2735 extents = contig ? 1 : minctg;
2737 2736 if (balloon_replace_pages(extents, pplist, nbits, order,
2738 2737 mfnlist) != extents) {
2739 2738 if (ioalloc_dbg)
2740 2739 cmn_err(CE_NOTE, "request to hypervisor"
2741 2740 " for %d pages, maxaddr %" PRIx64 " failed",
2742 2741 extpages, mattr->dma_attr_addr_hi);
2743 2742 goto balloon_fail;
2744 2743 }
2745 2744
2746 2745 kmem_free(pplist, extpages * sizeof (page_t *));
2747 2746 kmem_free(mfnlist, extpages * sizeof (mfn_t));
2748 2747 /*
2749 2748 * Return any excess pages to free list
2750 2749 */
2751 2750 if (extpages > minctg) {
2752 2751 for (i = 0; i < extra; i++) {
2753 2752 pp = pp_first->p_prev;
2754 2753 page_sub(&pp_first, pp);
2755 2754 page_io_unlock(pp);
2756 2755 page_unresv(1);
2757 2756 page_free(pp, 1);
2758 2757 }
2759 2758 }
2760 2759 return (pp_first);
2761 2760 balloon_fail:
2762 2761 /*
2763 2762 * Return pages to free list and return failure
2764 2763 */
2765 2764 while (pp_first != NULL) {
2766 2765 pp = pp_first;
2767 2766 page_sub(&pp_first, pp);
2768 2767 page_io_unlock(pp);
2769 2768 if (pp->p_vnode != NULL)
2770 2769 page_hashout(pp, NULL);
2771 2770 page_free(pp, 1);
2772 2771 }
2773 2772 if (pplist)
2774 2773 kmem_free(pplist, extpages * sizeof (page_t *));
2775 2774 if (mfnlist)
2776 2775 kmem_free(mfnlist, extpages * sizeof (mfn_t));
2777 2776 page_unresv(extpages - minctg);
2778 2777 return (NULL);
2779 2778 }
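
A worked example of the extent-order math above, assuming highbit() returns the 1-based position of the highest set bit: for minctg == 5, the order is rounded up to 3, the hypervisor extent is 8 pages, and 3 extra pages must be returned. A stand-alone sketch:

    #include <stdio.h>

    /* 1-based position of the highest set bit, as assumed above */
    static int
    highbit(unsigned long v)
    {
            int b = 0;

            while (v != 0) {
                    b++;
                    v >>= 1;
            }
            return (b);
    }

    int
    main(void)
    {
            unsigned long minctg = 5;
            int order = highbit(minctg) - 1;        /* 2 for minctg == 5 */

            if (minctg & ((1UL << order) - 1))
                    order++;        /* not a power of two: round up to 3 */
            printf("order %d, extent %lu pages, extra %lu returned\n",
                order, 1UL << order, (1UL << order) - minctg);
            /* prints: order 3, extent 8 pages, extra 3 returned */
            return (0);
    }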
2780 2779
2781 2780 static void
2782 2781 return_partial_alloc(page_t *plist)
2783 2782 {
2784 2783 page_t *pp;
2785 2784
2786 2785 while (plist != NULL) {
2787 2786 pp = plist;
2788 2787 page_sub(&plist, pp);
2789 2788 page_io_unlock(pp);
2790 2789 page_destroy_io(pp);
2791 2790 }
2792 2791 }
2793 2792
2794 2793 static page_t *
2795 2794 page_get_contigpages(
2796 2795 struct vnode *vp,
2797 2796 u_offset_t off,
2798 2797 int *npagesp,
2799 2798 uint_t flags,
2800 2799 caddr_t vaddr,
2801 2800 ddi_dma_attr_t *mattr)
2802 2801 {
2803 2802 mfn_t max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
2804 2803 page_t *plist; /* list to return */
2805 2804 page_t *pp, *mcpl;
2806 2805 int contig, anyaddr, npages, getone = 0;
2807 2806 mfn_t lo_mfn;
2808 2807 mfn_t hi_mfn;
2809 2808 pgcnt_t pfnalign = 0;
2810 2809 int align, sgllen;
2811 2810 uint64_t pfnseg;
2812 2811 pgcnt_t minctg;
2813 2812
2814 2813 npages = *npagesp;
2815 2814 ASSERT(mattr != NULL);
2816 2815 lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
2817 2816 hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
2818 2817 sgllen = mattr->dma_attr_sgllen;
2819 2818 pfnseg = mmu_btop(mattr->dma_attr_seg);
2820 2819 align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
2821 2820 if (align > MMU_PAGESIZE)
2822 2821 pfnalign = mmu_btop(align);
2823 2822
2824 2823 contig = flags & PG_PHYSCONTIG;
2825 2824 if (npages == -1) {
2826 2825 npages = 1;
2827 2826 pfnalign = 0;
2828 2827 }
2829 2828 /*
2830 2829 * Clear the contig flag if only one page is needed.
2831 2830 */
2832 2831 if (npages == 1) {
2833 2832 getone = 1;
2834 2833 contig = 0;
2835 2834 }
2836 2835
2837 2836 /*
2838 2837 * Check if any page in the system is fine.
2839 2838 */
2840 2839 anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn;
2841 2840 if (!contig && anyaddr && !pfnalign) {
2842 2841 flags &= ~PG_PHYSCONTIG;
2843 2842 plist = page_create_va(vp, off, npages * MMU_PAGESIZE,
2844 2843 flags, &kvseg, vaddr);
2845 2844 if (plist != NULL) {
2846 2845 *npagesp = 0;
2847 2846 return (plist);
2848 2847 }
2849 2848 }
2850 2849 plist = NULL;
2851 2850 minctg = howmany(npages, sgllen);
2852 2851 while (npages > sgllen || getone) {
2853 2852 if (minctg > npages)
2854 2853 minctg = npages;
2855 2854 mcpl = NULL;
2856 2855 /*
2857 2856 		 * We may want contig pages with no address range limits.
2858 2857 */
2859 2858 if (anyaddr && contig) {
2860 2859 /*
2861 2860 * Look for free contig pages to satisfy the request.
2862 2861 */
2863 2862 mcpl = find_contig_free(minctg, flags, pfnseg,
2864 2863 pfnalign);
2865 2864 }
2866 2865 /*
2867 2866 * Try the reserved io pools next
2868 2867 */
2869 2868 if (mcpl == NULL)
2870 2869 mcpl = page_io_pool_alloc(mattr, contig, minctg);
2871 2870 if (mcpl != NULL) {
2872 2871 pp = mcpl;
2873 2872 do {
2874 2873 if (!page_hashin(pp, vp, off, NULL)) {
2875 2874 panic("page_get_contigpages:"
2876 2875 " hashin failed"
2877 2876 " pp %p, vp %p, off %llx",
2878 2877 (void *)pp, (void *)vp, off);
2879 2878 }
2880 2879 off += MMU_PAGESIZE;
2881 2880 PP_CLRFREE(pp);
2882 2881 PP_CLRAGED(pp);
2883 2882 page_set_props(pp, P_REF);
2884 2883 page_io_lock(pp);
2885 2884 pp = pp->p_next;
2886 2885 } while (pp != mcpl);
2887 2886 } else {
2888 2887 /*
2889 2888 * Hypervisor exchange doesn't handle segment or
2890 2889 * alignment constraints
2891 2890 */
2892 2891 if (mattr->dma_attr_seg < mattr->dma_attr_addr_hi ||
2893 2892 pfnalign)
2894 2893 goto fail;
2895 2894 /*
2896 2895 * Try exchanging pages with the hypervisor
2897 2896 */
2898 2897 mcpl = page_swap_with_hypervisor(vp, off, vaddr, mattr,
2899 2898 flags, minctg);
2900 2899 if (mcpl == NULL)
2901 2900 goto fail;
2902 2901 off += minctg * MMU_PAGESIZE;
2903 2902 }
2904 2903 check_dma(mattr, mcpl, minctg);
2905 2904 /*
2906 2905 * Here with a minctg run of contiguous pages, add them to the
2907 2906 * list we will return for this request.
2908 2907 */
2909 2908 page_list_concat(&plist, &mcpl);
2910 2909 npages -= minctg;
2911 2910 *npagesp = npages;
2912 2911 sgllen--;
2913 2912 if (getone)
2914 2913 break;
2915 2914 }
2916 2915 return (plist);
2917 2916 fail:
2918 2917 return_partial_alloc(plist);
2919 2918 return (NULL);
2920 2919 }
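
A sketch of the chunking above: a 10-page request against a 3-entry scatter/gather list is satisfied as runs of howmany(10, 3) == 4 pages, with one list entry consumed per run (stand-alone illustration, assumed numbers):

    #include <stdio.h>

    #define howmany(x, y)   (((x) + ((y) - 1)) / (y))

    int
    main(void)
    {
            int npages = 10, sgllen = 3;
            int minctg = howmany(npages, sgllen);   /* ceil(10/3) == 4 */

            while (npages > sgllen) {
                    if (minctg > npages)
                            minctg = npages;
                    printf("contiguous run of %d pages\n", minctg);
                    npages -= minctg;
                    sgllen--;       /* one scatter/gather entry used */
            }
            printf("%d pages left for %d remaining entries\n",
                npages, sgllen);
            return (0);
    }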
2921 2920
2922 2921 /*
2923 2922 * Allocator for domain 0 I/O pages. We match the required
2924 2923 * DMA attributes and contiguity constraints.
2925 2924 */
2926 2925 /*ARGSUSED*/
2927 2926 page_t *
2928 2927 page_create_io(
2929 2928 struct vnode *vp,
2930 2929 u_offset_t off,
2931 2930 uint_t bytes,
2932 2931 uint_t flags,
2933 2932 struct as *as,
2934 2933 caddr_t vaddr,
2935 2934 ddi_dma_attr_t *mattr)
2936 2935 {
2937 2936 page_t *plist = NULL, *pp;
2938 2937 int npages = 0, contig, anyaddr, pages_req;
2939 2938 mfn_t lo_mfn;
2940 2939 mfn_t hi_mfn;
2941 2940 pgcnt_t pfnalign = 0;
2942 2941 int align;
2943 2942 int is_domu = 0;
2944 2943 int dummy, bytes_got;
2945 2944 mfn_t max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
2946 2945
2947 2946 ASSERT(mattr != NULL);
2948 2947 lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
2949 2948 hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
2950 2949 align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
2951 2950 if (align > MMU_PAGESIZE)
2952 2951 pfnalign = mmu_btop(align);
2953 2952
2954 2953 /*
2955 2954 * Clear the contig flag if only one page is needed or the scatter
2956 2955 * gather list length is >= npages.
2957 2956 */
2958 2957 pages_req = npages = mmu_btopr(bytes);
2959 2958 contig = (flags & PG_PHYSCONTIG);
2960 2959 bytes = P2ROUNDUP(bytes, MMU_PAGESIZE);
2961 2960 if (bytes == MMU_PAGESIZE || mattr->dma_attr_sgllen >= npages)
2962 2961 contig = 0;
2963 2962
2964 2963 /*
2965 2964 * Check if any old page in the system is fine.
2966 2965 * DomU should always go down this path.
2967 2966 */
2968 2967 is_domu = !DOMAIN_IS_INITDOMAIN(xen_info);
2969 2968 anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn && !pfnalign;
2970 2969 if ((!contig && anyaddr) || is_domu) {
2971 2970 flags &= ~PG_PHYSCONTIG;
2972 2971 plist = page_create_va(vp, off, bytes, flags, &kvseg, vaddr);
2973 2972 if (plist != NULL)
2974 2973 return (plist);
2975 2974 else if (is_domu)
2976 2975 return (NULL); /* no memory available */
2977 2976 }
2978 2977 /*
2979 2978 * DomU should never reach here
2980 2979 */
2981 2980 if (contig) {
2982 2981 plist = page_get_contigpages(vp, off, &npages, flags, vaddr,
2983 2982 mattr);
2984 2983 if (plist == NULL)
2985 2984 goto fail;
2986 2985 bytes_got = (pages_req - npages) << MMU_PAGESHIFT;
2987 2986 vaddr += bytes_got;
2988 2987 off += bytes_got;
2989 2988 /*
2990 2989 * We now have all the contiguous pages we need, but
2991 2990 * we may still need additional non-contiguous pages.
2992 2991 */
2993 2992 }
2994 2993 /*
2995 2994 	 * Now loop collecting the requested number of pages. These do
2996 2995 	 * not have to be contiguous pages, but we use the contig
2997 2996 	 * page alloc code to get them anyway, since it honors any
2998 2997 	 * other constraints the pages may have.
2999 2998 */
3000 2999 while (npages--) {
3001 3000 dummy = -1;
3002 3001 pp = page_get_contigpages(vp, off, &dummy, flags, vaddr, mattr);
3003 3002 if (pp == NULL)
3004 3003 goto fail;
3005 3004 page_add(&plist, pp);
3006 3005 vaddr += MMU_PAGESIZE;
3007 3006 off += MMU_PAGESIZE;
3008 3007 }
3009 3008 return (plist);
3010 3009 fail:
3011 3010 /*
3012 3011 * Failed to get enough pages, return ones we did get
3013 3012 */
3014 3013 return_partial_alloc(plist);
3015 3014 return (NULL);
3016 3015 }
3017 3016
3018 3017 /*
3019 3018 * Lock and return the page with the highest mfn that we can find. last_mfn
3020 3019 * holds the last one found, so the next search can start from there. We
3021 3020 * also keep a counter so that we don't loop forever if the machine has no
3022 3021 * free pages.
3023 3022 *
3024 3023 * This is called from the balloon thread to find pages to give away. new_high
3025 3024  * is used when new mfns have been added to the system; we will reset our
3026 3025  * search if the new mfns are higher than our current search position.
3027 3026 */
3028 3027 page_t *
3029 3028 page_get_high_mfn(mfn_t new_high)
3030 3029 {
3031 3030 static mfn_t last_mfn = 0;
3032 3031 pfn_t pfn;
3033 3032 page_t *pp;
3034 3033 ulong_t loop_count = 0;
3035 3034
3036 3035 if (new_high > last_mfn)
3037 3036 last_mfn = new_high;
3038 3037
3039 3038 for (; loop_count < mfn_count; loop_count++, last_mfn--) {
3040 3039 if (last_mfn == 0) {
3041 3040 last_mfn = cached_max_mfn;
3042 3041 }
3043 3042
3044 3043 pfn = mfn_to_pfn(last_mfn);
3045 3044 if (pfn & PFN_IS_FOREIGN_MFN)
3046 3045 continue;
3047 3046
3048 3047 /* See if the page is free. If so, lock it. */
3049 3048 pp = page_numtopp_alloc(pfn);
3050 3049 if (pp == NULL)
3051 3050 continue;
3052 3051 PP_CLRFREE(pp);
3053 3052
3054 3053 ASSERT(PAGE_EXCL(pp));
3055 3054 ASSERT(pp->p_vnode == NULL);
3056 3055 ASSERT(!hat_page_is_mapped(pp));
3057 3056 last_mfn--;
3058 3057 return (pp);
3059 3058 }
3060 3059 return (NULL);
3061 3060 }
3062 3061
3063 3062 #else /* !__xpv */
3064 3063
3065 3064 /*
3066 3065 * get a page from any list with the given mnode
3067 3066 */
3068 3067 static page_t *
3069 3068 page_get_mnode_anylist(ulong_t origbin, uchar_t szc, uint_t flags,
3070 3069 int mnode, int mtype, ddi_dma_attr_t *dma_attr)
3071 3070 {
3072 3071 kmutex_t *pcm;
3073 3072 int i;
3074 3073 page_t *pp;
3075 3074 page_t *first_pp;
3076 3075 uint64_t pgaddr;
3077 3076 ulong_t bin;
3078 3077 int mtypestart;
3079 3078 int plw_initialized;
3080 3079 page_list_walker_t plw;
3081 3080
3082 3081 VM_STAT_ADD(pga_vmstats.pgma_alloc);
3083 3082
3084 3083 ASSERT((flags & PG_MATCH_COLOR) == 0);
3085 3084 ASSERT(szc == 0);
3086 3085 ASSERT(dma_attr != NULL);
3087 3086
3088 3087 MTYPE_START(mnode, mtype, flags);
3089 3088 if (mtype < 0) {
3090 3089 VM_STAT_ADD(pga_vmstats.pgma_allocempty);
3091 3090 return (NULL);
3092 3091 }
3093 3092
3094 3093 mtypestart = mtype;
3095 3094
3096 3095 bin = origbin;
3097 3096
3098 3097 /*
3099 3098 * check up to page_colors + 1 bins - origbin may be checked twice
3100 3099 * because of BIN_STEP skip
3101 3100 */
3102 3101 do {
3103 3102 plw_initialized = 0;
3104 3103
3105 3104 for (plw.plw_count = 0;
3106 3105 plw.plw_count < page_colors; plw.plw_count++) {
3107 3106
3108 3107 if (PAGE_FREELISTS(mnode, szc, bin, mtype) == NULL)
3109 3108 goto nextfreebin;
3110 3109
3111 3110 pcm = PC_BIN_MUTEX(mnode, bin, PG_FREE_LIST);
3112 3111 mutex_enter(pcm);
3113 3112 pp = PAGE_FREELISTS(mnode, szc, bin, mtype);
3114 3113 first_pp = pp;
3115 3114 while (pp != NULL) {
3116 - if (IS_DUMP_PAGE(pp) || page_trylock(pp,
3117 - SE_EXCL) == 0) {
3115 + if (page_trylock(pp, SE_EXCL) == 0) {
3118 3116 pp = pp->p_next;
3119 3117 if (pp == first_pp) {
3120 3118 pp = NULL;
3121 3119 }
3122 3120 continue;
3123 3121 }
3124 3122
3125 3123 ASSERT(PP_ISFREE(pp));
3126 3124 ASSERT(PP_ISAGED(pp));
3127 3125 ASSERT(pp->p_vnode == NULL);
3128 3126 ASSERT(pp->p_hash == NULL);
3129 3127 ASSERT(pp->p_offset == (u_offset_t)-1);
3130 3128 ASSERT(pp->p_szc == szc);
3131 3129 ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);
3132 3130 /* check if page within DMA attributes */
3133 3131 pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum));
3134 3132 if ((pgaddr >= dma_attr->dma_attr_addr_lo) &&
3135 3133 (pgaddr + MMU_PAGESIZE - 1 <=
3136 3134 dma_attr->dma_attr_addr_hi)) {
3137 3135 break;
3138 3136 }
3139 3137
3140 3138 /* continue looking */
3141 3139 page_unlock(pp);
3142 3140 pp = pp->p_next;
3143 3141 if (pp == first_pp)
3144 3142 pp = NULL;
3145 3143
3146 3144 }
3147 3145 if (pp != NULL) {
3148 3146 ASSERT(mtype == PP_2_MTYPE(pp));
3149 3147 ASSERT(pp->p_szc == 0);
3150 3148
3151 3149 /* found a page with specified DMA attributes */
3152 3150 page_sub(&PAGE_FREELISTS(mnode, szc, bin,
3153 3151 mtype), pp);
3154 3152 page_ctr_sub(mnode, mtype, pp, PG_FREE_LIST);
3155 3153
3156 3154 if ((PP_ISFREE(pp) == 0) ||
3157 3155 (PP_ISAGED(pp) == 0)) {
3158 3156 cmn_err(CE_PANIC, "page %p is not free",
3159 3157 (void *)pp);
3160 3158 }
3161 3159
3162 3160 mutex_exit(pcm);
3163 3161 check_dma(dma_attr, pp, 1);
3164 3162 VM_STAT_ADD(pga_vmstats.pgma_allocok);
3165 3163 return (pp);
3166 3164 }
3167 3165 mutex_exit(pcm);
3168 3166 nextfreebin:
3169 3167 if (plw_initialized == 0) {
3170 3168 page_list_walk_init(szc, 0, bin, 1, 0, &plw);
3171 3169 ASSERT(plw.plw_ceq_dif == page_colors);
3172 3170 plw_initialized = 1;
3173 3171 }
3174 3172
3175 3173 if (plw.plw_do_split) {
3176 3174 pp = page_freelist_split(szc, bin, mnode,
3177 3175 mtype,
3178 3176 mmu_btop(dma_attr->dma_attr_addr_lo),
3179 3177 mmu_btop(dma_attr->dma_attr_addr_hi + 1),
3180 3178 &plw);
3181 3179 if (pp != NULL) {
3182 3180 check_dma(dma_attr, pp, 1);
3183 3181 return (pp);
3184 3182 }
3185 3183 }
3186 3184
3187 3185 bin = page_list_walk_next_bin(szc, bin, &plw);
3188 3186 }
3189 3187
3190 3188 MTYPE_NEXT(mnode, mtype, flags);
3191 3189 } while (mtype >= 0);
3192 3190
3193 3191 /* failed to find a page in the freelist; try it in the cachelist */
3194 3192
3195 3193 /* reset mtype start for cachelist search */
3196 3194 mtype = mtypestart;
3197 3195 ASSERT(mtype >= 0);
3198 3196
3199 3197 /* start with the bin of matching color */
3200 3198 bin = origbin;
3201 3199
3202 3200 do {
3203 3201 for (i = 0; i <= page_colors; i++) {
3204 3202 if (PAGE_CACHELISTS(mnode, bin, mtype) == NULL)
3205 3203 goto nextcachebin;
3206 3204 pcm = PC_BIN_MUTEX(mnode, bin, PG_CACHE_LIST);
3207 3205 mutex_enter(pcm);
3208 3206 pp = PAGE_CACHELISTS(mnode, bin, mtype);
3209 3207 first_pp = pp;
3210 3208 while (pp != NULL) {
3211 - if (IS_DUMP_PAGE(pp) || page_trylock(pp,
3212 - SE_EXCL) == 0) {
3209 + if (page_trylock(pp, SE_EXCL) == 0) {
3213 3210 pp = pp->p_next;
3214 3211 if (pp == first_pp)
3215 3212 pp = NULL;
3216 3213 continue;
3217 3214 }
3218 3215 ASSERT(pp->p_vnode);
3219 3216 ASSERT(PP_ISAGED(pp) == 0);
3220 3217 ASSERT(pp->p_szc == 0);
3221 3218 ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);
3222 3219
3223 3220 /* check if page within DMA attributes */
3224 3221
3225 3222 pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum));
3226 3223 if ((pgaddr >= dma_attr->dma_attr_addr_lo) &&
3227 3224 (pgaddr + MMU_PAGESIZE - 1 <=
3228 3225 dma_attr->dma_attr_addr_hi)) {
3229 3226 break;
3230 3227 }
3231 3228
3232 3229 /* continue looking */
3233 3230 page_unlock(pp);
3234 3231 pp = pp->p_next;
3235 3232 if (pp == first_pp)
3236 3233 pp = NULL;
3237 3234 }
3238 3235
3239 3236 if (pp != NULL) {
3240 3237 ASSERT(mtype == PP_2_MTYPE(pp));
3241 3238 ASSERT(pp->p_szc == 0);
3242 3239
3243 3240 /* found a page with specified DMA attributes */
3244 3241 page_sub(&PAGE_CACHELISTS(mnode, bin,
3245 3242 mtype), pp);
3246 3243 page_ctr_sub(mnode, mtype, pp, PG_CACHE_LIST);
3247 3244
3248 3245 mutex_exit(pcm);
3249 3246 ASSERT(pp->p_vnode);
3250 3247 ASSERT(PP_ISAGED(pp) == 0);
3251 3248 check_dma(dma_attr, pp, 1);
3252 3249 VM_STAT_ADD(pga_vmstats.pgma_allocok);
3253 3250 return (pp);
3254 3251 }
3255 3252 mutex_exit(pcm);
3256 3253 nextcachebin:
3257 3254 bin += (i == 0) ? BIN_STEP : 1;
3258 3255 bin &= page_colors_mask;
3259 3256 }
3260 3257 MTYPE_NEXT(mnode, mtype, flags);
3261 3258 } while (mtype >= 0);
3262 3259
3263 3260 VM_STAT_ADD(pga_vmstats.pgma_allocfailed);
3264 3261 return (NULL);
3265 3262 }
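
Both the freelist and cachelist scans above apply the same DMA window test; note that it checks the last byte of the page, not just the first. A small stand-alone sketch with a hypothetical window whose upper limit falls mid-page:

    #include <stdio.h>

    #define MMU_PAGESIZE    4096ULL

    int
    main(void)
    {
            /* hypothetical device window: just under 16MB */
            unsigned long long lo = 0, hi = 0xFFF7FFULL;
            unsigned long long pgaddr = 0xFFF000ULL; /* page starts below hi */

            if (pgaddr >= lo && pgaddr + MMU_PAGESIZE - 1 <= hi)
                    printf("0x%llx fits\n", pgaddr);
            else
                    printf("0x%llx starts in range but its last byte "
                        "(0x%llx) does not\n", pgaddr,
                        pgaddr + MMU_PAGESIZE - 1);
            return (0);
    }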
3266 3263
3267 3264 /*
3268 3265 * This function is similar to page_get_freelist()/page_get_cachelist()
3269 3266  * but it searches both lists to find a page with the specified
3270 3267 * color (or no color) and DMA attributes. The search is done in the
3271 3268 * freelist first and then in the cache list within the highest memory
3272 3269 * range (based on DMA attributes) before searching in the lower
3273 3270 * memory ranges.
3274 3271 *
3275 3272 * Note: This function is called only by page_create_io().
3276 3273 */
3277 3274 /*ARGSUSED*/
3278 3275 static page_t *
3279 3276 page_get_anylist(struct vnode *vp, u_offset_t off, struct as *as, caddr_t vaddr,
3280 3277 size_t size, uint_t flags, ddi_dma_attr_t *dma_attr, lgrp_t *lgrp)
3281 3278 {
3282 3279 uint_t bin;
3283 3280 int mtype;
3284 3281 page_t *pp;
3285 3282 int n;
3286 3283 int m;
3287 3284 int szc;
3288 3285 int fullrange;
3289 3286 int mnode;
3290 3287 int local_failed_stat = 0;
3291 3288 lgrp_mnode_cookie_t lgrp_cookie;
3292 3289
3293 3290 VM_STAT_ADD(pga_vmstats.pga_alloc);
3294 3291
3295 3292 /* only base pagesize currently supported */
3296 3293 if (size != MMU_PAGESIZE)
3297 3294 return (NULL);
3298 3295
3299 3296 /*
3300 3297 * If we're passed a specific lgroup, we use it. Otherwise,
3301 3298 * assume first-touch placement is desired.
3302 3299 */
3303 3300 if (!LGRP_EXISTS(lgrp))
3304 3301 lgrp = lgrp_home_lgrp();
3305 3302
3306 3303 /* LINTED */
3307 3304 AS_2_BIN(as, seg, vp, vaddr, bin, 0);
3308 3305
3309 3306 /*
3310 3307 	 * Only hold one freelist or cachelist lock at a time; that way we
3311 3308 * can start anywhere and not have to worry about lock
3312 3309 * ordering.
3313 3310 */
3314 3311 if (dma_attr == NULL) {
3315 3312 n = mtype16m;
3316 3313 m = mtypetop;
3317 3314 fullrange = 1;
3318 3315 VM_STAT_ADD(pga_vmstats.pga_nulldmaattr);
3319 3316 } else {
3320 3317 pfn_t pfnlo = mmu_btop(dma_attr->dma_attr_addr_lo);
3321 3318 pfn_t pfnhi = mmu_btop(dma_attr->dma_attr_addr_hi);
3322 3319
3323 3320 /*
3324 3321 		 * We can guarantee alignment only to a page boundary.
3325 3322 */
3326 3323 if (dma_attr->dma_attr_align > MMU_PAGESIZE)
3327 3324 return (NULL);
3328 3325
3329 3326 /* Sanity check the dma_attr */
3330 3327 if (pfnlo > pfnhi)
3331 3328 return (NULL);
3332 3329
3333 3330 n = pfn_2_mtype(pfnlo);
3334 3331 m = pfn_2_mtype(pfnhi);
3335 3332
3336 3333 fullrange = ((pfnlo == mnoderanges[n].mnr_pfnlo) &&
3337 3334 (pfnhi >= mnoderanges[m].mnr_pfnhi));
3338 3335 }
3339 3336 VM_STAT_COND_ADD(fullrange == 0, pga_vmstats.pga_notfullrange);
3340 3337
3341 3338 szc = 0;
3342 3339
3343 3340 	/* cycling through mtype is handled by RANGE0 if n == mtype16m */
3344 3341 if (n == mtype16m) {
3345 3342 flags |= PGI_MT_RANGE0;
3346 3343 n = m;
3347 3344 }
3348 3345
3349 3346 /*
3350 3347 * Try local memory node first, but try remote if we can't
3351 3348 * get a page of the right color.
3352 3349 */
3353 3350 LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp, LGRP_SRCH_HIER);
3354 3351 while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) {
3355 3352 /*
3356 3353 * allocate pages from high pfn to low.
3357 3354 */
3358 3355 mtype = m;
3359 3356 do {
3360 3357 if (fullrange != 0) {
3361 3358 pp = page_get_mnode_freelist(mnode,
3362 3359 bin, mtype, szc, flags);
3363 3360 if (pp == NULL) {
3364 3361 pp = page_get_mnode_cachelist(
3365 3362 bin, flags, mnode, mtype);
3366 3363 }
3367 3364 } else {
3368 3365 pp = page_get_mnode_anylist(bin, szc,
3369 3366 flags, mnode, mtype, dma_attr);
3370 3367 }
3371 3368 if (pp != NULL) {
3372 3369 VM_STAT_ADD(pga_vmstats.pga_allocok);
3373 3370 check_dma(dma_attr, pp, 1);
3374 3371 return (pp);
3375 3372 }
3376 3373 } while (mtype != n &&
3377 3374 (mtype = mnoderanges[mtype].mnr_next) != -1);
3378 3375 if (!local_failed_stat) {
3379 3376 lgrp_stat_add(lgrp->lgrp_id, LGRP_NUM_ALLOC_FAIL, 1);
3380 3377 local_failed_stat = 1;
3381 3378 }
3382 3379 }
3383 3380 VM_STAT_ADD(pga_vmstats.pga_allocfailed);
3384 3381
3385 3382 return (NULL);
3386 3383 }
3387 3384
3388 3385 /*
3389 3386 * page_create_io()
3390 3387 *
3391 3388 * This function is a copy of page_create_va() with an additional
3392 3389 * argument 'mattr' that specifies DMA memory requirements to
3393 3390 * the page list functions. This function is used by the segkmem
3394 3391  * allocator, so it is used only to create new pages (i.e., PG_EXCL is
3395 3392 * set).
3396 3393 *
3397 3394 * Note: This interface is currently used by x86 PSM only and is
3398 3395 * not fully specified so the commitment level is only for
3399 3396 * private interface specific to x86. This interface uses PSM
3400 3397 * specific page_get_anylist() interface.
3401 3398 */
3402 3399
3403 3400 #define PAGE_HASH_SEARCH(index, pp, vp, off) { \
3404 3401 for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
3405 3402 if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
3406 3403 break; \
3407 3404 } \
3408 3405 }
3409 3406
3410 3407
3411 3408 page_t *
3412 3409 page_create_io(
3413 3410 struct vnode *vp,
3414 3411 u_offset_t off,
3415 3412 uint_t bytes,
3416 3413 uint_t flags,
3417 3414 struct as *as,
3418 3415 caddr_t vaddr,
3419 3416 ddi_dma_attr_t *mattr) /* DMA memory attributes if any */
3420 3417 {
3421 3418 page_t *plist = NULL;
3422 3419 uint_t plist_len = 0;
3423 3420 pgcnt_t npages;
3424 3421 page_t *npp = NULL;
3425 3422 uint_t pages_req;
3426 3423 page_t *pp;
3427 3424 kmutex_t *phm = NULL;
3428 3425 uint_t index;
3429 3426
3430 3427 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START,
3431 3428 "page_create_start:vp %p off %llx bytes %u flags %x",
3432 3429 vp, off, bytes, flags);
3433 3430
3434 3431 ASSERT((flags & ~(PG_EXCL | PG_WAIT | PG_PHYSCONTIG)) == 0);
3435 3432
3436 3433 pages_req = npages = mmu_btopr(bytes);
3437 3434
3438 3435 /*
3439 3436 * Do the freemem and pcf accounting.
3440 3437 */
3441 3438 if (!page_create_wait(npages, flags)) {
3442 3439 return (NULL);
3443 3440 }
3444 3441
3445 3442 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS,
3446 3443 "page_create_success:vp %p off %llx", vp, off);
3447 3444
3448 3445 /*
3449 3446 * If satisfying this request has left us with too little
3450 3447 * memory, start the wheels turning to get some back. The
3451 3448 * first clause of the test prevents waking up the pageout
3452 3449 * daemon in situations where it would decide that there's
3453 3450 * nothing to do.
3454 3451 */
3455 3452 if (nscan < desscan && freemem < minfree) {
3456 3453 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
3457 3454 "pageout_cv_signal:freemem %ld", freemem);
3458 3455 cv_signal(&proc_pageout->p_cv);
3459 3456 }
3460 3457
3461 3458 if (flags & PG_PHYSCONTIG) {
3462 3459
3463 3460 plist = page_get_contigpage(&npages, mattr, 1);
3464 3461 if (plist == NULL) {
3465 3462 page_create_putback(npages);
3466 3463 return (NULL);
3467 3464 }
3468 3465
3469 3466 pp = plist;
3470 3467
3471 3468 do {
3472 3469 if (!page_hashin(pp, vp, off, NULL)) {
3473 3470 panic("pg_creat_io: hashin failed %p %p %llx",
3474 3471 (void *)pp, (void *)vp, off);
3475 3472 }
3476 3473 VM_STAT_ADD(page_create_new);
3477 3474 off += MMU_PAGESIZE;
3478 3475 PP_CLRFREE(pp);
3479 3476 PP_CLRAGED(pp);
3480 3477 page_set_props(pp, P_REF);
3481 3478 pp = pp->p_next;
3482 3479 } while (pp != plist);
3483 3480
3484 3481 if (!npages) {
3485 3482 check_dma(mattr, plist, pages_req);
3486 3483 return (plist);
3487 3484 } else {
3488 3485 vaddr += (pages_req - npages) << MMU_PAGESHIFT;
3489 3486 }
3490 3487
3491 3488 /*
3492 3489 * fall-thru:
3493 3490 *
3494 3491 * page_get_contigpage returns when npages <= sgllen.
3495 3492 * Grab the rest of the non-contig pages below from anylist.
3496 3493 */
3497 3494 }
3498 3495
3499 3496 /*
3500 3497 * Loop around collecting the requested number of pages.
3501 3498 * Most of the time, we have to `create' a new page. With
3502 3499 * this in mind, pull the page off the free list before
3503 3500 * getting the hash lock. This will minimize the hash
3504 3501 * lock hold time, nesting, and the like. If it turns
3505 3502 * out we don't need the page, we put it back at the end.
3506 3503 */
3507 3504 while (npages--) {
3508 3505 phm = NULL;
3509 3506
3510 3507 index = PAGE_HASH_FUNC(vp, off);
3511 3508 top:
3512 3509 ASSERT(phm == NULL);
3513 3510 ASSERT(index == PAGE_HASH_FUNC(vp, off));
3514 3511 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
3515 3512
3516 3513 if (npp == NULL) {
3517 3514 /*
3518 3515 * Try to get the page of any color either from
3519 3516 * the freelist or from the cache list.
3520 3517 */
3521 3518 npp = page_get_anylist(vp, off, as, vaddr, MMU_PAGESIZE,
3522 3519 flags & ~PG_MATCH_COLOR, mattr, NULL);
3523 3520 if (npp == NULL) {
3524 3521 if (mattr == NULL) {
3525 3522 /*
3526 3523 * Not looking for a special page;
3527 3524 * panic!
3528 3525 */
3529 3526 panic("no page found %d", (int)npages);
3530 3527 }
3531 3528 /*
3532 3529 * No page found! This can happen
3533 3530 * if we are looking for a page
3534 3531 * within a specific memory range
3535 3532 * for DMA purposes. If PG_WAIT is
3536 3533 * specified then we wait for a
3537 3534 * while and then try again. The
3538 3535 * wait could be forever if we
3539 3536 * don't get the page(s) we need.
3540 3537 *
3541 3538 * Note: XXX We really need a mechanism
3542 3539 * to wait for pages in the desired
3543 3540 * range. For now, we wait for any
3544 3541			 * pages and see if we can use them.
3545 3542 */
3546 3543
3547 3544 if ((mattr != NULL) && (flags & PG_WAIT)) {
3548 3545 delay(10);
3549 3546 goto top;
3550 3547 }
3551 3548 goto fail; /* undo accounting stuff */
3552 3549 }
3553 3550
3554 3551 if (PP_ISAGED(npp) == 0) {
3555 3552 /*
3556 3553 * Since this page came from the
3557 3554 * cachelist, we must destroy the
3558 3555 * old vnode association.
3559 3556 */
3560 3557 page_hashout(npp, (kmutex_t *)NULL);
3561 3558 }
3562 3559 }
3563 3560
3564 3561 /*
3565 3562 * We own this page!
3566 3563 */
3567 3564 ASSERT(PAGE_EXCL(npp));
3568 3565 ASSERT(npp->p_vnode == NULL);
3569 3566 ASSERT(!hat_page_is_mapped(npp));
3570 3567 PP_CLRFREE(npp);
3571 3568 PP_CLRAGED(npp);
3572 3569
3573 3570 /*
3574 3571		 * Here we have a page in our hot little mitts and are
3575 3572 * just waiting to stuff it on the appropriate lists.
3576 3573 * Get the mutex and check to see if it really does
3577 3574 * not exist.
3578 3575 */
3579 3576 phm = PAGE_HASH_MUTEX(index);
3580 3577 mutex_enter(phm);
3581 3578 PAGE_HASH_SEARCH(index, pp, vp, off);
3582 3579 if (pp == NULL) {
3583 3580 VM_STAT_ADD(page_create_new);
3584 3581 pp = npp;
3585 3582 npp = NULL;
3586 3583 if (!page_hashin(pp, vp, off, phm)) {
3587 3584 /*
3588 3585 * Since we hold the page hash mutex and
3589 3586 * just searched for this page, page_hashin
3590 3587 * had better not fail. If it does, that
3591 3588			 * means some thread did not follow the
3592 3589 * page hash mutex rules. Panic now and
3593 3590 * get it over with. As usual, go down
3594 3591 * holding all the locks.
3595 3592 */
3596 3593 ASSERT(MUTEX_HELD(phm));
3597 3594 panic("page_create: hashin fail %p %p %llx %p",
3598 3595 (void *)pp, (void *)vp, off, (void *)phm);
3599 3596
3600 3597 }
3601 3598 ASSERT(MUTEX_HELD(phm));
3602 3599 mutex_exit(phm);
3603 3600 phm = NULL;
3604 3601
3605 3602 /*
3606 3603 * Hat layer locking need not be done to set
3607 3604 * the following bits since the page is not hashed
3608 3605 * and was on the free list (i.e., had no mappings).
3609 3606 *
3610 3607 * Set the reference bit to protect
3611 3608 * against immediate pageout
3612 3609 *
3613 3610 * XXXmh modify freelist code to set reference
3614 3611 * bit so we don't have to do it here.
3615 3612 */
3616 3613 page_set_props(pp, P_REF);
3617 3614 } else {
3618 3615 ASSERT(MUTEX_HELD(phm));
3619 3616 mutex_exit(phm);
3620 3617 phm = NULL;
3621 3618 /*
3622 3619 * NOTE: This should not happen for pages associated
3623 3620 * with kernel vnode 'kvp'.
3624 3621 */
3625 3622 /* XX64 - to debug why this happens! */
3626 3623 ASSERT(!VN_ISKAS(vp));
3627 3624 if (VN_ISKAS(vp))
3628 3625 cmn_err(CE_NOTE,
3629 3626 "page_create: page not expected "
3630 3627 "in hash list for kernel vnode - pp 0x%p",
3631 3628 (void *)pp);
3632 3629 VM_STAT_ADD(page_create_exists);
3633 3630 goto fail;
3634 3631 }
3635 3632
3636 3633 /*
3637 3634 * Got a page! It is locked. Acquire the i/o
3638 3635 * lock since we are going to use the p_next and
3639 3636 * p_prev fields to link the requested pages together.
3640 3637 */
3641 3638 page_io_lock(pp);
3642 3639 page_add(&plist, pp);
3643 3640 plist = plist->p_next;
3644 3641 off += MMU_PAGESIZE;
3645 3642 vaddr += MMU_PAGESIZE;
3646 3643 }
3647 3644
3648 3645 check_dma(mattr, plist, pages_req);
3649 3646 return (plist);
3650 3647
3651 3648 fail:
3652 3649 if (npp != NULL) {
3653 3650 /*
3654 3651 * Did not need this page after all.
3655 3652 * Put it back on the free list.
3656 3653 */
3657 3654 VM_STAT_ADD(page_create_putbacks);
3658 3655 PP_SETFREE(npp);
3659 3656 PP_SETAGED(npp);
3660 3657 npp->p_offset = (u_offset_t)-1;
3661 3658 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL);
3662 3659 page_unlock(npp);
3663 3660 }
3664 3661
3665 3662 /*
3666 3663 * Give up the pages we already got.
3667 3664 */
3668 3665 while (plist != NULL) {
3669 3666 pp = plist;
3670 3667 page_sub(&plist, pp);
3671 3668 page_io_unlock(pp);
3672 3669 plist_len++;
3673 3670 /*LINTED: constant in conditional ctx*/
3674 3671 VN_DISPOSE(pp, B_INVAL, 0, kcred);
3675 3672 }
3676 3673
3677 3674 /*
3678 3675 * VN_DISPOSE does freemem accounting for the pages in plist
3679 3676 * by calling page_free. So, we need to undo the pcf accounting
3680 3677 * for only the remaining pages.
3681 3678 */
3682 3679 VM_STAT_ADD(page_create_putbacks);
3683 3680 page_create_putback(pages_req - plist_len);
3684 3681
3685 3682 return (NULL);
3686 3683 }
3687 3684 #endif /* !__xpv */
3688 3685
3689 3686
3690 3687 /*
3691 3688 * Copy the data from the physical page represented by "frompp" to
3692 3689 * that represented by "topp". ppcopy uses CPU->cpu_caddr1 and
3693 3690 * CPU->cpu_caddr2. It assumes that no one uses either map at interrupt
3694 3691 * level and no one sleeps with an active mapping there.
3695 3692 *
3696 3693 * Note that the ref/mod bits in the page_t's are not affected by
3697 3694 * this operation, hence it is up to the caller to update them appropriately.
3698 3695 */
3699 3696 int
3700 3697 ppcopy(page_t *frompp, page_t *topp)
3701 3698 {
3702 3699 caddr_t pp_addr1;
3703 3700 caddr_t pp_addr2;
3704 3701 hat_mempte_t pte1;
3705 3702 hat_mempte_t pte2;
3706 3703 kmutex_t *ppaddr_mutex;
3707 3704 label_t ljb;
3708 3705 int ret = 1;
3709 3706
3710 3707 ASSERT_STACK_ALIGNED();
3711 3708 ASSERT(PAGE_LOCKED(frompp));
3712 3709 ASSERT(PAGE_LOCKED(topp));
3713 3710
3714 3711 if (kpm_enable) {
3715 3712 pp_addr1 = hat_kpm_page2va(frompp, 0);
3716 3713 pp_addr2 = hat_kpm_page2va(topp, 0);
3717 3714 kpreempt_disable();
3718 3715 } else {
3719 3716 /*
3720 3717		 * Disable preemption so that the thread can't migrate
3721 3718		 * to another CPU while we use the CPU-private addresses.
3721 3718 */
3722 3719 kpreempt_disable();
3723 3720
3724 3721 pp_addr1 = CPU->cpu_caddr1;
3725 3722 pp_addr2 = CPU->cpu_caddr2;
3726 3723 pte1 = CPU->cpu_caddr1pte;
3727 3724 pte2 = CPU->cpu_caddr2pte;
3728 3725
3729 3726 ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
3730 3727 mutex_enter(ppaddr_mutex);
3731 3728
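		/*
		 * Map the source page read-only and the destination page
		 * read/write at the two CPU-private addresses.
		 */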
3732 3729 hat_mempte_remap(page_pptonum(frompp), pp_addr1, pte1,
3733 3730 PROT_READ | HAT_STORECACHING_OK, HAT_LOAD_NOCONSIST);
3734 3731 hat_mempte_remap(page_pptonum(topp), pp_addr2, pte2,
3735 3732 PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
3736 3733 HAT_LOAD_NOCONSIST);
3737 3734 }
3738 3735
3739 3736 if (on_fault(&ljb)) {
3740 3737 ret = 0;
3741 3738 goto faulted;
3742 3739 }
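	/*
	 * Do the copy: with SSE available, use the block-copy routine
	 * (under the hypervisor, the no-xmm variant that avoids the XMM
	 * registers); otherwise fall back to bcopy().
	 */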
3743 3740 if (use_sse_pagecopy)
3744 3741 #ifdef __xpv
3745 3742 page_copy_no_xmm(pp_addr2, pp_addr1);
3746 3743 #else
3747 3744 hwblkpagecopy(pp_addr1, pp_addr2);
3748 3745 #endif
3749 3746 else
3750 3747 bcopy(pp_addr1, pp_addr2, PAGESIZE);
3751 3748
3752 3749 no_fault();
3753 3750 faulted:
3754 3751 if (!kpm_enable) {
3755 3752 #ifdef __xpv
3756 3753 /*
3757 3754 * We can't leave unused mappings laying about under the
3758 3755		 * We can't leave unused mappings lying about under the
3759 3756 */
3760 3757 if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr1, 0,
3761 3758 UVMF_INVLPG | UVMF_LOCAL) < 0)
3762 3759 panic("HYPERVISOR_update_va_mapping() failed");
3763 3760 if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
3764 3761 UVMF_INVLPG | UVMF_LOCAL) < 0)
3765 3762 panic("HYPERVISOR_update_va_mapping() failed");
3766 3763 #endif
3767 3764 mutex_exit(ppaddr_mutex);
3768 3765 }
3769 3766 kpreempt_enable();
3770 3767 return (ret);
3771 3768 }
3772 3769
3773 3770 void
3774 3771 pagezero(page_t *pp, uint_t off, uint_t len)
3775 3772 {
3776 3773 ASSERT(PAGE_LOCKED(pp));
3777 3774 pfnzero(page_pptonum(pp), off, len);
3778 3775 }
3779 3776
3780 3777 /*
3781 3778 * Zero the physical page from off to off + len given by pfn
3782 3779	 * without changing the reference and modified bits of the page.
3783 3780	 *
3784 3781	 * We do this using CPU private page address #2; see ppcopy() for more info.
3785 3782 * pfnzero() must not be called at interrupt level.
3786 3783 */
3787 3784 void
3788 3785 pfnzero(pfn_t pfn, uint_t off, uint_t len)
3789 3786 {
3790 3787 caddr_t pp_addr2;
3791 3788 hat_mempte_t pte2;
3792 3789 kmutex_t *ppaddr_mutex = NULL;
3793 3790
3794 3791 ASSERT_STACK_ALIGNED();
3795 3792 ASSERT(len <= MMU_PAGESIZE);
3796 3793 ASSERT(off <= MMU_PAGESIZE);
3797 3794 ASSERT(off + len <= MMU_PAGESIZE);
3798 3795
3799 3796 if (kpm_enable && !pfn_is_foreign(pfn)) {
3800 3797 pp_addr2 = hat_kpm_pfn2va(pfn);
3801 3798 kpreempt_disable();
3802 3799 } else {
3803 3800 kpreempt_disable();
3804 3801
3805 3802 pp_addr2 = CPU->cpu_caddr2;
3806 3803 pte2 = CPU->cpu_caddr2pte;
3807 3804
3808 3805 ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
3809 3806 mutex_enter(ppaddr_mutex);
3810 3807
3811 3808 hat_mempte_remap(pfn, pp_addr2, pte2,
3812 3809 PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
3813 3810 HAT_LOAD_NOCONSIST);
3814 3811 }
3815 3812
3816 3813 if (use_sse_pagezero) {
3817 3814 #ifdef __xpv
3818 3815 uint_t rem;
3819 3816
3820 3817 /*
3821 3818 * zero a byte at a time until properly aligned for
3822 3819 * block_zero_no_xmm().
3823 3820 */
3824 3821		while (P2NPHASE(off, ((uint_t)BLOCKZEROALIGN)) && len-- > 0)
3825 3822 pp_addr2[off++] = 0;
3826 3823
3827 3824 /*
3828 3825 * Now use faster block_zero_no_xmm() for any range
3829 3826 * that is properly aligned and sized.
3830 3827 */
3831 3828 rem = P2PHASE(len, ((uint_t)BLOCKZEROALIGN));
3832 3829 len -= rem;
3833 3830 if (len != 0) {
3834 3831 block_zero_no_xmm(pp_addr2 + off, len);
3835 3832 off += len;
3836 3833 }
3837 3834
3838 3835 /*
3839 3836 * zero remainder with byte stores.
3840 3837 */
3841 3838 while (rem-- > 0)
3842 3839 pp_addr2[off++] = 0;
3843 3840 #else
3844 3841 hwblkclr(pp_addr2 + off, len);
3845 3842 #endif
3846 3843 } else {
3847 3844 bzero(pp_addr2 + off, len);
3848 3845 }
3849 3846
3850 3847 if (!kpm_enable || pfn_is_foreign(pfn)) {
3851 3848 #ifdef __xpv
3852 3849 /*
3853 3850 * On the hypervisor this page might get used for a page
3854 3851 * table before any intervening change to this mapping,
3855 3852 * so blow it away.
3856 3853 */
3857 3854 if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
3858 3855 UVMF_INVLPG) < 0)
3859 3856 panic("HYPERVISOR_update_va_mapping() failed");
3860 3857 #endif
3861 3858 mutex_exit(ppaddr_mutex);
3862 3859 }
3863 3860
3864 3861 kpreempt_enable();
3865 3862 }
3866 3863
3867 3864 /*
3868 3865 * Platform-dependent page scrub call.
3869 3866 */
3870 3867 void
3871 3868 pagescrub(page_t *pp, uint_t off, uint_t len)
3872 3869 {
3873 3870 /*
3874 3871 * For now, we rely on the fact that pagezero() will
3875 3872 * always clear UEs.
3876 3873 */
3877 3874 pagezero(pp, off, len);
3878 3875 }
3879 3876
3880 3877 /*
3881 3878	 * Set up two private addresses on a given CPU for use in ppcopy().
3882 3879 */
3883 3880 void
3884 3881 setup_vaddr_for_ppcopy(struct cpu *cpup)
3885 3882 {
3886 3883 void *addr;
3887 3884 hat_mempte_t pte_pa;
3888 3885
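	/*
	 * For each private address, allocate a page of kernel VA and
	 * set up a PTE that ppcopy()/pfnzero() can later remap.
	 */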
3889 3886 addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
3890 3887 pte_pa = hat_mempte_setup(addr);
3891 3888 cpup->cpu_caddr1 = addr;
3892 3889 cpup->cpu_caddr1pte = pte_pa;
3893 3890
3894 3891 addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
3895 3892 pte_pa = hat_mempte_setup(addr);
3896 3893 cpup->cpu_caddr2 = addr;
3897 3894 cpup->cpu_caddr2pte = pte_pa;
3898 3895
3899 3896 mutex_init(&cpup->cpu_ppaddr_mutex, NULL, MUTEX_DEFAULT, NULL);
3900 3897 }
3901 3898
3902 3899 /*
3903 3900 * Undo setup_vaddr_for_ppcopy
3904 3901 */
3905 3902 void
3906 3903 teardown_vaddr_for_ppcopy(struct cpu *cpup)
3907 3904 {
3908 3905 mutex_destroy(&cpup->cpu_ppaddr_mutex);
3909 3906
3910 3907 hat_mempte_release(cpup->cpu_caddr2, cpup->cpu_caddr2pte);
3911 3908 cpup->cpu_caddr2pte = 0;
3912 3909 vmem_free(heap_arena, cpup->cpu_caddr2, mmu_ptob(1));
3913 3910 cpup->cpu_caddr2 = 0;
3914 3911
3915 3912 hat_mempte_release(cpup->cpu_caddr1, cpup->cpu_caddr1pte);
3916 3913 cpup->cpu_caddr1pte = 0;
3917 3914 vmem_free(heap_arena, cpup->cpu_caddr1, mmu_ptob(1));
3918 3915 cpup->cpu_caddr1 = 0;
3919 3916 }
3920 3917
3921 3918 /*
3922 3919 * Function for flushing D-cache when performing module relocations
3923 3920 * to an alternate mapping. Unnecessary on Intel / AMD platforms.
3924 3921 */
3925 3922 void
3926 3923 dcache_flushall()
3927 3924 {}
3928 3925
3929 3926 size_t
3930 3927 exec_get_spslew(void)
3931 3928 {
3932 3929 return (0);
3933 3930 }
3934 3931
3935 3932 /*
3936 3933 * Allocate a memory page. The argument 'seed' can be any pseudo-random
3937 3934 * number to vary where the pages come from. This is quite a hacked up
3938 3935 * method -- it works for now, but really needs to be fixed up a bit.
3939 3936 *
3940 3937 * We currently use page_create_va() on the kvp with fake offsets,
3941 3938 * segments and virt address. This is pretty bogus, but was copied from the
3942 3939 * old hat_i86.c code. A better approach would be to specify either mnode
3943 3940	 * random or mnode local and take a page from whatever color has the MOST
3944 3941 * available - this would have a minimal impact on page coloring.
3945 3942 */
3946 3943 page_t *
3947 3944 page_get_physical(uintptr_t seed)
3948 3945 {
3949 3946 page_t *pp;
3950 3947 u_offset_t offset;
3951 3948 static struct seg tmpseg;
3952 3949 static uintptr_t ctr = 0;
3953 3950
3954 3951 /*
3955 3952 * This code is gross, we really need a simpler page allocator.
3956 3953 *
3957 3954	 * We need to assign an offset for the page in order to call page_create_va().
3958 3955 * To avoid conflicts with other pages, we get creative with the offset.
3959 3956 * For 32 bits, we need an offset > 4Gig
3960 3957	 * For 64 bits, we need an offset somewhere in the VA hole.
3961 3958 */
3962 3959 offset = seed;
3963 3960 if (offset > kernelbase)
3964 3961 offset -= kernelbase;
3965 3962 offset <<= MMU_PAGESHIFT;
3966 3963 #if defined(__amd64)
3967 3964 offset += mmu.hole_start; /* something in VA hole */
3968 3965 #else
3969 3966 offset += 1ULL << 40; /* something > 4 Gig */
3970 3967 #endif
3971 3968
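	/* reserve memory for one page; fail rather than sleep */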
3972 3969 if (page_resv(1, KM_NOSLEEP) == 0)
3973 3970 return (NULL);
3974 3971
3975 3972 #ifdef DEBUG
3976 3973 pp = page_exists(&kvp, offset);
3977 3974 if (pp != NULL)
3978 3975 panic("page already exists %p", (void *)pp);
3979 3976 #endif
3980 3977
3981 3978 pp = page_create_va(&kvp, offset, MMU_PAGESIZE, PG_EXCL,
3982 3979 &tmpseg, (caddr_t)(ctr += MMU_PAGESIZE)); /* changing VA usage */
3983 3980 if (pp != NULL) {
3984 3981 page_io_unlock(pp);
3985 3982 page_downgrade(pp);
3986 3983 }
3987 3984 return (pp);
3988 3985 }
766 lines elided