9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 #ifndef _VM_HAT_PTE_H
27 #define _VM_HAT_PTE_H
28
29 #pragma ident "%Z%%M% %I% %E% SMI"
30
31 #ifdef __cplusplus
32 extern "C" {
33 #endif
34
35 #include <sys/types.h>
36 #include <sys/mach_mmu.h>
37
38 /*
39 * macros to get/set/clear the PTE fields
40 */
41 #define PTE_SET(p, f) ((p) |= (f))		/* set flag bit(s) f in pte p */
42 #define PTE_CLR(p, f) ((p) &= ~(x86pte_t)(f))	/* clear bit(s) f; cast keeps full pte width */
43 #define PTE_GET(p, f) ((p) & (f))		/* read flag bit(s) f from pte p */
44
45 /*
46 * Handy macro to check if a pagetable entry or pointer is valid
47 */
48 #define PTE_ISVALID(p) PTE_GET(p, PT_VALID)	/* nonzero when the valid bit is set */
49
50 /*
218
219 /*
220 * Macros to access the HAT's private page windows. They're used for
221 * accessing pagetables, ppcopy() and page_zero().
222 * The 1st two macros are used to get an index for the particular use.
223 * The next three give you:
224 * - the virtual address of the window
225 * - the virtual address of the pte that maps the window
226 * - the physical address of the pte that maps the window
227 */
228 #define PWIN_TABLE(cpuid) ((cpuid) * 2)	/* window index for pagetable access */
229 #define PWIN_SRC(cpuid) ((cpuid) * 2 + 1) /* for x86pte_copy() */
230 #define PWIN_VA(x) (mmu.pwin_base + ((x) << MMU_PAGESHIFT))	/* VA of window x */
231 #define PWIN_PTE_VA(x) (mmu.pwin_pte_va + ((x) << mmu.pte_size_shift))	/* VA of window x's pte */
232 #define PWIN_PTE_PA(x) (mmu.pwin_pte_pa + ((x) << mmu.pte_size_shift))	/* PA of window x's pte */
233
234 /*
235 * The concept of a VA hole exists in AMD64. This might need to be made
236 * model specific eventually.
237 *
238 * In the 64 bit kernel PTE loads are atomic, but need cas64 on 32 bit kernel.
239 */
240 #if defined(__amd64)
241
242 #ifdef lint
243 #define IN_VA_HOLE(va) (__lintzero)
244 #else
245 #define IN_VA_HOLE(va) (mmu.hole_start <= (va) && (va) < mmu.hole_end)
246 #endif
247
/* amd64: a pte is accessed with a single 64 bit load/store (see block comment above) */
248 #define FMT_PTE "0x%lx"
249 #define GET_PTE(ptr) (*(x86pte_t *)(ptr))
250 #define SET_PTE(ptr, pte) (*(x86pte_t *)(ptr) = pte)
251 #define CAS_PTE(ptr, x, y) cas64(ptr, x, y)
252
253 #elif defined(__i386)
254
255 #define IN_VA_HOLE(va) (__lintzero)
256
257 #define FMT_PTE "0x%llx"
258
259 /* on 32 bit kernels, 64 bit loads aren't atomic, use get_pte64() */
260 extern x86pte_t get_pte64(x86pte_t *ptr);
261 #define GET_PTE(ptr) (mmu.pae_hat ? get_pte64(ptr) : *(x86pte32_t *)(ptr))
/* PAE store is split: the upper 32 bits are written first, then the lower 32 */
262 #define SET_PTE(ptr, pte) \
263 ((mmu.pae_hat ? ((x86pte32_t *)(ptr))[1] = (pte >> 32) : 0), \
264 *(x86pte32_t *)(ptr) = pte)
/* PAE needs a 64 bit compare-and-swap; non-PAE ptes fit in 32 bits */
265 #define CAS_PTE(ptr, x, y) \
266 (mmu.pae_hat ? cas64(ptr, x, y) : \
267 cas32((uint32_t *)(ptr), (uint32_t)(x), (uint32_t)(y)))
268
269 #endif /* __i386 */
270
271 /*
272 * Return a pointer to the pte entry at the given index within a page table.
273 */
/* index is scaled by mmu.pte_size_shift — presumably log2 of the pte size; verify */
274 #define PT_INDEX_PTR(p, x) \
275 ((x86pte_t *)((uintptr_t)(p) + ((x) << mmu.pte_size_shift)))
276
277 /*
278 * Return the physical address of the pte entry at the given index within a
279 * page table.
280 */
281 #define PT_INDEX_PHYSADDR(p, x) \
282 ((paddr_t)(p) + ((x) << mmu.pte_size_shift))
283
284 /*
285 * From pfn to bytes, careful not to lose bits on PAE.
286 */
287 #define pfn_to_pa(pfn) (mmu_ptob((paddr_t)(pfn)))	/* paddr_t cast widens pfn before scaling */
|
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 #ifndef _VM_HAT_PTE_H
27 #define _VM_HAT_PTE_H
28
29 #ifdef __cplusplus
30 extern "C" {
31 #endif
32
33 #include <sys/types.h>
34 #include <sys/mach_mmu.h>
35
36 /*
37 * macros to get/set/clear the PTE fields
38 */
39 #define PTE_SET(p, f) ((p) |= (f))		/* set flag bit(s) f in pte p */
40 #define PTE_CLR(p, f) ((p) &= ~(x86pte_t)(f))	/* clear bit(s) f; cast keeps full pte width */
41 #define PTE_GET(p, f) ((p) & (f))		/* read flag bit(s) f from pte p */
42
43 /*
44 * Handy macro to check if a pagetable entry or pointer is valid
45 */
46 #define PTE_ISVALID(p) PTE_GET(p, PT_VALID)	/* nonzero when the valid bit is set */
47
48 /*
216
217 /*
218 * Macros to access the HAT's private page windows. They're used for
219 * accessing pagetables, ppcopy() and page_zero().
220 * The 1st two macros are used to get an index for the particular use.
221 * The next three give you:
222 * - the virtual address of the window
223 * - the virtual address of the pte that maps the window
224 * - the physical address of the pte that maps the window
225 */
226 #define PWIN_TABLE(cpuid) ((cpuid) * 2)	/* window index for pagetable access */
227 #define PWIN_SRC(cpuid) ((cpuid) * 2 + 1) /* for x86pte_copy() */
228 #define PWIN_VA(x) (mmu.pwin_base + ((x) << MMU_PAGESHIFT))	/* VA of window x */
229 #define PWIN_PTE_VA(x) (mmu.pwin_pte_va + ((x) << mmu.pte_size_shift))	/* VA of window x's pte */
230 #define PWIN_PTE_PA(x) (mmu.pwin_pte_pa + ((x) << mmu.pte_size_shift))	/* PA of window x's pte */
231
232 /*
233 * The concept of a VA hole exists in AMD64. This might need to be made
234 * model specific eventually.
235 *
236 * In the 64 bit kernel PTE loads are atomic, but need atomic_cas_64 on 32
237 * bit kernel.
238 */
239 #if defined(__amd64)
240
241 #ifdef lint
242 #define IN_VA_HOLE(va) (__lintzero)
243 #else
244 #define IN_VA_HOLE(va) (mmu.hole_start <= (va) && (va) < mmu.hole_end)
245 #endif
246
/* amd64: a pte is accessed with a single 64 bit load/store (see block comment above) */
247 #define FMT_PTE "0x%lx"
248 #define GET_PTE(ptr) (*(x86pte_t *)(ptr))
249 #define SET_PTE(ptr, pte) (*(x86pte_t *)(ptr) = pte)
250 #define CAS_PTE(ptr, x, y) atomic_cas_64(ptr, x, y)
251
252 #elif defined(__i386)
253
254 #define IN_VA_HOLE(va) (__lintzero)
255
256 #define FMT_PTE "0x%llx"
257
258 /* on 32 bit kernels, 64 bit loads aren't atomic, use get_pte64() */
259 extern x86pte_t get_pte64(x86pte_t *ptr);
260 #define GET_PTE(ptr) (mmu.pae_hat ? get_pte64(ptr) : *(x86pte32_t *)(ptr))
/* PAE store is split: the upper 32 bits are written first, then the lower 32 */
261 #define SET_PTE(ptr, pte) \
262 ((mmu.pae_hat ? ((x86pte32_t *)(ptr))[1] = (pte >> 32) : 0), \
263 *(x86pte32_t *)(ptr) = pte)
/* PAE needs a 64 bit compare-and-swap; non-PAE ptes fit in 32 bits */
264 #define CAS_PTE(ptr, x, y) \
265 (mmu.pae_hat ? atomic_cas_64(ptr, x, y) : \
266 atomic_cas_32((uint32_t *)(ptr), (uint32_t)(x), (uint32_t)(y)))
267
268 #endif /* __i386 */
269
270 /*
271 * Return a pointer to the pte entry at the given index within a page table.
272 */
/* index is scaled by mmu.pte_size_shift — presumably log2 of the pte size; verify */
273 #define PT_INDEX_PTR(p, x) \
274 ((x86pte_t *)((uintptr_t)(p) + ((x) << mmu.pte_size_shift)))
275
276 /*
277 * Return the physical address of the pte entry at the given index within a
278 * page table.
279 */
280 #define PT_INDEX_PHYSADDR(p, x) \
281 ((paddr_t)(p) + ((x) << mmu.pte_size_shift))
282
283 /*
284 * From pfn to bytes, careful not to lose bits on PAE.
285 */
286 #define pfn_to_pa(pfn) (mmu_ptob((paddr_t)(pfn)))	/* paddr_t cast widens pfn before scaling */
|