203
204 /* Per-page status bits reported by segop_incore; distinct single bits, OR-able into one status value */
205 #define SEG_PAGE_INCORE 0x01 /* VA has a page backing it */
206 #define SEG_PAGE_LOCKED 0x02 /* VA has a page that is locked */
207 #define SEG_PAGE_HASCOW 0x04 /* VA has a page with a copy-on-write */
208 #define SEG_PAGE_SOFTLOCK 0x08 /* VA has a page with softlock held */
209 #define SEG_PAGE_VNODEBACKED 0x10 /* Segment is backed by a vnode */
210 #define SEG_PAGE_ANON 0x20 /* VA has an anonymous page */
211 #define SEG_PAGE_VNODE 0x40 /* VA has a vnode page backing it */
212
/*
 * seg_page(seg, addr): page index of addr within the segment — the byte
 * offset from s_base shifted down to pages.  NOTE(review): assumes
 * addr >= (seg)->s_base; a smaller addr would wrap through the
 * uintptr_t cast — confirm callers guarantee this.
 */
213 #define seg_page(seg, addr) \
214 (((uintptr_t)((addr) - (seg)->s_base)) >> PAGESHIFT)
215
/*
 * seg_pages(seg): number of pages the segment spans, with s_size
 * rounded up to a whole page (presumably PAGEOFFSET == PAGESIZE - 1,
 * the usual round-up idiom — confirm).
 */
216 #define seg_pages(seg) \
217 (((uintptr_t)((seg)->s_size + PAGEOFFSET)) >> PAGESHIFT)
218
/*
 * Private return codes passed between seg-layer routines; negative so
 * they cannot be mistaken for ordinary success values.  NOTE(review):
 * per the per-line comments these must never escape the seg layer.
 */
219 #define IE_NOMEM -1 /* internal to seg layer */
220 #define IE_RETRY -2 /* internal to seg layer */
221 #define IE_REATTACH -3 /* internal to seg layer */
222
223 /* Values for segop_inherit */
224 #define SEGP_INH_ZERO 0x01 /* presumably: child inherits the range zero-filled — confirm against seg drivers */
225
/* Default segop_inherit handler for segment drivers that do not support inheritance (presumably returns an error — confirm). */
226 int seg_inherit_notsup(struct seg *, caddr_t, size_t, uint_t);
227
228 /* Delay/retry factors for seg_p_mem_config_pre_del */
229 #define SEGP_PREDEL_DELAY_FACTOR 4
230 /*
231 * As a workaround to being unable to purge the pagelock
232 * cache during a DR delete memory operation, we use
233 * a stall threshold that is twice the maximum seen
234 * during testing. This workaround will be removed
235 * when a suitable fix is found.
236 */
237 #define SEGP_STALL_SECONDS 25
/*
 * Retry count at which the pre-delete is declared stalled:
 * 25 * 4 = 100.  NOTE(review): units inferred from the macro names
 * (seconds times retries-per-second) — confirm in
 * seg_p_mem_config_pre_del.
 */
238 #define SEGP_STALL_THRESHOLD \
239 (SEGP_STALL_SECONDS * SEGP_PREDEL_DELAY_FACTOR)
240
241 #ifdef VMDEBUG
242
243 uint_t seg_page(struct seg *, caddr_t);
|
203
204 /* Per-page status bits reported by segop_incore; distinct single bits, OR-able into one status value */
205 #define SEG_PAGE_INCORE 0x01 /* VA has a page backing it */
206 #define SEG_PAGE_LOCKED 0x02 /* VA has a page that is locked */
207 #define SEG_PAGE_HASCOW 0x04 /* VA has a page with a copy-on-write */
208 #define SEG_PAGE_SOFTLOCK 0x08 /* VA has a page with softlock held */
209 #define SEG_PAGE_VNODEBACKED 0x10 /* Segment is backed by a vnode */
210 #define SEG_PAGE_ANON 0x20 /* VA has an anonymous page */
211 #define SEG_PAGE_VNODE 0x40 /* VA has a vnode page backing it */
212
/*
 * seg_page(seg, addr): page index of addr within the segment — the byte
 * offset from s_base shifted down to pages.  NOTE(review): assumes
 * addr >= (seg)->s_base; a smaller addr would wrap through the
 * uintptr_t cast — confirm callers guarantee this.
 */
213 #define seg_page(seg, addr) \
214 (((uintptr_t)((addr) - (seg)->s_base)) >> PAGESHIFT)
215
/*
 * seg_pages(seg): number of pages the segment spans, with s_size
 * rounded up to a whole page (presumably PAGEOFFSET == PAGESIZE - 1,
 * the usual round-up idiom — confirm).
 */
216 #define seg_pages(seg) \
217 (((uintptr_t)((seg)->s_size + PAGEOFFSET)) >> PAGESHIFT)
218
/*
 * Private return codes passed between seg-layer routines; negative so
 * they cannot be mistaken for ordinary success values.  NOTE(review):
 * per the per-line comments these must never escape the seg layer.
 */
219 #define IE_NOMEM -1 /* internal to seg layer */
220 #define IE_RETRY -2 /* internal to seg layer */
221 #define IE_REATTACH -3 /* internal to seg layer */
222
223 /* Values for segop_inherit */
224 #define SEGP_INH_ZERO 0x01 /* presumably: child inherits the range zero-filled — confirm against seg drivers */
225
/* Default segop_inherit handler for segment drivers that do not support inheritance (presumably returns an error — confirm). */
226 int seg_inherit_notsup(struct seg *, caddr_t, size_t, uint_t);
227
228 /* Delay/retry factors for seg_p_mem_config_pre_del */
229 #define SEGP_PREDEL_DELAY_FACTOR 4
230 /*
231 * As a workaround to being unable to purge the pagelock
232 * cache during a DR delete memory operation, we use
233 * a stall threshold that is twice the maximum seen
234 * during testing. This workaround will be removed
235 * when a suitable fix is found.
236 */
237 #define SEGP_STALL_SECONDS 25
/*
 * Retry count at which the pre-delete is declared stalled:
 * 25 * 4 = 100.  NOTE(review): units inferred from the macro names
 * (seconds times retries-per-second) — confirm in
 * seg_p_mem_config_pre_del.
 */
238 #define SEGP_STALL_THRESHOLD \
239 (SEGP_STALL_SECONDS * SEGP_PREDEL_DELAY_FACTOR)
240
241 #ifdef VMDEBUG
242
243 uint_t seg_page(struct seg *, caddr_t);
|