6146 seg_inherit_notsup is redundant
The header as it currently stands, with the seg_inherit_notsup prototype still declared:
 206 #define SEG_PAGE_LOCKED         0x02    /* VA has a page that is locked */
 207 #define SEG_PAGE_HASCOW         0x04    /* VA has a page with a copy-on-write */
 208 #define SEG_PAGE_SOFTLOCK       0x08    /* VA has a page with softlock held */
 209 #define SEG_PAGE_VNODEBACKED    0x10    /* Segment is backed by a vnode */
 210 #define SEG_PAGE_ANON           0x20    /* VA has an anonymous page */
 211 #define SEG_PAGE_VNODE          0x40    /* VA has a vnode page backing it */
 212 
 213 #define seg_page(seg, addr) \
 214         (((uintptr_t)((addr) - (seg)->s_base)) >> PAGESHIFT)
 215 
 216 #define seg_pages(seg) \
 217         (((uintptr_t)((seg)->s_size + PAGEOFFSET)) >> PAGESHIFT)
 218 
 219 #define IE_NOMEM        -1      /* internal to seg layer */
 220 #define IE_RETRY        -2      /* internal to seg layer */
 221 #define IE_REATTACH     -3      /* internal to seg layer */
 222 
 223 /* Values for segop_inherit */
 224 #define SEGP_INH_ZERO   0x01
 225 
 226 int seg_inherit_notsup(struct seg *, caddr_t, size_t, uint_t);
 227 
 228 /* Delay/retry factors for seg_p_mem_config_pre_del */
 229 #define SEGP_PREDEL_DELAY_FACTOR        4
 230 /*
 231  * As a workaround to being unable to purge the pagelock
 232  * cache during a DR delete memory operation, we use
 233  * a stall threshold that is twice the maximum seen
 234  * during testing.  This workaround will be removed
 235  * when a suitable fix is found.
 236  */
 237 #define SEGP_STALL_SECONDS      25
 238 #define SEGP_STALL_THRESHOLD \
 239         (SEGP_STALL_SECONDS * SEGP_PREDEL_DELAY_FACTOR)
 240 
 241 #ifdef VMDEBUG
 242 
 243 uint_t  seg_page(struct seg *, caddr_t);
 244 uint_t  seg_pages(struct seg *);
 245 
 246 #endif  /* VMDEBUG */
 247 
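
The seg_inherit_notsup() declared above is the generic segop_inherit handler for segment drivers that do not support inheritance. A minimal sketch of what such a stub amounts to, assuming (as the redundancy claim suggests) it carries no driver-specific logic and simply fails the request with ENOTSUP; illustrative only, the real definition lives in the VM segment code, not in this header:

#include <sys/types.h>
#include <sys/errno.h>
#include <vm/seg.h>

/*
 * Illustrative stub only (assumed body): a catch-all "inheritance not
 * supported" handler matching the prototype declared above.  Every
 * driver that lacks real segop_inherit support gets the same answer.
 */
/*ARGSUSED*/
int
seg_inherit_notsup(struct seg *seg, caddr_t addr, size_t len, uint_t op)
{
	return (ENOTSUP);
}

Once callers can get that same default answer without a dedicated function, the separate prototype serves no purpose, which is presumably what the trimmed excerpt below reflects.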
The same excerpt with the redundant prototype removed:

 206 #define SEG_PAGE_LOCKED         0x02    /* VA has a page that is locked */
 207 #define SEG_PAGE_HASCOW         0x04    /* VA has a page with a copy-on-write */
 208 #define SEG_PAGE_SOFTLOCK       0x08    /* VA has a page with softlock held */
 209 #define SEG_PAGE_VNODEBACKED    0x10    /* Segment is backed by a vnode */
 210 #define SEG_PAGE_ANON           0x20    /* VA has an anonymous page */
 211 #define SEG_PAGE_VNODE          0x40    /* VA has a vnode page backing it */
 212 
 213 #define seg_page(seg, addr) \
 214         (((uintptr_t)((addr) - (seg)->s_base)) >> PAGESHIFT)
 215 
 216 #define seg_pages(seg) \
 217         (((uintptr_t)((seg)->s_size + PAGEOFFSET)) >> PAGESHIFT)
 218 
 219 #define IE_NOMEM        -1      /* internal to seg layer */
 220 #define IE_RETRY        -2      /* internal to seg layer */
 221 #define IE_REATTACH     -3      /* internal to seg layer */
 222 
 223 /* Values for segop_inherit */
 224 #define SEGP_INH_ZERO   0x01
 225 


 226 /* Delay/retry factors for seg_p_mem_config_pre_del */
 227 #define SEGP_PREDEL_DELAY_FACTOR        4
 228 /*
 229  * As a workaround to being unable to purge the pagelock
 230  * cache during a DR delete memory operation, we use
 231  * a stall threshold that is twice the maximum seen
 232  * during testing.  This workaround will be removed
 233  * when a suitable fix is found.
 234  */
 235 #define SEGP_STALL_SECONDS      25
 236 #define SEGP_STALL_THRESHOLD \
 237         (SEGP_STALL_SECONDS * SEGP_PREDEL_DELAY_FACTOR)
 238 
 239 #ifdef VMDEBUG
 240 
 241 uint_t  seg_page(struct seg *, caddr_t);
 242 uint_t  seg_pages(struct seg *);
 243 
 244 #endif  /* VMDEBUG */
 245
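
For reference, the seg_page() and seg_pages() macros kept in both versions are plain page arithmetic: the byte offset of an address from the segment base shifted down by PAGESHIFT, and the segment size rounded up to whole pages. A standalone sketch, assuming 8 KB pages and only the two struct seg fields the macros touch (the real PAGESHIFT/PAGEOFFSET definitions come from the machine-dependent headers):

#include <stdio.h>
#include <stdint.h>

#define	PAGESHIFT	13			/* assumed: 8 KB pages */
#define	PAGESIZE	(1UL << PAGESHIFT)
#define	PAGEOFFSET	(PAGESIZE - 1)

typedef char *caddr_t;

struct seg {				/* only the fields the macros touch */
	caddr_t	s_base;			/* base virtual address */
	size_t	s_size;			/* size in bytes */
};

/* Same expressions as in the header excerpts above. */
#define	seg_page(seg, addr) \
	(((uintptr_t)((addr) - (seg)->s_base)) >> PAGESHIFT)

#define	seg_pages(seg) \
	(((uintptr_t)((seg)->s_size + PAGEOFFSET)) >> PAGESHIFT)

int
main(void)
{
	static char base[4 * PAGESIZE];	/* backing range for the example */
	struct seg s = { base, 3 * PAGESIZE + 100 };
	caddr_t addr = base + 2 * PAGESIZE + 7;

	/* Page index of addr within the segment. */
	printf("seg_page  = %lu\n", (unsigned long)seg_page(&s, addr));

	/* Page count, rounded up to cover the partial last page. */
	printf("seg_pages = %lu\n", (unsigned long)seg_pages(&s));
	return (0);
}

With those inputs the program prints 2 and 4: the address lands in the segment's third page, and the 100 trailing bytes round the page count up to four.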