6144 use C99 initializers in segment ops structures
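
For readers skimming the diff below: C99 designated initializers let an
aggregate initializer name the member it sets, instead of depending on the
member's position in the struct declaration. A minimal standalone
illustration (the struct and functions here are made up for the example,
not taken from seg_ops):

    #include <stdio.h>

    struct ops {
            int (*open)(void);
            int (*close)(void);
            long (*seek)(long);
    };

    static int my_open(void) { return (0); }
    static int my_close(void) { return (1); }

    /* Pre-C99 style: values must appear in declaration order. */
    static struct ops a = { my_open, my_close, NULL };

    /*
     * C99 style: members are named, order no longer matters, and
     * any member left out (.seek here) is zero-initialized.
     */
    static struct ops b = {
            .close = my_close,
            .open = my_open,
    };

    int
    main(void)
    {
            printf("%d %d\n", a.open(), b.close());
            return (0);
    }

With two dozen positional entries, as in segkp_ops below, one misplaced
line silently wires an operation to the wrong slot; the designated form
makes each binding explicit.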

Old version (positional initializers):

 129  * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 130  * then the stack situation has become quite serious;  if much more stack
 131  * is consumed, we have the potential of scrogging the next thread/LWP
 132  * structure.  To help debug the "can't happen" panics which may
 133  * result from this condition, we record hrestime and the calling thread
 134  * in red_deep_hires and red_deep_thread respectively.
 135  */
 136 #define RED_DEEP_THRESHOLD      2000
 137 
 138 hrtime_t        red_deep_hires;
 139 kthread_t       *red_deep_thread;
 140 
 141 uint32_t        red_nmapped;
 142 uint32_t        red_closest = UINT_MAX;
 143 uint32_t        red_ndoubles;
 144 
 145 pgcnt_t anon_segkp_pages_locked;        /* See vm/anon.h */
 146 pgcnt_t anon_segkp_pages_resv;          /* anon reserved by seg_kp */
 147 
 148 static struct   seg_ops segkp_ops = {
 149         SEGKP_BADOP(int),               /* dup */
 150         SEGKP_BADOP(int),               /* unmap */
 151         SEGKP_BADOP(void),              /* free */
 152         segkp_fault,
 153         SEGKP_BADOP(faultcode_t),       /* faulta */
 154         SEGKP_BADOP(int),               /* setprot */
 155         segkp_checkprot,
 156         segkp_kluster,
 157         SEGKP_BADOP(size_t),            /* swapout */
 158         SEGKP_BADOP(int),               /* sync */
 159         SEGKP_BADOP(size_t),            /* incore */
 160         SEGKP_BADOP(int),               /* lockop */
 161         SEGKP_BADOP(int),               /* getprot */
 162         SEGKP_BADOP(u_offset_t),                /* getoffset */
 163         SEGKP_BADOP(int),               /* gettype */
 164         SEGKP_BADOP(int),               /* getvp */
 165         SEGKP_BADOP(int),               /* advise */
 166         segkp_dump,                     /* dump */
 167         segkp_pagelock,                 /* pagelock */
 168         SEGKP_BADOP(int),               /* setpgsz */
 169         segkp_getmemid,                 /* getmemid */
 170         segkp_getpolicy,                /* getpolicy */
 171         segkp_capable,                  /* capable */
 172         seg_inherit_notsup              /* inherit */
 173 };
 174 
 175 
 176 static void
 177 segkp_badop(void)
 178 {
 179         panic("segkp_badop");
 180         /*NOTREACHED*/
 181 }
 182 
 183 static void segkpinit_mem_config(struct seg *);
 184 
 185 static uint32_t segkp_indel;
 186 
 187 /*
 188  * Allocate the segment specific private data struct and fill it in
 189  * with the per kp segment mutex, anon ptr. array and hash table.
 190  */
 191 int
 192 segkp_create(struct seg *seg)

New version (C99 designated initializers):

 129  * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 130  * then the stack situation has become quite serious;  if much more stack
 131  * is consumed, we have the potential of scrogging the next thread/LWP
 132  * structure.  To help debug the "can't happen" panics which may
 133  * result from this condition, we record hrestime and the calling thread
 134  * in red_deep_hires and red_deep_thread respectively.
 135  */
 136 #define RED_DEEP_THRESHOLD      2000
 137 
 138 hrtime_t        red_deep_hires;
 139 kthread_t       *red_deep_thread;
 140 
 141 uint32_t        red_nmapped;
 142 uint32_t        red_closest = UINT_MAX;
 143 uint32_t        red_ndoubles;
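
The recording the comment above describes might look like the sketch
below; stack_left and the use of gethrtime() are assumptions for
illustration (red_deep_hires is an hrtime_t, so a gethrtime() value fits),
not the actual segkp red-zone code:

    /* Illustrative sketch only, not the segkp source. */
    if (stack_left < RED_DEEP_THRESHOLD) {
            red_deep_hires = gethrtime();   /* assumed timestamp source */
            red_deep_thread = curthread;    /* thread that went deep */
    }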
 144 
 145 pgcnt_t anon_segkp_pages_locked;        /* See vm/anon.h */
 146 pgcnt_t anon_segkp_pages_resv;          /* anon reserved by seg_kp */
 147 
 148 static struct   seg_ops segkp_ops = {
 149         .dup            = SEGKP_BADOP(int),
 150         .unmap          = SEGKP_BADOP(int),
 151         .free           = SEGKP_BADOP(void),
 152         .fault          = segkp_fault,
 153         .faulta         = SEGKP_BADOP(faultcode_t),
 154         .setprot        = SEGKP_BADOP(int),
 155         .checkprot      = segkp_checkprot,
 156         .kluster        = segkp_kluster,
 157         .swapout        = SEGKP_BADOP(size_t),
 158         .sync           = SEGKP_BADOP(int),
 159         .incore         = SEGKP_BADOP(size_t),
 160         .lockop         = SEGKP_BADOP(int),
 161         .getprot        = SEGKP_BADOP(int),
 162         .getoffset      = SEGKP_BADOP(u_offset_t),
 163         .gettype        = SEGKP_BADOP(int),
 164         .getvp          = SEGKP_BADOP(int),
 165         .advise         = SEGKP_BADOP(int),
 166         .dump           = segkp_dump,
 167         .pagelock       = segkp_pagelock,
 168         .setpagesize    = SEGKP_BADOP(int),
 169         .getmemid       = segkp_getmemid,
 170         .getpolicy      = segkp_getpolicy,
 171         .capable        = segkp_capable,
 172         .inherit        = seg_inherit_notsup,
 173 };
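
A practical property of the designated form above: if struct seg_ops gains
a member or its members are reordered, each assignment still lands on the
member it names, and any member not listed is implicitly zero-initialized
instead of silently shifting every later entry. The trailing comma after
.inherit is valid C and keeps future additions to one-line diffs. Note
also that the old /* setpgsz */ comment is replaced by the member's actual
name, .setpagesize, so the labels can no longer drift out of sync with the
slots they describe.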
 174 
 175 
 176 static void
 177 segkp_badop(void)
 178 {
 179         panic("segkp_badop");
 180         /*NOTREACHED*/
 181 }
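
segkp_badop() is the panic stub that SEGKP_BADOP() plugs into every
unimplemented slot. The macro definition sits outside this hunk; in
seg_kp.c it is conventionally something like (treat this as a sketch):

    #define SEGKP_BADOP(t)  (t(*)())segkp_badop

That is, the stub is cast to a pointer to a function returning type t, so
one routine can stand in for ops with different return types; any call
through such a slot panics immediately, before the mismatched return type
could matter.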
 182 
 183 static void segkpinit_mem_config(struct seg *);
 184 
 185 static uint32_t segkp_indel;
 186 
 187 /*
 188  * Allocate the segment specific private data struct and fill it in
 189  * with the per kp segment mutex, anon ptr. array and hash table.
 190  */
 191 int
 192 segkp_create(struct seg *seg)
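
The body of segkp_create() is cut off by this hunk. As a hedged sketch of
the pattern the comment describes (struct kp_data, KP_HASHSZ, and the
member names are hypothetical stand-ins, not the real segkp private data;
kmem_zalloc(), mutex_init(), and btop() are real illumos kernel
interfaces), the allocation might proceed like:

    #define KP_HASHSZ       256     /* hypothetical table size */

    /* Hypothetical private-data shape, for illustration only. */
    struct kp_data {
            kmutex_t        kpd_mtx;        /* per kp segment mutex */
            struct anon     **kpd_anon;     /* anon ptr. array */
            void            **kpd_hash;     /* resource hash table */
    };

    int
    segkp_create(struct seg *seg)
    {
            struct kp_data *kpd;
            pgcnt_t np = btop(seg->s_size); /* pages in the segment */

            kpd = kmem_zalloc(sizeof (*kpd), KM_SLEEP);
            mutex_init(&kpd->kpd_mtx, NULL, MUTEX_DEFAULT, NULL);
            kpd->kpd_anon = kmem_zalloc(np * sizeof (struct anon *),
                KM_SLEEP);
            kpd->kpd_hash = kmem_zalloc(KP_HASHSZ * sizeof (void *),
                KM_SLEEP);

            seg->s_data = kpd;              /* hang private data off seg */
            seg->s_ops = &segkp_ops;        /* install the ops vector */
            return (0);
    }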