/*
 * Forward declarations for the segment-driver entry points implemented by
 * this file and installed in the segspt_shmops ops vector below.  All are
 * file-local (static); the signatures match the seg_ops function-pointer
 * slots they are assigned to.
 */
static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
	uint_t prot);
static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t segspt_shmswapout(struct seg *seg);
static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
	register char *vec);
static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
	int attr, uint_t flags);
static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
	int attr, int op, ulong_t *lockmap, size_t pos);
static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
	uint_t *protv);
static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
static int segspt_shmgettype(struct seg *seg, caddr_t addr);
static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
	uint_t behav);
static void segspt_shmdump(struct seg *seg);
static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
	struct page ***, enum lock_type, enum seg_rw);
static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
145
/*
 * Operations vector for shared-memory (shm) segments.  The VM segment
 * layer dispatches through these function pointers for segments created
 * by this driver.  Each slot is filled with the corresponding
 * segspt_shm* routine declared above.
 */
struct seg_ops segspt_shmops = {
	.dup = segspt_shmdup,
	.unmap = segspt_shmunmap,
	.free = segspt_shmfree,
	.fault = segspt_shmfault,
	.faulta = segspt_shmfaulta,
	.setprot = segspt_shmsetprot,
	.checkprot = segspt_shmcheckprot,
	.kluster = segspt_shmkluster,
	.swapout = segspt_shmswapout,
	.sync = segspt_shmsync,
	.incore = segspt_shmincore,
	.lockop = segspt_shmlockop,
	.getprot = segspt_shmgetprot,
	.getoffset = segspt_shmgetoffset,
	.gettype = segspt_shmgettype,
	.getvp = segspt_shmgetvp,
	.advise = segspt_shmadvise,
	.dump = segspt_shmdump,
	.pagelock = segspt_shmpagelock,
	.setpagesize = segspt_shmsetpgsz,
	.getmemid = segspt_shmgetmemid,
	.getpolicy = segspt_shmgetpolicy,
};
170
/*
 * Internal helpers, not part of the seg_ops interface.
 */
static void segspt_purge(struct seg *seg);
static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
	enum seg_rw, int);
static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
	page_t **ppa);
176
177
178
179 /*ARGSUSED*/
180 int
181 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
182 uint_t prot, uint_t flags, uint_t share_szc)
183 {
184 int err;
185 struct as *newas;
186 struct segspt_crargs sptcargs;
2998 /*
2999 * Mark any existing pages in the given range for
3000 * migration, flushing the I/O page cache, and using
3001 * underlying segment to calculate anon index and get
3002 * anonmap and vnode pointer from
3003 */
3004 if (shmd->shm_softlockcnt > 0)
3005 segspt_purge(seg);
3006
3007 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3008 }
3009
3010 return (0);
3011 }
3012
/*
 * seg_ops dump entry point.  There is nothing segment-private to dump for
 * an ISM segment, so this is deliberately a no-op.
 *
 * Declared "static void" in the forward declarations above; the storage
 * class is repeated here so the definition matches its declaration.
 */
/*ARGSUSED*/
static void
segspt_shmdump(struct seg *seg)
{
	/* no-op for ISM segment */
}
3019
3020 /*ARGSUSED*/
3021 static faultcode_t
3022 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
3023 {
3024 return (ENOTSUP);
3025 }
3026
3027 /*
3028 * get a memory ID for an addr in a given segment
3029 */
3030 static int
3031 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3032 {
3033 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3034 struct anon *ap;
3035 size_t anon_index;
3036 struct anon_map *amp = shmd->shm_amp;
3037 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3038 struct seg *sptseg = shmd->shm_sptseg;
3039 anon_sync_obj_t cookie;
3040
3041 anon_index = seg_page(seg, addr);
3042
3043 if (addr > (seg->s_base + sptd->spt_realsize)) {
3044 return (EFAULT);
|
/*
 * Forward declarations for the segment-driver entry points implemented by
 * this file and installed in the segspt_shmops ops vector below.  All are
 * file-local (static); the signatures match the seg_ops function-pointer
 * slots they are assigned to.
 */
static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
	uint_t prot);
static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t segspt_shmswapout(struct seg *seg);
static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
	register char *vec);
static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
	int attr, uint_t flags);
static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
	int attr, int op, ulong_t *lockmap, size_t pos);
static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
	uint_t *protv);
static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
static int segspt_shmgettype(struct seg *seg, caddr_t addr);
static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
	uint_t behav);
static void segspt_shmdump(struct seg *seg);
static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
	struct page ***, enum lock_type, enum seg_rw);
static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
144
/*
 * Operations vector for shared-memory (shm) segments.  The VM segment
 * layer dispatches through these function pointers for segments created
 * by this driver.  Slots not listed here (e.g. setpagesize) are left
 * zero-initialized by the designated-initializer syntax.
 */
struct seg_ops segspt_shmops = {
	.dup = segspt_shmdup,
	.unmap = segspt_shmunmap,
	.free = segspt_shmfree,
	.fault = segspt_shmfault,
	.faulta = segspt_shmfaulta,
	.setprot = segspt_shmsetprot,
	.checkprot = segspt_shmcheckprot,
	.kluster = segspt_shmkluster,
	.swapout = segspt_shmswapout,
	.sync = segspt_shmsync,
	.incore = segspt_shmincore,
	.lockop = segspt_shmlockop,
	.getprot = segspt_shmgetprot,
	.getoffset = segspt_shmgetoffset,
	.gettype = segspt_shmgettype,
	.getvp = segspt_shmgetvp,
	.advise = segspt_shmadvise,
	.dump = segspt_shmdump,
	.pagelock = segspt_shmpagelock,
	.getmemid = segspt_shmgetmemid,
	.getpolicy = segspt_shmgetpolicy,
};
168
/*
 * Internal helpers, not part of the seg_ops interface.
 */
static void segspt_purge(struct seg *seg);
static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
	enum seg_rw, int);
static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
	page_t **ppa);
174
175
176
177 /*ARGSUSED*/
178 int
179 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
180 uint_t prot, uint_t flags, uint_t share_szc)
181 {
182 int err;
183 struct as *newas;
184 struct segspt_crargs sptcargs;
2996 /*
2997 * Mark any existing pages in the given range for
2998 * migration, flushing the I/O page cache, and using
2999 * underlying segment to calculate anon index and get
3000 * anonmap and vnode pointer from
3001 */
3002 if (shmd->shm_softlockcnt > 0)
3003 segspt_purge(seg);
3004
3005 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3006 }
3007
3008 return (0);
3009 }
3010
/*
 * seg_ops dump entry point.  There is nothing segment-private to dump for
 * an ISM segment, so this is deliberately a no-op.
 *
 * Declared "static void" in the forward declarations above; the storage
 * class is repeated here so the definition matches its declaration.
 */
/*ARGSUSED*/
static void
segspt_shmdump(struct seg *seg)
{
	/* no-op for ISM segment */
}
3017
3018 /*
3019 * get a memory ID for an addr in a given segment
3020 */
3021 static int
3022 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3023 {
3024 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3025 struct anon *ap;
3026 size_t anon_index;
3027 struct anon_map *amp = shmd->shm_amp;
3028 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3029 struct seg *sptseg = shmd->shm_sptseg;
3030 anon_sync_obj_t cookie;
3031
3032 anon_index = seg_page(seg, addr);
3033
3034 if (addr > (seg->s_base + sptd->spt_realsize)) {
3035 return (EFAULT);
|