119 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
120 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
121 register size_t len, register uint_t prot);
122 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
123 uint_t prot);
124 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
125 static size_t segspt_shmswapout(struct seg *seg);
126 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
127 register char *vec);
128 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
129 int attr, uint_t flags);
130 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
131 int attr, int op, ulong_t *lockmap, size_t pos);
132 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
133 uint_t *protv);
134 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
135 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
136 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
137 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
138 uint_t behav);
139 static void segspt_shmdump(struct seg *seg);
140 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
141 struct page ***, enum lock_type, enum seg_rw);
142 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
143 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
144
/*
 * Segment-operations vector for shared ISM/SPT segments.  The VM layer
 * dispatches per-segment operations (fault handling, protection changes,
 * lock/sync/incore queries, memory-id and lgroup-policy lookups, etc.)
 * through this table; every entry points at one of the static
 * segspt_shm* handlers declared above.
 */
145 struct seg_ops segspt_shmops = {
146 .dup = segspt_shmdup,
147 .unmap = segspt_shmunmap,
148 .free = segspt_shmfree,
149 .fault = segspt_shmfault,
150 .faulta = segspt_shmfaulta,
151 .setprot = segspt_shmsetprot,
152 .checkprot = segspt_shmcheckprot,
153 .kluster = segspt_shmkluster,
154 .swapout = segspt_shmswapout,
155 .sync = segspt_shmsync,
156 .incore = segspt_shmincore,
157 .lockop = segspt_shmlockop,
158 .getprot = segspt_shmgetprot,
159 .getoffset = segspt_shmgetoffset,
160 .gettype = segspt_shmgettype,
161 .getvp = segspt_shmgetvp,
162 .advise = segspt_shmadvise,
163 .dump = segspt_shmdump,	/* no-op for ISM segments (see handler) */
164 .pagelock = segspt_shmpagelock,
165 .getmemid = segspt_shmgetmemid,
166 .getpolicy = segspt_shmgetpolicy,
167 };
168
169 static void segspt_purge(struct seg *seg);
170 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
171 enum seg_rw, int);
172 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
173 page_t **ppa);
174
175
176
177 /*ARGSUSED*/
178 int
179 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
180 uint_t prot, uint_t flags, uint_t share_szc)
181 {
182 int err;
183 struct as *newas;
2989 /*
2990 * If random memory allocation policy set already,
2991 * don't bother reapplying it.
2992 */
2993 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2994 return (0);
2995
2996 /*
2997 * Mark any existing pages in the given range for
2998 * migration, flushing the I/O page cache, and using
2999 * underlying segment to calculate anon index and get
3000 * anonmap and vnode pointer from
3001 */
3002 if (shmd->shm_softlockcnt > 0)
3003 segspt_purge(seg);
3004
3005 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3006 }
3007
3008 return (0);
3009 }
3010
/*
 * seg_ops dump handler for a shared ISM segment.  Intentionally empty:
 * this driver contributes nothing through the dump hook, and the seg
 * argument is unused (hence ARGSUSED).
 */
3011 /*ARGSUSED*/
3012 void
3013 segspt_shmdump(struct seg *seg)
3014 {
3015 /* no-op for ISM segment */
3016 }
3017
3018 /*
3019 * get a memory ID for an addr in a given segment
3020 */
3021 static int
3022 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3023 {
3024 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3025 struct anon *ap;
3026 size_t anon_index;
3027 struct anon_map *amp = shmd->shm_amp;
3028 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3029 struct seg *sptseg = shmd->shm_sptseg;
3030 anon_sync_obj_t cookie;
3031
3032 anon_index = seg_page(seg, addr);
3033
3034 if (addr > (seg->s_base + sptd->spt_realsize)) {
3035 return (EFAULT);
|
119 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
120 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
121 register size_t len, register uint_t prot);
122 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
123 uint_t prot);
124 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
125 static size_t segspt_shmswapout(struct seg *seg);
126 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
127 register char *vec);
128 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
129 int attr, uint_t flags);
130 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
131 int attr, int op, ulong_t *lockmap, size_t pos);
132 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
133 uint_t *protv);
134 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
135 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
136 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
137 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
138 uint_t behav);
139 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
140 struct page ***, enum lock_type, enum seg_rw);
141 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
142 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
143
/*
 * Segment-operations vector for shared ISM/SPT segments.  The VM layer
 * dispatches per-segment operations (fault handling, protection changes,
 * lock/sync/incore queries, memory-id and lgroup-policy lookups, etc.)
 * through this table; every entry points at one of the static
 * segspt_shm* handlers declared above.
 *
 * NOTE(review): no .dump entry is initialized in this version of the
 * table; with designated initializers the field is zero-filled, so the
 * dump hook must tolerate a NULL entry — confirm against the dispatch
 * code.
 */
144 struct seg_ops segspt_shmops = {
145 .dup = segspt_shmdup,
146 .unmap = segspt_shmunmap,
147 .free = segspt_shmfree,
148 .fault = segspt_shmfault,
149 .faulta = segspt_shmfaulta,
150 .setprot = segspt_shmsetprot,
151 .checkprot = segspt_shmcheckprot,
152 .kluster = segspt_shmkluster,
153 .swapout = segspt_shmswapout,
154 .sync = segspt_shmsync,
155 .incore = segspt_shmincore,
156 .lockop = segspt_shmlockop,
157 .getprot = segspt_shmgetprot,
158 .getoffset = segspt_shmgetoffset,
159 .gettype = segspt_shmgettype,
160 .getvp = segspt_shmgetvp,
161 .advise = segspt_shmadvise,
162 .pagelock = segspt_shmpagelock,
163 .getmemid = segspt_shmgetmemid,
164 .getpolicy = segspt_shmgetpolicy,
165 };
166
167 static void segspt_purge(struct seg *seg);
168 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
169 enum seg_rw, int);
170 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
171 page_t **ppa);
172
173
174
175 /*ARGSUSED*/
176 int
177 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
178 uint_t prot, uint_t flags, uint_t share_szc)
179 {
180 int err;
181 struct as *newas;
2987 /*
2988 * If random memory allocation policy set already,
2989 * don't bother reapplying it.
2990 */
2991 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2992 return (0);
2993
2994 /*
2995 * Mark any existing pages in the given range for
2996 * migration, flushing the I/O page cache, and using
2997 * underlying segment to calculate anon index and get
2998 * anonmap and vnode pointer from
2999 */
3000 if (shmd->shm_softlockcnt > 0)
3001 segspt_purge(seg);
3002
3003 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3004 }
3005
3006 return (0);
3007 }
3008
3009 /*
3010 * get a memory ID for an addr in a given segment
3011 */
3012 static int
3013 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3014 {
3015 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3016 struct anon *ap;
3017 size_t anon_index;
3018 struct anon_map *amp = shmd->shm_amp;
3019 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3020 struct seg *sptseg = shmd->shm_sptseg;
3021 anon_sync_obj_t cookie;
3022
3023 anon_index = seg_page(seg, addr);
3024
3025 if (addr > (seg->s_base + sptd->spt_realsize)) {
3026 return (EFAULT);
|