 */
static void	segkp_badop(void);
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
		    uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
		    struct page ***page, enum lock_type type,
		    enum seg_rw rw);
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
		    struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
		    struct segkp_data *kpd, size_t len);
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
		    size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
		    size_t len, struct segkp_data *kpd, uint_t flags);
static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int	segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);

/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;
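
/*
 * A usage sketch (names as declared in this file): the hash and cache
 * manipulation paths are expected to bracket their work with this
 * mutex, e.g.
 *
 *	mutex_enter(&segkp_lock);
 *	... link or unlink a segkp_data, or trim segkp_cache[] ...
 *	mutex_exit(&segkp_lock);
 */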

/*
 * The segkp caches
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

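/*
 * SEGKP_BADOP() casts segkp_badop() (defined below) to a function
 * returning type t, so a single panicking stub can fill in every
 * seg_ops slot that must never be called on a segkp segment.
 */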
#define	SEGKP_BADOP(t)	(t(*)())segkp_badop

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called). 5000 seems
 * to work reasonably well...
 */
long red_minavail = 5000;

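/*
 * A hypothetical caller sketch (stack_left is not a real variable;
 * it stands for however the caller measures its remaining headroom):
 *
 *	if (stack_left < red_minavail)
 *		(void) segkp_map_red();
 */
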
static const struct seg_ops segkp_ops = {
	.dup		= SEGKP_BADOP(int),
	.unmap		= SEGKP_BADOP(int),
	.free		= SEGKP_BADOP(void),
	.fault		= segkp_fault,
	.faulta		= SEGKP_BADOP(faultcode_t),
	.setprot	= SEGKP_BADOP(int),
	.checkprot	= segkp_checkprot,
	.kluster	= segkp_kluster,
	.swapout	= SEGKP_BADOP(size_t),
	.sync		= SEGKP_BADOP(int),
	.incore		= SEGKP_BADOP(size_t),
	.lockop		= SEGKP_BADOP(int),
	.getprot	= SEGKP_BADOP(int),
	.getoffset	= SEGKP_BADOP(u_offset_t),
	.gettype	= SEGKP_BADOP(int),
	.getvp		= SEGKP_BADOP(int),
	.advise		= SEGKP_BADOP(int),
	.dump		= segkp_dump,
	.pagelock	= segkp_pagelock,
	.setpagesize	= SEGKP_BADOP(int),
	.getmemid	= segkp_getmemid,
};
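
/*
 * (A wiring note, assuming the usual segment-driver pattern: the elided
 * body of segkp_create() below sets seg->s_ops to &segkp_ops, after
 * which any of the SEGKP_BADOP slots panics if invoked.)
 */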

static void
segkp_badop(void)
{
	panic("segkp_badop");
	/*NOTREACHED*/
}

static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;

/*
 * Allocate the segment-specific private data struct and fill it in
 * with the per-segment mutex, anon pointer array, and hash table.
 */
int
segkp_create(struct seg *seg)
{
	/* ... */
}
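
/*
 * (Illustrative only; the real call site is in the platform startup
 * code.)  The segment is attached to the kernel address space first,
 * then handed to segkp_create(), roughly:
 *
 *	(void) seg_attach(&kas, addr, size, segkp);
 *	if (segkp_create(segkp) != 0)
 *		cmn_err(CE_PANIC, "startup: segkp_create failed");
 */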

/*
 * Dump out all the active segkp pages
 */
static void
segkp_dump(struct seg *seg)
{
	int i;
	struct segkp_data *kpd;
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;

	for (i = 0; i < SEGKP_HASHSZ; i++) {
		for (kpd = kpsd->kpsd_hash[i];
		    kpd != NULL; kpd = kpd->kp_next) {
			pfn_t pfn;
			caddr_t addr;
			caddr_t eaddr;

			addr = kpd->kp_base;
			eaddr = addr + kpd->kp_len;
			while (addr < eaddr) {
				ASSERT(seg->s_as == &kas);
				pfn = hat_getpfnum(seg->s_as->a_hat, addr);
				if (pfn != PFN_INVALID)
					dump_addpage(seg->s_as, addr, pfn);
				addr += PAGESIZE;
				dump_timeleft = dump_timeout;
			}
		}
	}
}

/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}

#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_inc_32(&segkp_indel);
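	/*
	 * (Assumption based on the comment above: a nonzero segkp_indel
	 * is meant to make the allocation and cache paths stop caching
	 * resources while the delete is in progress.)
	 */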