5382 pvn_getpages handles lengths <= PAGESIZE just fine
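The change below drops the single-page special case in hsfs_getpage(): instead of calling hsfs_getapage() directly when len <= PAGESIZE and pvn_getpages() otherwise, hsfs_getpage() now always goes through pvn_getpages(), which hands each page of the requested range to hsfs_getapage() anyway. What follows is a minimal conceptual sketch (not the actual common VM code; the function name and the simplified signature are made up for illustration) of how a pvn_getpages()-style loop drives a per-page getapage callback, only meant to show why a request of one page or less needs no shortcut.

	/*
	 * Conceptual sketch only: a simplified view of the loop that
	 * pvn_getpages() runs over the supplied per-page routine (here
	 * hsfs_getapage()).  The real implementation lives in the common
	 * VM code and also manages the returned page list, protections
	 * and klustering; none of that is shown here.
	 */
	static int
	sketch_pvn_getpages(
		int (*getapage)(struct vnode *, u_offset_t, size_t, uint_t *,
		    struct page *[], size_t, struct seg *, caddr_t,
		    enum seg_rw, struct cred *),
		struct vnode *vp,
		u_offset_t off,
		size_t len,
		uint_t *protp,
		struct page *pl[],
		size_t plsz,
		struct seg *seg,
		caddr_t addr,
		enum seg_rw rw,
		struct cred *cred)
	{
		u_offset_t o;
		caddr_t a;
		int err = 0;

		/*
		 * One callback per page in [off, off + len).  A length of
		 * one page (or less) simply means a single iteration, which
		 * is why the old "len <= PAGESIZE" shortcut in hsfs_getpage()
		 * bought nothing.
		 */
		for (o = off, a = addr; o < off + len && err == 0;
		    o += PAGESIZE, a += PAGESIZE) {
			err = (*getapage)(vp, o, PAGESIZE, protp, pl, plsz,
			    seg, a, rw, cred);
		}
		return (err);
	}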
--- old/usr/src/uts/common/fs/hsfs/hsfs_vnops.c
+++ new/usr/src/uts/common/fs/hsfs/hsfs_vnops.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 + * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 26 */
26 27
27 28 /*
28 29 * Vnode operations for the High Sierra filesystem
29 30 */
30 31
31 32 #include <sys/types.h>
32 33 #include <sys/t_lock.h>
33 34 #include <sys/param.h>
34 35 #include <sys/time.h>
35 36 #include <sys/systm.h>
36 37 #include <sys/sysmacros.h>
37 38 #include <sys/resource.h>
38 39 #include <sys/signal.h>
39 40 #include <sys/cred.h>
40 41 #include <sys/user.h>
41 42 #include <sys/buf.h>
42 43 #include <sys/vfs.h>
43 44 #include <sys/vfs_opreg.h>
44 45 #include <sys/stat.h>
45 46 #include <sys/vnode.h>
46 47 #include <sys/mode.h>
47 48 #include <sys/proc.h>
48 49 #include <sys/disp.h>
49 50 #include <sys/file.h>
50 51 #include <sys/fcntl.h>
51 52 #include <sys/flock.h>
52 53 #include <sys/kmem.h>
53 54 #include <sys/uio.h>
54 55 #include <sys/conf.h>
55 56 #include <sys/errno.h>
56 57 #include <sys/mman.h>
57 58 #include <sys/pathname.h>
58 59 #include <sys/debug.h>
59 60 #include <sys/vmsystm.h>
60 61 #include <sys/cmn_err.h>
61 62 #include <sys/fbuf.h>
62 63 #include <sys/dirent.h>
63 64 #include <sys/errno.h>
64 65 #include <sys/dkio.h>
65 66 #include <sys/cmn_err.h>
66 67 #include <sys/atomic.h>
67 68
68 69 #include <vm/hat.h>
69 70 #include <vm/page.h>
70 71 #include <vm/pvn.h>
71 72 #include <vm/as.h>
72 73 #include <vm/seg.h>
73 74 #include <vm/seg_map.h>
74 75 #include <vm/seg_kmem.h>
75 76 #include <vm/seg_vn.h>
76 77 #include <vm/rm.h>
77 78 #include <vm/page.h>
78 79 #include <sys/swap.h>
79 80 #include <sys/avl.h>
80 81 #include <sys/sunldi.h>
81 82 #include <sys/ddi.h>
82 83 #include <sys/sunddi.h>
83 84 #include <sys/sdt.h>
84 85
85 86 /*
86 87 * For struct modlinkage
87 88 */
88 89 #include <sys/modctl.h>
89 90
90 91 #include <sys/fs/hsfs_spec.h>
91 92 #include <sys/fs/hsfs_node.h>
92 93 #include <sys/fs/hsfs_impl.h>
93 94 #include <sys/fs/hsfs_susp.h>
94 95 #include <sys/fs/hsfs_rrip.h>
95 96
96 97 #include <fs/fs_subr.h>
97 98
98 99 /* # of contiguous requests to detect sequential access pattern */
99 100 static int seq_contig_requests = 2;
100 101
101 102 /*
102 103  * This is the max number of taskq threads that will be created
103 104 * if required. Since we are using a Dynamic TaskQ by default only
104 105 * one thread is created initially.
105 106 *
106 107 * NOTE: In the usual hsfs use case this per fs instance number
107 108 * of taskq threads should not place any undue load on a system.
108 109 * Even on an unusual system with say 100 CDROM drives, 800 threads
109 110 * will not be created unless all the drives are loaded and all
110 111 * of them are saturated with I/O at the same time! If there is at
111 112 * all a complaint of system load due to such an unusual case it
112 113 * should be easy enough to change to one per-machine Dynamic TaskQ
113 114 * for all hsfs mounts with a nthreads of say 32.
114 115 */
115 116 static int hsfs_taskq_nthreads = 8; /* # of taskq threads per fs */
116 117
117 118 /* Min count of adjacent bufs that will avoid buf coalescing */
118 119 static int hsched_coalesce_min = 2;
119 120
120 121 /*
121 122 * Kmem caches for heavily used small allocations. Using these kmem
122 123 * caches provides a factor of 3 reduction in system time and greatly
123 124 * aids overall throughput esp. on SPARC.
124 125 */
125 126 struct kmem_cache *hio_cache;
126 127 struct kmem_cache *hio_info_cache;
127 128
128 129 /*
129 130 * This tunable allows us to ignore inode numbers from rrip-1.12.
130 131 * In this case, we fall back to our default inode algorithm.
131 132 */
132 133 extern int use_rrip_inodes;
133 134
134 135 /*
135 136 * Free behind logic from UFS to tame our thirst for
136 137 * the page cache.
137 138 * See usr/src/uts/common/fs/ufs/ufs_vnops.c for more
138 139 * explanation.
139 140 */
140 141 static int freebehind = 1;
141 142 static int smallfile = 0;
142 143 static int cache_read_ahead = 0;
143 144 static u_offset_t smallfile64 = 32 * 1024;
144 145 #define SMALLFILE1_D 1000
145 146 #define SMALLFILE2_D 10
146 147 static u_offset_t smallfile1 = 32 * 1024;
147 148 static u_offset_t smallfile2 = 32 * 1024;
148 149 static clock_t smallfile_update = 0; /* when to recompute */
149 150 static uint_t smallfile1_d = SMALLFILE1_D;
150 151 static uint_t smallfile2_d = SMALLFILE2_D;
151 152
152 153 static int hsched_deadline_compare(const void *x1, const void *x2);
153 154 static int hsched_offset_compare(const void *x1, const void *x2);
154 155 static void hsched_enqueue_io(struct hsfs *fsp, struct hio *hsio, int ra);
155 156 int hsched_invoke_strategy(struct hsfs *fsp);
156 157
157 158 /* ARGSUSED */
158 159 static int
159 160 hsfs_fsync(vnode_t *cp,
160 161 int syncflag,
161 162 cred_t *cred,
162 163 caller_context_t *ct)
163 164 {
164 165 return (0);
165 166 }
166 167
167 168
168 169 /*ARGSUSED*/
169 170 static int
170 171 hsfs_read(struct vnode *vp,
171 172 struct uio *uiop,
172 173 int ioflag,
173 174 struct cred *cred,
174 175 struct caller_context *ct)
175 176 {
176 177 caddr_t base;
177 178 offset_t diff;
178 179 int error;
179 180 struct hsnode *hp;
180 181 uint_t filesize;
181 182 int dofree;
182 183
183 184 hp = VTOH(vp);
184 185 /*
185 186 * if vp is of type VDIR, make sure dirent
186 187 * is filled up with all info (because of ptbl)
187 188 */
188 189 if (vp->v_type == VDIR) {
189 190 if (hp->hs_dirent.ext_size == 0)
190 191 hs_filldirent(vp, &hp->hs_dirent);
191 192 }
192 193 filesize = hp->hs_dirent.ext_size;
193 194
194 195 /* Sanity checks. */
195 196 if (uiop->uio_resid == 0 || /* No data wanted. */
196 197 uiop->uio_loffset > HS_MAXFILEOFF || /* Offset too big. */
197 198 uiop->uio_loffset >= filesize) /* Past EOF. */
198 199 return (0);
199 200
200 201 do {
201 202 /*
202 203 * We want to ask for only the "right" amount of data.
203 204 * In this case that means:-
204 205 *
205 206 * We can't get data from beyond our EOF. If asked,
206 207 * we will give a short read.
207 208 *
208 209 * segmap_getmapflt returns buffers of MAXBSIZE bytes.
209 210 * These buffers are always MAXBSIZE aligned.
210 211 * If our starting offset is not MAXBSIZE aligned,
211 212 * we can only ask for less than MAXBSIZE bytes.
212 213 *
213 214 * If our requested offset and length are such that
214 215 * they belong in different MAXBSIZE aligned slots
215 216 * then we'll be making more than one call on
216 217 * segmap_getmapflt.
217 218 *
218 219 * This diagram shows the variables we use and their
219 220 * relationships.
220 221 *
221 222 * |<-----MAXBSIZE----->|
222 223 * +--------------------------...+
223 224 * |.....mapon->|<--n-->|....*...|EOF
224 225 * +--------------------------...+
225 226 * uio_loffset->|
226 227 * uio_resid....|<---------->|
227 228 * diff.........|<-------------->|
228 229 *
229 230 * So, in this case our offset is not aligned
230 231 * and our request takes us outside of the
231 232 * MAXBSIZE window. We will break this up into
232 233 * two segmap_getmapflt calls.
233 234 */
234 235 size_t nbytes;
235 236 offset_t mapon;
236 237 size_t n;
237 238 uint_t flags;
238 239
239 240 mapon = uiop->uio_loffset & MAXBOFFSET;
240 241 diff = filesize - uiop->uio_loffset;
241 242 nbytes = (size_t)MIN(MAXBSIZE - mapon, uiop->uio_resid);
242 243 n = MIN(diff, nbytes);
243 244 if (n <= 0) {
244 245 /* EOF or request satisfied. */
245 246 return (0);
246 247 }
247 248
248 249 /*
249 250 * Freebehind computation taken from:
250 251 * usr/src/uts/common/fs/ufs/ufs_vnops.c
251 252 */
252 253 if (drv_hztousec(ddi_get_lbolt()) >= smallfile_update) {
253 254 uint64_t percpufreeb;
254 255 if (smallfile1_d == 0) smallfile1_d = SMALLFILE1_D;
255 256 if (smallfile2_d == 0) smallfile2_d = SMALLFILE2_D;
256 257 percpufreeb = ptob((uint64_t)freemem) / ncpus_online;
257 258 smallfile1 = percpufreeb / smallfile1_d;
258 259 smallfile2 = percpufreeb / smallfile2_d;
259 260 smallfile1 = MAX(smallfile1, smallfile);
260 261 smallfile1 = MAX(smallfile1, smallfile64);
261 262 smallfile2 = MAX(smallfile1, smallfile2);
262 263 smallfile_update = drv_hztousec(ddi_get_lbolt())
263 264 + 1000000;
264 265 }
265 266
266 267 dofree = freebehind &&
267 268 hp->hs_prev_offset == uiop->uio_loffset &&
268 269 hp->hs_ra_bytes > 0;
269 270
270 271 base = segmap_getmapflt(segkmap, vp,
271 272 (u_offset_t)uiop->uio_loffset, n, 1, S_READ);
272 273
273 274 error = uiomove(base + mapon, n, UIO_READ, uiop);
274 275
275 276 if (error == 0) {
276 277 /*
277 278 * if read a whole block, or read to eof,
278 279 * won't need this buffer again soon.
279 280 */
280 281 if (n + mapon == MAXBSIZE ||
281 282 uiop->uio_loffset == filesize)
282 283 flags = SM_DONTNEED;
283 284 else
284 285 flags = 0;
285 286
286 287 if (dofree) {
287 288 flags = SM_FREE | SM_ASYNC;
288 289 if ((cache_read_ahead == 0) &&
289 290 uiop->uio_loffset > smallfile2)
290 291 flags |= SM_DONTNEED;
291 292 }
292 293
293 294 error = segmap_release(segkmap, base, flags);
294 295 } else
295 296 (void) segmap_release(segkmap, base, 0);
296 297 } while (error == 0 && uiop->uio_resid > 0);
297 298
298 299 return (error);
299 300 }
300 301
301 302 /*ARGSUSED2*/
302 303 static int
303 304 hsfs_getattr(
304 305 struct vnode *vp,
305 306 struct vattr *vap,
306 307 int flags,
307 308 struct cred *cred,
308 309 caller_context_t *ct)
309 310 {
310 311 struct hsnode *hp;
311 312 struct vfs *vfsp;
312 313 struct hsfs *fsp;
313 314
314 315 hp = VTOH(vp);
315 316 fsp = VFS_TO_HSFS(vp->v_vfsp);
316 317 vfsp = vp->v_vfsp;
317 318
318 319 if ((hp->hs_dirent.ext_size == 0) && (vp->v_type == VDIR)) {
319 320 hs_filldirent(vp, &hp->hs_dirent);
320 321 }
321 322 vap->va_type = IFTOVT(hp->hs_dirent.mode);
322 323 vap->va_mode = hp->hs_dirent.mode;
323 324 vap->va_uid = hp->hs_dirent.uid;
324 325 vap->va_gid = hp->hs_dirent.gid;
325 326
326 327 vap->va_fsid = vfsp->vfs_dev;
327 328 vap->va_nodeid = (ino64_t)hp->hs_nodeid;
328 329 vap->va_nlink = hp->hs_dirent.nlink;
329 330 vap->va_size = (offset_t)hp->hs_dirent.ext_size;
330 331
331 332 vap->va_atime.tv_sec = hp->hs_dirent.adate.tv_sec;
332 333 vap->va_atime.tv_nsec = hp->hs_dirent.adate.tv_usec*1000;
333 334 vap->va_mtime.tv_sec = hp->hs_dirent.mdate.tv_sec;
334 335 vap->va_mtime.tv_nsec = hp->hs_dirent.mdate.tv_usec*1000;
335 336 vap->va_ctime.tv_sec = hp->hs_dirent.cdate.tv_sec;
336 337 vap->va_ctime.tv_nsec = hp->hs_dirent.cdate.tv_usec*1000;
337 338 if (vp->v_type == VCHR || vp->v_type == VBLK)
338 339 vap->va_rdev = hp->hs_dirent.r_dev;
339 340 else
340 341 vap->va_rdev = 0;
341 342 vap->va_blksize = vfsp->vfs_bsize;
342 343 /* no. of blocks = no. of data blocks + no. of xar blocks */
343 344 vap->va_nblocks = (fsblkcnt64_t)howmany(vap->va_size + (u_longlong_t)
344 345 (hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift), DEV_BSIZE);
345 346 vap->va_seq = hp->hs_seq;
346 347 return (0);
347 348 }
348 349
349 350 /*ARGSUSED*/
350 351 static int
351 352 hsfs_readlink(struct vnode *vp,
352 353 struct uio *uiop,
353 354 struct cred *cred,
354 355 caller_context_t *ct)
355 356 {
356 357 struct hsnode *hp;
357 358
358 359 if (vp->v_type != VLNK)
359 360 return (EINVAL);
360 361
361 362 hp = VTOH(vp);
362 363
363 364 if (hp->hs_dirent.sym_link == (char *)NULL)
364 365 return (ENOENT);
365 366
366 367 return (uiomove(hp->hs_dirent.sym_link,
367 368 (size_t)MIN(hp->hs_dirent.ext_size,
368 369 uiop->uio_resid), UIO_READ, uiop));
369 370 }
370 371
371 372 /*ARGSUSED*/
372 373 static void
373 374 hsfs_inactive(struct vnode *vp,
374 375 struct cred *cred,
375 376 caller_context_t *ct)
376 377 {
377 378 struct hsnode *hp;
378 379 struct hsfs *fsp;
379 380
380 381 int nopage;
381 382
382 383 hp = VTOH(vp);
383 384 fsp = VFS_TO_HSFS(vp->v_vfsp);
384 385 /*
385 386 * Note: acquiring and holding v_lock for quite a while
386 387 * here serializes on the vnode; this is unfortunate, but
387 388 * likely not to overly impact performance, as the underlying
388 389 * device (CDROM drive) is quite slow.
389 390 */
390 391 rw_enter(&fsp->hsfs_hash_lock, RW_WRITER);
391 392 mutex_enter(&hp->hs_contents_lock);
392 393 mutex_enter(&vp->v_lock);
393 394
394 395 if (vp->v_count < 1) {
395 396 panic("hsfs_inactive: v_count < 1");
396 397 /*NOTREACHED*/
397 398 }
398 399
399 400 if (vp->v_count > 1 || (hp->hs_flags & HREF) == 0) {
400 401 vp->v_count--; /* release hold from vn_rele */
401 402 mutex_exit(&vp->v_lock);
402 403 mutex_exit(&hp->hs_contents_lock);
403 404 rw_exit(&fsp->hsfs_hash_lock);
404 405 return;
405 406 }
406 407 vp->v_count--; /* release hold from vn_rele */
407 408 if (vp->v_count == 0) {
408 409 /*
409 410 * Free the hsnode.
410 411 * If there are no pages associated with the
411 412 * hsnode, give it back to the kmem_cache,
412 413 * else put at the end of this file system's
413 414 * internal free list.
414 415 */
415 416 nopage = !vn_has_cached_data(vp);
416 417 hp->hs_flags = 0;
417 418 /*
418 419 * exit these locks now, since hs_freenode may
419 420 * kmem_free the hsnode and embedded vnode
420 421 */
421 422 mutex_exit(&vp->v_lock);
422 423 mutex_exit(&hp->hs_contents_lock);
423 424 hs_freenode(vp, fsp, nopage);
424 425 } else {
425 426 mutex_exit(&vp->v_lock);
426 427 mutex_exit(&hp->hs_contents_lock);
427 428 }
428 429 rw_exit(&fsp->hsfs_hash_lock);
429 430 }
430 431
431 432
432 433 /*ARGSUSED*/
433 434 static int
434 435 hsfs_lookup(
435 436 struct vnode *dvp,
436 437 char *nm,
437 438 struct vnode **vpp,
438 439 struct pathname *pnp,
439 440 int flags,
440 441 struct vnode *rdir,
441 442 struct cred *cred,
442 443 caller_context_t *ct,
443 444 int *direntflags,
444 445 pathname_t *realpnp)
445 446 {
446 447 int error;
447 448 int namelen = (int)strlen(nm);
448 449
449 450 if (*nm == '\0') {
450 451 VN_HOLD(dvp);
451 452 *vpp = dvp;
452 453 return (0);
453 454 }
454 455
455 456 /*
456 457 * If we're looking for ourself, life is simple.
457 458 */
458 459 if (namelen == 1 && *nm == '.') {
459 460 if (error = hs_access(dvp, (mode_t)VEXEC, cred))
460 461 return (error);
461 462 VN_HOLD(dvp);
462 463 *vpp = dvp;
463 464 return (0);
464 465 }
465 466
466 467 return (hs_dirlook(dvp, nm, namelen, vpp, cred));
467 468 }
468 469
469 470
470 471 /*ARGSUSED*/
471 472 static int
472 473 hsfs_readdir(
473 474 struct vnode *vp,
474 475 struct uio *uiop,
475 476 struct cred *cred,
476 477 int *eofp,
477 478 caller_context_t *ct,
478 479 int flags)
479 480 {
480 481 struct hsnode *dhp;
481 482 struct hsfs *fsp;
482 483 struct hs_direntry hd;
483 484 struct dirent64 *nd;
484 485 int error;
485 486 uint_t offset; /* real offset in directory */
486 487 uint_t dirsiz; /* real size of directory */
487 488 uchar_t *blkp;
488 489 int hdlen; /* length of hs directory entry */
489 490 long ndlen; /* length of dirent entry */
490 491 int bytes_wanted;
491 492 size_t bufsize; /* size of dirent buffer */
492 493 char *outbuf; /* ptr to dirent buffer */
493 494 char *dname;
494 495 int dnamelen;
495 496 size_t dname_size;
496 497 struct fbuf *fbp;
497 498 uint_t last_offset; /* last index into current dir block */
498 499 ino64_t dirino; /* temporary storage before storing in dirent */
499 500 off_t diroff;
500 501
501 502 dhp = VTOH(vp);
502 503 fsp = VFS_TO_HSFS(vp->v_vfsp);
503 504 if (dhp->hs_dirent.ext_size == 0)
504 505 hs_filldirent(vp, &dhp->hs_dirent);
505 506 dirsiz = dhp->hs_dirent.ext_size;
506 507 if (uiop->uio_loffset >= dirsiz) { /* at or beyond EOF */
507 508 if (eofp)
508 509 *eofp = 1;
509 510 return (0);
510 511 }
511 512 ASSERT(uiop->uio_loffset <= HS_MAXFILEOFF);
512 513 offset = uiop->uio_loffset;
513 514
514 515 dname_size = fsp->hsfs_namemax + 1; /* 1 for the ending NUL */
515 516 dname = kmem_alloc(dname_size, KM_SLEEP);
516 517 bufsize = uiop->uio_resid + sizeof (struct dirent64);
517 518
518 519 outbuf = kmem_alloc(bufsize, KM_SLEEP);
519 520 nd = (struct dirent64 *)outbuf;
520 521
521 522 while (offset < dirsiz) {
522 523 bytes_wanted = MIN(MAXBSIZE, dirsiz - (offset & MAXBMASK));
523 524
524 525 error = fbread(vp, (offset_t)(offset & MAXBMASK),
525 526 (unsigned int)bytes_wanted, S_READ, &fbp);
526 527 if (error)
527 528 goto done;
528 529
529 530 blkp = (uchar_t *)fbp->fb_addr;
530 531 last_offset = (offset & MAXBMASK) + fbp->fb_count;
531 532
532 533 #define rel_offset(offset) ((offset) & MAXBOFFSET) /* index into blkp */
533 534
534 535 while (offset < last_offset) {
535 536 /*
536 537 * Very similar validation code is found in
537 538 * process_dirblock(), hsfs_node.c.
538 539 * For an explanation, see there.
539 540 * It may make sense for the future to
540 541 * "consolidate" the code in hs_parsedir(),
541 542 * process_dirblock() and hsfs_readdir() into
542 543 * a single utility function.
543 544 */
544 545 hdlen = (int)((uchar_t)
545 546 HDE_DIR_LEN(&blkp[rel_offset(offset)]));
546 547 if (hdlen < HDE_ROOT_DIR_REC_SIZE ||
547 548 offset + hdlen > last_offset) {
548 549 /*
549 550 * advance to next sector boundary
550 551 */
551 552 offset = roundup(offset + 1, HS_SECTOR_SIZE);
552 553 if (hdlen)
553 554 hs_log_bogus_disk_warning(fsp,
554 555 HSFS_ERR_TRAILING_JUNK, 0);
555 556
556 557 continue;
557 558 }
558 559
559 560 bzero(&hd, sizeof (hd));
560 561
561 562 /*
562 563 * Just ignore invalid directory entries.
563 564 * XXX - maybe hs_parsedir() will detect EXISTENCE bit
564 565 */
565 566 if (!hs_parsedir(fsp, &blkp[rel_offset(offset)],
566 567 &hd, dname, &dnamelen, last_offset - offset)) {
567 568 /*
568 569 * Determine if there is enough room
569 570 */
570 571 ndlen = (long)DIRENT64_RECLEN((dnamelen));
571 572
572 573 if ((ndlen + ((char *)nd - outbuf)) >
573 574 uiop->uio_resid) {
574 575 fbrelse(fbp, S_READ);
575 576 goto done; /* output buffer full */
576 577 }
577 578
578 579 diroff = offset + hdlen;
579 580 /*
580 581 * If the media carries rrip-v1.12 or newer,
581 582 * and we trust the inodes from the rrip data
582 583 * (use_rrip_inodes != 0), use that data. If the
583 584 * media has been created by a recent mkisofs
584 585 * version, we may trust all numbers in the
585 586 * starting extent number; otherwise, we cannot
586 587 * do this for zero sized files and symlinks,
587 588 * because if we did we'd end up mapping all of
588 589 * them to the same node. We use HS_DUMMY_INO
589 590 * in this case and make sure that we will not
590 591 * map all files to the same meta data.
591 592 */
592 593 if (hd.inode != 0 && use_rrip_inodes) {
593 594 dirino = hd.inode;
594 595 } else if ((hd.ext_size == 0 ||
595 596 hd.sym_link != (char *)NULL) &&
596 597 (fsp->hsfs_flags & HSFSMNT_INODE) == 0) {
597 598 dirino = HS_DUMMY_INO;
598 599 } else {
599 600 dirino = hd.ext_lbn;
600 601 }
601 602
602 603 /* strncpy(9f) will zero uninitialized bytes */
603 604
604 605 ASSERT(strlen(dname) + 1 <=
605 606 DIRENT64_NAMELEN(ndlen));
606 607 (void) strncpy(nd->d_name, dname,
607 608 DIRENT64_NAMELEN(ndlen));
608 609 nd->d_reclen = (ushort_t)ndlen;
609 610 nd->d_off = (offset_t)diroff;
610 611 nd->d_ino = dirino;
611 612 nd = (struct dirent64 *)((char *)nd + ndlen);
612 613
613 614 /*
614 615 * free up space allocated for symlink
615 616 */
616 617 if (hd.sym_link != (char *)NULL) {
617 618 kmem_free(hd.sym_link,
618 619 (size_t)(hd.ext_size+1));
619 620 hd.sym_link = (char *)NULL;
620 621 }
621 622 }
622 623 offset += hdlen;
623 624 }
624 625 fbrelse(fbp, S_READ);
625 626 }
626 627
627 628 /*
628 629 * Got here for one of the following reasons:
629 630 * 1) outbuf is full (error == 0)
630 631 * 2) end of directory reached (error == 0)
631 632 * 3) error reading directory sector (error != 0)
632 633 * 4) directory entry crosses sector boundary (error == 0)
633 634 *
634 635 * If any directory entries have been copied, don't report
635 636 * case 4. Instead, return the valid directory entries.
636 637 *
637 638 * If no entries have been copied, report the error.
638 639  * In case 4, this will be indistinguishable from EOF.
639 640 */
640 641 done:
641 642 ndlen = ((char *)nd - outbuf);
642 643 if (ndlen != 0) {
643 644 error = uiomove(outbuf, (size_t)ndlen, UIO_READ, uiop);
644 645 uiop->uio_loffset = offset;
645 646 }
646 647 kmem_free(dname, dname_size);
647 648 kmem_free(outbuf, bufsize);
648 649 if (eofp && error == 0)
649 650 *eofp = (uiop->uio_loffset >= dirsiz);
650 651 return (error);
651 652 }
652 653
653 654 /*ARGSUSED2*/
654 655 static int
655 656 hsfs_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
656 657 {
657 658 struct hsnode *hp;
658 659 struct hsfid *fid;
659 660
660 661 if (fidp->fid_len < (sizeof (*fid) - sizeof (fid->hf_len))) {
661 662 fidp->fid_len = sizeof (*fid) - sizeof (fid->hf_len);
662 663 return (ENOSPC);
663 664 }
664 665
665 666 fid = (struct hsfid *)fidp;
666 667 fid->hf_len = sizeof (*fid) - sizeof (fid->hf_len);
667 668 hp = VTOH(vp);
668 669 mutex_enter(&hp->hs_contents_lock);
669 670 fid->hf_dir_lbn = hp->hs_dir_lbn;
670 671 fid->hf_dir_off = (ushort_t)hp->hs_dir_off;
671 672 fid->hf_ino = hp->hs_nodeid;
672 673 mutex_exit(&hp->hs_contents_lock);
673 674 return (0);
674 675 }
675 676
676 677 /*ARGSUSED*/
677 678 static int
678 679 hsfs_open(struct vnode **vpp,
679 680 int flag,
680 681 struct cred *cred,
681 682 caller_context_t *ct)
682 683 {
683 684 return (0);
684 685 }
685 686
686 687 /*ARGSUSED*/
687 688 static int
688 689 hsfs_close(
689 690 struct vnode *vp,
690 691 int flag,
691 692 int count,
692 693 offset_t offset,
693 694 struct cred *cred,
694 695 caller_context_t *ct)
695 696 {
696 697 (void) cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
697 698 cleanshares(vp, ttoproc(curthread)->p_pid);
698 699 return (0);
699 700 }
700 701
701 702 /*ARGSUSED2*/
702 703 static int
703 704 hsfs_access(struct vnode *vp,
704 705 int mode,
705 706 int flags,
706 707 cred_t *cred,
707 708 caller_context_t *ct)
708 709 {
709 710 return (hs_access(vp, (mode_t)mode, cred));
710 711 }
711 712
712 713 /*
713 714 * the seek time of a CD-ROM is very slow, and data transfer
714 715 * rate is even worse (max. 150K per sec). The design
715 716 * decision is to reduce access to cd-rom as much as possible,
716 717 * and to transfer a sizable block (read-ahead) of data at a time.
717 718  * The UFS style of one-block-at-a-time read-ahead is not appropriate,
718 719  * and is not supported.
719 720 */
720 721
721 722 /*
722 723 * KLUSTSIZE should be a multiple of PAGESIZE and <= MAXPHYS.
723 724 */
724 725 #define KLUSTSIZE (56 * 1024)
725 726 /* we don't support read ahead */
726 727 int hsfs_lostpage; /* no. of times we lost original page */
727 728
728 729 /*
729 730 * Used to prevent biodone() from releasing buf resources that
730 731 * we didn't allocate in quite the usual way.
731 732 */
732 733 /*ARGSUSED*/
733 734 int
734 735 hsfs_iodone(struct buf *bp)
735 736 {
736 737 sema_v(&bp->b_io);
737 738 return (0);
738 739 }
739 740
740 741 /*
741 742 * The taskq thread that invokes the scheduling function to ensure
742 743 * that all readaheads are complete and cleans up the associated
743 744 * memory and releases the page lock.
744 745 */
745 746 void
746 747 hsfs_ra_task(void *arg)
747 748 {
748 749 struct hio_info *info = arg;
749 750 uint_t count;
750 751 struct buf *wbuf;
751 752
752 753 ASSERT(info->pp != NULL);
753 754
754 755 for (count = 0; count < info->bufsused; count++) {
755 756 wbuf = &(info->bufs[count]);
756 757
757 758 DTRACE_PROBE1(hsfs_io_wait_ra, struct buf *, wbuf);
758 759 while (sema_tryp(&(info->sema[count])) == 0) {
759 760 if (hsched_invoke_strategy(info->fsp)) {
760 761 sema_p(&(info->sema[count]));
761 762 break;
762 763 }
763 764 }
764 765 sema_destroy(&(info->sema[count]));
765 766 DTRACE_PROBE1(hsfs_io_done_ra, struct buf *, wbuf);
766 767 biofini(&(info->bufs[count]));
767 768 }
768 769 for (count = 0; count < info->bufsused; count++) {
769 770 if (info->vas[count] != NULL) {
770 771 ppmapout(info->vas[count]);
771 772 }
772 773 }
773 774 kmem_free(info->vas, info->bufcnt * sizeof (caddr_t));
774 775 kmem_free(info->bufs, info->bufcnt * sizeof (struct buf));
775 776 kmem_free(info->sema, info->bufcnt * sizeof (ksema_t));
776 777
777 778 pvn_read_done(info->pp, 0);
778 779 kmem_cache_free(hio_info_cache, info);
779 780 }
780 781
781 782 /*
782 783 * Submit asynchronous readahead requests to the I/O scheduler
783 784 * depending on the number of pages to read ahead. These requests
784 785 * are asynchronous to the calling thread but I/O requests issued
785 786 * subsequently by other threads with higher LBNs must wait for
786 787 * these readaheads to complete since we have a single ordered
787 788 * I/O pipeline. Thus these readaheads are semi-asynchronous.
788 789 * A TaskQ handles waiting for the readaheads to complete.
789 790 *
790 791 * This function is mostly a copy of hsfs_getapage but somewhat
791 792 * simpler. A readahead request is aborted if page allocation
792 793 * fails.
793 794 */
794 795 /*ARGSUSED*/
795 796 static int
796 797 hsfs_getpage_ra(
797 798 struct vnode *vp,
798 799 u_offset_t off,
799 800 struct seg *seg,
800 801 caddr_t addr,
801 802 struct hsnode *hp,
802 803 struct hsfs *fsp,
803 804 int xarsiz,
804 805 offset_t bof,
805 806 int chunk_lbn_count,
806 807 int chunk_data_bytes)
807 808 {
808 809 struct buf *bufs;
809 810 caddr_t *vas;
810 811 caddr_t va;
811 812 struct page *pp, *searchp, *lastp;
812 813 struct vnode *devvp;
813 814 ulong_t byte_offset;
814 815 size_t io_len_tmp;
815 816 uint_t io_off, io_len;
816 817 uint_t xlen;
817 818 uint_t filsiz;
818 819 uint_t secsize;
819 820 uint_t bufcnt;
820 821 uint_t bufsused;
821 822 uint_t count;
822 823 uint_t io_end;
823 824 uint_t which_chunk_lbn;
824 825 uint_t offset_lbn;
825 826 uint_t offset_extra;
826 827 offset_t offset_bytes;
827 828 uint_t remaining_bytes;
828 829 uint_t extension;
829 830 int remainder; /* must be signed */
830 831 diskaddr_t driver_block;
831 832 u_offset_t io_off_tmp;
832 833 ksema_t *fio_done;
833 834 struct hio_info *info;
834 835 size_t len;
835 836
836 837 ASSERT(fsp->hqueue != NULL);
837 838
838 839 if (addr >= seg->s_base + seg->s_size) {
839 840 return (-1);
840 841 }
841 842
842 843 devvp = fsp->hsfs_devvp;
843 844 secsize = fsp->hsfs_vol.lbn_size; /* bytes per logical block */
844 845
845 846 /* file data size */
846 847 filsiz = hp->hs_dirent.ext_size;
847 848
848 849 if (off >= filsiz)
849 850 return (0);
850 851
851 852 extension = 0;
852 853 pp = NULL;
853 854
854 855 extension += hp->hs_ra_bytes;
855 856
856 857 /*
857 858 * Some CD writers (e.g. Kodak Photo CD writers)
858 859 * create CDs in TAO mode and reserve tracks that
859 860 * are not completely written. Some sectors remain
860 861 * unreadable for this reason and give I/O errors.
861 862 * Also, there's no point in reading sectors
862 863 * we'll never look at. So, if we're asked to go
863 864 * beyond the end of a file, truncate to the length
864 865 * of that file.
865 866 *
866 867 * Additionally, this behaviour is required by section
867 868 * 6.4.5 of ISO 9660:1988(E).
868 869 */
869 870 len = MIN(extension ? extension : PAGESIZE, filsiz - off);
870 871
871 872 /* A little paranoia */
872 873 if (len <= 0)
873 874 return (-1);
874 875
875 876 /*
876 877 * After all that, make sure we're asking for things in units
877 878 * that bdev_strategy() will understand (see bug 4202551).
878 879 */
879 880 len = roundup(len, DEV_BSIZE);
880 881
881 882 pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
882 883 &io_len_tmp, off, len, 1);
883 884
884 885 if (pp == NULL) {
885 886 hp->hs_num_contig = 0;
886 887 hp->hs_ra_bytes = 0;
887 888 hp->hs_prev_offset = 0;
888 889 return (-1);
889 890 }
890 891
891 892 io_off = (uint_t)io_off_tmp;
892 893 io_len = (uint_t)io_len_tmp;
893 894
894 895 /* check for truncation */
895 896 /*
896 897 * xxx Clean up and return EIO instead?
897 898 * xxx Ought to go to u_offset_t for everything, but we
898 899 * xxx call lots of things that want uint_t arguments.
899 900 */
900 901 ASSERT(io_off == io_off_tmp);
901 902
902 903 /*
903 904 * get enough buffers for worst-case scenario
904 905 * (i.e., no coalescing possible).
905 906 */
906 907 bufcnt = (len + secsize - 1) / secsize;
907 908 bufs = kmem_alloc(bufcnt * sizeof (struct buf), KM_SLEEP);
908 909 vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
909 910
910 911 /*
911 912  * Allocate an array of semaphores since we are doing I/O
912 913 * scheduling.
913 914 */
914 915 fio_done = kmem_alloc(bufcnt * sizeof (ksema_t), KM_SLEEP);
915 916
916 917 /*
917 918 * If our filesize is not an integer multiple of PAGESIZE,
918 919 * we zero that part of the last page that's between EOF and
919 920 * the PAGESIZE boundary.
920 921 */
921 922 xlen = io_len & PAGEOFFSET;
922 923 if (xlen != 0)
923 924 pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
924 925
925 926 DTRACE_PROBE2(hsfs_readahead, struct vnode *, vp, uint_t, io_len);
926 927
927 928 va = NULL;
928 929 lastp = NULL;
929 930 searchp = pp;
930 931 io_end = io_off + io_len;
931 932 for (count = 0, byte_offset = io_off;
932 933 byte_offset < io_end;
933 934 count++) {
934 935 ASSERT(count < bufcnt);
935 936
936 937 bioinit(&bufs[count]);
937 938 bufs[count].b_edev = devvp->v_rdev;
938 939 bufs[count].b_dev = cmpdev(devvp->v_rdev);
939 940 bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
940 941 bufs[count].b_iodone = hsfs_iodone;
941 942 bufs[count].b_vp = vp;
942 943 bufs[count].b_file = vp;
943 944
944 945 /* Compute disk address for interleaving. */
945 946
946 947 /* considered without skips */
947 948 which_chunk_lbn = byte_offset / chunk_data_bytes;
948 949
949 950 /* factor in skips */
950 951 offset_lbn = which_chunk_lbn * chunk_lbn_count;
951 952
952 953 /* convert to physical byte offset for lbn */
953 954 offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);
954 955
955 956 /* don't forget offset into lbn */
956 957 offset_extra = byte_offset % chunk_data_bytes;
957 958
958 959 /* get virtual block number for driver */
959 960 driver_block = lbtodb(bof + xarsiz
960 961 + offset_bytes + offset_extra);
961 962
962 963 if (lastp != searchp) {
963 964 /* this branch taken first time through loop */
964 965 va = vas[count] = ppmapin(searchp, PROT_WRITE,
965 966 (caddr_t)-1);
966 967 /* ppmapin() guarantees not to return NULL */
967 968 } else {
968 969 vas[count] = NULL;
969 970 }
970 971
971 972 bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
972 973 bufs[count].b_offset =
973 974 (offset_t)(byte_offset - io_off + off);
974 975
975 976 /*
976 977 * We specifically use the b_lblkno member here
977 978 * as even in the 32 bit world driver_block can
978 979 * get very large in line with the ISO9660 spec.
979 980 */
980 981
981 982 bufs[count].b_lblkno = driver_block;
982 983
983 984 remaining_bytes = ((which_chunk_lbn + 1) * chunk_data_bytes)
984 985 - byte_offset;
985 986
986 987 /*
987 988 * remaining_bytes can't be zero, as we derived
988 989 * which_chunk_lbn directly from byte_offset.
989 990 */
990 991 if ((remaining_bytes + byte_offset) < (off + len)) {
991 992 /* coalesce-read the rest of the chunk */
992 993 bufs[count].b_bcount = remaining_bytes;
993 994 } else {
994 995 /* get the final bits */
995 996 bufs[count].b_bcount = off + len - byte_offset;
996 997 }
997 998
998 999 remainder = PAGESIZE - (byte_offset % PAGESIZE);
999 1000 if (bufs[count].b_bcount > remainder) {
1000 1001 bufs[count].b_bcount = remainder;
1001 1002 }
1002 1003
1003 1004 bufs[count].b_bufsize = bufs[count].b_bcount;
1004 1005 if (((offset_t)byte_offset + bufs[count].b_bcount) >
1005 1006 HS_MAXFILEOFF) {
1006 1007 break;
1007 1008 }
1008 1009 byte_offset += bufs[count].b_bcount;
1009 1010
1010 1011 /*
1011 1012 * We are scheduling I/O so we need to enqueue
1012 1013 * requests rather than calling bdev_strategy
1013 1014 * here. A later invocation of the scheduling
1014 1015 * function will take care of doing the actual
1015 1016 * I/O as it selects requests from the queue as
1016 1017 * per the scheduling logic.
1017 1018 */
1018 1019 struct hio *hsio = kmem_cache_alloc(hio_cache,
1019 1020 KM_SLEEP);
1020 1021
1021 1022 sema_init(&fio_done[count], 0, NULL,
1022 1023 SEMA_DEFAULT, NULL);
1023 1024 hsio->bp = &bufs[count];
1024 1025 hsio->sema = &fio_done[count];
1025 1026 hsio->io_lblkno = bufs[count].b_lblkno;
1026 1027 hsio->nblocks = howmany(hsio->bp->b_bcount,
1027 1028 DEV_BSIZE);
1028 1029
1029 1030 /* used for deadline */
1030 1031 hsio->io_timestamp = drv_hztousec(ddi_get_lbolt());
1031 1032
1032 1033 /* for I/O coalescing */
1033 1034 hsio->contig_chain = NULL;
1034 1035 hsched_enqueue_io(fsp, hsio, 1);
1035 1036
1036 1037 lwp_stat_update(LWP_STAT_INBLK, 1);
1037 1038 lastp = searchp;
1038 1039 if ((remainder - bufs[count].b_bcount) < 1) {
1039 1040 searchp = searchp->p_next;
1040 1041 }
1041 1042 }
1042 1043
1043 1044 bufsused = count;
1044 1045 info = kmem_cache_alloc(hio_info_cache, KM_SLEEP);
1045 1046 info->bufs = bufs;
1046 1047 info->vas = vas;
1047 1048 info->sema = fio_done;
1048 1049 info->bufsused = bufsused;
1049 1050 info->bufcnt = bufcnt;
1050 1051 info->fsp = fsp;
1051 1052 info->pp = pp;
1052 1053
1053 1054 (void) taskq_dispatch(fsp->hqueue->ra_task,
1054 1055 hsfs_ra_task, info, KM_SLEEP);
1055 1056 /*
1056 1057 * The I/O locked pages are unlocked in our taskq thread.
1057 1058 */
1058 1059 return (0);
1059 1060 }
1060 1061
1061 1062 /*
1062 1063 * Each file may have a different interleaving on disk. This makes
1063 1064 * things somewhat interesting. The gist is that there are some
1064 1065 * number of contiguous data sectors, followed by some other number
1065 1066 * of contiguous skip sectors. The sum of those two sets of sectors
1066 1067 * defines the interleave size. Unfortunately, it means that we generally
1067 1068 * can't simply read N sectors starting at a given offset to satisfy
1068 1069 * any given request.
1069 1070 *
1070 1071 * What we do is get the relevant memory pages via pvn_read_kluster(),
1071 1072 * then stride through the interleaves, setting up a buf for each
1072 1073 * sector that needs to be brought in. Instead of kmem_alloc'ing
1073 1074 * space for the sectors, though, we just point at the appropriate
1074 1075 * spot in the relevant page for each of them. This saves us a bunch
1075 1076 * of copying.
1076 1077 *
1077 1078 * NOTICE: The code below in hsfs_getapage is mostly same as the code
1078 1079 * in hsfs_getpage_ra above (with some omissions). If you are
1079 1080 * making any change to this function, please also look at
1080 1081 * hsfs_getpage_ra.
1081 1082 */
1082 1083 /*ARGSUSED*/
1083 1084 static int
1084 1085 hsfs_getapage(
1085 1086 struct vnode *vp,
1086 1087 u_offset_t off,
1087 1088 size_t len,
1088 1089 uint_t *protp,
1089 1090 struct page *pl[],
1090 1091 size_t plsz,
1091 1092 struct seg *seg,
1092 1093 caddr_t addr,
1093 1094 enum seg_rw rw,
1094 1095 struct cred *cred)
1095 1096 {
1096 1097 struct hsnode *hp;
1097 1098 struct hsfs *fsp;
1098 1099 int err;
1099 1100 struct buf *bufs;
1100 1101 caddr_t *vas;
1101 1102 caddr_t va;
1102 1103 struct page *pp, *searchp, *lastp;
1103 1104 page_t *pagefound;
1104 1105 offset_t bof;
1105 1106 struct vnode *devvp;
1106 1107 ulong_t byte_offset;
1107 1108 size_t io_len_tmp;
1108 1109 uint_t io_off, io_len;
1109 1110 uint_t xlen;
1110 1111 uint_t filsiz;
1111 1112 uint_t secsize;
1112 1113 uint_t bufcnt;
1113 1114 uint_t bufsused;
1114 1115 uint_t count;
1115 1116 uint_t io_end;
1116 1117 uint_t which_chunk_lbn;
1117 1118 uint_t offset_lbn;
1118 1119 uint_t offset_extra;
1119 1120 offset_t offset_bytes;
1120 1121 uint_t remaining_bytes;
1121 1122 uint_t extension;
1122 1123 int remainder; /* must be signed */
1123 1124 int chunk_lbn_count;
1124 1125 int chunk_data_bytes;
1125 1126 int xarsiz;
1126 1127 diskaddr_t driver_block;
1127 1128 u_offset_t io_off_tmp;
1128 1129 ksema_t *fio_done;
1129 1130 int calcdone;
1130 1131
1131 1132 /*
1132 1133 * We don't support asynchronous operation at the moment, so
1133 1134 * just pretend we did it. If the pages are ever actually
1134 1135 * needed, they'll get brought in then.
1135 1136 */
1136 1137 if (pl == NULL)
1137 1138 return (0);
1138 1139
1139 1140 hp = VTOH(vp);
1140 1141 fsp = VFS_TO_HSFS(vp->v_vfsp);
1141 1142 devvp = fsp->hsfs_devvp;
1142 1143 secsize = fsp->hsfs_vol.lbn_size; /* bytes per logical block */
1143 1144
1144 1145 /* file data size */
1145 1146 filsiz = hp->hs_dirent.ext_size;
1146 1147
1147 1148 /* disk addr for start of file */
1148 1149 bof = LBN_TO_BYTE((offset_t)hp->hs_dirent.ext_lbn, vp->v_vfsp);
1149 1150
1150 1151 /* xarsiz byte must be skipped for data */
1151 1152 xarsiz = hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift;
1152 1153
1153 1154 /* how many logical blocks in an interleave (data+skip) */
1154 1155 chunk_lbn_count = hp->hs_dirent.intlf_sz + hp->hs_dirent.intlf_sk;
1155 1156
1156 1157 if (chunk_lbn_count == 0) {
1157 1158 chunk_lbn_count = 1;
1158 1159 }
1159 1160
1160 1161 /*
1161 1162 * Convert interleaving size into bytes. The zero case
1162 1163 * (no interleaving) optimization is handled as a side-
1163 1164 * effect of the read-ahead logic.
1164 1165 */
1165 1166 if (hp->hs_dirent.intlf_sz == 0) {
1166 1167 chunk_data_bytes = LBN_TO_BYTE(1, vp->v_vfsp);
1167 1168 /*
1168 1169 * Optimization: If our pagesize is a multiple of LBN
1169 1170 * bytes, we can avoid breaking up a page into individual
1170 1171 * lbn-sized requests.
1171 1172 */
1172 1173 if (PAGESIZE % chunk_data_bytes == 0) {
1173 1174 chunk_lbn_count = BYTE_TO_LBN(PAGESIZE, vp->v_vfsp);
1174 1175 chunk_data_bytes = PAGESIZE;
1175 1176 }
1176 1177 } else {
1177 1178 chunk_data_bytes =
1178 1179 LBN_TO_BYTE(hp->hs_dirent.intlf_sz, vp->v_vfsp);
1179 1180 }
1180 1181
1181 1182 reread:
1182 1183 err = 0;
1183 1184 pagefound = 0;
1184 1185 calcdone = 0;
1185 1186
1186 1187 /*
1187 1188 * Do some read-ahead. This mostly saves us a bit of
1188 1189 * system cpu time more than anything else when doing
1189 1190 * sequential reads. At some point, could do the
1190 1191 * read-ahead asynchronously which might gain us something
1191 1192 * on wall time, but it seems unlikely....
1192 1193 *
1193 1194 * We do the easy case here, which is to read through
1194 1195 * the end of the chunk, minus whatever's at the end that
1195 1196 * won't exactly fill a page.
1196 1197 */
1197 1198 if (hp->hs_ra_bytes > 0 && chunk_data_bytes != PAGESIZE) {
1198 1199 which_chunk_lbn = (off + len) / chunk_data_bytes;
1199 1200 extension = ((which_chunk_lbn + 1) * chunk_data_bytes) - off;
1200 1201 extension -= (extension % PAGESIZE);
1201 1202 } else {
1202 1203 extension = roundup(len, PAGESIZE);
1203 1204 }
1204 1205
1205 1206 atomic_inc_64(&fsp->total_pages_requested);
1206 1207
1207 1208 pp = NULL;
1208 1209 again:
1209 1210 /* search for page in buffer */
1210 1211 if ((pagefound = page_exists(vp, off)) == 0) {
1211 1212 /*
1212 1213 * Need to really do disk IO to get the page.
1213 1214 */
1214 1215 if (!calcdone) {
1215 1216 extension += hp->hs_ra_bytes;
1216 1217
1217 1218 /*
1218 1219 * Some cd writers don't write sectors that aren't
1219 1220 * used. Also, there's no point in reading sectors
1220 1221 * we'll never look at. So, if we're asked to go
1221 1222 * beyond the end of a file, truncate to the length
1222 1223 * of that file.
1223 1224 *
1224 1225 * Additionally, this behaviour is required by section
1225 1226 * 6.4.5 of ISO 9660:1988(E).
1226 1227 */
1227 1228 len = MIN(extension ? extension : PAGESIZE,
1228 1229 filsiz - off);
1229 1230
1230 1231 /* A little paranoia. */
1231 1232 ASSERT(len > 0);
1232 1233
1233 1234 /*
1234 1235 * After all that, make sure we're asking for things
1235 1236 * in units that bdev_strategy() will understand
1236 1237 * (see bug 4202551).
1237 1238 */
1238 1239 len = roundup(len, DEV_BSIZE);
1239 1240 calcdone = 1;
1240 1241 }
1241 1242
1242 1243 pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
1243 1244 &io_len_tmp, off, len, 0);
1244 1245
1245 1246 if (pp == NULL) {
1246 1247 /*
1247 1248 * Pressure on memory, roll back readahead
1248 1249 */
1249 1250 hp->hs_num_contig = 0;
1250 1251 hp->hs_ra_bytes = 0;
1251 1252 hp->hs_prev_offset = 0;
1252 1253 goto again;
1253 1254 }
1254 1255
1255 1256 io_off = (uint_t)io_off_tmp;
1256 1257 io_len = (uint_t)io_len_tmp;
1257 1258
1258 1259 /* check for truncation */
1259 1260 /*
1260 1261 * xxx Clean up and return EIO instead?
1261 1262 * xxx Ought to go to u_offset_t for everything, but we
1262 1263 * xxx call lots of things that want uint_t arguments.
1263 1264 */
1264 1265 ASSERT(io_off == io_off_tmp);
1265 1266
1266 1267 /*
1267 1268 * get enough buffers for worst-case scenario
1268 1269 * (i.e., no coalescing possible).
1269 1270 */
1270 1271 bufcnt = (len + secsize - 1) / secsize;
1271 1272 bufs = kmem_zalloc(bufcnt * sizeof (struct buf), KM_SLEEP);
1272 1273 vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
1273 1274
1274 1275 /*
1275 1276  * Allocate an array of semaphores if we are doing I/O
1276 1277 * scheduling.
1277 1278 */
1278 1279 if (fsp->hqueue != NULL)
1279 1280 fio_done = kmem_alloc(bufcnt * sizeof (ksema_t),
1280 1281 KM_SLEEP);
1281 1282 for (count = 0; count < bufcnt; count++) {
1282 1283 bioinit(&bufs[count]);
1283 1284 bufs[count].b_edev = devvp->v_rdev;
1284 1285 bufs[count].b_dev = cmpdev(devvp->v_rdev);
1285 1286 bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
1286 1287 bufs[count].b_iodone = hsfs_iodone;
1287 1288 bufs[count].b_vp = vp;
1288 1289 bufs[count].b_file = vp;
1289 1290 }
1290 1291
1291 1292 /*
1292 1293 * If our filesize is not an integer multiple of PAGESIZE,
1293 1294 * we zero that part of the last page that's between EOF and
1294 1295 * the PAGESIZE boundary.
1295 1296 */
1296 1297 xlen = io_len & PAGEOFFSET;
1297 1298 if (xlen != 0)
1298 1299 pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
1299 1300
1300 1301 va = NULL;
1301 1302 lastp = NULL;
1302 1303 searchp = pp;
1303 1304 io_end = io_off + io_len;
1304 1305 for (count = 0, byte_offset = io_off;
1305 1306 byte_offset < io_end; count++) {
1306 1307 ASSERT(count < bufcnt);
1307 1308
1308 1309 /* Compute disk address for interleaving. */
1309 1310
1310 1311 /* considered without skips */
1311 1312 which_chunk_lbn = byte_offset / chunk_data_bytes;
1312 1313
1313 1314 /* factor in skips */
1314 1315 offset_lbn = which_chunk_lbn * chunk_lbn_count;
1315 1316
1316 1317 /* convert to physical byte offset for lbn */
1317 1318 offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);
1318 1319
1319 1320 /* don't forget offset into lbn */
1320 1321 offset_extra = byte_offset % chunk_data_bytes;
1321 1322
1322 1323 /* get virtual block number for driver */
1323 1324 driver_block =
1324 1325 lbtodb(bof + xarsiz + offset_bytes + offset_extra);
1325 1326
1326 1327 if (lastp != searchp) {
1327 1328 /* this branch taken first time through loop */
1328 1329 va = vas[count] =
1329 1330 ppmapin(searchp, PROT_WRITE, (caddr_t)-1);
1330 1331 /* ppmapin() guarantees not to return NULL */
1331 1332 } else {
1332 1333 vas[count] = NULL;
1333 1334 }
1334 1335
1335 1336 bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
1336 1337 bufs[count].b_offset =
1337 1338 (offset_t)(byte_offset - io_off + off);
1338 1339
1339 1340 /*
1340 1341 * We specifically use the b_lblkno member here
1341 1342 * as even in the 32 bit world driver_block can
1342 1343 * get very large in line with the ISO9660 spec.
1343 1344 */
1344 1345
1345 1346 bufs[count].b_lblkno = driver_block;
1346 1347
1347 1348 remaining_bytes =
1348 1349 ((which_chunk_lbn + 1) * chunk_data_bytes)
1349 1350 - byte_offset;
1350 1351
1351 1352 /*
1352 1353 * remaining_bytes can't be zero, as we derived
1353 1354 * which_chunk_lbn directly from byte_offset.
1354 1355 */
1355 1356 if ((remaining_bytes + byte_offset) < (off + len)) {
1356 1357 /* coalesce-read the rest of the chunk */
1357 1358 bufs[count].b_bcount = remaining_bytes;
1358 1359 } else {
1359 1360 /* get the final bits */
1360 1361 bufs[count].b_bcount = off + len - byte_offset;
1361 1362 }
1362 1363
1363 1364 /*
1364 1365 * It would be nice to do multiple pages'
1365 1366 * worth at once here when the opportunity
1366 1367 * arises, as that has been shown to improve
1367 1368 * our wall time. However, to do that
1368 1369 * requires that we use the pageio subsystem,
1369 1370 * which doesn't mix well with what we're
1370 1371 * already using here. We can't use pageio
1371 1372 * all the time, because that subsystem
1372 1373 * assumes that a page is stored in N
1373 1374 * contiguous blocks on the device.
1374 1375 * Interleaving violates that assumption.
1375 1376 *
1376 1377 * Update: This is now not so big a problem
1377 1378 * because of the I/O scheduler sitting below
1378 1379 * that can re-order and coalesce I/O requests.
1379 1380 */
1380 1381
1381 1382 remainder = PAGESIZE - (byte_offset % PAGESIZE);
1382 1383 if (bufs[count].b_bcount > remainder) {
1383 1384 bufs[count].b_bcount = remainder;
1384 1385 }
1385 1386
1386 1387 bufs[count].b_bufsize = bufs[count].b_bcount;
1387 1388 if (((offset_t)byte_offset + bufs[count].b_bcount) >
1388 1389 HS_MAXFILEOFF) {
1389 1390 break;
1390 1391 }
1391 1392 byte_offset += bufs[count].b_bcount;
1392 1393
1393 1394 if (fsp->hqueue == NULL) {
1394 1395 (void) bdev_strategy(&bufs[count]);
1395 1396
1396 1397 } else {
1397 1398 /*
1398 1399 * We are scheduling I/O so we need to enqueue
1399 1400 * requests rather than calling bdev_strategy
1400 1401 * here. A later invocation of the scheduling
1401 1402 * function will take care of doing the actual
1402 1403 * I/O as it selects requests from the queue as
1403 1404 * per the scheduling logic.
1404 1405 */
1405 1406 struct hio *hsio = kmem_cache_alloc(hio_cache,
1406 1407 KM_SLEEP);
1407 1408
1408 1409 sema_init(&fio_done[count], 0, NULL,
1409 1410 SEMA_DEFAULT, NULL);
1410 1411 hsio->bp = &bufs[count];
1411 1412 hsio->sema = &fio_done[count];
1412 1413 hsio->io_lblkno = bufs[count].b_lblkno;
1413 1414 hsio->nblocks = howmany(hsio->bp->b_bcount,
1414 1415 DEV_BSIZE);
1415 1416
1416 1417 /* used for deadline */
1417 1418 hsio->io_timestamp =
1418 1419 drv_hztousec(ddi_get_lbolt());
1419 1420
1420 1421 /* for I/O coalescing */
1421 1422 hsio->contig_chain = NULL;
1422 1423 hsched_enqueue_io(fsp, hsio, 0);
1423 1424 }
1424 1425
1425 1426 lwp_stat_update(LWP_STAT_INBLK, 1);
1426 1427 lastp = searchp;
1427 1428 if ((remainder - bufs[count].b_bcount) < 1) {
1428 1429 searchp = searchp->p_next;
1429 1430 }
1430 1431 }
1431 1432
1432 1433 bufsused = count;
1433 1434 /* Now wait for everything to come in */
1434 1435 if (fsp->hqueue == NULL) {
1435 1436 for (count = 0; count < bufsused; count++) {
1436 1437 if (err == 0) {
1437 1438 err = biowait(&bufs[count]);
1438 1439 } else
1439 1440 (void) biowait(&bufs[count]);
1440 1441 }
1441 1442 } else {
1442 1443 for (count = 0; count < bufsused; count++) {
1443 1444 struct buf *wbuf;
1444 1445
1445 1446 /*
1446 1447 * Invoke scheduling function till our buf
1447 1448 * is processed. In doing this it might
1448 1449 * process bufs enqueued by other threads
1449 1450 * which is good.
1450 1451 */
1451 1452 wbuf = &bufs[count];
1452 1453 DTRACE_PROBE1(hsfs_io_wait, struct buf *, wbuf);
1453 1454 while (sema_tryp(&fio_done[count]) == 0) {
1454 1455 /*
1455 1456 * hsched_invoke_strategy will return 1
1456 1457 * if the I/O queue is empty. This means
1457 1458 * that there is another thread who has
1458 1459 * issued our buf and is waiting. So we
1459 1460 * just block instead of spinning.
1460 1461 */
1461 1462 if (hsched_invoke_strategy(fsp)) {
1462 1463 sema_p(&fio_done[count]);
1463 1464 break;
1464 1465 }
1465 1466 }
1466 1467 sema_destroy(&fio_done[count]);
1467 1468 DTRACE_PROBE1(hsfs_io_done, struct buf *, wbuf);
1468 1469
1469 1470 if (err == 0) {
1470 1471 err = geterror(wbuf);
1471 1472 }
1472 1473 }
1473 1474 kmem_free(fio_done, bufcnt * sizeof (ksema_t));
1474 1475 }
1475 1476
1476 1477 /* Don't leak resources */
1477 1478 for (count = 0; count < bufcnt; count++) {
1478 1479 biofini(&bufs[count]);
1479 1480 if (count < bufsused && vas[count] != NULL) {
1480 1481 ppmapout(vas[count]);
1481 1482 }
1482 1483 }
1483 1484
1484 1485 kmem_free(vas, bufcnt * sizeof (caddr_t));
1485 1486 kmem_free(bufs, bufcnt * sizeof (struct buf));
1486 1487 }
1487 1488
1488 1489 if (err) {
1489 1490 pvn_read_done(pp, B_ERROR);
1490 1491 return (err);
1491 1492 }
1492 1493
1493 1494 /*
1494 1495 * Lock the requested page, and the one after it if possible.
1495 1496 * Don't bother if our caller hasn't given us a place to stash
1496 1497 * the page pointers, since otherwise we'd lock pages that would
1497 1498 * never get unlocked.
1498 1499 */
1499 1500 if (pagefound) {
1500 1501 int index;
1501 1502 ulong_t soff;
1502 1503
1503 1504 /*
1504 1505 * Make sure it's in memory before we say it's here.
1505 1506 */
1506 1507 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
1507 1508 hsfs_lostpage++;
1508 1509 goto reread;
1509 1510 }
1510 1511
1511 1512 pl[0] = pp;
1512 1513 index = 1;
1513 1514 atomic_inc_64(&fsp->cache_read_pages);
1514 1515
1515 1516 /*
1516 1517 * Try to lock the next page, if it exists, without
1517 1518 * blocking.
1518 1519 */
1519 1520 plsz -= PAGESIZE;
1520 1521 /* LINTED (plsz is unsigned) */
1521 1522 for (soff = off + PAGESIZE; plsz > 0;
1522 1523 soff += PAGESIZE, plsz -= PAGESIZE) {
1523 1524 pp = page_lookup_nowait(vp, (u_offset_t)soff,
1524 1525 SE_SHARED);
1525 1526 if (pp == NULL)
1526 1527 break;
1527 1528 pl[index++] = pp;
1528 1529 }
1529 1530 pl[index] = NULL;
1530 1531
1531 1532 /*
1532 1533 * Schedule a semi-asynchronous readahead if we are
1533 1534 * accessing the last cached page for the current
1534 1535 * file.
1535 1536 *
1536 1537 * Doing this here means that readaheads will be
1537 1538 * issued only if cache-hits occur. This is an advantage
1538 1539 * since cache-hits would mean that readahead is giving
1539 1540 * the desired benefit. If cache-hits do not occur there
1540 1541 * is no point in reading ahead of time - the system
1541 1542 * is loaded anyway.
1542 1543 */
1543 1544 if (fsp->hqueue != NULL &&
1544 1545 hp->hs_prev_offset - off == PAGESIZE &&
1545 1546 hp->hs_prev_offset < filsiz &&
1546 1547 hp->hs_ra_bytes > 0 &&
1547 1548 !page_exists(vp, hp->hs_prev_offset)) {
1548 1549 (void) hsfs_getpage_ra(vp, hp->hs_prev_offset, seg,
1549 1550 addr + PAGESIZE, hp, fsp, xarsiz, bof,
1550 1551 chunk_lbn_count, chunk_data_bytes);
1551 1552 }
1552 1553
1553 1554 return (0);
1554 1555 }
1555 1556
1556 1557 if (pp != NULL) {
1557 1558 pvn_plist_init(pp, pl, plsz, off, io_len, rw);
1558 1559 }
1559 1560
1560 1561 return (err);
1561 1562 }
1562 1563
1563 1564 /*ARGSUSED*/
1564 1565 static int
1565 1566 hsfs_getpage(
1566 1567 struct vnode *vp,
1567 1568 offset_t off,
1568 1569 size_t len,
1569 1570 uint_t *protp,
1570 1571 struct page *pl[],
1571 1572 size_t plsz,
1572 1573 struct seg *seg,
1573 1574 caddr_t addr,
1574 1575 enum seg_rw rw,
1575 1576 struct cred *cred,
1576 1577 caller_context_t *ct)
1577 1578 {
1578 - int err;
1579 1579 uint_t filsiz;
1580 1580 struct hsfs *fsp;
1581 1581 struct hsnode *hp;
1582 1582
1583 1583 fsp = VFS_TO_HSFS(vp->v_vfsp);
1584 1584 hp = VTOH(vp);
1585 1585
1586 1586 /* does not support write */
1587 1587 if (rw == S_WRITE) {
1588 1588 return (EROFS);
1589 1589 }
1590 1590
1591 1591 if (vp->v_flag & VNOMAP) {
1592 1592 return (ENOSYS);
1593 1593 }
1594 1594
1595 1595 ASSERT(off <= HS_MAXFILEOFF);
1596 1596
1597 1597 /*
1598 1598 * Determine file data size for EOF check.
1599 1599 */
1600 1600 filsiz = hp->hs_dirent.ext_size;
1601 1601 if ((off + len) > (offset_t)(filsiz + PAGEOFFSET) && seg != segkmap)
1602 1602 return (EFAULT); /* beyond EOF */
1603 1603
1604 1604 /*
1605 1605 * Async Read-ahead computation.
1606 1606 * This attempts to detect sequential access pattern and
1607 1607 * enables reading extra pages ahead of time.
1608 1608 */
1609 1609 if (fsp->hqueue != NULL) {
1610 1610 /*
1611 1611 * This check for sequential access also takes into
1612 1612 * account segmap weirdness when reading in chunks
1613 1613 * less than the segmap size of 8K.
1614 1614 */
1615 1615 if (hp->hs_prev_offset == off || (off <
1616 1616 hp->hs_prev_offset && off + MAX(len, PAGESIZE)
1617 1617 >= hp->hs_prev_offset)) {
1618 1618 if (hp->hs_num_contig <
1619 1619 (seq_contig_requests - 1)) {
1620 1620 hp->hs_num_contig++;
1621 1621
1622 1622 } else {
1623 1623 /*
1624 1624 * We increase readahead quantum till
1625 1625 * a predefined max. max_readahead_bytes
1626 1626 * is a multiple of PAGESIZE.
1627 1627 */
1628 1628 if (hp->hs_ra_bytes <
1629 1629 fsp->hqueue->max_ra_bytes) {
1630 1630 hp->hs_ra_bytes += PAGESIZE;
1631 1631 }
1632 1632 }
1633 1633 } else {
1634 1634 /*
1635 1635 * Not contiguous so reduce read ahead counters.
1636 1636 */
1637 1637 if (hp->hs_ra_bytes > 0)
1638 1638 hp->hs_ra_bytes -= PAGESIZE;
1639 1639
1640 1640 if (hp->hs_ra_bytes <= 0) {
1641 1641 hp->hs_ra_bytes = 0;
1642 1642 if (hp->hs_num_contig > 0)
1643 1643 hp->hs_num_contig--;
1644 1644 }
1645 1645 }
1646 1646 /*
1647 1647  * Length must be rounded up to a page boundary,
1648 1648 * since we read in units of pages.
1649 1649 */
1650 1650 hp->hs_prev_offset = off + roundup(len, PAGESIZE);
1651 1651 DTRACE_PROBE1(hsfs_compute_ra, struct hsnode *, hp);
1652 1652 }
1653 1653 if (protp != NULL)
1654 1654 *protp = PROT_ALL;
1655 1655
1656 - if (len <= PAGESIZE)
1657 - err = hsfs_getapage(vp, (u_offset_t)off, len, protp, pl, plsz,
1658 - seg, addr, rw, cred);
1659 - else
1660 - err = pvn_getpages(hsfs_getapage, vp, off, len, protp,
1661 - pl, plsz, seg, addr, rw, cred);
1662 -
1663 - return (err);
1656 + return (pvn_getpages(hsfs_getapage, vp, off, len, protp, pl, plsz,
1657 + seg, addr, rw, cred));
1664 1658 }
1665 1659
1666 1660
1667 1661
1668 1662 /*
1669 1663 * This function should never be called. We need to have it to pass
1670 1664 * it as an argument to other functions.
1671 1665 */
1672 1666 /*ARGSUSED*/
1673 1667 int
1674 1668 hsfs_putapage(
1675 1669 vnode_t *vp,
1676 1670 page_t *pp,
1677 1671 u_offset_t *offp,
1678 1672 size_t *lenp,
1679 1673 int flags,
1680 1674 cred_t *cr)
1681 1675 {
1682 1676 /* should never happen - just destroy it */
1683 1677 cmn_err(CE_NOTE, "hsfs_putapage: dirty HSFS page");
1684 1678 pvn_write_done(pp, B_ERROR | B_WRITE | B_INVAL | B_FORCE | flags);
1685 1679 return (0);
1686 1680 }
1687 1681
1688 1682
1689 1683 /*
1690 1684 * The only flags we support are B_INVAL, B_FREE and B_DONTNEED.
1691 1685 * B_INVAL is set by:
1692 1686 *
1693 1687 * 1) the MC_SYNC command of memcntl(2) to support the MS_INVALIDATE flag.
1694 1688 * 2) the MC_ADVISE command of memcntl(2) with the MADV_DONTNEED advice
1695 1689 * which translates to an MC_SYNC with the MS_INVALIDATE flag.
1696 1690 *
1697 1691 * The B_FREE (as well as the B_DONTNEED) flag is set when the
1698 1692 * MADV_SEQUENTIAL advice has been used. VOP_PUTPAGE is invoked
1699 1693 * from SEGVN to release pages behind a pagefault.
1700 1694 */
1701 1695 /*ARGSUSED*/
1702 1696 static int
1703 1697 hsfs_putpage(
1704 1698 struct vnode *vp,
1705 1699 offset_t off,
1706 1700 size_t len,
1707 1701 int flags,
1708 1702 struct cred *cr,
1709 1703 caller_context_t *ct)
1710 1704 {
1711 1705 int error = 0;
1712 1706
1713 1707 if (vp->v_count == 0) {
1714 1708 panic("hsfs_putpage: bad v_count");
1715 1709 /*NOTREACHED*/
1716 1710 }
1717 1711
1718 1712 if (vp->v_flag & VNOMAP)
1719 1713 return (ENOSYS);
1720 1714
1721 1715 ASSERT(off <= HS_MAXFILEOFF);
1722 1716
1723 1717 if (!vn_has_cached_data(vp)) /* no pages mapped */
1724 1718 return (0);
1725 1719
1726 1720 if (len == 0) { /* from 'off' to EOF */
1727 1721 error = pvn_vplist_dirty(vp, off, hsfs_putapage, flags, cr);
1728 1722 } else {
1729 1723 offset_t end_off = off + len;
1730 1724 offset_t file_size = VTOH(vp)->hs_dirent.ext_size;
1731 1725 offset_t io_off;
1732 1726
1733 1727 file_size = (file_size + PAGESIZE - 1) & PAGEMASK;
1734 1728 if (end_off > file_size)
1735 1729 end_off = file_size;
1736 1730
1737 1731 for (io_off = off; io_off < end_off; io_off += PAGESIZE) {
1738 1732 page_t *pp;
1739 1733
1740 1734 /*
1741 1735 * We insist on getting the page only if we are
1742 1736 * about to invalidate, free or write it and
1743 1737 * the B_ASYNC flag is not set.
1744 1738 */
1745 1739 if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
1746 1740 pp = page_lookup(vp, io_off,
1747 1741 (flags & (B_INVAL | B_FREE)) ?
1748 1742 SE_EXCL : SE_SHARED);
1749 1743 } else {
1750 1744 pp = page_lookup_nowait(vp, io_off,
1751 1745 (flags & B_FREE) ? SE_EXCL : SE_SHARED);
1752 1746 }
1753 1747
1754 1748 if (pp == NULL)
1755 1749 continue;
1756 1750
1757 1751 /*
1758 1752 * Normally pvn_getdirty() should return 0, which
1759 1753 * impies that it has done the job for us.
1760 1754 * The shouldn't-happen scenario is when it returns 1.
1761 1755 * This means that the page has been modified and
1762 1756 * needs to be put back.
1763 1757 * Since we can't write on a CD, we fake a failed
1764 1758 * I/O and force pvn_write_done() to destroy the page.
1765 1759 */
1766 1760 if (pvn_getdirty(pp, flags) == 1) {
1767 1761 cmn_err(CE_NOTE,
1768 1762 "hsfs_putpage: dirty HSFS page");
1769 1763 pvn_write_done(pp, flags |
1770 1764 B_ERROR | B_WRITE | B_INVAL | B_FORCE);
1771 1765 }
1772 1766 }
1773 1767 }
1774 1768 return (error);
1775 1769 }
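
For the len != 0 path in hsfs_putpage above, the requested range is first clamped to the page-rounded file size and then walked one page per iteration. A tiny user-space model of just that bounds arithmetic (MODEL_PAGESIZE/MODEL_PAGEMASK are stand-ins for the kernel's PAGESIZE/PAGEMASK):

    /* Sketch of the [off, end_off) clamping used by the len != 0 path above. */
    #include <stdio.h>

    #define MODEL_PAGESIZE 4096L
    #define MODEL_PAGEMASK (~(MODEL_PAGESIZE - 1))

    int
    main(void)
    {
    	long off = 3 * MODEL_PAGESIZE;
    	long len = 16 * MODEL_PAGESIZE;
    	long file_size = 5 * MODEL_PAGESIZE + 123;  /* arbitrary example size */
    	long end_off = off + len;
    	long io_off;

    	/* Round the file size up to a page boundary, as the code above does. */
    	file_size = (file_size + MODEL_PAGESIZE - 1) & MODEL_PAGEMASK;
    	if (end_off > file_size)
    		end_off = file_size;

    	for (io_off = off; io_off < end_off; io_off += MODEL_PAGESIZE)
    		printf("would look up page at offset %ld\n", io_off);
    	return (0);
    }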
1776 1770
1777 1771
1778 1772 /*ARGSUSED*/
1779 1773 static int
1780 1774 hsfs_map(
1781 1775 struct vnode *vp,
1782 1776 offset_t off,
1783 1777 struct as *as,
1784 1778 caddr_t *addrp,
1785 1779 size_t len,
1786 1780 uchar_t prot,
1787 1781 uchar_t maxprot,
1788 1782 uint_t flags,
1789 1783 struct cred *cred,
1790 1784 caller_context_t *ct)
1791 1785 {
1792 1786 struct segvn_crargs vn_a;
1793 1787 int error;
1794 1788
1795 1789 /* VFS_RECORD(vp->v_vfsp, VS_MAP, VS_CALL); */
1796 1790
1797 1791 if (vp->v_flag & VNOMAP)
1798 1792 return (ENOSYS);
1799 1793
1800 1794 if ((prot & PROT_WRITE) && (flags & MAP_SHARED))
1801 1795 return (ENOSYS);
1802 1796
1803 1797 if (off > HS_MAXFILEOFF || off < 0 ||
1804 1798 (off + len) < 0 || (off + len) > HS_MAXFILEOFF)
1805 1799 return (ENXIO);
1806 1800
1807 1801 if (vp->v_type != VREG) {
1808 1802 return (ENODEV);
1809 1803 }
1810 1804
1811 1805 /*
1812 1806 * If file is being locked, disallow mapping.
1813 1807 */
1814 1808 if (vn_has_mandatory_locks(vp, VTOH(vp)->hs_dirent.mode))
1815 1809 return (EAGAIN);
1816 1810
1817 1811 as_rangelock(as);
1818 1812 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
1819 1813 if (error != 0) {
1820 1814 as_rangeunlock(as);
1821 1815 return (error);
1822 1816 }
1823 1817
1824 1818 vn_a.vp = vp;
1825 1819 vn_a.offset = off;
1826 1820 vn_a.type = flags & MAP_TYPE;
1827 1821 vn_a.prot = prot;
1828 1822 vn_a.maxprot = maxprot;
1829 1823 vn_a.flags = flags & ~MAP_TYPE;
1830 1824 vn_a.cred = cred;
1831 1825 vn_a.amp = NULL;
1832 1826 vn_a.szc = 0;
1833 1827 vn_a.lgrp_mem_policy_flags = 0;
1834 1828
1835 1829 error = as_map(as, *addrp, len, segvn_create, &vn_a);
1836 1830 as_rangeunlock(as);
1837 1831 return (error);
1838 1832 }
1839 1833
1840 1834 /* ARGSUSED */
1841 1835 static int
1842 1836 hsfs_addmap(
1843 1837 struct vnode *vp,
1844 1838 offset_t off,
1845 1839 struct as *as,
1846 1840 caddr_t addr,
1847 1841 size_t len,
1848 1842 uchar_t prot,
1849 1843 uchar_t maxprot,
1850 1844 uint_t flags,
1851 1845 struct cred *cr,
1852 1846 caller_context_t *ct)
1853 1847 {
1854 1848 struct hsnode *hp;
1855 1849
1856 1850 if (vp->v_flag & VNOMAP)
1857 1851 return (ENOSYS);
1858 1852
1859 1853 hp = VTOH(vp);
1860 1854 mutex_enter(&hp->hs_contents_lock);
1861 1855 hp->hs_mapcnt += btopr(len);
1862 1856 mutex_exit(&hp->hs_contents_lock);
1863 1857 return (0);
1864 1858 }
1865 1859
1866 1860 /*ARGSUSED*/
1867 1861 static int
1868 1862 hsfs_delmap(
1869 1863 struct vnode *vp,
1870 1864 offset_t off,
1871 1865 struct as *as,
1872 1866 caddr_t addr,
1873 1867 size_t len,
1874 1868 uint_t prot,
1875 1869 uint_t maxprot,
1876 1870 uint_t flags,
1877 1871 struct cred *cr,
1878 1872 caller_context_t *ct)
1879 1873 {
1880 1874 struct hsnode *hp;
1881 1875
1882 1876 if (vp->v_flag & VNOMAP)
1883 1877 return (ENOSYS);
1884 1878
1885 1879 hp = VTOH(vp);
1886 1880 mutex_enter(&hp->hs_contents_lock);
1887 1881 hp->hs_mapcnt -= btopr(len); /* Count released mappings */
1888 1882 ASSERT(hp->hs_mapcnt >= 0);
1889 1883 mutex_exit(&hp->hs_contents_lock);
1890 1884 return (0);
1891 1885 }
1892 1886
1893 1887 /* ARGSUSED */
1894 1888 static int
1895 1889 hsfs_seek(
1896 1890 struct vnode *vp,
1897 1891 offset_t ooff,
1898 1892 offset_t *noffp,
1899 1893 caller_context_t *ct)
1900 1894 {
1901 1895 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
1902 1896 }
1903 1897
1904 1898 /* ARGSUSED */
1905 1899 static int
1906 1900 hsfs_frlock(
1907 1901 struct vnode *vp,
1908 1902 int cmd,
1909 1903 struct flock64 *bfp,
1910 1904 int flag,
1911 1905 offset_t offset,
1912 1906 struct flk_callback *flk_cbp,
1913 1907 cred_t *cr,
1914 1908 caller_context_t *ct)
1915 1909 {
1916 1910 struct hsnode *hp = VTOH(vp);
1917 1911
1918 1912 /*
1919 1913 * If the file is being mapped, disallow fs_frlock.
1920 1914 * We are not holding the hs_contents_lock while checking
1921 1915 * hs_mapcnt because the current locking strategy drops all
1922 1916 * locks before calling fs_frlock.
1923 1917 	 * So, hs_mapcnt could change before we enter fs_frlock, making
1924 1918 * it meaningless to have held hs_contents_lock in the first place.
1925 1919 */
1926 1920 if (hp->hs_mapcnt > 0 && MANDLOCK(vp, hp->hs_dirent.mode))
1927 1921 return (EAGAIN);
1928 1922
1929 1923 return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
1930 1924 }
1931 1925
1932 1926 static int
1933 1927 hsched_deadline_compare(const void *x1, const void *x2)
1934 1928 {
1935 1929 const struct hio *h1 = x1;
1936 1930 const struct hio *h2 = x2;
1937 1931
1938 1932 if (h1->io_timestamp < h2->io_timestamp)
1939 1933 return (-1);
1940 1934 if (h1->io_timestamp > h2->io_timestamp)
1941 1935 return (1);
1942 1936
1943 1937 if (h1->io_lblkno < h2->io_lblkno)
1944 1938 return (-1);
1945 1939 if (h1->io_lblkno > h2->io_lblkno)
1946 1940 return (1);
1947 1941
1948 1942 if (h1 < h2)
1949 1943 return (-1);
1950 1944 if (h1 > h2)
1951 1945 return (1);
1952 1946
1953 1947 return (0);
1954 1948 }
1955 1949
1956 1950 static int
1957 1951 hsched_offset_compare(const void *x1, const void *x2)
1958 1952 {
1959 1953 const struct hio *h1 = x1;
1960 1954 const struct hio *h2 = x2;
1961 1955
1962 1956 if (h1->io_lblkno < h2->io_lblkno)
1963 1957 return (-1);
1964 1958 if (h1->io_lblkno > h2->io_lblkno)
1965 1959 return (1);
1966 1960
1967 1961 if (h1 < h2)
1968 1962 return (-1);
1969 1963 if (h1 > h2)
1970 1964 return (1);
1971 1965
1972 1966 return (0);
1973 1967 }
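
Both comparators above end with a comparison of the node addresses themselves; since avl_add() will not accept two nodes that compare equal, the address tie-break keeps requests with identical timestamps or block numbers distinct in the trees. The same three-way pattern in a generic, self-contained form (struct item and its key field are hypothetical):

    /* Generic three-way comparator with an address tie-break, as used above. */
    #include <stdint.h>

    struct item {
    	uint64_t key;
    };

    static int
    item_compare(const void *x1, const void *x2)
    {
    	const struct item *i1 = x1;
    	const struct item *i2 = x2;

    	if (i1->key < i2->key)
    		return (-1);
    	if (i1->key > i2->key)
    		return (1);

    	/* Equal keys: fall back to the addresses so no two nodes tie. */
    	if (i1 < i2)
    		return (-1);
    	if (i1 > i2)
    		return (1);
    	return (0);
    }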
1974 1968
1975 1969 void
1976 1970 hsched_init_caches(void)
1977 1971 {
1978 1972 hio_cache = kmem_cache_create("hsfs_hio_cache",
1979 1973 sizeof (struct hio), 0, NULL,
1980 1974 NULL, NULL, NULL, NULL, 0);
1981 1975
1982 1976 hio_info_cache = kmem_cache_create("hsfs_hio_info_cache",
1983 1977 sizeof (struct hio_info), 0, NULL,
1984 1978 NULL, NULL, NULL, NULL, 0);
1985 1979 }
1986 1980
1987 1981 void
1988 1982 hsched_fini_caches(void)
1989 1983 {
1990 1984 kmem_cache_destroy(hio_cache);
1991 1985 kmem_cache_destroy(hio_info_cache);
1992 1986 }
1993 1987
1994 1988 /*
1995 1989 * Initialize I/O scheduling structures. This is called via hsfs_mount
1996 1990 */
1997 1991 void
1998 1992 hsched_init(struct hsfs *fsp, int fsid, struct modlinkage *modlinkage)
1999 1993 {
2000 1994 struct hsfs_queue *hqueue = fsp->hqueue;
2001 1995 struct vnode *vp = fsp->hsfs_devvp;
2002 1996
2003 1997 /* TaskQ name of the form: hsched_task_ + stringof(int) */
2004 1998 char namebuf[23];
2005 1999 int error, err;
2006 2000 struct dk_cinfo info;
2007 2001 ldi_handle_t lh;
2008 2002 ldi_ident_t li;
2009 2003
2010 2004 /*
2011 2005 * Default maxtransfer = 16k chunk
2012 2006 */
2013 2007 hqueue->dev_maxtransfer = 16384;
2014 2008
2015 2009 /*
2016 2010 * Try to fetch the maximum device transfer size. This is used to
2017 2011 * ensure that a coalesced block does not exceed the maxtransfer.
2018 2012 */
2019 2013 err = ldi_ident_from_mod(modlinkage, &li);
2020 2014 if (err) {
2021 2015 cmn_err(CE_NOTE, "hsched_init: Querying device failed");
2022 2016 cmn_err(CE_NOTE, "hsched_init: ldi_ident_from_mod err=%d\n",
2023 2017 err);
2024 2018 goto set_ra;
2025 2019 }
2026 2020
2027 2021 err = ldi_open_by_dev(&(vp->v_rdev), OTYP_CHR, FREAD, CRED(), &lh, li);
2028 2022 ldi_ident_release(li);
2029 2023 if (err) {
2030 2024 cmn_err(CE_NOTE, "hsched_init: Querying device failed");
2031 2025 cmn_err(CE_NOTE, "hsched_init: ldi_open err=%d\n", err);
2032 2026 goto set_ra;
2033 2027 }
2034 2028
2035 2029 error = ldi_ioctl(lh, DKIOCINFO, (intptr_t)&info, FKIOCTL,
2036 2030 CRED(), &err);
2037 2031 err = ldi_close(lh, FREAD, CRED());
2038 2032 if (err) {
2039 2033 cmn_err(CE_NOTE, "hsched_init: Querying device failed");
2040 2034 cmn_err(CE_NOTE, "hsched_init: ldi_close err=%d\n", err);
2041 2035 }
2042 2036
2043 2037 if (error == 0) {
2044 2038 hqueue->dev_maxtransfer = ldbtob(info.dki_maxtransfer);
2045 2039 }
2046 2040
2047 2041 set_ra:
2048 2042 /*
2049 2043 * Max size of data to read ahead for sequential access pattern.
2050 2044 	 * Conservative, to avoid letting the underlying CD drive spin
2051 2045 	 * down in case the application is reading slowly.
2052 2046 	 * We read ahead up to a max of 8 pages.
2053 2047 */
2054 2048 hqueue->max_ra_bytes = PAGESIZE * 8;
2055 2049
2056 2050 mutex_init(&(hqueue->hsfs_queue_lock), NULL, MUTEX_DEFAULT, NULL);
2057 2051 mutex_init(&(hqueue->strategy_lock), NULL, MUTEX_DEFAULT, NULL);
2058 2052 avl_create(&(hqueue->read_tree), hsched_offset_compare,
2059 2053 sizeof (struct hio), offsetof(struct hio, io_offset_node));
2060 2054 avl_create(&(hqueue->deadline_tree), hsched_deadline_compare,
2061 2055 sizeof (struct hio), offsetof(struct hio, io_deadline_node));
2062 2056
2063 2057 (void) snprintf(namebuf, sizeof (namebuf), "hsched_task_%d", fsid);
2064 2058 hqueue->ra_task = taskq_create(namebuf, hsfs_taskq_nthreads,
2065 2059 minclsyspri + 2, 1, 104857600 / PAGESIZE, TASKQ_DYNAMIC);
2066 2060
2067 2061 hqueue->next = NULL;
2068 2062 hqueue->nbuf = kmem_zalloc(sizeof (struct buf), KM_SLEEP);
2069 2063 }
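
dki_maxtransfer from DKIOCINFO is expressed in 512-byte disk blocks, and ldbtob() converts it to the byte count stored in dev_maxtransfer. A one-line user-space equivalent, assuming the conventional 512-byte DEV_BSIZE:

    /* User-space stand-in for ldbtob(): 512-byte disk blocks to bytes. */
    #include <stdio.h>

    #define MODEL_DEV_BSIZE 512UL

    int
    main(void)
    {
    	unsigned long dki_maxtransfer = 32;  /* hypothetical DKIOCINFO value */

    	printf("maxtransfer = %lu bytes\n",
    	    dki_maxtransfer * MODEL_DEV_BSIZE);
    	return (0);
    }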
2070 2064
2071 2065 void
2072 2066 hsched_fini(struct hsfs_queue *hqueue)
2073 2067 {
2074 2068 if (hqueue != NULL) {
2075 2069 /*
2076 2070 * Remove the sentinel if there was one.
2077 2071 */
2078 2072 if (hqueue->next != NULL) {
2079 2073 avl_remove(&hqueue->read_tree, hqueue->next);
2080 2074 kmem_cache_free(hio_cache, hqueue->next);
2081 2075 }
2082 2076 avl_destroy(&(hqueue->read_tree));
2083 2077 avl_destroy(&(hqueue->deadline_tree));
2084 2078 mutex_destroy(&(hqueue->hsfs_queue_lock));
2085 2079 mutex_destroy(&(hqueue->strategy_lock));
2086 2080
2087 2081 /*
2088 2082 		 * If there are any existing readahead threads running,
2089 2083 * taskq_destroy will wait for them to finish.
2090 2084 */
2091 2085 taskq_destroy(hqueue->ra_task);
2092 2086 kmem_free(hqueue->nbuf, sizeof (struct buf));
2093 2087 }
2094 2088 }
2095 2089
2096 2090 /*
2097 2091 * Determine if two I/O requests are adjacent to each other so
2098 2092  * that they can be coalesced.
2099 2093 */
2100 2094 #define IS_ADJACENT(io, nio) \
2101 2095 (((io)->io_lblkno + (io)->nblocks == (nio)->io_lblkno) && \
2102 2096 (io)->bp->b_edev == (nio)->bp->b_edev)
2103 2097
2104 2098 /*
2105 2099 * This performs the actual I/O scheduling logic. We use the Circular
2106 2100 * Look algorithm here. Sort the I/O requests in ascending order of
2107 2101 * logical block number and process them starting with the lowest
2108 2102 * numbered block and progressing towards higher block numbers in the
2109 2103 * queue. Once there are no more higher numbered blocks, start again
2110 2104 * with the lowest one. This is good for CD/DVD as you keep moving
2111 2105 * the head in one direction along the outward spiral track and avoid
2112 2106  * seeks as much as possible. The re-ordering also allows
2113 2107 * us to coalesce adjacent requests into one larger request.
2114 2108 * This is thus essentially a 1-way Elevator with front merging.
2115 2109 *
2116 2110 * In addition each read request here has a deadline and will be
2117 2111 * processed out of turn if the deadline (500ms) expires.
2118 2112 *
2119 2113 * This function is necessarily serialized via hqueue->strategy_lock.
2120 2114 * This function sits just below hsfs_getapage and processes all read
2121 2115  * requests originating from that function.
2122 2116 */
2123 2117 int
2124 2118 hsched_invoke_strategy(struct hsfs *fsp)
2125 2119 {
2126 2120 struct hsfs_queue *hqueue;
2127 2121 struct buf *nbuf;
2128 2122 struct hio *fio, *nio, *tio, *prev, *last;
2129 2123 size_t bsize, soffset, offset, data;
2130 2124 int bioret, bufcount;
2131 2125 struct vnode *fvp;
2132 2126 ksema_t *io_done;
2133 2127 caddr_t iodata;
2134 2128
2135 2129 hqueue = fsp->hqueue;
2136 2130 mutex_enter(&hqueue->strategy_lock);
2137 2131 mutex_enter(&hqueue->hsfs_queue_lock);
2138 2132
2139 2133 /*
2140 2134 * Check for Deadline expiration first
2141 2135 */
2142 2136 fio = avl_first(&hqueue->deadline_tree);
2143 2137
2144 2138 /*
2145 2139 * Paranoid check for empty I/O queue. Both deadline
2146 2140 	 * and read trees contain the same data sorted in different
2147 2141 * ways. So empty deadline tree = empty read tree.
2148 2142 */
2149 2143 if (fio == NULL) {
2150 2144 /*
2151 2145 * Remove the sentinel if there was one.
2152 2146 */
2153 2147 if (hqueue->next != NULL) {
2154 2148 avl_remove(&hqueue->read_tree, hqueue->next);
2155 2149 kmem_cache_free(hio_cache, hqueue->next);
2156 2150 hqueue->next = NULL;
2157 2151 }
2158 2152 mutex_exit(&hqueue->hsfs_queue_lock);
2159 2153 mutex_exit(&hqueue->strategy_lock);
2160 2154 return (1);
2161 2155 }
2162 2156
2163 2157 if (drv_hztousec(ddi_get_lbolt()) - fio->io_timestamp
2164 2158 < HSFS_READ_DEADLINE) {
2165 2159 /*
2166 2160 * Apply standard scheduling logic. This uses the
2167 2161 * C-LOOK approach. Process I/O requests in ascending
2168 2162 * order of logical block address till no subsequent
2169 2163 * higher numbered block request remains. Then start
2170 2164 * again from the lowest numbered block in the queue.
2171 2165 *
2172 2166 * We do this cheaply here by means of a sentinel.
2173 2167 * The last processed I/O structure from the previous
2174 2168 		 * invocation of this function is left dangling in the
2175 2169 * read_tree so that we can easily scan to the next
2176 2170 * higher numbered request and remove the sentinel.
2177 2171 */
2178 2172 fio = NULL;
2179 2173 if (hqueue->next != NULL) {
2180 2174 fio = AVL_NEXT(&hqueue->read_tree, hqueue->next);
2181 2175 avl_remove(&hqueue->read_tree, hqueue->next);
2182 2176 kmem_cache_free(hio_cache, hqueue->next);
2183 2177 hqueue->next = NULL;
2184 2178 }
2185 2179 if (fio == NULL) {
2186 2180 fio = avl_first(&hqueue->read_tree);
2187 2181 }
2188 2182 } else if (hqueue->next != NULL) {
2189 2183 DTRACE_PROBE1(hsfs_deadline_expiry, struct hio *, fio);
2190 2184
2191 2185 avl_remove(&hqueue->read_tree, hqueue->next);
2192 2186 kmem_cache_free(hio_cache, hqueue->next);
2193 2187 hqueue->next = NULL;
2194 2188 }
2195 2189
2196 2190 /*
2197 2191 * In addition we try to coalesce contiguous
2198 2192 * requests into one bigger request.
2199 2193 */
2200 2194 bufcount = 1;
2201 2195 bsize = ldbtob(fio->nblocks);
2202 2196 fvp = fio->bp->b_file;
2203 2197 nio = AVL_NEXT(&hqueue->read_tree, fio);
2204 2198 tio = fio;
2205 2199 while (nio != NULL && IS_ADJACENT(tio, nio) &&
2206 2200 bsize < hqueue->dev_maxtransfer) {
2207 2201 avl_remove(&hqueue->deadline_tree, tio);
2208 2202 avl_remove(&hqueue->read_tree, tio);
2209 2203 tio->contig_chain = nio;
2210 2204 bsize += ldbtob(nio->nblocks);
2211 2205 prev = tio;
2212 2206 tio = nio;
2213 2207
2214 2208 /*
2215 2209 * This check is required to detect the case where
2216 2210 * we are merging adjacent buffers belonging to
2217 2211 * different files. fvp is used to set the b_file
2218 2212 * parameter in the coalesced buf. b_file is used
2219 2213 		 * by DTrace, so we do not want DTrace to attribute
2220 2214 		 * requests belonging to two different files to any one file.
2221 2215 */
2222 2216 if (fvp && tio->bp->b_file != fvp) {
2223 2217 fvp = NULL;
2224 2218 }
2225 2219
2226 2220 nio = AVL_NEXT(&hqueue->read_tree, nio);
2227 2221 bufcount++;
2228 2222 }
2229 2223
2230 2224 /*
2231 2225 * tio is not removed from the read_tree as it serves as a sentinel
2232 2226 * to cheaply allow us to scan to the next higher numbered I/O
2233 2227 * request.
2234 2228 */
2235 2229 hqueue->next = tio;
2236 2230 avl_remove(&hqueue->deadline_tree, tio);
2237 2231 mutex_exit(&hqueue->hsfs_queue_lock);
2238 2232 DTRACE_PROBE3(hsfs_io_dequeued, struct hio *, fio, int, bufcount,
2239 2233 size_t, bsize);
2240 2234
2241 2235 /*
2242 2236 	 * The benefit of coalescing occurs if the savings in I/O outweighs
2243 2237 * the cost of doing the additional work below.
2244 2238 * It was observed that coalescing 2 buffers results in diminishing
2245 2239 * returns, so we do coalescing if we have >2 adjacent bufs.
2246 2240 */
2247 2241 if (bufcount > hsched_coalesce_min) {
2248 2242 /*
2249 2243 * We have coalesced blocks. First allocate mem and buf for
2250 2244 * the entire coalesced chunk.
2251 2245 		 * Since we are guaranteed to be single-threaded here, we pre-allocate
2252 2246 		 * one buf at mount time and re-use it every time. This
2253 2247 		 * is a synthesized buf structure that uses a kmem_alloc'd chunk.
2254 2248 * Not quite a normal buf attached to pages.
2255 2249 */
2256 2250 fsp->coalesced_bytes += bsize;
2257 2251 nbuf = hqueue->nbuf;
2258 2252 bioinit(nbuf);
2259 2253 nbuf->b_edev = fio->bp->b_edev;
2260 2254 nbuf->b_dev = fio->bp->b_dev;
2261 2255 nbuf->b_flags = fio->bp->b_flags;
2262 2256 nbuf->b_iodone = fio->bp->b_iodone;
2263 2257 iodata = kmem_alloc(bsize, KM_SLEEP);
2264 2258 nbuf->b_un.b_addr = iodata;
2265 2259 nbuf->b_lblkno = fio->bp->b_lblkno;
2266 2260 nbuf->b_vp = fvp;
2267 2261 nbuf->b_file = fvp;
2268 2262 nbuf->b_bcount = bsize;
2269 2263 nbuf->b_bufsize = bsize;
2270 2264
2271 2265 DTRACE_PROBE3(hsfs_coalesced_io_start, struct hio *, fio, int,
2272 2266 bufcount, size_t, bsize);
2273 2267
2274 2268 /*
2275 2269 * Perform I/O for the coalesced block.
2276 2270 */
2277 2271 (void) bdev_strategy(nbuf);
2278 2272
2279 2273 /*
2280 2274 * Duplicate the last IO node to leave the sentinel alone.
2281 2275 * The sentinel is freed in the next invocation of this
2282 2276 * function.
2283 2277 */
2284 2278 prev->contig_chain = kmem_cache_alloc(hio_cache, KM_SLEEP);
2285 2279 prev->contig_chain->bp = tio->bp;
2286 2280 prev->contig_chain->sema = tio->sema;
2287 2281 tio = prev->contig_chain;
2288 2282 tio->contig_chain = NULL;
2289 2283 soffset = ldbtob(fio->bp->b_lblkno);
2290 2284 nio = fio;
2291 2285
2292 2286 bioret = biowait(nbuf);
2293 2287 data = bsize - nbuf->b_resid;
2294 2288 biofini(nbuf);
2295 2289 mutex_exit(&hqueue->strategy_lock);
2296 2290
2297 2291 /*
2298 2292 * We use the b_resid parameter to detect how much
2299 2293 		 * data was successfully transferred. We signal
2300 2294 		 * success to all of the original (pre-coalescing) bufs
2301 2295 		 * that were fully retrieved; the rest, if any, are
2302 2296 		 * signaled as errors.
2303 2297 */
2304 2298 tio = nio;
2305 2299 DTRACE_PROBE3(hsfs_coalesced_io_done, struct hio *, nio,
2306 2300 int, bioret, size_t, data);
2307 2301
2308 2302 /*
2309 2303 * Copy data and signal success to all the bufs
2310 2304 * which can be fully satisfied from b_resid.
2311 2305 */
2312 2306 while (nio != NULL && data >= nio->bp->b_bcount) {
2313 2307 offset = ldbtob(nio->bp->b_lblkno) - soffset;
2314 2308 bcopy(iodata + offset, nio->bp->b_un.b_addr,
2315 2309 nio->bp->b_bcount);
2316 2310 data -= nio->bp->b_bcount;
2317 2311 bioerror(nio->bp, 0);
2318 2312 biodone(nio->bp);
2319 2313 sema_v(nio->sema);
2320 2314 tio = nio;
2321 2315 nio = nio->contig_chain;
2322 2316 kmem_cache_free(hio_cache, tio);
2323 2317 }
2324 2318
2325 2319 /*
2326 2320 * Signal error to all the leftover bufs (if any)
2327 2321 * after b_resid data is exhausted.
2328 2322 */
2329 2323 while (nio != NULL) {
2330 2324 nio->bp->b_resid = nio->bp->b_bcount - data;
2331 2325 bzero(nio->bp->b_un.b_addr + data, nio->bp->b_resid);
2332 2326 bioerror(nio->bp, bioret);
2333 2327 biodone(nio->bp);
2334 2328 sema_v(nio->sema);
2335 2329 tio = nio;
2336 2330 nio = nio->contig_chain;
2337 2331 kmem_cache_free(hio_cache, tio);
2338 2332 data = 0;
2339 2333 }
2340 2334 kmem_free(iodata, bsize);
2341 2335 } else {
2342 2336
2343 2337 nbuf = tio->bp;
2344 2338 io_done = tio->sema;
2345 2339 nio = fio;
2346 2340 last = tio;
2347 2341
2348 2342 while (nio != NULL) {
2349 2343 (void) bdev_strategy(nio->bp);
2350 2344 nio = nio->contig_chain;
2351 2345 }
2352 2346 nio = fio;
2353 2347 mutex_exit(&hqueue->strategy_lock);
2354 2348
2355 2349 while (nio != NULL) {
2356 2350 if (nio == last) {
2357 2351 (void) biowait(nbuf);
2358 2352 sema_v(io_done);
2359 2353 break;
2360 2354 /* sentinel last not freed. See above. */
2361 2355 } else {
2362 2356 (void) biowait(nio->bp);
2363 2357 sema_v(nio->sema);
2364 2358 }
2365 2359 tio = nio;
2366 2360 nio = nio->contig_chain;
2367 2361 kmem_cache_free(hio_cache, tio);
2368 2362 }
2369 2363 }
2370 2364 return (0);
2371 2365 }
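
Stripped of coalescing, deadlines and the sentinel bookkeeping, the C-LOOK selection above amounts to: service the lowest-numbered pending block above the last one serviced, and wrap to the lowest pending block when nothing higher remains. A self-contained user-space sketch of just that pick-next rule (the block numbers are made up for illustration):

    /* Minimal C-LOOK pick-next model; not the kernel implementation. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int
    cmp_long(const void *a, const void *b)
    {
    	long x = *(const long *)a, y = *(const long *)b;

    	return ((x > y) - (x < y));
    }

    /* Return the index of the next request to service after block 'last'. */
    static size_t
    clook_next(const long *blocks, size_t n, long last)
    {
    	size_t i;

    	for (i = 0; i < n; i++) {
    		if (blocks[i] > last)
    			return (i);   /* keep sweeping upward */
    	}
    	return (0);               /* wrap to the lowest pending block */
    }

    int
    main(void)
    {
    	long pending[] = { 40, 8, 72, 16, 56 };
    	size_t n = sizeof (pending) / sizeof (pending[0]);
    	long last = 20;

    	qsort(pending, n, sizeof (long), cmp_long);
    	while (n > 0) {
    		size_t pick = clook_next(pending, n, last);

    		printf("service block %ld\n", pending[pick]);
    		last = pending[pick];
    		/* Remove the serviced entry from the pending set. */
    		memmove(&pending[pick], &pending[pick + 1],
    		    (n - pick - 1) * sizeof (long));
    		n--;
    	}
    	return (0);
    }

Starting from block 20 this services 40, 56, 72 and then wraps to 8 and 16, i.e. one outward sweep at a time, which is the behaviour the comment above describes for the CD/DVD head.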
2372 2366
2373 2367 /*
2374 2368 * Insert an I/O request in the I/O scheduler's pipeline
2375 2369 * Using AVL tree makes it easy to reorder the I/O request
2376 2370 * based on logical block number.
2377 2371 */
2378 2372 static void
2379 2373 hsched_enqueue_io(struct hsfs *fsp, struct hio *hsio, int ra)
2380 2374 {
2381 2375 struct hsfs_queue *hqueue = fsp->hqueue;
2382 2376
2383 2377 mutex_enter(&hqueue->hsfs_queue_lock);
2384 2378
2385 2379 fsp->physical_read_bytes += hsio->bp->b_bcount;
2386 2380 if (ra)
2387 2381 fsp->readahead_bytes += hsio->bp->b_bcount;
2388 2382
2389 2383 avl_add(&hqueue->deadline_tree, hsio);
2390 2384 avl_add(&hqueue->read_tree, hsio);
2391 2385
2392 2386 DTRACE_PROBE3(hsfs_io_enqueued, struct hio *, hsio,
2393 2387 struct hsfs_queue *, hqueue, int, ra);
2394 2388
2395 2389 mutex_exit(&hqueue->hsfs_queue_lock);
2396 2390 }
2397 2391
2398 2392 /* ARGSUSED */
2399 2393 static int
2400 2394 hsfs_pathconf(struct vnode *vp,
2401 2395 int cmd,
2402 2396 ulong_t *valp,
2403 2397 struct cred *cr,
2404 2398 caller_context_t *ct)
2405 2399 {
2406 2400 struct hsfs *fsp;
2407 2401
2408 2402 int error = 0;
2409 2403
2410 2404 switch (cmd) {
2411 2405
2412 2406 case _PC_NAME_MAX:
2413 2407 fsp = VFS_TO_HSFS(vp->v_vfsp);
2414 2408 *valp = fsp->hsfs_namemax;
2415 2409 break;
2416 2410
2417 2411 case _PC_FILESIZEBITS:
2418 2412 *valp = 33; /* Without multi extent support: 4 GB - 2k */
2419 2413 break;
2420 2414
2421 2415 case _PC_TIMESTAMP_RESOLUTION:
2422 2416 /*
2423 2417 * HSFS keeps, at best, 1/100 second timestamp resolution.
2424 2418 */
2425 2419 *valp = 10000000L;
2426 2420 break;
2427 2421
2428 2422 default:
2429 2423 error = fs_pathconf(vp, cmd, valp, cr, ct);
2430 2424 break;
2431 2425 }
2432 2426
2433 2427 return (error);
2434 2428 }
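
These limits are visible from userland through pathconf(2) on illumos; _PC_TIMESTAMP_RESOLUTION reports nanoseconds, so the 10000000L above corresponds to the 1/100 second granularity the comment mentions. A small illustrative query (the mount-point path below is hypothetical):

    /* Query pathconf(2) limits for a file on an HSFS mount (illustrative). */
    #include <stdio.h>
    #include <errno.h>
    #include <unistd.h>

    int
    main(void)
    {
    	const char *path = "/mnt/cdrom/somefile";  /* hypothetical path */
    	long res;

    	errno = 0;
    	res = pathconf(path, _PC_TIMESTAMP_RESOLUTION);
    	if (res == -1)
    		perror("pathconf");
    	else
    		printf("timestamp resolution: %ld ns\n", res);
    	return (0);
    }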
2435 2429
2436 2430
2437 2431
2438 2432 const fs_operation_def_t hsfs_vnodeops_template[] = {
2439 2433 VOPNAME_OPEN, { .vop_open = hsfs_open },
2440 2434 VOPNAME_CLOSE, { .vop_close = hsfs_close },
2441 2435 VOPNAME_READ, { .vop_read = hsfs_read },
2442 2436 VOPNAME_GETATTR, { .vop_getattr = hsfs_getattr },
2443 2437 VOPNAME_ACCESS, { .vop_access = hsfs_access },
2444 2438 VOPNAME_LOOKUP, { .vop_lookup = hsfs_lookup },
2445 2439 VOPNAME_READDIR, { .vop_readdir = hsfs_readdir },
2446 2440 VOPNAME_READLINK, { .vop_readlink = hsfs_readlink },
2447 2441 VOPNAME_FSYNC, { .vop_fsync = hsfs_fsync },
2448 2442 VOPNAME_INACTIVE, { .vop_inactive = hsfs_inactive },
2449 2443 VOPNAME_FID, { .vop_fid = hsfs_fid },
2450 2444 VOPNAME_SEEK, { .vop_seek = hsfs_seek },
2451 2445 VOPNAME_FRLOCK, { .vop_frlock = hsfs_frlock },
2452 2446 VOPNAME_GETPAGE, { .vop_getpage = hsfs_getpage },
2453 2447 VOPNAME_PUTPAGE, { .vop_putpage = hsfs_putpage },
2454 2448 VOPNAME_MAP, { .vop_map = hsfs_map },
2455 2449 VOPNAME_ADDMAP, { .vop_addmap = hsfs_addmap },
2456 2450 VOPNAME_DELMAP, { .vop_delmap = hsfs_delmap },
2457 2451 VOPNAME_PATHCONF, { .vop_pathconf = hsfs_pathconf },
2458 2452 NULL, NULL
2459 2453 };
2460 2454
2461 2455 struct vnodeops *hsfs_vnodeops;
788 lines elided