/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/user.h>
#include <sys/time.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/cred.h>
#include <sys/dirent.h>
#include <sys/pathname.h>
#include <sys/vmsystm.h>
#include <sys/fs/tmp.h>
#include <sys/fs/tmpnode.h>
#include <sys/mman.h>
#include <vm/hat.h>
#include <vm/seg_vn.h>
#include <vm/seg_map.h>
#include <vm/seg.h>
#include <vm/anon.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/swap.h>
#include <sys/buf.h>
#include <sys/vm.h>
#include <sys/vtrace.h>
#include <sys/policy.h>
#include <fs/fs_subr.h>

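/*
 * Pager routines backing the tmpfs getpage/putpage vnode operations
 * defined later in this file.
 */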
static int      tmp_getapage(struct vnode *, u_offset_t, size_t, uint_t *,
        page_t **, size_t, struct seg *, caddr_t, enum seg_rw, struct cred *);
static int      tmp_putapage(struct vnode *, page_t *, u_offset_t *, size_t *,
        int, struct cred *);

/* ARGSUSED1 */
static int
tmp_open(struct vnode **vpp, int flag, struct cred *cred, caller_context_t *ct)
{
        /*
         * swapon to a tmpfs file is not supported, so access
         * is denied on open if VISSWAP is set.
         */
        if ((*vpp)->v_flag & VISSWAP)
                return (EINVAL);
        return (0);
}

/* ARGSUSED1 */
static int
tmp_close(
        struct vnode *vp,
        int flag,
        int count,
        offset_t offset,
        struct cred *cred,
        caller_context_t *ct)
{
        cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
        cleanshares(vp, ttoproc(curthread)->p_pid);
        return (0);
}

/*
 * wrtmp does the real work of write requests for tmpfs.
 */
static int
wrtmp(
        struct tmount *tm,
        struct tmpnode *tp,
        struct uio *uio,
        struct cred *cr,
        struct caller_context *ct)
{
        pgcnt_t pageoffset;     /* byte offset within the page */
        ulong_t segmap_offset;  /* pagesize byte offset into segmap */
        caddr_t base;           /* base of segmap */
        ssize_t bytes;          /* bytes to uiomove */
        pfn_t pagenumber;       /* offset in pages into tmp file */
        struct vnode *vp;
        int error = 0;
        int     pagecreate;     /* == 1 if we allocated a page */
        int     newpage;
        rlim64_t limit = uio->uio_llimit;
        long oresid = uio->uio_resid;
        timestruc_t now;

        long tn_size_changed = 0;
        long old_tn_size;
        long new_tn_size;

        vp = TNTOV(tp);
        ASSERT(vp->v_type == VREG);

        TRACE_1(TR_FAC_TMPFS, TR_TMPFS_RWTMP_START,
            "tmp_wrtmp_start:vp %p", vp);

        ASSERT(RW_WRITE_HELD(&tp->tn_contents));
        ASSERT(RW_WRITE_HELD(&tp->tn_rwlock));

        if (MANDLOCK(vp, tp->tn_mode)) {
                rw_exit(&tp->tn_contents);
                /*
                 * tmp_getattr ends up being called by chklock
                 */
                error = chklock(vp, FWRITE, uio->uio_loffset, uio->uio_resid,
                    uio->uio_fmode, ct);
                rw_enter(&tp->tn_contents, RW_WRITER);
                if (error != 0) {
                        TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
                            "tmp_wrtmp_end:vp %p error %d", vp, error);
                        return (error);
                }
        }

        if (uio->uio_loffset < 0)
                return (EINVAL);

        if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
                limit = MAXOFFSET_T;

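        /*
         * The write begins at or beyond the file size limit: post the
         * RLIMIT_FSIZE resource-control action (which delivers SIGXFSZ)
         * and fail with EFBIG.
         */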
        if (uio->uio_loffset >= limit) {
                proc_t *p = ttoproc(curthread);

                mutex_enter(&p->p_lock);
                (void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE], p->p_rctls,
                    p, RCA_UNSAFE_SIGINFO);
                mutex_exit(&p->p_lock);
                return (EFBIG);
        }

        if (uio->uio_loffset >= MAXOFF_T) {
                TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
                    "tmp_wrtmp_end:vp %p error %d", vp, EFBIG);
                return (EFBIG);
        }

        if (uio->uio_resid == 0) {
                TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
                    "tmp_wrtmp_end:vp %p error %d", vp, 0);
                return (0);
        }

        if (limit > MAXOFF_T)
                limit = MAXOFF_T;

        do {
                long    offset;
                long    delta;

                offset = (long)uio->uio_offset;
                pageoffset = offset & PAGEOFFSET;
                /*
                 * A maximum of PAGESIZE bytes of data is transferred
                 * on each pass through this loop.
                 */
                bytes = MIN(PAGESIZE - pageoffset, uio->uio_resid);

                if (offset + bytes >= limit) {
                        if (offset >= limit) {
                                error = EFBIG;
                                goto out;
                        }
                        bytes = limit - offset;
                }
                pagenumber = btop(offset);

                /*
                 * delta is the amount of anonymous memory
                 * to reserve for the file.
                 * We always reserve in pagesize increments so
                 * unless we're extending the file into a new page,
                 * we don't need to call tmp_resv.
                 */
                delta = offset + bytes -
                    P2ROUNDUP_TYPED(tp->tn_size, PAGESIZE, u_offset_t);
                if (delta > 0) {
                        pagecreate = 1;
                        if (tmp_resv(tm, tp, delta, pagecreate)) {
                                /*
                                 * Log file system full in the zone that owns
                                 * the tmpfs mount, as well as in the global
                                 * zone if necessary.
                                 */
                                zcmn_err(tm->tm_vfsp->vfs_zone->zone_id,
                                    CE_WARN, "%s: File system full, "
                                    "swap space limit exceeded",
                                    tm->tm_mntpath);

                                if (tm->tm_vfsp->vfs_zone->zone_id !=
                                    GLOBAL_ZONEID) {

                                        vfs_t *vfs = tm->tm_vfsp;

                                        zcmn_err(GLOBAL_ZONEID,
                                            CE_WARN, "%s: File system full, "
                                            "swap space limit exceeded",
                                            vfs->vfs_vnodecovered->v_path);
                                }
                                error = ENOSPC;
                                break;
                        }
                        tmpnode_growmap(tp, (ulong_t)offset + bytes);
                }
                /* grow the file to the new length */
                if (offset + bytes > tp->tn_size) {
                        tn_size_changed = 1;
                        old_tn_size = tp->tn_size;
                        /*
                         * Postpone updating tp->tn_size until uiomove() is
                         * done.
                         */
                        new_tn_size = offset + bytes;
                }
                if (bytes == PAGESIZE) {
                        /*
                         * Writing whole page so reading from disk
                         * is a waste
                         */
                        pagecreate = 1;
                } else {
                        pagecreate = 0;
                }
                /*
                 * If writing past EOF or filling in a hole
                 * we need to allocate an anon slot.
                 */
                if (anon_get_ptr(tp->tn_anon, pagenumber) == NULL) {
                        (void) anon_set_ptr(tp->tn_anon, pagenumber,
                            anon_alloc(vp, ptob(pagenumber)), ANON_SLEEP);
                        pagecreate = 1;
                        tp->tn_nblocks++;
                }

                /*
                 * We have to drop the contents lock to allow the VM
                 * system to reacquire it in tmp_getpage()
                 */
                rw_exit(&tp->tn_contents);

                /*
                 * Touch the page and fault it in if it is not in core
                 * before segmap_getmapflt or vpm_data_copy can lock it.
                 * This avoids a deadlock when the user buffer is mmap()ed
                 * from the same file that we are writing to.
                 */
                uio_prefaultpages((long)bytes, uio);

                newpage = 0;
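                /*
                 * Two copy paths follow: with vpm_enable set, the VPM
                 * interface (vpm_data_copy()) maps the file pages and
                 * copies in a single call; otherwise the classic segmap
                 * driver maps a MAXBSIZE window of the file into kernel
                 * address space and we uiomove into that mapping.
                 */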
                if (vpm_enable) {
                        /*
                         * Copy data. If new pages are created, the part of
                         * the page that is not written will be initialized
                         * with zeros.
                         */
                        error = vpm_data_copy(vp, offset, bytes, uio,
                            !pagecreate, &newpage, 1, S_WRITE);
                } else {
                        /*
                         * Get the byte offset of this page within the
                         * MAXBSIZE window that segmap maps.
                         */
                        segmap_offset = (offset & PAGEMASK) & MAXBOFFSET;
                        base = segmap_getmapflt(segkmap, vp,
                            (offset & MAXBMASK), PAGESIZE, !pagecreate,
                            S_WRITE);
                }

                if (!vpm_enable && pagecreate) {
                        /*
                         * segmap_pagecreate() returns 1 if it calls
                         * page_create_va() to allocate any pages.
                         */
                        newpage = segmap_pagecreate(segkmap,
                            base + segmap_offset, (size_t)PAGESIZE, 0);
                        /*
                         * Clear from the beginning of the page to the starting
                         * offset of the data.
                         */
                        if (pageoffset != 0)
                                (void) kzero(base + segmap_offset,
                                    (size_t)pageoffset);
                }

                if (!vpm_enable) {
                        error = uiomove(base + segmap_offset + pageoffset,
                            (long)bytes, UIO_WRITE, uio);
                }

                if (!vpm_enable && pagecreate &&
                    uio->uio_offset < P2ROUNDUP(offset + bytes, PAGESIZE)) {
                        long    zoffset; /* zero from offset into page */
                        /*
                         * We created pages w/o initializing them completely,
                         * thus we need to zero the part that wasn't set up.
                         * This happens on most EOF write cases and if
                         * we had some sort of error during the uiomove.
                         */
                        long nmoved;

                        nmoved = uio->uio_offset - offset;
                        ASSERT((nmoved + pageoffset) <= PAGESIZE);

                        /*
                         * Zero from the end of data in the page to the
                         * end of the page.
                         */
                        if ((zoffset = pageoffset + nmoved) < PAGESIZE)
                                (void) kzero(base + segmap_offset + zoffset,
                                    (size_t)PAGESIZE - zoffset);
                }

                /*
                 * Unlock the pages which have been allocated by
                 * page_create_va() in segmap_pagecreate()
                 */
                if (!vpm_enable && newpage) {
                        segmap_pageunlock(segkmap, base + segmap_offset,
                            (size_t)PAGESIZE, S_WRITE);
                }

                if (error) {
                        /*
                         * If we failed on a write, we must
                         * be sure to invalidate any pages that may have
                         * been allocated.
                         */
                        if (vpm_enable) {
                                (void) vpm_sync_pages(vp, offset, PAGESIZE,
                                    SM_INVAL);
                        } else {
                                (void) segmap_release(segkmap, base, SM_INVAL);
                        }
                } else {
                        if (vpm_enable) {
                                error = vpm_sync_pages(vp, offset, PAGESIZE,
                                    0);
                        } else {
                                error = segmap_release(segkmap, base, 0);
                        }
                }

                /*
                 * Re-acquire contents lock.
                 */
                rw_enter(&tp->tn_contents, RW_WRITER);

                /*
                 * Update tn_size.
                 */
                if (tn_size_changed)
                        tp->tn_size = new_tn_size;

                /*
                 * If the uiomove failed, fix up tn_size.
                 */
                if (error) {
                        if (tn_size_changed) {
                                /*
                                 * The uiomove failed, and we allocated
                                 * blocks, so get rid of them.
                                 */
                                (void) tmpnode_trunc(tm, tp,
                                    (ulong_t)old_tn_size);
                        }
                } else {
                        /*
                         * XXX - Can this be out of the loop?
                         */
                        if ((tp->tn_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) &&
                            (tp->tn_mode & (S_ISUID | S_ISGID)) &&
                            secpolicy_vnode_setid_retain(cr,
                            (tp->tn_mode & S_ISUID) != 0 && tp->tn_uid == 0)) {
                                /*
                                 * Clear Set-UID & Set-GID bits on
                                 * successful write if not privileged
                                 * and at least one of the execute bits
                                 * is set.  If we always cleared Set-GID,
                                 * mandatory file and record locking would
                                 * be unusable.
                                 */
                                tp->tn_mode &= ~(S_ISUID | S_ISGID);
                        }
                        gethrestime(&now);
                        tp->tn_mtime = now;
                        tp->tn_ctime = now;
                }
        } while (error == 0 && uio->uio_resid > 0 && bytes != 0);

out:
        /*
         * If we've already done a partial write, terminate
         * the write but return no error.
         */
        if (oresid != uio->uio_resid)
                error = 0;
        TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
            "tmp_wrtmp_end:vp %p error %d", vp, error);
        return (error);
}

/*
 * rdtmp does the real work of read requests for tmpfs.
 */
static int
rdtmp(
        struct tmount *tm,
        struct tmpnode *tp,
        struct uio *uio,
        struct caller_context *ct)
{
        ulong_t pageoffset;     /* byte offset within the page */
        ulong_t segmap_offset;  /* pagesize byte offset into segmap */
        caddr_t base;           /* base of segmap */
        ssize_t bytes;          /* bytes to uiomove */
        struct vnode *vp;
        int error;
        long oresid = uio->uio_resid;

#if defined(lint)
        tm = tm;
#endif
        vp = TNTOV(tp);

        TRACE_1(TR_FAC_TMPFS, TR_TMPFS_RWTMP_START, "tmp_rdtmp_start:vp %p",
            vp);

        ASSERT(RW_LOCK_HELD(&tp->tn_contents));

        if (MANDLOCK(vp, tp->tn_mode)) {
                rw_exit(&tp->tn_contents);
                /*
                 * tmp_getattr ends up being called by chklock
                 */
                error = chklock(vp, FREAD, uio->uio_loffset, uio->uio_resid,
                    uio->uio_fmode, ct);
                rw_enter(&tp->tn_contents, RW_READER);
                if (error != 0) {
                        TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
                            "tmp_rdtmp_end:vp %p error %d", vp, error);
                        return (error);
                }
        }
        ASSERT(tp->tn_type == VREG);

        if (uio->uio_loffset >= MAXOFF_T) {
                TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
                    "tmp_rdtmp_end:vp %p error %d", vp, 0);
                return (0);
        }
        if (uio->uio_loffset < 0)
                return (EINVAL);
        if (uio->uio_resid == 0) {
                TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
                    "tmp_rdtmp_end:vp %p error %d", vp, 0);
                return (0);
        }

        do {
                long diff;
                long offset;

                offset = uio->uio_offset;
                pageoffset = offset & PAGEOFFSET;
                bytes = MIN(PAGESIZE - pageoffset, uio->uio_resid);

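                /*
                 * diff is the number of bytes between the current offset
                 * and the end of the file; zero or less means EOF.
                 */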
                diff = tp->tn_size - offset;

                if (diff <= 0) {
                        error = 0;
                        goto out;
                }
                if (diff < bytes)
                        bytes = diff;

                /*
                 * We have to drop the contents lock to allow the VM system
                 * to reacquire it in tmp_getpage() should the uiomove cause a
                 * pagefault.
                 */
                rw_exit(&tp->tn_contents);

                if (vpm_enable) {
                        /*
                         * Copy data.
                         */
                        error = vpm_data_copy(vp, offset, bytes, uio, 1, NULL,
                            0, S_READ);
                } else {
                        segmap_offset = (offset & PAGEMASK) & MAXBOFFSET;
                        base = segmap_getmapflt(segkmap, vp, offset & MAXBMASK,
                            bytes, 1, S_READ);

                        error = uiomove(base + segmap_offset + pageoffset,
                            (long)bytes, UIO_READ, uio);
                }

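                /*
                 * Drop the mapping.  On a uiomove error the release status
                 * is discarded; otherwise any error from releasing or
                 * syncing the pages is returned to the caller.
                 */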
                if (error) {
                        if (vpm_enable) {
                                (void) vpm_sync_pages(vp, offset, PAGESIZE, 0);
                        } else {
                                (void) segmap_release(segkmap, base, 0);
                        }
                } else {
                        if (vpm_enable) {
                                error = vpm_sync_pages(vp, offset, PAGESIZE,
                                    0);
                        } else {
                                error = segmap_release(segkmap, base, 0);
                        }
                }

                /*
                 * Re-acquire contents lock.
                 */
                rw_enter(&tp->tn_contents, RW_READER);

        } while (error == 0 && uio->uio_resid > 0);

out:
        gethrestime(&tp->tn_atime);

        /*
         * If we've already done a partial read, terminate
         * the read but return no error.
         */
        if (oresid != uio->uio_resid)
                error = 0;

        TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
            "tmp_rdtmp_end:vp %p error %d", vp, error);
        return (error);
}

/* ARGSUSED2 */
static int
tmp_read(struct vnode *vp, struct uio *uiop, int ioflag, cred_t *cred,
    struct caller_context *ct)
{
        struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
        struct tmount *tm = (struct tmount *)VTOTM(vp);
        int error;

        /*
         * We don't currently support reading non-regular files
         */
        if (vp->v_type == VDIR)
                return (EISDIR);
        if (vp->v_type != VREG)
                return (EINVAL);
        /*
         * tmp_rwlock should have already been called from layers above
         */
        ASSERT(RW_READ_HELD(&tp->tn_rwlock));

        rw_enter(&tp->tn_contents, RW_READER);

        error = rdtmp(tm, tp, uiop, ct);

        rw_exit(&tp->tn_contents);

        return (error);
}

static int
tmp_write(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
    struct caller_context *ct)
{
        struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
        struct tmount *tm = (struct tmount *)VTOTM(vp);
        int error;

        /*
         * We don't currently support writing to non-regular files
         */
        if (vp->v_type != VREG)
                return (EINVAL);        /* XXX EISDIR? */

        /*
         * tmp_rwlock should have already been called from layers above
         */
        ASSERT(RW_WRITE_HELD(&tp->tn_rwlock));

        rw_enter(&tp->tn_contents, RW_WRITER);

        if (ioflag & FAPPEND) {
                /*
                 * In append mode start at end of file.
                 */
                uiop->uio_loffset = tp->tn_size;
        }

        error = wrtmp(tm, tp, uiop, cred, ct);

        rw_exit(&tp->tn_contents);

        return (error);
}

/* ARGSUSED */
static int
tmp_ioctl(
        struct vnode *vp,
        int com,
        intptr_t data,
        int flag,
        struct cred *cred,
        int *rvalp,
        caller_context_t *ct)
{
        return (ENOTTY);
}

/* ARGSUSED2 */
static int
tmp_getattr(
        struct vnode *vp,
        struct vattr *vap,
        int flags,
        struct cred *cred,
        caller_context_t *ct)
{
        struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
        struct vnode *mvp;
        struct vattr va;
        int attrs = 1;

        /*
         * A special case to handle the root tnode on a diskless nfs
         * client that may have had its uid and gid inherited
         * from an nfs vnode with nobody ownership, likely the
         * root filesystem.  After nfs is fully functional the uid/gid
         * may be mappable, so ask again.
         * vfsp can't get unmounted because we hold vp.
         */
        if (vp->v_flag & VROOT &&
            (mvp = vp->v_vfsp->vfs_vnodecovered) != NULL) {
                mutex_enter(&tp->tn_tlock);
                if (tp->tn_uid == UID_NOBODY || tp->tn_gid == GID_NOBODY) {
                        mutex_exit(&tp->tn_tlock);
                        bzero(&va, sizeof (struct vattr));
                        va.va_mask = AT_UID|AT_GID;
                        attrs = VOP_GETATTR(mvp, &va, 0, cred, ct);
                } else {
                        mutex_exit(&tp->tn_tlock);
                }
        }
        mutex_enter(&tp->tn_tlock);
        if (attrs == 0) {
                tp->tn_uid = va.va_uid;
                tp->tn_gid = va.va_gid;
        }
        vap->va_type = vp->v_type;
        vap->va_mode = tp->tn_mode & MODEMASK;
        vap->va_uid = tp->tn_uid;
        vap->va_gid = tp->tn_gid;
        vap->va_fsid = tp->tn_fsid;
        vap->va_nodeid = (ino64_t)tp->tn_nodeid;
        vap->va_nlink = tp->tn_nlink;
        vap->va_size = (u_offset_t)tp->tn_size;
        vap->va_atime = tp->tn_atime;
        vap->va_mtime = tp->tn_mtime;
        vap->va_ctime = tp->tn_ctime;
        vap->va_blksize = PAGESIZE;
        vap->va_rdev = tp->tn_rdev;
        vap->va_seq = tp->tn_seq;

        /*
         * XXX Holes are not taken into account.  We could take the time to
         * run through the anon array looking for allocated slots...
         */
        vap->va_nblocks = (fsblkcnt64_t)btodb(ptob(btopr(vap->va_size)));
        mutex_exit(&tp->tn_tlock);
        return (0);
}

/*ARGSUSED4*/
static int
tmp_setattr(
        struct vnode *vp,
        struct vattr *vap,
        int flags,
        struct cred *cred,
        caller_context_t *ct)
{
        struct tmount *tm = (struct tmount *)VTOTM(vp);
        struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
        int error = 0;
        struct vattr *get;
        long mask;

        /*
         * Cannot set these attributes
         */
        if ((vap->va_mask & AT_NOSET) || (vap->va_mask & AT_XVATTR))
                return (EINVAL);

        mutex_enter(&tp->tn_tlock);

        get = &tp->tn_attr;
        /*
         * Change file access modes. Must be owner or have sufficient
         * privileges.
         */
        error = secpolicy_vnode_setattr(cred, vp, vap, get, flags, tmp_taccess,
            tp);

        if (error)
                goto out;

        mask = vap->va_mask;

        if (mask & AT_MODE) {
                get->va_mode &= S_IFMT;
                get->va_mode |= vap->va_mode & ~S_IFMT;
        }

        if (mask & AT_UID)
                get->va_uid = vap->va_uid;
        if (mask & AT_GID)
                get->va_gid = vap->va_gid;
        if (mask & AT_ATIME)
                get->va_atime = vap->va_atime;
        if (mask & AT_MTIME)
                get->va_mtime = vap->va_mtime;

        if (mask & (AT_UID | AT_GID | AT_MODE | AT_MTIME))
                gethrestime(&tp->tn_ctime);

        if (mask & AT_SIZE) {
                ASSERT(vp->v_type != VDIR);

                /* Don't support large files. */
                if (vap->va_size > MAXOFF_T) {
                        error = EFBIG;
                        goto out;
                }
                mutex_exit(&tp->tn_tlock);

                rw_enter(&tp->tn_rwlock, RW_WRITER);
                rw_enter(&tp->tn_contents, RW_WRITER);
                error = tmpnode_trunc(tm, tp, (ulong_t)vap->va_size);
                rw_exit(&tp->tn_contents);
                rw_exit(&tp->tn_rwlock);

                if (error == 0 && vap->va_size == 0)
                        vnevent_truncate(vp, ct);

                goto out1;
        }
out:
        mutex_exit(&tp->tn_tlock);
out1:
        return (error);
}

/* ARGSUSED2 */
static int
tmp_access(
        struct vnode *vp,
        int mode,
        int flags,
        struct cred *cred,
        caller_context_t *ct)
{
        struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
        int error;

        mutex_enter(&tp->tn_tlock);
        error = tmp_taccess(tp, mode, cred);
        mutex_exit(&tp->tn_tlock);
        return (error);
}

/* ARGSUSED3 */
static int
tmp_lookup(
        struct vnode *dvp,
        char *nm,
        struct vnode **vpp,
        struct pathname *pnp,
        int flags,
        struct vnode *rdir,
        struct cred *cred,
        caller_context_t *ct,
        int *direntflags,
        pathname_t *realpnp)
{
        struct tmpnode *tp = (struct tmpnode *)VTOTN(dvp);
        struct tmpnode *ntp = NULL;
        int error;


        /* allow cd into @ dir */
        if (flags & LOOKUP_XATTR) {
                struct tmpnode *xdp;
                struct tmount *tm;

                /*
                 * Don't allow attributes if the filesystem was not
                 * mounted with XATTR support.
                 */
                if (!(dvp->v_vfsp->vfs_flag & VFS_XATTR))
                        return (EINVAL);

                if (tp->tn_flags & ISXATTR)
                        /* No attributes on attributes */
                        return (EINVAL);

                rw_enter(&tp->tn_rwlock, RW_WRITER);
                if (tp->tn_xattrdp == NULL) {
                        if (!(flags & CREATE_XATTR_DIR)) {
                                rw_exit(&tp->tn_rwlock);
                                return (ENOENT);
                        }

                        /*
                         * No attribute directory exists for this
                         * node - create the attr dir as a side effect
                         * of this lookup.
                         */

                        /*
                         * Make sure we have adequate permission...
                         */

                        if ((error = tmp_taccess(tp, VWRITE, cred)) != 0) {
                                rw_exit(&tp->tn_rwlock);
                                return (error);
                        }

                        xdp = tmp_memalloc(sizeof (struct tmpnode),
                            TMP_MUSTHAVE);
                        tm = VTOTM(dvp);
                        tmpnode_init(tm, xdp, &tp->tn_attr, NULL);
                        /*
                         * Fix-up fields unique to attribute directories.
                         */
                        xdp->tn_flags = ISXATTR;
                        xdp->tn_type = VDIR;
                        if (tp->tn_type == VDIR) {
                                xdp->tn_mode = tp->tn_attr.va_mode;
                        } else {
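                                /*
                                 * For non-directories, start the attr dir
                                 * as owner-only and open it up to group or
                                 * other only when the file itself is group
                                 * or world readable.
                                 */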
                                xdp->tn_mode = 0700;
                                if (tp->tn_attr.va_mode & 0040)
                                        xdp->tn_mode |= 0750;
                                if (tp->tn_attr.va_mode & 0004)
                                        xdp->tn_mode |= 0705;
                        }
                        xdp->tn_vnode->v_type = VDIR;
                        xdp->tn_vnode->v_flag |= V_XATTRDIR;
                        tdirinit(tp, xdp);
                        tp->tn_xattrdp = xdp;
                } else {
                        VN_HOLD(tp->tn_xattrdp->tn_vnode);
                }
                *vpp = TNTOV(tp->tn_xattrdp);
                rw_exit(&tp->tn_rwlock);
                return (0);
        }

        /*
         * Null component name is a synonym for directory being searched.
         */
        if (*nm == '\0') {
                VN_HOLD(dvp);
                *vpp = dvp;
                return (0);
        }
        ASSERT(tp);

        error = tdirlookup(tp, nm, &ntp, cred);

        if (error == 0) {
                ASSERT(ntp);
                *vpp = TNTOV(ntp);
                /*
                 * If vnode is a device return special vnode instead
                 */
                if (IS_DEVVP(*vpp)) {
                        struct vnode *newvp;

                        newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
                            cred);
                        VN_RELE(*vpp);
                        *vpp = newvp;
                }
        }
        TRACE_4(TR_FAC_TMPFS, TR_TMPFS_LOOKUP,
            "tmpfs lookup:vp %p name %s vpp %p error %d",
            dvp, nm, vpp, error);
        return (error);
}

/*ARGSUSED7*/
static int
tmp_create(
        struct vnode *dvp,
        char *nm,
        struct vattr *vap,
        enum vcexcl exclusive,
        int mode,
        struct vnode **vpp,
        struct cred *cred,
        int flag,
        caller_context_t *ct,
        vsecattr_t *vsecp)
{
        struct tmpnode *parent;
        struct tmount *tm;
        struct tmpnode *self;
        int error;
        struct tmpnode *oldtp;

again:
        parent = (struct tmpnode *)VTOTN(dvp);
        tm = (struct tmount *)VTOTM(dvp);
        self = NULL;
        error = 0;
        oldtp = NULL;

        /* device files not allowed in ext. attr dirs */
        if ((parent->tn_flags & ISXATTR) &&
            (vap->va_type == VBLK || vap->va_type == VCHR ||
            vap->va_type == VFIFO || vap->va_type == VDOOR ||
            vap->va_type == VSOCK || vap->va_type == VPORT))
                return (EINVAL);

        if (vap->va_type == VREG && (vap->va_mode & VSVTX)) {
                /* Must be privileged to set sticky bit */
                if (secpolicy_vnode_stky_modify(cred))
                        vap->va_mode &= ~VSVTX;
        } else if (vap->va_type == VNON) {
                return (EINVAL);
        }

        /*
         * Null component name is a synonym for directory being searched.
         */
        if (*nm == '\0') {
                VN_HOLD(dvp);
                oldtp = parent;
        } else {
                error = tdirlookup(parent, nm, &oldtp, cred);
        }

        if (error == 0) {       /* name found */
                boolean_t trunc = B_FALSE;

                ASSERT(oldtp);

                rw_enter(&oldtp->tn_rwlock, RW_WRITER);

                /*
                 * if create/read-only an existing
                 * directory, allow it
                 */
                if (exclusive == EXCL)
                        error = EEXIST;
                else if ((oldtp->tn_type == VDIR) && (mode & VWRITE))
                        error = EISDIR;
                else {
                        error = tmp_taccess(oldtp, mode, cred);
                }

                if (error) {
                        rw_exit(&oldtp->tn_rwlock);
                        tmpnode_rele(oldtp);
                        return (error);
                }
                *vpp = TNTOV(oldtp);
                if ((*vpp)->v_type == VREG && (vap->va_mask & AT_SIZE) &&
                    vap->va_size == 0) {
                        rw_enter(&oldtp->tn_contents, RW_WRITER);
                        (void) tmpnode_trunc(tm, oldtp, 0);
                        rw_exit(&oldtp->tn_contents);
                        trunc = B_TRUE;
                }
                rw_exit(&oldtp->tn_rwlock);
                if (IS_DEVVP(*vpp)) {
                        struct vnode *newvp;

                        newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
                            cred);
                        VN_RELE(*vpp);
                        if (newvp == NULL) {
                                return (ENOSYS);
                        }
                        *vpp = newvp;
                }

                if (trunc)
                        vnevent_create(*vpp, ct);

                return (0);
        }

        if (error != ENOENT)
                return (error);

        rw_enter(&parent->tn_rwlock, RW_WRITER);
        error = tdirenter(tm, parent, nm, DE_CREATE,
            (struct tmpnode *)NULL, (struct tmpnode *)NULL,
            vap, &self, cred, ct);
        rw_exit(&parent->tn_rwlock);

        if (error) {
                if (self)
                        tmpnode_rele(self);

                if (error == EEXIST) {
                        /*
                         * This means that the file was created sometime
                         * between when we checked and did not find it, and
                         * when we went to create it.
                         * Since creat() is supposed to truncate a file
                         * that already exists, go back to the beginning
                         * of the function. This time we will find it
                         * and go down the tmpnode_trunc() path.
                         */
                        goto again;
                }
                return (error);
        }

        *vpp = TNTOV(self);

        if (!error && IS_DEVVP(*vpp)) {
                struct vnode *newvp;

                newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cred);
                VN_RELE(*vpp);
                if (newvp == NULL)
                        return (ENOSYS);
                *vpp = newvp;
        }
        TRACE_3(TR_FAC_TMPFS, TR_TMPFS_CREATE,
            "tmpfs create:dvp %p nm %s vpp %p", dvp, nm, vpp);
        return (0);
}

/* ARGSUSED3 */
static int
tmp_remove(
        struct vnode *dvp,
        char *nm,
        struct cred *cred,
        caller_context_t *ct,
        int flags)
{
        struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
        int error;
        struct tmpnode *tp = NULL;

        error = tdirlookup(parent, nm, &tp, cred);
        if (error)
                return (error);

        ASSERT(tp);
        rw_enter(&parent->tn_rwlock, RW_WRITER);
        rw_enter(&tp->tn_rwlock, RW_WRITER);

        if (tp->tn_type != VDIR ||
            (error = secpolicy_fs_linkdir(cred, dvp->v_vfsp)) == 0)
                error = tdirdelete(parent, tp, nm, DR_REMOVE, cred);

        rw_exit(&tp->tn_rwlock);
        rw_exit(&parent->tn_rwlock);
        vnevent_remove(TNTOV(tp), dvp, nm, ct);
        tmpnode_rele(tp);

        TRACE_3(TR_FAC_TMPFS, TR_TMPFS_REMOVE,
            "tmpfs remove:dvp %p nm %s error %d", dvp, nm, error);
        return (error);
}

/* ARGSUSED4 */
static int
tmp_link(
        struct vnode *dvp,
        struct vnode *srcvp,
        char *tnm,
        struct cred *cred,
        caller_context_t *ct,
        int flags)
{
        struct tmpnode *parent;
        struct tmpnode *from;
        struct tmount *tm = (struct tmount *)VTOTM(dvp);
        int error;
        struct tmpnode *found = NULL;
        struct vnode *realvp;

        if (VOP_REALVP(srcvp, &realvp, ct) == 0)
                srcvp = realvp;

        parent = (struct tmpnode *)VTOTN(dvp);
        from = (struct tmpnode *)VTOTN(srcvp);

        if ((srcvp->v_type == VDIR &&
            secpolicy_fs_linkdir(cred, dvp->v_vfsp)) ||
            (from->tn_uid != crgetuid(cred) && secpolicy_basic_link(cred)))
                return (EPERM);

        /*
         * Make sure the link is valid with respect to extended
         * attributes: we only support hard-linking an xattr in an
         * xattr directory to another xattr directory.
         */
        if ((from->tn_flags & ISXATTR) != (parent->tn_flags & ISXATTR))
                return (EINVAL);

        error = tdirlookup(parent, tnm, &found, cred);
        if (error == 0) {
                ASSERT(found);
                tmpnode_rele(found);
                return (EEXIST);
        }

        if (error != ENOENT)
                return (error);

        rw_enter(&parent->tn_rwlock, RW_WRITER);
        error = tdirenter(tm, parent, tnm, DE_LINK, (struct tmpnode *)NULL,
            from, NULL, (struct tmpnode **)NULL, cred, ct);
        rw_exit(&parent->tn_rwlock);
        if (error == 0) {
                vnevent_link(srcvp, ct);
        }
        return (error);
}

/* ARGSUSED5 */
static int
tmp_rename(
        struct vnode *odvp,     /* source parent vnode */
        char *onm,              /* source name */
        struct vnode *ndvp,     /* destination parent vnode */
        char *nnm,              /* destination name */
        struct cred *cred,
        caller_context_t *ct,
        int flags)
{
        struct tmpnode *fromparent;
        struct tmpnode *toparent;
        struct tmpnode *fromtp = NULL;  /* source tmpnode */
        struct tmount *tm = (struct tmount *)VTOTM(odvp);
        int error;
        int samedir = 0;        /* set if odvp == ndvp */
        struct vnode *realvp;

        if (VOP_REALVP(ndvp, &realvp, ct) == 0)
                ndvp = realvp;

        fromparent = (struct tmpnode *)VTOTN(odvp);
        toparent = (struct tmpnode *)VTOTN(ndvp);

        if ((fromparent->tn_flags & ISXATTR) != (toparent->tn_flags & ISXATTR))
                return (EINVAL);

        mutex_enter(&tm->tm_renamelck);

        /*
         * Look up tmpnode of file we're supposed to rename.
         */
        error = tdirlookup(fromparent, onm, &fromtp, cred);
        if (error) {
                mutex_exit(&tm->tm_renamelck);
                return (error);
        }

        /*
         * Make sure we can delete the old (source) entry.  This
         * requires write permission on the containing directory.  If
         * that directory is "sticky" it requires further checks.
         */
        if (((error = tmp_taccess(fromparent, VWRITE, cred)) != 0) ||
            (error = tmp_sticky_remove_access(fromparent, fromtp, cred)) != 0)
                goto done;

        /*
         * Check for renaming to or from '.' or '..' or that
         * fromtp == fromparent
         */
        if ((onm[0] == '.' &&
            (onm[1] == '\0' || (onm[1] == '.' && onm[2] == '\0'))) ||
            (nnm[0] == '.' &&
            (nnm[1] == '\0' || (nnm[1] == '.' && nnm[2] == '\0'))) ||
            (fromparent == fromtp)) {
                error = EINVAL;
                goto done;
        }

        samedir = (fromparent == toparent);
        /*
         * Make sure we can search and rename into the new
         * (destination) directory.
         */
        if (!samedir) {
                error = tmp_taccess(toparent, VEXEC|VWRITE, cred);
                if (error)
                        goto done;
        }

        /*
         * Link source to new target
         */
        rw_enter(&toparent->tn_rwlock, RW_WRITER);
        error = tdirenter(tm, toparent, nnm, DE_RENAME,
            fromparent, fromtp, (struct vattr *)NULL,
            (struct tmpnode **)NULL, cred, ct);
        rw_exit(&toparent->tn_rwlock);

        if (error) {
                /*
                 * ESAME isn't really an error; it indicates that the
                 * operation should not be done because the source and target
                 * are the same file, but that no error should be reported.
                 */
                if (error == ESAME)
                        error = 0;
                goto done;
        }
        vnevent_rename_src(TNTOV(fromtp), odvp, onm, ct);

        /*
         * Notify the target directory if not same as
         * source directory.
         */
        if (ndvp != odvp) {
                vnevent_rename_dest_dir(ndvp, ct);
        }

        /*
         * Unlink from source.
         */
        rw_enter(&fromparent->tn_rwlock, RW_WRITER);
        rw_enter(&fromtp->tn_rwlock, RW_WRITER);

        error = tdirdelete(fromparent, fromtp, onm, DR_RENAME, cred);

        /*
         * The following handles the case where our source tmpnode was
         * removed before we got to it.
         *
         * XXX We should also clean up properly in the case where tdirdelete
         * fails for some other reason.  Currently this case shouldn't happen.
         * (see 1184991).
         */
        if (error == ENOENT)
                error = 0;

        rw_exit(&fromtp->tn_rwlock);
        rw_exit(&fromparent->tn_rwlock);
done:
        tmpnode_rele(fromtp);
        mutex_exit(&tm->tm_renamelck);

        TRACE_5(TR_FAC_TMPFS, TR_TMPFS_RENAME,
            "tmpfs rename:ovp %p onm %s nvp %p nnm %s error %d", odvp, onm,
            ndvp, nnm, error);
        return (error);
}

/* ARGSUSED5 */
static int
tmp_mkdir(
        struct vnode *dvp,
        char *nm,
        struct vattr *va,
        struct vnode **vpp,
        struct cred *cred,
        caller_context_t *ct,
        int flags,
        vsecattr_t *vsecp)
{
        struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
        struct tmpnode *self = NULL;
        struct tmount *tm = (struct tmount *)VTOTM(dvp);
        int error;

        /* no new dirs allowed in xattr dirs */
        if (parent->tn_flags & ISXATTR)
                return (EINVAL);

        /*
         * Might be a dangling directory.  Catch it here,
         * because an ENOENT return from tdirlookup() is
         * an "o.k. return".
         */
        if (parent->tn_nlink == 0)
                return (ENOENT);

        error = tdirlookup(parent, nm, &self, cred);
        if (error == 0) {
                ASSERT(self);
                tmpnode_rele(self);
                return (EEXIST);
        }
        if (error != ENOENT)
                return (error);

        rw_enter(&parent->tn_rwlock, RW_WRITER);
        error = tdirenter(tm, parent, nm, DE_MKDIR, (struct tmpnode *)NULL,
            (struct tmpnode *)NULL, va, &self, cred, ct);
        if (error) {
                rw_exit(&parent->tn_rwlock);
                if (self)
                        tmpnode_rele(self);
                return (error);
        }
        rw_exit(&parent->tn_rwlock);
        *vpp = TNTOV(self);
        return (0);
}

/* ARGSUSED4 */
static int
tmp_rmdir(
        struct vnode *dvp,
        char *nm,
        struct vnode *cdir,
        struct cred *cred,
        caller_context_t *ct,
        int flags)
{
        struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
        struct tmpnode *self = NULL;
        struct vnode *vp;
        int error = 0;

        /*
         * Return error when removing . and ..
         */
        if (strcmp(nm, ".") == 0)
                return (EINVAL);
        if (strcmp(nm, "..") == 0)
                return (EEXIST); /* Should be ENOTEMPTY */
        error = tdirlookup(parent, nm, &self, cred);
        if (error)
                return (error);

        rw_enter(&parent->tn_rwlock, RW_WRITER);
        rw_enter(&self->tn_rwlock, RW_WRITER);

        vp = TNTOV(self);
        if (vp == dvp || vp == cdir) {
                error = EINVAL;
                goto done1;
        }
        if (self->tn_type != VDIR) {
                error = ENOTDIR;
                goto done1;
        }

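        /*
         * A directory's link count is two (for "." and the entry in its
         * parent) plus one per subdirectory, so more than two links means
         * the directory still contains subdirectories.
         */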
        mutex_enter(&self->tn_tlock);
        if (self->tn_nlink > 2) {
                mutex_exit(&self->tn_tlock);
                error = EEXIST;
                goto done1;
        }
        mutex_exit(&self->tn_tlock);

        if (vn_vfswlock(vp)) {
                error = EBUSY;
                goto done1;
        }
        if (vn_mountedvfs(vp) != NULL) {
                error = EBUSY;
                goto done;
        }

        /*
         * Check for an empty directory, i.e. one that only includes
         * entries for "." and "..".
         */
        if (self->tn_dirents > 2) {
                error = EEXIST;         /* SIGH should be ENOTEMPTY */
                /*
                 * Update atime because checking tn_dirents is logically
                 * equivalent to reading the directory
                 */
                gethrestime(&self->tn_atime);
                goto done;
        }

        error = tdirdelete(parent, self, nm, DR_RMDIR, cred);
done:
        vn_vfsunlock(vp);
done1:
        rw_exit(&self->tn_rwlock);
        rw_exit(&parent->tn_rwlock);
        vnevent_rmdir(TNTOV(self), dvp, nm, ct);
        tmpnode_rele(self);

        return (error);
}

/* ARGSUSED2 */
static int
tmp_readdir(
        struct vnode *vp,
        struct uio *uiop,
        struct cred *cred,
        int *eofp,
        caller_context_t *ct,
        int flags)
{
        struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
        struct tdirent *tdp;
        int error = 0;
        size_t namelen;
        struct dirent64 *dp;
        ulong_t offset;
        ulong_t total_bytes_wanted;
        long outcount = 0;
        long bufsize;
        int reclen;
        caddr_t outbuf;

        if (uiop->uio_loffset >= MAXOFF_T) {
                if (eofp)
                        *eofp = 1;
                return (0);
        }
        /*
         * assuming system call has already called tmp_rwlock
         */
        ASSERT(RW_READ_HELD(&tp->tn_rwlock));

        if (uiop->uio_iovcnt != 1)
                return (EINVAL);

        if (vp->v_type != VDIR)
                return (ENOTDIR);

        /*
         * There's a window here where someone could have removed
         * all the entries in the directory after we put a hold on the
         * vnode but before we grabbed the rwlock.  Just return.
         */
        if (tp->tn_dir == NULL) {
                if (tp->tn_nlink) {
                        panic("empty directory 0x%p", (void *)tp);
                        /*NOTREACHED*/
                }
                return (0);
        }

        /*
         * Get space for multiple directory entries
         */
        total_bytes_wanted = uiop->uio_iov->iov_len;
        bufsize = total_bytes_wanted + sizeof (struct dirent64);
        outbuf = kmem_alloc(bufsize, KM_SLEEP);
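        /*
         * The extra dirent64 of headroom above is defensive; the reclen
         * check in the loop below already bounds outcount by
         * total_bytes_wanted.
         */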

        dp = (struct dirent64 *)outbuf;

        offset = 0;
        tdp = tp->tn_dir;
        while (tdp) {
                namelen = strlen(tdp->td_name);      /* no +1 needed */
                offset = tdp->td_offset;
                if (offset >= uiop->uio_offset) {
                        reclen = (int)DIRENT64_RECLEN(namelen);
                        if (outcount + reclen > total_bytes_wanted) {
                                if (!outcount)
                                        /*
                                         * Buffer too small for any entries.
                                         */
                                        error = EINVAL;
                                break;
                        }
                        ASSERT(tdp->td_tmpnode != NULL);

                        /* use strncpy(9f) to zero out uninitialized bytes */

                        (void) strncpy(dp->d_name, tdp->td_name,
                            DIRENT64_NAMELEN(reclen));
                        dp->d_reclen = (ushort_t)reclen;
                        dp->d_ino = (ino64_t)tdp->td_tmpnode->tn_nodeid;
1524                         dp->d_off = (offset_t)tdp->td_offset + 1;
1525                         dp = (struct dirent64 *)
1526                             ((uintptr_t)dp + dp->d_reclen);
1527                         outcount += reclen;
1528                         ASSERT(outcount <= bufsize);
1529                 }
1530                 tdp = tdp->td_next;
1531         }
1532 
1533         if (!error)
1534                 error = uiomove(outbuf, outcount, UIO_READ, uiop);
1535 
1536         if (!error) {
1537                 /* If we reached the end of the list, our offset */
1538                 /* should now be just past the last entry. */
1539                 if (!tdp) {
1540                         offset += 1;
1541                         if (eofp)
1542                                 *eofp = 1;
1543                 } else if (eofp)
1544                         *eofp = 0;
1545                 uiop->uio_offset = offset;
1546         }
1547         gethrestime(&tp->tn_atime);
1548         kmem_free(outbuf, bufsize);
1549         return (error);
1550 }
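
     /*
      * Editorial sketch (not part of the original source): the entries
      * assembled above are what a libc readdir() loop ultimately
      * consumes; d_ino carries the tmpnode's tn_nodeid and d_off the
      * offset at which a later read resumes.  The path is hypothetical.
      *
      *     #include <stdio.h>
      *     #include <dirent.h>
      *
      *     int
      *     main(void)
      *     {
      *             DIR *dirp = opendir("/tmp");
      *             struct dirent *dep;
      *
      *             if (dirp == NULL)
      *                     return (1);
      *             while ((dep = readdir(dirp)) != NULL)
      *                     (void) printf("%llu %s\n",
      *                         (unsigned long long)dep->d_ino, dep->d_name);
      *             return (closedir(dirp) != 0);
      *     }
      */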
1551 
1552 /* ARGSUSED5 */
1553 static int
1554 tmp_symlink(
1555         struct vnode *dvp,
1556         char *lnm,
1557         struct vattr *tva,
1558         char *tnm,
1559         struct cred *cred,
1560         caller_context_t *ct,
1561         int flags)
1562 {
1563         struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
1564         struct tmpnode *self = (struct tmpnode *)NULL;
1565         struct tmount *tm = (struct tmount *)VTOTM(dvp);
1566         char *cp = NULL;
1567         int error;
1568         size_t len;
1569 
1570         /* no symlinks allowed to files in xattr dirs */
1571         if (parent->tn_flags & ISXATTR)
1572                 return (EINVAL);
1573 
1574         error = tdirlookup(parent, lnm, &self, cred);
1575         if (error == 0) {
1576                 /*
1577                  * The entry already exists
1578                  */
1579                 tmpnode_rele(self);
1580                 return (EEXIST);        /* historically returned 0 */
1581         }
1582 
1583         if (error != ENOENT) {
1584                 if (self != NULL)
1585                         tmpnode_rele(self);
1586                 return (error);
1587         }
1588 
1589         rw_enter(&parent->tn_rwlock, RW_WRITER);
1590         error = tdirenter(tm, parent, lnm, DE_CREATE, (struct tmpnode *)NULL,
1591             (struct tmpnode *)NULL, tva, &self, cred, ct);
1592         rw_exit(&parent->tn_rwlock);
1593 
1594         if (error) {
1595                 if (self)
1596                         tmpnode_rele(self);
1597                 return (error);
1598         }
1599         len = strlen(tnm) + 1;
1600         cp = tmp_memalloc(len, 0);
1601         if (cp == NULL) {
1602                 tmpnode_rele(self);
1603                 return (ENOSPC);
1604         }
1605         (void) strcpy(cp, tnm);
1606 
1607         self->tn_symlink = cp;
1608         self->tn_size = len - 1;
1609         tmpnode_rele(self);
1610         return (error);
1611 }
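
     /*
      * Editorial note: tn_size is set to strlen(tnm), i.e. the link
      * text is stored without its trailing NUL, which is why the copy
      * made by tmp_readlink() below is not NUL-terminated.  A
      * hypothetical userland caller (assuming <unistd.h> and
      * <stdio.h>) must terminate the buffer itself:
      *
      *     char buf[1024];
      *     ssize_t n;
      *
      *     if (symlink("/tmp/target", "/tmp/link") == 0 &&
      *         (n = readlink("/tmp/link", buf, sizeof (buf) - 1)) >= 0) {
      *             buf[n] = '\0';
      *             (void) printf("-> %s\n", buf);
      *     }
      */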
1612 
1613 /* ARGSUSED2 */
1614 static int
1615 tmp_readlink(
1616         struct vnode *vp,
1617         struct uio *uiop,
1618         struct cred *cred,
1619         caller_context_t *ct)
1620 {
1621         struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1622         int error = 0;
1623 
1624         if (vp->v_type != VLNK)
1625                 return (EINVAL);
1626 
1627         rw_enter(&tp->tn_rwlock, RW_READER);
1628         rw_enter(&tp->tn_contents, RW_READER);
1629         error = uiomove(tp->tn_symlink, tp->tn_size, UIO_READ, uiop);
1630         gethrestime(&tp->tn_atime);
1631         rw_exit(&tp->tn_contents);
1632         rw_exit(&tp->tn_rwlock);
1633         return (error);
1634 }
1635 
1636 /* ARGSUSED */
1637 static int
1638 tmp_fsync(
1639         struct vnode *vp,
1640         int syncflag,
1641         struct cred *cred,
1642         caller_context_t *ct)
1643 {
1644         return (0);
1645 }
1646 
1647 /* ARGSUSED */
1648 static void
1649 tmp_inactive(struct vnode *vp, struct cred *cred, caller_context_t *ct)
1650 {
1651         struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1652         struct tmount *tm = (struct tmount *)VFSTOTM(vp->v_vfsp);
1653 
1654         rw_enter(&tp->tn_rwlock, RW_WRITER);
1655 top:
1656         mutex_enter(&tp->tn_tlock);
1657         mutex_enter(&vp->v_lock);
1658         ASSERT(vp->v_count >= 1);
1659 
1660         /*
1661          * If we don't have the last hold or the link count is non-zero,
1662          * there's little to do -- just drop our hold.
1663          */
1664         if (vp->v_count > 1 || tp->tn_nlink != 0) {
1665                 vp->v_count--;
1666                 mutex_exit(&vp->v_lock);
1667                 mutex_exit(&tp->tn_tlock);
1668                 rw_exit(&tp->tn_rwlock);
1669                 return;
1670         }
1671 
1672         /*
1673          * We have the last hold *and* the link count is zero, so this
1674          * tmpnode is dead from the filesystem's viewpoint.  However,
1675          * if the tmpnode has any pages associated with it (i.e. if it's
1676          * a normal file with non-zero size), the tmpnode can still be
1677          * discovered by pageout or fsflush via the page vnode pointers.
1678          * In this case we must drop all our locks, truncate the tmpnode,
1679          * and try the whole dance again.
1680          */
1681         if (tp->tn_size != 0) {
1682                 if (tp->tn_type == VREG) {
1683                         mutex_exit(&vp->v_lock);
1684                         mutex_exit(&tp->tn_tlock);
1685                         rw_enter(&tp->tn_contents, RW_WRITER);
1686                         (void) tmpnode_trunc(tm, tp, 0);
1687                         rw_exit(&tp->tn_contents);
1688                         ASSERT(tp->tn_size == 0);
1689                         ASSERT(tp->tn_nblocks == 0);
1690                         goto top;
1691                 }
1692                 if (tp->tn_type == VLNK)
1693                         tmp_memfree(tp->tn_symlink, tp->tn_size + 1);
1694         }
1695 
1696         /*
1697          * Remove normal file/dir's xattr dir and xattrs.
1698          */
1699         if (tp->tn_xattrdp) {
1700                 struct tmpnode *xtp = tp->tn_xattrdp;
1701 
1702                 ASSERT(xtp->tn_flags & ISXATTR);
1703                 tmpnode_hold(xtp);
1704                 rw_enter(&xtp->tn_rwlock, RW_WRITER);
1705                 tdirtrunc(xtp);
1706                 DECR_COUNT(&xtp->tn_nlink, &xtp->tn_tlock);
1707                 tp->tn_xattrdp = NULL;
1708                 rw_exit(&xtp->tn_rwlock);
1709                 tmpnode_rele(xtp);
1710         }
1711 
1712         mutex_exit(&vp->v_lock);
1713         mutex_exit(&tp->tn_tlock);
1714         /* Here's our chance to send the invalid event while between locks */
1715         vn_invalid(TNTOV(tp));
1716         mutex_enter(&tm->tm_contents);
1717         if (tp->tn_forw == NULL)
1718                 tm->tm_rootnode->tn_back = tp->tn_back;
1719         else
1720                 tp->tn_forw->tn_back = tp->tn_back;
1721         tp->tn_back->tn_forw = tp->tn_forw;
1722         mutex_exit(&tm->tm_contents);
1723         rw_exit(&tp->tn_rwlock);
1724         rw_destroy(&tp->tn_rwlock);
1725         mutex_destroy(&tp->tn_tlock);
1726         vn_free(TNTOV(tp));
1727         tmp_memfree(tp, sizeof (struct tmpnode));
1728 }
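
     /*
      * Editorial sketch: the "goto top" dance in tmp_inactive() is the
      * usual last-hold teardown pattern.  An object that still has
      * pages can be rediscovered by pageout/fsflush, so the locks must
      * be dropped, the pages stripped, and the whole decision retried.
      * Schematically (hypothetical names):
      *
      *     top:
      *             take_locks(obj);
      *             if (!last_hold(obj) || still_linked(obj)) {
      *                     drop_hold_and_locks(obj);
      *                     return;
      *             }
      *             if (has_pages(obj)) {
      *                     drop_locks(obj);
      *                     truncate_pages(obj);
      *                     goto top;
      *             }
      *             unlink_and_free(obj);
      */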
1729 
1730 /* ARGSUSED2 */
1731 static int
1732 tmp_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
1733 {
1734         struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1735         struct tfid *tfid;
1736 
1737         if (fidp->fid_len < (sizeof (struct tfid) - sizeof (ushort_t))) {
1738                 fidp->fid_len = sizeof (struct tfid) - sizeof (ushort_t);
1739                 return (ENOSPC);
1740         }
1741 
1742         tfid = (struct tfid *)fidp;
1743         bzero(tfid, sizeof (struct tfid));
1744         tfid->tfid_len = (int)sizeof (struct tfid) - sizeof (ushort_t);
1745 
1746         tfid->tfid_ino = tp->tn_nodeid;
1747         tfid->tfid_gen = tp->tn_gen;
1748 
1749         return (0);
1750 }
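
     /*
      * Editorial note: VOP_FID() follows the usual two-step contract
      * visible above; a caller whose fid is too small gets ENOSPC back
      * with fid_len set to the size required.  A hedged sketch of a
      * kernel caller, as NFS does when building file handles:
      *
      *     fid_t fid;
      *
      *     bzero(&fid, sizeof (fid));
      *     fid.fid_len = MAXFIDSZ;
      *     if (VOP_FID(vp, &fid, NULL) == 0) {
      *             ... fid now identifies the file (tfid_ino, tfid_gen) ...
      *     }
      */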
1751 
1752 
1753 /*
1754  * Return all the pages from [off..off+len] in given file
1755  */
1756 /* ARGSUSED */
1757 static int
1758 tmp_getpage(
1759         struct vnode *vp,
1760         offset_t off,
1761         size_t len,
1762         uint_t *protp,
1763         page_t *pl[],
1764         size_t plsz,
1765         struct seg *seg,
1766         caddr_t addr,
1767         enum seg_rw rw,
1768         struct cred *cr,
1769         caller_context_t *ct)
1770 {
1771         int err = 0;
1772         struct tmpnode *tp = VTOTN(vp);
1773         anoff_t toff = (anoff_t)off;
1774         size_t tlen = len;
1775         u_offset_t tmpoff;
1776         timestruc_t now;
1777 
1778         rw_enter(&tp->tn_contents, RW_READER);
1779 
1780         if (off + len  > tp->tn_size + PAGEOFFSET) {
1781                 err = EFAULT;
1782                 goto out;
1783         }
1784         /*
1785          * Look for holes (no anon slot) in faulting range. If there are
1786          * holes we have to switch to a write lock and fill them in. Swap
1787          * space for holes was already reserved when the file was grown.
1788          */
1789         tmpoff = toff;
1790         if (non_anon(tp->tn_anon, btop(off), &tmpoff, &tlen)) {
1791                 if (!rw_tryupgrade(&tp->tn_contents)) {
1792                         rw_exit(&tp->tn_contents);
1793                         rw_enter(&tp->tn_contents, RW_WRITER);
1794                         /* Size may have changed when lock was dropped */
1795                         if (off + len  > tp->tn_size + PAGEOFFSET) {
1796                                 err = EFAULT;
1797                                 goto out;
1798                         }
1799                 }
1800                 for (toff = (anoff_t)off; toff < (anoff_t)off + len;
1801                     toff += PAGESIZE) {
1802                         if (anon_get_ptr(tp->tn_anon, btop(toff)) == NULL) {
1803                                 /* XXX - may allocate mem w. write lock held */
1804                                 (void) anon_set_ptr(tp->tn_anon, btop(toff),
1805                                     anon_alloc(vp, toff), ANON_SLEEP);
1806                                 tp->tn_nblocks++;
1807                         }
1808                 }
1809                 rw_downgrade(&tp->tn_contents);
1810         }
1811 
1812 
1813         err = pvn_getpages(tmp_getapage, vp, (u_offset_t)off, len, protp,
1814             pl, plsz, seg, addr, rw, cr);
1815 
1816         gethrestime(&now);
1817         tp->tn_atime = now;
1818         if (rw == S_WRITE)
1819                 tp->tn_mtime = now;
1820 
1821 out:
1822         rw_exit(&tp->tn_contents);
1823         return (err);
1824 }
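
     /*
      * Editorial sketch: the hole-filling path above uses the standard
      * rw_tryupgrade() idiom; upgrade opportunistically, and when that
      * fails, re-enter as writer and revalidate anything decided under
      * the read lock, since the lock was dropped in between:
      *
      *     rw_enter(&lk, RW_READER);
      *     if (must_modify()) {
      *             if (!rw_tryupgrade(&lk)) {
      *                     rw_exit(&lk);
      *                     rw_enter(&lk, RW_WRITER);
      *                     revalidate();
      *             }
      *             do_modify();
      *             rw_downgrade(&lk);
      *     }
      *     rw_exit(&lk);
      */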
1825 
1826 /*
1827  * Called from pvn_getpages to get a particular page.
1828  */
1829 /*ARGSUSED*/
1830 static int
1831 tmp_getapage(
1832         struct vnode *vp,
1833         u_offset_t off,
1834         size_t len,
1835         uint_t *protp,
1836         page_t *pl[],
1837         size_t plsz,
1838         struct seg *seg,
1839         caddr_t addr,
1840         enum seg_rw rw,
1841         struct cred *cr)
1842 {
1843         struct page *pp;
1844         int flags;
1845         int err = 0;
1846         struct vnode *pvp;
1847         u_offset_t poff;
1848 
1849         if (protp != NULL)
1850                 *protp = PROT_ALL;
1851 again:
1852         if (pp = page_lookup(vp, off, rw == S_CREATE ? SE_EXCL : SE_SHARED)) {
1853                 if (pl) {
1854                         pl[0] = pp;
1855                         pl[1] = NULL;
1856                 } else {
1857                         page_unlock(pp);
1858                 }
1859         } else {
1860                 pp = page_create_va(vp, off, PAGESIZE,
1861                     PG_WAIT | PG_EXCL, seg, addr);
1862                 /*
1863                  * Someone raced in and created the page after we did the
1864                  * lookup but before we did the create, so go back and
1865                  * try to look it up again.
1866                  */
1867                 if (pp == NULL)
1868                         goto again;
1869                 /*
1870                  * Fill page from backing store, if any. If none, then
1871                  * either this is a newly filled hole or the page must
1872                  * have been unmodified and freed, so just zero it out.
1873                  */
1874                 err = swap_getphysname(vp, off, &pvp, &poff);
1875                 if (err) {
1876                         panic("tmp_getapage: no anon slot vp %p "
1877                             "off %llx pp %p\n", (void *)vp, off, (void *)pp);
1878                 }
1879                 if (pvp) {
1880                         flags = (pl == NULL ? B_ASYNC|B_READ : B_READ);
1881                         err = VOP_PAGEIO(pvp, pp, (u_offset_t)poff, PAGESIZE,
1882                             flags, cr, NULL);
1883                         if (flags & B_ASYNC)
1884                                 pp = NULL;
1885                 } else if (rw != S_CREATE) {
1886                         pagezero(pp, 0, PAGESIZE);
1887                 }
1888                 if (err && pp)
1889                         pvn_read_done(pp, B_ERROR);
1890                 if (err == 0) {
1891                         if (pl)
1892                                 pvn_plist_init(pp, pl, plsz, off, PAGESIZE, rw);
1893                         else
1894                                 pvn_io_done(pp);
1895                 }
1896         }
1897         return (err);
1898 }
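
     /*
      * Editorial sketch: the again: loop above is the classic
      * lookup-or-create race pattern.  If page_create_va() returns
      * NULL, another thread created the page first, and a fresh lookup
      * will find it.  Reduced to its core:
      *
      *     again:
      *             if ((pp = page_lookup(vp, off, se)) == NULL) {
      *                     pp = page_create_va(vp, off, PAGESIZE,
      *                         PG_WAIT | PG_EXCL, seg, addr);
      *                     if (pp == NULL)
      *                             goto again;
      *             }
      */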
1899 
1900 
1901 /*
1902  * Flags are composed of {B_INVAL, B_DIRTY, B_FREE, B_DONTNEED}.
1903  * If len == 0, do from off to EOF.
1904  */
1905 static int tmp_nopage = 0;      /* Don't do tmp_putpage if set */
1906 
1907 /* ARGSUSED */
1908 int
1909 tmp_putpage(
1910         register struct vnode *vp,
1911         offset_t off,
1912         size_t len,
1913         int flags,
1914         struct cred *cr,
1915         caller_context_t *ct)
1916 {
1917         register page_t *pp;
1918         u_offset_t io_off;
1919         size_t io_len = 0;
1920         int err = 0;
1921         struct tmpnode *tp = VTOTN(vp);
1922         int dolock;
1923 
1924         if (tmp_nopage)
1925                 return (0);
1926 
1927         ASSERT(vp->v_count != 0);
1928 
1929         if (vp->v_flag & VNOMAP)
1930                 return (ENOSYS);
1931 
1932         /*
1933          * This being tmpfs, we don't ever do i/o unless we really
1934          * have to (when we're low on memory and pageout calls us
1935          * with B_ASYNC | B_FREE or the user explicitly asks for it with
1936          * B_DONTNEED).
1937          * XXX To track the mod time approximately, as ufs does, we
1938          * should update the times here. The problem is that once someone
1939          * does a store we never clear the mod bit and do i/o, so fsflush
1940          * will keep calling us every 30 seconds to do the i/o and we'll
1941          * continually update the mod time. At least we update the mod
1942          * time on the first store, because that results in a call to getpage.
1943          */
1944         if (flags != (B_ASYNC | B_FREE) && (flags & B_INVAL) == 0 &&
1945             (flags & B_DONTNEED) == 0)
1946                 return (0);
1947         /*
1948          * If this thread owns the lock, i.e., this thread grabbed it
1949          * as writer somewhere above, then we don't need to grab the
1950          * lock as reader in this routine.
1951          */
1952         dolock = (rw_owner(&tp->tn_contents) != curthread);
1953 
1954         /*
1955          * If this is pageout don't block on the lock as you could deadlock
1956          * when freemem == 0 (another thread has the read lock and is blocked
1957          * creating a page, and a third thread is waiting to get the writers
1958          * lock - waiting-writer priority blocks us from getting the read
1959          * lock). Of course, if the only freeable pages are on this tmpnode
1960          * we're hosed anyway. A better solution might be a new lock type.
1961          * Note: ufs has the same problem.
1962          */
1963         if (curproc == proc_pageout) {
1964                 if (!rw_tryenter(&tp->tn_contents, RW_READER))
1965                         return (ENOMEM);
1966         } else if (dolock)
1967                 rw_enter(&tp->tn_contents, RW_READER);
1968 
1969         if (!vn_has_cached_data(vp))
1970                 goto out;
1971 
1972         if (len == 0) {
1973                 if (curproc == proc_pageout) {
1974                         panic("tmp: pageout can't block");
1975                         /*NOTREACHED*/
1976                 }
1977 
1978                 /* Search the entire vp list for pages >= off. */
1979                 err = pvn_vplist_dirty(vp, (u_offset_t)off, tmp_putapage,
1980                     flags, cr);
1981         } else {
1982                 u_offset_t eoff;
1983 
1984                 /*
1985                  * Loop over all offsets in the range [off...off + len]
1986                  * looking for pages to deal with.
1987                  */
1988                 eoff = MIN(off + len, tp->tn_size);
1989                 for (io_off = off; io_off < eoff; io_off += io_len) {
1990                         /*
1991                          * If we are not invalidating, synchronously
1992                          * freeing or writing pages use the routine
1993                          * page_lookup_nowait() to prevent reclaiming
1994                          * them from the free list.
1995                          */
1996                         if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
1997                                 pp = page_lookup(vp, io_off,
1998                                     (flags & (B_INVAL | B_FREE)) ?
1999                                     SE_EXCL : SE_SHARED);
2000                         } else {
2001                                 pp = page_lookup_nowait(vp, io_off,
2002                                     (flags & B_FREE) ? SE_EXCL : SE_SHARED);
2003                         }
2004 
2005                         if (pp == NULL || pvn_getdirty(pp, flags) == 0)
2006                                 io_len = PAGESIZE;
2007                         else {
2008                                 err = tmp_putapage(vp, pp, &io_off, &io_len,
2009                                     flags, cr);
2010                                 if (err != 0)
2011                                         break;
2012                         }
2013                 }
2014         }
2015         /* If invalidating, verify all pages on vnode list are gone. */
2016         if (err == 0 && off == 0 && len == 0 &&
2017             (flags & B_INVAL) && vn_has_cached_data(vp)) {
2018                 panic("tmp_putpage: B_INVAL, pages not gone");
2019                 /*NOTREACHED*/
2020         }
2021 out:
2022         if ((curproc == proc_pageout) || dolock)
2023                 rw_exit(&tp->tn_contents);
2024         /*
2025          * The only reason putapage will return SE_NOSWAP is that we
2026          * asked for a page to be written to physical backing store
2027          * and there is none. Ignore it, since we might be dealing
2028          * with a swap page that has no backing store on disk; in
2029          * any other case we won't see this error here.
2030          */
2031         if (err == SE_NOSWAP)
2032                 err = 0;
2033         return (err);
2034 }
2035 
2036 long tmp_putpagecnt, tmp_pagespushed;
2037 
2038 /*
2039  * Write out a single page.
2040  * For tmpfs this means choose a physical swap slot and write the page
2041  * out using VOP_PAGEIO. For performance, we attempt to kluster; i.e.,
2042  * we try to find a bunch of other dirty pages adjacent in the file
2043  * and a bunch of contiguous swap slots, and then write all the pages
2044  * out in a single i/o.
2045  */
2046 /*ARGSUSED*/
2047 static int
2048 tmp_putapage(
2049         struct vnode *vp,
2050         page_t *pp,
2051         u_offset_t *offp,
2052         size_t *lenp,
2053         int flags,
2054         struct cred *cr)
2055 {
2056         int err;
2057         ulong_t klstart, kllen;
2058         page_t *pplist, *npplist;
2059         extern int klustsize;
2060         long tmp_klustsize;
2061         struct tmpnode *tp;
2062         size_t pp_off, pp_len;
2063         u_offset_t io_off;
2064         size_t io_len;
2065         struct vnode *pvp;
2066         u_offset_t pstart;
2067         u_offset_t offset;
2068         u_offset_t tmpoff;
2069 
2070         ASSERT(PAGE_LOCKED(pp));
2071 
2072         /* Kluster in tmp_klustsize chunks */
2073         tp = VTOTN(vp);
2074         tmp_klustsize = klustsize;
2075         offset = pp->p_offset;
2076         klstart = (offset / tmp_klustsize) * tmp_klustsize;
2077         kllen = MIN(tmp_klustsize, tp->tn_size - klstart);
2078 
2079         /* Get a kluster of pages */
2080         pplist =
2081             pvn_write_kluster(vp, pp, &tmpoff, &pp_len, klstart, kllen, flags);
2082 
2083         pp_off = (size_t)tmpoff;
2084 
2085         /*
2086          * Get a cluster of physical offsets for the pages; the amount we
2087          * get may be some subrange of what we ask for (io_off, io_len).
2088          */
2089         io_off = pp_off;
2090         io_len = pp_len;
2091         err = swap_newphysname(vp, offset, &io_off, &io_len, &pvp, &pstart);
2092         ASSERT(err != SE_NOANON); /* anon slot must have been filled */
2093         if (err) {
2094                 pvn_write_done(pplist, B_ERROR | B_WRITE | flags);
2095                 /*
2096                  * If this routine is called as a result of a segvn_sync
2097                  * operation and we have no physical swap, we can get an
2098                  * error here; in that case it is SE_NOSWAP, which is the
2099                  * only error we expect at this point.
2100                  */
2101                 ASSERT(err == SE_NOSWAP);
2102                 if (flags & B_INVAL)
2103                         err = ENOMEM;
2104                 goto out;
2105         }
2106         ASSERT(pp_off <= io_off && io_off + io_len <= pp_off + pp_len);
2107         ASSERT(io_off <= offset && offset < io_off + io_len);
2108 
2109         /* Toss pages at front/rear that we couldn't get physical backing for */
2110         if (io_off != pp_off) {
2111                 npplist = NULL;
2112                 page_list_break(&pplist, &npplist, btop(io_off - pp_off));
2113                 ASSERT(pplist->p_offset == pp_off);
2114                 ASSERT(pplist->p_prev->p_offset == io_off - PAGESIZE);
2115                 pvn_write_done(pplist, B_ERROR | B_WRITE | flags);
2116                 pplist = npplist;
2117         }
2118         if (io_off + io_len < pp_off + pp_len) {
2119                 npplist = NULL;
2120                 page_list_break(&pplist, &npplist, btop(io_len));
2121                 ASSERT(npplist->p_offset == io_off + io_len);
2122                 ASSERT(npplist->p_prev->p_offset == pp_off + pp_len - PAGESIZE);
2123                 pvn_write_done(npplist, B_ERROR | B_WRITE | flags);
2124         }
2125 
2126         ASSERT(pplist->p_offset == io_off);
2127         ASSERT(pplist->p_prev->p_offset == io_off + io_len - PAGESIZE);
2128         ASSERT(btopr(io_len) <= btopr(kllen));
2129 
2130         /* Do i/o on the remaining kluster */
2131         err = VOP_PAGEIO(pvp, pplist, (u_offset_t)pstart, io_len,
2132             B_WRITE | flags, cr, NULL);
2133 
2134         if ((flags & B_ASYNC) == 0) {
2135                 pvn_write_done(pplist, ((err) ? B_ERROR : 0) | B_WRITE | flags);
2136         }
2137 out:
2138         if (!err) {
2139                 if (offp)
2140                         *offp = io_off;
2141                 if (lenp)
2142                         *lenp = io_len;
2143                 tmp_putpagecnt++;
2144                 tmp_pagespushed += btop(io_len);
2145         }
2146         if (err && err != ENOMEM && err != SE_NOSWAP)
2147                 cmn_err(CE_WARN, "tmp_putapage: err %d\n", err);
2148         return (err);
2149 }
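
     /*
      * Editorial worked example of the kluster arithmetic above,
      * assuming klustsize is 0x10000 and tn_size is 0x24000: a dirty
      * page at p_offset 0x15000 gives
      *
      *     klstart = (0x15000 / 0x10000) * 0x10000 = 0x10000
      *     kllen   = MIN(0x10000, 0x24000 - 0x10000)
      *             = MIN(0x10000, 0x14000) = 0x10000
      *
      * so pvn_write_kluster() may gather any dirty pages in
      * [0x10000, 0x20000) into a single i/o.
      */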
2150 
2151 /* ARGSUSED */
2152 static int
2153 tmp_map(
2154         struct vnode *vp,
2155         offset_t off,
2156         struct as *as,
2157         caddr_t *addrp,
2158         size_t len,
2159         uchar_t prot,
2160         uchar_t maxprot,
2161         uint_t flags,
2162         struct cred *cred,
2163         caller_context_t *ct)
2164 {
2165         struct segvn_crargs vn_a;
2166         struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
2167         int error;
2168 
2169 #ifdef _ILP32
2170         if (len > MAXOFF_T)
2171                 return (ENOMEM);
2172 #endif
2173 
2174         if (vp->v_flag & VNOMAP)
2175                 return (ENOSYS);
2176 
2177         if (off < 0 || (offset_t)(off + len) < 0 ||
2178             off > MAXOFF_T || (off + len) > MAXOFF_T)
2179                 return (ENXIO);
2180 
2181         if (vp->v_type != VREG)
2182                 return (ENODEV);
2183 
2184         /*
2185          * Don't allow mapping of a file with mandatory locks
2186          */
2187         if (vn_has_mandatory_locks(vp, tp->tn_mode)) {
2188                 return (EAGAIN);
2189         }
2190 
2191         as_rangelock(as);
2192         error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
2193         if (error != 0) {
2194                 as_rangeunlock(as);
2195                 return (error);
2196         }
2197 
2198         vn_a.vp = vp;
2199         vn_a.offset = (u_offset_t)off;
2200         vn_a.type = flags & MAP_TYPE;
2201         vn_a.prot = prot;
2202         vn_a.maxprot = maxprot;
2203         vn_a.flags = flags & ~MAP_TYPE;
2204         vn_a.cred = cred;
2205         vn_a.amp = NULL;
2206         vn_a.szc = 0;
2207         vn_a.lgrp_mem_policy_flags = 0;
2208 
2209         error = as_map(as, *addrp, len, segvn_create, &vn_a);
2210         as_rangeunlock(as);
2211         return (error);
2212 }
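
     /*
      * Editorial userland sketch: a MAP_SHARED mapping of a tmpfs file
      * runs through the segvn setup above, and the first store then
      * faults in through tmp_getpage().  Path and size are
      * hypothetical; assumes <sys/mman.h>, <fcntl.h> and <unistd.h>.
      *
      *     int fd = open("/tmp/f", O_RDWR | O_CREAT, 0644);
      *     char *p;
      *
      *     (void) ftruncate(fd, 8192);
      *     p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
      *         MAP_SHARED, fd, 0);
      *     if (p != MAP_FAILED)
      *             p[0] = 'x';
      */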
2213 
2214 /*
2215  * tmp_addmap and tmp_delmap are no-ops: tmpfs keeps no per-mapping
2216  * state, so there is nothing to do when a mapping is added or removed.
2217  */
2218 /* ARGSUSED */
2219 static int
2220 tmp_addmap(
2221         struct vnode *vp,
2222         offset_t off,
2223         struct as *as,
2224         caddr_t addr,
2225         size_t len,
2226         uchar_t prot,
2227         uchar_t maxprot,
2228         uint_t flags,
2229         struct cred *cred,
2230         caller_context_t *ct)
2231 {
2232         return (0);
2233 }
2234 
2235 /* ARGSUSED */
2236 static int
2237 tmp_delmap(
2238         struct vnode *vp,
2239         offset_t off,
2240         struct as *as,
2241         caddr_t addr,
2242         size_t len,
2243         uint_t prot,
2244         uint_t maxprot,
2245         uint_t flags,
2246         struct cred *cred,
2247         caller_context_t *ct)
2248 {
2249         return (0);
2250 }
2251 
2252 static int
2253 tmp_freesp(struct vnode *vp, struct flock64 *lp, int flag)
2254 {
2255         register int i;
2256         register struct tmpnode *tp = VTOTN(vp);
2257         int error;
2258 
2259         ASSERT(vp->v_type == VREG);
2260         ASSERT(lp->l_start >= 0);
2261 
2262         if (lp->l_len != 0)
2263                 return (EINVAL);
2264 
2265         rw_enter(&tp->tn_rwlock, RW_WRITER);
2266         if (tp->tn_size == lp->l_start) {
2267                 rw_exit(&tp->tn_rwlock);
2268                 return (0);
2269         }
2270 
2271         /*
2272          * Check for any mandatory locks on the range
2273          */
2274         if (MANDLOCK(vp, tp->tn_mode)) {
2275                 long save_start;
2276 
2277                 save_start = lp->l_start;
2278 
2279                 if (tp->tn_size < lp->l_start) {
2280                         /*
2281                          * "Truncate up" case: need to make sure there
2282                          * is no lock beyond current end-of-file. To
2283                          * do so, we need to set l_start to the size
2284                          * of the file temporarily.
2285                          */
2286                         lp->l_start = tp->tn_size;
2287                 }
2288                 lp->l_type = F_WRLCK;
2289                 lp->l_sysid = 0;
2290                 lp->l_pid = ttoproc(curthread)->p_pid;
2291                 i = (flag & (FNDELAY|FNONBLOCK)) ? 0 : SLPFLCK;
2292                 if ((i = reclock(vp, lp, i, 0, lp->l_start, NULL)) != 0 ||
2293                     lp->l_type != F_UNLCK) {
2294                         rw_exit(&tp->tn_rwlock);
2295                         return (i ? i : EAGAIN);
2296                 }
2297 
2298                 lp->l_start = save_start;
2299         }
2301 
2302         rw_enter(&tp->tn_contents, RW_WRITER);
2303         error = tmpnode_trunc((struct tmount *)VFSTOTM(vp->v_vfsp),
2304             tp, (ulong_t)lp->l_start);
2305         rw_exit(&tp->tn_contents);
2306         rw_exit(&tp->tn_rwlock);
2307         return (error);
2308 }
2309 
2310 /* ARGSUSED */
2311 static int
2312 tmp_space(
2313         struct vnode *vp,
2314         int cmd,
2315         struct flock64 *bfp,
2316         int flag,
2317         offset_t offset,
2318         cred_t *cred,
2319         caller_context_t *ct)
2320 {
2321         int error;
2322 
2323         if (cmd != F_FREESP)
2324                 return (EINVAL);
2325         if ((error = convoff(vp, bfp, 0, (offset_t)offset)) == 0) {
2326                 if ((bfp->l_start > MAXOFF_T) || (bfp->l_len > MAXOFF_T))
2327                         return (EFBIG);
2328                 error = tmp_freesp(vp, bfp, flag);
2329 
2330                 if (error == 0 && bfp->l_start == 0)
2331                         vnevent_truncate(vp, ct);
2332         }
2333         return (error);
2334 }
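
     /*
      * Editorial userland sketch: fcntl(F_FREESP) with l_len == 0 is
      * how a truncation request reaches tmp_freesp() above (fd is
      * hypothetical; assumes <fcntl.h>):
      *
      *     struct flock fl;
      *
      *     fl.l_whence = SEEK_SET;
      *     fl.l_start = 4096;
      *     fl.l_len = 0;
      *     if (fcntl(fd, F_FREESP, &fl) == 0)
      *             ... the file now ends at offset 4096 ...
      */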
2335 
2336 /* ARGSUSED */
2337 static int
2338 tmp_seek(
2339         struct vnode *vp,
2340         offset_t ooff,
2341         offset_t *noffp,
2342         caller_context_t *ct)
2343 {
2344         return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
2345 }
2346 
2347 /* ARGSUSED2 */
2348 static int
2349 tmp_rwlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
2350 {
2351         struct tmpnode *tp = VTOTN(vp);
2352 
2353         if (write_lock) {
2354                 rw_enter(&tp->tn_rwlock, RW_WRITER);
2355         } else {
2356                 rw_enter(&tp->tn_rwlock, RW_READER);
2357         }
2358         return (write_lock);
2359 }
2360 
2361 /* ARGSUSED1 */
2362 static void
2363 tmp_rwunlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
2364 {
2365         struct tmpnode *tp = VTOTN(vp);
2366 
2367         rw_exit(&tp->tn_rwlock);
2368 }
2369 
2370 static int
2371 tmp_pathconf(
2372         struct vnode *vp,
2373         int cmd,
2374         ulong_t *valp,
2375         cred_t *cr,
2376         caller_context_t *ct)
2377 {
2378         struct tmpnode *tp = NULL;
2379         int error;
2380 
2381         switch (cmd) {
2382         case _PC_XATTR_EXISTS:
2383                 if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
2384                         *valp = 0;      /* assume no attributes */
2385                         error = 0;      /* okay to ask */
2386                         tp = VTOTN(vp);
2387                         rw_enter(&tp->tn_rwlock, RW_READER);
2388                         if (tp->tn_xattrdp) {
2389                                 rw_enter(&tp->tn_xattrdp->tn_rwlock, RW_READER);
2390                                 /* do not count "." and ".." */
2391                                 if (tp->tn_xattrdp->tn_dirents > 2)
2392                                         *valp = 1;
2393                                 rw_exit(&tp->tn_xattrdp->tn_rwlock);
2394                         }
2395                         rw_exit(&tp->tn_rwlock);
2396                 } else {
2397                         error = EINVAL;
2398                 }
2399                 break;
2400         case _PC_SATTR_ENABLED:
2401         case _PC_SATTR_EXISTS:
2402                 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
2403                     (vp->v_type == VREG || vp->v_type == VDIR);
2404                 error = 0;
2405                 break;
2406         case _PC_TIMESTAMP_RESOLUTION:
2407                 /* nanosecond timestamp resolution */
2408                 *valp = 1L;
2409                 error = 0;
2410                 break;
2411         default:
2412                 error = fs_pathconf(vp, cmd, valp, cr, ct);
2413         }
2414         return (error);
2415 }
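
     /*
      * Editorial userland sketch: the _PC_XATTR_EXISTS probe above is
      * reachable through pathconf(2) (path hypothetical; assumes
      * <unistd.h>):
      *
      *     long v = pathconf("/tmp/f", _PC_XATTR_EXISTS);
      *
      * v is 1 if the file has extended attributes, 0 if it has none,
      * and -1 on error or if the query is unsupported.
      */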
2416 
2417 
2418 struct vnodeops *tmp_vnodeops;
2419 
2420 const fs_operation_def_t tmp_vnodeops_template[] = {
2421         VOPNAME_OPEN,           { .vop_open = tmp_open },
2422         VOPNAME_CLOSE,          { .vop_close = tmp_close },
2423         VOPNAME_READ,           { .vop_read = tmp_read },
2424         VOPNAME_WRITE,          { .vop_write = tmp_write },
2425         VOPNAME_IOCTL,          { .vop_ioctl = tmp_ioctl },
2426         VOPNAME_GETATTR,        { .vop_getattr = tmp_getattr },
2427         VOPNAME_SETATTR,        { .vop_setattr = tmp_setattr },
2428         VOPNAME_ACCESS,         { .vop_access = tmp_access },
2429         VOPNAME_LOOKUP,         { .vop_lookup = tmp_lookup },
2430         VOPNAME_CREATE,         { .vop_create = tmp_create },
2431         VOPNAME_REMOVE,         { .vop_remove = tmp_remove },
2432         VOPNAME_LINK,           { .vop_link = tmp_link },
2433         VOPNAME_RENAME,         { .vop_rename = tmp_rename },
2434         VOPNAME_MKDIR,          { .vop_mkdir = tmp_mkdir },
2435         VOPNAME_RMDIR,          { .vop_rmdir = tmp_rmdir },
2436         VOPNAME_READDIR,        { .vop_readdir = tmp_readdir },
2437         VOPNAME_SYMLINK,        { .vop_symlink = tmp_symlink },
2438         VOPNAME_READLINK,       { .vop_readlink = tmp_readlink },
2439         VOPNAME_FSYNC,          { .vop_fsync = tmp_fsync },
2440         VOPNAME_INACTIVE,       { .vop_inactive = tmp_inactive },
2441         VOPNAME_FID,            { .vop_fid = tmp_fid },
2442         VOPNAME_RWLOCK,         { .vop_rwlock = tmp_rwlock },
2443         VOPNAME_RWUNLOCK,       { .vop_rwunlock = tmp_rwunlock },
2444         VOPNAME_SEEK,           { .vop_seek = tmp_seek },
2445         VOPNAME_SPACE,          { .vop_space = tmp_space },
2446         VOPNAME_GETPAGE,        { .vop_getpage = tmp_getpage },
2447         VOPNAME_PUTPAGE,        { .vop_putpage = tmp_putpage },
2448         VOPNAME_MAP,            { .vop_map = tmp_map },
2449         VOPNAME_ADDMAP,         { .vop_addmap = tmp_addmap },
2450         VOPNAME_DELMAP,         { .vop_delmap = tmp_delmap },
2451         VOPNAME_PATHCONF,       { .vop_pathconf = tmp_pathconf },
2452         VOPNAME_VNEVENT,        { .vop_vnevent = fs_vnevent_support },
2453         NULL,                   NULL
2454 };
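
     /*
      * Editorial note: this template is consumed at module init time
      * (the real call lives in tmp_vfsops.c); a hedged sketch, for
      * context:
      *
      *     error = vn_make_ops("tmpfs", tmp_vnodeops_template,
      *         &tmp_vnodeops);
      *     if (error != 0)
      *             cmn_err(CE_WARN, "tmpfsinit: bad vnode ops template");
      */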