Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_subr2.c
+++ new/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_subr2.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 *
25 25 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
26 26 * All rights reserved.
27 27 */
28 28
29 29 /*
30 30 * Node hash implementation initially borrowed from NFS (nfs_subr.c)
31 31 * but then heavily modified. It's no longer an array of hash lists,
32 32 * but an AVL tree per mount point. More on this below.
33 33 */
34 34
35 35 #include <sys/param.h>
36 36 #include <sys/systm.h>
37 37 #include <sys/time.h>
38 38 #include <sys/vnode.h>
39 39 #include <sys/bitmap.h>
40 40 #include <sys/dnlc.h>
41 41 #include <sys/kmem.h>
42 42 #include <sys/sunddi.h>
43 43 #include <sys/sysmacros.h>
44 44
45 45 #include <netsmb/smb_osdep.h>
46 46
47 47 #include <netsmb/smb.h>
48 48 #include <netsmb/smb_conn.h>
49 49 #include <netsmb/smb_subr.h>
50 50 #include <netsmb/smb_rq.h>
51 51
52 52 #include <smbfs/smbfs.h>
53 53 #include <smbfs/smbfs_node.h>
54 54 #include <smbfs/smbfs_subr.h>
55 55
56 56 /*
57 57 * The AVL trees (now per-mount) allow finding an smbfs node by its
58 58 * full remote path name. It also allows easy traversal of all nodes
59 59 * below (path wise) any given node. A reader/writer lock for each
60 60 * (per mount) AVL tree is used to control access and to synchronize
61 61 * lookups, additions, and deletions from that AVL tree.
62 62 *
63 63 * Previously, this code used a global array of hash chains, each with
64 64 * its own rwlock. A few struct members, functions, and comments may
65 65 * still refer to a "hash", and those should all now be considered to
66 66 * refer to the per-mount AVL tree that replaced the old hash chains.
67 67 * (i.e. member smi_hash_lk, function sn_hashfind, etc.)
68 68 *
69 69 * The smbnode freelist is organized as a doubly linked list with
70 70 * a head pointer. Additions and deletions are synchronized via
71 71 * a single mutex.
72 72 *
73 73 * In order to add an smbnode to the free list, it must be linked into
74 74 * the mount's AVL tree and the exclusive lock for the AVL must be held.
75 75 * If an smbnode is not linked into the AVL tree, then it is destroyed
76 76 * because it represents no valuable information that can be reused
77 77 * about the file. The exclusive lock for the AVL tree must be held
78 78 * in order to prevent a lookup in the AVL tree from finding the
79 79 * smbnode and using it and assuming that the smbnode is not on the
80 80 * freelist. The lookup in the AVL tree will have the AVL tree lock
81 81 * held, either exclusive or shared.
82 82 *
83 83 * The vnode reference count for each smbnode is not allowed to drop
84 84 * below 1. This prevents external entities, such as the VM
85 85 * subsystem, from acquiring references to vnodes already on the
86 86 * freelist and then trying to place them back on the freelist
87 87 * when their reference is released. This means that when an
88 88 * smbnode is looked up in the AVL tree, then either the smbnode
89 89 * is removed from the freelist and that reference is transferred to
90 90 * the new reference or the vnode reference count must be incremented
91 91 * accordingly. The mutex for the freelist must be held in order to
92 92 * accurately test to see if the smbnode is on the freelist or not.
93 93 * The AVL tree lock might be held shared and it is possible that
94 94 * two different threads may race to remove the smbnode from the
95 95 * freelist. This race can be resolved by holding the mutex for the
96 96 * freelist. Please note that the mutex for the freelist does not
97 97 * need to be held if the smbnode is not on the freelist. It can not be
98 98 * placed on the freelist due to the requirement that the thread
99 99 * putting the smbnode on the freelist must hold the exclusive lock
100 100 * for the AVL tree and the thread doing the lookup in the AVL tree
101 101 * is holding either a shared or exclusive lock for the AVL tree.
102 102 *
103 103 * The lock ordering is:
104 104 *
105 105 * AVL tree lock -> vnode lock
106 106 * AVL tree lock -> freelist lock
107 107 */
108 108
109 109 static kmutex_t smbfreelist_lock;
110 110 static smbnode_t *smbfreelist = NULL;
111 111 static ulong_t smbnodenew = 0;
112 112 long nsmbnode = 0;
113 113
114 114 static struct kmem_cache *smbnode_cache;
115 115
116 116 static const vsecattr_t smbfs_vsa0 = { 0 };
117 117
118 118 /*
119 119 * Mutex to protect the following variables:
120 120 * smbfs_major
121 121 * smbfs_minor
122 122 */
123 123 kmutex_t smbfs_minor_lock;
124 124 int smbfs_major;
125 125 int smbfs_minor;
126 126
127 127 /* See smbfs_node_findcreate() */
128 128 struct smbfattr smbfs_fattr0;
129 129
130 130 /*
131 131 * Local functions.
132 132 * SN for Smb Node
133 133 */
134 134 static void sn_rmfree(smbnode_t *);
135 135 static void sn_inactive(smbnode_t *);
136 136 static void sn_addhash_locked(smbnode_t *, avl_index_t);
137 137 static void sn_rmhash_locked(smbnode_t *);
138 138 static void sn_destroy_node(smbnode_t *);
139 139 void smbfs_kmem_reclaim(void *cdrarg);
140 140
141 141 static smbnode_t *
142 142 sn_hashfind(smbmntinfo_t *, const char *, int, avl_index_t *);
143 143
144 144 static smbnode_t *
145 145 make_smbnode(smbmntinfo_t *, const char *, int, int *);
146 146
147 147 /*
148 148 * Free the resources associated with an smbnode.
149 149 * Note: This is different from smbfs_inactive
150 150 *
151 151 * NFS: nfs_subr.c:rinactive
152 152 */
static void
sn_inactive(smbnode_t *np)
{
	vsecattr_t ovsa;
	cred_t *oldcr;
	char *orpath;
	int orplen;

	/*
	 * Flush and invalidate all pages (todo)
	 * Free any held credentials and caches...
	 * etc. (See NFS code)
	 */
	mutex_enter(&np->r_statelock);

	/*
	 * Detach the cached security data, credentials, and remote
	 * path from the node while holding r_statelock, then drop
	 * the lock before actually freeing them below, so we never
	 * call the free routines with the node lock held.
	 */
	ovsa = np->r_secattr;
	np->r_secattr = smbfs_vsa0;
	np->r_sectime = 0;

	oldcr = np->r_cred;
	np->r_cred = NULL;

	orpath = np->n_rpath;
	orplen = np->n_rplen;
	np->n_rpath = NULL;
	np->n_rplen = 0;

	mutex_exit(&np->r_statelock);

	/* Free the detached resources, outside of r_statelock. */
	if (ovsa.vsa_aclentp != NULL)
		kmem_free(ovsa.vsa_aclentp, ovsa.vsa_aclentsz);

	if (oldcr != NULL)
		crfree(oldcr);

	/* n_rpath is allocated with one extra byte for the NUL. */
	if (orpath != NULL)
		kmem_free(orpath, orplen + 1);
}
191 191
192 192 /*
193 193 * Find and optionally create an smbnode for the passed
194 194 * mountinfo, directory, separator, and name. If the
195 195 * desired smbnode already exists, return a reference.
196 196 * If the file attributes pointer is non-null, the node
197 197 * is created if necessary and linked into the AVL tree.
198 198 *
199 199 * Callers that need a node created but don't have the
200 200 * real attributes pass smbfs_fattr0 to force creation.
201 201 *
202 202 * Note: make_smbnode() may upgrade the "hash" lock to exclusive.
203 203 *
204 204 * NFS: nfs_subr.c:makenfsnode
205 205 */
smbnode_t *
smbfs_node_findcreate(
	smbmntinfo_t *mi,
	const char *dirnm,
	int dirlen,
	const char *name,
	int nmlen,
	char sep,
	struct smbfattr *fap)
{
	char tmpbuf[256];
	size_t rpalloc;
	char *p, *rpath;
	int rplen;
	smbnode_t *np;
	vnode_t *vp;
	int newnode;

	/*
	 * Build the search string, either in tmpbuf or
	 * in allocated memory if larger than tmpbuf.
	 * Note: the search string is NOT null-terminated
	 * in the tmpbuf case; lookups compare using
	 * (rpath, rplen) only.
	 */
	rplen = dirlen;
	if (sep != '\0')
		rplen++;
	rplen += nmlen;
	if (rplen < sizeof (tmpbuf)) {
		/* use tmpbuf */
		rpalloc = 0;
		rpath = tmpbuf;
	} else {
		rpalloc = rplen + 1;
		rpath = kmem_alloc(rpalloc, KM_SLEEP);
	}
	p = rpath;
	bcopy(dirnm, p, dirlen);
	p += dirlen;
	if (sep != '\0')
		*p++ = sep;
	if (name != NULL) {
		bcopy(name, p, nmlen);
		p += nmlen;
	}
	ASSERT(p == rpath + rplen);

	/*
	 * Find or create a node with this path.
	 * Note: make_smbnode may drop and retake the lock
	 * internally (possibly upgrading to writer), but it
	 * returns with it held as before.
	 */
	rw_enter(&mi->smi_hash_lk, RW_READER);
	if (fap == NULL)
		np = sn_hashfind(mi, rpath, rplen, NULL);
	else
		np = make_smbnode(mi, rpath, rplen, &newnode);
	rw_exit(&mi->smi_hash_lk);

	/* Free the search string if it was heap-allocated. */
	if (rpalloc)
		kmem_free(rpath, rpalloc);

	if (fap == NULL) {
		/*
		 * Caller is "just looking" (no create)
		 * so np may or may not be NULL here.
		 * Either way, we're done.
		 */
		return (np);
	}

	/*
	 * We should have a node, possibly created.
	 * Do we have (real) attributes to apply?
	 */
	ASSERT(np != NULL);
	if (fap == &smbfs_fattr0)
		return (np);

	/*
	 * Apply the given attributes to this node,
	 * dealing with any cache impact, etc.
	 */
	vp = SMBTOV(np);
	if (!newnode) {
		/*
		 * Found an existing node.
		 * Maybe purge caches...
		 */
		smbfs_cache_check(vp, fap);
	}
	smbfs_attrcache_fa(vp, fap);

	/*
	 * Note NFS sets vp->v_type here, assuming it
	 * can never change for the life of a node.
	 * We allow v_type to change, and set it in
	 * smbfs_attrcache(). Also: mode, uid, gid
	 */
	return (np);
}
303 303
304 304 /*
305 305 * NFS: nfs_subr.c:rtablehash
306 306 * We use smbfs_hash().
307 307 */
308 308
309 309 /*
310 310 * Find or create an smbnode.
311 311 * NFS: nfs_subr.c:make_rnode
312 312 */
static smbnode_t *
make_smbnode(
	smbmntinfo_t *mi,
	const char *rpath,
	int rplen,
	int *newnode)
{
	smbnode_t *np;
	smbnode_t *tnp;
	vnode_t *vp;
	vfs_t *vfsp;
	avl_index_t where;
	char *new_rpath = NULL;

	ASSERT(RW_READ_HELD(&mi->smi_hash_lk));
	vfsp = mi->smi_vfsp;

start:
	np = sn_hashfind(mi, rpath, rplen, NULL);
	if (np != NULL) {
		/* Existing node; sn_hashfind took a reference for us. */
		*newnode = 0;
		return (np);
	}

	/* Note: will retake this lock below. */
	rw_exit(&mi->smi_hash_lk);

	/*
	 * see if we can find something on the freelist
	 */
	mutex_enter(&smbfreelist_lock);
	if (smbfreelist != NULL && smbnodenew >= nsmbnode) {
		/*
		 * We're at (or over) the nsmbnode limit, so
		 * recycle a node from the freelist rather than
		 * allocating a new one.
		 */
		np = smbfreelist;
		sn_rmfree(np);
		mutex_exit(&smbfreelist_lock);

		vp = SMBTOV(np);

		if (np->r_flags & RHASHED) {
			/*
			 * The recycled node may belong to a different
			 * mount, so lock THAT mount's AVL tree (not
			 * necessarily mi) before unhashing it.
			 */
			smbmntinfo_t *tmp_mi = np->n_mount;
			ASSERT(tmp_mi != NULL);
			rw_enter(&tmp_mi->smi_hash_lk, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				/*
				 * Someone else grabbed the node while no
				 * locks were held; release our hold and
				 * start the whole search over.
				 */
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&tmp_mi->smi_hash_lk);
				/* start over */
				rw_enter(&mi->smi_hash_lk, RW_READER);
				goto start;
			}
			mutex_exit(&vp->v_lock);
			sn_rmhash_locked(np);
			rw_exit(&tmp_mi->smi_hash_lk);
		}

		/* Free the old node's cached state (acl, cred, rpath). */
		sn_inactive(np);

		/* Re-check for a racing hold after sn_inactive. */
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			rw_enter(&mi->smi_hash_lk, RW_READER);
			goto start;
		}
		mutex_exit(&vp->v_lock);
		vn_invalid(vp);
		/*
		 * destroy old locks before bzero'ing and
		 * recreating the locks below.
		 */
		smbfs_rw_destroy(&np->r_rwlock);
		smbfs_rw_destroy(&np->r_lkserlock);
		mutex_destroy(&np->r_statelock);
		cv_destroy(&np->r_cv);
		/*
		 * Make sure that if smbnode is recycled then
		 * VFS count is decremented properly before
		 * reuse.
		 */
		VFS_RELE(vp->v_vfsp);
		vn_reinit(vp);
	} else {
		/*
		 * allocate and initialize a new smbnode
		 */
		vnode_t *new_vp;

		mutex_exit(&smbfreelist_lock);

		np = kmem_cache_alloc(smbnode_cache, KM_SLEEP);
		new_vp = vn_alloc(KM_SLEEP);

		/* Count it; sn_destroy_node does the matching decrement. */
		atomic_inc_ulong((ulong_t *)&smbnodenew);
		vp = new_vp;
	}

	/*
	 * Allocate and copy the rpath we'll need below.
	 */
	new_rpath = kmem_alloc(rplen + 1, KM_SLEEP);
	bcopy(rpath, new_rpath, rplen);
	new_rpath[rplen] = '\0';

	/* Initialize smbnode_t */
	bzero(np, sizeof (*np));

	smbfs_rw_init(&np->r_rwlock, NULL, RW_DEFAULT, NULL);
	smbfs_rw_init(&np->r_lkserlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&np->r_statelock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&np->r_cv, NULL, CV_DEFAULT, NULL);
	/* cv_init(&np->r_commit.c_cv, NULL, CV_DEFAULT, NULL); */

	np->r_vnode = vp;
	np->n_mount = mi;

	np->n_fid = SMB_FID_UNUSED;
	np->n_uid = mi->smi_uid;
	np->n_gid = mi->smi_gid;
	/* Leave attributes "stale." */

#if 0 /* XXX dircache */
	/*
	 * We don't know if it's a directory yet.
	 * Let the caller do this? XXX
	 */
	avl_create(&np->r_dir, compar, sizeof (rddir_cache),
	    offsetof(rddir_cache, tree));
#endif

	/* Now fill in the vnode. */
	vn_setops(vp, smbfs_vnodeops);
	vp->v_data = (caddr_t)np;
	VFS_HOLD(vfsp);
	vp->v_vfsp = vfsp;
	vp->v_type = VNON;

	/*
	 * We entered with mi->smi_hash_lk held (reader).
	 * Retake it now, (as the writer).
	 * Will return with it held.
	 */
	rw_enter(&mi->smi_hash_lk, RW_WRITER);

	/*
	 * There is a race condition where someone else
	 * may alloc the smbnode while no locks are held,
	 * so check again and recover if found.
	 */
	tnp = sn_hashfind(mi, rpath, rplen, &where);
	if (tnp != NULL) {
		/*
		 * Lost the race. Put the node we were building
		 * on the free list and return the one we found.
		 */
		rw_exit(&mi->smi_hash_lk);
		kmem_free(new_rpath, rplen + 1);
		smbfs_addfree(np);
		rw_enter(&mi->smi_hash_lk, RW_READER);
		*newnode = 0;
		return (tnp);
	}

	/*
	 * Hash search identifies nodes by the remote path
	 * (n_rpath) so fill that in now, before linking
	 * this node into the node cache (AVL tree).
	 */
	np->n_rpath = new_rpath;
	np->n_rplen = rplen;
	np->n_ino = smbfs_gethash(new_rpath, rplen);

	sn_addhash_locked(np, where);
	*newnode = 1;
	return (np);
}
489 489
490 490 /*
491 491 * smbfs_addfree
492 492 * Put an smbnode on the free list, or destroy it immediately
493 493 * if it offers no value were it to be reclaimed later. Also
494 494 * destroy immediately when we have too many smbnodes, etc.
495 495 *
496 496 * Normally called by smbfs_inactive, but also
497 497 * called in here during cleanup operations.
498 498 *
499 499 * NFS: nfs_subr.c:rp_addfree
500 500 */
void
smbfs_addfree(smbnode_t *np)
{
	vnode_t *vp;
	struct vfs *vfsp;
	smbmntinfo_t *mi;

	/* Must not already be linked on the freelist. */
	ASSERT(np->r_freef == NULL && np->r_freeb == NULL);

	vp = SMBTOV(np);
	ASSERT(vp->v_count >= 1);

	vfsp = vp->v_vfsp;
	mi = VFTOSMI(vfsp);

	/*
	 * If there are no more references to this smbnode and:
	 * we have too many smbnodes allocated, or if the node
	 * is no longer accessible via the AVL tree (!RHASHED),
	 * or an i/o error occurred while writing to the file,
	 * or it's part of an unmounted FS, then try to destroy
	 * it instead of putting it on the smbnode freelist.
	 */
	if (np->r_count == 0 && (
	    (np->r_flags & RHASHED) == 0 ||
	    (np->r_error != 0) ||
	    (vfsp->vfs_flag & VFS_UNMOUNTED) ||
	    (smbnodenew > nsmbnode))) {

		/* Try to destroy this node. */

		if (np->r_flags & RHASHED) {
			rw_enter(&mi->smi_hash_lk, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				/*
				 * Another hold appeared; just
				 * drop ours and bail out.
				 */
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&mi->smi_hash_lk);
				return;
				/*
				 * Will get another call later,
				 * via smbfs_inactive.
				 */
			}
			mutex_exit(&vp->v_lock);
			sn_rmhash_locked(np);
			rw_exit(&mi->smi_hash_lk);
		}

		/* Free the node's cached state (acl, cred, rpath). */
		sn_inactive(np);

		/*
		 * Recheck the vnode reference count. We need to
		 * make sure that another reference has not been
		 * acquired while we were not holding v_lock. The
		 * smbnode is not in the smbnode "hash" AVL tree, so
		 * the only way for a reference to have been acquired
		 * is for a VOP_PUTPAGE because the smbnode was marked
		 * with RDIRTY or for a modified page. This vnode
		 * reference may have been acquired before our call
		 * to sn_inactive. The i/o may have been completed,
		 * thus allowing sn_inactive to complete, but the
		 * reference to the vnode may not have been released
		 * yet. In any case, the smbnode can not be destroyed
		 * until the other references to this vnode have been
		 * released. The other references will take care of
		 * either destroying the smbnode or placing it on the
		 * smbnode freelist. If there are no other references,
		 * then the smbnode may be safely destroyed.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			return;
		}
		mutex_exit(&vp->v_lock);

		sn_destroy_node(np);
		return;
	}

	/*
	 * Lock the AVL tree and then recheck the reference count
	 * to ensure that no other threads have acquired a reference
	 * to indicate that the smbnode should not be placed on the
	 * freelist. If another reference has been acquired, then
	 * just release this one and let the other thread complete
	 * the processing of adding this smbnode to the freelist.
	 */
	rw_enter(&mi->smi_hash_lk, RW_WRITER);

	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		rw_exit(&mi->smi_hash_lk);
		return;
	}
	mutex_exit(&vp->v_lock);

	/*
	 * Put this node on the free list.
	 * (A circular doubly-linked list headed by smbfreelist.)
	 */
	mutex_enter(&smbfreelist_lock);
	if (smbfreelist == NULL) {
		/* First entry: a singleton ring. */
		np->r_freef = np;
		np->r_freeb = np;
		smbfreelist = np;
	} else {
		/* Insert just before the head (i.e. at the tail). */
		np->r_freef = smbfreelist;
		np->r_freeb = smbfreelist->r_freeb;
		smbfreelist->r_freeb->r_freef = np;
		smbfreelist->r_freeb = np;
	}
	mutex_exit(&smbfreelist_lock);

	rw_exit(&mi->smi_hash_lk);
}
620 620
621 621 /*
622 622 * Remove an smbnode from the free list.
623 623 *
624 624 * The caller must be holding smbfreelist_lock and the smbnode
625 625 * must be on the freelist.
626 626 *
627 627 * NFS: nfs_subr.c:rp_rmfree
628 628 */
629 629 static void
630 630 sn_rmfree(smbnode_t *np)
631 631 {
632 632
633 633 ASSERT(MUTEX_HELD(&smbfreelist_lock));
634 634 ASSERT(np->r_freef != NULL && np->r_freeb != NULL);
635 635
636 636 if (np == smbfreelist) {
637 637 smbfreelist = np->r_freef;
638 638 if (np == smbfreelist)
639 639 smbfreelist = NULL;
640 640 }
641 641
642 642 np->r_freeb->r_freef = np->r_freef;
643 643 np->r_freef->r_freeb = np->r_freeb;
644 644
645 645 np->r_freef = np->r_freeb = NULL;
646 646 }
647 647
648 648 /*
649 649 * Put an smbnode in the "hash" AVL tree.
650 650 *
651 651 * The caller must hold the rwlock as writer.
652 652 *
653 653 * NFS: nfs_subr.c:rp_addhash
654 654 */
static void
sn_addhash_locked(smbnode_t *np, avl_index_t where)
{
	smbmntinfo_t *mi = np->n_mount;

	ASSERT(RW_WRITE_HELD(&mi->smi_hash_lk));
	ASSERT(!(np->r_flags & RHASHED));

	/* "where" is the insertion point from a prior avl_find. */
	avl_insert(&mi->smi_hash_avl, np, where);

	/* r_flags updates are done under r_statelock. */
	mutex_enter(&np->r_statelock);
	np->r_flags |= RHASHED;
	mutex_exit(&np->r_statelock);
}
669 669
670 670 /*
671 671 * Remove an smbnode from the "hash" AVL tree.
672 672 *
673 673 * The caller must hold the rwlock as writer.
674 674 *
675 675 * NFS: nfs_subr.c:rp_rmhash_locked
676 676 */
static void
sn_rmhash_locked(smbnode_t *np)
{
	smbmntinfo_t *mi = np->n_mount;

	ASSERT(RW_WRITE_HELD(&mi->smi_hash_lk));
	ASSERT(np->r_flags & RHASHED);

	avl_remove(&mi->smi_hash_avl, np);

	/* r_flags updates are done under r_statelock. */
	mutex_enter(&np->r_statelock);
	np->r_flags &= ~RHASHED;
	mutex_exit(&np->r_statelock);
}
691 691
692 692 /*
693 693 * Remove an smbnode from the "hash" AVL tree.
694 694 *
695 695 * The caller must not be holding the rwlock.
696 696 */
void
smbfs_rmhash(smbnode_t *np)
{
	smbmntinfo_t *mi = np->n_mount;

	/* Take the AVL lock as writer, then do the locked removal. */
	rw_enter(&mi->smi_hash_lk, RW_WRITER);
	sn_rmhash_locked(np);
	rw_exit(&mi->smi_hash_lk);
}
706 706
707 707 /*
708 708 * Lookup an smbnode by remote pathname
709 709 *
710 710 * The caller must be holding the AVL rwlock, either shared or exclusive.
711 711 *
712 712 * NFS: nfs_subr.c:rfind
713 713 */
static smbnode_t *
sn_hashfind(
	smbmntinfo_t *mi,
	const char *rpath,
	int rplen,
	avl_index_t *pwhere) /* optional */
{
	smbfs_node_hdr_t nhdr;
	smbnode_t *np;
	vnode_t *vp;

	ASSERT(RW_LOCK_HELD(&mi->smi_hash_lk));

	/*
	 * Build a stack search key holding just the fields
	 * that smbfs_node_cmp compares (rpath, rplen).
	 */
	bzero(&nhdr, sizeof (nhdr));
	nhdr.hdr_n_rpath = (char *)rpath;
	nhdr.hdr_n_rplen = rplen;

	/* See smbfs_node_cmp below. */
	np = avl_find(&mi->smi_hash_avl, &nhdr, pwhere);

	if (np == NULL)
		return (NULL);

	/*
	 * Found it in the "hash" AVL tree.
	 * Remove from free list, if necessary.
	 */
	vp = SMBTOV(np);
	if (np->r_freef != NULL) {
		mutex_enter(&smbfreelist_lock);
		/*
		 * If the smbnode is on the freelist,
		 * then remove it and use that reference
		 * as the new reference. Otherwise,
		 * need to increment the reference count.
		 *
		 * r_freef is re-checked under the freelist mutex
		 * because another thread may have removed the node
		 * between the unlocked check above and taking the
		 * mutex (see the race note in the header comment).
		 */
		if (np->r_freef != NULL) {
			sn_rmfree(np);
			mutex_exit(&smbfreelist_lock);
		} else {
			mutex_exit(&smbfreelist_lock);
			VN_HOLD(vp);
		}
	} else
		VN_HOLD(vp);

	return (np);
}
762 762
763 763 static int
764 764 smbfs_node_cmp(const void *va, const void *vb)
765 765 {
766 766 const smbfs_node_hdr_t *a = va;
767 767 const smbfs_node_hdr_t *b = vb;
768 768 int clen, diff;
769 769
770 770 /*
771 771 * Same semantics as strcmp, but does not
772 772 * assume the strings are null terminated.
773 773 */
774 774 clen = (a->hdr_n_rplen < b->hdr_n_rplen) ?
775 775 a->hdr_n_rplen : b->hdr_n_rplen;
776 776 diff = strncmp(a->hdr_n_rpath, b->hdr_n_rpath, clen);
777 777 if (diff < 0)
778 778 return (-1);
779 779 if (diff > 0)
780 780 return (1);
781 781 /* they match through clen */
782 782 if (b->hdr_n_rplen > clen)
783 783 return (-1);
784 784 if (a->hdr_n_rplen > clen)
785 785 return (1);
786 786 return (0);
787 787 }
788 788
789 789 /*
790 790 * Setup the "hash" AVL tree used for our node cache.
791 791 * See: smbfs_mount, smbfs_destroy_table.
792 792 */
void
smbfs_init_hash_avl(avl_tree_t *avl)
{
	/*
	 * Nodes are ordered by remote path (smbfs_node_cmp),
	 * linked through the r_avl_node member of smbnode_t.
	 */
	avl_create(avl, smbfs_node_cmp, sizeof (smbnode_t),
	    offsetof(smbnode_t, r_avl_node));
}
799 799
800 800 /*
801 801 * Invalidate the cached attributes for all nodes "under" the
802 802 * passed-in node. Note: the passed-in node is NOT affected by
803 803 * this call. This is used both for files under some directory
804 804 * after the directory is deleted or renamed, and for extended
805 805 * attribute files (named streams) under a plain file after that
806 806 * file is renamed or deleted.
807 807 *
808 808 * Do this by walking the AVL tree starting at the passed in node,
809 809 * and continuing while the visited nodes have a path prefix matching
810 810 * the entire path of the passed-in node, and a separator just after
811 811 * that matching path prefix. Watch out for cases where the AVL tree
812 812 * order may not exactly match the order of an FS walk, i.e.
813 813 * consider this sequence:
814 814 * "foo" (directory)
815 815 * "foo bar" (name containing a space)
816 816 * "foo/bar"
817 817 * The walk needs to skip "foo bar" and keep going until it finds
818 818 * something that doesn't match the "foo" name prefix.
819 819 */
void
smbfs_attrcache_prune(smbnode_t *top_np)
{
	smbmntinfo_t *mi;
	smbnode_t *np;
	char *rpath;
	int rplen;

	mi = top_np->n_mount;
	rw_enter(&mi->smi_hash_lk, RW_READER);

	/* Walk the AVL tree in order, starting just after top_np. */
	np = top_np;
	rpath = top_np->n_rpath;
	rplen = top_np->n_rplen;
	for (;;) {
		np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER);
		if (np == NULL)
			break;
		/* Shorter than our path: can't be under top_np. */
		if (np->n_rplen < rplen)
			break;
		/* Once the path prefix stops matching, we're done. */
		if (0 != strncmp(np->n_rpath, rpath, rplen))
			break;
		/*
		 * Only invalidate true descendants: the prefix must be
		 * followed by a separator ('\\' for a path component,
		 * ':' for a named stream). This skips names like
		 * "foo bar" when pruning under "foo" (see header
		 * comment above).
		 */
		if (np->n_rplen > rplen && (
		    np->n_rpath[rplen] == ':' ||
		    np->n_rpath[rplen] == '\\'))
			smbfs_attrcache_remove(np);
	}

	rw_exit(&mi->smi_hash_lk);
}
850 850
851 851 #ifdef SMB_VNODE_DEBUG
852 852 int smbfs_check_table_debug = 1;
853 853 #else /* SMB_VNODE_DEBUG */
854 854 int smbfs_check_table_debug = 0;
855 855 #endif /* SMB_VNODE_DEBUG */
856 856
857 857
858 858 /*
858 858 * Return nonzero if there is an active vnode belonging to this vfs in the
860 860 * smbnode cache.
861 861 *
862 862 * Several of these checks are done without holding the usual
863 863 * locks. This is safe because destroy_smbtable(), smbfs_addfree(),
864 864 * etc. will redo the necessary checks before actually destroying
865 865 * any smbnodes.
866 866 *
867 867 * NFS: nfs_subr.c:check_rtable
868 868 *
869 869 * Debugging changes here relative to NFS.
870 870 * Relatively harmless, so left 'em in.
871 871 */
int
smbfs_check_table(struct vfs *vfsp, smbnode_t *rtnp)
{
	smbmntinfo_t *mi;
	smbnode_t *np;
	vnode_t *vp;
	int busycnt = 0;

	mi = VFTOSMI(vfsp);
	rw_enter(&mi->smi_hash_lk, RW_READER);
	for (np = avl_first(&mi->smi_hash_avl); np != NULL;
	    np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER)) {

		if (np == rtnp)
			continue; /* skip the root */
		vp = SMBTOV(np);

		/* Now the 'busy' checks: */
		/* Not on the free list? */
		if (np->r_freef == NULL) {
			SMBVDEBUG("!r_freef: node=0x%p, rpath=%s\n",
			    (void *)np, np->n_rpath);
			busycnt++;
		}

		/* Has dirty pages? */
		if (vn_has_cached_data(vp) &&
		    (np->r_flags & RDIRTY)) {
			SMBVDEBUG("is dirty: node=0x%p, rpath=%s\n",
			    (void *)np, np->n_rpath);
			busycnt++;
		}

		/* Other refs? (not reflected in v_count) */
		if (np->r_count > 0) {
			SMBVDEBUG("+r_count: node=0x%p, rpath=%s\n",
			    (void *)np, np->n_rpath);
			busycnt++;
		}

		/*
		 * Unless debugging, the first busy node is enough to
		 * answer the question; keep walking only when the
		 * debug flag asks for a full report.
		 */
		if (busycnt && !smbfs_check_table_debug)
			break;

	}
	rw_exit(&mi->smi_hash_lk);

	/* Nonzero means "busy" (a count when debugging). */
	return (busycnt);
}
920 920
921 921 /*
922 922 * Destroy inactive vnodes from the AVL tree which belong to this
923 923 * vfs. It is essential that we destroy all inactive vnodes during a
924 924 * forced unmount as well as during a normal unmount.
925 925 *
926 926 * NFS: nfs_subr.c:destroy_rtable
927 927 *
928 928 * In here, we're normally destroying all or most of the AVL tree,
929 929 * so the natural choice is to use avl_destroy_nodes. However,
930 930 * there may be a few busy nodes that should remain in the AVL
931 931 * tree when we're done. The solution: use a temporary tree to
932 932 * hold the busy nodes until we're done destroying the old tree,
933 933 * then copy the temporary tree over the (now empty) real tree.
934 934 */
void
smbfs_destroy_table(struct vfs *vfsp)
{
	avl_tree_t tmp_avl;
	smbmntinfo_t *mi;
	smbnode_t *np;
	smbnode_t *rlist;	/* nodes to destroy, singly linked */
	void *v;		/* avl_destroy_nodes cookie */

	mi = VFTOSMI(vfsp);
	rlist = NULL;
	smbfs_init_hash_avl(&tmp_avl);

	rw_enter(&mi->smi_hash_lk, RW_WRITER);
	v = NULL;
	while ((np = avl_destroy_nodes(&mi->smi_hash_avl, &v)) != NULL) {

		mutex_enter(&smbfreelist_lock);
		if (np->r_freef == NULL) {
			/*
			 * Busy node (not on the free list).
			 * Will keep in the final AVL tree.
			 */
			mutex_exit(&smbfreelist_lock);
			avl_add(&tmp_avl, np);
		} else {
			/*
			 * It's on the free list. Remove and
			 * arrange for it to be destroyed.
			 */
			sn_rmfree(np);
			mutex_exit(&smbfreelist_lock);

			/*
			 * Last part of sn_rmhash_locked().
			 * NB: avl_destroy_nodes has already
			 * removed this from the "hash" AVL.
			 */
			mutex_enter(&np->r_statelock);
			np->r_flags &= ~RHASHED;
			mutex_exit(&np->r_statelock);

			/*
			 * Add to the list of nodes to destroy.
			 * Borrowing avl_child[0] for this list.
			 * (Safe: the node is out of the AVL now.)
			 */
			np->r_avl_node.avl_child[0] =
			    (struct avl_node *)rlist;
			rlist = np;
		}
	}
	avl_destroy(&mi->smi_hash_avl);

	/*
	 * Replace the (now destroyed) "hash" AVL with the
	 * temporary AVL, which restores the busy nodes.
	 */
	mi->smi_hash_avl = tmp_avl;
	rw_exit(&mi->smi_hash_lk);

	/*
	 * Now destroy the nodes on our temporary list (rlist).
	 * This call to smbfs_addfree will end up destroying the
	 * smbnode, but in a safe way with the appropriate set
	 * of checks done.
	 */
	while ((np = rlist) != NULL) {
		rlist = (smbnode_t *)np->r_avl_node.avl_child[0];
		smbfs_addfree(np);
	}
}
1006 1006
1007 1007 /*
1008 1008 * This routine destroys all the resources associated with the smbnode
1009 1009 * and then the smbnode itself. Note: sn_inactive has been called.
1010 1010 *
1011 1011 * NFS: nfs_subr.c:destroy_rnode
1012 1012 */
1013 1013 static void
1014 1014 sn_destroy_node(smbnode_t *np)
1015 1015 {
1016 1016 vnode_t *vp;
1017 1017 vfs_t *vfsp;
1018 1018
1019 1019 vp = SMBTOV(np);
↓ open down ↓ |
603 lines elided |
↑ open up ↑ |
1020 1020 vfsp = vp->v_vfsp;
1021 1021
1022 1022 ASSERT(vp->v_count == 1);
1023 1023 ASSERT(np->r_count == 0);
1024 1024 ASSERT(np->r_mapcnt == 0);
1025 1025 ASSERT(np->r_secattr.vsa_aclentp == NULL);
1026 1026 ASSERT(np->r_cred == NULL);
1027 1027 ASSERT(np->n_rpath == NULL);
1028 1028 ASSERT(!(np->r_flags & RHASHED));
1029 1029 ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
1030 - atomic_add_long((ulong_t *)&smbnodenew, -1);
1030 + atomic_dec_ulong((ulong_t *)&smbnodenew);
1031 1031 vn_invalid(vp);
1032 1032 vn_free(vp);
1033 1033 kmem_cache_free(smbnode_cache, np);
1034 1034 VFS_RELE(vfsp);
1035 1035 }
1036 1036
/*
 * Flush dirty pages on all vnodes in this (or every) vfs.
 * Comment inherited from NFS ("used by nfs_sync and by nfs_unmount");
 * in smbfs the callers are presumably smbfs_sync/smbfs_unmount —
 * TODO confirm against the callers.
 *
 * Currently a no-op: smbfs does not yet support mmap'd pages, so
 * there is nothing cached to flush.
 */
/*ARGSUSED*/
void
smbfs_rflush(struct vfs *vfsp, cred_t *cr)
{
	/* Todo: mmap support. */
}
1047 1047
1048 1048 /* access cache */
1049 1049 /* client handles */
1050 1050
/*
 * initialize resources that are used by smbfs_subr.c
 * this is called from the _init() routine (by the way of smbfs_clntinit())
 *
 * Sets up the smbnode kmem cache, the global locks, and the device
 * major/minor numbers used for smbfs mounts.  Always returns 0.
 *
 * NFS: nfs_subr.c:nfs_subrinit
 */
int
smbfs_subrinit(void)
{
	ulong_t nsmbnode_max;

	/*
	 * Allocate and initialize the smbnode cache
	 */
	if (nsmbnode <= 0)
		nsmbnode = ncsize; /* dnlc.h */
	/*
	 * Cap nsmbnode so the node cache can consume at most one
	 * quarter (>> 2) of the memory currently available to kmem.
	 */
	nsmbnode_max = (ulong_t)((kmem_maxavail() >> 2) /
	    sizeof (struct smbnode));
	if (nsmbnode > nsmbnode_max || (nsmbnode == 0 && ncsize == 0)) {
		zcmn_err(GLOBAL_ZONEID, CE_NOTE,
		    "setting nsmbnode to max value of %ld", nsmbnode_max);
		nsmbnode = nsmbnode_max;
	}

	/*
	 * smbfs_kmem_reclaim is installed as the cache's reclaim
	 * callback, invoked when the allocator is low on memory.
	 */
	smbnode_cache = kmem_cache_create("smbnode_cache", sizeof (smbnode_t),
	    0, NULL, NULL, smbfs_kmem_reclaim, NULL, NULL, 0);

	/*
	 * Initialize the various mutexes and reader/writer locks
	 */
	mutex_init(&smbfreelist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&smbfs_minor_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Assign unique major number for all smbfs mounts
	 */
	if ((smbfs_major = getudev()) == -1) {
		zcmn_err(GLOBAL_ZONEID, CE_WARN,
		    "smbfs: init: can't get unique device number");
		smbfs_major = 0;
	}
	smbfs_minor = 0;

	return (0);
}
1096 1096
1097 1097 /*
1098 1098 * free smbfs hash table, etc.
1099 1099 * NFS: nfs_subr.c:nfs_subrfini
1100 1100 */
1101 1101 void
1102 1102 smbfs_subrfini(void)
1103 1103 {
1104 1104
1105 1105 /*
1106 1106 * Destroy the smbnode cache
1107 1107 */
1108 1108 kmem_cache_destroy(smbnode_cache);
1109 1109
1110 1110 /*
1111 1111 * Destroy the various mutexes and reader/writer locks
1112 1112 */
1113 1113 mutex_destroy(&smbfreelist_lock);
1114 1114 mutex_destroy(&smbfs_minor_lock);
1115 1115 }
1116 1116
1117 1117 /* rddir_cache ? */
1118 1118
1119 1119 /*
1120 1120 * Support functions for smbfs_kmem_reclaim
1121 1121 */
1122 1122
/*
 * Drain the global smbnode free list, destroying each idle node to
 * give memory back to the system.  Helper for smbfs_kmem_reclaim.
 *
 * Lock ordering note: smbfreelist_lock is dropped before taking the
 * per-mount hash lock (smi_hash_lk), then re-acquired, so the list
 * head is re-read from the top on every iteration.
 */
static void
smbfs_node_reclaim(void)
{
	smbmntinfo_t *mi;
	smbnode_t *np;
	vnode_t *vp;

	mutex_enter(&smbfreelist_lock);
	while ((np = smbfreelist) != NULL) {
		sn_rmfree(np);
		mutex_exit(&smbfreelist_lock);
		if (np->r_flags & RHASHED) {
			vp = SMBTOV(np);
			mi = np->n_mount;
			rw_enter(&mi->smi_hash_lk, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				/*
				 * Extra hold appeared while the node
				 * sat on the free list (presumably
				 * found via the hash AVL) — it's in
				 * use again, so drop our reference
				 * and move on to the next node.
				 */
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&mi->smi_hash_lk);
				mutex_enter(&smbfreelist_lock);
				continue;
			}
			mutex_exit(&vp->v_lock);
			/* Still idle: unhash it while we hold the lock. */
			sn_rmhash_locked(np);
			rw_exit(&mi->smi_hash_lk);
		}
		/*
		 * This call to smbfs_addfree will end up destroying the
		 * smbnode, but in a safe way with the appropriate set
		 * of checks done.
		 */
		smbfs_addfree(np);
		mutex_enter(&smbfreelist_lock);
	}
	mutex_exit(&smbfreelist_lock);
}
1160 1160
/*
 * kmem cache reclaim callback, installed by the kmem_cache_create
 * call in smbfs_subrinit.  The allocator invokes it when memory is
 * tight, asking us to "Please give back some memory!"  We respond by
 * destroying every idle node on the smbnode free list.
 *
 * Todo: dump nodes from the free list?
 */
/*ARGSUSED*/
void
smbfs_kmem_reclaim(void *cdrarg)
{
	smbfs_node_reclaim();
}
1173 1173
1174 1174 /* nfs failover stuff */
1175 1175 /* nfs_rw_xxx - see smbfs_rwlock.c */
↓ open down ↓ |
135 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX