1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * Direct Attached Disk
28 */
29
30 #include <sys/file.h>
31 #include <sys/scsi/scsi.h>
32 #include <sys/var.h>
33 #include <sys/proc.h>
34 #include <sys/dktp/cm.h>
35 #include <sys/vtoc.h>
36 #include <sys/dkio.h>
37 #include <sys/policy.h>
38 #include <sys/priv.h>
39
40 #include <sys/dktp/dadev.h>
41 #include <sys/dktp/fctypes.h>
42 #include <sys/dktp/flowctrl.h>
43 #include <sys/dktp/tgcom.h>
44 #include <sys/dktp/tgdk.h>
45 #include <sys/dktp/bbh.h>
46 #include <sys/dktp/dadkio.h>
47 #include <sys/dktp/dadk.h>
48 #include <sys/cdio.h>
49
50 /*
51 * Local Function Prototypes
52 */
static void dadk_restart(void *pktp);
static void dadk_pktcb(struct cmpkt *pktp);
static void dadk_iodone(struct buf *bp);
static void dadk_polldone(struct buf *bp);
static void dadk_setcap(struct dadk *dadkp);
static void dadk_create_errstats(struct dadk *dadkp, int instance);
static void dadk_destroy_errstats(struct dadk *dadkp);

static int dadk_chkerr(struct cmpkt *pktp);
static int dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp);
static int dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp);
static int dadk_ioretry(struct cmpkt *pktp, int action);

static struct cmpkt *dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp,
	struct buf *bp, void (*cb_func)(struct buf *), int (*func)(caddr_t),
	caddr_t arg);

static int dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t),
    caddr_t arg);
static void dadk_transport(opaque_t com_data, struct buf *bp);
static int dadk_ctl_ioctl(struct dadk *, uint32_t, uintptr_t, int);

/*
 * Target-communication vector handed to the flow-control object;
 * it calls back into dadk_pkt()/dadk_transport() to build and send
 * command packets.  The first two slots are unused here.
 */
struct tgcom_objops dadk_com_ops = {
	nodev,
	nodev,
	dadk_pkt,
	dadk_transport,
	0, 0
};
82
83 /*
84 * architecture dependent allocation restrictions for dadk_iob_alloc(). For
85 * x86, we'll set dma_attr_addr_hi to dadk_max_phys_addr and dma_attr_sgllen
86 * to dadk_sgl_size during _init().
87 */
#if defined(__sparc)
/* DMA attributes used by dadk_iob_alloc() buffer allocations (sparc). */
static ddi_dma_attr_t dadk_alloc_attr = {
	DMA_ATTR_V0,	/* version number */
	0x0,		/* lowest usable address */
	0xFFFFFFFFull,	/* high DMA address range */
	0xFFFFFFFFull,	/* DMA counter register */
	1,		/* DMA address alignment */
	1,		/* DMA burstsizes */
	1,		/* min effective DMA size */
	0xFFFFFFFFull,	/* max DMA xfer size */
	0xFFFFFFFFull,	/* segment boundary */
	1,		/* s/g list length */
	512,		/* granularity of device */
	0,		/* DMA transfer flags */
};
#elif defined(__x86)
/*
 * DMA attributes for x86; dma_attr_addr_hi and dma_attr_sgllen are
 * patched in _init() from dadk_max_phys_addr and dadk_sgl_size.
 */
static ddi_dma_attr_t dadk_alloc_attr = {
	DMA_ATTR_V0,	/* version number */
	0x0,		/* lowest usable address */
	0x0,		/* high DMA address range [set in _init()] */
	0xFFFFull,	/* DMA counter register */
	512,		/* DMA address alignment */
	1,		/* DMA burstsizes */
	1,		/* min effective DMA size */
	0xFFFFFFFFull,	/* max DMA xfer size */
	0xFFFFFFFFull,	/* segment boundary */
	0,		/* s/g list length [set in _init()] */
	512,		/* granularity of device */
	0,		/* DMA transfer flags */
};

/* Tunables consumed by _init() to finish dadk_alloc_attr on x86. */
uint64_t dadk_max_phys_addr = 0xFFFFFFFFull;
int dadk_sgl_size = 0xFF;
#endif
122
/* Removable-media and DIOCTL_RWCMD helper prototypes. */
static int dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags,
    int silent);
static void dadk_rmb_iodone(struct buf *bp);

static int dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp,
    dev_t dev, enum uio_seg dataspace, int rw);
static void dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *scmdp,
    struct buf *bp);
static void dadkmin(struct buf *bp);
static int dadk_dk_strategy(struct buf *bp);
static void dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp);
134
/*
 * Target-disk entry-point vector exported via dadk_create(); callers
 * invoke these through the TGDK_* macros.
 */
struct tgdk_objops dadk_ops = {
	dadk_init,
	dadk_free,
	dadk_probe,
	dadk_attach,
	dadk_open,
	dadk_close,
	dadk_ioctl,
	dadk_strategy,
	dadk_setgeom,
	dadk_getgeom,
	dadk_iob_alloc,
	dadk_iob_free,
	dadk_iob_htoc,
	dadk_iob_xfer,
	dadk_dump,
	dadk_getphygeom,
	dadk_set_bbhobj,
	dadk_check_media,
	dadk_inquiry,
	dadk_cleanup,
	0
};
158
159 /*
160 * Local static data
161 */
162
#ifdef DADK_DEBUG
/* Debug-category bits for dadk_debug; enabled only under DADK_DEBUG. */
#define	DENT	0x0001
#define	DERR	0x0002
#define	DIO	0x0004
#define	DGEOM	0x0010
#define	DSTATE	0x0020
static	int	dadk_debug = DGEOM;

#endif	/* DADK_DEBUG */

static int dadk_check_media_time = 3000000;	/* 3 Second State Check */
static int dadk_dk_maxphys = 0x80000;

/*
 * Command-name table indexed by DCMD_* opcode; each entry carries its
 * opcode as a leading octal byte for lookup by the error-reporting code.
 */
static char	*dadk_cmds[] = {
	"\000Unknown",			/* unknown		*/
	"\001read sector",		/* DCMD_READ 1		*/
	"\002write sector",		/* DCMD_WRITE 2		*/
	"\003format track",		/* DCMD_FMTTRK 3	*/
	"\004format whole drive",	/* DCMD_FMTDRV 4	*/
	"\005recalibrate",		/* DCMD_RECAL  5	*/
	"\006seek sector",		/* DCMD_SEEK 6		*/
	"\007read verify",		/* DCMD_RDVER 7		*/
	"\010read defect list",		/* DCMD_GETDEF 8	*/
	"\011lock door",		/* DCMD_LOCK 9		*/
	"\012unlock door",		/* DCMD_UNLOCK 10	*/
	"\013start motor",		/* DCMD_START_MOTOR 11	*/
	"\014stop motor",		/* DCMD_STOP_MOTOR 12	*/
	"\015eject",			/* DCMD_EJECT 13	*/
	"\016update geometry",		/* DCMD_UPDATE_GEOM 14	*/
	"\017get state",		/* DCMD_GET_STATE 15	*/
	"\020cdrom pause",		/* DCMD_PAUSE 16	*/
	"\021cdrom resume",		/* DCMD_RESUME 17	*/
	"\022cdrom play track index",	/* DCMD_PLAYTRKIND 18	*/
	"\023cdrom play msf",		/* DCMD_PLAYMSF 19	*/
	"\024cdrom sub channel",	/* DCMD_SUBCHNL 20	*/
	"\025cdrom read mode 1",	/* DCMD_READMODE1 21	*/
	"\026cdrom read toc header",	/* DCMD_READTOCHDR 22	*/
	"\027cdrom read toc entry",	/* DCMD_READTOCENT 23	*/
	"\030cdrom read offset",	/* DCMD_READOFFSET 24	*/
	"\031cdrom read mode 2",	/* DCMD_READMODE2 25	*/
	"\032cdrom volume control",	/* DCMD_VOLCTRL 26	*/
	"\033flush cache",		/* DCMD_FLUSH_CACHE 27	*/
	NULL
};

/*
 * Sense-key description table indexed by DERR_* code, same leading
 * octal-byte convention as dadk_cmds above.
 */
static char	*dadk_sense[] = {
	"\000Success",			/* DERR_SUCCESS		*/
	"\001address mark not found",	/* DERR_AMNF		*/
	"\002track 0 not found",	/* DERR_TKONF		*/
	"\003aborted command",		/* DERR_ABORT		*/
	"\004write fault",		/* DERR_DWF		*/
	"\005ID not found",		/* DERR_IDNF		*/
	"\006drive busy",		/* DERR_BUSY		*/
	"\007uncorrectable data error",	/* DERR_UNC		*/
	"\010bad block detected",	/* DERR_BBK		*/
	"\011invalid command",		/* DERR_INVCDB		*/
	"\012device hard error",	/* DERR_HARD		*/
	"\013illegal length indicated", /* DERR_ILI		*/
	"\014end of media",		/* DERR_EOM		*/
	"\015media change requested",	/* DERR_MCR		*/
	"\016recovered from error",	/* DERR_RECOVER		*/
	"\017device not ready",		/* DERR_NOTREADY	*/
	"\020medium error",		/* DERR_MEDIUM		*/
	"\021hardware error",		/* DERR_HW		*/
	"\022illegal request",		/* DERR_ILL		*/
	"\023unit attention",		/* DERR_UNIT_ATTN	*/
	"\024data protection",		/* DERR_DATA_PROT	*/
	"\025miscompare",		/* DERR_MISCOMPARE	*/
	"\026ICRC error during UDMA",	/* DERR_ICRC		*/
	"\027reserved",			/* DERR_RESV		*/
	NULL
};

/* Driver name used as the tag in gda_log() console messages. */
static char *dadk_name = "Disk";
237
238 /*
239 * This is the loadable module wrapper
240 */
241 #include <sys/modctl.h>
242
243 extern struct mod_ops mod_miscops;
244
/* Loadable-module linkage: dadk is a misc module (no dev_ops of its own). */
static struct modlmisc modlmisc = {
	&mod_miscops,	/* Type of module */
	"Direct Attached Disk"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};
253
/*
 * Module load entry point.  On x86 this also finalizes the two
 * dadk_alloc_attr fields left as placeholders at compile time
 * before registering the module.
 */
int
_init(void)
{
#ifdef DADK_DEBUG
	if (dadk_debug & DENT)
		PRF("dadk_init: call\n");
#endif

#if defined(__x86)
	/* set the max physical address for iob allocs on x86 */
	dadk_alloc_attr.dma_attr_addr_hi = dadk_max_phys_addr;

	/*
	 * set the sgllen for iob allocs on x86. If this is set less than
	 * the number of pages the buffer will take (taking into account
	 * alignment), it would force the allocator to try and allocate
	 * contiguous pages.
	 */
	dadk_alloc_attr.dma_attr_sgllen = dadk_sgl_size;
#endif

	return (mod_install(&modlinkage));
}
277
/* Module unload entry point. */
int
_fini(void)
{
#ifdef DADK_DEBUG
	if (dadk_debug & DENT)
		PRF("dadk_fini: call\n");
#endif

	return (mod_remove(&modlinkage));
}
288
/* Module information entry point. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
294
/*
 * Allocate and wire up a tgdk object and its private dadk state in a
 * single allocation (dadk immediately follows the tgdk_obj).  Returns
 * NULL on allocation failure (KM_NOSLEEP).  Callers free the pair via
 * dadk_free().
 */
struct tgdk_obj *
dadk_create()
{
	struct tgdk_obj *dkobjp;
	struct dadk *dadkp;

	dkobjp = kmem_zalloc((sizeof (*dkobjp) + sizeof (*dadkp)), KM_NOSLEEP);
	if (!dkobjp)
		return (NULL);
	dadkp = (struct dadk *)(dkobjp+1);

	dkobjp->tg_ops  = (struct tgdk_objops *)&dadk_ops;
	dkobjp->tg_data = (opaque_t)dadkp;
	dkobjp->tg_ext = &(dkobjp->tg_extblk);
	dadkp->dad_extp = &(dkobjp->tg_extblk);

#ifdef DADK_DEBUG
	if (dadk_debug & DENT)
		PRF("dadk_create: tgdkobjp= 0x%x dadkp= 0x%x\n", dkobjp, dadkp);
#endif
	return (dkobjp);
}
317
/*
 * Initialize a dadk instance: bind it to the scsi_device, cross-link
 * the communication/bbh/flow-control objects, and hand queueing setup
 * off to the flow-control object.  Returns the FLC_INIT() result.
 */
int
dadk_init(opaque_t objp, opaque_t devp, opaque_t flcobjp, opaque_t queobjp,
    opaque_t bbhobjp, void *lkarg)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct scsi_device *sdevp = (struct scsi_device *)devp;

	dadkp->dad_sd = devp;
	/* the HBA transport handle doubles as the controller object */
	dadkp->dad_ctlobjp = (opaque_t)sdevp->sd_address.a_hba_tran;
	sdevp->sd_private = (caddr_t)dadkp;

	/* initialize the communication object */
	dadkp->dad_com.com_data = (opaque_t)dadkp;
	dadkp->dad_com.com_ops  = &dadk_com_ops;

	dadkp->dad_bbhobjp = bbhobjp;
	BBH_INIT(bbhobjp);

	dadkp->dad_flcobjp = flcobjp;
	mutex_init(&dadkp->dad_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
	dadkp->dad_cmd_count = 0;
	return (FLC_INIT(flcobjp, &(dadkp->dad_com), queobjp, lkarg));
}
341
/*
 * Release a tgdk/dadk object pair created by dadk_create(), running
 * TGDK_CLEANUP() first to tear down the sub-objects.
 */
int
dadk_free(struct tgdk_obj *dkobjp)
{
	TGDK_CLEANUP(dkobjp);
	kmem_free(dkobjp, (sizeof (*dkobjp) + sizeof (struct dadk)));

	return (DDI_SUCCESS);
}
350
/*
 * Detach the dadk instance from its scsi_device and free the bbh and
 * flow-control sub-objects plus the command-count mutex.  Safe to call
 * with partially initialized state (each teardown is NULL-checked).
 */
void
dadk_cleanup(struct tgdk_obj *dkobjp)
{
	struct dadk *dadkp;

	dadkp = (struct dadk *)(dkobjp->tg_data);
	if (dadkp->dad_sd)
		dadkp->dad_sd->sd_private = NULL;
	if (dadkp->dad_bbhobjp) {
		BBH_FREE(dadkp->dad_bbhobjp);
		dadkp->dad_bbhobjp = NULL;
	}
	if (dadkp->dad_flcobjp) {
		FLC_FREE(dadkp->dad_flcobjp);
		dadkp->dad_flcobjp = NULL;
	}
	mutex_destroy(&dadkp->dad_cmd_mutex);
}
369
/*
 * Probe the attached device via its cached inquiry data.  Only direct
 * access disks and CD-ROMs are claimed; WORM/optical and unknown types
 * fail the probe.  On success the ext block's node/controller type,
 * removable and read-only flags are filled in and the identity is
 * logged to the console.
 */
/* ARGSUSED */
int
dadk_probe(opaque_t objp, int kmsflg)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct scsi_device *devp;
	char   name[80];

	devp = dadkp->dad_sd;
	if (!devp->sd_inq || (devp->sd_inq->inq_dtype == DTYPE_NOTPRESENT) ||
	    (devp->sd_inq->inq_dtype == DTYPE_UNKNOWN)) {
		return (DDI_PROBE_FAILURE);
	}

	switch (devp->sd_inq->inq_dtype) {
		case DTYPE_DIRECT:
			dadkp->dad_ctype = DKC_DIRECT;
			dadkp->dad_extp->tg_nodetype = DDI_NT_BLOCK;
			dadkp->dad_extp->tg_ctype = DKC_DIRECT;
			break;
		case DTYPE_RODIRECT: /* eg cdrom */
			dadkp->dad_ctype = DKC_CDROM;
			dadkp->dad_extp->tg_rdonly = 1;
			dadkp->dad_rdonly = 1;
			dadkp->dad_cdrom = 1;
			dadkp->dad_extp->tg_nodetype = DDI_NT_CD;
			dadkp->dad_extp->tg_ctype = DKC_CDROM;
			break;
		case DTYPE_WORM:
		case DTYPE_OPTICAL:
		default:
			return (DDI_PROBE_FAILURE);
	}

	dadkp->dad_extp->tg_rmb = dadkp->dad_rmb = devp->sd_inq->inq_rmb;

	/* default to 512-byte sectors until dadk_setcap() refines this */
	dadkp->dad_secshf = SCTRSHFT;
	dadkp->dad_blkshf = 0;

	/* display the device name */
	(void) strcpy(name, "Vendor '");
	gda_inqfill((caddr_t)devp->sd_inq->inq_vid, 8, &name[strlen(name)]);
	(void) strcat(name, "' Product '");
	gda_inqfill((caddr_t)devp->sd_inq->inq_pid, 16, &name[strlen(name)]);
	(void) strcat(name, "'");
	gda_log(devp->sd_dev, dadk_name, CE_NOTE, "!<%s>\n", name);

	return (DDI_PROBE_SUCCESS);
}
419
420
/* Attach is a no-op for dadk; all setup happens in init/probe/open. */
/* ARGSUSED */
int
dadk_attach(opaque_t objp)
{
	return (DDI_SUCCESS);
}
427
428 int
429 dadk_set_bbhobj(opaque_t objp, opaque_t bbhobjp)
430 {
431 struct dadk *dadkp = (struct dadk *)objp;
432 /* free the old bbh object */
433 if (dadkp->dad_bbhobjp)
434 BBH_FREE(dadkp->dad_bbhobjp);
435
436 /* initialize the new bbh object */
437 dadkp->dad_bbhobjp = bbhobjp;
438 BBH_INIT(bbhobjp);
439
440 return (DDI_SUCCESS);
441 }
442
/*
 * Open the device.  For removable media the motor is started, the door
 * locked and geometry refreshed, with dad_iostate transitions broadcast
 * to media-state waiters.  For fixed disks a previously cached physical
 * geometry short-circuits straight to kstat startup.  Otherwise the
 * write-cache state and logical/physical geometry are (re)fetched, the
 * sector-size shift factors derived, and error kstats plus flow-control
 * profiling started.  Returns DDI_FAILURE if the device reports zero
 * capacity or a removable-media command fails.
 */
/* ARGSUSED */
int
dadk_open(opaque_t objp, int flag)
{
	struct dadk *dadkp = (struct dadk *)objp;
	int error;
	int wce;

	if (!dadkp->dad_rmb) {
		if (dadkp->dad_phyg.g_cap) {
			/* geometry already known; just start profiling */
			FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
			    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
			return (DDI_SUCCESS);
		}
	} else {
		mutex_enter(&dadkp->dad_mutex);
		dadkp->dad_iostate = DKIO_NONE;
		cv_broadcast(&dadkp->dad_state_cv);
		mutex_exit(&dadkp->dad_mutex);

		if (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0, 0,
		    DADK_SILENT) ||
		    dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT) ||
		    dadk_rmb_ioctl(dadkp, DCMD_UPDATE_GEOM, 0, 0,
		    DADK_SILENT)) {
			return (DDI_FAILURE);
		}

		mutex_enter(&dadkp->dad_mutex);
		dadkp->dad_iostate = DKIO_INSERTED;
		cv_broadcast(&dadkp->dad_state_cv);
		mutex_exit(&dadkp->dad_mutex);
	}

	/*
	 * get write cache enable state
	 * If there is an error, must assume that write cache
	 * is enabled.
	 * NOTE: Since there is currently no Solaris mechanism to
	 * change the state of the Write Cache Enable feature,
	 * this code just checks the value of the WCE bit
	 * obtained at device init time.  If a mechanism
	 * is added to the driver to change WCE, dad_wce
	 * must be updated appropriately.
	 */
	error = dadk_ctl_ioctl(dadkp, DIOCTL_GETWCE,
	    (uintptr_t)&wce, FKIOCTL | FNATIVE);
	mutex_enter(&dadkp->dad_mutex);
	dadkp->dad_wce = (error != 0) || (wce != 0);
	mutex_exit(&dadkp->dad_mutex);

	/* logical disk geometry */
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETGEOM,
	    (uintptr_t)&dadkp->dad_logg, FKIOCTL | FNATIVE);
	if (dadkp->dad_logg.g_cap == 0)
		return (DDI_FAILURE);

	/* get physical disk geometry */
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETPHYGEOM,
	    (uintptr_t)&dadkp->dad_phyg, FKIOCTL | FNATIVE);
	if (dadkp->dad_phyg.g_cap == 0)
		return (DDI_FAILURE);

	dadk_setcap(dadkp);

	dadk_create_errstats(dadkp,
	    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));

	/* start profiling */
	FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
	    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));

	return (DDI_SUCCESS);
}
517
518 static void
519 dadk_setcap(struct dadk *dadkp)
520 {
521 int totsize;
522 int i;
523
524 totsize = dadkp->dad_phyg.g_secsiz;
525
526 if (totsize == 0) {
527 if (dadkp->dad_cdrom) {
528 totsize = 2048;
529 } else {
530 totsize = NBPSCTR;
531 }
532 } else {
533 /* Round down sector size to multiple of 512B */
534 totsize &= ~(NBPSCTR-1);
535 }
536 dadkp->dad_phyg.g_secsiz = totsize;
537
538 /* set sec,block shift factor - (512->0, 1024->1, 2048->2, etc.) */
539 totsize >>= SCTRSHFT;
540 for (i = 0; totsize != 1; i++, totsize >>= 1)
541 ;
542 dadkp->dad_blkshf = i;
543 dadkp->dad_secshf = i + SCTRSHFT;
544 }
545
546
547 static void
548 dadk_create_errstats(struct dadk *dadkp, int instance)
549 {
550 dadk_errstats_t *dep;
551 char kstatname[KSTAT_STRLEN];
552 dadk_ioc_string_t dadk_ioc_string;
553
554 if (dadkp->dad_errstats)
555 return;
556
557 (void) sprintf(kstatname, "cmdk%d,error", instance);
558 dadkp->dad_errstats = kstat_create("cmdkerror", instance,
559 kstatname, "device_error", KSTAT_TYPE_NAMED,
560 sizeof (dadk_errstats_t) / sizeof (kstat_named_t),
561 KSTAT_FLAG_PERSISTENT);
562
563 if (!dadkp->dad_errstats)
564 return;
565
566 dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;
567
568 kstat_named_init(&dep->dadk_softerrs,
569 "Soft Errors", KSTAT_DATA_UINT32);
570 kstat_named_init(&dep->dadk_harderrs,
571 "Hard Errors", KSTAT_DATA_UINT32);
572 kstat_named_init(&dep->dadk_transerrs,
573 "Transport Errors", KSTAT_DATA_UINT32);
574 kstat_named_init(&dep->dadk_model,
575 "Model", KSTAT_DATA_CHAR);
576 kstat_named_init(&dep->dadk_revision,
577 "Revision", KSTAT_DATA_CHAR);
578 kstat_named_init(&dep->dadk_serial,
579 "Serial No", KSTAT_DATA_CHAR);
580 kstat_named_init(&dep->dadk_capacity,
581 "Size", KSTAT_DATA_ULONGLONG);
582 kstat_named_init(&dep->dadk_rq_media_err,
583 "Media Error", KSTAT_DATA_UINT32);
584 kstat_named_init(&dep->dadk_rq_ntrdy_err,
585 "Device Not Ready", KSTAT_DATA_UINT32);
586 kstat_named_init(&dep->dadk_rq_nodev_err,
587 "No Device", KSTAT_DATA_UINT32);
588 kstat_named_init(&dep->dadk_rq_recov_err,
589 "Recoverable", KSTAT_DATA_UINT32);
590 kstat_named_init(&dep->dadk_rq_illrq_err,
591 "Illegal Request", KSTAT_DATA_UINT32);
592
593 dadkp->dad_errstats->ks_private = dep;
594 dadkp->dad_errstats->ks_update = nulldev;
595 kstat_install(dadkp->dad_errstats);
596
597 /* get model */
598 dep->dadk_model.value.c[0] = 0;
599 dadk_ioc_string.is_buf = &dep->dadk_model.value.c[0];
600 dadk_ioc_string.is_size = sizeof (dep->dadk_model.value.c);
601 (void) dadk_ctl_ioctl(dadkp, DIOCTL_GETMODEL,
602 (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);
603
604 /* get serial */
605 dep->dadk_serial.value.c[0] = 0;
606 dadk_ioc_string.is_buf = &dep->dadk_serial.value.c[0];
607 dadk_ioc_string.is_size = sizeof (dep->dadk_serial.value.c);
608 (void) dadk_ctl_ioctl(dadkp, DIOCTL_GETSERIAL,
609 (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);
610
611 /* Get revision */
612 dep->dadk_revision.value.c[0] = 0;
613
614 /* Get capacity */
615
616 dep->dadk_capacity.value.ui64 =
617 (uint64_t)dadkp->dad_logg.g_cap *
618 (uint64_t)dadkp->dad_logg.g_secsiz;
619 }
620
621
/*
 * Close the device: for removable media, stop the motor and unlock the
 * door (best-effort, errors ignored), then stop flow-control profiling
 * and tear down the error kstats.
 */
int
dadk_close(opaque_t objp)
{
	struct dadk *dadkp = (struct dadk *)objp;

	if (dadkp->dad_rmb) {
		(void) dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0, 0,
		    DADK_SILENT);
		(void) dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT);
	}
	FLC_STOP_KSTAT(dadkp->dad_flcobjp);

	dadk_destroy_errstats(dadkp);

	return (DDI_SUCCESS);
}
638
639 static void
640 dadk_destroy_errstats(struct dadk *dadkp)
641 {
642 if (!dadkp->dad_errstats)
643 return;
644
645 kstat_delete(dadkp->dad_errstats);
646 dadkp->dad_errstats = NULL;
647 }
648
649
/*
 * Queue a buf for asynchronous I/O through the flow-control object.
 * Rejects writes to read-only media (EROFS) and transfers that are not
 * a multiple of the device sector size (ENXIO), calling bioerror() and
 * returning DDI_FAILURE in those cases.  The buf's block number is
 * converted from logical blocks to physical sectors before enqueue.
 */
int
dadk_strategy(opaque_t objp, struct buf *bp)
{
	struct dadk *dadkp = (struct dadk *)objp;

	if (dadkp->dad_rdonly && !(bp->b_flags & B_READ)) {
		bioerror(bp, EROFS);
		return (DDI_FAILURE);
	}

	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
		bioerror(bp, ENXIO);
		return (DDI_FAILURE);
	}

	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
	/* track outstanding commands for drain/cleanup accounting */
	mutex_enter(&dadkp->dad_cmd_mutex);
	dadkp->dad_cmd_count++;
	mutex_exit(&dadkp->dad_cmd_mutex);
	FLC_ENQUE(dadkp->dad_flcobjp, bp);

	return (DDI_SUCCESS);
}
673
/*
 * Polled crash-dump write path.  Bypasses the flow-control queue:
 * a CPF_NOINTR packet is prepared and transported synchronously,
 * looping through dadk_iosetup() until the whole transfer completes
 * or an error is recorded on the buf.  Same EROFS/ENXIO validation
 * as dadk_strategy().
 */
int
dadk_dump(opaque_t objp, struct buf *bp)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct cmpkt *pktp;

	if (dadkp->dad_rdonly) {
		bioerror(bp, EROFS);
		return (DDI_FAILURE);
	}

	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
		bioerror(bp, ENXIO);
		return (DDI_FAILURE);
	}

	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));

	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_polldone, NULL, NULL);
	if (!pktp) {
		cmn_err(CE_WARN, "no resources for dumping");
		bioerror(bp, EIO);
		return (DDI_FAILURE);
	}
	pktp->cp_flags |= CPF_NOINTR;

	(void) dadk_ioprep(dadkp, pktp);
	dadk_transport(dadkp, bp);
	pktp->cp_byteleft -= pktp->cp_bytexfer;

	/* keep issuing partial transfers until done or a buf error appears */
	while (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
		(void) dadk_iosetup(dadkp, pktp);
		dadk_transport(dadkp, bp);
		pktp->cp_byteleft -= pktp->cp_bytexfer;
	}

	if (pktp->cp_private)
		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
	return (DDI_SUCCESS);
}
715
/*
 * Device ioctl dispatcher.  Handles DKIOCGETDEF (read a defect sector
 * through the queue), DIOCTL_RWCMD (async read/write pass-through),
 * DKIOC_UPDATEFW (privileged firmware update), and
 * DKIOCFLUSHWRITECACHE (sync or callback-based cache flush) directly.
 * For fixed disks everything else is forwarded to the controller via
 * dadk_ctl_ioctl(); for removable media the remaining DKIOC/CDROM
 * commands are mapped to DCMD_* opcodes and issued through
 * dadk_rmb_ioctl().
 */
/* ARGSUSED */
int
dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
    cred_t *cred_p, int *rval_p)
{
	struct dadk *dadkp = (struct dadk *)objp;

	switch (cmd) {
	case DKIOCGETDEF:
		{
		struct buf	*bp;
		int	err, head;
		unsigned char	*secbuf;
		STRUCT_DECL(defect_header, adh);

		STRUCT_INIT(adh, flag & FMODELS);

		/*
		 * copyin header ....
		 * yields head number and buffer address
		 */
		if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
		    flag))
			return (EFAULT);
		head = STRUCT_FGET(adh, head);
		if (head < 0 || head >= dadkp->dad_phyg.g_head)
			return (ENXIO);
		secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);

		bp = getrbuf(KM_SLEEP);

		bp->b_edev = dev;
		bp->b_dev = cmpdev(dev);
		bp->b_flags = B_BUSY;
		bp->b_resid = 0;
		bp->b_bcount = NBPSCTR;
		bp->b_un.b_addr = (caddr_t)secbuf;
		bp->b_blkno = head; /* I had to put it somwhere! */
		/* b_forw/b_back flag this as a DCMD_GETDEF internal command */
		bp->b_forw = (struct buf *)dadkp;
		bp->b_back = (struct buf *)DCMD_GETDEF;

		mutex_enter(&dadkp->dad_cmd_mutex);
		dadkp->dad_cmd_count++;
		mutex_exit(&dadkp->dad_cmd_mutex);
		FLC_ENQUE(dadkp->dad_flcobjp, bp);
		err = biowait(bp);
		if (!err) {
			if (ddi_copyout((caddr_t)secbuf,
			    STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
				err = ENXIO;
		}
		kmem_free(secbuf, NBPSCTR);
		freerbuf(bp);
		return (err);
		}
	case DIOCTL_RWCMD:
		{
		struct dadkio_rwcmd *rwcmdp;
		int status, rw;

		/*
		 * copied in by cmdk and, if necessary, converted to the
		 * correct datamodel
		 */
		rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;

		/*
		 * handle the complex cases here; we pass these
		 * through to the driver, which will queue them and
		 * handle the requests asynchronously.  The simpler
		 * cases ,which can return immediately, fail here, and
		 * the request reverts to the dadk_ioctl routine, while
		 *  will reroute them directly to the ata driver.
		 */
		switch (rwcmdp->cmd) {
			case DADKIO_RWCMD_READ :
				/*FALLTHROUGH*/
			case DADKIO_RWCMD_WRITE:
				rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
				    B_WRITE : B_READ);
				status = dadk_dk_buf_setup(dadkp,
				    (opaque_t)rwcmdp, dev, ((flag &FKIOCTL) ?
				    UIO_SYSSPACE : UIO_USERSPACE), rw);
				return (status);
			default:
				return (EINVAL);
			}
		}
	case DKIOC_UPDATEFW:

		/*
		 * Require PRIV_ALL privilege to invoke DKIOC_UPDATEFW
		 * to protect the firmware update from malicious use
		 */
		if (PRIV_POLICY(cred_p, PRIV_ALL, B_FALSE, EPERM, NULL) != 0)
			return (EPERM);
		else
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));

	case DKIOCFLUSHWRITECACHE:
		{
			struct buf *bp;
			int err = 0;
			struct dk_callback *dkc = (struct dk_callback *)arg;
			struct cmpkt *pktp;
			int is_sync = 1;

			mutex_enter(&dadkp->dad_mutex);
			if (dadkp->dad_noflush || !  dadkp->dad_wce) {
				err = dadkp->dad_noflush ? ENOTSUP : 0;
				mutex_exit(&dadkp->dad_mutex);
				/*
				 * If a callback was requested: a
				 * callback will always be done if the
				 * caller saw the DKIOCFLUSHWRITECACHE
				 * ioctl return 0, and never done if the
				 * caller saw the ioctl return an error.
				 */
				if ((flag & FKIOCTL) && dkc != NULL &&
				    dkc->dkc_callback != NULL) {
					(*dkc->dkc_callback)(dkc->dkc_cookie,
					    err);
					/*
					 * Did callback and reported error.
					 * Since we did a callback, ioctl
					 * should return 0.
					 */
					err = 0;
				}
				return (err);
			}
			mutex_exit(&dadkp->dad_mutex);

			bp = getrbuf(KM_SLEEP);

			bp->b_edev = dev;
			bp->b_dev = cmpdev(dev);
			bp->b_flags = B_BUSY;
			bp->b_resid = 0;
			bp->b_bcount = 0;
			SET_BP_SEC(bp, 0);

			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				/*
				 * Async flush: copy the callback so it
				 * survives the caller; dadk_flushdone
				 * frees the copy and the buf.
				 */
				struct dk_callback *dkc2 =
				    (struct dk_callback *)kmem_zalloc(
				    sizeof (struct dk_callback), KM_SLEEP);

				bcopy(dkc, dkc2, sizeof (*dkc2));
				bp->b_private = dkc2;
				bp->b_iodone = dadk_flushdone;
				is_sync = 0;
			}

			/*
			 * Setup command pkt
			 * dadk_pktprep() can't fail since DDI_DMA_SLEEP set
			 */
			pktp = dadk_pktprep(dadkp, NULL, bp,
			    dadk_iodone, DDI_DMA_SLEEP, NULL);

			pktp->cp_time = DADK_FLUSH_CACHE_TIME;

			*((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
			pktp->cp_byteleft = 0;
			pktp->cp_private = NULL;
			pktp->cp_secleft = 0;
			pktp->cp_srtsec = -1;
			pktp->cp_bytexfer = 0;

			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);

			mutex_enter(&dadkp->dad_cmd_mutex);
			dadkp->dad_cmd_count++;
			mutex_exit(&dadkp->dad_cmd_mutex);
			FLC_ENQUE(dadkp->dad_flcobjp, bp);

			if (is_sync) {
				err = biowait(bp);
				freerbuf(bp);
			}
			return (err);
		}
	default:
		if (!dadkp->dad_rmb)
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));
	}

	/* removable-media commands: map ioctl to a DCMD_* opcode */
	switch (cmd) {
	case CDROMSTOP:
		return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0,
		    0, DADK_SILENT));
	case CDROMSTART:
		return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0,
		    0, DADK_SILENT));
	case DKIOCLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
	case DKIOCUNLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
	case DKIOCEJECT:
	case CDROMEJECT:
		{
			int ret;

			if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			mutex_enter(&dadkp->dad_mutex);
			dadkp->dad_iostate = DKIO_EJECTED;
			cv_broadcast(&dadkp->dad_state_cv);
			mutex_exit(&dadkp->dad_mutex);

			return (0);

		}
	default:
		return (ENOTTY);
	/*
	 * cdrom audio commands
	 */
	case CDROMPAUSE:
		cmd = DCMD_PAUSE;
		break;
	case CDROMRESUME:
		cmd = DCMD_RESUME;
		break;
	case CDROMPLAYMSF:
		cmd = DCMD_PLAYMSF;
		break;
	case CDROMPLAYTRKIND:
		cmd = DCMD_PLAYTRKIND;
		break;
	case CDROMREADTOCHDR:
		cmd = DCMD_READTOCHDR;
		break;
	case CDROMREADTOCENTRY:
		cmd = DCMD_READTOCENT;
		break;
	case CDROMVOLCTRL:
		cmd = DCMD_VOLCTRL;
		break;
	case CDROMSUBCHNL:
		cmd = DCMD_SUBCHNL;
		break;
	case CDROMREADMODE2:
		cmd = DCMD_READMODE2;
		break;
	case CDROMREADMODE1:
		cmd = DCMD_READMODE1;
		break;
	case CDROMREADOFFSET:
		cmd = DCMD_READOFFSET;
		break;
	}
	return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
}
977
978 int
979 dadk_flushdone(struct buf *bp)
980 {
981 struct dk_callback *dkc = bp->b_private;
982
983 ASSERT(dkc != NULL && dkc->dkc_callback != NULL);
984
985 (*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));
986
987 kmem_free(dkc, sizeof (*dkc));
988 freerbuf(bp);
989 return (0);
990 }
991
992 int
993 dadk_getphygeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
994 {
995 struct dadk *dadkp = (struct dadk *)objp;
996
997 bcopy((caddr_t)&dadkp->dad_phyg, (caddr_t)dkgeom_p,
998 sizeof (struct tgdk_geom));
999 return (DDI_SUCCESS);
1000 }
1001
1002 int
1003 dadk_getgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1004 {
1005 struct dadk *dadkp = (struct dadk *)objp;
1006 bcopy((caddr_t)&dadkp->dad_logg, (caddr_t)dkgeom_p,
1007 sizeof (struct tgdk_geom));
1008 return (DDI_SUCCESS);
1009 }
1010
1011 int
1012 dadk_setgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1013 {
1014 struct dadk *dadkp = (struct dadk *)objp;
1015
1016 dadkp->dad_logg.g_cyl = dkgeom_p->g_cyl;
1017 dadkp->dad_logg.g_head = dkgeom_p->g_head;
1018 dadkp->dad_logg.g_sec = dkgeom_p->g_sec;
1019 dadkp->dad_logg.g_cap = dkgeom_p->g_cap;
1020 return (DDI_SUCCESS);
1021 }
1022
1023
1024 tgdk_iob_handle
1025 dadk_iob_alloc(opaque_t objp, daddr_t blkno, ssize_t xfer, int kmsflg)
1026 {
1027 struct dadk *dadkp = (struct dadk *)objp;
1028 struct buf *bp;
1029 struct tgdk_iob *iobp;
1030 size_t rlen;
1031
1032 iobp = kmem_zalloc(sizeof (*iobp), kmsflg);
1033 if (iobp == NULL)
1034 return (NULL);
1035 if ((bp = getrbuf(kmsflg)) == NULL) {
1036 kmem_free(iobp, sizeof (*iobp));
1037 return (NULL);
1038 }
1039
1040 iobp->b_psec = LBLK2SEC(blkno, dadkp->dad_blkshf);
1041 iobp->b_pbyteoff = (blkno & ((1<<dadkp->dad_blkshf) - 1)) << SCTRSHFT;
1042 iobp->b_pbytecnt = ((iobp->b_pbyteoff + xfer + dadkp->DAD_SECSIZ - 1)
1043 >> dadkp->dad_secshf) << dadkp->dad_secshf;
1044
1045 bp->b_un.b_addr = 0;
1046 /*
1047 * use i_ddi_mem_alloc() for now until we have an interface to allocate
1048 * memory for DMA which doesn't require a DMA handle.
1049 */
1050 if (i_ddi_mem_alloc((dadkp->dad_sd)->sd_dev, &dadk_alloc_attr,
1051 (size_t)iobp->b_pbytecnt, ((kmsflg == KM_SLEEP) ? 1 : 0), 0, NULL,
1052 &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
1053 freerbuf(bp);
1054 kmem_free(iobp, sizeof (*iobp));
1055 return (NULL);
1056 }
1057 iobp->b_flag |= IOB_BPALLOC | IOB_BPBUFALLOC;
1058 iobp->b_bp = bp;
1059 iobp->b_lblk = blkno;
1060 iobp->b_xfer = xfer;
1061 iobp->b_lblk = blkno;
1062 iobp->b_xfer = xfer;
1063 return (iobp);
1064 }
1065
1066 /* ARGSUSED */
1067 int
1068 dadk_iob_free(opaque_t objp, struct tgdk_iob *iobp)
1069 {
1070 struct buf *bp;
1071
1072 if (iobp) {
1073 if (iobp->b_bp && (iobp->b_flag & IOB_BPALLOC)) {
1074 bp = iobp->b_bp;
1075 if (bp->b_un.b_addr && (iobp->b_flag & IOB_BPBUFALLOC))
1076 i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
1077 freerbuf(bp);
1078 }
1079 kmem_free(iobp, sizeof (*iobp));
1080 }
1081 return (DDI_SUCCESS);
1082 }
1083
/*
 * Handle-to-cookie: return the caller-visible data address inside the
 * sector-aligned buffer (base plus the intra-sector byte offset).
 */
/* ARGSUSED */
caddr_t
dadk_iob_htoc(opaque_t objp, struct tgdk_iob *iobp)
{
	return (iobp->b_bp->b_un.b_addr+iobp->b_pbyteoff);
}
1090
1091
/*
 * Perform a synchronous transfer on an iob handle: queue the buf
 * through flow control and wait for completion.  Returns the data
 * address (as dadk_iob_htoc()) on success, NULL on error or on an
 * attempted write to read-only media (EROFS via bioerror).
 */
caddr_t
dadk_iob_xfer(opaque_t objp, struct tgdk_iob *iobp, int rw)
{
	struct dadk	*dadkp = (struct dadk *)objp;
	struct buf	*bp;
	int		err;

	bp = iobp->b_bp;
	if (dadkp->dad_rdonly && !(rw & B_READ)) {
		bioerror(bp, EROFS);
		return (NULL);
	}

	/* transfer the rounded-out physical span starting at b_psec */
	bp->b_flags |= (B_BUSY | rw);
	bp->b_bcount = iobp->b_pbytecnt;
	SET_BP_SEC(bp, iobp->b_psec);
	bp->av_back = (struct buf *)0;
	bp->b_resid = 0;

	/* call flow control */
	mutex_enter(&dadkp->dad_cmd_mutex);
	dadkp->dad_cmd_count++;
	mutex_exit(&dadkp->dad_cmd_mutex);
	FLC_ENQUE(dadkp->dad_flcobjp, bp);
	err = biowait(bp);

	/* restore the caller's logical byte count and reset buf state */
	bp->b_bcount = iobp->b_xfer;
	bp->b_flags &= ~(B_DONE|B_BUSY);

	if (err)
		return (NULL);

	return (bp->b_un.b_addr+iobp->b_pbyteoff);
}
1126
1127 static void
1128 dadk_transport(opaque_t com_data, struct buf *bp)
1129 {
1130 struct dadk *dadkp = (struct dadk *)com_data;
1131
1132 if (CTL_TRANSPORT(dadkp->dad_ctlobjp, GDA_BP_PKT(bp)) ==
1133 CTL_SEND_SUCCESS)
1134 return;
1135 dadk_restart((void*)GDA_BP_PKT(bp));
1136 }
1137
/*
 * Flow-control callback: ensure the buf has a command packet.  If one
 * is already attached, report success; otherwise prepare a packet with
 * dadk_iodone completion and run read/write preparation on it.
 */
static int
dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t), caddr_t arg)
{
	struct cmpkt *pktp;
	struct dadk *dadkp = (struct dadk *)com_data;

	if (GDA_BP_PKT(bp))
		return (DDI_SUCCESS);

	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, func, arg);
	if (!pktp)
		return (DDI_FAILURE);

	return (dadk_ioprep(dadkp, pktp));
}
1153
1154 /*
1155 * Read, Write preparation
1156 */
1157 static int
1158 dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp)
1159 {
1160 struct buf *bp;
1161
1162 bp = pktp->cp_bp;
1163 if (bp->b_forw == (struct buf *)dadkp)
1164 *((char *)(pktp->cp_cdbp)) = (char)(intptr_t)bp->b_back;
1165
1166 else if (bp->b_flags & B_READ)
1167 *((char *)(pktp->cp_cdbp)) = DCMD_READ;
1168 else
1169 *((char *)(pktp->cp_cdbp)) = DCMD_WRITE;
1170 pktp->cp_byteleft = bp->b_bcount;
1171
1172 /* setup the bad block list handle */
1173 pktp->cp_private = BBH_GETHANDLE(dadkp->dad_bbhobjp, bp);
1174 return (dadk_iosetup(dadkp, pktp));
1175 }
1176
1177 static int
1178 dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp)
1179 {
1180 struct buf *bp;
1181 bbh_cookie_t bbhckp;
1182 int seccnt;
1183
1184 seccnt = pktp->cp_bytexfer >> dadkp->dad_secshf;
1185 pktp->cp_secleft -= seccnt;
1186
1187 if (pktp->cp_secleft) {
1188 pktp->cp_srtsec += seccnt;
1189 } else {
1190 /* get the first cookie from the bad block list */
1191 if (!pktp->cp_private) {
1192 bp = pktp->cp_bp;
1193 pktp->cp_srtsec = GET_BP_SEC(bp);
1194 pktp->cp_secleft = (bp->b_bcount >> dadkp->dad_secshf);
1195 } else {
1196 bbhckp = BBH_HTOC(dadkp->dad_bbhobjp,
1197 pktp->cp_private);
1198 pktp->cp_srtsec = BBH_GETCK_SECTOR(dadkp->dad_bbhobjp,
1199 bbhckp);
1200 pktp->cp_secleft = BBH_GETCK_SECLEN(dadkp->dad_bbhobjp,
1201 bbhckp);
1202 }
1203 }
1204
1205 pktp->cp_bytexfer = pktp->cp_secleft << dadkp->dad_secshf;
1206
1207 if (CTL_IOSETUP(dadkp->dad_ctlobjp, pktp)) {
1208 return (DDI_SUCCESS);
1209 } else {
1210 return (DDI_FAILURE);
1211 }
1212
1213
1214
1215
1216 }
1217
1218 static struct cmpkt *
1219 dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp, struct buf *bp,
1220 void (*cb_func)(struct buf *), int (*func)(caddr_t), caddr_t arg)
1221 {
1222 struct cmpkt *pktp;
1223
1224 pktp = gda_pktprep(dadkp->dad_ctlobjp, in_pktp, (opaque_t)bp, func,
1225 arg);
1226
1227 if (pktp) {
1228 pktp->cp_callback = dadk_pktcb;
1229 pktp->cp_time = DADK_IO_TIME;
1230 pktp->cp_flags = 0;
1231 pktp->cp_iodone = cb_func;
1232 pktp->cp_dev_private = (opaque_t)dadkp;
1233
1234 }
1235
1236 return (pktp);
1237 }
1238
1239
1240 static void
1241 dadk_restart(void *vpktp)
1242 {
1243 struct cmpkt *pktp = (struct cmpkt *)vpktp;
1244
1245 if (dadk_ioretry(pktp, QUE_COMMAND) == JUST_RETURN)
1246 return;
1247 pktp->cp_iodone(pktp->cp_bp);
1248 }
1249
/*
 * Retry or complete a failed command.
 *
 *   QUE_COMMAND:	 re-issue the packet up to DADK_RETRY_COUNT times;
 *			 returns JUST_RETURN when the retry was accepted.
 *			 On exhaustion/transport failure, falls through to
 *			 error completion with ENXIO.
 *   COMMAND_DONE_ERROR: account the residual and set an error on the buf.
 *   COMMAND_DONE:	 no further action.
 *
 * Returns JUST_RETURN or COMMAND_DONE; the caller completes the request
 * when COMMAND_DONE is returned.
 */
static int
dadk_ioretry(struct cmpkt *pktp, int action)
{
	struct buf *bp;
	struct dadk *dadkp = PKT2DADK(pktp);

	switch (action) {
	case QUE_COMMAND:
		if (pktp->cp_retry++ < DADK_RETRY_COUNT) {
			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);
			if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) ==
			    CTL_SEND_SUCCESS) {
				return (JUST_RETURN);
			}
			gda_log(dadkp->dad_sd->sd_dev, dadk_name,
			    CE_WARN, "transport of command fails\n");
		} else
			gda_log(dadkp->dad_sd->sd_dev,
			    dadk_name, CE_WARN,
			    "exceeds maximum number of retries\n");
		bioerror(pktp->cp_bp, ENXIO);
		/*FALLTHROUGH*/
	case COMMAND_DONE_ERROR:
		/* account for the bytes that never transferred */
		bp = pktp->cp_bp;
		bp->b_resid += pktp->cp_byteleft - pktp->cp_bytexfer +
		    pktp->cp_resid;
		if (geterror(bp) == 0) {
			if ((*((char *)(pktp->cp_cdbp)) == DCMD_FLUSH_CACHE) &&
			    (pktp->cp_dev_private == (opaque_t)dadkp) &&
			    ((int)(*(char *)pktp->cp_scbp) == DERR_ABORT)) {
				/*
				 * Flag "unimplemented" responses for
				 * DCMD_FLUSH_CACHE as ENOTSUP
				 */
				bioerror(bp, ENOTSUP);
				mutex_enter(&dadkp->dad_mutex);
				dadkp->dad_noflush = 1;
				mutex_exit(&dadkp->dad_mutex);
			} else {
				bioerror(bp, EIO);
			}
		}
		/*FALLTHROUGH*/
	case COMMAND_DONE:
	default:
		return (COMMAND_DONE);
	}
}
1298
1299
/*
 * Command-completion callback.  Dispatches on cp_reason and, for check
 * conditions, on the action computed by dadk_chkerr(): the command may
 * be retried, its error status recorded into a waiting ioctl packet,
 * or the request completed through cp_iodone.
 */
static void
dadk_pktcb(struct cmpkt *pktp)
{
	int action;
	struct dadkio_rwcmd *rwcmdp;

	rwcmdp = (struct dadkio_rwcmd *)pktp->cp_passthru; /* ioctl packet */

	if (pktp->cp_reason == CPS_SUCCESS) {
		if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT))
			rwcmdp->status.status = DADKIO_STAT_NO_ERROR;
		pktp->cp_iodone(pktp->cp_bp);
		return;
	}

	/* ioctl-originated request: record status, no retries */
	if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT)) {
		if (pktp->cp_reason == CPS_CHKERR)
			dadk_recorderr(pktp, rwcmdp);
		dadk_iodone(pktp->cp_bp);
		return;
	}

	if (pktp->cp_reason == CPS_CHKERR)
		action = dadk_chkerr(pktp);
	else
		action = COMMAND_DONE_ERROR;

	/* dadk_chkerr() already scheduled a delayed restart (DERR_BUSY) */
	if (action == JUST_RETURN)
		return;

	/*
	 * If we are panicking don't retry the command
	 * just fail it so we can go down completing all
	 * of the buffers.
	 */
	if (ddi_in_panic() && action == QUE_COMMAND)
		action = COMMAND_DONE_ERROR;

	if (action != COMMAND_DONE) {
		if ((dadk_ioretry(pktp, action)) == JUST_RETURN)
			return;
	}
	pktp->cp_iodone(pktp->cp_bp);
}
1344
1345
1346
/*
 * Error disposition table, indexed by the DERR_* status byte returned
 * in cp_scbp.  d_action drives the retry/completion decision in
 * dadk_pktcb()/dadk_ioretry(); d_severity selects the gda_errmsg()
 * logging level.
 */
static struct dadkio_derr dadk_errtab[] = {
	{COMMAND_DONE, GDA_INFORMATIONAL},	/*  0 DERR_SUCCESS	*/
	{QUE_COMMAND, GDA_FATAL},		/*  1 DERR_AMNF		*/
	{QUE_COMMAND, GDA_FATAL},		/*  2 DERR_TKONF	*/
	{COMMAND_DONE_ERROR, GDA_INFORMATIONAL}, /*  3 DERR_ABORT	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/*  4 DERR_DWF		*/
	{QUE_COMMAND, GDA_FATAL},		/*  5 DERR_IDNF		*/
	{JUST_RETURN, GDA_INFORMATIONAL},	/*  6 DERR_BUSY		*/
	{QUE_COMMAND, GDA_FATAL},		/*  7 DERR_UNC		*/
	{QUE_COMMAND, GDA_RETRYABLE},		/*  8 DERR_BBK		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/*  9 DERR_INVCDB	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 10 DERR_HARD		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 11 DERR_ILI		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 12 DERR_EOM		*/
	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 13 DERR_MCR		*/
	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 14 DERR_RECOVER	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 15 DERR_NOTREADY	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/* 16 DERR_MEDIUM	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 17 DERR_HW		*/
	{COMMAND_DONE, GDA_FATAL},		/* 18 DERR_ILL		*/
	{COMMAND_DONE, GDA_FATAL},		/* 19 DERR_UNIT_ATTN	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 20 DERR_DATA_PROT	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 21 DERR_MISCOMPARE	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/* 22 DERR_ICRC		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 23 DERR_RESV		*/
};
1373
1374 static int
1375 dadk_chkerr(struct cmpkt *pktp)
1376 {
1377 daddr_t err_blkno;
1378 struct dadk *dadkp = PKT2DADK(pktp);
1379 dadk_errstats_t *dep;
1380 int scb = *(char *)pktp->cp_scbp;
1381
1382 if (scb == DERR_SUCCESS) {
1383 if (pktp->cp_retry != 0 && dadkp->dad_errstats != NULL) {
1384 dep = (dadk_errstats_t *)
1385 dadkp->dad_errstats->ks_data;
1386 dep->dadk_rq_recov_err.value.ui32++;
1387 }
1388 return (COMMAND_DONE);
1389 }
1390
1391 if (pktp->cp_retry) {
1392 err_blkno = pktp->cp_srtsec + ((pktp->cp_bytexfer -
1393 pktp->cp_resid) >> dadkp->dad_secshf);
1394 } else
1395 err_blkno = -1;
1396
1397 if (dadkp->dad_errstats != NULL) {
1398 dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;
1399
1400 switch (dadk_errtab[scb].d_severity) {
1401 case GDA_RETRYABLE:
1402 dep->dadk_softerrs.value.ui32++;
1403 break;
1404
1405 case GDA_FATAL:
1406 dep->dadk_harderrs.value.ui32++;
1407 break;
1408
1409 default:
1410 break;
1411 }
1412
1413 switch (scb) {
1414 case DERR_INVCDB:
1415 case DERR_ILI:
1416 case DERR_EOM:
1417 case DERR_HW:
1418 case DERR_ICRC:
1419 dep->dadk_transerrs.value.ui32++;
1420 break;
1421
1422 case DERR_AMNF:
1423 case DERR_TKONF:
1424 case DERR_DWF:
1425 case DERR_BBK:
1426 case DERR_UNC:
1427 case DERR_HARD:
1428 case DERR_MEDIUM:
1429 case DERR_DATA_PROT:
1430 case DERR_MISCOMP:
1431 dep->dadk_rq_media_err.value.ui32++;
1432 break;
1433
1434 case DERR_NOTREADY:
1435 dep->dadk_rq_ntrdy_err.value.ui32++;
1436 break;
1437
1438 case DERR_IDNF:
1439 case DERR_UNIT_ATTN:
1440 dep->dadk_rq_nodev_err.value.ui32++;
1441 break;
1442
1443 case DERR_ILL:
1444 case DERR_RESV:
1445 dep->dadk_rq_illrq_err.value.ui32++;
1446 break;
1447
1448 default:
1449 break;
1450 }
1451 }
1452
1453 /* if attempting to read a sector from a cdrom audio disk */
1454 if ((dadkp->dad_cdrom) &&
1455 (*((char *)(pktp->cp_cdbp)) == DCMD_READ) &&
1456 (scb == DERR_ILL)) {
1457 return (COMMAND_DONE);
1458 }
1459 if (pktp->cp_passthru == NULL) {
1460 gda_errmsg(dadkp->dad_sd, pktp, dadk_name,
1461 dadk_errtab[scb].d_severity, pktp->cp_srtsec,
1462 err_blkno, dadk_cmds, dadk_sense);
1463 }
1464
1465 if (scb == DERR_BUSY) {
1466 (void) timeout(dadk_restart, (void *)pktp, DADK_BSY_TIMEOUT);
1467 }
1468
1469 return (dadk_errtab[scb].d_action);
1470 }
1471
1472 static void
1473 dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp)
1474 {
1475 struct dadk *dadkp;
1476 int scb;
1477
1478 dadkp = PKT2DADK(pktp);
1479 scb = (int)(*(char *)pktp->cp_scbp);
1480
1481
1482 rwcmdp->status.failed_blk = rwcmdp->blkaddr +
1483 ((pktp->cp_bytexfer - pktp->cp_resid) >> dadkp->dad_secshf);
1484
1485 rwcmdp->status.resid = pktp->cp_bp->b_resid +
1486 pktp->cp_byteleft - pktp->cp_bytexfer + pktp->cp_resid;
1487 switch ((int)(* (char *)pktp->cp_scbp)) {
1488 case DERR_AMNF:
1489 case DERR_ABORT:
1490 rwcmdp->status.status = DADKIO_STAT_ILLEGAL_REQUEST;
1491 break;
1492 case DERR_DWF:
1493 case DERR_IDNF:
1494 rwcmdp->status.status = DADKIO_STAT_ILLEGAL_ADDRESS;
1495 break;
1496 case DERR_TKONF:
1497 case DERR_UNC:
1498 case DERR_BBK:
1499 rwcmdp->status.status = DADKIO_STAT_MEDIUM_ERROR;
1500 rwcmdp->status.failed_blk_is_valid = 1;
1501 rwcmdp->status.resid = 0;
1502 break;
1503 case DERR_BUSY:
1504 rwcmdp->status.status = DADKIO_STAT_NOT_READY;
1505 break;
1506 case DERR_INVCDB:
1507 case DERR_HARD:
1508 rwcmdp->status.status = DADKIO_STAT_HARDWARE_ERROR;
1509 break;
1510 case DERR_ICRC:
1511 default:
1512 rwcmdp->status.status = DADKIO_STAT_NOT_SUPPORTED;
1513 }
1514
1515 if (rwcmdp->flags & DADKIO_FLAG_SILENT)
1516 return;
1517 gda_errmsg(dadkp->dad_sd, pktp, dadk_name, dadk_errtab[scb].d_severity,
1518 rwcmdp->blkaddr, rwcmdp->status.failed_blk,
1519 dadk_cmds, dadk_sense);
1520 }
1521
1522 /*ARGSUSED*/
1523 static void
1524 dadk_polldone(struct buf *bp)
1525 {
1526 struct cmpkt *pktp;
1527 struct dadk *dadkp;
1528
1529 pktp = GDA_BP_PKT(bp);
1530 dadkp = PKT2DADK(pktp);
1531 mutex_enter(&dadkp->dad_cmd_mutex);
1532 dadkp->dad_cmd_count--;
1533 mutex_exit(&dadkp->dad_cmd_mutex);
1534 }
1535
/*
 * I/O completion callback.  When the request still has bytes left and
 * no error has been recorded, set up and transport the next chunk;
 * otherwise release flow control, free the packet and bad-block
 * handle, and complete the buf with biodone().
 */
static void
dadk_iodone(struct buf *bp)
{
	struct cmpkt *pktp;
	struct dadk *dadkp;

	pktp = GDA_BP_PKT(bp);
	dadkp = PKT2DADK(pktp);

	/* check for all iodone */
	pktp->cp_byteleft -= pktp->cp_bytexfer;
	if (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
		/* fresh retry budget for the next chunk */
		pktp->cp_retry = 0;
		(void) dadk_iosetup(dadkp, pktp);


		/* transport the next one */
		if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) == CTL_SEND_SUCCESS)
			return;
		if ((dadk_ioretry(pktp, QUE_COMMAND)) == JUST_RETURN)
			return;
	}

	/* start next one */
	FLC_DEQUE(dadkp->dad_flcobjp, bp);

	/* free pkt */
	if (pktp->cp_private)
		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
	mutex_enter(&dadkp->dad_cmd_mutex);
	dadkp->dad_cmd_count--;
	mutex_exit(&dadkp->dad_cmd_mutex);
	biodone(bp);
}
1571
/*
 * DKIOCSTATE support: block until the removable-media state differs
 * from *state, then return the new state through *state.  Returns 0 on
 * a state change, ENXIO for non-removable devices, or EINTR when the
 * wait is interrupted by a signal.  Starts the per-device watch thread
 * on first use; dad_thread_cnt tracks waiters so the thread can exit.
 */
int
dadk_check_media(opaque_t objp, int *state)
{
	struct dadk *dadkp = (struct dadk *)objp;

	if (!dadkp->dad_rmb) {
		return (ENXIO);
	}
#ifdef DADK_DEBUG
	if (dadk_debug & DSTATE)
		PRF("dadk_check_media: user state %x disk state %x\n",
		    *state, dadkp->dad_iostate);
#endif
	/*
	 * If state already changed just return
	 */
	if (*state != dadkp->dad_iostate) {
		*state = dadkp->dad_iostate;
		return (0);
	}

	/*
	 * Startup polling on thread state
	 */
	mutex_enter(&dadkp->dad_mutex);
	if (dadkp->dad_thread_cnt == 0) {
		/*
		 * One thread per removable dadk device
		 */
		(void) thread_create(NULL, 0, dadk_watch_thread, dadkp, 0, &p0,
		    TS_RUN, v.v_maxsyspri - 2);
	}
	dadkp->dad_thread_cnt++;

	/*
	 * Wait for state to change
	 */
	do {
		/* cv_wait_sig() returns 0 when interrupted by a signal */
		if (cv_wait_sig(&dadkp->dad_state_cv, &dadkp->dad_mutex) == 0) {
			dadkp->dad_thread_cnt--;
			mutex_exit(&dadkp->dad_mutex);
			return (EINTR);
		}
	} while (*state == dadkp->dad_iostate);
	*state = dadkp->dad_iostate;
	dadkp->dad_thread_cnt--;
	mutex_exit(&dadkp->dad_mutex);
	return (0);
}
1621
1622
1623 #define MEDIA_ACCESS_DELAY 2000000
1624
/*
 * Per-device media-state polling thread, created by dadk_check_media().
 * Polls the drive state every dadk_check_media_time microseconds and
 * broadcasts on dad_state_cv when the state changes.  Exits once
 * dad_thread_cnt drops to zero (no more waiters).
 */
static void
dadk_watch_thread(struct dadk *dadkp)
{
	enum dkio_state state;
	int interval;

	interval = drv_usectohz(dadk_check_media_time);

	do {
		if (dadk_rmb_ioctl(dadkp, DCMD_GET_STATE, (intptr_t)&state, 0,
		    DADK_SILENT)) {
			/*
			 * Assume state remained the same
			 */
			state = dadkp->dad_iostate;
		}

		/*
		 * now signal the waiting thread if this is *not* the
		 * specified state;
		 * delay the signal if the state is DKIO_INSERTED
		 * to allow the target to recover
		 */
		if (state != dadkp->dad_iostate) {

			dadkp->dad_iostate = state;
			if (state == DKIO_INSERTED) {
				/*
				 * delay the signal to give the drive a chance
				 * to do what it apparently needs to do
				 */
				(void) timeout((void(*)(void *))cv_broadcast,
				    (void *)&dadkp->dad_state_cv,
				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
			} else {
				cv_broadcast(&dadkp->dad_state_cv);
			}
		}
		delay(interval);
	} while (dadkp->dad_thread_cnt);
}
1666
1667 int
1668 dadk_inquiry(opaque_t objp, opaque_t *inqpp)
1669 {
1670 struct dadk *dadkp = (struct dadk *)objp;
1671 struct scsi_inquiry **sinqpp = (struct scsi_inquiry **)inqpp;
1672
1673 if (dadkp && dadkp->dad_sd && dadkp->dad_sd->sd_inq) {
1674 *sinqpp = dadkp->dad_sd->sd_inq;
1675 return (DDI_SUCCESS);
1676 }
1677
1678 return (DDI_FAILURE);
1679 }
1680
1681 static int
1682 dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags, int silent)
1683
1684 {
1685 struct buf *bp;
1686 int err;
1687 struct cmpkt *pktp;
1688
1689 bp = getrbuf(KM_SLEEP);
1690 pktp = dadk_pktprep(dadkp, NULL, bp, dadk_rmb_iodone, NULL, NULL);
1691 if (!pktp) {
1692 freerbuf(bp);
1693 return (ENOMEM);
1694 }
1695 bp->b_back = (struct buf *)arg;
1696 bp->b_forw = (struct buf *)dadkp->dad_flcobjp;
1697 pktp->cp_passthru = (opaque_t)(intptr_t)silent;
1698
1699 err = dadk_ctl_ioctl(dadkp, cmd, (uintptr_t)pktp, flags);
1700 freerbuf(bp);
1701 gda_free(dadkp->dad_ctlobjp, pktp, NULL);
1702 return (err);
1703
1704
1705 }
1706
1707 static void
1708 dadk_rmb_iodone(struct buf *bp)
1709 {
1710 struct cmpkt *pktp;
1711 struct dadk *dadkp;
1712
1713 pktp = GDA_BP_PKT(bp);
1714 dadkp = PKT2DADK(pktp);
1715
1716 bp->b_flags &= ~(B_DONE|B_BUSY);
1717
1718 /* Start next one */
1719 FLC_DEQUE(dadkp->dad_flcobjp, bp);
1720
1721 mutex_enter(&dadkp->dad_cmd_mutex);
1722 dadkp->dad_cmd_count--;
1723 mutex_exit(&dadkp->dad_cmd_mutex);
1724 biodone(bp);
1725 }
1726
1727 static int
1728 dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp, dev_t dev,
1729 enum uio_seg dataspace, int rw)
1730 {
1731 struct dadkio_rwcmd *rwcmdp = (struct dadkio_rwcmd *)cmdp;
1732 struct buf *bp;
1733 struct iovec aiov;
1734 struct uio auio;
1735 struct uio *uio = &auio;
1736 int status;
1737
1738 bp = getrbuf(KM_SLEEP);
1739
1740 bp->av_forw = bp->b_forw = (struct buf *)dadkp;
1741 bp->b_back = (struct buf *)rwcmdp; /* ioctl packet */
1742
1743 bzero((caddr_t)&auio, sizeof (struct uio));
1744 bzero((caddr_t)&aiov, sizeof (struct iovec));
1745 aiov.iov_base = rwcmdp->bufaddr;
1746 aiov.iov_len = rwcmdp->buflen;
1747 uio->uio_iov = &aiov;
1748
1749 uio->uio_iovcnt = 1;
1750 uio->uio_resid = rwcmdp->buflen;
1751 uio->uio_segflg = dataspace;
1752
1753 /* Let physio do the rest... */
1754 status = physio(dadk_dk_strategy, bp, dev, rw, dadkmin, uio);
1755
1756 freerbuf(bp);
1757 return (status);
1758
1759 }
1760
/* Do not let a user gendisk request get too big or */
/* else we could use too many resources. */
1763
1764 static void
1765 dadkmin(struct buf *bp)
1766 {
1767 if (bp->b_bcount > dadk_dk_maxphys)
1768 bp->b_bcount = dadk_dk_maxphys;
1769 }
1770
1771 static int
1772 dadk_dk_strategy(struct buf *bp)
1773 {
1774 dadk_dk((struct dadk *)bp->av_forw, (struct dadkio_rwcmd *)bp->b_back,
1775 bp);
1776 return (0);
1777 }
1778
1779 static void
1780 dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *rwcmdp, struct buf *bp)
1781 {
1782 struct cmpkt *pktp;
1783
1784 pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, NULL, NULL);
1785 if (!pktp) {
1786 bioerror(bp, ENOMEM);
1787 biodone(bp);
1788 return;
1789 }
1790
1791 pktp->cp_passthru = rwcmdp;
1792
1793 (void) dadk_ioprep(dadkp, pktp);
1794
1795 mutex_enter(&dadkp->dad_cmd_mutex);
1796 dadkp->dad_cmd_count++;
1797 mutex_exit(&dadkp->dad_cmd_mutex);
1798 FLC_ENQUE(dadkp->dad_flcobjp, bp);
1799 }
1800
1801 /*
1802 * There is no existing way to notify cmdk module
1803 * when the command completed, so add this function
1804 * to calculate how many on-going commands.
1805 */
1806 int
1807 dadk_getcmds(opaque_t objp)
1808 {
1809 struct dadk *dadkp = (struct dadk *)objp;
1810 int count;
1811
1812 mutex_enter(&dadkp->dad_cmd_mutex);
1813 count = dadkp->dad_cmd_count;
1814 mutex_exit(&dadkp->dad_cmd_mutex);
1815 return (count);
1816 }
1817
1818 /*
1819 * this function was used to calc the cmd for CTL_IOCTL
1820 */
1821 static int
1822 dadk_ctl_ioctl(struct dadk *dadkp, uint32_t cmd, uintptr_t arg, int flag)
1823 {
1824 int error;
1825 mutex_enter(&dadkp->dad_cmd_mutex);
1826 dadkp->dad_cmd_count++;
1827 mutex_exit(&dadkp->dad_cmd_mutex);
1828 error = CTL_IOCTL(dadkp->dad_ctlobjp, cmd, arg, flag);
1829 mutex_enter(&dadkp->dad_cmd_mutex);
1830 dadkp->dad_cmd_count--;
1831 mutex_exit(&dadkp->dad_cmd_mutex);
1832 return (error);
1833 }