Print this page
5253 kmem_alloc/kmem_zalloc won't fail with KM_SLEEP
5254 getrbuf won't fail with KM_SLEEP
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/crypto/io/dca.c
+++ new/usr/src/uts/common/crypto/io/dca.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27
28 28 /*
29 29 * Deimos - cryptographic acceleration based upon Broadcom 582x.
30 30 */
31 31
32 32 #include <sys/types.h>
33 33 #include <sys/modctl.h>
34 34 #include <sys/conf.h>
35 35 #include <sys/devops.h>
36 36 #include <sys/ddi.h>
37 37 #include <sys/sunddi.h>
38 38 #include <sys/cmn_err.h>
39 39 #include <sys/varargs.h>
40 40 #include <sys/file.h>
41 41 #include <sys/stat.h>
42 42 #include <sys/kmem.h>
43 43 #include <sys/ioccom.h>
44 44 #include <sys/open.h>
45 45 #include <sys/cred.h>
46 46 #include <sys/kstat.h>
47 47 #include <sys/strsun.h>
48 48 #include <sys/note.h>
49 49 #include <sys/crypto/common.h>
50 50 #include <sys/crypto/spi.h>
51 51 #include <sys/ddifm.h>
52 52 #include <sys/fm/protocol.h>
53 53 #include <sys/fm/util.h>
54 54 #include <sys/fm/io/ddi.h>
55 55 #include <sys/crypto/dca.h>
56 56
57 57 /*
58 58 * Core Deimos driver.
59 59 */
60 60
/*
 * Forward declarations of the driver's internal (static) routines:
 * list/queue primitives, attach/detach/suspend/resume helpers, work
 * and request pool management, job scheduling, interrupt service,
 * and fault/timeout processing.
 */
static void dca_enlist2(dca_listnode_t *, dca_listnode_t *,
    kmutex_t *);
static void dca_rmlist2(dca_listnode_t *node, kmutex_t *);
static dca_listnode_t *dca_delist2(dca_listnode_t *q, kmutex_t *);
static void dca_free_context_list(dca_t *dca);
static int dca_free_context_low(crypto_ctx_t *ctx);
static int dca_attach(dev_info_t *, ddi_attach_cmd_t);
static int dca_detach(dev_info_t *, ddi_detach_cmd_t);
static int dca_suspend(dca_t *);
static int dca_resume(dca_t *);
static int dca_init(dca_t *);
static int dca_reset(dca_t *, int);
static int dca_initworklist(dca_t *, dca_worklist_t *);
static void dca_uninit(dca_t *);
static void dca_initq(dca_listnode_t *);
static void dca_enqueue(dca_listnode_t *, dca_listnode_t *);
static dca_listnode_t *dca_dequeue(dca_listnode_t *);
static dca_listnode_t *dca_unqueue(dca_listnode_t *);
static dca_request_t *dca_newreq(dca_t *);
static dca_work_t *dca_getwork(dca_t *, int);
static void dca_freework(dca_work_t *);
static dca_work_t *dca_newwork(dca_t *);
static void dca_destroywork(dca_work_t *);
static void dca_schedule(dca_t *, int);
static void dca_reclaim(dca_t *, int);
static uint_t dca_intr(char *);
static void dca_failure(dca_t *, ddi_fault_location_t,
    dca_fma_eclass_t index, uint64_t, int, char *, ...);
static void dca_jobtimeout(void *);
static int dca_drain(dca_t *);
static void dca_undrain(dca_t *);
static void dca_rejectjobs(dca_t *);

#ifdef SCHEDDELAY
static void dca_schedtimeout(void *);
#endif

/*
 * We want these inlined for performance.
 */
#ifndef DEBUG
#pragma inline(dca_freereq, dca_getreq, dca_freework, dca_getwork)
#pragma inline(dca_enqueue, dca_dequeue, dca_rmqueue, dca_done)
#pragma inline(dca_reverse, dca_length)
#endif
106 106
107 107 /*
108 108 * Device operations.
109 109 */
/*
 * dev_ops vector.  Note there are no cb_ops: this driver is reached
 * through the kernel crypto framework SPI, so only attach/detach and
 * power management entry points are populated.
 */
static struct dev_ops devops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	nodev,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	dca_attach,		/* devo_attach */
	dca_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	NULL,			/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};
124 124
/* Identification strings used when registering with the framework */
#define	IDENT		"PCI Crypto Accelerator"
#define	IDENT_SYM	"Crypto Accel Sym 2.0"
#define	IDENT_ASYM	"Crypto Accel Asym 2.0"

/* Space-padded, will be filled in dynamically during registration */
#define	IDENT3		"PCI Crypto Accelerator Mod 2.0"

#define	VENDOR		"Sun Microsystems, Inc."

/* age (in ticks) after which queued state is considered stale */
#define	STALETIME	(30 * SECOND)

#define	crypto_prov_notify	crypto_provider_notification
/* A 28 char function name doesn't leave much line space */
139 139 /*
140 140 * Module linkage.
141 141 */
static struct modldrv modldrv = {
	&mod_driverops,		/* drv_modops */
	IDENT,			/* drv_linkinfo */
	&devops,		/* drv_dev_ops */
};

/* module operations vector supplied by the kernel crypto framework */
extern struct mod_ops mod_cryptoops;

/* crypto linkage: identifies this module as a kCF provider */
static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	IDENT3
};

/* the module links in both as a driver and as a crypto provider */
static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev */
	&modldrv,		/* ml_linkage */
	&modlcrypto,
	NULL
};
161 161
162 162 /*
163 163 * CSPI information (entry points, provider info, etc.)
164 164 */
165 165
/* Mechanisms for the symmetric cipher provider */
static crypto_mech_info_t dca_mech_info_tab1[] = {
	/* DES-CBC */
	{SUN_CKM_DES_CBC, DES_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
	    DES_KEY_LEN, DES_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* 3DES-CBC */
	{SUN_CKM_DES3_CBC, DES3_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
	    DES3_MIN_KEY_LEN, DES3_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};

/* Mechanisms for the asymmetric cipher provider */
static crypto_mech_info_t dca_mech_info_tab2[] = {
	/* DSA: sign/verify only; key sizes advertised in bits */
	{SUN_CKM_DSA, DSA_MECH_INFO_TYPE,
	    CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_VERIFY_ATOMIC,
	    CRYPTO_BYTES2BITS(DSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(DSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS},

	/* RSA: raw (X.509) and PKCS#1 padded forms, full flag set */
	{SUN_CKM_RSA_X_509, RSA_X_509_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
	    CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_VERIFY_RECOVER |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
	    CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
	    CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
	{SUN_CKM_RSA_PKCS, RSA_PKCS_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
	    CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_VERIFY_RECOVER |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
	    CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
	    CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
};
212 212
static void dca_provider_status(crypto_provider_handle_t, uint_t *);

/* control ops: kCF queries provider readiness through this */
static crypto_control_ops_t dca_control_ops = {
	dca_provider_status
};

/* encrypt SPI entry points */
static int dca_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_encrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_encrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

/* decrypt SPI entry points */
static int dca_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_decrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_decrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

/* cipher ops vector, shared by both provider registrations below */
static crypto_cipher_ops_t dca_cipher_ops = {
	dca_encrypt_init,
	dca_encrypt,
	dca_encrypt_update,
	dca_encrypt_final,
	dca_encrypt_atomic,
	dca_decrypt_init,
	dca_decrypt,
	dca_decrypt_update,
	dca_decrypt_final,
	dca_decrypt_atomic
};
255 255
/* sign SPI entry points */
static int dca_sign_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign_recover(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_recover_atomic(crypto_provider_handle_t,
    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

/* sign ops vector for the asymmetric provider (dca_crypto_ops2) */
static crypto_sign_ops_t dca_sign_ops = {
	dca_sign_init,
	dca_sign,
	dca_sign_update,
	dca_sign_final,
	dca_sign_atomic,
	dca_sign_recover_init,
	dca_sign_recover,
	dca_sign_recover_atomic
};
285 285
/* verify SPI entry points */
static int dca_verify_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify_recover(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_verify_recover_atomic(crypto_provider_handle_t,
    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

/* verify ops vector for the asymmetric provider (dca_crypto_ops2) */
static crypto_verify_ops_t dca_verify_ops = {
	dca_verify_init,
	dca_verify,
	dca_verify_update,
	dca_verify_final,
	dca_verify_atomic,
	dca_verify_recover_init,
	dca_verify_recover,
	dca_verify_recover_atomic
};
315 315
static int dca_generate_random(crypto_provider_handle_t, crypto_session_id_t,
    uchar_t *, size_t, crypto_req_handle_t);

/* random number ops: generation only, no external seeding entry */
static crypto_random_number_ops_t dca_random_number_ops = {
	NULL,
	dca_generate_random
};

/* extended provider info helpers (one per provider, plus shared base) */
static int ext_info_sym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
static int ext_info_asym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
static int ext_info_base(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id);

/* management ops for the symmetric provider: only ext_info supported */
static crypto_provider_management_ops_t dca_provmanage_ops_1 = {
	ext_info_sym,		/* ext_info */
	NULL,			/* init_token */
	NULL,			/* init_pin */
	NULL			/* set_pin */
};

/* management ops for the asymmetric provider: only ext_info supported */
static crypto_provider_management_ops_t dca_provmanage_ops_2 = {
	ext_info_asym,		/* ext_info */
	NULL,			/* init_token */
	NULL,			/* init_pin */
	NULL			/* set_pin */
};

int dca_free_context(crypto_ctx_t *);

/* context ops: no template creation, only context teardown */
static crypto_ctx_ops_t dca_ctx_ops = {
	NULL,
	dca_free_context
};
351 351
/* Operations for the symmetric cipher provider */
static crypto_ops_t dca_crypto_ops1 = {
	&dca_control_ops,
	NULL,				/* digest_ops */
	&dca_cipher_ops,
	NULL,				/* mac_ops */
	NULL,				/* sign_ops */
	NULL,				/* verify_ops */
	NULL,				/* dual_ops */
	NULL,				/* cipher_mac_ops */
	NULL,				/* random_number_ops */
	NULL,				/* session_ops */
	NULL,				/* object_ops */
	NULL,				/* key_ops */
	&dca_provmanage_ops_1,		/* management_ops */
	&dca_ctx_ops
};

/* Operations for the asymmetric cipher provider */
static crypto_ops_t dca_crypto_ops2 = {
	&dca_control_ops,
	NULL,				/* digest_ops */
	&dca_cipher_ops,
	NULL,				/* mac_ops */
	&dca_sign_ops,
	&dca_verify_ops,
	NULL,				/* dual_ops */
	NULL,				/* cipher_mac_ops */
	&dca_random_number_ops,
	NULL,				/* session_ops */
	NULL,				/* object_ops */
	NULL,				/* key_ops */
	&dca_provmanage_ops_2,		/* management_ops */
	&dca_ctx_ops
};

/*
 * Provider information for the symmetric cipher provider.
 * pi_provider_description/dev/handle are filled in at attach time.
 */
static crypto_provider_info_t dca_prov_info1 = {
	CRYPTO_SPI_VERSION_1,
	NULL,				/* pi_provider_description */
	CRYPTO_HW_PROVIDER,
	NULL,				/* pi_provider_dev */
	NULL,				/* pi_provider_handle */
	&dca_crypto_ops1,
	sizeof (dca_mech_info_tab1)/sizeof (crypto_mech_info_t),
	dca_mech_info_tab1,
	0,				/* pi_logical_provider_count */
	NULL				/* pi_logical_providers */
};

/*
 * Provider information for the asymmetric cipher provider.
 * pi_provider_description/dev/handle are filled in at attach time.
 */
static crypto_provider_info_t dca_prov_info2 = {
	CRYPTO_SPI_VERSION_1,
	NULL,				/* pi_provider_description */
	CRYPTO_HW_PROVIDER,
	NULL,				/* pi_provider_dev */
	NULL,				/* pi_provider_handle */
	&dca_crypto_ops2,
	sizeof (dca_mech_info_tab2)/sizeof (crypto_mech_info_t),
	dca_mech_info_tab2,
	0,				/* pi_logical_provider_count */
	NULL				/* pi_logical_providers */
};
415 415
/* Convenience macros */
/* Retrieve the softc and instance number from a SPI crypto context */
#define	DCA_SOFTC_FROM_CTX(ctx, softc, instance)	{ \
	(softc) = (dca_t *)(ctx)->cc_provider; \
	(instance) = ddi_get_instance((softc)->dca_dip); \
}

/* Mechanism type stashed in the per-request context */
#define	DCA_MECH_FROM_CTX(ctx) \
	(((dca_request_t *)(ctx)->cc_provider_private)->dr_ctx.ctx_cm_type)

static int dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
    caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
    dca_chain_t *head, int *n_chain);
static uint64_t dca_ena(uint64_t ena);
static caddr_t dca_bufdaddr_out(crypto_data_t *data);
static char *dca_fma_eclass_string(char *model, dca_fma_eclass_t index);
static int dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
    dca_fma_eclass_t eclass_index);

/* FMA (fault management) setup/teardown and error callback */
static void dca_fma_init(dca_t *dca);
static void dca_fma_fini(dca_t *dca);
static int dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
439 439
440 440
/*
 * PCI IDs this driver recognizes.  dca_attach() matches first on the
 * subsystem vendor/device pair, then falls back to the primary
 * vendor/device pair.
 */
static dca_device_t dca_devices[] = {
	/* Broadcom vanilla variants */
	{ 0x14e4, 0x5820, "Broadcom 5820" },
	{ 0x14e4, 0x5821, "Broadcom 5821" },
	{ 0x14e4, 0x5822, "Broadcom 5822" },
	{ 0x14e4, 0x5825, "Broadcom 5825" },
	/* Sun specific OEMd variants */
	{ 0x108e, 0x5454, "SCA" },
	{ 0x108e, 0x5455, "SCA 1000" },
	{ 0x108e, 0x5457, "SCA 500" },
	/* subsysid should be 0x5457, but got 0x1 from HW. Assume both here. */
	{ 0x108e, 0x1, "SCA 500" },
};

/*
 * Device attributes.
 */
/* register access: little-endian, strict ordering, FM error reporting */
static struct ddi_device_acc_attr dca_regsattr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

static struct ddi_device_acc_attr dca_devattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

#if !defined(i386) && !defined(__i386)
/* DMA buffer access attributes; not used on x86 builds */
static struct ddi_device_acc_attr dca_bufattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
#endif

static struct ddi_dma_attr dca_dmaattr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0x0,			/* dma_attr_addr_lo */
	0xffffffffUL,		/* dma_attr_addr_hi */
	0x00ffffffUL,		/* dma_attr_count_max */
	0x40,			/* dma_attr_align */
	0x40,			/* dma_attr_burstsizes */
	0x1,			/* dma_attr_minxfer */
	0x00ffffffUL,		/* dma_attr_maxxfer */
	0xffffffffUL,		/* dma_attr_seg */
#if defined(i386) || defined(__i386) || defined(__amd64)
	512,			/* dma_attr_sgllen */
#else
	1,			/* dma_attr_sgllen */
#endif
	1,			/* dma_attr_granular */
	DDI_DMA_FLAGERR		/* dma_attr_flags */
};

/* per-instance soft state anchor; initialized in _init() */
static void *dca_state = NULL;
/* tunable; presumably a DMA-worthwhileness threshold — see its uses */
int dca_mindma = 2500;

/*
 * FMA eclass string definitions. Note that these string arrays must be
 * consistent with the dca_fma_eclass_t enum.
 */
static char *dca_fma_eclass_sca1000[] = {
	"sca1000.hw.device",
	"sca1000.hw.timeout",
	"sca1000.none"
};

static char *dca_fma_eclass_sca500[] = {
	"sca500.hw.device",
	"sca500.hw.timeout",
	"sca500.none"
};
516 516
517 517 /*
518 518 * DDI entry points.
519 519 */
520 520 int
521 521 _init(void)
522 522 {
523 523 int rv;
524 524
525 525 DBG(NULL, DMOD, "dca: in _init");
526 526
527 527 if ((rv = ddi_soft_state_init(&dca_state, sizeof (dca_t), 1)) != 0) {
528 528 /* this should *never* happen! */
529 529 return (rv);
530 530 }
531 531
532 532 if ((rv = mod_install(&modlinkage)) != 0) {
533 533 /* cleanup here */
534 534 ddi_soft_state_fini(&dca_state);
535 535 return (rv);
536 536 }
537 537
538 538 return (0);
539 539 }
540 540
541 541 int
542 542 _fini(void)
543 543 {
544 544 int rv;
545 545
546 546 DBG(NULL, DMOD, "dca: in _fini");
547 547
548 548 if ((rv = mod_remove(&modlinkage)) == 0) {
549 549 /* cleanup here */
550 550 ddi_soft_state_fini(&dca_state);
551 551 }
552 552 return (rv);
553 553 }
554 554
555 555 int
556 556 _info(struct modinfo *modinfop)
557 557 {
558 558 DBG(NULL, DMOD, "dca: in _info");
559 559
560 560 return (mod_info(&modlinkage, modinfop));
561 561 }
562 562
563 563 int
564 564 dca_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
565 565 {
566 566 ddi_acc_handle_t pci;
567 567 int instance;
568 568 ddi_iblock_cookie_t ibc;
569 569 int intr_added = 0;
570 570 dca_t *dca;
571 571 ushort_t venid;
572 572 ushort_t devid;
573 573 ushort_t revid;
574 574 ushort_t subsysid;
575 575 ushort_t subvenid;
576 576 int i;
577 577 int ret;
578 578 char ID[64];
579 579 static char *unknowndev = "Unknown device";
580 580
581 581 #if DEBUG
582 582 /* these are only used for debugging */
583 583 ushort_t pcicomm;
584 584 ushort_t pcistat;
585 585 uchar_t cachelinesz;
586 586 uchar_t mingnt;
587 587 uchar_t maxlat;
588 588 uchar_t lattmr;
589 589 #endif
590 590
591 591 instance = ddi_get_instance(dip);
592 592
593 593 DBG(NULL, DMOD, "dca: in dca_attach() for %d", instance);
594 594
595 595 switch (cmd) {
596 596 case DDI_RESUME:
597 597 if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
598 598 dca_diperror(dip, "no soft state in detach");
599 599 return (DDI_FAILURE);
600 600 }
601 601 /* assumption: we won't be DDI_DETACHed until we return */
602 602 return (dca_resume(dca));
603 603 case DDI_ATTACH:
604 604 break;
605 605 default:
606 606 return (DDI_FAILURE);
607 607 }
608 608
609 609 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
610 610 dca_diperror(dip, "slot does not support PCI bus-master");
611 611 return (DDI_FAILURE);
612 612 }
613 613
614 614 if (ddi_intr_hilevel(dip, 0) != 0) {
615 615 dca_diperror(dip, "hilevel interrupts not supported");
616 616 return (DDI_FAILURE);
617 617 }
618 618
619 619 if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
620 620 dca_diperror(dip, "unable to setup PCI config handle");
621 621 return (DDI_FAILURE);
622 622 }
623 623
624 624 /* common PCI attributes */
625 625 venid = pci_config_get16(pci, PCI_VENID);
626 626 devid = pci_config_get16(pci, PCI_DEVID);
627 627 revid = pci_config_get8(pci, PCI_REVID);
628 628 subvenid = pci_config_get16(pci, PCI_SUBVENID);
629 629 subsysid = pci_config_get16(pci, PCI_SUBSYSID);
630 630
631 631 /*
632 632 * Broadcom-specific timings.
633 633 * We disable these timers/counters since they can cause
634 634 * incorrect false failures when the bus is just a little
635 635 * bit slow, or busy.
636 636 */
637 637 pci_config_put8(pci, PCI_TRDYTO, 0);
638 638 pci_config_put8(pci, PCI_RETRIES, 0);
639 639
640 640 /* initialize PCI access settings */
641 641 pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
642 642 PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);
643 643
644 644 /* set up our PCI latency timer */
645 645 pci_config_put8(pci, PCI_LATTMR, 0x40);
646 646
647 647 #if DEBUG
648 648 /* read registers (for debugging) */
649 649 pcicomm = pci_config_get16(pci, PCI_COMM);
650 650 pcistat = pci_config_get16(pci, PCI_STATUS);
651 651 cachelinesz = pci_config_get8(pci, PCI_CACHELINESZ);
652 652 mingnt = pci_config_get8(pci, PCI_MINGNT);
653 653 maxlat = pci_config_get8(pci, PCI_MAXLAT);
654 654 lattmr = pci_config_get8(pci, PCI_LATTMR);
655 655 #endif
656 656
657 657 pci_config_teardown(&pci);
658 658
659 659 if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS) {
660 660 dca_diperror(dip, "unable to get iblock cookie");
661 661 return (DDI_FAILURE);
662 662 }
663 663
664 664 if (ddi_soft_state_zalloc(dca_state, instance) != DDI_SUCCESS) {
665 665 dca_diperror(dip, "unable to allocate soft state");
666 666 return (DDI_FAILURE);
667 667 }
668 668
669 669 dca = ddi_get_soft_state(dca_state, instance);
670 670 ASSERT(dca != NULL);
671 671 dca->dca_dip = dip;
672 672 WORKLIST(dca, MCR1)->dwl_prov = NULL;
673 673 WORKLIST(dca, MCR2)->dwl_prov = NULL;
674 674 /* figure pagesize */
675 675 dca->dca_pagesize = ddi_ptob(dip, 1);
676 676
677 677 /*
678 678 * Search for the device in our supported devices table. This
679 679 * is here for two reasons. First, we want to ensure that
680 680 * only Sun-qualified (and presumably Sun-labeled) devices can
681 681 * be used with this driver. Second, some devices have
682 682 * specific differences. E.g. the 5821 has support for a
683 683 * special mode of RC4, deeper queues, power management, and
684 684 * other changes. Also, the export versions of some of these
685 685 * chips don't support RC4 or 3DES, so we catch that here.
686 686 *
687 687 * Note that we only look at the upper nibble of the device
688 688 * id, which is used to distinguish export vs. domestic
689 689 * versions of the chip. (The lower nibble is used for
690 690 * stepping information.)
691 691 */
692 692 for (i = 0; i < (sizeof (dca_devices) / sizeof (dca_device_t)); i++) {
693 693 /*
694 694 * Try to match the subsystem information first.
695 695 */
696 696 if (subvenid && (subvenid == dca_devices[i].dd_vendor_id) &&
697 697 subsysid && (subsysid == dca_devices[i].dd_device_id)) {
698 698 dca->dca_model = dca_devices[i].dd_model;
699 699 dca->dca_devid = dca_devices[i].dd_device_id;
700 700 break;
701 701 }
702 702 /*
703 703 * Failing that, try the generic vendor and device id.
704 704 * Even if we find a match, we keep searching anyway,
705 705 * since we would prefer to find a match based on the
706 706 * subsystem ids.
707 707 */
708 708 if ((venid == dca_devices[i].dd_vendor_id) &&
709 709 (devid == dca_devices[i].dd_device_id)) {
710 710 dca->dca_model = dca_devices[i].dd_model;
711 711 dca->dca_devid = dca_devices[i].dd_device_id;
712 712 }
713 713 }
714 714 /* try and handle an unrecognized device */
715 715 if (dca->dca_model == NULL) {
716 716 dca->dca_model = unknowndev;
717 717 dca_error(dca, "device not recognized, not supported");
718 718 DBG(dca, DPCI, "i=%d venid=%x devid=%x rev=%d",
719 719 i, venid, devid, revid);
720 720 }
721 721
722 722 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "description",
723 723 dca->dca_model) != DDI_SUCCESS) {
724 724 dca_error(dca, "unable to create description property");
725 725 return (DDI_FAILURE);
726 726 }
727 727
728 728 DBG(dca, DPCI, "PCI command=0x%x status=%x cachelinesz=%x",
729 729 pcicomm, pcistat, cachelinesz);
730 730 DBG(dca, DPCI, "mingnt=0x%x maxlat=0x%x lattmr=0x%x",
731 731 mingnt, maxlat, lattmr);
732 732
733 733 /*
734 734 * initialize locks, etc.
735 735 */
736 736 (void) mutex_init(&dca->dca_intrlock, NULL, MUTEX_DRIVER, ibc);
737 737
738 738 /* use RNGSHA1 by default */
739 739 if (ddi_getprop(DDI_DEV_T_ANY, dip,
740 740 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rngdirect", 0) == 0) {
741 741 dca->dca_flags |= DCA_RNGSHA1;
742 742 }
743 743
744 744 /* initialize FMA */
745 745 dca_fma_init(dca);
746 746
747 747 /* initialize some key data structures */
748 748 if (dca_init(dca) != DDI_SUCCESS) {
749 749 goto failed;
750 750 }
751 751
752 752 /* initialize kstats */
753 753 dca_ksinit(dca);
754 754
755 755 /* setup access to registers */
756 756 if (ddi_regs_map_setup(dip, 1, (caddr_t *)&dca->dca_regs,
757 757 0, 0, &dca_regsattr, &dca->dca_regs_handle) != DDI_SUCCESS) {
758 758 dca_error(dca, "unable to map registers");
759 759 goto failed;
760 760 }
761 761
762 762 DBG(dca, DCHATTY, "MCR1 = %x", GETCSR(dca, CSR_MCR1));
763 763 DBG(dca, DCHATTY, "CONTROL = %x", GETCSR(dca, CSR_DMACTL));
764 764 DBG(dca, DCHATTY, "STATUS = %x", GETCSR(dca, CSR_DMASTAT));
765 765 DBG(dca, DCHATTY, "DMAEA = %x", GETCSR(dca, CSR_DMAEA));
766 766 DBG(dca, DCHATTY, "MCR2 = %x", GETCSR(dca, CSR_MCR2));
767 767
768 768 /* reset the chip */
769 769 if (dca_reset(dca, 0) < 0) {
770 770 goto failed;
771 771 }
772 772
773 773 /* initialize the chip */
774 774 PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
775 775 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
776 776 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
777 777 goto failed;
778 778 }
779 779
780 780 /* add the interrupt */
781 781 if (ddi_add_intr(dip, 0, &dca->dca_icookie, NULL, dca_intr,
782 782 (void *)dca) != DDI_SUCCESS) {
783 783 DBG(dca, DWARN, "ddi_add_intr failed");
784 784 goto failed;
785 785 } else {
786 786 intr_added = 1;
787 787 }
788 788
789 789 /* enable interrupts on the device */
790 790 /*
791 791 * XXX: Note, 5820A1 errata indicates that this may clobber
792 792 * bits 24 and 23, which affect the speed of the RNG. Since
793 793 * we always want to run in full-speed mode, this should be
794 794 * harmless.
795 795 */
796 796 if (dca->dca_devid == 0x5825) {
797 797 /* for 5825 - increase the DMA read size */
798 798 SETBIT(dca, CSR_DMACTL,
799 799 DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
800 800 } else {
801 801 SETBIT(dca, CSR_DMACTL,
802 802 DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
803 803 }
804 804 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
805 805 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
806 806 goto failed;
807 807 }
808 808
809 809 /* register MCR1 with the crypto framework */
810 810 /* Be careful not to exceed 32 chars */
811 811 (void) sprintf(ID, "%s/%d %s",
812 812 ddi_driver_name(dip), ddi_get_instance(dip), IDENT_SYM);
813 813 dca_prov_info1.pi_provider_description = ID;
814 814 dca_prov_info1.pi_provider_dev.pd_hw = dip;
815 815 dca_prov_info1.pi_provider_handle = dca;
816 816 if ((ret = crypto_register_provider(&dca_prov_info1,
817 817 &WORKLIST(dca, MCR1)->dwl_prov)) != CRYPTO_SUCCESS) {
818 818 cmn_err(CE_WARN,
819 819 "crypto_register_provider() failed (%d) for MCR1", ret);
820 820 goto failed;
821 821 }
822 822
823 823 /* register MCR2 with the crypto framework */
824 824 /* Be careful not to exceed 32 chars */
825 825 (void) sprintf(ID, "%s/%d %s",
826 826 ddi_driver_name(dip), ddi_get_instance(dip), IDENT_ASYM);
827 827 dca_prov_info2.pi_provider_description = ID;
828 828 dca_prov_info2.pi_provider_dev.pd_hw = dip;
829 829 dca_prov_info2.pi_provider_handle = dca;
830 830 if ((ret = crypto_register_provider(&dca_prov_info2,
831 831 &WORKLIST(dca, MCR2)->dwl_prov)) != CRYPTO_SUCCESS) {
832 832 cmn_err(CE_WARN,
833 833 "crypto_register_provider() failed (%d) for MCR2", ret);
834 834 goto failed;
835 835 }
836 836
837 837 crypto_prov_notify(WORKLIST(dca, MCR1)->dwl_prov,
838 838 CRYPTO_PROVIDER_READY);
839 839 crypto_prov_notify(WORKLIST(dca, MCR2)->dwl_prov,
840 840 CRYPTO_PROVIDER_READY);
841 841
842 842 /* Initialize the local random number pool for this instance */
843 843 if ((ret = dca_random_init(dca)) != CRYPTO_SUCCESS) {
844 844 goto failed;
845 845 }
846 846
847 847 mutex_enter(&dca->dca_intrlock);
848 848 dca->dca_jobtid = timeout(dca_jobtimeout, (void *)dca,
849 849 drv_usectohz(SECOND));
850 850 mutex_exit(&dca->dca_intrlock);
851 851
852 852 ddi_set_driver_private(dip, (caddr_t)dca);
853 853
854 854 ddi_report_dev(dip);
855 855
856 856 if (ddi_get_devstate(dca->dca_dip) != DDI_DEVSTATE_UP) {
857 857 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_RESTORED);
858 858 }
859 859
860 860 return (DDI_SUCCESS);
861 861
862 862 failed:
863 863 /* unregister from the crypto framework */
864 864 if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
865 865 (void) crypto_unregister_provider(
866 866 WORKLIST(dca, MCR1)->dwl_prov);
867 867 }
868 868 if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
869 869 (void) crypto_unregister_provider(
870 870 WORKLIST(dca, MCR2)->dwl_prov);
871 871 }
872 872 if (intr_added) {
873 873 CLRBIT(dca, CSR_DMACTL,
874 874 DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
875 875 /* unregister intr handler */
876 876 ddi_remove_intr(dip, 0, dca->dca_icookie);
877 877 }
878 878 if (dca->dca_regs_handle) {
879 879 ddi_regs_map_free(&dca->dca_regs_handle);
880 880 }
881 881 if (dca->dca_intrstats) {
882 882 kstat_delete(dca->dca_intrstats);
883 883 }
884 884 if (dca->dca_ksp) {
885 885 kstat_delete(dca->dca_ksp);
886 886 }
887 887 dca_uninit(dca);
888 888
889 889 /* finalize FMA */
890 890 dca_fma_fini(dca);
891 891
892 892 mutex_destroy(&dca->dca_intrlock);
893 893 ddi_soft_state_free(dca_state, instance);
894 894 return (DDI_FAILURE);
895 895
896 896 }
897 897
/*
 * detach(9e) entry point.  DDI_SUSPEND quiesces the device via
 * dca_suspend(); DDI_DETACH tears the instance down in roughly the
 * reverse order of attach.  Unregistering from kCF must happen first
 * so no new jobs (and hence no new DMA mappings) arrive while the
 * rest of the teardown proceeds.
 */
int
dca_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
        int             instance;
        dca_t           *dca;
        timeout_id_t    tid;

        instance = ddi_get_instance(dip);

        DBG(NULL, DMOD, "dca: in dca_detach() for %d", instance);

        switch (cmd) {
        case DDI_SUSPEND:
                if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
                        dca_diperror(dip, "no soft state in detach");
                        return (DDI_FAILURE);
                }
                /* assumption: we won't be DDI_DETACHed until we return */
                return (dca_suspend(dca));

        case DDI_DETACH:
                break;
        default:
                return (DDI_FAILURE);
        }

        if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
                dca_diperror(dip, "no soft state in detach");
                return (DDI_FAILURE);
        }

        /*
         * Unregister from kCF.
         * This needs to be done at the beginning of detach.  If either
         * unregistration is refused, detach fails and the instance is
         * left fully operational.
         */
        if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
                if (crypto_unregister_provider(
                    WORKLIST(dca, MCR1)->dwl_prov) != CRYPTO_SUCCESS) {
                        dca_error(dca, "unable to unregister MCR1 from kcf");
                        return (DDI_FAILURE);
                }
        }

        if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
                if (crypto_unregister_provider(
                    WORKLIST(dca, MCR2)->dwl_prov) != CRYPTO_SUCCESS) {
                        dca_error(dca, "unable to unregister MCR2 from kcf");
                        return (DDI_FAILURE);
                }
        }

        /*
         * Cleanup the private context list. Once the
         * crypto_unregister_provider returns, it is safe to do so.
         */
        dca_free_context_list(dca);

        /* Cleanup the local random number pool */
        dca_random_fini(dca);

        /* send any jobs in the waitq back to kCF */
        dca_rejectjobs(dca);

        /* untimeout the timeouts; clear dca_jobtid under the lock so the
         * handler cannot re-arm while we cancel it */
        mutex_enter(&dca->dca_intrlock);
        tid = dca->dca_jobtid;
        dca->dca_jobtid = 0;
        mutex_exit(&dca->dca_intrlock);
        if (tid) {
                (void) untimeout(tid);
        }

        /* disable device interrupts */
        CLRBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);

        /* unregister interrupt handlers */
        ddi_remove_intr(dip, 0, dca->dca_icookie);

        /* release our regs handle */
        ddi_regs_map_free(&dca->dca_regs_handle);

        /* toss out kstats */
        if (dca->dca_intrstats) {
                kstat_delete(dca->dca_intrstats);
        }
        if (dca->dca_ksp) {
                kstat_delete(dca->dca_ksp);
        }

        mutex_destroy(&dca->dca_intrlock);
        dca_uninit(dca);

        /* finalize FMA */
        dca_fma_fini(dca);

        ddi_soft_state_free(dca_state, instance);

        return (DDI_SUCCESS);
}
997 997
/*
 * DDI_RESUME handler.  The chip loses state across suspend, so this
 * reprograms the PCI configuration registers, resets the device,
 * restores the card-specific CSRs (with FMA access checks after each
 * restore), and finally re-enables job scheduling via dca_undrain().
 */
int
dca_resume(dca_t *dca)
{
        ddi_acc_handle_t pci;

        if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) {
                dca_error(dca, "unable to setup PCI config handle");
                return (DDI_FAILURE);
        }

        /*
         * Reprogram registers in PCI configuration space.
         */

        /* Broadcom-specific timers -- we disable them. */
        pci_config_put8(pci, PCI_TRDYTO, 0);
        pci_config_put8(pci, PCI_RETRIES, 0);

        /* initialize PCI access settings */
        pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
            PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

        /* set up our PCI latency timer */
        pci_config_put8(pci, PCI_LATTMR, 0x40);

        pci_config_teardown(&pci);

        if (dca_reset(dca, 0) < 0) {
                dca_error(dca, "unable to reset device during resume");
                return (DDI_FAILURE);
        }

        /*
         * Now restore the card-specific CSRs.
         */

        /* restore endianness settings */
        PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
        if (dca_check_acc_handle(dca, dca->dca_regs_handle,
            DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
                return (DDI_FAILURE);

        /* restore interrupt enables */
        if (dca->dca_devid == 0x5825) {
                /* for 5825 set 256 byte read size to improve performance */
                SETBIT(dca, CSR_DMACTL,
                    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
        } else {
                SETBIT(dca, CSR_DMACTL,
                    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
        }
        if (dca_check_acc_handle(dca, dca->dca_regs_handle,
            DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
                return (DDI_FAILURE);

        /* resume scheduling jobs on the device */
        dca_undrain(dca);

        return (DDI_SUCCESS);
}
1058 1058
1059 1059 int
1060 1060 dca_suspend(dca_t *dca)
1061 1061 {
1062 1062 if ((dca_drain(dca)) != 0) {
1063 1063 return (DDI_FAILURE);
1064 1064 }
1065 1065 if (dca_reset(dca, 0) < 0) {
1066 1066 dca_error(dca, "unable to reset device during suspend");
1067 1067 return (DDI_FAILURE);
1068 1068 }
1069 1069 return (DDI_SUCCESS);
1070 1070 }
1071 1071
1072 1072 /*
1073 1073 * Hardware access stuff.
1074 1074 */
/*
 * Reset the device: set DMACTL_RESET and poll (99 iterations of 100us,
 * i.e. just under 10ms) for the bit to self-clear.  When failreset is
 * nonzero the FMA access-handle checks are skipped, so a reset can be
 * attempted on an already-faulted device without reporting again.
 * Returns 0 on success, -1 on timeout or access-check failure.
 */
int
dca_reset(dca_t *dca, int failreset)
{
        int i;

        /* no register mapping (early attach-failure path): nothing to do */
        if (dca->dca_regs_handle == NULL) {
                return (-1);
        }

        PUTCSR(dca, CSR_DMACTL, DMACTL_RESET);
        if (!failreset) {
                if (dca_check_acc_handle(dca, dca->dca_regs_handle,
                    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
                        return (-1);
        }

        /* now wait for a reset */
        for (i = 1; i < 100; i++) {
                uint32_t dmactl;
                drv_usecwait(100);
                dmactl = GETCSR(dca, CSR_DMACTL);
                if (!failreset) {
                        if (dca_check_acc_handle(dca, dca->dca_regs_handle,
                            DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
                                return (-1);
                }
                /* device clears DMACTL_RESET when the reset completes */
                if ((dmactl & DMACTL_RESET) == 0) {
                        DBG(dca, DCHATTY, "reset in %d usec", i * 100);
                        return (0);
                }
        }
        if (!failreset) {
                dca_failure(dca, DDI_DEVICE_FAULT,
                    DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
                    "timeout waiting for reset after %d usec", i * 100);
        }
        return (-1);
}
1113 1113
/*
 * Initialize one worklist: its locks, condition variable and queues,
 * then preallocate MAXWORK work structures plus enough requests to
 * cover the high-water mark and every possible in-flight MCR slot.
 * Returns DDI_FAILURE if any preallocation fails; the caller is
 * expected to clean up (e.g. via dca_uninit()).
 */
int
dca_initworklist(dca_t *dca, dca_worklist_t *wlp)
{
        int     i;
        int     reqprealloc = wlp->dwl_hiwater + (MAXWORK * MAXREQSPERMCR);

        /*
         * Set up work queue.
         */
        mutex_init(&wlp->dwl_lock, NULL, MUTEX_DRIVER, dca->dca_icookie);
        mutex_init(&wlp->dwl_freereqslock, NULL, MUTEX_DRIVER,
            dca->dca_icookie);
        mutex_init(&wlp->dwl_freelock, NULL, MUTEX_DRIVER, dca->dca_icookie);
        cv_init(&wlp->dwl_cv, NULL, CV_DRIVER, NULL);

        mutex_enter(&wlp->dwl_lock);

        dca_initq(&wlp->dwl_freereqs);
        dca_initq(&wlp->dwl_waitq);
        dca_initq(&wlp->dwl_freework);
        dca_initq(&wlp->dwl_runq);

        /* preallocate the fixed pool of MCR work structures */
        for (i = 0; i < MAXWORK; i++) {
                dca_work_t *workp;

                if ((workp = dca_newwork(dca)) == NULL) {
                        dca_error(dca, "unable to allocate work");
                        mutex_exit(&wlp->dwl_lock);
                        return (DDI_FAILURE);
                }
                workp->dw_wlp = wlp;
                dca_freework(workp);
        }
        mutex_exit(&wlp->dwl_lock);

        /* preallocate requests; dca_freereq() parks them on the free list */
        for (i = 0; i < reqprealloc; i++) {
                dca_request_t *reqp;

                if ((reqp = dca_newreq(dca)) == NULL) {
                        dca_error(dca, "unable to allocate request");
                        return (DDI_FAILURE);
                }
                reqp->dr_dca = dca;
                reqp->dr_wlp = wlp;
                dca_freereq(reqp);
        }
        return (DDI_SUCCESS);
}
1162 1162
/*
 * Initialize the per-instance soft state: the private context list and
 * both worklists (MCR1 and MCR2).  The low/high water marks and the
 * requests-per-MCR count are tunable through driver properties, with
 * reqspermcr clamped to MAXREQSPERMCR.  Worklist names are bounded by
 * the "dca%d:mcrN" pattern, so sprintf() cannot overflow dwl_name.
 */
int
dca_init(dca_t *dca)
{
        dca_worklist_t  *wlp;

        /* Initialize the private context list and the corresponding lock. */
        mutex_init(&dca->dca_ctx_list_lock, NULL, MUTEX_DRIVER, NULL);
        dca_initq(&dca->dca_ctx_list);

        /*
         * MCR1 algorithms.
         */
        wlp = WORKLIST(dca, MCR1);
        (void) sprintf(wlp->dwl_name, "dca%d:mcr1",
            ddi_get_instance(dca->dca_dip));
        wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
            dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
            "mcr1_lowater", MCR1LOWATER);
        wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
            dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
            "mcr1_hiwater", MCR1HIWATER);
        wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
            dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
            "mcr1_maxreqs", MCR1MAXREQS), MAXREQSPERMCR);
        wlp->dwl_dca = dca;
        wlp->dwl_mcr = MCR1;
        if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
                return (DDI_FAILURE);
        }

        /*
         * MCR2 algorithms.
         */
        wlp = WORKLIST(dca, MCR2);
        (void) sprintf(wlp->dwl_name, "dca%d:mcr2",
            ddi_get_instance(dca->dca_dip));
        wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
            dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
            "mcr2_lowater", MCR2LOWATER);
        wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
            dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
            "mcr2_hiwater", MCR2HIWATER);
        wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
            dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
            "mcr2_maxreqs", MCR2MAXREQS), MAXREQSPERMCR);
        wlp->dwl_dca = dca;
        wlp->dwl_mcr = MCR2;
        if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
                return (DDI_FAILURE);
        }
        return (DDI_SUCCESS);
}
1215 1215
1216 1216 /*
1217 1217 * Uninitialize worklists. This routine should only be called when no
1218 1218 * active jobs (hence DMA mappings) exist. One way to ensure this is
1219 1219 * to unregister from kCF before calling this routine. (This is done
1220 1220 * e.g. in detach(9e).)
1221 1221 */
void
dca_uninit(dca_t *dca)
{
        int     mcr;

        mutex_destroy(&dca->dca_ctx_list_lock);

        for (mcr = MCR1; mcr <= MCR2; mcr++) {
                dca_worklist_t  *wlp = WORKLIST(dca, mcr);
                dca_work_t      *workp;
                dca_request_t   *reqp;

                /*
                 * NOTE(review): presumably this guards instances whose
                 * attach failed before the worklists were initialized
                 * (no regs handle implies early failure) -- confirm.
                 */
                if (dca->dca_regs_handle == NULL) {
                        continue;
                }

                /* drain and destroy every preallocated work structure */
                mutex_enter(&wlp->dwl_lock);
                while ((workp = dca_getwork(dca, mcr)) != NULL) {
                        dca_destroywork(workp);
                }
                mutex_exit(&wlp->dwl_lock);
                /* then every cached request (tryhard=0: no new allocs) */
                while ((reqp = dca_getreq(dca, mcr, 0)) != NULL) {
                        dca_destroyreq(reqp);
                }

                mutex_destroy(&wlp->dwl_lock);
                mutex_destroy(&wlp->dwl_freereqslock);
                mutex_destroy(&wlp->dwl_freelock);
                cv_destroy(&wlp->dwl_cv);
                wlp->dwl_prov = NULL;
        }
}
1254 1254
1255 1255 static void
1256 1256 dca_enlist2(dca_listnode_t *q, dca_listnode_t *node, kmutex_t *lock)
1257 1257 {
1258 1258 if (!q || !node)
1259 1259 return;
1260 1260
1261 1261 mutex_enter(lock);
1262 1262 node->dl_next2 = q;
1263 1263 node->dl_prev2 = q->dl_prev2;
1264 1264 node->dl_next2->dl_prev2 = node;
1265 1265 node->dl_prev2->dl_next2 = node;
1266 1266 mutex_exit(lock);
1267 1267 }
1268 1268
1269 1269 static void
1270 1270 dca_rmlist2(dca_listnode_t *node, kmutex_t *lock)
1271 1271 {
1272 1272 if (!node)
1273 1273 return;
1274 1274
1275 1275 mutex_enter(lock);
1276 1276 node->dl_next2->dl_prev2 = node->dl_prev2;
1277 1277 node->dl_prev2->dl_next2 = node->dl_next2;
1278 1278 node->dl_next2 = NULL;
1279 1279 node->dl_prev2 = NULL;
1280 1280 mutex_exit(lock);
1281 1281 }
1282 1282
1283 1283 static dca_listnode_t *
1284 1284 dca_delist2(dca_listnode_t *q, kmutex_t *lock)
1285 1285 {
1286 1286 dca_listnode_t *node;
1287 1287
1288 1288 mutex_enter(lock);
1289 1289 if ((node = q->dl_next2) == q) {
1290 1290 mutex_exit(lock);
1291 1291 return (NULL);
1292 1292 }
1293 1293
1294 1294 node->dl_next2->dl_prev2 = node->dl_prev2;
1295 1295 node->dl_prev2->dl_next2 = node->dl_next2;
1296 1296 node->dl_next2 = NULL;
1297 1297 node->dl_prev2 = NULL;
1298 1298 mutex_exit(lock);
1299 1299
1300 1300 return (node);
1301 1301 }
1302 1302
1303 1303 void
1304 1304 dca_initq(dca_listnode_t *q)
1305 1305 {
1306 1306 q->dl_next = q;
1307 1307 q->dl_prev = q;
1308 1308 q->dl_next2 = q;
1309 1309 q->dl_prev2 = q;
1310 1310 }
1311 1311
1312 1312 void
1313 1313 dca_enqueue(dca_listnode_t *q, dca_listnode_t *node)
1314 1314 {
1315 1315 /*
1316 1316 * Enqueue submits at the "tail" of the list, i.e. just
1317 1317 * behind the sentinel.
1318 1318 */
1319 1319 node->dl_next = q;
1320 1320 node->dl_prev = q->dl_prev;
1321 1321 node->dl_next->dl_prev = node;
1322 1322 node->dl_prev->dl_next = node;
1323 1323 }
1324 1324
1325 1325 void
1326 1326 dca_rmqueue(dca_listnode_t *node)
1327 1327 {
1328 1328 node->dl_next->dl_prev = node->dl_prev;
1329 1329 node->dl_prev->dl_next = node->dl_next;
1330 1330 node->dl_next = NULL;
1331 1331 node->dl_prev = NULL;
1332 1332 }
1333 1333
1334 1334 dca_listnode_t *
1335 1335 dca_dequeue(dca_listnode_t *q)
1336 1336 {
1337 1337 dca_listnode_t *node;
1338 1338 /*
1339 1339 * Dequeue takes from the "head" of the list, i.e. just after
1340 1340 * the sentinel.
1341 1341 */
1342 1342 if ((node = q->dl_next) == q) {
1343 1343 /* queue is empty */
1344 1344 return (NULL);
1345 1345 }
1346 1346 dca_rmqueue(node);
1347 1347 return (node);
1348 1348 }
1349 1349
1350 1350 /* this is the opposite of dequeue, it takes things off in LIFO order */
1351 1351 dca_listnode_t *
1352 1352 dca_unqueue(dca_listnode_t *q)
1353 1353 {
1354 1354 dca_listnode_t *node;
1355 1355 /*
1356 1356 * unqueue takes from the "tail" of the list, i.e. just before
1357 1357 * the sentinel.
1358 1358 */
1359 1359 if ((node = q->dl_prev) == q) {
1360 1360 /* queue is empty */
1361 1361 return (NULL);
1362 1362 }
1363 1363 dca_rmqueue(node);
1364 1364 return (node);
1365 1365 }
1366 1366
1367 1367 dca_listnode_t *
1368 1368 dca_peekqueue(dca_listnode_t *q)
1369 1369 {
1370 1370 dca_listnode_t *node;
1371 1371
1372 1372 if ((node = q->dl_next) == q) {
1373 1373 return (NULL);
1374 1374 } else {
1375 1375 return (node);
1376 1376 }
1377 1377 }
1378 1378
1379 1379 /*
1380 1380 * Interrupt service routine.
1381 1381 */
uint_t
dca_intr(char *arg)
{
        dca_t           *dca = (dca_t *)arg;
        uint32_t        status;

        mutex_enter(&dca->dca_intrlock);
        /*
         * Read the status and write the handled bits back.
         * NOTE(review): assumed write-one-to-clear acknowledge --
         * confirm against the BCM582x register description.
         */
        status = GETCSR(dca, CSR_DMASTAT);
        PUTCSR(dca, CSR_DMASTAT, status & DMASTAT_INTERRUPTS);
        if (dca_check_acc_handle(dca, dca->dca_regs_handle,
            DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
                mutex_exit(&dca->dca_intrlock);
                return ((uint_t)DDI_FAILURE);
        }

        DBG(dca, DINTR, "interrupted, status = 0x%x!", status);

        if ((status & DMASTAT_INTERRUPTS) == 0) {
                /* increment spurious interrupt kstat */
                if (dca->dca_intrstats) {
                        KIOIP(dca)->intrs[KSTAT_INTR_SPURIOUS]++;
                }
                mutex_exit(&dca->dca_intrlock);
                return (DDI_INTR_UNCLAIMED);
        }

        if (dca->dca_intrstats) {
                KIOIP(dca)->intrs[KSTAT_INTR_HARD]++;
        }
        /* MCR completion: push new work and reclaim finished jobs */
        if (status & DMASTAT_MCR1INT) {
                DBG(dca, DINTR, "MCR1 interrupted");
                mutex_enter(&(WORKLIST(dca, MCR1)->dwl_lock));
                dca_schedule(dca, MCR1);
                dca_reclaim(dca, MCR1);
                mutex_exit(&(WORKLIST(dca, MCR1)->dwl_lock));
        }

        if (status & DMASTAT_MCR2INT) {
                DBG(dca, DINTR, "MCR2 interrupted");
                mutex_enter(&(WORKLIST(dca, MCR2)->dwl_lock));
                dca_schedule(dca, MCR2);
                dca_reclaim(dca, MCR2);
                mutex_exit(&(WORKLIST(dca, MCR2)->dwl_lock));
        }

        if (status & DMASTAT_ERRINT) {
                uint32_t erraddr;
                /* capture the fault address before dropping the lock */
                erraddr = GETCSR(dca, CSR_DMAEA);
                mutex_exit(&dca->dca_intrlock);

                /*
                 * bit 1 of the error address indicates failure during
                 * read if set, during write otherwise.
                 */
                dca_failure(dca, DDI_DEVICE_FAULT,
                    DCA_FM_ECLASS_HW_DEVICE, dca_ena(0), CRYPTO_DEVICE_ERROR,
                    "DMA master access error %s address 0x%x",
                    erraddr & 0x1 ? "reading" : "writing", erraddr & ~1);
                return (DDI_INTR_CLAIMED);
        }

        mutex_exit(&dca->dca_intrlock);

        return (DDI_INTR_CLAIMED);
}
1447 1447
1448 1448 /*
1449 1449 * Reverse a string of bytes from s1 into s2. The reversal happens
1450 1450 * from the tail of s1. If len1 < len2, then null bytes will be
1451 1451 * padded to the end of s2. If len2 < len1, then (presumably null)
1452 1452 * bytes will be dropped from the start of s1.
1453 1453 *
1454 1454 * The rationale here is that when s1 (source) is shorter, then we
1455 1455 * are reversing from big-endian ordering, into device ordering, and
1456 1456 * want to add some extra nulls to the tail (MSB) side of the device.
1457 1457 *
1458 1458 * Similarly, when s2 (dest) is shorter, then we are truncating what
1459 1459 * are presumably null MSB bits from the device.
1460 1460 *
1461 1461 * There is an expectation when reversing from the device back into
1462 1462 * big-endian, that the number of bytes to reverse and the target size
1463 1463 * will match, and no truncation or padding occurs.
1464 1464 */
1465 1465 void
1466 1466 dca_reverse(void *s1, void *s2, int len1, int len2)
1467 1467 {
1468 1468 caddr_t src, dst;
1469 1469
1470 1470 if (len1 == 0) {
1471 1471 if (len2) {
1472 1472 bzero(s2, len2);
1473 1473 }
1474 1474 return;
1475 1475 }
1476 1476 src = (caddr_t)s1 + len1 - 1;
1477 1477 dst = s2;
1478 1478 while ((src >= (caddr_t)s1) && (len2)) {
1479 1479 *dst++ = *src--;
1480 1480 len2--;
1481 1481 }
1482 1482 while (len2 > 0) {
1483 1483 *dst++ = 0;
1484 1484 len2--;
1485 1485 }
1486 1486 }
1487 1487
1488 1488 uint16_t
1489 1489 dca_padfull(int num)
1490 1490 {
1491 1491 if (num <= 512) {
1492 1492 return (BITS2BYTES(512));
1493 1493 }
1494 1494 if (num <= 768) {
1495 1495 return (BITS2BYTES(768));
1496 1496 }
1497 1497 if (num <= 1024) {
1498 1498 return (BITS2BYTES(1024));
1499 1499 }
1500 1500 if (num <= 1536) {
1501 1501 return (BITS2BYTES(1536));
1502 1502 }
1503 1503 if (num <= 2048) {
1504 1504 return (BITS2BYTES(2048));
1505 1505 }
1506 1506 return (0);
1507 1507 }
1508 1508
1509 1509 uint16_t
1510 1510 dca_padhalf(int num)
1511 1511 {
1512 1512 if (num <= 256) {
1513 1513 return (BITS2BYTES(256));
1514 1514 }
1515 1515 if (num <= 384) {
1516 1516 return (BITS2BYTES(384));
1517 1517 }
1518 1518 if (num <= 512) {
1519 1519 return (BITS2BYTES(512));
1520 1520 }
1521 1521 if (num <= 768) {
1522 1522 return (BITS2BYTES(768));
1523 1523 }
1524 1524 if (num <= 1024) {
1525 1525 return (BITS2BYTES(1024));
1526 1526 }
1527 1527 return (0);
1528 1528 }
1529 1529
/*
 * Allocate a work structure and its DMA-able MCR region: a handle, a
 * page-rounded consistent allocation, and an address binding whose
 * device address is cached in dw_mcr_paddr.  All allocations sleep.
 * On any DMA setup failure the partially built structure is released
 * via dca_destroywork() and NULL is returned.
 */
dca_work_t *
dca_newwork(dca_t *dca)
{
        dca_work_t              *workp;
        size_t                  size;
        ddi_dma_cookie_t        c;
        unsigned                nc;
        int                     rv;

        workp = kmem_zalloc(sizeof (dca_work_t), KM_SLEEP);

        rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
            DDI_DMA_SLEEP, NULL, &workp->dw_mcr_dmah);
        if (rv != 0) {
                dca_error(dca, "unable to alloc MCR DMA handle");
                dca_destroywork(workp);
                return (NULL);
        }

        rv = ddi_dma_mem_alloc(workp->dw_mcr_dmah,
            ROUNDUP(MCR_SIZE, dca->dca_pagesize),
            &dca_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
            &workp->dw_mcr_kaddr, &size, &workp->dw_mcr_acch);
        if (rv != 0) {
                dca_error(dca, "unable to alloc MCR DMA memory");
                dca_destroywork(workp);
                return (NULL);
        }

        rv = ddi_dma_addr_bind_handle(workp->dw_mcr_dmah, NULL,
            workp->dw_mcr_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
            DDI_DMA_SLEEP, NULL, &c, &nc);
        if (rv != DDI_DMA_MAPPED) {
                dca_error(dca, "unable to map MCR DMA memory");
                dca_destroywork(workp);
                return (NULL);
        }

        /* cache the device-visible address of the MCR */
        workp->dw_mcr_paddr = c.dmac_address;
        return (workp);
}
1571 1571
1572 1572 void
1573 1573 dca_destroywork(dca_work_t *workp)
1574 1574 {
1575 1575 if (workp->dw_mcr_paddr) {
1576 1576 (void) ddi_dma_unbind_handle(workp->dw_mcr_dmah);
1577 1577 }
1578 1578 if (workp->dw_mcr_acch) {
1579 1579 ddi_dma_mem_free(&workp->dw_mcr_acch);
1580 1580 }
1581 1581 if (workp->dw_mcr_dmah) {
1582 1582 ddi_dma_free_handle(&workp->dw_mcr_dmah);
1583 1583 }
1584 1584 kmem_free(workp, sizeof (dca_work_t));
1585 1585 }
1586 1586
/*
 * Allocate and DMA-map a request structure: the context/descriptor
 * region, the input/output scratch buffers, and the chain-in/chain-out
 * handles used later for dynamic user-buffer bindings.  Regions are
 * page-rounded for driver hardening.  All allocations sleep; any DMA
 * setup failure tears down the partial request via dca_destroyreq()
 * and returns NULL.
 */
dca_request_t *
dca_newreq(dca_t *dca)
{
        dca_request_t   *reqp;
        size_t          size;
        ddi_dma_cookie_t c;
        unsigned        nc;
        int             rv;
        int             n_chain = 0;

        size = (DESC_SIZE * MAXFRAGS) + CTX_MAXLENGTH;

        reqp = kmem_zalloc(sizeof (dca_request_t), KM_SLEEP);

        reqp->dr_dca = dca;

        /*
         * Setup the DMA region for the context and descriptors.
         */
        rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, DDI_DMA_SLEEP,
            NULL, &reqp->dr_ctx_dmah);
        if (rv != DDI_SUCCESS) {
                dca_error(dca, "failure allocating request DMA handle");
                dca_destroyreq(reqp);
                return (NULL);
        }

        /* for driver hardening, allocate in whole pages */
        rv = ddi_dma_mem_alloc(reqp->dr_ctx_dmah,
            ROUNDUP(size, dca->dca_pagesize), &dca_devattr, DDI_DMA_CONSISTENT,
            DDI_DMA_SLEEP, NULL, &reqp->dr_ctx_kaddr, &size,
            &reqp->dr_ctx_acch);
        if (rv != DDI_SUCCESS) {
                dca_error(dca, "unable to alloc request DMA memory");
                dca_destroyreq(reqp);
                return (NULL);
        }

        rv = ddi_dma_addr_bind_handle(reqp->dr_ctx_dmah, NULL,
            reqp->dr_ctx_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_WRITE,
            DDI_DMA_SLEEP, 0, &c, &nc);
        if (rv != DDI_DMA_MAPPED) {
                dca_error(dca, "failed binding request DMA handle");
                dca_destroyreq(reqp);
                return (NULL);
        }
        reqp->dr_ctx_paddr = c.dmac_address;

        /* ddi_dma_mem_alloc may have rounded size up */
        reqp->dr_dma_size = size;

        /*
         * Set up the dma for our scratch/shared buffers.
         */
        rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
            DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_dmah);
        if (rv != DDI_SUCCESS) {
                dca_error(dca, "failure allocating ibuf DMA handle");
                dca_destroyreq(reqp);
                return (NULL);
        }
        rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
            DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_dmah);
        if (rv != DDI_SUCCESS) {
                dca_error(dca, "failure allocating obuf DMA handle");
                dca_destroyreq(reqp);
                return (NULL);
        }

        rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
            DDI_DMA_SLEEP, NULL, &reqp->dr_chain_in_dmah);
        if (rv != DDI_SUCCESS) {
                dca_error(dca, "failure allocating chain_in DMA handle");
                dca_destroyreq(reqp);
                return (NULL);
        }

        rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
            DDI_DMA_SLEEP, NULL, &reqp->dr_chain_out_dmah);
        if (rv != DDI_SUCCESS) {
                dca_error(dca, "failure allocating chain_out DMA handle");
                dca_destroyreq(reqp);
                return (NULL);
        }

        /*
         * for driver hardening, allocate in whole pages.
         */
        size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
#if defined(i386) || defined(__i386)
        /*
         * Use kmem_alloc instead of ddi_dma_mem_alloc here since the latter
         * may fail on x86 platform if a physically contiguous memory chunk
         * cannot be found. From initial testing, we did not see performance
         * degradation as seen on Sparc.
         */
        /* KM_SLEEP allocations cannot fail, so no NULL checks are needed */
        reqp->dr_ibuf_kaddr = kmem_alloc(size, KM_SLEEP);
        reqp->dr_obuf_kaddr = kmem_alloc(size, KM_SLEEP);
#else
        /*
         * We could kmem_alloc for Sparc too. However, it gives worse
         * performance when transferring more than one page data. For example,
         * using 4 threads and 12032 byte data and 3DES on 900MHZ Sparc system,
         * kmem_alloc uses 80% CPU and ddi_dma_mem_alloc uses 50% CPU for
         * the same throughput.
         */
        rv = ddi_dma_mem_alloc(reqp->dr_ibuf_dmah,
            size, &dca_bufattr,
            DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_kaddr,
            &size, &reqp->dr_ibuf_acch);
        if (rv != DDI_SUCCESS) {
                dca_error(dca, "unable to alloc request DMA memory");
                dca_destroyreq(reqp);
                return (NULL);
        }

        rv = ddi_dma_mem_alloc(reqp->dr_obuf_dmah,
            size, &dca_bufattr,
            DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_kaddr,
            &size, &reqp->dr_obuf_acch);
        if (rv != DDI_SUCCESS) {
                dca_error(dca, "unable to alloc request DMA memory");
                dca_destroyreq(reqp);
                return (NULL);
        }
#endif

        /* Skip the used portion in the context page */
        reqp->dr_offset = CTX_MAXLENGTH;
        if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
            reqp->dr_ibuf_kaddr, reqp->dr_ibuf_dmah,
            DDI_DMA_WRITE | DDI_DMA_STREAMING,
            &reqp->dr_ibuf_head, &n_chain)) != DDI_SUCCESS) {
                (void) dca_destroyreq(reqp);
                return (NULL);
        }
        reqp->dr_ibuf_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
        /* Skip the space used by the input buffer */
        reqp->dr_offset += DESC_SIZE * n_chain;

        if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
            reqp->dr_obuf_kaddr, reqp->dr_obuf_dmah,
            DDI_DMA_READ | DDI_DMA_STREAMING,
            &reqp->dr_obuf_head, &n_chain)) != DDI_SUCCESS) {
                (void) dca_destroyreq(reqp);
                return (NULL);
        }
        reqp->dr_obuf_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
        /* Skip the space used by the output buffer */
        reqp->dr_offset += DESC_SIZE * n_chain;

        DBG(dca, DCHATTY, "CTX is 0x%p, phys 0x%x, len %d",
            reqp->dr_ctx_kaddr, reqp->dr_ctx_paddr, CTX_MAXLENGTH);
        return (reqp);
}
1749 1741
/*
 * Release everything dca_newreq() set up, in reverse order.  Each
 * resource is released only if present, so this is safe on a
 * partially constructed request.  The chain_in/chain_out handles must
 * already be unbound (see dca_unbindchains()); only the handles
 * themselves are freed here.
 */
void
dca_destroyreq(dca_request_t *reqp)
{
#if defined(i386) || defined(__i386)
        dca_t           *dca = reqp->dr_dca;
        size_t          size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
#endif

        /*
         * Clean up DMA for the context structure.
         */
        if (reqp->dr_ctx_paddr) {
                (void) ddi_dma_unbind_handle(reqp->dr_ctx_dmah);
        }

        if (reqp->dr_ctx_acch) {
                ddi_dma_mem_free(&reqp->dr_ctx_acch);
        }

        if (reqp->dr_ctx_dmah) {
                ddi_dma_free_handle(&reqp->dr_ctx_dmah);
        }

        /*
         * Clean up DMA for the scratch buffer.
         */
#if defined(i386) || defined(__i386)
        if (reqp->dr_ibuf_dmah) {
                (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
                ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
        }
        if (reqp->dr_obuf_dmah) {
                (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
                ddi_dma_free_handle(&reqp->dr_obuf_dmah);
        }

        /* x86 scratch buffers came from kmem_alloc() in dca_newreq() */
        kmem_free(reqp->dr_ibuf_kaddr, size);
        kmem_free(reqp->dr_obuf_kaddr, size);
#else
        if (reqp->dr_ibuf_paddr) {
                (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
        }
        if (reqp->dr_obuf_paddr) {
                (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
        }

        if (reqp->dr_ibuf_acch) {
                ddi_dma_mem_free(&reqp->dr_ibuf_acch);
        }
        if (reqp->dr_obuf_acch) {
                ddi_dma_mem_free(&reqp->dr_obuf_acch);
        }

        if (reqp->dr_ibuf_dmah) {
                ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
        }
        if (reqp->dr_obuf_dmah) {
                ddi_dma_free_handle(&reqp->dr_obuf_dmah);
        }
#endif
        /*
         * These two DMA handles should have been unbinded in
         * dca_unbindchains() function
         */
        if (reqp->dr_chain_in_dmah) {
                ddi_dma_free_handle(&reqp->dr_chain_in_dmah);
        }
        if (reqp->dr_chain_out_dmah) {
                ddi_dma_free_handle(&reqp->dr_chain_out_dmah);
        }

        kmem_free(reqp, sizeof (dca_request_t));
}
1823 1815
1824 1816 dca_work_t *
1825 1817 dca_getwork(dca_t *dca, int mcr)
1826 1818 {
1827 1819 dca_worklist_t *wlp = WORKLIST(dca, mcr);
1828 1820 dca_work_t *workp;
1829 1821
1830 1822 mutex_enter(&wlp->dwl_freelock);
1831 1823 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_freework);
1832 1824 mutex_exit(&wlp->dwl_freelock);
1833 1825 if (workp) {
1834 1826 int nreqs;
1835 1827 bzero(workp->dw_mcr_kaddr, 8);
1836 1828
1837 1829 /* clear out old requests */
1838 1830 for (nreqs = 0; nreqs < MAXREQSPERMCR; nreqs++) {
1839 1831 workp->dw_reqs[nreqs] = NULL;
1840 1832 }
1841 1833 }
1842 1834 return (workp);
1843 1835 }
1844 1836
1845 1837 void
1846 1838 dca_freework(dca_work_t *workp)
1847 1839 {
1848 1840 mutex_enter(&workp->dw_wlp->dwl_freelock);
1849 1841 dca_enqueue(&workp->dw_wlp->dwl_freework, (dca_listnode_t *)workp);
1850 1842 mutex_exit(&workp->dw_wlp->dwl_freelock);
1851 1843 }
1852 1844
1853 1845 dca_request_t *
1854 1846 dca_getreq(dca_t *dca, int mcr, int tryhard)
1855 1847 {
1856 1848 dca_worklist_t *wlp = WORKLIST(dca, mcr);
1857 1849 dca_request_t *reqp;
1858 1850
1859 1851 mutex_enter(&wlp->dwl_freereqslock);
1860 1852 reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_freereqs);
1861 1853 mutex_exit(&wlp->dwl_freereqslock);
1862 1854 if (reqp) {
1863 1855 reqp->dr_flags = 0;
1864 1856 reqp->dr_callback = NULL;
1865 1857 } else if (tryhard) {
1866 1858 /*
1867 1859 * failed to get a free one, try an allocation, the hard way.
1868 1860 * XXX: Kstat desired here.
1869 1861 */
1870 1862 if ((reqp = dca_newreq(dca)) != NULL) {
1871 1863 reqp->dr_wlp = wlp;
1872 1864 reqp->dr_dca = dca;
1873 1865 reqp->dr_flags = 0;
1874 1866 reqp->dr_callback = NULL;
1875 1867 }
1876 1868 }
1877 1869 return (reqp);
1878 1870 }
1879 1871
1880 1872 void
1881 1873 dca_freereq(dca_request_t *reqp)
1882 1874 {
1883 1875 reqp->dr_kcf_req = NULL;
1884 1876 if (!(reqp->dr_flags & DR_NOCACHE)) {
1885 1877 mutex_enter(&reqp->dr_wlp->dwl_freereqslock);
1886 1878 dca_enqueue(&reqp->dr_wlp->dwl_freereqs,
1887 1879 (dca_listnode_t *)reqp);
1888 1880 mutex_exit(&reqp->dr_wlp->dwl_freereqslock);
1889 1881 }
1890 1882 }
1891 1883
1892 1884 /*
1893 1885 * Binds user buffers to DMA handles dynamically. On Sparc, a user buffer
1894 1886 * is mapped to a single physical address. On x86, a user buffer is mapped
1895 1887 * to multiple physical addresses. These physical addresses are chained
1896 1888 * using the method specified in Broadcom BCM5820 specification.
1897 1889 */
1898 1890 int
1899 1891 dca_bindchains(dca_request_t *reqp, size_t incnt, size_t outcnt)
1900 1892 {
1901 1893 int rv;
1902 1894 caddr_t kaddr;
1903 1895 uint_t flags;
1904 1896 int n_chain = 0;
1905 1897
1906 1898 if (reqp->dr_flags & DR_INPLACE) {
1907 1899 flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
1908 1900 } else {
1909 1901 flags = DDI_DMA_WRITE | DDI_DMA_STREAMING;
1910 1902 }
1911 1903
1912 1904 /* first the input */
1913 1905 if (incnt) {
1914 1906 if ((kaddr = dca_bufdaddr(reqp->dr_in)) == NULL) {
1915 1907 DBG(NULL, DWARN, "unrecognised crypto data format");
1916 1908 return (DDI_FAILURE);
1917 1909 }
1918 1910 if ((rv = dca_bindchains_one(reqp, incnt, reqp->dr_offset,
1919 1911 kaddr, reqp->dr_chain_in_dmah, flags,
1920 1912 &reqp->dr_chain_in_head, &n_chain)) != DDI_SUCCESS) {
1921 1913 (void) dca_unbindchains(reqp);
1922 1914 return (rv);
1923 1915 }
1924 1916
1925 1917 /*
1926 1918 * The offset and length are altered by the calling routine
1927 1919 * reqp->dr_in->cd_offset += incnt;
1928 1920 * reqp->dr_in->cd_length -= incnt;
1929 1921 */
1930 1922 /* Save the first one in the chain for MCR */
1931 1923 reqp->dr_in_paddr = reqp->dr_chain_in_head.dc_buffer_paddr;
1932 1924 reqp->dr_in_next = reqp->dr_chain_in_head.dc_next_paddr;
1933 1925 reqp->dr_in_len = reqp->dr_chain_in_head.dc_buffer_length;
1934 1926 } else {
1935 1927 reqp->dr_in_paddr = NULL;
1936 1928 reqp->dr_in_next = 0;
1937 1929 reqp->dr_in_len = 0;
1938 1930 }
1939 1931
1940 1932 if (reqp->dr_flags & DR_INPLACE) {
1941 1933 reqp->dr_out_paddr = reqp->dr_in_paddr;
1942 1934 reqp->dr_out_len = reqp->dr_in_len;
1943 1935 reqp->dr_out_next = reqp->dr_in_next;
1944 1936 return (DDI_SUCCESS);
1945 1937 }
1946 1938
1947 1939 /* then the output */
1948 1940 if (outcnt) {
1949 1941 flags = DDI_DMA_READ | DDI_DMA_STREAMING;
1950 1942 if ((kaddr = dca_bufdaddr_out(reqp->dr_out)) == NULL) {
1951 1943 DBG(NULL, DWARN, "unrecognised crypto data format");
1952 1944 (void) dca_unbindchains(reqp);
1953 1945 return (DDI_FAILURE);
1954 1946 }
1955 1947 rv = dca_bindchains_one(reqp, outcnt, reqp->dr_offset +
1956 1948 n_chain * DESC_SIZE, kaddr, reqp->dr_chain_out_dmah,
1957 1949 flags, &reqp->dr_chain_out_head, &n_chain);
1958 1950 if (rv != DDI_SUCCESS) {
1959 1951 (void) dca_unbindchains(reqp);
1960 1952 return (DDI_FAILURE);
1961 1953 }
1962 1954
1963 1955 /* Save the first one in the chain for MCR */
1964 1956 reqp->dr_out_paddr = reqp->dr_chain_out_head.dc_buffer_paddr;
1965 1957 reqp->dr_out_next = reqp->dr_chain_out_head.dc_next_paddr;
1966 1958 reqp->dr_out_len = reqp->dr_chain_out_head.dc_buffer_length;
1967 1959 } else {
1968 1960 reqp->dr_out_paddr = NULL;
1969 1961 reqp->dr_out_next = 0;
1970 1962 reqp->dr_out_len = 0;
1971 1963 }
1972 1964
1973 1965 return (DDI_SUCCESS);
1974 1966 }
1975 1967
1976 1968 /*
1977 1969 * Unbind the user buffers from the DMA handles.
1978 1970 */
1979 1971 int
1980 1972 dca_unbindchains(dca_request_t *reqp)
1981 1973 {
1982 1974 int rv = DDI_SUCCESS;
1983 1975 int rv1 = DDI_SUCCESS;
1984 1976
1985 1977 /* Clear the input chain */
1986 1978 if (reqp->dr_chain_in_head.dc_buffer_paddr != NULL) {
1987 1979 (void) ddi_dma_unbind_handle(reqp->dr_chain_in_dmah);
1988 1980 reqp->dr_chain_in_head.dc_buffer_paddr = 0;
1989 1981 }
1990 1982
1991 1983 if (reqp->dr_flags & DR_INPLACE) {
1992 1984 return (rv);
1993 1985 }
1994 1986
1995 1987 /* Clear the output chain */
1996 1988 if (reqp->dr_chain_out_head.dc_buffer_paddr != NULL) {
1997 1989 (void) ddi_dma_unbind_handle(reqp->dr_chain_out_dmah);
1998 1990 reqp->dr_chain_out_head.dc_buffer_paddr = 0;
1999 1991 }
2000 1992
2001 1993 return ((rv != DDI_SUCCESS)? rv : rv1);
2002 1994 }
2003 1995
2004 1996 /*
2005 1997 * Build either input chain or output chain. It is single-item chain for Sparc,
2006 1998 * and possible mutiple-item chain for x86.
2007 1999 */
2008 2000 static int
2009 2001 dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
2010 2002 caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
2011 2003 dca_chain_t *head, int *n_chain)
2012 2004 {
2013 2005 ddi_dma_cookie_t c;
2014 2006 uint_t nc;
2015 2007 int rv;
2016 2008 caddr_t chain_kaddr_pre;
2017 2009 caddr_t chain_kaddr;
2018 2010 uint32_t chain_paddr;
2019 2011 int i;
2020 2012
2021 2013 /* Advance past the context structure to the starting address */
2022 2014 chain_paddr = reqp->dr_ctx_paddr + dr_offset;
2023 2015 chain_kaddr = reqp->dr_ctx_kaddr + dr_offset;
2024 2016
2025 2017 /*
2026 2018 * Bind the kernel address to the DMA handle. On x86, the actual
2027 2019 * buffer is mapped into multiple physical addresses. On Sparc,
2028 2020 * the actual buffer is mapped into a single address.
2029 2021 */
2030 2022 rv = ddi_dma_addr_bind_handle(handle,
2031 2023 NULL, kaddr, cnt, flags, DDI_DMA_DONTWAIT, NULL, &c, &nc);
2032 2024 if (rv != DDI_DMA_MAPPED) {
2033 2025 return (DDI_FAILURE);
2034 2026 }
2035 2027
2036 2028 (void) ddi_dma_sync(handle, 0, cnt, DDI_DMA_SYNC_FORDEV);
2037 2029 if ((rv = dca_check_dma_handle(reqp->dr_dca, handle,
2038 2030 DCA_FM_ECLASS_NONE)) != DDI_SUCCESS) {
2039 2031 reqp->destroy = TRUE;
2040 2032 return (rv);
2041 2033 }
2042 2034
2043 2035 *n_chain = nc;
2044 2036
2045 2037 /* Setup the data buffer chain for DMA transfer */
2046 2038 chain_kaddr_pre = NULL;
2047 2039 head->dc_buffer_paddr = 0;
2048 2040 head->dc_next_paddr = 0;
2049 2041 head->dc_buffer_length = 0;
2050 2042 for (i = 0; i < nc; i++) {
2051 2043 /* PIO */
2052 2044 PUTDESC32(reqp, chain_kaddr, DESC_BUFADDR, c.dmac_address);
2053 2045 PUTDESC16(reqp, chain_kaddr, DESC_RSVD, 0);
2054 2046 PUTDESC16(reqp, chain_kaddr, DESC_LENGTH, c.dmac_size);
2055 2047
2056 2048 /* Remember the head of the chain */
2057 2049 if (head->dc_buffer_paddr == 0) {
2058 2050 head->dc_buffer_paddr = c.dmac_address;
2059 2051 head->dc_buffer_length = c.dmac_size;
2060 2052 }
2061 2053
2062 2054 /* Link to the previous one if one exists */
2063 2055 if (chain_kaddr_pre) {
2064 2056 PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT,
2065 2057 chain_paddr);
2066 2058 if (head->dc_next_paddr == 0)
2067 2059 head->dc_next_paddr = chain_paddr;
2068 2060 }
2069 2061 chain_kaddr_pre = chain_kaddr;
2070 2062
2071 2063 /* Maintain pointers */
2072 2064 chain_paddr += DESC_SIZE;
2073 2065 chain_kaddr += DESC_SIZE;
2074 2066
2075 2067 /* Retrieve the next cookie if there is one */
2076 2068 if (i < nc-1)
2077 2069 ddi_dma_nextcookie(handle, &c);
2078 2070 }
2079 2071
2080 2072 /* Set the next pointer in the last entry to NULL */
2081 2073 PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 0);
2082 2074
2083 2075 return (DDI_SUCCESS);
2084 2076 }
2085 2077
2086 2078 /*
2087 2079 * Schedule some work.
2088 2080 */
2089 2081 int
2090 2082 dca_start(dca_t *dca, dca_request_t *reqp, int mcr, int dosched)
2091 2083 {
2092 2084 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2093 2085
2094 2086 mutex_enter(&wlp->dwl_lock);
2095 2087
2096 2088 DBG(dca, DCHATTY, "req=%p, in=%p, out=%p, ctx=%p, ibuf=%p, obuf=%p",
2097 2089 reqp, reqp->dr_in, reqp->dr_out, reqp->dr_ctx_kaddr,
2098 2090 reqp->dr_ibuf_kaddr, reqp->dr_obuf_kaddr);
2099 2091 DBG(dca, DCHATTY, "ctx paddr = %x, ibuf paddr = %x, obuf paddr = %x",
2100 2092 reqp->dr_ctx_paddr, reqp->dr_ibuf_paddr, reqp->dr_obuf_paddr);
2101 2093 /* sync out the entire context and descriptor chains */
2102 2094 (void) ddi_dma_sync(reqp->dr_ctx_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
2103 2095 if (dca_check_dma_handle(dca, reqp->dr_ctx_dmah,
2104 2096 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2105 2097 reqp->destroy = TRUE;
2106 2098 mutex_exit(&wlp->dwl_lock);
2107 2099 return (CRYPTO_DEVICE_ERROR);
2108 2100 }
2109 2101
2110 2102 dca_enqueue(&wlp->dwl_waitq, (dca_listnode_t *)reqp);
2111 2103 wlp->dwl_count++;
2112 2104 wlp->dwl_lastsubmit = ddi_get_lbolt();
2113 2105 reqp->dr_wlp = wlp;
2114 2106
2115 2107 if ((wlp->dwl_count == wlp->dwl_hiwater) && (wlp->dwl_busy == 0)) {
2116 2108 /* we are fully loaded now, let kCF know */
2117 2109
2118 2110 wlp->dwl_flowctl++;
2119 2111 wlp->dwl_busy = 1;
2120 2112
2121 2113 crypto_prov_notify(wlp->dwl_prov, CRYPTO_PROVIDER_BUSY);
2122 2114 }
2123 2115
2124 2116 if (dosched) {
2125 2117 #ifdef SCHEDDELAY
2126 2118 /* possibly wait for more work to arrive */
2127 2119 if (wlp->dwl_count >= wlp->dwl_reqspermcr) {
2128 2120 dca_schedule(dca, mcr);
2129 2121 } else if (!wlp->dwl_schedtid) {
2130 2122 /* wait 1 msec for more work before doing it */
2131 2123 wlp->dwl_schedtid = timeout(dca_schedtimeout,
2132 2124 (void *)wlp, drv_usectohz(MSEC));
2133 2125 }
2134 2126 #else
2135 2127 dca_schedule(dca, mcr);
2136 2128 #endif
2137 2129 }
2138 2130 mutex_exit(&wlp->dwl_lock);
2139 2131
2140 2132 return (CRYPTO_QUEUED);
2141 2133 }
2142 2134
/*
 * Submit queued requests to the hardware for the given MCR.
 *
 * Packs waiting requests into MCR work structures and writes each MCR's
 * physical address to the chip's CSR, looping until the chip reports
 * the MCR register full or the wait queue is empty.  Also applies
 * lowater flow control (notifying kCF when the queue drains enough).
 * Caller must hold the worklist's dwl_lock.
 */
void
dca_schedule(dca_t *dca, int mcr)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);
	int		csr;
	int		full;
	uint32_t	status;

	ASSERT(mutex_owned(&wlp->dwl_lock));
	/*
	 * If the card is draining or has an outstanding failure,
	 * don't schedule any more work on it right now
	 */
	if (wlp->dwl_drain || (dca->dca_flags & DCA_FAILED)) {
		return;
	}

	/* select the CSR and "full" status bit for this MCR */
	if (mcr == MCR2) {
		csr = CSR_MCR2;
		full = DMASTAT_MCR2FULL;
	} else {
		csr = CSR_MCR1;
		full = DMASTAT_MCR1FULL;
	}

	for (;;) {
		dca_work_t	*workp;
		uint32_t	offset;
		int		nreqs;

		status = GETCSR(dca, CSR_DMASTAT);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
			return;

		/* chip can't accept another MCR right now */
		if ((status & full) != 0)
			break;

#ifdef	SCHEDDELAY
		/* if there isn't enough to do, don't bother now */
		if ((wlp->dwl_count < wlp->dwl_reqspermcr) &&
		    (ddi_get_lbolt() < (wlp->dwl_lastsubmit +
		    drv_usectohz(MSEC)))) {
			/* wait a bit longer... */
			if (wlp->dwl_schedtid == 0) {
				wlp->dwl_schedtid = timeout(dca_schedtimeout,
				    (void *)wlp, drv_usectohz(MSEC));
			}
			return;
		}
#endif

		/* grab a work structure */
		workp = dca_getwork(dca, mcr);

		if (workp == NULL) {
			/*
			 * There must be work ready to be reclaimed,
			 * in this case, since the chip can only hold
			 * less work outstanding than there are total.
			 */
			dca_reclaim(dca, mcr);
			continue;
		}

		nreqs = 0;
		offset = MCR_CTXADDR;

		/* fill the MCR with up to dwl_reqspermcr waiting requests */
		while (nreqs < wlp->dwl_reqspermcr) {
			dca_request_t	*reqp;

			reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_waitq);
			if (reqp == NULL) {
				/* nothing left to process */
				break;
			}
			/*
			 * Update flow control.
			 */
			wlp->dwl_count--;
			if ((wlp->dwl_count == wlp->dwl_lowater) &&
			    (wlp->dwl_busy))  {
				wlp->dwl_busy = 0;
				crypto_prov_notify(wlp->dwl_prov,
				    CRYPTO_PROVIDER_READY);
			}

			/*
			 * Context address.
			 */
			PUTMCR32(workp, offset, reqp->dr_ctx_paddr);
			offset += 4;

			/*
			 * Input chain.
			 */
			/* input buffer address */
			PUTMCR32(workp, offset, reqp->dr_in_paddr);
			offset += 4;
			/* next input buffer entry */
			PUTMCR32(workp, offset, reqp->dr_in_next);
			offset += 4;
			/* input buffer length */
			PUTMCR16(workp, offset, reqp->dr_in_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Overall length.
			 */
			/* reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;
			/* total packet length */
			PUTMCR16(workp, offset, reqp->dr_pkt_length);
			offset += 2;

			/*
			 * Output chain.
			 */
			/* output buffer address */
			PUTMCR32(workp, offset, reqp->dr_out_paddr);
			offset += 4;
			/* next output buffer entry */
			PUTMCR32(workp, offset, reqp->dr_out_next);
			offset += 4;
			/* output buffer length */
			PUTMCR16(workp, offset, reqp->dr_out_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Note submission.
			 */
			workp->dw_reqs[nreqs] = reqp;
			nreqs++;
		}

		if (nreqs == 0) {
			/* nothing in the queue! */
			dca_freework(workp);
			return;
		}

		wlp->dwl_submit++;

		PUTMCR16(workp, MCR_FLAGS, 0);
		PUTMCR16(workp, MCR_COUNT, nreqs);

		DBG(dca, DCHATTY,
		    "posting work (phys %x, virt 0x%p) (%d reqs) to MCR%d",
		    workp->dw_mcr_paddr, workp->dw_mcr_kaddr,
		    nreqs, mcr);

		workp->dw_lbolt = ddi_get_lbolt();
		/* Make sure MCR is synced out to device. */
		(void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		}

		/* hand the MCR's physical address to the chip */
		PUTCSR(dca, csr, workp->dw_mcr_paddr);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		} else {
			/* submitted successfully; track it on the run queue */
			dca_enqueue(&wlp->dwl_runq, (dca_listnode_t *)workp);
		}

		DBG(dca, DCHATTY, "posted");
	}
}
2323 2315
2324 2316 /*
2325 2317 * Reclaim completed work, called in interrupt context.
2326 2318 */
2327 2319 void
2328 2320 dca_reclaim(dca_t *dca, int mcr)
2329 2321 {
2330 2322 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2331 2323 dca_work_t *workp;
2332 2324 ushort_t flags;
2333 2325 int nreclaimed = 0;
2334 2326 int i;
2335 2327
2336 2328 DBG(dca, DRECLAIM, "worklist = 0x%p (MCR%d)", wlp, mcr);
2337 2329 ASSERT(mutex_owned(&wlp->dwl_lock));
2338 2330 /*
2339 2331 * For each MCR in the submitted (runq), we check to see if
2340 2332 * it has been processed. If so, then we note each individual
2341 2333 * job in the MCR, and and do the completion processing for
2342 2334 * each of such job.
2343 2335 */
2344 2336 for (;;) {
2345 2337
2346 2338 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
2347 2339 if (workp == NULL) {
2348 2340 break;
2349 2341 }
2350 2342
2351 2343 /* only sync the MCR flags, since that's all we need */
2352 2344 (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 4,
2353 2345 DDI_DMA_SYNC_FORKERNEL);
2354 2346 if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
2355 2347 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2356 2348 dca_rmqueue((dca_listnode_t *)workp);
2357 2349 dca_destroywork(workp);
2358 2350 return;
2359 2351 }
2360 2352
2361 2353 flags = GETMCR16(workp, MCR_FLAGS);
2362 2354 if ((flags & MCRFLAG_FINISHED) == 0) {
2363 2355 /* chip is still working on it */
2364 2356 DBG(dca, DRECLAIM,
2365 2357 "chip still working on it (MCR%d)", mcr);
2366 2358 break;
2367 2359 }
2368 2360
2369 2361 /* its really for us, so remove it from the queue */
2370 2362 dca_rmqueue((dca_listnode_t *)workp);
2371 2363
2372 2364 /* if we were draining, signal on the cv */
2373 2365 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
2374 2366 cv_signal(&wlp->dwl_cv);
2375 2367 }
2376 2368
2377 2369 /* update statistics, done under the lock */
2378 2370 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2379 2371 dca_request_t *reqp = workp->dw_reqs[i];
2380 2372 if (reqp == NULL) {
2381 2373 continue;
2382 2374 }
2383 2375 if (reqp->dr_byte_stat >= 0) {
2384 2376 dca->dca_stats[reqp->dr_byte_stat] +=
2385 2377 reqp->dr_pkt_length;
2386 2378 }
2387 2379 if (reqp->dr_job_stat >= 0) {
2388 2380 dca->dca_stats[reqp->dr_job_stat]++;
2389 2381 }
2390 2382 }
2391 2383 mutex_exit(&wlp->dwl_lock);
2392 2384
2393 2385 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2394 2386 dca_request_t *reqp = workp->dw_reqs[i];
2395 2387
2396 2388 if (reqp == NULL) {
2397 2389 continue;
2398 2390 }
2399 2391
2400 2392 /* Do the callback. */
2401 2393 workp->dw_reqs[i] = NULL;
2402 2394 dca_done(reqp, CRYPTO_SUCCESS);
2403 2395
2404 2396 nreclaimed++;
2405 2397 }
2406 2398
2407 2399 /* now we can release the work */
2408 2400 dca_freework(workp);
2409 2401
2410 2402 mutex_enter(&wlp->dwl_lock);
2411 2403 }
2412 2404 DBG(dca, DRECLAIM, "reclaimed %d cmds", nreclaimed);
2413 2405 }
2414 2406
/*
 * Return the number of bytes of payload in a crypto_data_t.
 */
int
dca_length(crypto_data_t *cdata)
{
	return (cdata->cd_length);
}
2420 2412
2421 2413 /*
2422 2414 * This is the callback function called from the interrupt when a kCF job
2423 2415 * completes. It does some driver-specific things, and then calls the
2424 2416 * kCF-provided callback. Finally, it cleans up the state for the work
2425 2417 * request and drops the reference count to allow for DR.
2426 2418 */
2427 2419 void
2428 2420 dca_done(dca_request_t *reqp, int err)
2429 2421 {
2430 2422 uint64_t ena = 0;
2431 2423
2432 2424 /* unbind any chains we were using */
2433 2425 if (dca_unbindchains(reqp) != DDI_SUCCESS) {
2434 2426 /* DMA failure */
2435 2427 ena = dca_ena(ena);
2436 2428 dca_failure(reqp->dr_dca, DDI_DATAPATH_FAULT,
2437 2429 DCA_FM_ECLASS_NONE, ena, CRYPTO_DEVICE_ERROR,
2438 2430 "fault on buffer DMA handle");
2439 2431 if (err == CRYPTO_SUCCESS) {
2440 2432 err = CRYPTO_DEVICE_ERROR;
2441 2433 }
2442 2434 }
2443 2435
2444 2436 if (reqp->dr_callback != NULL) {
2445 2437 reqp->dr_callback(reqp, err);
2446 2438 } else {
2447 2439 dca_freereq(reqp);
2448 2440 }
2449 2441 }
2450 2442
2451 2443 /*
2452 2444 * Call this when a failure is detected. It will reset the chip,
2453 2445 * log a message, alert kCF, and mark jobs in the runq as failed.
2454 2446 */
2455 2447 /* ARGSUSED */
2456 2448 void
2457 2449 dca_failure(dca_t *dca, ddi_fault_location_t loc, dca_fma_eclass_t index,
2458 2450 uint64_t ena, int errno, char *mess, ...)
2459 2451 {
2460 2452 va_list ap;
2461 2453 char buf[256];
2462 2454 int mcr;
2463 2455 char *eclass;
2464 2456 int have_mutex;
2465 2457
2466 2458 va_start(ap, mess);
2467 2459 (void) vsprintf(buf, mess, ap);
2468 2460 va_end(ap);
2469 2461
2470 2462 eclass = dca_fma_eclass_string(dca->dca_model, index);
2471 2463
2472 2464 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) &&
2473 2465 index != DCA_FM_ECLASS_NONE) {
2474 2466 ddi_fm_ereport_post(dca->dca_dip, eclass, ena,
2475 2467 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
2476 2468 FM_EREPORT_VERS0, NULL);
2477 2469
2478 2470 /* Report the impact of the failure to the DDI. */
2479 2471 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_LOST);
2480 2472 } else {
2481 2473 /* Just log the error string to the message log */
2482 2474 dca_error(dca, buf);
2483 2475 }
2484 2476
2485 2477 /*
2486 2478 * Indicate a failure (keeps schedule from running).
2487 2479 */
2488 2480 dca->dca_flags |= DCA_FAILED;
2489 2481
2490 2482 /*
2491 2483 * Reset the chip. This should also have as a side effect, the
2492 2484 * disabling of all interrupts from the device.
2493 2485 */
2494 2486 (void) dca_reset(dca, 1);
2495 2487
2496 2488 /*
2497 2489 * Report the failure to kCF.
2498 2490 */
2499 2491 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2500 2492 if (WORKLIST(dca, mcr)->dwl_prov) {
2501 2493 crypto_prov_notify(WORKLIST(dca, mcr)->dwl_prov,
2502 2494 CRYPTO_PROVIDER_FAILED);
2503 2495 }
2504 2496 }
2505 2497
2506 2498 /*
2507 2499 * Return jobs not sent to hardware back to kCF.
2508 2500 */
2509 2501 dca_rejectjobs(dca);
2510 2502
2511 2503 /*
2512 2504 * From this point on, no new work should be arriving, and the
2513 2505 * chip should not be doing any active DMA.
2514 2506 */
2515 2507
2516 2508 /*
2517 2509 * Now find all the work submitted to the device and fail
2518 2510 * them.
2519 2511 */
2520 2512 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2521 2513 dca_worklist_t *wlp;
2522 2514 int i;
2523 2515
2524 2516 wlp = WORKLIST(dca, mcr);
2525 2517
2526 2518 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
2527 2519 continue;
2528 2520 }
2529 2521 for (;;) {
2530 2522 dca_work_t *workp;
2531 2523
2532 2524 have_mutex = mutex_tryenter(&wlp->dwl_lock);
2533 2525 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_runq);
2534 2526 if (workp == NULL) {
2535 2527 if (have_mutex)
2536 2528 mutex_exit(&wlp->dwl_lock);
2537 2529 break;
2538 2530 }
2539 2531 mutex_exit(&wlp->dwl_lock);
2540 2532
2541 2533 /*
2542 2534 * Free up requests
2543 2535 */
2544 2536 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2545 2537 dca_request_t *reqp = workp->dw_reqs[i];
2546 2538 if (reqp) {
2547 2539 dca_done(reqp, errno);
2548 2540 workp->dw_reqs[i] = NULL;
2549 2541 }
2550 2542 }
2551 2543
2552 2544 mutex_enter(&wlp->dwl_lock);
2553 2545 /*
2554 2546 * If waiting to drain, signal on the waiter.
2555 2547 */
2556 2548 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
2557 2549 cv_signal(&wlp->dwl_cv);
2558 2550 }
2559 2551
2560 2552 /*
2561 2553 * Return the work and request structures to
2562 2554 * the free pool.
2563 2555 */
2564 2556 dca_freework(workp);
2565 2557 if (have_mutex)
2566 2558 mutex_exit(&wlp->dwl_lock);
2567 2559 }
2568 2560 }
2569 2561
2570 2562 }
2571 2563
#ifdef	SCHEDDELAY
/*
 * Reschedule worklist as needed.
 *
 * timeout(9F) callback: clears the pending timeout id and kicks the
 * scheduler for the worklist passed as `arg'.
 */
void
dca_schedtimeout(void *arg)
{
	dca_worklist_t	*wlp = (dca_worklist_t *)arg;
	mutex_enter(&wlp->dwl_lock);
	/* mark no timeout outstanding before scheduling more work */
	wlp->dwl_schedtid = 0;
	dca_schedule(wlp->dwl_dca, wlp->dwl_mcr);
	mutex_exit(&wlp->dwl_lock);
}
#endif
2586 2578
2587 2579 /*
2588 2580 * Check for stalled jobs.
2589 2581 */
2590 2582 void
2591 2583 dca_jobtimeout(void *arg)
2592 2584 {
2593 2585 int mcr;
2594 2586 dca_t *dca = (dca_t *)arg;
2595 2587 int hung = 0;
2596 2588
2597 2589 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2598 2590 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2599 2591 dca_work_t *workp;
2600 2592 clock_t when;
2601 2593
2602 2594 mutex_enter(&wlp->dwl_lock);
2603 2595 when = ddi_get_lbolt();
2604 2596
2605 2597 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
2606 2598 if (workp == NULL) {
2607 2599 /* nothing sitting in the queue */
2608 2600 mutex_exit(&wlp->dwl_lock);
2609 2601 continue;
2610 2602 }
2611 2603
2612 2604 if ((when - workp->dw_lbolt) < drv_usectohz(STALETIME)) {
2613 2605 /* request has been queued for less than STALETIME */
2614 2606 mutex_exit(&wlp->dwl_lock);
2615 2607 continue;
2616 2608 }
2617 2609
2618 2610 /* job has been sitting around for over 1 second, badness */
2619 2611 DBG(dca, DWARN, "stale job (0x%p) found in MCR%d!", workp,
2620 2612 mcr);
2621 2613
2622 2614 /* put it back in the queue, until we reset the chip */
2623 2615 hung++;
2624 2616 mutex_exit(&wlp->dwl_lock);
2625 2617 }
2626 2618
2627 2619 if (hung) {
2628 2620 dca_failure(dca, DDI_DEVICE_FAULT,
2629 2621 DCA_FM_ECLASS_HW_TIMEOUT, dca_ena(0), CRYPTO_DEVICE_ERROR,
2630 2622 "timeout processing job.)");
2631 2623 }
2632 2624
2633 2625 /* reschedule ourself */
2634 2626 mutex_enter(&dca->dca_intrlock);
2635 2627 if (dca->dca_jobtid == 0) {
2636 2628 /* timeout has been canceled, prior to DR */
2637 2629 mutex_exit(&dca->dca_intrlock);
2638 2630 return;
2639 2631 }
2640 2632
2641 2633 /* check again in 1 second */
2642 2634 dca->dca_jobtid = timeout(dca_jobtimeout, arg,
2643 2635 drv_usectohz(SECOND));
2644 2636 mutex_exit(&dca->dca_intrlock);
2645 2637 }
2646 2638
2647 2639 /*
2648 2640 * This returns all jobs back to kCF. It assumes that processing
2649 2641 * on the worklist has halted.
2650 2642 */
2651 2643 void
2652 2644 dca_rejectjobs(dca_t *dca)
2653 2645 {
2654 2646 int mcr;
2655 2647 int have_mutex;
2656 2648 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2657 2649 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2658 2650 dca_request_t *reqp;
2659 2651
2660 2652 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
2661 2653 continue;
2662 2654 }
2663 2655 have_mutex = mutex_tryenter(&wlp->dwl_lock);
2664 2656 for (;;) {
2665 2657 reqp = (dca_request_t *)dca_unqueue(&wlp->dwl_waitq);
2666 2658 if (reqp == NULL) {
2667 2659 break;
2668 2660 }
2669 2661 /* update flow control */
2670 2662 wlp->dwl_count--;
2671 2663 if ((wlp->dwl_count == wlp->dwl_lowater) &&
2672 2664 (wlp->dwl_busy)) {
2673 2665 wlp->dwl_busy = 0;
2674 2666 crypto_prov_notify(wlp->dwl_prov,
2675 2667 CRYPTO_PROVIDER_READY);
2676 2668 }
2677 2669 mutex_exit(&wlp->dwl_lock);
2678 2670
2679 2671 (void) dca_unbindchains(reqp);
2680 2672 reqp->dr_callback(reqp, EAGAIN);
2681 2673 mutex_enter(&wlp->dwl_lock);
2682 2674 }
2683 2675 if (have_mutex)
2684 2676 mutex_exit(&wlp->dwl_lock);
2685 2677 }
2686 2678 }
2687 2679
/*
 * Quiesce both MCR worklists prior to DR/detach.
 *
 * Sets the drain flag (which stops dca_schedule from submitting new
 * work), then waits up to STALETIME for the chip to finish what it has.
 * Returns 0 on success or EBUSY (after undraining) if the device would
 * not quiesce.
 */
int
dca_drain(dca_t *dca)
{
	int mcr;
	for (mcr = MCR1; mcr <= MCR2; mcr++) {
#ifdef	SCHEDDELAY
		timeout_id_t	tid;
#endif
		dca_worklist_t *wlp = WORKLIST(dca, mcr);

		mutex_enter(&wlp->dwl_lock);
		wlp->dwl_drain = 1;

		/* give it up to a second to drain from the chip */
		if (!QEMPTY(&wlp->dwl_runq)) {
			/* dca_reclaim signals dwl_cv when the runq empties */
			(void) cv_reltimedwait(&wlp->dwl_cv, &wlp->dwl_lock,
			    drv_usectohz(STALETIME), TR_CLOCK_TICK);

			if (!QEMPTY(&wlp->dwl_runq)) {
				dca_error(dca, "unable to drain device");
				mutex_exit(&wlp->dwl_lock);
				/* undo the drain flags set so far */
				dca_undrain(dca);
				return (EBUSY);
			}
		}

#ifdef	SCHEDDELAY
		tid = wlp->dwl_schedtid;
		mutex_exit(&wlp->dwl_lock);

		/*
		 * untimeout outside the lock -- this is safe because we
		 * have set the drain flag, so dca_schedule() will not
		 * reschedule another timeout
		 */
		if (tid) {
			untimeout(tid);
		}
#else
		mutex_exit(&wlp->dwl_lock);
#endif
	}
	return (0);
}
2732 2724
2733 2725 void
2734 2726 dca_undrain(dca_t *dca)
2735 2727 {
2736 2728 int mcr;
2737 2729
2738 2730 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2739 2731 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2740 2732 mutex_enter(&wlp->dwl_lock);
2741 2733 wlp->dwl_drain = 0;
2742 2734 dca_schedule(dca, mcr);
2743 2735 mutex_exit(&wlp->dwl_lock);
2744 2736 }
2745 2737 }
2746 2738
2747 2739 /*
2748 2740 * Duplicate the crypto_data_t structure, but point to the original
2749 2741 * buffers.
2750 2742 */
2751 2743 int
2752 2744 dca_dupcrypto(crypto_data_t *input, crypto_data_t *ninput)
2753 2745 {
2754 2746 ninput->cd_format = input->cd_format;
2755 2747 ninput->cd_offset = input->cd_offset;
2756 2748 ninput->cd_length = input->cd_length;
2757 2749 ninput->cd_miscdata = input->cd_miscdata;
2758 2750
2759 2751 switch (input->cd_format) {
2760 2752 case CRYPTO_DATA_RAW:
2761 2753 ninput->cd_raw.iov_base = input->cd_raw.iov_base;
2762 2754 ninput->cd_raw.iov_len = input->cd_raw.iov_len;
2763 2755 break;
2764 2756
2765 2757 case CRYPTO_DATA_UIO:
2766 2758 ninput->cd_uio = input->cd_uio;
2767 2759 break;
2768 2760
2769 2761 case CRYPTO_DATA_MBLK:
2770 2762 ninput->cd_mp = input->cd_mp;
2771 2763 break;
2772 2764
2773 2765 default:
2774 2766 DBG(NULL, DWARN,
2775 2767 "dca_dupcrypto: unrecognised crypto data format");
2776 2768 return (CRYPTO_FAILED);
2777 2769 }
2778 2770
2779 2771 return (CRYPTO_SUCCESS);
2780 2772 }
2781 2773
2782 2774 /*
2783 2775 * Performs validation checks on the input and output data structures.
2784 2776 */
2785 2777 int
2786 2778 dca_verifyio(crypto_data_t *input, crypto_data_t *output)
2787 2779 {
2788 2780 int rv = CRYPTO_SUCCESS;
2789 2781
2790 2782 switch (input->cd_format) {
2791 2783 case CRYPTO_DATA_RAW:
2792 2784 break;
2793 2785
2794 2786 case CRYPTO_DATA_UIO:
2795 2787 /* we support only kernel buffer */
2796 2788 if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
2797 2789 DBG(NULL, DWARN, "non kernel input uio buffer");
2798 2790 rv = CRYPTO_ARGUMENTS_BAD;
2799 2791 }
2800 2792 break;
2801 2793
2802 2794 case CRYPTO_DATA_MBLK:
2803 2795 break;
2804 2796
2805 2797 default:
2806 2798 DBG(NULL, DWARN, "unrecognised input crypto data format");
2807 2799 rv = CRYPTO_ARGUMENTS_BAD;
2808 2800 }
2809 2801
2810 2802 switch (output->cd_format) {
2811 2803 case CRYPTO_DATA_RAW:
2812 2804 break;
2813 2805
2814 2806 case CRYPTO_DATA_UIO:
2815 2807 /* we support only kernel buffer */
2816 2808 if (output->cd_uio->uio_segflg != UIO_SYSSPACE) {
2817 2809 DBG(NULL, DWARN, "non kernel output uio buffer");
2818 2810 rv = CRYPTO_ARGUMENTS_BAD;
2819 2811 }
2820 2812 break;
2821 2813
2822 2814 case CRYPTO_DATA_MBLK:
2823 2815 break;
2824 2816
2825 2817 default:
2826 2818 DBG(NULL, DWARN, "unrecognised output crypto data format");
2827 2819 rv = CRYPTO_ARGUMENTS_BAD;
2828 2820 }
2829 2821
2830 2822 return (rv);
2831 2823 }
2832 2824
/*
 * Copy bytes out of a crypto_data_t into a flat destination buffer
 * without consuming them (the source struct is left unmodified).
 *
 * data: source crypto_data_t struct (RAW, UIO or MBLK format)
 * off: offset into the source before commencing copy
 * count: the amount of data to copy
 * dest: destination buffer (caller guarantees room for `count' bytes)
 *
 * Returns CRYPTO_SUCCESS, CRYPTO_DATA_LEN_RANGE if the requested range
 * runs past the data provided, or CRYPTO_ARGUMENTS_BAD for an
 * unrecognised data format.
 */
int
dca_getbufbytes(crypto_data_t *data, size_t off, int count, uchar_t *dest)
{
	int rv = CRYPTO_SUCCESS;
	uio_t *uiop;
	uint_t vec_idx;
	size_t cur_len;
	mblk_t *mp;

	if (count == 0) {
		/* We don't want anything so we're done. */
		return (rv);
	}

	/*
	 * Sanity check that we haven't specified a length greater than the
	 * offset adjusted size of the buffer.
	 */
	if (count > (data->cd_length - off)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/* Add the internal crypto_data offset to the requested offset. */
	off += data->cd_offset;

	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		/* Contiguous buffer: a single copy suffices. */
		bcopy(data->cd_raw.iov_base + off, dest, count);
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be
		 * processed.
		 */
		uiop = data->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
			    cur_len);
			count -= cur_len;
			dest += cur_len;
			vec_idx++;
			/* Only the first iovec carries a partial offset. */
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = data->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			bcopy((char *)(mp->b_rptr + off), dest, cur_len);
			count -= cur_len;
			dest += cur_len;
			mp = mp->b_cont;
			/* Only the first mblk carries a partial offset. */
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
2954 2946
2955 2947
/*
 * Performs the input, output or hard scatter/gather checks on the specified
 * crypto_data_t struct. Returns true if the data is scatter/gather in nature
 * ie fails the test.
 *
 * dca: device soft state; supplies dca_pagesize for the DCA_SG_PALIGN test
 * data: the buffer to inspect (RAW, UIO or MBLK format)
 * val: which test to apply - contiguity, 32-bit word alignment, or
 *	page alignment
 *
 * NOTE(review): for unrecognised formats/params this returns FALSE
 * ("passes"), only logging a warning - presumably callers validate the
 * format beforehand; confirm before relying on this path.
 */
int
dca_sgcheck(dca_t *dca, crypto_data_t *data, dca_sg_param_t val)
{
	uio_t *uiop;
	mblk_t *mp;
	int rv = FALSE;

	switch (val) {
	case DCA_SG_CONTIG:
		/*
		 * Check for a contiguous data buffer.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			/* Contiguous in nature */
			break;

		case CRYPTO_DATA_UIO:
			/* More than one iovec means scatter/gather. */
			if (data->cd_uio->uio_iovcnt > 1)
				rv = TRUE;
			break;

		case CRYPTO_DATA_MBLK:
			/* A chained mblk means scatter/gather. */
			mp = data->cd_mp;
			if (mp->b_cont != NULL)
				rv = TRUE;
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	case DCA_SG_WALIGN:
		/*
		 * Check for a contiguous data buffer that is 32-bit word
		 * aligned and is of word multiples in size.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			if ((data->cd_raw.iov_len % sizeof (uint32_t)) ||
			    ((uintptr_t)data->cd_raw.iov_base %
			    sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_UIO:
			uiop = data->cd_uio;
			if (uiop->uio_iovcnt > 1) {
				/* Not contiguous: fails outright. */
				return (TRUE);
			}
			/* So there is only one iovec */
			if ((uiop->uio_iov[0].iov_len % sizeof (uint32_t)) ||
			    ((uintptr_t)uiop->uio_iov[0].iov_base %
			    sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_MBLK:
			mp = data->cd_mp;
			if (mp->b_cont != NULL) {
				/* Not contiguous: fails outright. */
				return (TRUE);
			}
			/* So there is only one mblk in the chain */
			if ((MBLKL(mp) % sizeof (uint32_t)) ||
			    ((uintptr_t)mp->b_rptr % sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	case DCA_SG_PALIGN:
		/*
		 * Check that the data buffer is page aligned and is of
		 * page multiples in size.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			if ((data->cd_length % dca->dca_pagesize) ||
			    ((uintptr_t)data->cd_raw.iov_base %
			    dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_UIO:
			/* Only the first iovec is examined here. */
			uiop = data->cd_uio;
			if ((uiop->uio_iov[0].iov_len % dca->dca_pagesize) ||
			    ((uintptr_t)uiop->uio_iov[0].iov_base %
			    dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_MBLK:
			/* Only the first mblk is examined here. */
			mp = data->cd_mp;
			if ((MBLKL(mp) % dca->dca_pagesize) ||
			    ((uintptr_t)mp->b_rptr % dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised scatter/gather param type");
	}

	return (rv);
}
3080 3072
/*
 * Consume `count' bytes from the crypto_data_t struct into `dest'.
 * Increments the cd_offset and decrements the cd_length as the data is
 * gathered from the crypto_data_t struct.
 * The data is reverse-copied into the dest buffer if the flag is true.
 *
 * Returns CRYPTO_SUCCESS, CRYPTO_DATA_LEN_RANGE if `count' exceeds the
 * data available, or CRYPTO_ARGUMENTS_BAD for an unknown format.
 */
int
dca_gather(crypto_data_t *in, char *dest, int count, int reverse)
{
	int rv = CRYPTO_SUCCESS;
	uint_t vec_idx;
	uio_t *uiop;
	off_t off = in->cd_offset;
	size_t cur_len;
	mblk_t *mp;

	switch (in->cd_format) {
	case CRYPTO_DATA_RAW:
		if (count > in->cd_length) {
			/*
			 * The caller specified a length greater than the
			 * size of the buffer.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		if (reverse)
			dca_reverse(in->cd_raw.iov_base + off, dest, count,
			    count);
		else
			bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
		/* Consume the copied bytes from the source struct. */
		in->cd_offset += count;
		in->cd_length -= count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be processed.
		 */
		uiop = in->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			count -= cur_len;
			if (reverse) {
				/* Fill the dest buffer from the end */
				dca_reverse(uiop->uio_iov[vec_idx].iov_base +
				    off, dest+count, cur_len, cur_len);
			} else {
				bcopy(uiop->uio_iov[vec_idx].iov_base + off,
				    dest, cur_len);
				dest += cur_len;
			}
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			vec_idx++;
			/* Only the first iovec carries a partial offset. */
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			count -= cur_len;
			if (reverse) {
				/* Fill the dest buffer from the end */
				dca_reverse((char *)(mp->b_rptr + off),
				    dest+count, cur_len, cur_len);
			} else {
				bcopy((char *)(mp->b_rptr + off), dest,
				    cur_len);
				dest += cur_len;
			}
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			mp = mp->b_cont;
			/* Only the first mblk carries a partial offset. */
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "dca_gather: unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
3215 3207
/*
 * Gather `count' bytes into `dest', draining the caller's residual buffer
 * first and then the crypto_data_t struct.
 * Increments the cd_offset and decrements the cd_length as the data is
 * gathered from the crypto_data_t struct.
 *
 * resid/residlen: leftover bytes from a previous call; bytes taken from it
 * are consumed and any remainder is shuffled to the front of `resid'.
 *
 * Returns CRYPTO_SUCCESS, CRYPTO_DATA_LEN_RANGE if the remaining `count'
 * exceeds the data available, or CRYPTO_ARGUMENTS_BAD for an unknown
 * format.
 */
int
dca_resid_gather(crypto_data_t *in, char *resid, int *residlen, char *dest,
    int count)
{
	int rv = CRYPTO_SUCCESS;
	caddr_t baddr;
	uint_t vec_idx;
	uio_t *uiop;
	off_t off = in->cd_offset;
	size_t cur_len;
	mblk_t *mp;

	/* Process the residual first */
	if (*residlen > 0) {
		uint_t num = min(count, *residlen);
		bcopy(resid, dest, num);
		*residlen -= num;
		if (*residlen > 0) {
			/*
			 * Requested amount 'count' is less than what's in
			 * the residual, so shuffle any remaining resid to
			 * the front.
			 */
			baddr = resid + num;
			bcopy(baddr, resid, *residlen);
		}
		dest += num;
		count -= num;
	}

	/* Now process what's in the crypto_data_t structs */
	switch (in->cd_format) {
	case CRYPTO_DATA_RAW:
		if (count > in->cd_length) {
			/*
			 * The caller specified a length greater than the
			 * size of the buffer.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
		/* Consume the copied bytes from the source struct. */
		in->cd_offset += count;
		in->cd_length -= count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be processed.
		 */
		uiop = in->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
			    cur_len);
			count -= cur_len;
			dest += cur_len;
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			vec_idx++;
			/* Only the first iovec carries a partial offset. */
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			bcopy((char *)(mp->b_rptr + off), dest, cur_len);
			count -= cur_len;
			dest += cur_len;
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			mp = mp->b_cont;
			/* Only the first mblk carries a partial offset. */
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN,
		    "dca_resid_gather: unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
3354 3346
/*
 * Appends the data to the crypto_data_t struct increasing cd_length.
 * cd_offset is left unchanged.
 * Data is reverse-copied if the flag is TRUE.
 *
 * Writing starts at cd_offset + cd_length, i.e. just past any output
 * already produced.  Returns CRYPTO_SUCCESS, CRYPTO_DATA_LEN_RANGE when
 * the destination lacks space, or CRYPTO_ARGUMENTS_BAD for an unknown
 * format.
 */
int
dca_scatter(const char *src, crypto_data_t *out, int count, int reverse)
{
	int rv = CRYPTO_SUCCESS;
	off_t offset = out->cd_offset + out->cd_length;
	uint_t vec_idx;
	uio_t *uiop;
	size_t cur_len;
	mblk_t *mp;

	switch (out->cd_format) {
	case CRYPTO_DATA_RAW:
		if (out->cd_raw.iov_len - offset < count) {
			/* Trying to write out more than space available. */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		if (reverse)
			dca_reverse((void*) src, out->cd_raw.iov_base + offset,
			    count, count);
		else
			bcopy(src, out->cd_raw.iov_base + offset, count);
		out->cd_length += count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec that can be written to.
		 */
		uiop = out->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    offset >= uiop->uio_iov[vec_idx].iov_len;
		    offset -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    offset, count);
			count -= cur_len;
			if (reverse) {
				/* Drain src from the end when reversing. */
				dca_reverse((void*) (src+count),
				    uiop->uio_iov[vec_idx].iov_base +
				    offset, cur_len, cur_len);
			} else {
				bcopy(src, uiop->uio_iov[vec_idx].iov_base +
				    offset, cur_len);
				src += cur_len;
			}
			out->cd_length += cur_len;
			vec_idx++;
			/* Only the first iovec carries a partial offset. */
			offset = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to write more data than space provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t that can be written to.
		 */
		for (mp = out->cd_mp; mp != NULL && offset >= MBLKL(mp);
		    offset -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - offset, count);
			count -= cur_len;
			if (reverse) {
				/* Drain src from the end when reversing. */
				dca_reverse((void*) (src+count),
				    (char *)(mp->b_rptr + offset), cur_len,
				    cur_len);
			} else {
				bcopy(src, (char *)(mp->b_rptr + offset),
				    cur_len);
				src += cur_len;
			}
			out->cd_length += cur_len;
			mp = mp->b_cont;
			/* Only the first mblk carries a partial offset. */
			offset = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
3483 3475
/*
 * Compare two byte arrays in reverse order: s1 is walked from its last
 * byte backwards while s2 is walked forwards.
 * Return 0 if they are identical under that pairing, 1 otherwise.
 * A zero-length comparison always matches.
 */
int
dca_bcmp_reverse(const void *s1, const void *s2, size_t n)
{
	const unsigned char *tail = (const unsigned char *)s1 + n;
	const unsigned char *head = s2;
	size_t i;

	for (i = 0; i < n; i++) {
		if (*--tail != *head++)
			return (1);
	}

	return (0);
}
3508 3500
3509 3501
/*
 * This calculates the size of a bignum in bits, specifically not counting
 * leading zero bits. This size calculation must be done *before* any
 * endian reversal takes place (i.e. the numbers are in absolute big-endian
 * order.)
 *
 * An all-zero bignum reports 1 bit, since the final byte is always counted
 * as holding at least one bit.
 */
int
dca_bitlen(unsigned char *bignum, int bytelen)
{
	unsigned char top;
	int first, bits;

	/* Skip leading zero bytes, but always keep the final byte. */
	for (first = 0; first < bytelen - 1 && bignum[first] == 0; first++)
		;

	/* Count the significant bits in the most significant byte. */
	top = bignum[first];
	for (bits = 8; bits > 1 && (top & 0x80) == 0; bits--)
		top <<= 1;

	return ((8 * (bytelen - first - 1)) + bits);
}
3536 3528
3537 3529 /*
3538 3530 * This compares to bignums (in big-endian order). It ignores leading
3539 3531 * null bytes. The result semantics follow bcmp, mempcmp, strcmp, etc.
3540 3532 */
3541 3533 int
3542 3534 dca_numcmp(caddr_t n1, int n1len, caddr_t n2, int n2len)
3543 3535 {
3544 3536 while ((n1len > 1) && (*n1 == 0)) {
3545 3537 n1len--;
3546 3538 n1++;
3547 3539 }
3548 3540 while ((n2len > 1) && (*n2 == 0)) {
3549 3541 n2len--;
3550 3542 n2++;
3551 3543 }
3552 3544 if (n1len != n2len) {
3553 3545 return (n1len - n2len);
3554 3546 }
3555 3547 while ((n1len > 1) && (*n1 == *n2)) {
3556 3548 n1++;
3557 3549 n2++;
3558 3550 n1len--;
3559 3551 }
3560 3552 return ((int)(*(uchar_t *)n1) - (int)(*(uchar_t *)n2));
3561 3553 }
3562 3554
3563 3555 /*
3564 3556 * Return array of key attributes.
3565 3557 */
3566 3558 crypto_object_attribute_t *
3567 3559 dca_get_key_attr(crypto_key_t *key)
3568 3560 {
3569 3561 if ((key->ck_format != CRYPTO_KEY_ATTR_LIST) ||
3570 3562 (key->ck_count == 0)) {
3571 3563 return (NULL);
3572 3564 }
3573 3565
3574 3566 return (key->ck_attrs);
3575 3567 }
3576 3568
3577 3569 /*
3578 3570 * If attribute type exists valp points to it's 32-bit value.
3579 3571 */
3580 3572 int
3581 3573 dca_attr_lookup_uint32(crypto_object_attribute_t *attrp, uint_t atnum,
3582 3574 uint64_t atype, uint32_t *valp)
3583 3575 {
3584 3576 crypto_object_attribute_t *bap;
3585 3577
3586 3578 bap = dca_find_attribute(attrp, atnum, atype);
3587 3579 if (bap == NULL) {
3588 3580 return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3589 3581 }
3590 3582
3591 3583 *valp = *bap->oa_value;
3592 3584
3593 3585 return (CRYPTO_SUCCESS);
3594 3586 }
3595 3587
3596 3588 /*
3597 3589 * If attribute type exists data contains the start address of the value,
3598 3590 * and numelems contains it's length.
3599 3591 */
3600 3592 int
3601 3593 dca_attr_lookup_uint8_array(crypto_object_attribute_t *attrp, uint_t atnum,
3602 3594 uint64_t atype, void **data, unsigned int *numelems)
3603 3595 {
3604 3596 crypto_object_attribute_t *bap;
3605 3597
3606 3598 bap = dca_find_attribute(attrp, atnum, atype);
3607 3599 if (bap == NULL) {
3608 3600 return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3609 3601 }
3610 3602
3611 3603 *data = bap->oa_value;
3612 3604 *numelems = bap->oa_value_len;
3613 3605
3614 3606 return (CRYPTO_SUCCESS);
3615 3607 }
3616 3608
3617 3609 /*
3618 3610 * Finds entry of specified name. If it is not found dca_find_attribute returns
3619 3611 * NULL.
3620 3612 */
3621 3613 crypto_object_attribute_t *
3622 3614 dca_find_attribute(crypto_object_attribute_t *attrp, uint_t atnum,
3623 3615 uint64_t atype)
3624 3616 {
3625 3617 while (atnum) {
3626 3618 if (attrp->oa_type == atype)
3627 3619 return (attrp);
3628 3620 atnum--;
3629 3621 attrp++;
3630 3622 }
3631 3623 return (NULL);
3632 3624 }
3633 3625
3634 3626 /*
3635 3627 * Return the address of the first data buffer. If the data format is
3636 3628 * unrecognised return NULL.
3637 3629 */
3638 3630 caddr_t
3639 3631 dca_bufdaddr(crypto_data_t *data)
3640 3632 {
3641 3633 switch (data->cd_format) {
3642 3634 case CRYPTO_DATA_RAW:
3643 3635 return (data->cd_raw.iov_base + data->cd_offset);
3644 3636 case CRYPTO_DATA_UIO:
3645 3637 return (data->cd_uio->uio_iov[0].iov_base + data->cd_offset);
3646 3638 case CRYPTO_DATA_MBLK:
3647 3639 return ((char *)data->cd_mp->b_rptr + data->cd_offset);
3648 3640 default:
3649 3641 DBG(NULL, DWARN,
3650 3642 "dca_bufdaddr: unrecognised crypto data format");
3651 3643 return (NULL);
3652 3644 }
3653 3645 }
3654 3646
3655 3647 static caddr_t
3656 3648 dca_bufdaddr_out(crypto_data_t *data)
3657 3649 {
3658 3650 size_t offset = data->cd_offset + data->cd_length;
3659 3651
3660 3652 switch (data->cd_format) {
3661 3653 case CRYPTO_DATA_RAW:
3662 3654 return (data->cd_raw.iov_base + offset);
3663 3655 case CRYPTO_DATA_UIO:
3664 3656 return (data->cd_uio->uio_iov[0].iov_base + offset);
3665 3657 case CRYPTO_DATA_MBLK:
3666 3658 return ((char *)data->cd_mp->b_rptr + offset);
3667 3659 default:
3668 3660 DBG(NULL, DWARN,
3669 3661 "dca_bufdaddr_out: unrecognised crypto data format");
3670 3662 return (NULL);
3671 3663 }
3672 3664 }
3673 3665
3674 3666 /*
3675 3667 * Control entry points.
3676 3668 */
3677 3669
/*
 * KCF control entry point: report provider status.  The provider is
 * always reported as ready here.
 */
/* ARGSUSED */
static void
dca_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	*status = CRYPTO_PROVIDER_READY;
}
3684 3676
3685 3677 /*
3686 3678 * Cipher (encrypt/decrypt) entry points.
3687 3679 */
3688 3680
/*
 * KCF entry point: initialize per-context state for an encryption
 * operation.  Contexts are allocated with KM_SLEEP.  On success the new
 * context is linked onto the soft state's context list (after init, so
 * only fully-constructed contexts are ever enlisted).
 */
/* ARGSUSED */
static int
dca_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt_init: started");

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
		    DR_ENCRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_encrypt_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_encrypt_init: done, err = 0x%x", error);

	/* Track the context so it can be torn down on detach/failure. */
	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
3732 3724
/*
 * KCF entry point: single-part encrypt.  A NULL ciphertext argument
 * requests an in-place operation (output written over the plaintext).
 * On any outcome other than queued/success/buffer-too-small the output
 * length is zeroed so the caller sees no partial result.
 */
/* ARGSUSED */
static int
dca_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;
	/* LINTED E_FUNC_SET_NOT_USED */
	int instance;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	/* extract softc and instance number from context */
	DCA_SOFTC_FROM_CTX(ctx, softc, instance);
	DBG(softc, DENTRY, "dca_encrypt: started");

	/* handle inplace ops */
	if (!ciphertext) {
		dca_request_t *reqp = ctx->cc_provider_private;
		reqp->dr_flags |= DR_INPLACE;
		ciphertext = plaintext;
	}

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, plaintext, ciphertext, req, DR_ENCRYPT);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3des(ctx, plaintext, ciphertext, req,
		    DR_ENCRYPT | DR_TRIPLE);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, plaintext, ciphertext, req,
		    DCA_RSA_ENC);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_encrypt: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	/* BUFFER_TOO_SMALL keeps cd_length: it reports the needed size. */
	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
	    (error != CRYPTO_BUFFER_TOO_SMALL)) {
		ciphertext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_encrypt: done, err = 0x%x", error);

	return (error);
}
3787 3779
3788 3780 /* ARGSUSED */
3789 3781 static int
3790 3782 dca_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
3791 3783 crypto_data_t *ciphertext, crypto_req_handle_t req)
3792 3784 {
3793 3785 int error = CRYPTO_FAILED;
3794 3786 dca_t *softc;
3795 3787 /* LINTED E_FUNC_SET_NOT_USED */
3796 3788 int instance;
3797 3789
3798 3790 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3799 3791 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3800 3792
3801 3793 /* extract softc and instance number from context */
3802 3794 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
3803 3795 DBG(softc, DENTRY, "dca_encrypt_update: started");
3804 3796
3805 3797 /* handle inplace ops */
3806 3798 if (!ciphertext) {
3807 3799 dca_request_t *reqp = ctx->cc_provider_private;
3808 3800 reqp->dr_flags |= DR_INPLACE;
3809 3801 ciphertext = plaintext;
3810 3802 }
3811 3803
3812 3804 /* check mechanism */
3813 3805 switch (DCA_MECH_FROM_CTX(ctx)) {
3814 3806 case DES_CBC_MECH_INFO_TYPE:
3815 3807 error = dca_3desupdate(ctx, plaintext, ciphertext, req,
3816 3808 DR_ENCRYPT);
3817 3809 break;
3818 3810 case DES3_CBC_MECH_INFO_TYPE:
3819 3811 error = dca_3desupdate(ctx, plaintext, ciphertext, req,
3820 3812 DR_ENCRYPT | DR_TRIPLE);
3821 3813 break;
3822 3814 default:
3823 3815 /* Should never reach here */
3824 3816 cmn_err(CE_WARN, "dca_encrypt_update: unexpected mech type "
3825 3817 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3826 3818 error = CRYPTO_MECHANISM_INVALID;
3827 3819 }
3828 3820
3829 3821 DBG(softc, DENTRY, "dca_encrypt_update: done, err = 0x%x", error);
3830 3822
3831 3823 return (error);
3832 3824 }
3833 3825
3834 3826 /* ARGSUSED */
3835 3827 static int
3836 3828 dca_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3837 3829 crypto_req_handle_t req)
3838 3830 {
3839 3831 int error = CRYPTO_FAILED;
3840 3832 dca_t *softc;
3841 3833 /* LINTED E_FUNC_SET_NOT_USED */
3842 3834 int instance;
3843 3835
3844 3836 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3845 3837 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3846 3838
3847 3839 /* extract softc and instance number from context */
3848 3840 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
3849 3841 DBG(softc, DENTRY, "dca_encrypt_final: started");
3850 3842
3851 3843 /* check mechanism */
3852 3844 switch (DCA_MECH_FROM_CTX(ctx)) {
3853 3845 case DES_CBC_MECH_INFO_TYPE:
3854 3846 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT);
3855 3847 break;
3856 3848 case DES3_CBC_MECH_INFO_TYPE:
3857 3849 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT | DR_TRIPLE);
3858 3850 break;
3859 3851 default:
3860 3852 /* Should never reach here */
3861 3853 cmn_err(CE_WARN, "dca_encrypt_final: unexpected mech type "
3862 3854 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3863 3855 error = CRYPTO_MECHANISM_INVALID;
3864 3856 }
3865 3857
3866 3858 DBG(softc, DENTRY, "dca_encrypt_final: done, err = 0x%x", error);
3867 3859
3868 3860 return (error);
3869 3861 }
3870 3862
/*
 * KCF entry point: atomic (single-call) encrypt without a persistent
 * context.  Pre-computed templates are not supported.  A NULL ciphertext
 * argument requests an in-place operation.  On failure the output length
 * is zeroed.
 */
/* ARGSUSED */
static int
dca_encrypt_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_encrypt_atomic: started");

	/* This provider does not support session templates. */
	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* handle inplace ops */
	if (!ciphertext) {
		ciphertext = plaintext;
	}

	/* check mechanism */
	switch (mechanism->cm_type) {
	case DES_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req,
		    DR_ENCRYPT | DR_ATOMIC);
		break;
	case DES3_CBC_MECH_INFO_TYPE:
		error = dca_3desatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req,
		    DR_ENCRYPT | DR_TRIPLE | DR_ATOMIC);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    plaintext, ciphertext, KM_SLEEP, req, DCA_RSA_ENC);
		break;
	default:
		cmn_err(CE_WARN, "dca_encrypt_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
		ciphertext->cd_length = 0;
	}

	DBG(softc, DENTRY, "dca_encrypt_atomic: done, err = 0x%x", error);

	return (error);
}
3922 3914
3923 3915 /* ARGSUSED */
3924 3916 static int
3925 3917 dca_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
3926 3918 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
3927 3919 crypto_req_handle_t req)
3928 3920 {
3929 3921 int error = CRYPTO_FAILED;
3930 3922 dca_t *softc;
3931 3923 /* LINTED E_FUNC_SET_NOT_USED */
3932 3924 int instance;
3933 3925
3934 3926 /* extract softc and instance number from context */
3935 3927 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
3936 3928 DBG(softc, DENTRY, "dca_decrypt_init: started");
3937 3929
3938 3930 /* check mechanism */
3939 3931 switch (mechanism->cm_type) {
3940 3932 case DES_CBC_MECH_INFO_TYPE:
3941 3933 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3942 3934 DR_DECRYPT);
3943 3935 break;
3944 3936 case DES3_CBC_MECH_INFO_TYPE:
3945 3937 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3946 3938 DR_DECRYPT | DR_TRIPLE);
3947 3939 break;
3948 3940 case RSA_PKCS_MECH_INFO_TYPE:
3949 3941 case RSA_X_509_MECH_INFO_TYPE:
3950 3942 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
3951 3943 break;
3952 3944 default:
3953 3945 cmn_err(CE_WARN, "dca_decrypt_init: unexpected mech type "
3954 3946 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3955 3947 error = CRYPTO_MECHANISM_INVALID;
3956 3948 }
3957 3949
3958 3950 DBG(softc, DENTRY, "dca_decrypt_init: done, err = 0x%x", error);
3959 3951
3960 3952 if (error == CRYPTO_SUCCESS)
3961 3953 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
3962 3954 &softc->dca_ctx_list_lock);
3963 3955
3964 3956 return (error);
3965 3957 }
3966 3958
3967 3959 /* ARGSUSED */
3968 3960 static int
3969 3961 dca_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3970 3962 crypto_data_t *plaintext, crypto_req_handle_t req)
3971 3963 {
3972 3964 int error = CRYPTO_FAILED;
3973 3965 dca_t *softc;
3974 3966 /* LINTED E_FUNC_SET_NOT_USED */
3975 3967 int instance;
3976 3968
3977 3969 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3978 3970 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3979 3971
3980 3972 /* extract softc and instance number from context */
3981 3973 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
3982 3974 DBG(softc, DENTRY, "dca_decrypt: started");
3983 3975
3984 3976 /* handle inplace ops */
3985 3977 if (!plaintext) {
3986 3978 dca_request_t *reqp = ctx->cc_provider_private;
3987 3979 reqp->dr_flags |= DR_INPLACE;
3988 3980 plaintext = ciphertext;
3989 3981 }
3990 3982
3991 3983 /* check mechanism */
3992 3984 switch (DCA_MECH_FROM_CTX(ctx)) {
3993 3985 case DES_CBC_MECH_INFO_TYPE:
3994 3986 error = dca_3des(ctx, ciphertext, plaintext, req, DR_DECRYPT);
3995 3987 break;
3996 3988 case DES3_CBC_MECH_INFO_TYPE:
3997 3989 error = dca_3des(ctx, ciphertext, plaintext, req,
3998 3990 DR_DECRYPT | DR_TRIPLE);
3999 3991 break;
4000 3992 case RSA_PKCS_MECH_INFO_TYPE:
4001 3993 case RSA_X_509_MECH_INFO_TYPE:
4002 3994 error = dca_rsastart(ctx, ciphertext, plaintext, req,
4003 3995 DCA_RSA_DEC);
4004 3996 break;
4005 3997 default:
4006 3998 /* Should never reach here */
4007 3999 cmn_err(CE_WARN, "dca_decrypt: unexpected mech type "
4008 4000 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4009 4001 error = CRYPTO_MECHANISM_INVALID;
4010 4002 }
4011 4003
4012 4004 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
4013 4005 (error != CRYPTO_BUFFER_TOO_SMALL)) {
4014 4006 if (plaintext)
4015 4007 plaintext->cd_length = 0;
4016 4008 }
4017 4009
4018 4010 DBG(softc, DENTRY, "dca_decrypt: done, err = 0x%x", error);
4019 4011
4020 4012 return (error);
4021 4013 }
4022 4014
4023 4015 /* ARGSUSED */
4024 4016 static int
4025 4017 dca_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
4026 4018 crypto_data_t *plaintext, crypto_req_handle_t req)
4027 4019 {
4028 4020 int error = CRYPTO_FAILED;
4029 4021 dca_t *softc;
4030 4022 /* LINTED E_FUNC_SET_NOT_USED */
4031 4023 int instance;
4032 4024
4033 4025 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4034 4026 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4035 4027
4036 4028 /* extract softc and instance number from context */
4037 4029 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4038 4030 DBG(softc, DENTRY, "dca_decrypt_update: started");
4039 4031
4040 4032 /* handle inplace ops */
4041 4033 if (!plaintext) {
4042 4034 dca_request_t *reqp = ctx->cc_provider_private;
4043 4035 reqp->dr_flags |= DR_INPLACE;
4044 4036 plaintext = ciphertext;
4045 4037 }
4046 4038
4047 4039 /* check mechanism */
4048 4040 switch (DCA_MECH_FROM_CTX(ctx)) {
4049 4041 case DES_CBC_MECH_INFO_TYPE:
4050 4042 error = dca_3desupdate(ctx, ciphertext, plaintext, req,
4051 4043 DR_DECRYPT);
4052 4044 break;
4053 4045 case DES3_CBC_MECH_INFO_TYPE:
4054 4046 error = dca_3desupdate(ctx, ciphertext, plaintext, req,
4055 4047 DR_DECRYPT | DR_TRIPLE);
4056 4048 break;
4057 4049 default:
4058 4050 /* Should never reach here */
4059 4051 cmn_err(CE_WARN, "dca_decrypt_update: unexpected mech type "
4060 4052 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4061 4053 error = CRYPTO_MECHANISM_INVALID;
4062 4054 }
4063 4055
4064 4056 DBG(softc, DENTRY, "dca_decrypt_update: done, err = 0x%x", error);
4065 4057
4066 4058 return (error);
4067 4059 }
4068 4060
4069 4061 /* ARGSUSED */
4070 4062 static int
4071 4063 dca_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *plaintext,
4072 4064 crypto_req_handle_t req)
4073 4065 {
4074 4066 int error = CRYPTO_FAILED;
4075 4067 dca_t *softc;
4076 4068 /* LINTED E_FUNC_SET_NOT_USED */
4077 4069 int instance;
4078 4070
4079 4071 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4080 4072 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4081 4073
4082 4074 /* extract softc and instance number from context */
4083 4075 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4084 4076 DBG(softc, DENTRY, "dca_decrypt_final: started");
4085 4077
4086 4078 /* check mechanism */
4087 4079 switch (DCA_MECH_FROM_CTX(ctx)) {
4088 4080 case DES_CBC_MECH_INFO_TYPE:
4089 4081 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT);
4090 4082 break;
4091 4083 case DES3_CBC_MECH_INFO_TYPE:
4092 4084 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT | DR_TRIPLE);
4093 4085 break;
4094 4086 default:
4095 4087 /* Should never reach here */
4096 4088 cmn_err(CE_WARN, "dca_decrypt_final: unexpected mech type "
4097 4089 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4098 4090 error = CRYPTO_MECHANISM_INVALID;
4099 4091 }
4100 4092
4101 4093 DBG(softc, DENTRY, "dca_decrypt_final: done, err = 0x%x", error);
4102 4094
4103 4095 return (error);
4104 4096 }
4105 4097
4106 4098 /* ARGSUSED */
4107 4099 static int
4108 4100 dca_decrypt_atomic(crypto_provider_handle_t provider,
4109 4101 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4110 4102 crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
4111 4103 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4112 4104 {
4113 4105 int error = CRYPTO_FAILED;
4114 4106 dca_t *softc = (dca_t *)provider;
4115 4107
4116 4108 DBG(softc, DENTRY, "dca_decrypt_atomic: started");
4117 4109
4118 4110 if (ctx_template != NULL)
4119 4111 return (CRYPTO_ARGUMENTS_BAD);
4120 4112
4121 4113 /* handle inplace ops */
4122 4114 if (!plaintext) {
4123 4115 plaintext = ciphertext;
4124 4116 }
4125 4117
4126 4118 /* check mechanism */
4127 4119 switch (mechanism->cm_type) {
4128 4120 case DES_CBC_MECH_INFO_TYPE:
4129 4121 error = dca_3desatomic(provider, session_id, mechanism, key,
4130 4122 ciphertext, plaintext, KM_SLEEP, req,
4131 4123 DR_DECRYPT | DR_ATOMIC);
4132 4124 break;
4133 4125 case DES3_CBC_MECH_INFO_TYPE:
4134 4126 error = dca_3desatomic(provider, session_id, mechanism, key,
4135 4127 ciphertext, plaintext, KM_SLEEP, req,
4136 4128 DR_DECRYPT | DR_TRIPLE | DR_ATOMIC);
4137 4129 break;
4138 4130 case RSA_PKCS_MECH_INFO_TYPE:
4139 4131 case RSA_X_509_MECH_INFO_TYPE:
4140 4132 error = dca_rsaatomic(provider, session_id, mechanism, key,
4141 4133 ciphertext, plaintext, KM_SLEEP, req, DCA_RSA_DEC);
4142 4134 break;
4143 4135 default:
4144 4136 cmn_err(CE_WARN, "dca_decrypt_atomic: unexpected mech type "
4145 4137 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4146 4138 error = CRYPTO_MECHANISM_INVALID;
4147 4139 }
4148 4140
4149 4141 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
4150 4142 plaintext->cd_length = 0;
4151 4143 }
4152 4144
4153 4145 DBG(softc, DENTRY, "dca_decrypt_atomic: done, err = 0x%x", error);
4154 4146
4155 4147 return (error);
4156 4148 }
4157 4149
4158 4150 /*
4159 4151 * Sign entry points.
4160 4152 */
4161 4153
4162 4154 /* ARGSUSED */
4163 4155 static int
4164 4156 dca_sign_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4165 4157 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4166 4158 crypto_req_handle_t req)
4167 4159 {
4168 4160 int error = CRYPTO_FAILED;
4169 4161 dca_t *softc;
4170 4162 /* LINTED E_FUNC_SET_NOT_USED */
4171 4163 int instance;
4172 4164
4173 4165 /* extract softc and instance number from context */
4174 4166 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4175 4167 DBG(softc, DENTRY, "dca_sign_init: started\n");
4176 4168
4177 4169 if (ctx_template != NULL)
4178 4170 return (CRYPTO_ARGUMENTS_BAD);
4179 4171
4180 4172 /* check mechanism */
4181 4173 switch (mechanism->cm_type) {
4182 4174 case RSA_PKCS_MECH_INFO_TYPE:
4183 4175 case RSA_X_509_MECH_INFO_TYPE:
4184 4176 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4185 4177 break;
4186 4178 case DSA_MECH_INFO_TYPE:
4187 4179 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
4188 4180 DCA_DSA_SIGN);
4189 4181 break;
4190 4182 default:
4191 4183 cmn_err(CE_WARN, "dca_sign_init: unexpected mech type "
4192 4184 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4193 4185 error = CRYPTO_MECHANISM_INVALID;
4194 4186 }
4195 4187
4196 4188 DBG(softc, DENTRY, "dca_sign_init: done, err = 0x%x", error);
4197 4189
4198 4190 if (error == CRYPTO_SUCCESS)
4199 4191 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4200 4192 &softc->dca_ctx_list_lock);
4201 4193
4202 4194 return (error);
4203 4195 }
4204 4196
4205 4197 static int
4206 4198 dca_sign(crypto_ctx_t *ctx, crypto_data_t *data,
4207 4199 crypto_data_t *signature, crypto_req_handle_t req)
4208 4200 {
4209 4201 int error = CRYPTO_FAILED;
4210 4202 dca_t *softc;
4211 4203 /* LINTED E_FUNC_SET_NOT_USED */
4212 4204 int instance;
4213 4205
4214 4206 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4215 4207 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4216 4208
4217 4209 /* extract softc and instance number from context */
4218 4210 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4219 4211 DBG(softc, DENTRY, "dca_sign: started\n");
4220 4212
4221 4213 /* check mechanism */
4222 4214 switch (DCA_MECH_FROM_CTX(ctx)) {
4223 4215 case RSA_PKCS_MECH_INFO_TYPE:
4224 4216 case RSA_X_509_MECH_INFO_TYPE:
4225 4217 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGN);
4226 4218 break;
4227 4219 case DSA_MECH_INFO_TYPE:
4228 4220 error = dca_dsa_sign(ctx, data, signature, req);
4229 4221 break;
4230 4222 default:
4231 4223 cmn_err(CE_WARN, "dca_sign: unexpected mech type "
4232 4224 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4233 4225 error = CRYPTO_MECHANISM_INVALID;
4234 4226 }
4235 4227
4236 4228 DBG(softc, DENTRY, "dca_sign: done, err = 0x%x", error);
4237 4229
4238 4230 return (error);
4239 4231 }
4240 4232
4241 4233 /* ARGSUSED */
4242 4234 static int
4243 4235 dca_sign_update(crypto_ctx_t *ctx, crypto_data_t *data,
4244 4236 crypto_req_handle_t req)
4245 4237 {
4246 4238 int error = CRYPTO_MECHANISM_INVALID;
4247 4239 dca_t *softc;
4248 4240 /* LINTED E_FUNC_SET_NOT_USED */
4249 4241 int instance;
4250 4242
4251 4243 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4252 4244 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4253 4245
4254 4246 /* extract softc and instance number from context */
4255 4247 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4256 4248 DBG(softc, DENTRY, "dca_sign_update: started\n");
4257 4249
4258 4250 cmn_err(CE_WARN, "dca_sign_update: unexpected mech type "
4259 4251 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4260 4252
4261 4253 DBG(softc, DENTRY, "dca_sign_update: done, err = 0x%x", error);
4262 4254
4263 4255 return (error);
4264 4256 }
4265 4257
4266 4258 /* ARGSUSED */
4267 4259 static int
4268 4260 dca_sign_final(crypto_ctx_t *ctx, crypto_data_t *signature,
4269 4261 crypto_req_handle_t req)
4270 4262 {
4271 4263 int error = CRYPTO_MECHANISM_INVALID;
4272 4264 dca_t *softc;
4273 4265 /* LINTED E_FUNC_SET_NOT_USED */
4274 4266 int instance;
4275 4267
4276 4268 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4277 4269 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4278 4270
4279 4271 /* extract softc and instance number from context */
4280 4272 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4281 4273 DBG(softc, DENTRY, "dca_sign_final: started\n");
4282 4274
4283 4275 cmn_err(CE_WARN, "dca_sign_final: unexpected mech type "
4284 4276 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4285 4277
4286 4278 DBG(softc, DENTRY, "dca_sign_final: done, err = 0x%x", error);
4287 4279
4288 4280 return (error);
4289 4281 }
4290 4282
4291 4283 static int
4292 4284 dca_sign_atomic(crypto_provider_handle_t provider,
4293 4285 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4294 4286 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4295 4287 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4296 4288 {
4297 4289 int error = CRYPTO_FAILED;
4298 4290 dca_t *softc = (dca_t *)provider;
4299 4291
4300 4292 DBG(softc, DENTRY, "dca_sign_atomic: started\n");
4301 4293
4302 4294 if (ctx_template != NULL)
4303 4295 return (CRYPTO_ARGUMENTS_BAD);
4304 4296
4305 4297 /* check mechanism */
4306 4298 switch (mechanism->cm_type) {
4307 4299 case RSA_PKCS_MECH_INFO_TYPE:
4308 4300 case RSA_X_509_MECH_INFO_TYPE:
4309 4301 error = dca_rsaatomic(provider, session_id, mechanism, key,
4310 4302 data, signature, KM_SLEEP, req, DCA_RSA_SIGN);
4311 4303 break;
4312 4304 case DSA_MECH_INFO_TYPE:
4313 4305 error = dca_dsaatomic(provider, session_id, mechanism, key,
4314 4306 data, signature, KM_SLEEP, req, DCA_DSA_SIGN);
4315 4307 break;
4316 4308 default:
4317 4309 cmn_err(CE_WARN, "dca_sign_atomic: unexpected mech type "
4318 4310 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4319 4311 error = CRYPTO_MECHANISM_INVALID;
4320 4312 }
4321 4313
4322 4314 DBG(softc, DENTRY, "dca_sign_atomic: done, err = 0x%x", error);
4323 4315
4324 4316 return (error);
4325 4317 }
4326 4318
4327 4319 /* ARGSUSED */
4328 4320 static int
4329 4321 dca_sign_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4330 4322 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4331 4323 crypto_req_handle_t req)
4332 4324 {
4333 4325 int error = CRYPTO_FAILED;
4334 4326 dca_t *softc;
4335 4327 /* LINTED E_FUNC_SET_NOT_USED */
4336 4328 int instance;
4337 4329
4338 4330 /* extract softc and instance number from context */
4339 4331 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4340 4332 DBG(softc, DENTRY, "dca_sign_recover_init: started\n");
4341 4333
4342 4334 if (ctx_template != NULL)
4343 4335 return (CRYPTO_ARGUMENTS_BAD);
4344 4336
4345 4337 /* check mechanism */
4346 4338 switch (mechanism->cm_type) {
4347 4339 case RSA_PKCS_MECH_INFO_TYPE:
4348 4340 case RSA_X_509_MECH_INFO_TYPE:
4349 4341 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4350 4342 break;
4351 4343 default:
4352 4344 cmn_err(CE_WARN, "dca_sign_recover_init: unexpected mech type "
4353 4345 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4354 4346 error = CRYPTO_MECHANISM_INVALID;
4355 4347 }
4356 4348
4357 4349 DBG(softc, DENTRY, "dca_sign_recover_init: done, err = 0x%x", error);
4358 4350
4359 4351 if (error == CRYPTO_SUCCESS)
4360 4352 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4361 4353 &softc->dca_ctx_list_lock);
4362 4354
4363 4355 return (error);
4364 4356 }
4365 4357
4366 4358 static int
4367 4359 dca_sign_recover(crypto_ctx_t *ctx, crypto_data_t *data,
4368 4360 crypto_data_t *signature, crypto_req_handle_t req)
4369 4361 {
4370 4362 int error = CRYPTO_FAILED;
4371 4363 dca_t *softc;
4372 4364 /* LINTED E_FUNC_SET_NOT_USED */
4373 4365 int instance;
4374 4366
4375 4367 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4376 4368 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4377 4369
4378 4370 /* extract softc and instance number from context */
4379 4371 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4380 4372 DBG(softc, DENTRY, "dca_sign_recover: started\n");
4381 4373
4382 4374 /* check mechanism */
4383 4375 switch (DCA_MECH_FROM_CTX(ctx)) {
4384 4376 case RSA_PKCS_MECH_INFO_TYPE:
4385 4377 case RSA_X_509_MECH_INFO_TYPE:
4386 4378 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGNR);
4387 4379 break;
4388 4380 default:
4389 4381 cmn_err(CE_WARN, "dca_sign_recover: unexpected mech type "
4390 4382 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4391 4383 error = CRYPTO_MECHANISM_INVALID;
4392 4384 }
4393 4385
4394 4386 DBG(softc, DENTRY, "dca_sign_recover: done, err = 0x%x", error);
4395 4387
4396 4388 return (error);
4397 4389 }
4398 4390
4399 4391 static int
4400 4392 dca_sign_recover_atomic(crypto_provider_handle_t provider,
4401 4393 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4402 4394 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4403 4395 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4404 4396 {
4405 4397 int error = CRYPTO_FAILED;
4406 4398 dca_t *softc = (dca_t *)provider;
4407 4399 /* LINTED E_FUNC_SET_NOT_USED */
4408 4400 int instance;
4409 4401
4410 4402 instance = ddi_get_instance(softc->dca_dip);
4411 4403 DBG(softc, DENTRY, "dca_sign_recover_atomic: started\n");
4412 4404
4413 4405 if (ctx_template != NULL)
4414 4406 return (CRYPTO_ARGUMENTS_BAD);
4415 4407
4416 4408 /* check mechanism */
4417 4409 switch (mechanism->cm_type) {
4418 4410 case RSA_PKCS_MECH_INFO_TYPE:
4419 4411 case RSA_X_509_MECH_INFO_TYPE:
4420 4412 error = dca_rsaatomic(provider, session_id, mechanism, key,
4421 4413 data, signature, KM_SLEEP, req, DCA_RSA_SIGNR);
4422 4414 break;
4423 4415 default:
4424 4416 cmn_err(CE_WARN, "dca_sign_recover_atomic: unexpected mech type"
4425 4417 " 0x%llx\n", (unsigned long long)mechanism->cm_type);
4426 4418 error = CRYPTO_MECHANISM_INVALID;
4427 4419 }
4428 4420
4429 4421 DBG(softc, DENTRY, "dca_sign_recover_atomic: done, err = 0x%x", error);
4430 4422
4431 4423 return (error);
4432 4424 }
4433 4425
4434 4426 /*
4435 4427 * Verify entry points.
4436 4428 */
4437 4429
4438 4430 /* ARGSUSED */
4439 4431 static int
4440 4432 dca_verify_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4441 4433 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4442 4434 crypto_req_handle_t req)
4443 4435 {
4444 4436 int error = CRYPTO_FAILED;
4445 4437 dca_t *softc;
4446 4438 /* LINTED E_FUNC_SET_NOT_USED */
4447 4439 int instance;
4448 4440
4449 4441 /* extract softc and instance number from context */
4450 4442 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4451 4443 DBG(softc, DENTRY, "dca_verify_init: started\n");
4452 4444
4453 4445 if (ctx_template != NULL)
4454 4446 return (CRYPTO_ARGUMENTS_BAD);
4455 4447
4456 4448 /* check mechanism */
4457 4449 switch (mechanism->cm_type) {
4458 4450 case RSA_PKCS_MECH_INFO_TYPE:
4459 4451 case RSA_X_509_MECH_INFO_TYPE:
4460 4452 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4461 4453 break;
4462 4454 case DSA_MECH_INFO_TYPE:
4463 4455 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
4464 4456 DCA_DSA_VRFY);
4465 4457 break;
4466 4458 default:
4467 4459 cmn_err(CE_WARN, "dca_verify_init: unexpected mech type "
4468 4460 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4469 4461 error = CRYPTO_MECHANISM_INVALID;
4470 4462 }
4471 4463
4472 4464 DBG(softc, DENTRY, "dca_verify_init: done, err = 0x%x", error);
4473 4465
4474 4466 if (error == CRYPTO_SUCCESS)
4475 4467 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4476 4468 &softc->dca_ctx_list_lock);
4477 4469
4478 4470 return (error);
4479 4471 }
4480 4472
4481 4473 static int
4482 4474 dca_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *signature,
4483 4475 crypto_req_handle_t req)
4484 4476 {
4485 4477 int error = CRYPTO_FAILED;
4486 4478 dca_t *softc;
4487 4479 /* LINTED E_FUNC_SET_NOT_USED */
4488 4480 int instance;
4489 4481
4490 4482 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4491 4483 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4492 4484
4493 4485 /* extract softc and instance number from context */
4494 4486 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4495 4487 DBG(softc, DENTRY, "dca_verify: started\n");
4496 4488
4497 4489 /* check mechanism */
4498 4490 switch (DCA_MECH_FROM_CTX(ctx)) {
4499 4491 case RSA_PKCS_MECH_INFO_TYPE:
4500 4492 case RSA_X_509_MECH_INFO_TYPE:
4501 4493 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFY);
4502 4494 break;
4503 4495 case DSA_MECH_INFO_TYPE:
4504 4496 error = dca_dsa_verify(ctx, data, signature, req);
4505 4497 break;
4506 4498 default:
4507 4499 cmn_err(CE_WARN, "dca_verify: unexpected mech type "
4508 4500 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4509 4501 error = CRYPTO_MECHANISM_INVALID;
4510 4502 }
4511 4503
4512 4504 DBG(softc, DENTRY, "dca_verify: done, err = 0x%x", error);
4513 4505
4514 4506 return (error);
4515 4507 }
4516 4508
4517 4509 /* ARGSUSED */
4518 4510 static int
4519 4511 dca_verify_update(crypto_ctx_t *ctx, crypto_data_t *data,
4520 4512 crypto_req_handle_t req)
4521 4513 {
4522 4514 int error = CRYPTO_MECHANISM_INVALID;
4523 4515 dca_t *softc;
4524 4516 /* LINTED E_FUNC_SET_NOT_USED */
4525 4517 int instance;
4526 4518
4527 4519 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4528 4520 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4529 4521
4530 4522 /* extract softc and instance number from context */
4531 4523 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4532 4524 DBG(softc, DENTRY, "dca_verify_update: started\n");
4533 4525
4534 4526 cmn_err(CE_WARN, "dca_verify_update: unexpected mech type "
4535 4527 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4536 4528
4537 4529 DBG(softc, DENTRY, "dca_verify_update: done, err = 0x%x", error);
4538 4530
4539 4531 return (error);
4540 4532 }
4541 4533
4542 4534 /* ARGSUSED */
4543 4535 static int
4544 4536 dca_verify_final(crypto_ctx_t *ctx, crypto_data_t *signature,
4545 4537 crypto_req_handle_t req)
4546 4538 {
4547 4539 int error = CRYPTO_MECHANISM_INVALID;
4548 4540 dca_t *softc;
4549 4541 /* LINTED E_FUNC_SET_NOT_USED */
4550 4542 int instance;
4551 4543
4552 4544 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4553 4545 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4554 4546
4555 4547 /* extract softc and instance number from context */
4556 4548 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4557 4549 DBG(softc, DENTRY, "dca_verify_final: started\n");
4558 4550
4559 4551 cmn_err(CE_WARN, "dca_verify_final: unexpected mech type "
4560 4552 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4561 4553
4562 4554 DBG(softc, DENTRY, "dca_verify_final: done, err = 0x%x", error);
4563 4555
4564 4556 return (error);
4565 4557 }
4566 4558
4567 4559 static int
4568 4560 dca_verify_atomic(crypto_provider_handle_t provider,
4569 4561 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4570 4562 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4571 4563 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4572 4564 {
4573 4565 int error = CRYPTO_FAILED;
4574 4566 dca_t *softc = (dca_t *)provider;
4575 4567
4576 4568 DBG(softc, DENTRY, "dca_verify_atomic: started\n");
4577 4569
4578 4570 if (ctx_template != NULL)
4579 4571 return (CRYPTO_ARGUMENTS_BAD);
4580 4572
4581 4573 /* check mechanism */
4582 4574 switch (mechanism->cm_type) {
4583 4575 case RSA_PKCS_MECH_INFO_TYPE:
4584 4576 case RSA_X_509_MECH_INFO_TYPE:
4585 4577 error = dca_rsaatomic(provider, session_id, mechanism, key,
4586 4578 signature, data, KM_SLEEP, req, DCA_RSA_VRFY);
4587 4579 break;
4588 4580 case DSA_MECH_INFO_TYPE:
4589 4581 error = dca_dsaatomic(provider, session_id, mechanism, key,
4590 4582 data, signature, KM_SLEEP, req, DCA_DSA_VRFY);
4591 4583 break;
4592 4584 default:
4593 4585 cmn_err(CE_WARN, "dca_verify_atomic: unexpected mech type "
4594 4586 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4595 4587 error = CRYPTO_MECHANISM_INVALID;
4596 4588 }
4597 4589
4598 4590 DBG(softc, DENTRY, "dca_verify_atomic: done, err = 0x%x", error);
4599 4591
4600 4592 return (error);
4601 4593 }
4602 4594
4603 4595 /* ARGSUSED */
4604 4596 static int
4605 4597 dca_verify_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4606 4598 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4607 4599 crypto_req_handle_t req)
4608 4600 {
4609 4601 int error = CRYPTO_MECHANISM_INVALID;
4610 4602 dca_t *softc;
4611 4603 /* LINTED E_FUNC_SET_NOT_USED */
4612 4604 int instance;
4613 4605
4614 4606 /* extract softc and instance number from context */
4615 4607 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4616 4608 DBG(softc, DENTRY, "dca_verify_recover_init: started\n");
4617 4609
4618 4610 if (ctx_template != NULL)
4619 4611 return (CRYPTO_ARGUMENTS_BAD);
4620 4612
4621 4613 /* check mechanism */
4622 4614 switch (mechanism->cm_type) {
4623 4615 case RSA_PKCS_MECH_INFO_TYPE:
4624 4616 case RSA_X_509_MECH_INFO_TYPE:
4625 4617 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4626 4618 break;
4627 4619 default:
4628 4620 cmn_err(CE_WARN, "dca_verify_recover_init: unexpected mech type"
4629 4621 " 0x%llx\n", (unsigned long long)mechanism->cm_type);
4630 4622 }
4631 4623
4632 4624 DBG(softc, DENTRY, "dca_verify_recover_init: done, err = 0x%x", error);
4633 4625
4634 4626 if (error == CRYPTO_SUCCESS)
4635 4627 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4636 4628 &softc->dca_ctx_list_lock);
4637 4629
4638 4630 return (error);
4639 4631 }
4640 4632
4641 4633 static int
4642 4634 dca_verify_recover(crypto_ctx_t *ctx, crypto_data_t *signature,
4643 4635 crypto_data_t *data, crypto_req_handle_t req)
4644 4636 {
4645 4637 int error = CRYPTO_MECHANISM_INVALID;
4646 4638 dca_t *softc;
4647 4639 /* LINTED E_FUNC_SET_NOT_USED */
4648 4640 int instance;
4649 4641
4650 4642 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4651 4643 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4652 4644
4653 4645 /* extract softc and instance number from context */
4654 4646 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4655 4647 DBG(softc, DENTRY, "dca_verify_recover: started\n");
4656 4648
4657 4649 /* check mechanism */
4658 4650 switch (DCA_MECH_FROM_CTX(ctx)) {
4659 4651 case RSA_PKCS_MECH_INFO_TYPE:
4660 4652 case RSA_X_509_MECH_INFO_TYPE:
4661 4653 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFYR);
4662 4654 break;
4663 4655 default:
4664 4656 cmn_err(CE_WARN, "dca_verify_recover: unexpected mech type "
4665 4657 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4666 4658 }
4667 4659
4668 4660 DBG(softc, DENTRY, "dca_verify_recover: done, err = 0x%x", error);
4669 4661
4670 4662 return (error);
4671 4663 }
4672 4664
4673 4665 static int
4674 4666 dca_verify_recover_atomic(crypto_provider_handle_t provider,
4675 4667 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4676 4668 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4677 4669 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4678 4670 {
4679 4671 int error = CRYPTO_MECHANISM_INVALID;
4680 4672 dca_t *softc = (dca_t *)provider;
4681 4673
4682 4674 DBG(softc, DENTRY, "dca_verify_recover_atomic: started\n");
4683 4675
4684 4676 if (ctx_template != NULL)
4685 4677 return (CRYPTO_ARGUMENTS_BAD);
4686 4678
4687 4679 /* check mechanism */
4688 4680 switch (mechanism->cm_type) {
4689 4681 case RSA_PKCS_MECH_INFO_TYPE:
4690 4682 case RSA_X_509_MECH_INFO_TYPE:
4691 4683 error = dca_rsaatomic(provider, session_id, mechanism, key,
4692 4684 signature, data, KM_SLEEP, req, DCA_RSA_VRFYR);
4693 4685 break;
4694 4686 default:
4695 4687 cmn_err(CE_WARN, "dca_verify_recover_atomic: unexpected mech "
4696 4688 "type 0x%llx\n", (unsigned long long)mechanism->cm_type);
4697 4689 error = CRYPTO_MECHANISM_INVALID;
4698 4690 }
4699 4691
4700 4692 DBG(softc, DENTRY,
4701 4693 "dca_verify_recover_atomic: done, err = 0x%x", error);
4702 4694
4703 4695 return (error);
4704 4696 }
4705 4697
4706 4698 /*
4707 4699 * Random number entry points.
4708 4700 */
4709 4701
4710 4702 /* ARGSUSED */
4711 4703 static int
4712 4704 dca_generate_random(crypto_provider_handle_t provider,
4713 4705 crypto_session_id_t session_id,
4714 4706 uchar_t *buf, size_t len, crypto_req_handle_t req)
4715 4707 {
4716 4708 int error = CRYPTO_FAILED;
4717 4709 dca_t *softc = (dca_t *)provider;
4718 4710 /* LINTED E_FUNC_SET_NOT_USED */
4719 4711 int instance;
4720 4712
4721 4713 instance = ddi_get_instance(softc->dca_dip);
4722 4714 DBG(softc, DENTRY, "dca_generate_random: started");
4723 4715
4724 4716 error = dca_rng(softc, buf, len, req);
4725 4717
4726 4718 DBG(softc, DENTRY, "dca_generate_random: done, err = 0x%x", error);
4727 4719
4728 4720 return (error);
4729 4721 }
4730 4722
4731 4723 /*
4732 4724 * Context management entry points.
4733 4725 */
4734 4726
4735 4727 int
4736 4728 dca_free_context(crypto_ctx_t *ctx)
4737 4729 {
4738 4730 int error = CRYPTO_SUCCESS;
4739 4731 dca_t *softc;
4740 4732 /* LINTED E_FUNC_SET_NOT_USED */
4741 4733 int instance;
4742 4734
4743 4735 /* extract softc and instance number from context */
4744 4736 DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4745 4737 DBG(softc, DENTRY, "dca_free_context: entered");
4746 4738
4747 4739 if (ctx->cc_provider_private == NULL)
4748 4740 return (error);
4749 4741
4750 4742 dca_rmlist2(ctx->cc_provider_private, &softc->dca_ctx_list_lock);
4751 4743
4752 4744 error = dca_free_context_low(ctx);
4753 4745
4754 4746 DBG(softc, DENTRY, "dca_free_context: done, err = 0x%x", error);
4755 4747
4756 4748 return (error);
4757 4749 }
4758 4750
4759 4751 static int
4760 4752 dca_free_context_low(crypto_ctx_t *ctx)
4761 4753 {
4762 4754 int error = CRYPTO_SUCCESS;
4763 4755
4764 4756 /* check mechanism */
4765 4757 switch (DCA_MECH_FROM_CTX(ctx)) {
4766 4758 case DES_CBC_MECH_INFO_TYPE:
4767 4759 case DES3_CBC_MECH_INFO_TYPE:
4768 4760 dca_3desctxfree(ctx);
4769 4761 break;
4770 4762 case RSA_PKCS_MECH_INFO_TYPE:
4771 4763 case RSA_X_509_MECH_INFO_TYPE:
4772 4764 dca_rsactxfree(ctx);
4773 4765 break;
4774 4766 case DSA_MECH_INFO_TYPE:
4775 4767 dca_dsactxfree(ctx);
4776 4768 break;
4777 4769 default:
4778 4770 /* Should never reach here */
4779 4771 cmn_err(CE_WARN, "dca_free_context_low: unexpected mech type "
4780 4772 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4781 4773 error = CRYPTO_MECHANISM_INVALID;
4782 4774 }
4783 4775
4784 4776 return (error);
4785 4777 }
4786 4778
4787 4779
4788 4780 /* Free any unfreed private context. It is called in detach. */
4789 4781 static void
4790 4782 dca_free_context_list(dca_t *dca)
4791 4783 {
4792 4784 dca_listnode_t *node;
4793 4785 crypto_ctx_t ctx;
4794 4786
4795 4787 (void) memset(&ctx, 0, sizeof (ctx));
4796 4788 ctx.cc_provider = dca;
4797 4789
4798 4790 while ((node = dca_delist2(&dca->dca_ctx_list,
4799 4791 &dca->dca_ctx_list_lock)) != NULL) {
4800 4792 ctx.cc_provider_private = node;
4801 4793 (void) dca_free_context_low(&ctx);
4802 4794 }
4803 4795 }
4804 4796
4805 4797 static int
4806 4798 ext_info_sym(crypto_provider_handle_t prov,
4807 4799 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
4808 4800 {
4809 4801 return (ext_info_base(prov, ext_info, cfreq, IDENT_SYM));
4810 4802 }
4811 4803
4812 4804 static int
4813 4805 ext_info_asym(crypto_provider_handle_t prov,
4814 4806 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
4815 4807 {
4816 4808 int rv;
4817 4809
4818 4810 rv = ext_info_base(prov, ext_info, cfreq, IDENT_ASYM);
4819 4811 /* The asymmetric cipher slot supports random */
4820 4812 ext_info->ei_flags |= CRYPTO_EXTF_RNG;
4821 4813
4822 4814 return (rv);
4823 4815 }
4824 4816
4825 4817 /* ARGSUSED */
4826 4818 static int
4827 4819 ext_info_base(crypto_provider_handle_t prov,
4828 4820 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id)
4829 4821 {
4830 4822 dca_t *dca = (dca_t *)prov;
4831 4823 int len;
4832 4824
4833 4825 /* Label */
4834 4826 (void) sprintf((char *)ext_info->ei_label, "%s/%d %s",
4835 4827 ddi_driver_name(dca->dca_dip), ddi_get_instance(dca->dca_dip), id);
4836 4828 len = strlen((char *)ext_info->ei_label);
4837 4829 (void) memset(ext_info->ei_label + len, ' ',
4838 4830 CRYPTO_EXT_SIZE_LABEL - len);
4839 4831
4840 4832 /* Manufacturer ID */
4841 4833 (void) sprintf((char *)ext_info->ei_manufacturerID, "%s",
4842 4834 DCA_MANUFACTURER_ID);
4843 4835 len = strlen((char *)ext_info->ei_manufacturerID);
4844 4836 (void) memset(ext_info->ei_manufacturerID + len, ' ',
4845 4837 CRYPTO_EXT_SIZE_MANUF - len);
4846 4838
4847 4839 /* Model */
4848 4840 (void) sprintf((char *)ext_info->ei_model, dca->dca_model);
4849 4841
4850 4842 DBG(dca, DWARN, "kCF MODEL: %s", (char *)ext_info->ei_model);
4851 4843
4852 4844 len = strlen((char *)ext_info->ei_model);
4853 4845 (void) memset(ext_info->ei_model + len, ' ',
4854 4846 CRYPTO_EXT_SIZE_MODEL - len);
4855 4847
4856 4848 /* Serial Number. Blank for Deimos */
4857 4849 (void) memset(ext_info->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL);
4858 4850
4859 4851 ext_info->ei_flags = CRYPTO_EXTF_WRITE_PROTECTED;
4860 4852
4861 4853 ext_info->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO;
4862 4854 ext_info->ei_max_pin_len = CRYPTO_UNAVAILABLE_INFO;
4863 4855 ext_info->ei_min_pin_len = CRYPTO_UNAVAILABLE_INFO;
4864 4856 ext_info->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO;
4865 4857 ext_info->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO;
4866 4858 ext_info->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO;
4867 4859 ext_info->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO;
4868 4860 ext_info->ei_hardware_version.cv_major = 0;
4869 4861 ext_info->ei_hardware_version.cv_minor = 0;
4870 4862 ext_info->ei_firmware_version.cv_major = 0;
4871 4863 ext_info->ei_firmware_version.cv_minor = 0;
4872 4864
4873 4865 /* Time. No need to be supplied for token without a clock */
4874 4866 ext_info->ei_time[0] = '\000';
4875 4867
4876 4868 return (CRYPTO_SUCCESS);
4877 4869 }
4878 4870
4879 4871 static void
4880 4872 dca_fma_init(dca_t *dca)
4881 4873 {
4882 4874 ddi_iblock_cookie_t fm_ibc;
4883 4875 int fm_capabilities = DDI_FM_EREPORT_CAPABLE |
4884 4876 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
4885 4877 DDI_FM_ERRCB_CAPABLE;
4886 4878
4887 4879 /* Read FMA capabilities from dca.conf file (if present) */
4888 4880 dca->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip,
4889 4881 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
4890 4882 fm_capabilities);
4891 4883
4892 4884 DBG(dca, DWARN, "dca->fm_capabilities = 0x%x", dca->fm_capabilities);
4893 4885
4894 4886 /* Only register with IO Fault Services if we have some capability */
4895 4887 if (dca->fm_capabilities) {
4896 4888 dca_regsattr.devacc_attr_access = DDI_FLAGERR_ACC;
4897 4889 dca_dmaattr.dma_attr_flags = DDI_DMA_FLAGERR;
4898 4890
4899 4891 /* Register capabilities with IO Fault Services */
4900 4892 ddi_fm_init(dca->dca_dip, &dca->fm_capabilities, &fm_ibc);
4901 4893 DBG(dca, DWARN, "fm_capable() = 0x%x",
4902 4894 ddi_fm_capable(dca->dca_dip));
4903 4895
4904 4896 /*
4905 4897 * Initialize pci ereport capabilities if ereport capable
4906 4898 */
4907 4899 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
4908 4900 DDI_FM_ERRCB_CAP(dca->fm_capabilities))
4909 4901 pci_ereport_setup(dca->dca_dip);
4910 4902
4911 4903 /*
4912 4904 * Initialize callback mutex and register error callback if
4913 4905 * error callback capable.
4914 4906 */
4915 4907 if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
4916 4908 ddi_fm_handler_register(dca->dca_dip, dca_fm_error_cb,
4917 4909 (void *)dca);
4918 4910 }
4919 4911 } else {
4920 4912 /*
4921 4913 * These fields have to be cleared of FMA if there are no
4922 4914 * FMA capabilities at runtime.
4923 4915 */
4924 4916 dca_regsattr.devacc_attr_access = DDI_DEFAULT_ACC;
4925 4917 dca_dmaattr.dma_attr_flags = 0;
4926 4918 }
4927 4919 }
4928 4920
4929 4921
4930 4922 static void
4931 4923 dca_fma_fini(dca_t *dca)
4932 4924 {
4933 4925 /* Only unregister FMA capabilities if we registered some */
4934 4926 if (dca->fm_capabilities) {
4935 4927
4936 4928 /*
4937 4929 * Release any resources allocated by pci_ereport_setup()
4938 4930 */
4939 4931 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
4940 4932 DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
4941 4933 pci_ereport_teardown(dca->dca_dip);
4942 4934 }
4943 4935
4944 4936 /*
4945 4937 * Free callback mutex and un-register error callback if
4946 4938 * error callback capable.
4947 4939 */
4948 4940 if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
4949 4941 ddi_fm_handler_unregister(dca->dca_dip);
4950 4942 }
4951 4943
4952 4944 /* Unregister from IO Fault Services */
4953 4945 ddi_fm_fini(dca->dca_dip);
4954 4946 DBG(dca, DWARN, "fm_capable() = 0x%x",
4955 4947 ddi_fm_capable(dca->dca_dip));
4956 4948 }
4957 4949 }
4958 4950
4959 4951
4960 4952 /*
4961 4953 * The IO fault service error handling callback function
4962 4954 */
4963 4955 /*ARGSUSED*/
4964 4956 static int
4965 4957 dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4966 4958 {
4967 4959 dca_t *dca = (dca_t *)impl_data;
4968 4960
4969 4961 pci_ereport_post(dip, err, NULL);
4970 4962 if (err->fme_status == DDI_FM_FATAL) {
4971 4963 dca_failure(dca, DDI_DATAPATH_FAULT,
4972 4964 DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
4973 4965 "fault PCI in FMA callback.");
4974 4966 }
4975 4967 return (err->fme_status);
4976 4968 }
4977 4969
4978 4970
4979 4971 static int
4980 4972 dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
4981 4973 dca_fma_eclass_t eclass_index)
4982 4974 {
4983 4975 ddi_fm_error_t de;
4984 4976 int version = 0;
4985 4977
4986 4978 ddi_fm_acc_err_get(handle, &de, version);
4987 4979 if (de.fme_status != DDI_FM_OK) {
4988 4980 dca_failure(dca, DDI_DATAPATH_FAULT,
4989 4981 eclass_index, fm_ena_increment(de.fme_ena),
4990 4982 CRYPTO_DEVICE_ERROR, "");
4991 4983 return (DDI_FAILURE);
4992 4984 }
4993 4985
4994 4986 return (DDI_SUCCESS);
4995 4987 }
4996 4988
4997 4989 int
4998 4990 dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle,
4999 4991 dca_fma_eclass_t eclass_index)
5000 4992 {
5001 4993 ddi_fm_error_t de;
5002 4994 int version = 0;
5003 4995
5004 4996 ddi_fm_dma_err_get(handle, &de, version);
5005 4997 if (de.fme_status != DDI_FM_OK) {
5006 4998 dca_failure(dca, DDI_DATAPATH_FAULT,
5007 4999 eclass_index, fm_ena_increment(de.fme_ena),
5008 5000 CRYPTO_DEVICE_ERROR, "");
5009 5001 return (DDI_FAILURE);
5010 5002 }
5011 5003 return (DDI_SUCCESS);
5012 5004 }
5013 5005
5014 5006 static uint64_t
5015 5007 dca_ena(uint64_t ena)
5016 5008 {
5017 5009 if (ena == 0)
5018 5010 ena = fm_ena_generate(0, FM_ENA_FMT1);
5019 5011 else
5020 5012 ena = fm_ena_increment(ena);
5021 5013 return (ena);
5022 5014 }
5023 5015
5024 5016 static char *
5025 5017 dca_fma_eclass_string(char *model, dca_fma_eclass_t index)
5026 5018 {
5027 5019 if (strstr(model, "500"))
5028 5020 return (dca_fma_eclass_sca500[index]);
5029 5021 else
5030 5022 return (dca_fma_eclass_sca1000[index]);
5031 5023 }
↓ open down ↓ |
3330 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX