XXXX pass in cpu_pause_func via pause_cpus
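The single functional change in this webrev is the pause_cpus() call in the MEM_CACHE_READ_TAGS path (old line 818): the driver is updated for the new two-argument pause_cpus() interface, which, judging by the change title, now takes the cpu_pause_func callback as an explicit parameter rather than a global; passing NULL keeps the default pause behavior. A minimal sketch of the calling pattern this driver uses around its cross trap, mirroring the code below (the NULL-callback semantics are an assumption based on this change's title, not something stated in the webrev itself):

	mutex_enter(&cpu_lock);
	(void) pause_cpus(NULL, NULL);	/* assumed: no CPU excluded, default pause routine */
	mutex_exit(&cpu_lock);

	/* ... cross-trap the target CPU and collect the cache logout data ... */

	mutex_enter(&cpu_lock);
	(void) start_cpus();		/* resume the paused CPUs */
	mutex_exit(&cpu_lock);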
--- old/usr/src/uts/sun4u/io/mem_cache.c
+++ new/usr/src/uts/sun4u/io/mem_cache.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 27 * Driver to retire/unretire L2/L3 cachelines on panther
28 28 */
29 29 #include <sys/types.h>
30 30 #include <sys/types32.h>
31 31 #include <sys/time.h>
32 32 #include <sys/errno.h>
33 33 #include <sys/cmn_err.h>
34 34 #include <sys/param.h>
35 35 #include <sys/modctl.h>
36 36 #include <sys/conf.h>
37 37 #include <sys/open.h>
38 38 #include <sys/stat.h>
39 39 #include <sys/ddi.h>
40 40 #include <sys/sunddi.h>
41 41 #include <sys/file.h>
42 42 #include <sys/cpuvar.h>
43 43 #include <sys/x_call.h>
44 44 #include <sys/cheetahregs.h>
45 45 #include <sys/mem_cache.h>
46 46 #include <sys/mem_cache_ioctl.h>
47 47
48 48 extern int retire_l2(uint64_t, uint64_t);
49 49 extern int retire_l2_alternate(uint64_t, uint64_t);
50 50 extern int unretire_l2(uint64_t, uint64_t);
51 51 extern int unretire_l2_alternate(uint64_t, uint64_t);
52 52 extern int retire_l3(uint64_t, uint64_t);
53 53 extern int retire_l3_alternate(uint64_t, uint64_t);
54 54 extern int unretire_l3(uint64_t, uint64_t);
55 55 extern int unretire_l3_alternate(uint64_t, uint64_t);
56 56
57 57 extern void retire_l2_start(uint64_t, uint64_t);
58 58 extern void retire_l2_end(uint64_t, uint64_t);
59 59 extern void unretire_l2_start(uint64_t, uint64_t);
60 60 extern void unretire_l2_end(uint64_t, uint64_t);
61 61 extern void retire_l3_start(uint64_t, uint64_t);
62 62 extern void retire_l3_end(uint64_t, uint64_t);
63 63 extern void unretire_l3_start(uint64_t, uint64_t);
64 64 extern void unretire_l3_end(uint64_t, uint64_t);
65 65
66 66 extern void get_ecache_dtags_tl1(uint64_t, ch_cpu_logout_t *);
67 67 extern void get_l2_tag_tl1(uint64_t, uint64_t);
68 68 extern void get_l3_tag_tl1(uint64_t, uint64_t);
69 69 extern const int _ncpu;
70 70
71 71 /* Macro for putting 64-bit onto stack as two 32-bit ints */
72 72 #define PRTF_64_TO_32(x) (uint32_t)((x)>>32), (uint32_t)(x)
73 73
74 74
75 75 uint_t l2_flush_retries_done = 0;
76 76 int mem_cache_debug = 0x0;
77 77 uint64_t pattern = 0;
78 78 uint32_t retire_failures = 0;
79 79 #ifdef DEBUG
80 80 int inject_anonymous_tag_error = 0;
81 81 int32_t last_error_injected_way = 0;
82 82 uint8_t last_error_injected_bit = 0;
83 83 int32_t last_l3tag_error_injected_way;
84 84 uint8_t last_l3tag_error_injected_bit;
85 85 int32_t last_l2tag_error_injected_way;
86 86 uint8_t last_l2tag_error_injected_bit;
87 87 #endif
88 88
89 89 /* dev_ops and cb_ops entry point function declarations */
90 90 static int mem_cache_attach(dev_info_t *, ddi_attach_cmd_t);
91 91 static int mem_cache_detach(dev_info_t *, ddi_detach_cmd_t);
92 92 static int mem_cache_getinfo(dev_info_t *, ddi_info_cmd_t, void *,
93 93 void **);
94 94 static int mem_cache_open(dev_t *, int, int, cred_t *);
95 95 static int mem_cache_close(dev_t, int, int, cred_t *);
96 96 static int mem_cache_ioctl_ops(int, int, cache_info_t *);
97 97 static int mem_cache_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
98 98
99 99 struct cb_ops mem_cache_cb_ops = {
100 100 mem_cache_open,
101 101 mem_cache_close,
102 102 nodev,
103 103 nodev,
104 104 nodev, /* dump */
105 105 nodev,
106 106 nodev,
107 107 mem_cache_ioctl,
108 108 nodev, /* devmap */
109 109 nodev,
110 110 ddi_segmap, /* segmap */
111 111 nochpoll,
112 112 ddi_prop_op,
113 113 NULL, /* for STREAMS drivers */
114 114 D_NEW | D_MP /* driver compatibility flag */
115 115 };
116 116
117 117 static struct dev_ops mem_cache_dev_ops = {
118 118 DEVO_REV, /* driver build version */
119 119 0, /* device reference count */
120 120 mem_cache_getinfo,
121 121 nulldev,
122 122 nulldev, /* probe */
123 123 mem_cache_attach,
124 124 mem_cache_detach,
125 125 nulldev, /* reset */
126 126 &mem_cache_cb_ops,
127 127 (struct bus_ops *)NULL,
128 128 nulldev, /* power */
129 129 ddi_quiesce_not_needed, /* quiesce */
130 130 };
131 131
132 132 /*
133 133 * Soft state
134 134 */
135 135 struct mem_cache_softc {
136 136 dev_info_t *dip;
137 137 kmutex_t mutex;
138 138 };
139 139 #define getsoftc(inst) ((struct mem_cache_softc *)ddi_get_soft_state(statep,\
140 140 (inst)))
141 141
142 142 /* module configuration stuff */
143 143 static void *statep;
144 144 extern struct mod_ops mod_driverops;
145 145
146 146 static struct modldrv modldrv = {
147 147 &mod_driverops,
148 148 "mem_cache_driver (08/01/30) ",
149 149 &mem_cache_dev_ops
150 150 };
151 151
152 152 static struct modlinkage modlinkage = {
153 153 MODREV_1,
154 154 &modldrv,
155 155 0
156 156 };
157 157
158 158 extern const int _ncpu; /* Pull the kernel's global _ncpu definition */
159 159
160 160 int
161 161 _init(void)
162 162 {
163 163 int e;
164 164
165 165 if (e = ddi_soft_state_init(&statep, sizeof (struct mem_cache_softc),
166 166 MAX_MEM_CACHE_INSTANCES)) {
167 167 return (e);
168 168 }
169 169
170 170 if ((e = mod_install(&modlinkage)) != 0)
171 171 ddi_soft_state_fini(&statep);
172 172
173 173 return (e);
174 174 }
175 175
176 176 int
177 177 _fini(void)
178 178 {
179 179 int e;
180 180
181 181 if ((e = mod_remove(&modlinkage)) != 0)
182 182 return (e);
183 183
184 184 ddi_soft_state_fini(&statep);
185 185
186 186 return (DDI_SUCCESS);
187 187 }
188 188
189 189 int
190 190 _info(struct modinfo *modinfop)
191 191 {
192 192 return (mod_info(&modlinkage, modinfop));
193 193 }
194 194
195 195 /*ARGSUSED*/
196 196 static int
197 197 mem_cache_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
198 198 {
199 199 int inst;
200 200 int retval = DDI_SUCCESS;
201 201 struct mem_cache_softc *softc;
202 202
203 203 inst = getminor((dev_t)arg);
204 204
205 205 switch (cmd) {
206 206 case DDI_INFO_DEVT2DEVINFO:
207 207 if ((softc = getsoftc(inst)) == NULL) {
208 208 *result = (void *)NULL;
209 209 retval = DDI_FAILURE;
210 210 } else
211 211 *result = (void *)softc->dip;
212 212 break;
213 213
214 214 case DDI_INFO_DEVT2INSTANCE:
215 215 *result = (void *)((uintptr_t)inst);
216 216 break;
217 217
218 218 default:
219 219 retval = DDI_FAILURE;
220 220 }
221 221
222 222 return (retval);
223 223 }
224 224
225 225 static int
226 226 mem_cache_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
227 227 {
228 228 int inst;
229 229 struct mem_cache_softc *softc = NULL;
230 230 char name[80];
231 231
232 232 switch (cmd) {
233 233 case DDI_ATTACH:
234 234 inst = ddi_get_instance(dip);
235 235 if (inst >= MAX_MEM_CACHE_INSTANCES) {
236 236 cmn_err(CE_WARN, "attach failed, too many instances\n");
237 237 return (DDI_FAILURE);
238 238 }
239 239 (void) sprintf(name, MEM_CACHE_DRIVER_NAME"%d", inst);
240 240 if (ddi_create_priv_minor_node(dip, name,
241 241 S_IFCHR,
242 242 inst,
243 243 DDI_PSEUDO,
244 244 0, NULL, "all", 0640) ==
245 245 DDI_FAILURE) {
246 246 ddi_remove_minor_node(dip, NULL);
247 247 return (DDI_FAILURE);
248 248 }
249 249
250 250 /* Allocate a soft state structure for this instance */
251 251 if (ddi_soft_state_zalloc(statep, inst) != DDI_SUCCESS) {
252 252 cmn_err(CE_WARN, " ddi_soft_state_zalloc() failed "
253 253 "for inst %d\n", inst);
254 254 goto attach_failed;
255 255 }
256 256
257 257 /* Setup soft state */
258 258 softc = getsoftc(inst);
259 259 softc->dip = dip;
260 260 mutex_init(&softc->mutex, NULL, MUTEX_DRIVER, NULL);
261 261
262 262 /* Create main environmental node */
263 263 ddi_report_dev(dip);
264 264
265 265 return (DDI_SUCCESS);
266 266
267 267 case DDI_RESUME:
268 268 return (DDI_SUCCESS);
269 269
270 270 default:
271 271 return (DDI_FAILURE);
272 272 }
273 273
274 274 attach_failed:
275 275
276 276 /* Free soft state, if allocated. remove minor node if added earlier */
277 277 if (softc)
278 278 ddi_soft_state_free(statep, inst);
279 279
280 280 ddi_remove_minor_node(dip, NULL);
281 281
282 282 return (DDI_FAILURE);
283 283 }
284 284
285 285 static int
286 286 mem_cache_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
287 287 {
288 288 int inst;
289 289 struct mem_cache_softc *softc;
290 290
291 291 switch (cmd) {
292 292 case DDI_DETACH:
293 293 inst = ddi_get_instance(dip);
294 294 if ((softc = getsoftc(inst)) == NULL)
295 295 return (ENXIO);
296 296
297 297 /* Free the soft state and remove minor node added earlier */
298 298 mutex_destroy(&softc->mutex);
299 299 ddi_soft_state_free(statep, inst);
300 300 ddi_remove_minor_node(dip, NULL);
301 301 return (DDI_SUCCESS);
302 302
303 303 case DDI_SUSPEND:
304 304 return (DDI_SUCCESS);
305 305
306 306 default:
307 307 return (DDI_FAILURE);
308 308 }
309 309 }
310 310
311 311 /*ARGSUSED*/
312 312 static int
313 313 mem_cache_open(dev_t *devp, int flag, int otyp, cred_t *credp)
314 314 {
315 315 int inst = getminor(*devp);
316 316
317 317 return (getsoftc(inst) == NULL ? ENXIO : 0);
318 318 }
319 319
320 320 /*ARGSUSED*/
321 321 static int
322 322 mem_cache_close(dev_t dev, int flag, int otyp, cred_t *credp)
323 323 {
324 324 int inst = getminor(dev);
325 325
326 326 return (getsoftc(inst) == NULL ? ENXIO : 0);
327 327 }
328 328
329 329 static char *tstate_to_desc[] = {
330 330 "Invalid", /* 0 */
331 331 "Shared", /* 1 */
332 332 "Exclusive", /* 2 */
333 333 "Owner", /* 3 */
334 334 "Modified", /* 4 */
335 335 "NA", /* 5 */
336 336 "Owner/Shared", /* 6 */
337 337 "Reserved(7)", /* 7 */
338 338 };
339 339
340 340 static char *
341 341 tag_state_to_desc(uint8_t tagstate)
342 342 {
343 343 return (tstate_to_desc[tagstate & CH_ECSTATE_MASK]);
344 344 }
345 345
346 346 void
347 347 print_l2_tag(uint64_t tag_addr, uint64_t l2_tag)
348 348 {
349 349 uint64_t l2_subaddr;
350 350 uint8_t l2_state;
351 351
352 352 l2_subaddr = PN_L2TAG_TO_PA(l2_tag);
353 353 l2_subaddr |= (tag_addr & PN_L2_INDEX_MASK);
354 354
355 355 l2_state = (l2_tag & CH_ECSTATE_MASK);
356 356 cmn_err(CE_CONT,
357 357 "PA=0x%08x.%08x E$tag 0x%08x.%08x E$state %s\n",
358 358 PRTF_64_TO_32(l2_subaddr),
359 359 PRTF_64_TO_32(l2_tag),
360 360 tag_state_to_desc(l2_state));
361 361 }
362 362
363 363 void
364 364 print_l2cache_line(ch_cpu_logout_t *clop)
365 365 {
366 366 uint64_t l2_subaddr;
367 367 int i, offset;
368 368 uint8_t way, l2_state;
369 369 ch_ec_data_t *ecp;
370 370
371 371
372 372 for (way = 0; way < PN_CACHE_NWAYS; way++) {
373 373 ecp = &clop->clo_data.chd_l2_data[way];
374 374 l2_subaddr = PN_L2TAG_TO_PA(ecp->ec_tag);
375 375 l2_subaddr |= (ecp->ec_idx & PN_L2_INDEX_MASK);
376 376
377 377 l2_state = (ecp->ec_tag & CH_ECSTATE_MASK);
378 378 cmn_err(CE_CONT,
379 379 "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
380 380 "E$tag 0x%08x.%08x E$state %s",
381 381 way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(l2_subaddr),
382 382 PRTF_64_TO_32(ecp->ec_tag),
383 383 tag_state_to_desc(l2_state));
384 384 /*
385 385 * Dump out Ecache subblock data captured.
386 386 * For Cheetah, we need to compute the ECC for each 16-byte
387 387 * chunk and compare it with the captured chunk ECC to figure
388 388 * out which chunk is bad.
389 389 */
390 390 for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
391 391 ec_data_elm_t *ecdptr;
392 392 uint64_t d_low, d_high;
393 393 uint32_t ecc;
394 394 int l2_data_idx = (i/2);
395 395
396 396 offset = i * 16;
397 397 ecdptr = &clop->clo_data.chd_l2_data[way].ec_data
398 398 [l2_data_idx];
399 399 if ((i & 1) == 0) {
400 400 ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
401 401 d_high = ecdptr->ec_d8[0];
402 402 d_low = ecdptr->ec_d8[1];
403 403 } else {
404 404 ecc = ecdptr->ec_eccd & 0x1ff;
405 405 d_high = ecdptr->ec_d8[2];
406 406 d_low = ecdptr->ec_d8[3];
407 407 }
408 408
409 409 cmn_err(CE_CONT,
410 410 "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
411 411 " ECC 0x%03x",
412 412 offset, PRTF_64_TO_32(d_high),
413 413 PRTF_64_TO_32(d_low), ecc);
414 414 }
415 415 } /* end of for way loop */
416 416 }
417 417
418 418 void
419 419 print_ecache_line(ch_cpu_logout_t *clop)
420 420 {
421 421 uint64_t ec_subaddr;
422 422 int i, offset;
423 423 uint8_t way, ec_state;
424 424 ch_ec_data_t *ecp;
425 425
426 426
427 427 for (way = 0; way < PN_CACHE_NWAYS; way++) {
428 428 ecp = &clop->clo_data.chd_ec_data[way];
429 429 ec_subaddr = PN_L3TAG_TO_PA(ecp->ec_tag);
430 430 ec_subaddr |= (ecp->ec_idx & PN_L3_TAG_RD_MASK);
431 431
432 432 ec_state = (ecp->ec_tag & CH_ECSTATE_MASK);
433 433 cmn_err(CE_CONT,
434 434 "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
435 435 "E$tag 0x%08x.%08x E$state %s",
436 436 way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(ec_subaddr),
437 437 PRTF_64_TO_32(ecp->ec_tag),
438 438 tag_state_to_desc(ec_state));
439 439 /*
440 440 * Dump out Ecache subblock data captured.
441 441 * For Cheetah, we need to compute the ECC for each 16-byte
442 442 * chunk and compare it with the captured chunk ECC to figure
443 443 * out which chunk is bad.
444 444 */
445 445 for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
446 446 ec_data_elm_t *ecdptr;
447 447 uint64_t d_low, d_high;
448 448 uint32_t ecc;
449 449 int ec_data_idx = (i/2);
450 450
451 451 offset = i * 16;
452 452 ecdptr =
453 453 &clop->clo_data.chd_ec_data[way].ec_data
454 454 [ec_data_idx];
455 455 if ((i & 1) == 0) {
456 456 ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
457 457 d_high = ecdptr->ec_d8[0];
458 458 d_low = ecdptr->ec_d8[1];
459 459 } else {
460 460 ecc = ecdptr->ec_eccd & 0x1ff;
461 461 d_high = ecdptr->ec_d8[2];
462 462 d_low = ecdptr->ec_d8[3];
463 463 }
464 464
465 465 cmn_err(CE_CONT,
466 466 "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
467 467 " ECC 0x%03x",
468 468 offset, PRTF_64_TO_32(d_high),
469 469 PRTF_64_TO_32(d_low), ecc);
470 470 }
471 471 }
472 472 }
473 473
474 474 static boolean_t
475 475 tag_addr_collides(uint64_t tag_addr, cache_id_t type,
476 476 retire_func_t start_of_func, retire_func_t end_of_func)
477 477 {
478 478 uint64_t start_paddr, end_paddr;
479 479 char *type_str;
480 480
481 481 start_paddr = va_to_pa((void *)start_of_func);
482 482 end_paddr = va_to_pa((void *)end_of_func);
483 483 switch (type) {
484 484 case L2_CACHE_TAG:
485 485 case L2_CACHE_DATA:
486 486 tag_addr &= PN_L2_INDEX_MASK;
487 487 start_paddr &= PN_L2_INDEX_MASK;
488 488 end_paddr &= PN_L2_INDEX_MASK;
489 489 type_str = "L2:";
490 490 break;
491 491 case L3_CACHE_TAG:
492 492 case L3_CACHE_DATA:
493 493 tag_addr &= PN_L3_TAG_RD_MASK;
494 494 start_paddr &= PN_L3_TAG_RD_MASK;
495 495 end_paddr &= PN_L3_TAG_RD_MASK;
496 496 type_str = "L3:";
497 497 break;
498 498 default:
499 499 /*
500 500 * Should never reach here.
501 501 */
502 502 ASSERT(0);
503 503 return (B_FALSE);
504 504 }
505 505 if ((tag_addr > (start_paddr - 0x100)) &&
506 506 (tag_addr < (end_paddr + 0x100))) {
507 507 if (mem_cache_debug & 0x1)
508 508 cmn_err(CE_CONT,
509 509 "%s collision detected tag_addr = 0x%08x"
510 510 " start_paddr = 0x%08x end_paddr = 0x%08x\n",
511 511 type_str, (uint32_t)tag_addr, (uint32_t)start_paddr,
512 512 (uint32_t)end_paddr);
513 513 return (B_TRUE);
514 514 }
515 515 else
516 516 return (B_FALSE);
517 517 }
518 518
519 519 static uint64_t
520 520 get_tag_addr(cache_info_t *cache_info)
521 521 {
522 522 uint64_t tag_addr, scratch;
523 523
524 524 switch (cache_info->cache) {
525 525 case L2_CACHE_TAG:
526 526 case L2_CACHE_DATA:
527 527 tag_addr = (uint64_t)(cache_info->index <<
528 528 PN_CACHE_LINE_SHIFT);
529 529 scratch = (uint64_t)(cache_info->way <<
530 530 PN_L2_WAY_SHIFT);
531 531 tag_addr |= scratch;
532 532 tag_addr |= PN_L2_IDX_HW_ECC_EN;
533 533 break;
534 534 case L3_CACHE_TAG:
535 535 case L3_CACHE_DATA:
536 536 tag_addr = (uint64_t)(cache_info->index <<
537 537 PN_CACHE_LINE_SHIFT);
538 538 scratch = (uint64_t)(cache_info->way <<
539 539 PN_L3_WAY_SHIFT);
540 540 tag_addr |= scratch;
541 541 tag_addr |= PN_L3_IDX_HW_ECC_EN;
542 542 break;
543 543 default:
544 544 /*
545 545 * Should never reach here.
546 546 */
547 547 ASSERT(0);
548 548 return (uint64_t)(0);
549 549 }
550 550 return (tag_addr);
551 551 }
552 552
553 553 static int
554 554 mem_cache_ioctl_ops(int cmd, int mode, cache_info_t *cache_info)
555 555 {
556 556 int ret_val = 0;
557 557 uint64_t afar, tag_addr;
558 558 ch_cpu_logout_t clop;
559 559 uint64_t Lxcache_tag_data[PN_CACHE_NWAYS];
560 560 int i, retire_retry_count;
561 561 cpu_t *cpu;
562 562 uint64_t tag_data;
563 563 uint8_t state;
564 564
565 565 if (cache_info->way >= PN_CACHE_NWAYS)
566 566 return (EINVAL);
567 567 switch (cache_info->cache) {
568 568 case L2_CACHE_TAG:
569 569 case L2_CACHE_DATA:
570 570 if (cache_info->index >=
571 571 (PN_L2_SET_SIZE/PN_L2_LINESIZE))
572 572 return (EINVAL);
573 573 break;
574 574 case L3_CACHE_TAG:
575 575 case L3_CACHE_DATA:
576 576 if (cache_info->index >=
577 577 (PN_L3_SET_SIZE/PN_L3_LINESIZE))
578 578 return (EINVAL);
579 579 break;
580 580 default:
581 581 return (ENOTSUP);
582 582 }
583 583 /*
584 584 * Check if we have a valid cpu ID and that
585 585 * CPU is ONLINE.
586 586 */
587 587 mutex_enter(&cpu_lock);
588 588 cpu = cpu_get(cache_info->cpu_id);
589 589 if ((cpu == NULL) || (!cpu_is_online(cpu))) {
590 590 mutex_exit(&cpu_lock);
591 591 return (EINVAL);
592 592 }
593 593 mutex_exit(&cpu_lock);
594 594 pattern = 0; /* default value of TAG PA when cacheline is retired. */
595 595 switch (cmd) {
596 596 case MEM_CACHE_RETIRE:
597 597 tag_addr = get_tag_addr(cache_info);
598 598 pattern |= PN_ECSTATE_NA;
599 599 retire_retry_count = 0;
600 600 affinity_set(cache_info->cpu_id);
601 601 switch (cache_info->cache) {
602 602 case L2_CACHE_DATA:
603 603 case L2_CACHE_TAG:
604 604 if ((cache_info->bit & MSB_BIT_MASK) ==
605 605 MSB_BIT_MASK)
606 606 pattern |= PN_L2TAG_PA_MASK;
607 607 retry_l2_retire:
608 608 if (tag_addr_collides(tag_addr,
609 609 cache_info->cache,
610 610 retire_l2_start, retire_l2_end))
611 611 ret_val =
612 612 retire_l2_alternate(
613 613 tag_addr, pattern);
614 614 else
615 615 ret_val = retire_l2(tag_addr,
616 616 pattern);
617 617 if (ret_val == 1) {
618 618 /*
619 619 * cacheline was in retired
620 620 * STATE already.
621 621 * so return success.
622 622 */
623 623 ret_val = 0;
624 624 }
625 625 if (ret_val < 0) {
626 626 cmn_err(CE_WARN,
627 627 "retire_l2() failed. index = 0x%x way %d. Retrying...\n",
628 628 cache_info->index,
629 629 cache_info->way);
630 630 if (retire_retry_count >= 2) {
631 631 retire_failures++;
632 632 affinity_clear();
633 633 return (EIO);
634 634 }
635 635 retire_retry_count++;
636 636 goto retry_l2_retire;
637 637 }
638 638 if (ret_val == 2)
639 639 l2_flush_retries_done++;
640 640 /*
641 641 * We bind ourself to a CPU and send cross trap to
642 642 * ourself. On return from xt_one we can rely on the
643 643 * data in tag_data being filled in. Normally one would
644 644 * do a xt_sync to make sure that the CPU has completed
645 645 * the cross trap call xt_one.
646 646 */
647 647 xt_one(cache_info->cpu_id,
648 648 (xcfunc_t *)(get_l2_tag_tl1),
649 649 tag_addr, (uint64_t)(&tag_data));
650 650 state = tag_data & CH_ECSTATE_MASK;
651 651 if (state != PN_ECSTATE_NA) {
652 652 retire_failures++;
653 653 print_l2_tag(tag_addr,
654 654 tag_data);
655 655 cmn_err(CE_WARN,
656 656 "L2 RETIRE:failed for index 0x%x way %d. Retrying...\n",
657 657 cache_info->index,
658 658 cache_info->way);
659 659 if (retire_retry_count >= 2) {
660 660 retire_failures++;
661 661 affinity_clear();
662 662 return (EIO);
663 663 }
664 664 retire_retry_count++;
665 665 goto retry_l2_retire;
666 666 }
667 667 break;
668 668 case L3_CACHE_TAG:
669 669 case L3_CACHE_DATA:
670 670 if ((cache_info->bit & MSB_BIT_MASK) ==
671 671 MSB_BIT_MASK)
672 672 pattern |= PN_L3TAG_PA_MASK;
673 673 if (tag_addr_collides(tag_addr,
674 674 cache_info->cache,
675 675 retire_l3_start, retire_l3_end))
676 676 ret_val =
677 677 retire_l3_alternate(
678 678 tag_addr, pattern);
679 679 else
680 680 ret_val = retire_l3(tag_addr,
681 681 pattern);
682 682 if (ret_val == 1) {
683 683 /*
684 684 * cacheline was in retired
685 685 * STATE already.
686 686 * so return success.
687 687 */
688 688 ret_val = 0;
689 689 }
690 690 if (ret_val < 0) {
691 691 cmn_err(CE_WARN,
692 692 "retire_l3() failed. ret_val = %d index = 0x%x\n",
693 693 ret_val,
694 694 cache_info->index);
695 695 retire_failures++;
696 696 affinity_clear();
697 697 return (EIO);
698 698 }
699 699 /*
700 700 * We bind ourself to a CPU and send cross trap to
701 701 * ourself. On return from xt_one we can rely on the
702 702 * data in tag_data being filled in. Normally one would
703 703 * do a xt_sync to make sure that the CPU has completed
704 704 * the cross trap call xt_one.
705 705 */
706 706 xt_one(cache_info->cpu_id,
707 707 (xcfunc_t *)(get_l3_tag_tl1),
708 708 tag_addr, (uint64_t)(&tag_data));
709 709 state = tag_data & CH_ECSTATE_MASK;
710 710 if (state != PN_ECSTATE_NA) {
711 711 cmn_err(CE_WARN,
712 712 "L3 RETIRE failed for index 0x%x\n",
713 713 cache_info->index);
714 714 retire_failures++;
715 715 affinity_clear();
716 716 return (EIO);
717 717 }
718 718
719 719 break;
720 720 }
721 721 affinity_clear();
722 722 break;
723 723 case MEM_CACHE_UNRETIRE:
724 724 tag_addr = get_tag_addr(cache_info);
725 725 pattern = PN_ECSTATE_INV;
726 726 affinity_set(cache_info->cpu_id);
727 727 switch (cache_info->cache) {
728 728 case L2_CACHE_DATA:
729 729 case L2_CACHE_TAG:
730 730 /*
731 731 * We bind ourself to a CPU and send cross trap to
732 732 * ourself. On return from xt_one we can rely on the
733 733 * data in tag_data being filled in. Normally one would
734 734 * do a xt_sync to make sure that the CPU has completed
735 735 * the cross trap call xt_one.
736 736 */
737 737 xt_one(cache_info->cpu_id,
738 738 (xcfunc_t *)(get_l2_tag_tl1),
739 739 tag_addr, (uint64_t)(&tag_data));
740 740 state = tag_data & CH_ECSTATE_MASK;
741 741 if (state != PN_ECSTATE_NA) {
742 742 affinity_clear();
743 743 return (EINVAL);
744 744 }
745 745 if (tag_addr_collides(tag_addr,
746 746 cache_info->cache,
747 747 unretire_l2_start, unretire_l2_end))
748 748 ret_val =
749 749 unretire_l2_alternate(
750 750 tag_addr, pattern);
751 751 else
752 752 ret_val =
753 753 unretire_l2(tag_addr,
754 754 pattern);
755 755 if (ret_val != 0) {
756 756 cmn_err(CE_WARN,
757 757 "unretire_l2() failed. ret_val = %d index = 0x%x\n",
758 758 ret_val,
759 759 cache_info->index);
760 760 retire_failures++;
761 761 affinity_clear();
762 762 return (EIO);
763 763 }
764 764 break;
765 765 case L3_CACHE_TAG:
766 766 case L3_CACHE_DATA:
767 767 /*
768 768 * We bind ourself to a CPU and send cross trap to
769 769 * ourself. On return from xt_one we can rely on the
770 770 * data in tag_data being filled in. Normally one would
771 771 * do a xt_sync to make sure that the CPU has completed
772 772 * the cross trap call xt_one.
773 773 */
774 774 xt_one(cache_info->cpu_id,
775 775 (xcfunc_t *)(get_l3_tag_tl1),
776 776 tag_addr, (uint64_t)(&tag_data));
777 777 state = tag_data & CH_ECSTATE_MASK;
778 778 if (state != PN_ECSTATE_NA) {
779 779 affinity_clear();
780 780 return (EINVAL);
781 781 }
782 782 if (tag_addr_collides(tag_addr,
783 783 cache_info->cache,
784 784 unretire_l3_start, unretire_l3_end))
785 785 ret_val =
786 786 unretire_l3_alternate(
787 787 tag_addr, pattern);
788 788 else
789 789 ret_val =
790 790 unretire_l3(tag_addr,
791 791 pattern);
792 792 if (ret_val != 0) {
793 793 cmn_err(CE_WARN,
794 794 "unretire_l3() failed. ret_val = %d index = 0x%x\n",
795 795 ret_val,
796 796 cache_info->index);
797 797 affinity_clear();
798 798 return (EIO);
799 799 }
800 800 break;
801 801 }
802 802 affinity_clear();
803 803 break;
804 804 case MEM_CACHE_ISRETIRED:
805 805 case MEM_CACHE_STATE:
806 806 return (ENOTSUP);
807 807 case MEM_CACHE_READ_TAGS:
808 808 #ifdef DEBUG
809 809 case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
810 810 #endif
811 811 /*
812 812 * Read tag and data for all the ways at a given afar
813 813 */
814 814 afar = (uint64_t)(cache_info->index
815 815 << PN_CACHE_LINE_SHIFT);
816 816 mutex_enter(&cpu_lock);
817 817 affinity_set(cache_info->cpu_id);
818 - (void) pause_cpus(NULL);
818 + (void) pause_cpus(NULL, NULL);
819 819 mutex_exit(&cpu_lock);
820 820 /*
821 821 * We bind ourself to a CPU and send cross trap to
822 822 * ourself. On return from xt_one we can rely on the
823 823 * data in clop being filled in. Normally one would
824 824 * do a xt_sync to make sure that the CPU has completed
825 825 * the cross trap call xt_one.
826 826 */
827 827 xt_one(cache_info->cpu_id,
828 828 (xcfunc_t *)(get_ecache_dtags_tl1),
829 829 afar, (uint64_t)(&clop));
830 830 mutex_enter(&cpu_lock);
831 831 (void) start_cpus();
832 832 mutex_exit(&cpu_lock);
833 833 affinity_clear();
834 834 switch (cache_info->cache) {
835 835 case L2_CACHE_TAG:
836 836 for (i = 0; i < PN_CACHE_NWAYS; i++) {
837 837 Lxcache_tag_data[i] =
838 838 clop.clo_data.chd_l2_data
839 839 [i].ec_tag;
840 840 }
841 841 #ifdef DEBUG
842 842 last_error_injected_bit =
843 843 last_l2tag_error_injected_bit;
844 844 last_error_injected_way =
845 845 last_l2tag_error_injected_way;
846 846 #endif
847 847 break;
848 848 case L3_CACHE_TAG:
849 849 for (i = 0; i < PN_CACHE_NWAYS; i++) {
850 850 Lxcache_tag_data[i] =
851 851 clop.clo_data.chd_ec_data
852 852 [i].ec_tag;
853 853 }
854 854 #ifdef DEBUG
855 855 last_error_injected_bit =
856 856 last_l3tag_error_injected_bit;
857 857 last_error_injected_way =
858 858 last_l3tag_error_injected_way;
859 859 #endif
860 860 break;
861 861 default:
862 862 return (ENOTSUP);
863 863 } /* end if switch(cache) */
864 864 #ifdef DEBUG
865 865 if ((cmd == MEM_CACHE_READ_ERROR_INJECTED_TAGS) &&
866 866 (inject_anonymous_tag_error == 0) &&
867 867 (last_error_injected_way >= 0) &&
868 868 (last_error_injected_way <= 3)) {
869 869 pattern = ((uint64_t)1 <<
870 870 last_error_injected_bit);
871 871 /*
872 872 * If error bit is ECC we need to make sure
 873 873 			 * ECC on all WAYS are corrupted.
874 874 */
875 875 if ((last_error_injected_bit >= 6) &&
876 876 (last_error_injected_bit <= 14)) {
877 877 for (i = 0; i < PN_CACHE_NWAYS; i++)
878 878 Lxcache_tag_data[i] ^=
879 879 pattern;
880 880 } else
881 881 Lxcache_tag_data
882 882 [last_error_injected_way] ^=
883 883 pattern;
884 884 }
885 885 #endif
886 886 if (ddi_copyout((caddr_t)Lxcache_tag_data,
887 887 (caddr_t)cache_info->datap,
888 888 sizeof (Lxcache_tag_data), mode)
889 889 != DDI_SUCCESS) {
890 890 return (EFAULT);
891 891 }
892 892 break; /* end of READ_TAGS */
893 893 default:
894 894 return (ENOTSUP);
895 895 } /* end if switch(cmd) */
896 896 return (ret_val);
897 897 }
898 898
899 899 /*ARGSUSED*/
900 900 static int
901 901 mem_cache_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
902 902 int *rvalp)
903 903 {
904 904 int inst;
905 905 struct mem_cache_softc *softc;
906 906 cache_info_t cache_info;
907 907 cache_info32_t cache_info32;
908 908 int ret_val;
909 909 int is_panther;
910 910
911 911 inst = getminor(dev);
912 912 if ((softc = getsoftc(inst)) == NULL)
913 913 return (ENXIO);
914 914
915 915 mutex_enter(&softc->mutex);
916 916
917 917 #ifdef _MULTI_DATAMODEL
918 918 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
919 919 if (ddi_copyin((cache_info32_t *)arg, &cache_info32,
920 920 sizeof (cache_info32), mode) != DDI_SUCCESS) {
921 921 mutex_exit(&softc->mutex);
922 922 return (EFAULT);
923 923 }
924 924 cache_info.cache = cache_info32.cache;
925 925 cache_info.index = cache_info32.index;
926 926 cache_info.way = cache_info32.way;
927 927 cache_info.cpu_id = cache_info32.cpu_id;
928 928 cache_info.bit = cache_info32.bit;
929 929 cache_info.datap = (void *)((uint64_t)cache_info32.datap);
930 930 } else
931 931 #endif
932 932 if (ddi_copyin((cache_info_t *)arg, &cache_info,
933 933 sizeof (cache_info), mode) != DDI_SUCCESS) {
934 934 mutex_exit(&softc->mutex);
935 935 return (EFAULT);
936 936 }
937 937
938 938 if ((cache_info.cpu_id < 0) || (cache_info.cpu_id >= _ncpu)) {
939 939 mutex_exit(&softc->mutex);
940 940 return (EINVAL);
941 941 }
942 942 is_panther = IS_PANTHER(cpunodes[cache_info.cpu_id].implementation);
943 943 if (!is_panther) {
944 944 mutex_exit(&softc->mutex);
945 945 return (ENOTSUP);
946 946 }
947 947 switch (cmd) {
948 948 case MEM_CACHE_RETIRE:
949 949 case MEM_CACHE_UNRETIRE:
950 950 if ((mode & FWRITE) == 0) {
951 951 ret_val = EBADF;
952 952 break;
953 953 }
954 954 /*FALLTHROUGH*/
955 955 case MEM_CACHE_ISRETIRED:
956 956 case MEM_CACHE_STATE:
957 957 case MEM_CACHE_READ_TAGS:
958 958 #ifdef DEBUG
959 959 case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
960 960 #endif
961 961 ret_val = mem_cache_ioctl_ops(cmd, mode, &cache_info);
962 962 break;
963 963 default:
964 964 ret_val = ENOTSUP;
965 965 break;
966 966 }
967 967 mutex_exit(&softc->mutex);
968 968 return (ret_val);
969 969 }