/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/machsystm.h>
#include <sys/cpu.h>
#include <sys/intreg.h>
#include <sys/machcpuvar.h>
#include <vm/hat_sfmmu.h>
#include <sys/error.h>
#include <sys/hypervisor_api.h>

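/*
 * Register this cpu's mondo and error queues with the hypervisor via
 * hv_cpu_qconf(): the cpu_mondo and dev_mondo interrupt queues and the
 * resumable and non-resumable error queues.  The queue memory itself is
 * allocated by cpu_intrq_setup().  A configuration failure is fatal,
 * since the cpu cannot operate without its queues.
 */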
void
cpu_intrq_register(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	uint64_t ret;

	ret = hv_cpu_qconf(INTR_CPU_Q, mcpup->cpu_q_base_pa, cpu_q_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: cpu_mondo queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(INTR_DEV_Q, mcpup->dev_q_base_pa, dev_q_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: dev_mondo queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(CPU_RQ, mcpup->cpu_rq_base_pa, cpu_rq_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: resumable error queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(CPU_NRQ, mcpup->cpu_nrq_base_pa, cpu_nrq_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: non-resumable error queue "
		    "configuration failed, error %lu", cpu->cpu_id, ret);
}

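/*
 * Allocate this cpu's mondo data, cpu list, and sun4v interrupt and
 * error queues.  Returns 0 on success and ENOMEM on any allocation
 * failure; a partially completed setup is left for cpu_intrq_cleanup()
 * to undo.
 */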
int
cpu_intrq_setup(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	size_t size;

	/*
	 * This routine returns an error if any contig_mem_alloc() fails.
	 * The caller is expected to call cpu_intrq_cleanup() (or
	 * cleanup_cpu_common(), which will), which cleanly frees only
	 * those blocks that were successfully allocated.
	 */

	/*
	 * Allocate mondo data for xcalls.
	 */
	mcpup->mondo_data = contig_mem_alloc(INTR_REPORT_SIZE);

	if (mcpup->mondo_data == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu mondo_data allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	/*
	 * va_to_pa() is too expensive to call for every crosscall,
	 * so we do it here at init time and save it in machcpu.
	 */
	mcpup->mondo_data_ra = va_to_pa(mcpup->mondo_data);

	/*
	 * Allocate a per-cpu list of ncpu_guest_max CPU IDs for xcalls.
	 */
	size = ncpu_guest_max * sizeof (uint16_t);
	if (size < INTR_REPORT_SIZE)
		size = INTR_REPORT_SIZE;

	/*
	 * contig_mem_alloc() requires size to be a power of 2.
	 * Increase size to a power of 2 if necessary; highbit() returns
	 * the 1-based position of the most significant set bit, so for
	 * any non-power-of-2 size, 1 << highbit(size) is the next power
	 * of 2 above it.
	 */
	if ((size & (size - 1)) != 0) {
		size = 1 << highbit(size);
	}

	mcpup->cpu_list = contig_mem_alloc(size);

	if (mcpup->cpu_list == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu cpu_list allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_list_ra = va_to_pa(mcpup->cpu_list);

	/*
	 * Allocate sun4v interrupt and error queues.
	 */
	size = cpu_q_entries * INTR_REPORT_SIZE;

	mcpup->cpu_q_va = contig_mem_alloc(size);

	if (mcpup->cpu_q_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu intrq allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_q_base_pa = va_to_pa(mcpup->cpu_q_va);
	mcpup->cpu_q_size = size;

	/*
	 * Allocate the device mondo queue.
	 */
	size = dev_q_entries * INTR_REPORT_SIZE;

	mcpup->dev_q_va = contig_mem_alloc(size);

	if (mcpup->dev_q_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: dev intrq allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->dev_q_base_pa = va_to_pa(mcpup->dev_q_va);
	mcpup->dev_q_size = size;

	/*
	 * Allocate resumable queue and its kernel buffer.
	 */
	size = cpu_rq_entries * Q_ENTRY_SIZE;

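	/*
	 * Allocate twice the queue size: the first half is handed to
	 * the hypervisor as the resumable error queue itself, and the
	 * second half is the kernel buffer referred to above, holding
	 * entries taken off the live queue while they are processed.
	 */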
	mcpup->cpu_rq_va = contig_mem_alloc(2 * size);

	if (mcpup->cpu_rq_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: resumable queue allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_rq_base_pa = va_to_pa(mcpup->cpu_rq_va);
	mcpup->cpu_rq_size = size;
	/* zero out the memory */
	bzero(mcpup->cpu_rq_va, 2 * size);

	/*
	 * Allocate non-resumable queue and its kernel buffer.
	 */
	size = cpu_nrq_entries * Q_ENTRY_SIZE;

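	/*
	 * As with the resumable queue, allocate twice the queue size so
	 * the second half can serve as the kernel buffer for entries
	 * taken off the hypervisor-visible first half.
	 */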
	mcpup->cpu_nrq_va = contig_mem_alloc(2 * size);

	if (mcpup->cpu_nrq_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: nonresumable queue allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_nrq_base_pa = va_to_pa(mcpup->cpu_nrq_va);
	mcpup->cpu_nrq_size = size;
	/* zero out the memory */
	bzero(mcpup->cpu_nrq_va, 2 * size);

	return (0);
}

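/*
 * Free the mondo data, cpu list, and interrupt and error queue memory
 * allocated by cpu_intrq_setup().  This is safe to call for a cpu whose
 * setup failed partway through: only the blocks that were successfully
 * allocated are freed.
 */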
void
cpu_intrq_cleanup(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	size_t cpu_list_size;
	uint64_t cpu_q_size;
	uint64_t dev_q_size;
	uint64_t cpu_rq_size;
	uint64_t cpu_nrq_size;

	/*
	 * Free mondo data for xcalls.
	 */
	if (mcpup->mondo_data) {
		contig_mem_free(mcpup->mondo_data, INTR_REPORT_SIZE);
		mcpup->mondo_data = NULL;
		mcpup->mondo_data_ra = 0;	/* real address, not a pointer */
	}

	/*
	 * Free the per-cpu list of ncpu_guest_max CPU IDs for xcalls.
	 */
	cpu_list_size = ncpu_guest_max * sizeof (uint16_t);
	if (cpu_list_size < INTR_REPORT_SIZE)
		cpu_list_size = INTR_REPORT_SIZE;

	/*
	 * contig_mem_alloc() requires size to be a power of 2; redo the
	 * rounding done in cpu_intrq_setup() so the size passed to
	 * contig_mem_free() matches the size that was allocated.
	 */
	if ((cpu_list_size & (cpu_list_size - 1)) != 0) {
		cpu_list_size = 1 << highbit(cpu_list_size);
	}

	if (mcpup->cpu_list) {
		contig_mem_free(mcpup->cpu_list, cpu_list_size);
		mcpup->cpu_list = NULL;
		mcpup->cpu_list_ra = 0;
	}

	/*
	 * Free sun4v interrupt and error queues.
	 */
	if (mcpup->cpu_q_va) {
		cpu_q_size = cpu_q_entries * INTR_REPORT_SIZE;
		contig_mem_free(mcpup->cpu_q_va, cpu_q_size);
		mcpup->cpu_q_va = NULL;
		mcpup->cpu_q_base_pa = 0;
		mcpup->cpu_q_size = 0;
	}

	if (mcpup->dev_q_va) {
		dev_q_size = dev_q_entries * INTR_REPORT_SIZE;
		contig_mem_free(mcpup->dev_q_va, dev_q_size);
		mcpup->dev_q_va = NULL;
		mcpup->dev_q_base_pa = 0;
		mcpup->dev_q_size = 0;
	}

	if (mcpup->cpu_rq_va) {
		cpu_rq_size = cpu_rq_entries * Q_ENTRY_SIZE;
		contig_mem_free(mcpup->cpu_rq_va, 2 * cpu_rq_size);
		mcpup->cpu_rq_va = NULL;
		mcpup->cpu_rq_base_pa = 0;
		mcpup->cpu_rq_size = 0;
	}

	if (mcpup->cpu_nrq_va) {
		cpu_nrq_size = cpu_nrq_entries * Q_ENTRY_SIZE;
		contig_mem_free(mcpup->cpu_nrq_va, 2 * cpu_nrq_size);
		mcpup->cpu_nrq_va = NULL;
		mcpup->cpu_nrq_base_pa = 0;
		mcpup->cpu_nrq_size = 0;
	}
}