5042 stop using deprecated atomic functions
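
This change swaps the legacy cas32()/cas64() names in the sun4 memnode code for atomic_cas_32()/atomic_cas_64() from <sys/atomic.h>. The arguments and return semantics are unchanged (each call compares *target with the expected value, stores the new value on a match, and returns what *target held before the call), so the call sites only need the new names plus a line re-wrap at old line 101. As a reminder of those semantics, here is a minimal standalone sketch, not taken from the file, of the "claim exactly once" test that mem_node_add_slice() and mem_node_alloc() rely on:

    #include <sys/types.h>
    #include <sys/atomic.h>

    /*
     * Sketch only: the caller that gets the old value 0 back is the one
     * that performed the 0 -> 1 transition; every later caller sees 1.
     */
    static volatile uint32_t exists;

    static int
    claim_once(void)
    {
            return (atomic_cas_32(&exists, 0, 1) == 0);
    }
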
--- old/usr/src/uts/sun4/os/memnode.c
+++ new/usr/src/uts/sun4/os/memnode.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 #include <sys/systm.h>
27 27 #include <sys/platform_module.h>
28 28 #include <sys/sysmacros.h>
29 29 #include <sys/atomic.h>
30 30 #include <sys/memlist.h>
31 31 #include <sys/memnode.h>
32 32 #include <vm/vm_dep.h>
33 33
34 34 int max_mem_nodes = 1; /* max memory nodes on this system */
35 35
36 36 struct mem_node_conf mem_node_config[MAX_MEM_NODES];
37 37 int mem_node_pfn_shift;
38 38 /*
39 39 * num_memnodes should be updated atomically and always >=
40 40 * the number of bits in memnodes_mask or the algorithm may fail.
41 41 */
42 42 uint16_t num_memnodes;
43 43 mnodeset_t memnodes_mask; /* assumes 8*(sizeof(mnodeset_t)) >= MAX_MEM_NODES */
44 44
45 45 /*
46 46 * If set, mem_node_physalign should be a power of two, and
47 47 * should reflect the minimum address alignment of each node.
48 48 */
49 49 uint64_t mem_node_physalign;
50 50
51 51 /*
52 52 * Platform hooks we will need.
53 53 */
54 54
55 55 #pragma weak plat_build_mem_nodes
56 56 #pragma weak plat_slice_add
57 57 #pragma weak plat_slice_del
58 58
59 59 /*
60 60 * Adjust the memnode config after a DR operation.
61 61 *
62 62 * It is rather tricky to do these updates since we can't
63 63 * protect the memnode structures with locks, so we must
64 64 * be mindful of the order in which updates and reads to
65 65 * these values can occur.
66 66 */
67 67 void
68 68 mem_node_add_slice(pfn_t start, pfn_t end)
69 69 {
70 70 int mnode;
71 71 mnodeset_t newmask, oldmask;
72 72
73 73 /*
74 74 * DR will pass us the first pfn that is allocatable.
75 75 * We need to round down to get the real start of
76 76 * the slice.
77 77 */
78 78 if (mem_node_physalign) {
79 79 start &= ~(btop(mem_node_physalign) - 1);
80 80 end = roundup(end, btop(mem_node_physalign)) - 1;
81 81 }
82 82
83 83 mnode = PFN_2_MEM_NODE(start);
84 84 ASSERT(mnode < max_mem_nodes);
85 85
86 - if (cas32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
86 + if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
87 87 /*
88 88 * Add slice to existing node.
89 89 */
90 90 if (start < mem_node_config[mnode].physbase)
91 91 mem_node_config[mnode].physbase = start;
92 92 if (end > mem_node_config[mnode].physmax)
93 93 mem_node_config[mnode].physmax = end;
94 94 } else {
95 95 mem_node_config[mnode].physbase = start;
96 96 mem_node_config[mnode].physmax = end;
97 97 atomic_add_16(&num_memnodes, 1);
98 98 do {
99 99 oldmask = memnodes_mask;
100 100 newmask = memnodes_mask | (1ull << mnode);
101 - } while (cas64(&memnodes_mask, oldmask, newmask) != oldmask);
101 + } while (atomic_cas_64(&memnodes_mask, oldmask, newmask) !=
102 + oldmask);
102 103 }
103 104 /*
104 105 * Let the common lgrp framework know about the new memory
105 106 */
106 107 lgrp_config(LGRP_CONFIG_MEM_ADD, mnode, MEM_NODE_2_LGRPHAND(mnode));
107 108 }
108 109
109 110 /*
110 111 * Remove a PFN range from a memnode. On some platforms,
111 112 * the memnode will be created with physbase at the first
112 113 * allocatable PFN, but later deleted with the MC slice
113 114 * base address converted to a PFN, in which case we need
114 115 * to assume physbase and up.
115 116 */
116 117 void
117 118 mem_node_del_slice(pfn_t start, pfn_t end)
118 119 {
119 120 int mnode;
120 121 pgcnt_t delta_pgcnt, node_size;
121 122 mnodeset_t omask, nmask;
122 123
123 124 if (mem_node_physalign) {
124 125 start &= ~(btop(mem_node_physalign) - 1);
125 126 end = roundup(end, btop(mem_node_physalign)) - 1;
126 127 }
127 128 mnode = PFN_2_MEM_NODE(start);
128 129
129 130 ASSERT(mnode < max_mem_nodes);
130 131 ASSERT(mem_node_config[mnode].exists == 1);
131 132
132 133 delta_pgcnt = end - start;
133 134 node_size = mem_node_config[mnode].physmax -
134 135 mem_node_config[mnode].physbase;
135 136
136 137 if (node_size > delta_pgcnt) {
137 138 /*
138 139 * Subtract the slice from the memnode.
139 140 */
140 141 if (start <= mem_node_config[mnode].physbase)
141 142 mem_node_config[mnode].physbase = end + 1;
142 143 ASSERT(end <= mem_node_config[mnode].physmax);
143 144 if (end == mem_node_config[mnode].physmax)
144 145 mem_node_config[mnode].physmax = start - 1;
145 146 } else {
146 147
147 148 /*
148 149 * Let the common lgrp framework know the mnode is
149 150 * leaving
150 151 */
151 152 lgrp_config(LGRP_CONFIG_MEM_DEL, mnode,
152 153 MEM_NODE_2_LGRPHAND(mnode));
153 154
154 155 /*
155 156 * Delete the whole node.
156 157 */
157 158 ASSERT(MNODE_PGCNT(mnode) == 0);
158 159 do {
159 160 omask = memnodes_mask;
160 161 nmask = omask & ~(1ull << mnode);
161 - } while (cas64(&memnodes_mask, omask, nmask) != omask);
162 + } while (atomic_cas_64(&memnodes_mask, omask, nmask) != omask);
162 163 atomic_add_16(&num_memnodes, -1);
163 164 mem_node_config[mnode].exists = 0;
164 165 }
165 166 }
166 167
167 168 void
168 169 mem_node_add_range(pfn_t start, pfn_t end)
169 170 {
170 171 if (&plat_slice_add != NULL)
171 172 plat_slice_add(start, end);
172 173 else
173 174 mem_node_add_slice(start, end);
174 175 }
175 176
176 177 void
177 178 mem_node_del_range(pfn_t start, pfn_t end)
178 179 {
179 180 if (&plat_slice_del != NULL)
180 181 plat_slice_del(start, end);
181 182 else
182 183 mem_node_del_slice(start, end);
183 184 }
184 185
185 186 void
186 187 startup_build_mem_nodes(prom_memlist_t *list, size_t nelems)
187 188 {
188 189 size_t elem;
189 190 pfn_t basepfn;
190 191 pgcnt_t npgs;
191 192
192 193 /* LINTED: ASSERT will always true or false */
193 194 ASSERT(NBBY * sizeof (mnodeset_t) >= max_mem_nodes);
194 195
195 196 if (&plat_build_mem_nodes != NULL) {
196 197 plat_build_mem_nodes(list, nelems);
197 198 } else {
198 199 /*
199 200 * Boot install lists are arranged <addr, len>, ...
200 201 */
201 202 for (elem = 0; elem < nelems; list++, elem++) {
202 203 basepfn = btop(list->addr);
203 204 npgs = btop(list->size);
204 205 mem_node_add_range(basepfn, basepfn + npgs - 1);
205 206 }
206 207 }
207 208 }
208 209
209 210 /*
210 211 * Allocate an unassigned memnode.
211 212 */
212 213 int
213 214 mem_node_alloc()
214 215 {
215 216 int mnode;
216 217 mnodeset_t newmask, oldmask;
217 218
218 219 /*
219 220 * Find an unused memnode. Update it atomically to prevent
220 221 * a first time memnode creation race.
221 222 */
222 223 for (mnode = 0; mnode < max_mem_nodes; mnode++)
223 - if (cas32((uint32_t *)&mem_node_config[mnode].exists,
224 + if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists,
224 225 0, 1) == 0)
225 226 break;
226 227
227 228 if (mnode >= max_mem_nodes)
228 229 panic("Out of free memnodes\n");
229 230
230 231 mem_node_config[mnode].physbase = (uint64_t)-1;
231 232 mem_node_config[mnode].physmax = 0;
232 233 atomic_add_16(&num_memnodes, 1);
233 234 do {
234 235 oldmask = memnodes_mask;
235 236 newmask = memnodes_mask | (1ull << mnode);
236 - } while (cas64(&memnodes_mask, oldmask, newmask) != oldmask);
237 + } while (atomic_cas_64(&memnodes_mask, oldmask, newmask) != oldmask);
237 238
238 239 return (mnode);
239 240 }
240 241
241 242 /*
242 243 * Find the intersection between a memnode and a memlist
243 244 * and returns the number of pages that overlap.
244 245 *
245 246 * Grab the memlist lock to protect the list from DR operations.
246 247 */
247 248 pgcnt_t
248 249 mem_node_memlist_pages(int mnode, struct memlist *mlist)
249 250 {
250 251 pfn_t base, end;
251 252 pfn_t cur_base, cur_end;
252 253 pgcnt_t npgs = 0;
253 254 pgcnt_t pages;
254 255 struct memlist *pmem;
255 256
256 257 if (&plat_mem_node_intersect_range != NULL) {
257 258 memlist_read_lock();
258 259
259 260 for (pmem = mlist; pmem; pmem = pmem->ml_next) {
260 261 plat_mem_node_intersect_range(btop(pmem->ml_address),
261 262 btop(pmem->ml_size), mnode, &pages);
262 263 npgs += pages;
263 264 }
264 265
265 266 memlist_read_unlock();
266 267 return (npgs);
267 268 }
268 269
269 270 base = mem_node_config[mnode].physbase;
270 271 end = mem_node_config[mnode].physmax;
271 272
272 273 memlist_read_lock();
273 274
274 275 for (pmem = mlist; pmem; pmem = pmem->ml_next) {
275 276 cur_base = btop(pmem->ml_address);
276 277 cur_end = cur_base + btop(pmem->ml_size) - 1;
277 278 if (end < cur_base || base > cur_end)
278 279 continue;
279 280 npgs = npgs + (MIN(cur_end, end) -
280 281 MAX(cur_base, base)) + 1;
281 282 }
282 283
283 284 memlist_read_unlock();
284 285
285 286 return (npgs);
286 287 }
287 288
288 289 /*
289 290 * Find MIN(physbase) and MAX(physmax) over all mnodes
290 291 *
291 292 * Called during startup and DR to find hpm_counters limits when
292 293 * interleaved_mnodes is set.
293 294 * NOTE: there is a race condition with DR if it tries to change more than
294 295 * one mnode in parallel. Sizing shared hpm_counters depends on finding the
295 296 * min(physbase) and max(physmax) across all mnodes. Therefore, the caller of
296 297 * page_ctrs_adjust must ensure that mem_node_config does not change while it
297 298 * is running.
298 299 */
299 300 void
300 301 mem_node_max_range(pfn_t *basep, pfn_t *maxp)
301 302 {
302 303 int mnode;
303 304 pfn_t max = 0;
304 305 pfn_t base = (pfn_t)-1;
305 306
306 307 for (mnode = 0; mnode < max_mem_nodes; mnode++) {
307 308 if (mem_node_config[mnode].exists == 0)
308 309 continue;
309 310 if (max < mem_node_config[mnode].physmax)
310 311 max = mem_node_config[mnode].physmax;
311 312 if (base > mem_node_config[mnode].physbase)
312 313 base = mem_node_config[mnode].physbase;
313 314 }
314 315 ASSERT(base != (pfn_t)-1 && max != 0);
315 316 *basep = base;
316 317 *maxp = max;
317 318 }
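
The block comment above mem_node_add_slice() explains why these call sites matter: the memnode structures are not protected by a lock, so memnodes_mask is updated with a compare-and-swap retry loop. A minimal standalone sketch of that idiom with the non-deprecated 64-bit primitive follows; the names below are illustrative, and mnodeset_t is assumed to be a 64-bit type, as the 1ull shifts in the file suggest.

    #include <sys/types.h>
    #include <sys/atomic.h>

    typedef uint64_t mnodeset_t;            /* assumed width; see memnode.h */
    static volatile mnodeset_t memnodes_mask;

    /*
     * Set bit `mnode' in memnodes_mask without a lock: read the mask,
     * compute the desired value, and retry whenever another CPU changed
     * the mask between the read and the atomic_cas_64().
     */
    static void
    memnode_mask_set(int mnode)
    {
            mnodeset_t oldmask, newmask;

            do {
                    oldmask = memnodes_mask;
                    newmask = oldmask | (1ull << mnode);
            } while (atomic_cas_64(&memnodes_mask, oldmask, newmask) != oldmask);
    }

The same loop with the bit cleared rather than set (nmask = omask & ~(1ull << mnode)) is what mem_node_del_slice() uses before decrementing num_memnodes.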