 * Platform hooks we will need.
 */

#pragma weak plat_build_mem_nodes
#pragma weak plat_slice_add
#pragma weak plat_slice_del

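/*
 * These hooks are weak symbols: on a platform that supplies no
 * implementation, their addresses resolve to 0, so tests such as
 * "if (&plat_slice_add)" below select between the platform override
 * and the generic code at runtime.  As an illustrative sketch only
 * (hypothetical platform code, not part of this file), an override
 * might look like:
 *
 *	void
 *	plat_slice_add(pfn_t start, pfn_t end)
 *	{
 *		(record the slice in platform board/lgroup tables
 *		 so PFN_2_MEM_NODE() can resolve it, then hand the
 *		 range to the generic layer)
 *		mem_node_add_slice(start, end);
 *	}
 */
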
/*
 * Adjust the memnode config after a DR operation.
 *
 * It is rather tricky to do these updates since we can't
 * protect the memnode structures with locks, so we must
 * be mindful of the order in which updates to and reads of
 * these values can occur.
 */

void
mem_node_add_slice(pfn_t start, pfn_t end)
{
	int mnode;
	mnodeset_t newmask, oldmask;

	/*
	 * DR will pass us the first pfn that is allocatable.
	 * We need to round down to get the real start of
	 * the slice.
	 */
	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}
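	/*
	 * Worked example of the rounding above (illustrative numbers):
	 * with 8K pages and mem_node_physalign = 1G, btop(1G) = 131072
	 * pages, a power of two, which the mask trick relies on.  A DR
	 * request of start = 131500, end = 200000 then becomes
	 *	start = 131500 & ~(131072 - 1)      = 131072
	 *	end   = roundup(200000, 131072) - 1 = 262143
	 * i.e. the whole aligned slice containing the request.
	 */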

	mnode = PFN_2_MEM_NODE(start);
	ASSERT(mnode >= 0 && mnode < max_mem_nodes);

	if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
		/*
		 * Add slice to existing node.
		 */
		if (start < mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = start;
		if (end > mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = end;
	} else {
		mem_node_config[mnode].physbase = start;
		mem_node_config[mnode].physmax = end;
		atomic_inc_16(&num_memnodes);
		do {
			oldmask = memnodes_mask;
			newmask = memnodes_mask | (1ull << mnode);
		} while (atomic_cas_64(&memnodes_mask, oldmask, newmask) !=
		    oldmask);
	}
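
	/*
	 * The CAS loop above is a portable atomic OR of the mnode's bit
	 * into memnodes_mask: re-snapshot and retry whenever another
	 * updater raced in between the read and the swap.  Where the
	 * primitive is available, it collapses to
	 *	atomic_or_64(&memnodes_mask, 1ull << mnode);
	 * Note the publication order: exists is claimed and the
	 * physbase/physmax bounds are set before the mask bit becomes
	 * visible, so lock-free readers that discover the node via
	 * memnodes_mask find its bounds already initialized.
	 */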

	/*
	 * Inform the common lgrp framework about the new memory
	 */
	lgrp_config(LGRP_CONFIG_MEM_ADD, mnode, MEM_NODE_2_LGRPHAND(mnode));
}
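
/*
 * Illustrative sketch (hypothetical helper, not a function in this
 * file) of how a lock-free reader mirrors the update order above:
 * check exists before trusting the bounds.
 *
 *	static int
 *	pfn_in_mnode(pfn_t pfn, int mnode)
 *	{
 *		if (!mem_node_config[mnode].exists)
 *			return (0);
 *		return (pfn >= mem_node_config[mnode].physbase &&
 *		    pfn <= mem_node_config[mnode].physmax);
 *	}
 */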

/*
 * Remove a PFN range from a memnode.  On some platforms,
 * the memnode will be created with physbase at the first
 * allocatable PFN, but later deleted with the MC slice
 * base address converted to a PFN, in which case we need
 * to assume physbase and up.
 */
void
mem_node_del_slice(pfn_t start, pfn_t end)
{
	int mnode;
	pgcnt_t delta_pgcnt, node_size;
	mnodeset_t omask, nmask;

	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}
	mnode = PFN_2_MEM_NODE(start);

	ASSERT(mnode >= 0 && mnode < max_mem_nodes);
	ASSERT(mem_node_config[mnode].exists == 1);

	delta_pgcnt = end - start;
	node_size = mem_node_config[mnode].physmax -
	    mem_node_config[mnode].physbase;

	if (node_size > delta_pgcnt) {
		/*
		 * Subtract the slice from the memnode.
		 */
		if (start <= mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = end + 1;
		ASSERT(end <= mem_node_config[mnode].physmax);
		if (end == mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = start - 1;
	} else {
		/*
		 * Let the common lgrp framework know this mnode is
		 * leaving
		 */
		lgrp_config(LGRP_CONFIG_MEM_DEL,
		    mnode, MEM_NODE_2_LGRPHAND(mnode));

		/*
		 * Delete the whole node.
		 */
		ASSERT(MNODE_PGCNT(mnode) == 0);
		do {
			omask = memnodes_mask;
			nmask = omask & ~(1ull << mnode);
		} while (atomic_cas_64(&memnodes_mask, omask, nmask) != omask);
		atomic_dec_16(&num_memnodes);
		mem_node_config[mnode].exists = 0;
	}
}
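
/*
 * Example of the two delete paths above (illustrative numbers): a node
 * spanning [0x10000, 0x2ffff] that loses the slice [0x10000, 0x1ffff]
 * takes the shrink path (node_size > delta_pgcnt) and simply raises
 * physbase to 0x20000, with exists left at 1 throughout.  Only when
 * the last slice goes is the whole node torn down, and the mask bit
 * and exists are cleared after lgrp_config(LGRP_CONFIG_MEM_DEL), so
 * the lgrp framework is never left referencing a vanished node it was
 * not told about.
 */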

void
mem_node_add_range(pfn_t start, pfn_t end)
{
	if (&plat_slice_add)
		plat_slice_add(start, end);
	else
		mem_node_add_slice(start, end);
}

void
mem_node_del_range(pfn_t start, pfn_t end)
{
	if (&plat_slice_del)
		plat_slice_del(start, end);
	else
		mem_node_del_slice(start, end);
}

void
startup_build_mem_nodes(struct memlist *list)
{
	pfn_t	start, end;

	/* LINTED: ASSERT will always true or false */
	ASSERT(NBBY * sizeof (mnodeset_t) >= max_mem_nodes);

	if (&plat_build_mem_nodes) {
		plat_build_mem_nodes(list);
	} else {
		/*
		 * Walk the boot memlist and hand each usable range to
		 * the generic layer, clipping to physmax.
		 */
		while (list) {
			start = list->ml_address >> PAGESHIFT;
			if (start > physmax) {
				/*
				 * Advance before continuing; skipping
				 * the advance would loop forever.
				 */
				list = list->ml_next;
				continue;
			}
			end =
			    (list->ml_address + list->ml_size - 1) >> PAGESHIFT;
			if (end > physmax)
				end = physmax;
			mem_node_add_range(start, end);
			list = list->ml_next;
		}
		mem_node_physalign = 0;
		mem_node_pfn_shift = 0;
	}
}
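
/*
 * Worked example of the pfn conversion above (illustrative numbers,
 * assuming 4K pages, i.e. PAGESHIFT == 12): a boot-list entry with
 * ml_address = 0x40000000 and ml_size = 0x20000000 yields
 *	start = 0x40000000 >> 12                    = 0x40000
 *	end   = (0x40000000 + 0x20000000 - 1) >> 12 = 0x5ffff
 * which mem_node_add_range() then routes to the platform hook or to
 * mem_node_add_slice().
 */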

/*
 * Allocate an unassigned memnode.
 */
int
mem_node_alloc()
{
	int mnode;
	mnodeset_t newmask, oldmask;

	/*
	 * Find an unused memnode.  Update it atomically to prevent
	 * a first time memnode creation race.
	 */
	for (mnode = 0; mnode < max_mem_nodes; mnode++)
		if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists,
		    0, 1) == 0)
			break;

	if (mnode >= max_mem_nodes)
		panic("Out of free memnodes\n");

	mem_node_config[mnode].physbase = (pfn_t)-1l;
	mem_node_config[mnode].physmax = 0;
	atomic_inc_16(&num_memnodes);
	do {
		oldmask = memnodes_mask;
		newmask = memnodes_mask | (1ull << mnode);
	} while (atomic_cas_64(&memnodes_mask, oldmask, newmask) != oldmask);

	return (mnode);
}
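
/*
 * Typical caller pattern (illustrative sketch; the binding step is
 * platform-specific and hypothetical here): DR code allocates a fresh
 * memnode, wires it into its own pfn-to-mnode mapping so that
 * PFN_2_MEM_NODE() will resolve to it, then publishes the range:
 *
 *	mnode = mem_node_alloc();
 *	(record mnode in platform board/lgroup tables)
 *	mem_node_add_slice(base_pfn, base_pfn + npgs - 1);
 *
 * The physbase = (pfn_t)-1l / physmax = 0 initialization above makes
 * every range test fail on the empty node until real bounds are set.
 */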

/*
 * Find the intersection between a memnode and a memlist
 * and return the number of pages that overlap.
 *
 * Assumes the list is protected from DR operations by
 * the memlist lock.
 */
pgcnt_t
mem_node_memlist_pages(int mnode, struct memlist *mlist)
{
	pfn_t base, end;
	pfn_t cur_base, cur_end;
	pgcnt_t npgs;
	struct memlist *pmem;

	base = mem_node_config[mnode].physbase;