root/trunk/vmem.c

1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26
27 /* #pragma ident        "@(#)vmem.c     1.10    05/06/08 SMI" */
28
29 /*
30  * For a more complete description of the main ideas, see:
31  *
32  *      Jeff Bonwick and Jonathan Adams,
33  *
34  *      Magazines and vmem: Extending the Slab Allocator to Many CPUs and
35  *      Arbitrary Resources.
36  *
37  *      Proceedings of the 2001 Usenix Conference.
38  *      Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
39  *
40  * For the "Big Theory Statement", see usr/src/common/os/vmem.c
41  *
42  * 1. Overview of changes
43  * ------------------------------
44  * There have been a few changes to vmem in order to support umem.  The
45  * main areas are:
46  *
47  *      * VM_SLEEP unsupported
48  *
49  *      * Reaping changes
50  *
51  *      * Initialization changes
52  *
53  *      * _vmem_extend_alloc
54  *
55  *
56  * 2. VM_SLEEP Removed
57  * -------------------
58  * Since VM_SLEEP allocations can hold locks (in vmem_populate()) for
59  * possibly infinite amounts of time, they are not supported in this
60  * version of vmem.  Sleep-like behavior can be achieved through
61  * UMEM_NOFAIL umem allocations.
62  *
63  *
64  * 3. Reaping changes
65  * ------------------
66  * Unlike kmem_reap(), which just asynchronously schedules work, umem_reap()
67  * can do allocations and frees synchronously.  This is a problem if it
68  * occurs during a vmem_populate() allocation.
69  *
70  * Instead, we delay reaps while populates are active.
71  *
72  *
73  * 4. Initialization changes
74  * -------------------------
75  * In the kernel, vmem_init() allows you to create a single, top-level arena,
76  * which has vmem_internal_arena as a child.  For umem, we want to be able
77  * to extend arenas dynamically.  It is much easier to support this if we
78  * allow a two-level "heap" arena:
79  *
80  *      +----------+
81  *      |  "fake"  |
82  *      +----------+
83  *            |
84  *      +----------+
85  *      |  "heap"  |
86  *      +----------+
87  *        |    \ \
88  *        |     +-+-- ... <other children>
89  *        |
90  *      +---------------+
91  *      | vmem_internal |
92  *      +---------------+
93  *          | | | |
94  *         <children>
95  *
96  * The new vmem_init() allows you to specify a "parent" of the heap, along
97  * with allocation functions.
98  *
99  *
100  * 5. _vmem_extend_alloc
101  * ---------------------
102  * The other part of extending is _vmem_extend_alloc.  This function allows
103  * you to extend (expand current spans, if possible) an arena and allocate
104  * a chunk of the newly extended span atomically.  This is needed to support
105  * extending the heap while vmem_populate()ing it.
106  *
107  * In order to increase the usefulness of extending, non-imported spans are
108  * sorted in address order.
109  */
110
111 #include "config.h"
112 /* #include "mtlib.h" */
113 #include <sys/vmem_impl_user.h>
114 #if HAVE_ALLOCA_H
115 #include <alloca.h>
116 #endif
117 #ifdef HAVE_SYS_SYSMACROS_H
118 #include <sys/sysmacros.h>
119 #endif
120 #include <stdio.h>
121 #if HAVE_STRINGS_H
122 #include <strings.h>
123 #endif
124 #if HAVE_ATOMIC_H
125 #include <atomic.h>
126 #endif
127
128 #include "vmem_base.h"
129 #include "umem_base.h"
130
131 #define VMEM_INITIAL            6       /* early vmem arenas */
132 #define VMEM_SEG_INITIAL        100     /* early segments */
133
134 /*
135  * Adding a new span to an arena requires two segment structures: one to
136  * represent the span, and one to represent the free segment it contains.
137  */
138 #define VMEM_SEGS_PER_SPAN_CREATE       2
139
140 /*
141  * Allocating a piece of an existing segment requires 0-2 segment structures
142  * depending on how much of the segment we're allocating.
143  *
144  * To allocate the entire segment, no new segment structures are needed; we
145  * simply move the existing segment structure from the freelist to the
146  * allocation hash table.
147  *
148  * To allocate a piece from the left or right end of the segment, we must
149  * split the segment into two pieces (allocated part and remainder), so we
150  * need one new segment structure to represent the remainder.
151  *
152  * To allocate from the middle of a segment, we need two new segment structures
153  * to represent the remainders on either side of the allocated part.
154  */
155 #define VMEM_SEGS_PER_EXACT_ALLOC       0
156 #define VMEM_SEGS_PER_LEFT_ALLOC        1
157 #define VMEM_SEGS_PER_RIGHT_ALLOC       1
158 #define VMEM_SEGS_PER_MIDDLE_ALLOC      2
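
/*
 * Worked example (editor's addition, illustrative only): suppose a free
 * segment covers [0x1000, 0x5000) in an arena whose quantum is 0x1000.
 *
 *      - Allocating all of [0x1000, 0x5000) needs 0 new structures; the
 *        existing one simply moves from the freelist to the hash table.
 *      - Allocating [0x1000, 0x2000) (a left alloc) leaves one remainder,
 *        [0x2000, 0x5000), so 1 new structure is needed.
 *      - Allocating [0x2000, 0x3000) (a middle alloc) leaves two remainders,
 *        [0x1000, 0x2000) and [0x3000, 0x5000), so 2 new structures are
 *        needed.
 */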
159
160 /*
161  * vmem_populate() preallocates segment structures for vmem to do its work.
162  * It must preallocate enough for the worst case, which is when we must import
163  * a new span and then allocate from the middle of it.
164  */
165 #define VMEM_SEGS_PER_ALLOC_MAX         \
166         (VMEM_SEGS_PER_SPAN_CREATE + VMEM_SEGS_PER_MIDDLE_ALLOC)
167
168 /*
169  * The segment structures themselves are allocated from vmem_seg_arena, so
170  * we have a recursion problem when vmem_seg_arena needs to populate itself.
171  * We address this by working out the maximum number of segment structures
172  * this act will require, and multiplying by the maximum number of threads
173  * that we'll allow to do it simultaneously.
174  *
175  * The worst-case segment consumption to populate vmem_seg_arena is as
176  * follows (depicted as a stack trace to indicate why events are occurring):
177  *
178  * vmem_alloc(vmem_seg_arena)           -> 2 segs (span create + exact alloc)
179  *  vmem_alloc(vmem_internal_arena)     -> 2 segs (span create + exact alloc)
180  *   heap_alloc(heap_arena)
181  *    vmem_alloc(heap_arena)            -> 4 segs (span create + alloc)
182  *     parent_alloc(parent_arena)
183  *      _vmem_extend_alloc(parent_arena) -> 3 segs (span create + left alloc)
184  *
185  * Note:  The reservation for heap_arena must be 4, since vmem_xalloc()
186  * is overly pessimistic on allocations where parent_arena has a stricter
187  * alignment than heap_arena.
188  *
189  * The worst-case consumption for any arena is 4 segment structures.
190  * For now, we only support VM_NOSLEEP allocations, so as long as we
191  * serialize all vmem_populates, a 4-seg reserve is sufficient.
192  */
193 #define VMEM_POPULATE_SEGS_PER_ARENA    4
194 #define VMEM_POPULATE_LOCKS             1
195
196 #define VMEM_POPULATE_RESERVE           \
197         (VMEM_POPULATE_SEGS_PER_ARENA * VMEM_POPULATE_LOCKS)
198
199 /*
200  * vmem_populate() ensures that each arena has VMEM_MINFREE seg structures
201  * so that it can satisfy the worst-case allocation *and* participate in
202  * worst-case allocation from vmem_seg_arena.
203  */
204 #define VMEM_MINFREE    (VMEM_POPULATE_RESERVE + VMEM_SEGS_PER_ALLOC_MAX)
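
/*
 * Worked arithmetic (editor's addition): with the values defined above,
 *
 *      VMEM_POPULATE_RESERVE   = 4 * 1 = 4
 *      VMEM_SEGS_PER_ALLOC_MAX = 2 + 2 = 4
 *      VMEM_MINFREE            = 4 + 4 = 8
 *
 * so vmem_populate() tops each arena up to 8 free vmem_seg_t structures.
 */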
205
206 /* Don't assume new statics are zeroed - see vmem_startup() */
207 static vmem_t vmem0[VMEM_INITIAL];
208 static vmem_t *vmem_populator[VMEM_INITIAL];
209 static uint32_t vmem_id;
210 static uint32_t vmem_populators;
211 static vmem_seg_t vmem_seg0[VMEM_SEG_INITIAL];
212 static vmem_seg_t *vmem_segfree;
213 static mutex_t vmem_list_lock = DEFAULTMUTEX;
214 static mutex_t vmem_segfree_lock = DEFAULTMUTEX;
215 static vmem_populate_lock_t vmem_nosleep_lock;
216 #define IN_POPULATE()   (vmem_nosleep_lock.vmpl_thr == thr_self())
217 static vmem_t *vmem_list;
218 static vmem_t *vmem_internal_arena;
219 static vmem_t *vmem_seg_arena;
220 static vmem_t *vmem_hash_arena;
221 static vmem_t *vmem_vmem_arena;
222
223 vmem_t *vmem_heap;
224 vmem_alloc_t *vmem_heap_alloc;
225 vmem_free_t *vmem_heap_free;
226
227 uint32_t vmem_mtbf;             /* mean time between failures [default: off] */
228 size_t vmem_seg_size = sizeof (vmem_seg_t);
229
230 /*
231  * We use the _ version, since we don't want to be cancelled.
232  * Actually, this is automatically taken care of by including "mtlib.h".
233  */
234 extern int _cond_wait(cond_t *cv, mutex_t *mutex);
235
236 /*
237  * Insert/delete from arena list (type 'a') or next-of-kin list (type 'k').
238  */
239 #define VMEM_INSERT(vprev, vsp, type)                                   \
240 {                                                                       \
241         vmem_seg_t *vnext = (vprev)->vs_##type##next;                   \
242         (vsp)->vs_##type##next = (vnext);                               \
243         (vsp)->vs_##type##prev = (vprev);                               \
244         (vprev)->vs_##type##next = (vsp);                               \
245         (vnext)->vs_##type##prev = (vsp);                               \
246 }
247
248 #define VMEM_DELETE(vsp, type)                                          \
249 {                                                                       \
250         vmem_seg_t *vprev = (vsp)->vs_##type##prev;                     \
251         vmem_seg_t *vnext = (vsp)->vs_##type##next;                     \
252         (vprev)->vs_##type##next = (vnext);                             \
253         (vnext)->vs_##type##prev = (vprev);                             \
254 }
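
/*
 * Editor's note (illustrative): the 'type' argument selects, by token
 * pasting, which pair of links the macros manipulate.  For example,
 * VMEM_INSERT(vprev, vsp, k) expands to roughly:
 *
 *      vmem_seg_t *vnext = (vprev)->vs_knext;
 *      (vsp)->vs_knext = (vnext);
 *      (vsp)->vs_kprev = (vprev);
 *      (vprev)->vs_knext = (vsp);
 *      (vnext)->vs_kprev = (vsp);
 *
 * while type 'a' operates on vs_anext/vs_aprev in the same way.
 */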
255
256 /*
257  * Get a vmem_seg_t from the global segfree list.
258  */
259 static vmem_seg_t *
260 vmem_getseg_global(void)
261 {
262         vmem_seg_t *vsp;
263
264         (void) mutex_lock(&vmem_segfree_lock);
265         if ((vsp = vmem_segfree) != NULL)
266                 vmem_segfree = vsp->vs_knext;
267         (void) mutex_unlock(&vmem_segfree_lock);
268
269         return (vsp);
270 }
271
272 /*
273  * Put a vmem_seg_t on the global segfree list.
274  */
275 static void
276 vmem_putseg_global(vmem_seg_t *vsp)
277 {
278         (void) mutex_lock(&vmem_segfree_lock);
279         vsp->vs_knext = vmem_segfree;
280         vmem_segfree = vsp;
281         (void) mutex_unlock(&vmem_segfree_lock);
282 }
283
284 /*
285  * Get a vmem_seg_t from vmp's segfree list.
286  */
287 static vmem_seg_t *
288 vmem_getseg(vmem_t *vmp)
289 {
290         vmem_seg_t *vsp;
291
292         ASSERT(vmp->vm_nsegfree > 0);
293
294         vsp = vmp->vm_segfree;
295         vmp->vm_segfree = vsp->vs_knext;
296         vmp->vm_nsegfree--;
297
298         return (vsp);
299 }
300
301 /*
302  * Put a vmem_seg_t on vmp's segfree list.
303  */
304 static void
305 vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
306 {
307         vsp->vs_knext = vmp->vm_segfree;
308         vmp->vm_segfree = vsp;
309         vmp->vm_nsegfree++;
310 }
311
312 /*
313  * Add vsp to the appropriate freelist.
314  */
315 static void
316 vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
317 {
318         vmem_seg_t *vprev;
319
320         ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
321
322         vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
323         vsp->vs_type = VMEM_FREE;
324         vmp->vm_freemap |= VS_SIZE(vprev);
325         VMEM_INSERT(vprev, vsp, k);
326
327         (void) cond_broadcast(&vmp->vm_cv);
328 }
329
330 /*
331  * Take vsp from the freelist.
332  */
333 static void
334 vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
335 {
336         ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
337         ASSERT(vsp->vs_type == VMEM_FREE);
338
339         if (vsp->vs_knext->vs_start == 0 && vsp->vs_kprev->vs_start == 0) {
340                 /*
341                  * The segments on both sides of 'vsp' are freelist heads,
342                  * so taking vsp leaves the freelist at vsp->vs_kprev empty.
343                  */
344                 ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
345                 vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
346         }
347         VMEM_DELETE(vsp, k);
348 }
349
350 /*
351  * Add vsp to the allocated-segment hash table and update kstats.
352  */
353 static void
354 vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
355 {
356         vmem_seg_t **bucket;
357
358         vsp->vs_type = VMEM_ALLOC;
359         bucket = VMEM_HASH(vmp, vsp->vs_start);
360         vsp->vs_knext = *bucket;
361         *bucket = vsp;
362
363         if (vmem_seg_size == sizeof (vmem_seg_t)) {
364                 vsp->vs_depth = (uint8_t)getpcstack(vsp->vs_stack,
365                     VMEM_STACK_DEPTH, 0);
366                 vsp->vs_thread = thr_self();
367                 vsp->vs_timestamp = gethrtime();
368         } else {
369                 vsp->vs_depth = 0;
370         }
371
372         vmp->vm_kstat.vk_alloc++;
373         vmp->vm_kstat.vk_mem_inuse += VS_SIZE(vsp);
374 }
375
376 /*
377  * Remove vsp from the allocated-segment hash table and update kstats.
378  */
379 static vmem_seg_t *
380 vmem_hash_delete(vmem_t *vmp, uintptr_t addr, size_t size)
381 {
382         vmem_seg_t *vsp, **prev_vspp;
383
384         prev_vspp = VMEM_HASH(vmp, addr);
385         while ((vsp = *prev_vspp) != NULL) {
386                 if (vsp->vs_start == addr) {
387                         *prev_vspp = vsp->vs_knext;
388                         break;
389                 }
390                 vmp->vm_kstat.vk_lookup++;
391                 prev_vspp = &vsp->vs_knext;
392         }
393
394         if (vsp == NULL) {
395                 umem_panic("vmem_hash_delete(%p, %lx, %lu): bad free",
396                     vmp, addr, size);
397         }
398         if (VS_SIZE(vsp) != size) {
399                 umem_panic("vmem_hash_delete(%p, %lx, %lu): wrong size "
400                     "(expect %lu)", vmp, addr, size, VS_SIZE(vsp));
401         }
402
403         vmp->vm_kstat.vk_free++;
404         vmp->vm_kstat.vk_mem_inuse -= size;
405
406         return (vsp);
407 }
408
409 /*
410  * Create a segment spanning the range [start, end) and add it to the arena.
411  */
412 static vmem_seg_t *
413 vmem_seg_create(vmem_t *vmp, vmem_seg_t *vprev, uintptr_t start, uintptr_t end)
414 {
415         vmem_seg_t *newseg = vmem_getseg(vmp);
416
417         newseg->vs_start = start;
418         newseg->vs_end = end;
419         newseg->vs_type = 0;
420         newseg->vs_import = 0;
421
422         VMEM_INSERT(vprev, newseg, a);
423
424         return (newseg);
425 }
426
427 /*
428  * Remove segment vsp from the arena.
429  */
430 static void
431 vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
432 {
433         ASSERT(vsp->vs_type != VMEM_ROTOR);
434         VMEM_DELETE(vsp, a);
435
436         vmem_putseg(vmp, vsp);
437 }
438
439 /*
440  * Add the span [vaddr, vaddr + size) to vmp and update kstats.
441  */
442 static vmem_seg_t *
443 vmem_span_create(vmem_t *vmp, void *vaddr, size_t size, uint8_t import)
444 {
445         vmem_seg_t *knext;
446         vmem_seg_t *newseg, *span;
447         uintptr_t start = (uintptr_t)vaddr;
448         uintptr_t end = start + size;
449
450         knext = &vmp->vm_seg0;
451         if (!import && vmp->vm_source_alloc == NULL) {
452                 vmem_seg_t *kend, *kprev;
453                 /*
454                  * non-imported spans are sorted in address order.  This
455                  * makes vmem_extend_unlocked() much more effective.
456                  *
457                  * We search in reverse order, since new spans are
458                  * generally at higher addresses.
459                  */
460                 kend = &vmp->vm_seg0;
461                 for (kprev = kend->vs_kprev; kprev != kend;
462                     kprev = kprev->vs_kprev) {
463                         if (!kprev->vs_import && (kprev->vs_end - 1) < start)
464                                 break;
465                 }
466                 knext = kprev->vs_knext;
467         }
468
469         ASSERT(MUTEX_HELD(&vmp->vm_lock));
470
471         if ((start | end) & (vmp->vm_quantum - 1)) {
472                 umem_panic("vmem_span_create(%p, %p, %lu): misaligned",
473                     vmp, vaddr, size);
474         }
475
476         span = vmem_seg_create(vmp, knext->vs_aprev, start, end);
477         span->vs_type = VMEM_SPAN;
478         VMEM_INSERT(knext->vs_kprev, span, k);
479
480         newseg = vmem_seg_create(vmp, span, start, end);
481         vmem_freelist_insert(vmp, newseg);
482
483         newseg->vs_import = import;
484         if (import)
485                 vmp->vm_kstat.vk_mem_import += size;
486         vmp->vm_kstat.vk_mem_total += size;
487
488         return (newseg);
489 }
490
491 /*
492  * Remove span vsp from vmp and update kstats.
493  */
494 static void
495 vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
496 {
497         vmem_seg_t *span = vsp->vs_aprev;
498         size_t size = VS_SIZE(vsp);
499
500         ASSERT(MUTEX_HELD(&vmp->vm_lock));
501         ASSERT(span->vs_type == VMEM_SPAN);
502
503         if (vsp->vs_import)
504                 vmp->vm_kstat.vk_mem_import -= size;
505         vmp->vm_kstat.vk_mem_total -= size;
506
507         VMEM_DELETE(span, k);
508
509         vmem_seg_destroy(vmp, vsp);
510         vmem_seg_destroy(vmp, span);
511 }
512
513 /*
514  * Allocate the subrange [addr, addr + size) from segment vsp.
515  * If there are leftovers on either side, place them on the freelist.
516  * Returns a pointer to the segment representing [addr, addr + size).
517  */
518 static vmem_seg_t *
519 vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
520 {
521         uintptr_t vs_start = vsp->vs_start;
522         uintptr_t vs_end = vsp->vs_end;
523         size_t vs_size = vs_end - vs_start;
524         size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
525         uintptr_t addr_end = addr + realsize;
526
527         ASSERT(P2PHASE(vs_start, vmp->vm_quantum) == 0);
528         ASSERT(P2PHASE(addr, vmp->vm_quantum) == 0);
529         ASSERT(vsp->vs_type == VMEM_FREE);
530         ASSERT(addr >= vs_start && addr_end - 1 <= vs_end - 1);
531         ASSERT(addr - 1 <= addr_end - 1);
532
533         /*
534          * If we're allocating from the start of the segment, and the
535          * remainder will be on the same freelist, we can save quite
536          * a bit of work.
537          */
538         if (P2SAMEHIGHBIT(vs_size, vs_size - realsize) && addr == vs_start) {
539                 ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
540                 vsp->vs_start = addr_end;
541                 vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
542                 vmem_hash_insert(vmp, vsp);
543                 return (vsp);
544         }
545
546         vmem_freelist_delete(vmp, vsp);
547
548         if (vs_end != addr_end)
549                 vmem_freelist_insert(vmp,
550                     vmem_seg_create(vmp, vsp, addr_end, vs_end));
551
552         if (vs_start != addr)
553                 vmem_freelist_insert(vmp,
554                     vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));
555
556         vsp->vs_start = addr;
557         vsp->vs_end = addr + size;
558
559         vmem_hash_insert(vmp, vsp);
560         return (vsp);
561 }
562
563 /*
564  * We cannot reap if we are in the middle of a vmem_populate().
565  */
566 void
567 vmem_reap(void)
568 {
569         if (!IN_POPULATE())
570                 umem_reap();
571 }
572
573 /*
574  * Populate vmp's segfree list with VMEM_MINFREE vmem_seg_t structures.
575  */
576 static int
577 vmem_populate(vmem_t *vmp, int vmflag)
578 {
579         char *p;
580         vmem_seg_t *vsp;
581         ssize_t nseg;
582         size_t size;
583         vmem_populate_lock_t *lp;
584         int i;
585
586         while (vmp->vm_nsegfree < VMEM_MINFREE &&
587             (vsp = vmem_getseg_global()) != NULL)
588                 vmem_putseg(vmp, vsp);
589
590         if (vmp->vm_nsegfree >= VMEM_MINFREE)
591                 return (1);
592
593         /*
594          * If we're already populating, tap the reserve.
595          */
596         if (vmem_nosleep_lock.vmpl_thr == thr_self()) {
597                 ASSERT(vmp->vm_cflags & VMC_POPULATOR);
598                 return (1);
599         }
600
601         (void) mutex_unlock(&vmp->vm_lock);
602
603         ASSERT(vmflag & VM_NOSLEEP);    /* we do not allow sleep allocations */
604         lp = &vmem_nosleep_lock;
605
606         /*
607          * Cannot be just a mutex_lock(), since that has no effect if
608          * libthread is not linked.
609          */
610         (void) mutex_lock(&lp->vmpl_mutex);
611         ASSERT(lp->vmpl_thr == 0);
612         lp->vmpl_thr = thr_self();
613
614         nseg = VMEM_MINFREE + vmem_populators * VMEM_POPULATE_RESERVE;
615         size = P2ROUNDUP(nseg * vmem_seg_size, vmem_seg_arena->vm_quantum);
616         nseg = size / vmem_seg_size;
617
618         /*
619          * The following vmem_alloc() may need to populate vmem_seg_arena
620          * and all the things it imports from.  When doing so, it will tap
621          * each arena's reserve to prevent recursion (see the block comment
622          * above the definition of VMEM_POPULATE_RESERVE).
623          *
624          * During this allocation, vmem_reap() is a no-op.  If the allocation
625          * fails, we call vmem_reap() after dropping the population lock.
626          */
627         p = vmem_alloc(vmem_seg_arena, size, vmflag & VM_UMFLAGS);
628         if (p == NULL) {
629                 lp->vmpl_thr = 0;
630                 (void) mutex_unlock(&lp->vmpl_mutex);
631                 vmem_reap();
632
633                 (void) mutex_lock(&vmp->vm_lock);
634                 vmp->vm_kstat.vk_populate_fail++;
635                 return (0);
636         }
637         /*
638          * Restock the arenas that may have been depleted during population.
639          */
640         for (i = 0; i < vmem_populators; i++) {
641                 (void) mutex_lock(&vmem_populator[i]->vm_lock);
642                 while (vmem_populator[i]->vm_nsegfree < VMEM_POPULATE_RESERVE)
643                         vmem_putseg(vmem_populator[i],
644                             (vmem_seg_t *)(p + --nseg * vmem_seg_size));
645                 (void) mutex_unlock(&vmem_populator[i]->vm_lock);
646         }
647
648         lp->vmpl_thr = 0;
649         (void) mutex_unlock(&lp->vmpl_mutex);
650         (void) mutex_lock(&vmp->vm_lock);
651
652         /*
653          * Now take our own segments.
654          */
655         ASSERT(nseg >= VMEM_MINFREE);
656         while (vmp->vm_nsegfree < VMEM_MINFREE)
657                 vmem_putseg(vmp, (vmem_seg_t *)(p + --nseg * vmem_seg_size));
658
659         /*
660          * Give the remainder to charity.
661          */
662         while (nseg > 0)
663                 vmem_putseg_global((vmem_seg_t *)(p + --nseg * vmem_seg_size));
664
665         return (1);
666 }
667
668 /*
669  * Advance a walker from its previous position to 'afterme'.
670  * Note: may drop and reacquire vmp->vm_lock.
671  */
672 static void
673 vmem_advance(vmem_t *vmp, vmem_seg_t *walker, vmem_seg_t *afterme)
674 {
675         vmem_seg_t *vprev = walker->vs_aprev;
676         vmem_seg_t *vnext = walker->vs_anext;
677         vmem_seg_t *vsp = NULL;
678
679         VMEM_DELETE(walker, a);
680
681         if (afterme != NULL)
682                 VMEM_INSERT(afterme, walker, a);
683
684         /*
685          * The walker segment's presence may have prevented its neighbors
686          * from coalescing.  If so, coalesce them now.
687          */
688         if (vprev->vs_type == VMEM_FREE) {
689                 if (vnext->vs_type == VMEM_FREE) {
690                         ASSERT(vprev->vs_end == vnext->vs_start);
691                         vmem_freelist_delete(vmp, vnext);
692                         vmem_freelist_delete(vmp, vprev);
693                         vprev->vs_end = vnext->vs_end;
694                         vmem_freelist_insert(vmp, vprev);
695                         vmem_seg_destroy(vmp, vnext);
696                 }
697                 vsp = vprev;
698         } else if (vnext->vs_type == VMEM_FREE) {
699                 vsp = vnext;
700         }
701
702         /*
703          * vsp could represent a complete imported span,
704          * in which case we must return it to the source.
705          */
706         if (vsp != NULL && vsp->vs_import && vmp->vm_source_free != NULL &&
707             vsp->vs_aprev->vs_type == VMEM_SPAN &&
708             vsp->vs_anext->vs_type == VMEM_SPAN) {
709                 void *vaddr = (void *)vsp->vs_start;
710                 size_t size = VS_SIZE(vsp);
711                 ASSERT(size == VS_SIZE(vsp->vs_aprev));
712                 vmem_freelist_delete(vmp, vsp);
713                 vmem_span_destroy(vmp, vsp);
714                 (void) mutex_unlock(&vmp->vm_lock);
715                 vmp->vm_source_free(vmp->vm_source, vaddr, size);
716                 (void) mutex_lock(&vmp->vm_lock);
717         }
718 }
719
720 /*
721  * VM_NEXTFIT allocations deliberately cycle through all virtual addresses
722  * in an arena, so that we avoid reusing addresses for as long as possible.
723  * This helps to catch use-after-free bugs.  It's also the perfect policy
724  * for allocating things like process IDs, where we want to cycle through
725  * all values in order.
726  */
727 static void *
728 vmem_nextfit_alloc(vmem_t *vmp, size_t size, int vmflag)
729 {
730         vmem_seg_t *vsp, *rotor;
731         uintptr_t addr;
732         size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
733         size_t vs_size;
734
735         (void) mutex_lock(&vmp->vm_lock);
736
737         if (vmp->vm_nsegfree < VMEM_MINFREE && !vmem_populate(vmp, vmflag)) {
738                 (void) mutex_unlock(&vmp->vm_lock);
739                 return (NULL);
740         }
741
742         /*
743          * The common case is that the segment right after the rotor is free,
744          * and large enough that extracting 'size' bytes won't change which
745          * freelist it's on.  In this case we can avoid a *lot* of work.
746          * Instead of the normal vmem_seg_alloc(), we just advance the start
747          * address of the victim segment.  Instead of moving the rotor, we
748          * create the new segment structure *behind the rotor*, which has
749          * the same effect.  And finally, we know we don't have to coalesce
750          * the rotor's neighbors because the new segment lies between them.
751          */
752         rotor = &vmp->vm_rotor;
753         vsp = rotor->vs_anext;
754         if (vsp->vs_type == VMEM_FREE && (vs_size = VS_SIZE(vsp)) > realsize &&
755             P2SAMEHIGHBIT(vs_size, vs_size - realsize)) {
756                 ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
757                 addr = vsp->vs_start;
758                 vsp->vs_start = addr + realsize;
759                 vmem_hash_insert(vmp,
760                     vmem_seg_create(vmp, rotor->vs_aprev, addr, addr + size));
761                 (void) mutex_unlock(&vmp->vm_lock);
762                 return ((void *)addr);
763         }
764
765         /*
766          * Starting at the rotor, look for a segment large enough to
767          * satisfy the allocation.
768          */
769         for (;;) {
770                 vmp->vm_kstat.vk_search++;
771                 if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
772                         break;
773                 vsp = vsp->vs_anext;
774                 if (vsp == rotor) {
775                         /*
776                          * We've come full circle.  One possibility is that
777                          * there's actually enough space, but the rotor itself
778                          * is preventing the allocation from succeeding because
779                          * it's sitting between two free segments.  Therefore,
780                          * we advance the rotor and see if that liberates a
781                          * suitable segment.
782                          */
783                         vmem_advance(vmp, rotor, rotor->vs_anext);
784                         vsp = rotor->vs_aprev;
785                         if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
786                                 break;
787                         /*
788                          * If there's a lower arena we can import from, or it's
789                          * a VM_NOSLEEP allocation, let vmem_xalloc() handle it.
790                          * Otherwise, wait until another thread frees something.
791                          */
792                         if (vmp->vm_source_alloc != NULL ||
793                             (vmflag & VM_NOSLEEP)) {
794                                 (void) mutex_unlock(&vmp->vm_lock);
795                                 return (vmem_xalloc(vmp, size, vmp->vm_quantum,
796                                     0, 0, NULL, NULL, vmflag & VM_UMFLAGS));
797                         }
798                         vmp->vm_kstat.vk_wait++;
799                         (void) _cond_wait(&vmp->vm_cv, &vmp->vm_lock);
800                         vsp = rotor->vs_anext;
801                 }
802         }
803
804         /*
805          * We found a segment.  Extract enough space to satisfy the allocation.
806          */
807         addr = vsp->vs_start;
808         vsp = vmem_seg_alloc(vmp, vsp, addr, size);
809         ASSERT(vsp->vs_type == VMEM_ALLOC &&
810             vsp->vs_start == addr && vsp->vs_end == addr + size);
811
812         /*
813          * Advance the rotor to right after the newly-allocated segment.
814          * That's where the next VM_NEXTFIT allocation will begin searching.
815          */
816         vmem_advance(vmp, rotor, vsp);
817         (void) mutex_unlock(&vmp->vm_lock);
818         return ((void *)addr);
819 }
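
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * VM_NEXTFIT is a natural fit for cycling ID allocators.  The arena name,
 * ID range, and helper names below are assumptions chosen for the example.
 */
#if 0   /* example only; not compiled */
static vmem_t *example_id_arena;

static void
example_id_arena_init(void)
{
        /* IDs 100..999, quantum 1, no quantum caches, no backing source */
        example_id_arena = vmem_create("example_id", (void *)100, 900, 1,
            NULL, NULL, NULL, 0, VM_NOSLEEP);
}

static int
example_id_alloc(void)
{
        /* VM_NEXTFIT cycles through all IDs before any value is reused */
        void *id = vmem_alloc(example_id_arena, 1, VM_NOSLEEP | VM_NEXTFIT);
        return (id == NULL ? -1 : (int)(uintptr_t)id);
}

static void
example_id_free(int id)
{
        vmem_free(example_id_arena, (void *)(uintptr_t)id, 1);
}
#endif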
820
821 /*
822  * Allocate size bytes at offset phase from an align boundary such that the
823  * resulting segment [addr, addr + size) is a subset of [minaddr, maxaddr)
824  * that does not straddle a nocross-aligned boundary.
825  */
826 void *
827 vmem_xalloc(vmem_t *vmp, size_t size, size_t align, size_t phase,
828         size_t nocross, void *minaddr, void *maxaddr, int vmflag)
829 {
830         vmem_seg_t *vsp;
831         vmem_seg_t *vbest = NULL;
832         uintptr_t addr, taddr, start, end;
833         void *vaddr;
834         int hb, flist, resv;
835         uint32_t mtbf;
836
837         if (phase > 0 && phase >= align)
838                 umem_panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
839                     "invalid phase",
840                     (void *)vmp, size, align, phase, nocross,
841                     minaddr, maxaddr, vmflag);
842
843         if (align == 0)
844                 align = vmp->vm_quantum;
845
846         if ((align | phase | nocross) & (vmp->vm_quantum - 1)) {
847                 umem_panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
848                     "parameters not vm_quantum aligned",
849                     (void *)vmp, size, align, phase, nocross,
850                     minaddr, maxaddr, vmflag);
851         }
852
853         if (nocross != 0 &&
854             (align > nocross || P2ROUNDUP(phase + size, align) > nocross)) {
855                 umem_panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
856                     "overconstrained allocation",
857                     (void *)vmp, size, align, phase, nocross,
858                     minaddr, maxaddr, vmflag);
859         }
860
861         if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
862             (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
863                 return (NULL);
864
865         (void) mutex_lock(&vmp->vm_lock);
866         for (;;) {
867                 if (vmp->vm_nsegfree < VMEM_MINFREE &&
868                     !vmem_populate(vmp, vmflag))
869                         break;
870
871                 /*
872                  * highbit() returns the highest bit + 1, which is exactly
873                  * what we want: we want to search the first freelist whose
874                  * members are *definitely* large enough to satisfy our
875                  * allocation.  However, there are certain cases in which we
876                  * want to look at the next-smallest freelist (which *might*
877                  * be able to satisfy the allocation):
878                  *
879                  * (1)  The size is exactly a power of 2, in which case
880                  *      the smaller freelist is always big enough;
881                  *
882                  * (2)  All other freelists are empty;
883                  *
884                  * (3)  We're in the highest possible freelist, which is
885                  *      always empty (e.g. the 4GB freelist on 32-bit systems);
886                  *
887                  * (4)  We're doing a best-fit or first-fit allocation.
888                  */
889                 if ((size & (size - 1)) == 0) {
890                         flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
891                 } else {
892                         hb = highbit(size);
893                         if ((vmp->vm_freemap >> hb) == 0 ||
894                             hb == VMEM_FREELISTS ||
895                             (vmflag & (VM_BESTFIT | VM_FIRSTFIT)))
896                                 hb--;
897                         flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
898                 }
899
900                 for (vbest = NULL, vsp = (flist == 0) ? NULL :
901                     vmp->vm_freelist[flist - 1].vs_knext;
902                     vsp != NULL; vsp = vsp->vs_knext) {
903                         vmp->vm_kstat.vk_search++;
904                         if (vsp->vs_start == 0) {
905                                 /*
906                                  * We're moving up to a larger freelist,
907                                  * so if we've already found a candidate,
908                                  * the fit can't possibly get any better.
909                                  */
910                                 if (vbest != NULL)
911                                         break;
912                                 /*
913                                  * Find the next non-empty freelist.
914                                  */
915                                 flist = lowbit(P2ALIGN(vmp->vm_freemap,
916                                     VS_SIZE(vsp)));
917                                 if (flist-- == 0)
918                                         break;
919                                 vsp = (vmem_seg_t *)&vmp->vm_freelist[flist];
920                                 ASSERT(vsp->vs_knext->vs_type == VMEM_FREE);
921                                 continue;
922                         }
923                         if (vsp->vs_end - 1 < (uintptr_t)minaddr)
924                                 continue;
925                         if (vsp->vs_start > (uintptr_t)maxaddr - 1)
926                                 continue;
927                         start = MAX(vsp->vs_start, (uintptr_t)minaddr);
928                         end = MIN(vsp->vs_end - 1, (uintptr_t)maxaddr - 1) + 1;
929                         taddr = P2PHASEUP(start, align, phase);
930                         if (P2CROSS(taddr, taddr + size - 1, nocross))
931                                 taddr +=
932                                     P2ROUNDUP(P2NPHASE(taddr, nocross), align);
933                         if ((taddr - start) + size > end - start ||
934                             (vbest != NULL && VS_SIZE(vsp) >= VS_SIZE(vbest)))
935                                 continue;
936                         vbest = vsp;
937                         addr = taddr;
938                         if (!(vmflag & VM_BESTFIT) || VS_SIZE(vbest) == size)
939                                 break;
940                 }
941                 if (vbest != NULL)
942                         break;
943                 if (size == 0)
944                         umem_panic("vmem_xalloc(): size == 0");
945                 if (vmp->vm_source_alloc != NULL && nocross == 0 &&
946                     minaddr == NULL && maxaddr == NULL) {
947                         size_t asize = P2ROUNDUP(size + phase,
948                             MAX(align, vmp->vm_source->vm_quantum));
949                         if (asize < size) {             /* overflow */
950                                 (void) mutex_unlock(&vmp->vm_lock);
951                                 if (vmflag & VM_NOSLEEP)
952                                         return (NULL);
953
954                                 umem_panic("vmem_xalloc(): "
955                                     "overflow on VM_SLEEP allocation");
956                         }
957                         /*
958                          * Determine how many segment structures we'll consume.
959                          * The calculation must be precise because if we're
960                          * here on behalf of vmem_populate(), we are taking
961                          * segments from a very limited reserve.
962                          */
963                         resv = (size == asize) ?
964                             VMEM_SEGS_PER_SPAN_CREATE +
965                             VMEM_SEGS_PER_EXACT_ALLOC :
966                             VMEM_SEGS_PER_ALLOC_MAX;
967                         ASSERT(vmp->vm_nsegfree >= resv);
968                         vmp->vm_nsegfree -= resv;       /* reserve our segs */
969                         (void) mutex_unlock(&vmp->vm_lock);
970                         vaddr = vmp->vm_source_alloc(vmp->vm_source, asize,
971                             vmflag & VM_UMFLAGS);
972                         (void) mutex_lock(&vmp->vm_lock);
973                         vmp->vm_nsegfree += resv;       /* claim reservation */
974                         if (vaddr != NULL) {
975                                 vbest = vmem_span_create(vmp, vaddr, asize, 1);
976                                 addr = P2PHASEUP(vbest->vs_start, align, phase);
977                                 break;
978                         }
979                 }
980                 (void) mutex_unlock(&vmp->vm_lock);
981                 vmem_reap();
982                 (void) mutex_lock(&vmp->vm_lock);
983                 if (vmflag & VM_NOSLEEP)
984                         break;
985                 vmp->vm_kstat.vk_wait++;
986                 (void) _cond_wait(&vmp->vm_cv, &vmp->vm_lock);
987         }
988         if (vbest != NULL) {
989                 ASSERT(vbest->vs_type == VMEM_FREE);
990                 ASSERT(vbest->vs_knext != vbest);
991                 (void) vmem_seg_alloc(vmp, vbest, addr, size);
992                 (void) mutex_unlock(&vmp->vm_lock);
993                 ASSERT(P2PHASE(addr, align) == phase);
994                 ASSERT(!P2CROSS(addr, addr + size - 1, nocross));
995                 ASSERT(addr >= (uintptr_t)minaddr);
996                 ASSERT(addr + size - 1 <= (uintptr_t)maxaddr - 1);
997                 return ((void *)addr);
998         }
999         vmp->vm_kstat.vk_fail++;
1000         (void) mutex_unlock(&vmp->vm_lock);
1001         if (vmflag & VM_PANIC)
1002                 umem_panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
1003                     "cannot satisfy mandatory allocation",
1004                     (void *)vmp, size, align, phase, nocross,
1005                     minaddr, maxaddr, vmflag);
1006         return (NULL);
1007 }
1008
1009 /*
1010  * Free the segment [vaddr, vaddr + size), where vaddr was a constrained
1011  * allocation.  vmem_xalloc() and vmem_xfree() must always be paired because
1012  * both routines bypass the quantum caches.
1013  */
1014 void
1015 vmem_xfree(vmem_t *vmp, void *vaddr, size_t size)
1016 {
1017         vmem_seg_t *vsp, *vnext, *vprev;
1018
1019         (void) mutex_lock(&vmp->vm_lock);
1020
1021         vsp = vmem_hash_delete(vmp, (uintptr_t)vaddr, size);
1022         vsp->vs_end = P2ROUNDUP(vsp->vs_end, vmp->vm_quantum);
1023
1024         /*
1025          * Attempt to coalesce with the next segment.
1026          */
1027         vnext = vsp->vs_anext;
1028         if (vnext->vs_type == VMEM_FREE) {
1029                 ASSERT(vsp->vs_end == vnext->vs_start);
1030                 vmem_freelist_delete(vmp, vnext);
1031                 vsp->vs_end = vnext->vs_end;
1032                 vmem_seg_destroy(vmp, vnext);
1033         }
1034
1035         /*
1036          * Attempt to coalesce with the previous segment.
1037          */
1038         vprev = vsp->vs_aprev;
1039         if (vprev->vs_type == VMEM_FREE) {
1040                 ASSERT(vprev->vs_end == vsp->vs_start);
1041                 vmem_freelist_delete(vmp, vprev);
1042                 vprev->vs_end = vsp->vs_end;
1043                 vmem_seg_destroy(vmp, vsp);
1044                 vsp = vprev;
1045         }
1046
1047         /*
1048          * If the entire span is free, return it to the source.
1049          */
1050         if (vsp->vs_import && vmp->vm_source_free != NULL &&
1051             vsp->vs_aprev->vs_type == VMEM_SPAN &&
1052             vsp->vs_anext->vs_type == VMEM_SPAN) {
1053                 vaddr = (void *)vsp->vs_start;
1054                 size = VS_SIZE(vsp);
1055                 ASSERT(size == VS_SIZE(vsp->vs_aprev));
1056                 vmem_span_destroy(vmp, vsp);
1057                 (void) mutex_unlock(&vmp->vm_lock);
1058                 vmp->vm_source_free(vmp->vm_source, vaddr, size);
1059         } else {
1060                 vmem_freelist_insert(vmp, vsp);
1061                 (void) mutex_unlock(&vmp->vm_lock);
1062         }
1063 }
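
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a constrained allocation paired with its matching vmem_xfree().  The sizes
 * and flags are assumptions; the arena is assumed to have a power-of-2
 * quantum no larger than the alignment used here.
 */
#if 0   /* example only; not compiled */
static void *
example_xalloc(vmem_t *arena)
{
        /*
         * 8K bytes, 64K-aligned, phase 0, not crossing a 1M boundary,
         * with no minaddr/maxaddr constraint.
         */
        return (vmem_xalloc(arena, 8192, 65536, 0, 1024 * 1024,
            NULL, NULL, VM_NOSLEEP | VM_BESTFIT));
}

static void
example_xfree(vmem_t *arena, void *p)
{
        /* constrained allocations must be released with vmem_xfree() */
        vmem_xfree(arena, p, 8192);
}
#endif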
1064
1065 /*
1066  * Allocate size bytes from arena vmp.  Returns the allocated address
1067  * on success, NULL on failure.  vmflag specifies VM_NOSLEEP (VM_SLEEP is
1068  * not supported in this userland version of vmem; see the block comment at
1069  * the top of this file), and may also specify best-fit, first-fit, or
1070  * next-fit allocation policy instead of the default instant-fit policy.
1071  */
1072 void *
1073 vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
1074 {
1075         vmem_seg_t *vsp;
1076         uintptr_t addr;
1077         int hb;
1078         int flist = 0;
1079         uint32_t mtbf;
1080
1081         if (size - 1 < vmp->vm_qcache_max) {
1082                 ASSERT(vmflag & VM_NOSLEEP);
1083                 return (_umem_cache_alloc(vmp->vm_qcache[(size - 1) >>
1084                     vmp->vm_qshift], UMEM_DEFAULT));
1085         }
1086
1087         if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
1088             (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
1089                 return (NULL);
1090
1091         if (vmflag & VM_NEXTFIT)
1092                 return (vmem_nextfit_alloc(vmp, size, vmflag));
1093
1094         if (vmflag & (VM_BESTFIT | VM_FIRSTFIT))
1095                 return (vmem_xalloc(vmp, size, vmp->vm_quantum, 0, 0,
1096                     NULL, NULL, vmflag));
1097
1098         /*
1099          * Unconstrained instant-fit allocation from the segment list.
1100          */
1101         (void) mutex_lock(&vmp->vm_lock);
1102
1103         if (vmp->vm_nsegfree >= VMEM_MINFREE || vmem_populate(vmp, vmflag)) {
1104                 if ((size & (size - 1)) == 0)
1105                         flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
1106                 else if ((hb = highbit(size)) < VMEM_FREELISTS)
1107                         flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
1108         }
1109
1110         if (flist-- == 0) {
1111                 (void) mutex_unlock(&vmp->vm_lock);
1112                 return (vmem_xalloc(vmp, size, vmp->vm_quantum,
1113                     0, 0, NULL, NULL, vmflag));
1114         }
1115
1116         ASSERT(size <= (1UL << flist));
1117         vsp = vmp->vm_freelist[flist].vs_knext;
1118         addr = vsp->vs_start;
1119         (void) vmem_seg_alloc(vmp, vsp, addr, size);
1120         (void) mutex_unlock(&vmp->vm_lock);
1121         return ((void *)addr);
1122 }
1123
1124 /*
1125  * Free the segment [vaddr, vaddr + size).
1126  */
1127 void
1128 vmem_free(vmem_t *vmp, void *vaddr, size_t size)
1129 {
1130         if (size - 1 < vmp->vm_qcache_max)
1131                 _umem_cache_free(vmp->vm_qcache[(size - 1) >> vmp->vm_qshift],
1132                     vaddr);
1133         else
1134                 vmem_xfree(vmp, vaddr, size);
1135 }
1136
1137 /*
1138  * Determine whether arena vmp contains the segment [vaddr, vaddr + size).
1139  */
1140 int
1141 vmem_contains(vmem_t *vmp, void *vaddr, size_t size)
1142 {
1143         uintptr_t start = (uintptr_t)vaddr;
1144         uintptr_t end = start + size;
1145         vmem_seg_t *vsp;
1146         vmem_seg_t *seg0 = &vmp->vm_seg0;
1147
1148         (void) mutex_lock(&vmp->vm_lock);
1149         vmp->vm_kstat.vk_contains++;
1150         for (vsp = seg0->vs_knext; vsp != seg0; vsp = vsp->vs_knext) {
1151                 vmp->vm_kstat.vk_contains_search++;
1152                 ASSERT(vsp->vs_type == VMEM_SPAN);
1153                 if (start >= vsp->vs_start && end - 1 <= vsp->vs_end - 1)
1154                         break;
1155         }
1156         (void) mutex_unlock(&vmp->vm_lock);
1157         return (vsp != seg0);
1158 }
1159
1160 /*
1161  * Add the span [vaddr, vaddr + size) to arena vmp.
1162  */
1163 void *
1164 vmem_add(vmem_t *vmp, void *vaddr, size_t size, int vmflag)
1165 {
1166         if (vaddr == NULL || size == 0) {
1167                 umem_panic("vmem_add(%p, %p, %lu): bad arguments",
1168                     vmp, vaddr, size);
1169         }
1170
1171         ASSERT(!vmem_contains(vmp, vaddr, size));
1172
1173         (void) mutex_lock(&vmp->vm_lock);
1174         if (vmem_populate(vmp, vmflag))
1175                 (void) vmem_span_create(vmp, vaddr, size, 0);
1176         else
1177                 vaddr = NULL;
1178         (void) cond_broadcast(&vmp->vm_cv);
1179         (void) mutex_unlock(&vmp->vm_lock);
1180         return (vaddr);
1181 }
1182
1183 /*
1184  * Adds the address range [addr, endaddr) to arena vmp, by either:
1185  *   1. joining two existing spans, [x, addr), and [endaddr, y) (which
1186  *      are in that order) into a single [x, y) span,
1187  *   2. expanding an existing [x, addr) span to [x, endaddr),
1188  *   3. expanding an existing [endaddr, x) span to [addr, x), or
1189  *   4. creating a new [addr, endaddr) span.
1190  *
1191  * Called with vmp->vm_lock held, and a successful vmem_populate() completed.
1192  * Cannot fail.  Returns the new segment.
1193  *
1194  * NOTE:  this algorithm is linear-time in the number of spans, but is
1195  *      constant-time when you are extending the last (highest-addressed)
1196  *      span.
1197  */
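/*
 * Concrete example (editor's addition): if the arena already holds spans
 * [0x10000, 0x20000) and [0x30000, 0x40000), then adding [0x20000, 0x30000)
 * is case 1 (the two spans join into [0x10000, 0x40000)); adding
 * [0x40000, 0x50000) is case 2; adding [0x08000, 0x10000) is case 3; and a
 * disjoint range such as [0x60000, 0x70000) is case 4, creating a new span.
 */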
1198 static vmem_seg_t *
1199 vmem_extend_unlocked(vmem_t *vmp, uintptr_t addr, uintptr_t endaddr)
1200 {
1201         vmem_seg_t *span;
1202         vmem_seg_t *vsp;
1203
1204         vmem_seg_t *end = &vmp->vm_seg0;
1205
1206         ASSERT(MUTEX_HELD(&vmp->vm_lock));
1207
1208         /*
1209          * the second "if" clause below relies on the direction of this search
1210          */
1211         for (span = end->vs_kprev; span != end; span = span->vs_kprev) {
1212                 if (span->vs_end == addr || span->vs_start == endaddr)
1213                         break;
1214         }
1215
1216         if (span == end)
1217                 return (vmem_span_create(vmp, (void *)addr, endaddr - addr, 0));
1218         if (span->vs_kprev->vs_end == addr && span->vs_start == endaddr) {
1219                 vmem_seg_t *prevspan = span->vs_kprev;
1220                 vmem_seg_t *nextseg = span->vs_anext;
1221                 vmem_seg_t *prevseg = span->vs_aprev;
1222
1223                 /*
1224                  * prevspan becomes the span marker for the full range
1225                  */
1226                 prevspan->vs_end = span->vs_end;
1227
1228                 /*
1229                  * Notionally, span becomes a free segment representing
1230                  * [addr, endaddr).
1231                  *
1232                  * However, if either of its neighbors are free, we coalesce
1233                  * by destroying span and changing the free segment.
1234                  */
1235                 if (prevseg->vs_type == VMEM_FREE &&
1236                     nextseg->vs_type == VMEM_FREE) {
1237                         /*
1238                          * coalesce both ways
1239                          */
1240                         ASSERT(prevseg->vs_end == addr &&
1241                             nextseg->vs_start == endaddr);
1242
1243                         vmem_freelist_delete(vmp, prevseg);
1244                         prevseg->vs_end = nextseg->vs_end;
1245
1246                         vmem_freelist_delete(vmp, nextseg);
1247                         VMEM_DELETE(span, k);
1248                         vmem_seg_destroy(vmp, nextseg);
1249                         vmem_seg_destroy(vmp, span);
1250
1251                         vsp = prevseg;
1252                 } else if (prevseg->vs_type == VMEM_FREE) {
1253                         /*
1254                          * coalesce left
1255                          */
1256                         ASSERT(prevseg->vs_end == addr);
1257
1258                         VMEM_DELETE(span, k);
1259                         vmem_seg_destroy(vmp, span);
1260
1261                         vmem_freelist_delete(vmp, prevseg);
1262                         prevseg->vs_end = endaddr;
1263
1264                         vsp = prevseg;
1265                 } else if (nextseg->vs_type == VMEM_FREE) {
1266                         /*
1267                          * coalesce right
1268                          */
1269                         ASSERT(nextseg->vs_start == endaddr);
1270
1271                         VMEM_DELETE(span, k);
1272                         vmem_seg_destroy(vmp, span);
1273
1274                         vmem_freelist_delete(vmp, nextseg);
1275                         nextseg->vs_start = addr;
1276
1277                         vsp = nextseg;
1278                 } else {
1279                         /*
1280                          * cannot coalesce
1281                          */
1282                         VMEM_DELETE(span, k);
1283                         span->vs_start = addr;
1284                         span->vs_end = endaddr;
1285
1286                         vsp = span;
1287                 }
1288         } else if (span->vs_end == addr) {
1289                 vmem_seg_t *oldseg = span->vs_knext->vs_aprev;
1290                 span->vs_end = endaddr;
1291
1292                 ASSERT(oldseg->vs_type != VMEM_SPAN);
1293                 if (oldseg->vs_type == VMEM_FREE) {
1294                         ASSERT(oldseg->vs_end == addr);
1295                         vmem_freelist_delete(vmp, oldseg);
1296                         oldseg->vs_end = endaddr;
1297                         vsp = oldseg;
1298                 } else
1299                         vsp = vmem_seg_create(vmp, oldseg, addr, endaddr);
1300         } else {
1301                 vmem_seg_t *oldseg = span->vs_anext;
1302                 ASSERT(span->vs_start == endaddr);
1303                 span->vs_start = addr;
1304
1305                 ASSERT(oldseg->vs_type != VMEM_SPAN);
1306                 if (oldseg->vs_type == VMEM_FREE) {
1307                         ASSERT(oldseg->vs_start == endaddr);
1308                         vmem_freelist_delete(vmp, oldseg);
1309                         oldseg->vs_start = addr;
1310                         vsp = oldseg;
1311                 } else
1312                         vsp = vmem_seg_create(vmp, span, addr, endaddr);
1313         }
1314         vmem_freelist_insert(vmp, vsp);
1315         vmp->vm_kstat.vk_mem_total += (endaddr - addr);
1316         return (vsp);
1317 }
1318
1319 /*
1320  * Does some error checking, calls vmem_extend_unlocked to add
1321  * [vaddr, vaddr+size) to vmp, then allocates alloc bytes from the
1322  * newly merged segment.
1323  */
1324 void *
1325 _vmem_extend_alloc(vmem_t *vmp, void *vaddr, size_t size, size_t alloc,
1326     int vmflag)
1327 {
1328         uintptr_t addr = (uintptr_t)vaddr;
1329         uintptr_t endaddr = addr + size;
1330         vmem_seg_t *vsp;
1331
1332         ASSERT(vaddr != NULL && size != 0 && endaddr > addr);
1333         ASSERT(alloc <= size && alloc != 0);
1334         ASSERT(((addr | size | alloc) & (vmp->vm_quantum - 1)) == 0);
1335
1336         ASSERT(!vmem_contains(vmp, vaddr, size));
1337
1338         (void) mutex_lock(&vmp->vm_lock);
1339         if (!vmem_populate(vmp, vmflag)) {
1340                 (void) mutex_unlock(&vmp->vm_lock);
1341                 return (NULL);
1342         }
1343         /*
1344          * if there is a source, we can't mess with the spans
1345          */
1346         if (vmp->vm_source_alloc != NULL)
1347                 vsp = vmem_span_create(vmp, vaddr, size, 0);
1348         else
1349                 vsp = vmem_extend_unlocked(vmp, addr, endaddr);
1350
1351         ASSERT(VS_SIZE(vsp) >= alloc);
1352
1353         addr = vsp->vs_start;
1354         (void) vmem_seg_alloc(vmp, vsp, addr, alloc);
1355         vaddr = (void *)addr;
1356
1357         (void) cond_broadcast(&vmp->vm_cv);
1358         (void) mutex_unlock(&vmp->vm_lock);
1359
1360         return (vaddr);
1361 }
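
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * growing an arena and carving an allocation out of the new space in one
 * atomic step, as is done to grow the heap while vmem_populate() is in
 * progress.  The arena, the new chunk, and the sizes are assumptions for the
 * example; all of them must be vm_quantum aligned.
 */
#if 0   /* example only; not compiled */
static void *
example_grow_and_alloc(vmem_t *arena, void *new_chunk, size_t chunk_size)
{
        /*
         * Add [new_chunk, new_chunk + chunk_size) to the arena and
         * immediately allocate one quantum-sized piece from it.
         */
        return (_vmem_extend_alloc(arena, new_chunk, chunk_size,
            arena->vm_quantum, VM_NOSLEEP));
}
#endif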
1362
1363 /*
1364  * Walk the vmp arena, applying func to each segment matching typemask.
1365  * If VMEM_REENTRANT is specified, the arena lock is dropped across each
1366  * call to func(); otherwise, it is held for the duration of vmem_walk()
1367  * to ensure a consistent snapshot.  Note that VMEM_REENTRANT callbacks
1368  * are *not* necessarily consistent, so they may only be used when a hint
1369  * is adequate.
1370  */
1371 void
1372 vmem_walk(vmem_t *vmp, int typemask,
1373         void (*func)(void *, void *, size_t), void *arg)
1374 {
1375         vmem_seg_t *vsp;
1376         vmem_seg_t *seg0 = &vmp->vm_seg0;
1377         vmem_seg_t walker;
1378
1379         if (typemask & VMEM_WALKER)
1380                 return;
1381
1382         bzero(&walker, sizeof (walker));
1383         walker.vs_type = VMEM_WALKER;
1384
1385         (void) mutex_lock(&vmp->vm_lock);
1386         VMEM_INSERT(seg0, &walker, a);
1387         for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext) {
1388                 if (vsp->vs_type & typemask) {
1389                         void *start = (void *)vsp->vs_start;
1390                         size_t size = VS_SIZE(vsp);
1391                         if (typemask & VMEM_REENTRANT) {
1392                                 vmem_advance(vmp, &walker, vsp);
1393                                 (void) mutex_unlock(&vmp->vm_lock);
1394                                 func(arg, start, size);
1395                                 (void) mutex_lock(&vmp->vm_lock);
1396                                 vsp = &walker;
1397                         } else {
1398                                 func(arg, start, size);
1399                         }
1400                 }
1401         }
1402         vmem_advance(vmp, &walker, NULL);
1403         (void) mutex_unlock(&vmp->vm_lock);
1404 }
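
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * using vmem_walk() to total the in-use segments of an arena.  The callback
 * and helper names are assumptions for the example.
 */
#if 0   /* example only; not compiled */
static void
example_count_cb(void *arg, void *start, size_t size)
{
        (void) start;
        *(size_t *)arg += size;
}

static size_t
example_total_allocated(vmem_t *arena)
{
        size_t total = 0;

        /* the arena lock is held across callbacks; no VMEM_REENTRANT here */
        vmem_walk(arena, VMEM_ALLOC, example_count_cb, &total);
        return (total);
}
#endif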
1405
1406 /*
1407  * Return the total amount of memory whose type matches typemask.  Thus:
1408  *
1409  *      typemask VMEM_ALLOC yields total memory allocated (in use).
1410  *      typemask VMEM_FREE yields total memory free (available).
1411  *      typemask (VMEM_ALLOC | VMEM_FREE) yields total arena size.
1412  */
1413 size_t
1414 vmem_size(vmem_t *vmp, int typemask)
1415 {
1416         uint64_t size = 0;
1417
1418         if (typemask & VMEM_ALLOC)
1419                 size += vmp->vm_kstat.vk_mem_inuse;
1420         if (typemask & VMEM_FREE)
1421                 size += vmp->vm_kstat.vk_mem_total -
1422                     vmp->vm_kstat.vk_mem_inuse;
1423         return ((size_t)size);
1424 }
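
/*
 * Typical vmem_size() queries, sketched with a hypothetical `arena':
 *
 *	size_t inuse = vmem_size(arena, VMEM_ALLOC);
 *	size_t avail = vmem_size(arena, VMEM_FREE);
 *	size_t total = vmem_size(arena, VMEM_ALLOC | VMEM_FREE);
 *
 * The kstats are read without taking the arena lock, so each call is an
 * independent snapshot; inuse + avail equals total only if the arena is
 * quiescent across the three calls.
 */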
1425
1426 /*
1427  * Create an arena called name whose initial span is [base, base + size).
1428  * The arena's natural unit of currency is quantum, so vmem_alloc()
1429  * guarantees quantum-aligned results.  The arena may import new spans
1430  * by invoking afunc() on source, and may return those spans by invoking
1431  * ffunc() on source.  To make small allocations fast and scalable,
1432  * the arena offers high-performance caching for each integer multiple
1433  * of quantum up to qcache_max.
1434  */
1435 vmem_t *
1436 vmem_create(const char *name, void *base, size_t size, size_t quantum,
1437         vmem_alloc_t *afunc, vmem_free_t *ffunc, vmem_t *source,
1438         size_t qcache_max, int vmflag)
1439 {
1440         int i;
1441         size_t nqcache;
1442         vmem_t *vmp, *cur, **vmpp;
1443         vmem_seg_t *vsp;
1444         vmem_freelist_t *vfp;
1445         uint32_t id = atomic_add_32_nv(&vmem_id, 1);
1446
1447         if (vmem_vmem_arena != NULL) {
1448                 vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
1449                     vmflag & VM_UMFLAGS);
1450         } else {
1451                 ASSERT(id <= VMEM_INITIAL);
1452                 vmp = &vmem0[id - 1];
1453         }
1454
1455         if (vmp == NULL)
1456                 return (NULL);
1457         bzero(vmp, sizeof (vmem_t));
1458
1459         (void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
1460         (void) mutex_init(&vmp->vm_lock, USYNC_THREAD, NULL);
1461         (void) cond_init(&vmp->vm_cv, USYNC_THREAD, NULL);
1462         vmp->vm_cflags = vmflag;
1463         vmflag &= VM_UMFLAGS;
1464
1465         vmp->vm_quantum = quantum;
1466         vmp->vm_qshift = highbit(quantum) - 1;
1467         nqcache = MIN(qcache_max >> vmp->vm_qshift, VMEM_NQCACHE_MAX);
1468
1469         for (i = 0; i <= VMEM_FREELISTS; i++) {
1470                 vfp = &vmp->vm_freelist[i];
1471                 vfp->vs_end = 1UL << i;
1472                 vfp->vs_knext = (vmem_seg_t *)(vfp + 1);
1473                 vfp->vs_kprev = (vmem_seg_t *)(vfp - 1);
1474         }
1475
1476         vmp->vm_freelist[0].vs_kprev = NULL;
1477         vmp->vm_freelist[VMEM_FREELISTS].vs_knext = NULL;
1478         vmp->vm_freelist[VMEM_FREELISTS].vs_end = 0;
1479         vmp->vm_hash_table = vmp->vm_hash0;
1480         vmp->vm_hash_mask = VMEM_HASH_INITIAL - 1;
1481         vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1482
1483         vsp = &vmp->vm_seg0;
1484         vsp->vs_anext = vsp;
1485         vsp->vs_aprev = vsp;
1486         vsp->vs_knext = vsp;
1487         vsp->vs_kprev = vsp;
1488         vsp->vs_type = VMEM_SPAN;
1489
1490         vsp = &vmp->vm_rotor;
1491         vsp->vs_type = VMEM_ROTOR;
1492         VMEM_INSERT(&vmp->vm_seg0, vsp, a);
1493
1494         vmp->vm_id = id;
1495         if (source != NULL)
1496                 vmp->vm_kstat.vk_source_id = source->vm_id;
1497         vmp->vm_source = source;
1498         vmp->vm_source_alloc = afunc;
1499         vmp->vm_source_free = ffunc;
1500
1501         if (nqcache != 0) {
1502                 vmp->vm_qcache_max = nqcache << vmp->vm_qshift;
1503                 for (i = 0; i < nqcache; i++) {
1504                         char buf[VMEM_NAMELEN + 21];
1505                         (void) snprintf(buf, sizeof (buf), "%s_%lu",
1506                             vmp->vm_name, (long)((i + 1) * quantum));
1507                         vmp->vm_qcache[i] = umem_cache_create(buf,
1508                             (i + 1) * quantum, quantum, NULL, NULL, NULL,
1509                             NULL, vmp, UMC_QCACHE | UMC_NOTOUCH);
1510                         if (vmp->vm_qcache[i] == NULL) {
1511                                 vmp->vm_qcache_max = i * quantum;
1512                                 break;
1513                         }
1514                 }
1515         }
1516
1517         (void) mutex_lock(&vmem_list_lock);
1518         vmpp = &vmem_list;
1519         while ((cur = *vmpp) != NULL)
1520                 vmpp = &cur->vm_next;
1521         *vmpp = vmp;
1522         (void) mutex_unlock(&vmem_list_lock);
1523
1524         if (vmp->vm_cflags & VMC_POPULATOR) {
1525                 uint_t pop_id = atomic_add_32_nv(&vmem_populators, 1);
1526                 ASSERT(pop_id <= VMEM_INITIAL);
1527                 vmem_populator[pop_id - 1] = vmp;
1528                 (void) mutex_lock(&vmp->vm_lock);
1529                 (void) vmem_populate(vmp, vmflag | VM_PANIC);
1530                 (void) mutex_unlock(&vmp->vm_lock);
1531         }
1532
1533         if ((base || size) && vmem_add(vmp, base, size, vmflag) == NULL) {
1534                 vmem_destroy(vmp);
1535                 return (NULL);
1536         }
1537
1538         return (vmp);
1539 }
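
/*
 * A usage sketch for vmem_create(): building a simple arena over a
 * pre-existing range and allocating from it.  `my_base' and MY_SIZE are
 * hypothetical; quantum 8 makes every result 8-byte aligned, the NULL
 * afunc/ffunc/source mean nothing is imported, and qcache_max of 0
 * disables the quantum caches:
 *
 *	vmem_t *arena = vmem_create("my_arena", my_base, MY_SIZE, 8,
 *	    NULL, NULL, NULL, 0, VM_NOSLEEP);
 *	if (arena != NULL) {
 *		void *p = vmem_alloc(arena, 64, VM_NOSLEEP);
 *		if (p != NULL)
 *			vmem_free(arena, p, 64);
 *		vmem_destroy(arena);
 *	}
 */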
1540
1541 /*
1542  * Destroy arena vmp.
1543  */
1544 void
1545 vmem_destroy(vmem_t *vmp)
1546 {
1547         vmem_t *cur, **vmpp;
1548         vmem_seg_t *seg0 = &vmp->vm_seg0;
1549         vmem_seg_t *vsp;
1550         size_t leaked;
1551         int i;
1552
1553         (void) mutex_lock(&vmem_list_lock);
1554         vmpp = &vmem_list;
1555         while ((cur = *vmpp) != vmp)
1556                 vmpp = &cur->vm_next;
1557         *vmpp = vmp->vm_next;
1558         (void) mutex_unlock(&vmem_list_lock);
1559
1560         for (i = 0; i < VMEM_NQCACHE_MAX; i++)
1561                 if (vmp->vm_qcache[i])
1562                         umem_cache_destroy(vmp->vm_qcache[i]);
1563
1564         leaked = vmem_size(vmp, VMEM_ALLOC);
1565         if (leaked != 0)
1566                 umem_printf("vmem_destroy('%s'): leaked %lu bytes",
1567                     vmp->vm_name, leaked);
1568
1569         if (vmp->vm_hash_table != vmp->vm_hash0)
1570                 vmem_free(vmem_hash_arena, vmp->vm_hash_table,
1571                     (vmp->vm_hash_mask + 1) * sizeof (void *));
1572
1573         /*
1574          * Give back the segment structures for anything that's left in the
1575          * arena, e.g. the primary spans and their free segments.
1576          */
1577         VMEM_DELETE(&vmp->vm_rotor, a);
1578         for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext)
1579                 vmem_putseg_global(vsp);
1580
1581         while (vmp->vm_nsegfree > 0)
1582                 vmem_putseg_global(vmem_getseg(vmp));
1583
1584         (void) mutex_destroy(&vmp->vm_lock);
1585         (void) cond_destroy(&vmp->vm_cv);
1586         vmem_free(vmem_vmem_arena, vmp, sizeof (vmem_t));
1587 }
1588
1589 /*
1590  * Resize vmp's hash table to keep the average lookup depth near 1.0.
1591  */
1592 static void
1593 vmem_hash_rescale(vmem_t *vmp)
1594 {
1595         vmem_seg_t **old_table, **new_table, *vsp;
1596         size_t old_size, new_size, h, nseg;
1597
1598         nseg = (size_t)(vmp->vm_kstat.vk_alloc - vmp->vm_kstat.vk_free);
1599
1600         new_size = MAX(VMEM_HASH_INITIAL, 1 << (highbit(3 * nseg + 4) - 2));
1601         old_size = vmp->vm_hash_mask + 1;
1602
1603         if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
1604                 return;
1605
1606         new_table = vmem_alloc(vmem_hash_arena, new_size * sizeof (void *),
1607             VM_NOSLEEP);
1608         if (new_table == NULL)
1609                 return;
1610         bzero(new_table, new_size * sizeof (void *));
1611
1612         (void) mutex_lock(&vmp->vm_lock);
1613
1614         old_size = vmp->vm_hash_mask + 1;
1615         old_table = vmp->vm_hash_table;
1616
1617         vmp->vm_hash_mask = new_size - 1;
1618         vmp->vm_hash_table = new_table;
1619         vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1620
1621         for (h = 0; h < old_size; h++) {
1622                 vsp = old_table[h];
1623                 while (vsp != NULL) {
1624                         uintptr_t addr = vsp->vs_start;
1625                         vmem_seg_t *next_vsp = vsp->vs_knext;
1626                         vmem_seg_t **hash_bucket = VMEM_HASH(vmp, addr);
1627                         vsp->vs_knext = *hash_bucket;
1628                         *hash_bucket = vsp;
1629                         vsp = next_vsp;
1630                 }
1631         }
1632
1633         (void) mutex_unlock(&vmp->vm_lock);
1634
1635         if (old_table != vmp->vm_hash0)
1636                 vmem_free(vmem_hash_arena, old_table,
1637                     old_size * sizeof (void *));
1638 }
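
/*
 * A worked example of the sizing rule above: with nseg = 1000 live
 * segments, 3 * nseg + 4 = 3004 and highbit(3004) = 12, so new_size =
 * 1 << 10 = 1024 buckets, giving an average chain length of 1000 / 1024,
 * just under 1.0.  The factor-of-two window around old_size keeps the
 * table from being reallocated on every small change in segment count.
 */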
1639
1640 /*
1641  * Perform periodic maintenance on all vmem arenas.
1642  */
1643 /*ARGSUSED*/
1644 void
1645 vmem_update(void *dummy)
1646 {
1647         vmem_t *vmp;
1648
1649         (void) mutex_lock(&vmem_list_lock);
1650         for (vmp = vmem_list; vmp != NULL; vmp = vmp->vm_next) {
1651                 /*
1652                  * If threads are waiting for resources, wake them up
1653                  * periodically so they can issue another vmem_reap()
1654                  * to reclaim resources cached by the slab allocator.
1655                  */
1656                 (void) cond_broadcast(&vmp->vm_cv);
1657
1658                 /*
1659                  * Rescale the hash table to keep the hash chains short.
1660                  */
1661                 vmem_hash_rescale(vmp);
1662         }
1663         (void) mutex_unlock(&vmem_list_lock);
1664 }
1665
1666 /*
1667  * If vmem_init is called again, we need to be able to reset the world.
1668  * That includes resetting the statics back to their original values.
1669  */
1670 void
1671 vmem_startup(void)
1672 {
1673 #ifdef UMEM_STANDALONE
1674         vmem_id = 0;
1675         vmem_populators = 0;
1676         vmem_segfree = NULL;
1677         vmem_list = NULL;
1678         vmem_internal_arena = NULL;
1679         vmem_seg_arena = NULL;
1680         vmem_hash_arena = NULL;
1681         vmem_vmem_arena = NULL;
1682         vmem_heap = NULL;
1683         vmem_heap_alloc = NULL;
1684         vmem_heap_free = NULL;
1685
1686         bzero(vmem0, sizeof (vmem0));
1687         bzero(vmem_populator, sizeof (vmem_populator));
1688         bzero(vmem_seg0, sizeof (vmem_seg0));
1689 #endif
1690 }
1691
1692 /*
1693  * Prepare vmem for use.
1694  */
1695 vmem_t *
1696 vmem_init(const char *parent_name, size_t parent_quantum,
1697     vmem_alloc_t *parent_alloc, vmem_free_t *parent_free,
1698     const char *heap_name, void *heap_start, size_t heap_size,
1699     size_t heap_quantum, vmem_alloc_t *heap_alloc, vmem_free_t *heap_free)
1700 {
1701         uint32_t id;
1702         int nseg = VMEM_SEG_INITIAL;
1703         vmem_t *parent, *heap;
1704
1705         ASSERT(vmem_internal_arena == NULL);
1706
1707         while (--nseg >= 0)
1708                 vmem_putseg_global(&vmem_seg0[nseg]);
1709
1710         if (parent_name != NULL) {
1711                 parent = vmem_create(parent_name,
1712                     heap_start, heap_size, parent_quantum,
1713                     NULL, NULL, NULL, 0,
1714                     VM_SLEEP | VMC_POPULATOR);
1715                 heap_start = NULL;
1716                 heap_size = 0;
1717         } else {
1718                 ASSERT(parent_alloc == NULL && parent_free == NULL);
1719                 parent = NULL;
1720         }
1721
1722         heap = vmem_create(heap_name,
1723             heap_start, heap_size, heap_quantum,
1724             parent_alloc, parent_free, parent, 0,
1725             VM_SLEEP | VMC_POPULATOR);
1726
1727         vmem_heap = heap;
1728         vmem_heap_alloc = heap_alloc;
1729         vmem_heap_free = heap_free;
1730
1731         vmem_internal_arena = vmem_create("vmem_internal",
1732             NULL, 0, heap_quantum,
1733             heap_alloc, heap_free, heap, 0,
1734             VM_SLEEP | VMC_POPULATOR);
1735
1736         vmem_seg_arena = vmem_create("vmem_seg",
1737             NULL, 0, heap_quantum,
1738             vmem_alloc, vmem_free, vmem_internal_arena, 0,
1739             VM_SLEEP | VMC_POPULATOR);
1740
1741         vmem_hash_arena = vmem_create("vmem_hash",
1742             NULL, 0, 8,
1743             vmem_alloc, vmem_free, vmem_internal_arena, 0,
1744             VM_SLEEP);
1745
1746         vmem_vmem_arena = vmem_create("vmem_vmem",
1747             vmem0, sizeof (vmem0), 1,
1748             vmem_alloc, vmem_free, vmem_internal_arena, 0,
1749             VM_SLEEP);
1750
1751         for (id = 0; id < vmem_id; id++)
1752                 (void) vmem_xalloc(vmem_vmem_arena, sizeof (vmem_t),
1753                     1, 0, 0, &vmem0[id], &vmem0[id + 1],
1754                     VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
1755
1756         return (heap);
1757 }
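
/*
 * A hedged sketch of the simplest vmem_init() call: with no parent arena,
 * parent_alloc and parent_free must also be NULL and the heap is created
 * directly over [heap_start, heap_start + heap_size).  All names below are
 * hypothetical:
 *
 *	vmem_t *heap = vmem_init(NULL, 0, NULL, NULL,
 *	    "my_heap", my_base, my_size, pagesize,
 *	    my_heap_alloc, my_heap_free);
 *
 * The returned heap becomes vmem_heap, and the internal, seg, hash and
 * vmem arenas are created beneath it.
 */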
1758
1759 void
1760 vmem_no_debug(void)
1761 {
1762         /*
1763          * This size must be a multiple of the minimum required alignment,
1764          * since vmem_populate() packs vmem_seg_t structures back to back.
1765          */
1766         vmem_seg_size = P2ROUNDUP(offsetof(vmem_seg_t, vs_thread),
1767             sizeof (hrtime_t));
1768 }
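
/*
 * For reference, P2ROUNDUP(x, align) rounds x up to the next multiple of
 * the power-of-two `align', so the debug-free segment size is the offset
 * of vs_thread rounded up to a multiple of sizeof (hrtime_t).  As a
 * purely illustrative figure, if that offset happened to be 52 and
 * hrtime_t is 8 bytes, the result would be P2ROUNDUP(52, 8) == 56; the
 * actual value depends on the structure layout of the build.
 */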
1769
1770 /*
1771  * Lockup and release, for fork1(2) handling.
1772  */
1773 void
1774 vmem_lockup(void)
1775 {
1776         vmem_t *cur;
1777
1778         (void) mutex_lock(&vmem_list_lock);
1779         (void) mutex_lock(&vmem_nosleep_lock.vmpl_mutex);
1780
1781         /*
1782          * Lock up and broadcast all arenas.
1783          */
1784         for (cur = vmem_list; cur != NULL; cur = cur->vm_next) {
1785                 (void) mutex_lock(&cur->vm_lock);
1786                 (void) cond_broadcast(&cur->vm_cv);
1787         }
1788
1789         (void) mutex_lock(&vmem_segfree_lock);
1790 }
1791
1792 void
1793 vmem_release(void)
1794 {
1795         vmem_t *cur;
1796
1797         (void) mutex_unlock(&vmem_nosleep_lock.vmpl_mutex);
1798
1799         for (cur = vmem_list; cur != NULL; cur = cur->vm_next)
1800                 (void) mutex_unlock(&cur->vm_lock);
1801
1802         (void) mutex_unlock(&vmem_segfree_lock);
1803         (void) mutex_unlock(&vmem_list_lock);
1804 }
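
/*
 * Lockup/release pairs like the two functions above exist so that every
 * vmem lock is held by the forking thread across fork1(), and the child
 * therefore never inherits a lock owned by a thread that no longer exists
 * there.  A hedged sketch of how such handlers are commonly registered
 * (libumem's actual wiring is done elsewhere):
 *
 *	(void) pthread_atfork(vmem_lockup, vmem_release, vmem_release);
 */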