root/trunk/umem_fork.c

Revision 2, 5.1 kB (checked in by wez, 8 years ago)

Initial revision

  • Property svn:eol-style set to native
Line 
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 /*
27  * Portions Copyright 2006 OmniTI, Inc.
28  */
29
30 /* #pragma ident        "@(#)umem_fork.c        1.3     05/06/08 SMI" */
31
32 #include "config.h"
33 /* #include "mtlib.h" */
34 #include "umem_base.h"
35 #include "vmem_base.h"
36
37 #ifndef _WIN32
38 #include <unistd.h>
39
40 /*
41  * The following functions are for pre- and post-fork1(2) handling.
42  */
43
44 static void
45 umem_lockup_cache(umem_cache_t *cp)
46 {
47         int idx;
48         int ncpus = cp->cache_cpu_mask + 1;
49
50         for (idx = 0; idx < ncpus; idx++)
51                 (void) mutex_lock(&cp->cache_cpu[idx].cc_lock);
52
53         (void) mutex_lock(&cp->cache_depot_lock);
54         (void) mutex_lock(&cp->cache_lock);
55 }
56
57 static void
58 umem_release_cache(umem_cache_t *cp)
59 {
60         int idx;
61         int ncpus = cp->cache_cpu_mask + 1;
62
63         (void) mutex_unlock(&cp->cache_lock);
64         (void) mutex_unlock(&cp->cache_depot_lock);
65
66         for (idx = 0; idx < ncpus; idx++)
67                 (void) mutex_unlock(&cp->cache_cpu[idx].cc_lock);
68 }
69
70 static void
71 umem_lockup_log_header(umem_log_header_t *lhp)
72 {
73         int idx;
74         if (lhp == NULL)
75                 return;
76         for (idx = 0; idx < umem_max_ncpus; idx++)
77                 (void) mutex_lock(&lhp->lh_cpu[idx].clh_lock);
78
79         (void) mutex_lock(&lhp->lh_lock);
80 }
81
82 static void
83 umem_release_log_header(umem_log_header_t *lhp)
84 {
85         int idx;
86         if (lhp == NULL)
87                 return;
88
89         (void) mutex_unlock(&lhp->lh_lock);
90
91         for (idx = 0; idx < umem_max_ncpus; idx++)
92                 (void) mutex_unlock(&lhp->lh_cpu[idx].clh_lock);
93 }
94
/*
 * pre-fork handler (registered via pthread_atfork() in
 * umem_forkhandler_init()): acquire every lock in the library in a
 * single fixed order, so no lock is held mid-operation by some other
 * thread at the instant the child's address space is copied.
 * umem_release() drops the same locks in the exact reverse order.
 */
static void
umem_lockup(void)
{
	umem_cache_t *cp;

	(void) mutex_lock(&umem_init_lock);
	/*
	 * If another thread is busy initializing the library, we must
	 * wait for it to complete (by calling umem_init()) before allowing
	 * the fork() to proceed.
	 */
	if (umem_ready == UMEM_READY_INITING && umem_init_thr != thr_self()) {
		(void) mutex_unlock(&umem_init_lock);
		(void) umem_init();
		(void) mutex_lock(&umem_init_lock);
	}
	(void) mutex_lock(&umem_cache_lock);
	(void) mutex_lock(&umem_update_lock);
	(void) mutex_lock(&umem_flags_lock);

	/*
	 * Lock every cache.  umem_null_cache anchors the doubly-linked
	 * cache list; the walk follows cache_prev until it wraps back
	 * to the anchor.
	 */
	umem_lockup_cache(&umem_null_cache);
	for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
	    cp = cp->cache_prev)
		umem_lockup_cache(cp);

	umem_lockup_log_header(umem_transaction_log);
	umem_lockup_log_header(umem_content_log);
	umem_lockup_log_header(umem_failure_log);
	umem_lockup_log_header(umem_slab_log);

	/*
	 * NOTE(review): presumably this wakes any waiter on
	 * umem_update_cv so no thread sleeps on it across the fork —
	 * confirm against the update-thread logic in umem_base.
	 */
	(void) cond_broadcast(&umem_update_cv);

	vmem_sbrk_lockup();
	vmem_lockup();
}
130
/*
 * post-fork handler, run in the parent (and, via umem_release_child(),
 * in the child): release every lock acquired by umem_lockup(), in
 * exactly the reverse order it acquired them.
 */
static void
umem_release(void)
{
	umem_cache_t *cp;

	vmem_release();
	vmem_sbrk_release();

	/* log headers, in reverse of the lockup order */
	umem_release_log_header(umem_slab_log);
	umem_release_log_header(umem_failure_log);
	umem_release_log_header(umem_content_log);
	umem_release_log_header(umem_transaction_log);

	/*
	 * Caches are walked via cache_next here; umem_lockup() walked
	 * cache_prev, so this releases them in reverse acquisition order.
	 */
	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
	    cp = cp->cache_next)
		umem_release_cache(cp);
	umem_release_cache(&umem_null_cache);

	(void) mutex_unlock(&umem_flags_lock);
	(void) mutex_unlock(&umem_update_lock);
	(void) mutex_unlock(&umem_cache_lock);
	(void) mutex_unlock(&umem_init_lock);
}
154
/*
 * post-fork handler, run in the child: only the forking thread exists
 * in the child, so any background/update thread state inherited from
 * the parent must be repaired before the locks are dropped.
 */
static void
umem_release_child(void)
{
	umem_cache_t *cp;

	/*
	 * Clean up the update state
	 */
	umem_update_thr = 0;		/* the update thread does not exist here */

	/*
	 * If this thread was itself performing a synchronous (st) update,
	 * leave its state alone; otherwise clear it and mark any reap done.
	 */
	if (umem_st_update_thr != thr_self()) {
		umem_st_update_thr = 0;
		umem_reaping = UMEM_REAP_DONE;

		for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
		    cp = cp->cache_next) {
			if (cp->cache_uflags & UMU_NOTIFY)
				cp->cache_uflags &= ~UMU_NOTIFY;

			/*
			 * If the cache is active, we just re-add it to
			 * the update list.  This will re-do any active
			 * updates on the cache, but that won't break
			 * anything.
			 *
			 * The worst that can happen is a cache has
			 * its magazines rescaled twice, instead of once.
			 */
			if (cp->cache_uflags & UMU_ACTIVE) {
				umem_cache_t *cnext, *cprev;

				ASSERT(cp->cache_unext == NULL &&
				    cp->cache_uprev == NULL);

				cp->cache_uflags &= ~UMU_ACTIVE;
				/*
				 * Splice cp onto the tail of the update
				 * list anchored at umem_null_cache
				 * (between the anchor's uprev and the
				 * anchor itself).
				 */
				cp->cache_unext = cnext = &umem_null_cache;
				cp->cache_uprev = cprev =
				    umem_null_cache.cache_uprev;
				cnext->cache_uprev = cp;
				cprev->cache_unext = cp;
			}
		}
	}

	/* Finally drop all the locks taken by umem_lockup(). */
	umem_release();
}
201 #endif
202
203 void
204 umem_forkhandler_init(void)
205 {
206 #ifndef _WIN32
207         /*
208          * There is no way to unregister these atfork functions,
209          * but we don't need to.  The dynamic linker and libc take
210          * care of unregistering them if/when the library is unloaded.
211          */
212         (void) pthread_atfork(umem_lockup, umem_release, umem_release_child);
213 #endif
214 }
Note: See TracBrowser for help on using the browser.