root/trunk/sol_compat.h

Revision 58, 6.7 kB (checked in by wez, 4 years ago)

move umem_startup constructor to the umem .so itself, rather than
the malloc replacement.

Rename ec_atomic to umem_atomic.

Add a posix_memalign function.

-ldl isn't universal; make a configure check for it.

  • Property svn:eol-style set to native
Line 
1 /*
2  * Copyright (c) 2006-2008 Message Systems, Inc. All rights reserved
3  * This header file distributed under the terms of the CDDL.
4  * Portions Copyright 2004 Sun Microsystems, Inc. All Rights reserved.
5  */
6 #ifndef _EC_UMEM_SOL_COMPAT_H_
7 #define _EC_UMEM_SOL_COMPAT_H_
8
9 #include "config.h"
10
11 #include <stdint.h>
12 #include <pthread.h>
13
14 #ifdef HAVE_SYS_TIME_H
15 #include <sys/time.h>
16 #endif
17
/* Per-platform spellings for thread entry points and inlining. */
#ifdef _WIN32
# define THR_RETURN DWORD
# define THR_API WINAPI
# define INLINE __inline   /* MSVC's C mode spells "inline" as __inline */
#else
# define THR_RETURN void *
# define THR_API
# define INLINE inline
#endif

/*
 * On Darwin and Windows the internal _umem_* entry points are aliased
 * straight to the public names -- presumably because ELF-style weak
 * symbols are not available there (hence NO_WEAK_SYMBOLS).
 */
#if defined(__MACH__) || defined(_WIN32)
#define NO_WEAK_SYMBOLS
#define _umem_cache_alloc(a,b) umem_cache_alloc(a,b)
#define _umem_cache_free(a,b) umem_cache_free(a,b)
#define _umem_zalloc(a,b) umem_zalloc(a,b)
#define _umem_alloc(a,b) umem_alloc(a,b)
#define _umem_alloc_align(a,b,c) umem_alloc_align(a,b,c)
#define _umem_free(a,b) umem_free(a,b)
#define _umem_free_align(a,b) umem_free_align(a,b)
#endif

/* Windows lacks the BSD bcopy/bzero; map them to the ANSI equivalents.
 * Note bcopy's (src, dst) order is swapped relative to memcpy's. */
#ifdef _WIN32
#define bcopy(s, d, n)          memcpy(d, s, n)
#define bzero(m, s)                     memset(m, 0, s)
#endif
43
/* Solaris type names mapped onto their POSIX/pthread equivalents. */
typedef pthread_t thread_t;
typedef pthread_mutex_t mutex_t;
typedef pthread_cond_t cond_t;
typedef u_int64_t hrtime_t;   /* timestamp produced by gethrtime() below */
typedef uint32_t uint_t;
typedef unsigned long ulong_t;
typedef struct timespec timestruc_t;
typedef long long longlong_t;
typedef struct timespec timespec_t;
53 static INLINE hrtime_t gethrtime(void) {
54   struct timeval tv;
55   gettimeofday(&tv, NULL);
56   return (((u_int64_t)tv.tv_sec) << 32) | tv.tv_usec;
57 }
/* Solaris thr_self() is just the pthread id. */
# define thr_self()                pthread_self()
static INLINE thread_t _thr_self(void) {
  return thr_self();
}
#if defined(__MACH__)
/* NOTE(review): looks like a cheap per-thread/CPU hint (Mach thread port
 * number) -- confirm intended use at the call sites. */
#define CPUHINT() (pthread_mach_thread_np(pthread_self()))
#endif
# define thr_sigsetmask            pthread_sigmask

/* Solaris thr_create() flag bits; only THR_DETACHED is acted upon. */
#define THR_BOUND     1
#define THR_DETACHED  2
#define THR_DAEMON    4
71 static INLINE int thr_create(void *stack_base,
72   size_t stack_size, THR_RETURN (THR_API *start_func)(void*),
73   void *arg, long flags, thread_t *new_thread_ID)
74 {
75   int ret;
76   pthread_attr_t attr;
77
78   pthread_attr_init(&attr);
79
80   if (flags & THR_DETACHED) {
81     pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
82   }
83   ret = pthread_create(new_thread_ID, &attr, start_func, arg);
84   pthread_attr_destroy(&attr);
85   return ret;
86 }
87
88
/*
 * Solaris mutex/condvar API mapped onto pthreads.  The extra type/arg
 * parameters accepted by the Solaris forms are discarded.
 */
# define mutex_init(mp, type, arg) pthread_mutex_init(mp, NULL)
# define mutex_lock(mp)            pthread_mutex_lock(mp)
# define mutex_unlock(mp)          pthread_mutex_unlock(mp)
# define mutex_destroy(mp)         pthread_mutex_destroy(mp)
# define mutex_trylock(mp)         pthread_mutex_trylock(mp)
# define DEFAULTMUTEX              PTHREAD_MUTEX_INITIALIZER
# define DEFAULTCV                                 PTHREAD_COND_INITIALIZER
# define MUTEX_HELD(mp)            1 /* not really, but only used in an assert */

# define cond_init(c, type, arg)   pthread_cond_init(c, NULL)
# define cond_wait(c, m)           pthread_cond_wait(c, m)
# define _cond_wait(c, m)          pthread_cond_wait(c, m)
# define cond_signal(c)            pthread_cond_signal(c)
# define cond_broadcast(c)         pthread_cond_broadcast(c)
# define cond_destroy(c)           pthread_cond_destroy(c)
# define cond_timedwait            pthread_cond_timedwait
# define _cond_timedwait           pthread_cond_timedwait

/* Not every dlfcn.h defines RTLD_FIRST; make it a harmless no-op flag. */
#ifndef RTLD_FIRST
# define RTLD_FIRST 0
#endif
110
/*
 * Atomic 32/64-bit increment.  Inside Ecelerity the implementations come
 * from umem_atomic.h; otherwise choose a platform-specific flavor.
 */
#ifdef ECELERITY
# include "umem_atomic.h"
#else
# ifdef _WIN32
#  define umem_atomic_inc(a)            InterlockedIncrement(a)
#  define umem_atomic_inc64(a)    InterlockedIncrement64(a)
# elif defined(__MACH__)
#  include <libkern/OSAtomic.h>
#  define umem_atomic_inc(x) OSAtomicIncrement32Barrier((int32_t*)x)
#  if !defined(__ppc__)
    /* on __ppc__ leave umem_atomic_inc64 undefined so the mutex-based
     * fallback further below is used instead */
#   define umem_atomic_inc64(x) OSAtomicIncrement64Barrier((int64_t*)x)
#  endif
# elif (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
/*
 * Atomic compare-and-swap: if *mem == cmp, store "with" into *mem.
 * Returns the previous contents of *mem (equals cmp on success).
 * Implemented with the x86 lock-prefixed cmpxchgl instruction.
 */
static INLINE uint_t umem_atomic_cas(uint_t *mem, uint_t with, uint_t cmp)
{
  uint_t prev;
  asm volatile ("lock; cmpxchgl %1, %2"
        : "=a" (prev)                             /* old value returned in eax */
        : "r"    (with), "m" (*(mem)), "0" (cmp)  /* cmp preloaded into eax */
        : "memory");
  return prev;
}
/*
 * 64-bit compare-and-swap, same contract as umem_atomic_cas(): returns
 * the previous *mem, storing "with" only when it equaled cmp.
 * Uses cmpxchgq on x86-64 and cmpxchg8b on i386.
 */
static INLINE uint64_t umem_atomic_cas64(uint64_t *mem, uint64_t with,
  uint64_t cmp)
{
  uint64_t prev;
#  if defined(__x86_64__)
  __asm__ volatile ("lock; cmpxchgq %1, %2"
    : "=a" (prev)
    : "r" (with), "m" (*(mem)), "0" (cmp)
    : "memory");
#  else
  /*
   * i386: cmpxchg8b compares edx:eax against (mem) and, on a match,
   * stores ecx:ebx.  ebx is saved/restored by hand since it may be
   * reserved as the PIC register.
   * NOTE(review): "with" is an "m" operand; if the compiler addresses it
   * relative to ebx (PIC) or esp, the movs after "pushl %ebx" read via a
   * clobbered/shifted base -- worth confirming on 32-bit PIC builds.
   */
  __asm__ volatile (
    "pushl %%ebx;"
    "mov 4+%1,%%ecx;"
    "mov %1,%%ebx;"
    "lock;"
    "cmpxchg8b (%3);"
    "popl %%ebx"
    : "=A" (prev)
    : "m" (with), "A" (cmp), "r" (mem)
    : "%ecx", "memory");
#  endif
  return prev;
}
156 static INLINE uint64_t umem_atomic_inc64(uint64_t *mem)
157 {
158   register uint64_t last;
159   do {
160     last = *mem;
161   } while (umem_atomic_cas64(mem, last+1, last) != last);
162   return ++last;
163 }
164 #  define umem_atomic_inc64 umem_atomic_inc64
165 # else
166 #  error no atomic solution for your platform
167 # endif
168
169 # ifndef umem_atomic_inc
170 static INLINE uint_t umem_atomic_inc(uint_t *mem)
171 {
172   register uint_t last;
173   do {
174     last = *mem;
175   } while (umem_atomic_cas(mem, last+1, last) != last);
176   return ++last;
177 }
178 # endif
179 # ifndef umem_atomic_inc64
180    /* yeah, it's not great.  It's only used to bump failed allocation
181     * counts, so it is not critical right now. */
182 extern pthread_mutex_t umem_ppc_64inc_lock;
183 static INLINE uint64_t umem_atomic_inc64(uint64_t *val)
184 {
185   uint64_t rval;
186   pthread_mutex_lock(&umem_ppc_64inc_lock);
187   rval = *val + 1;
188   *val = rval;
189   pthread_mutex_unlock(&umem_ppc_64inc_lock);
190   return rval;
191 }
192 #  define umem_atomic_inc64 umem_atomic_inc64
193 #  define NEED_64_LOCK 1
194 # endif
195
196 #endif
197
/*
 * Solaris power-of-two alignment helpers.  All assume "align" is a
 * power of two.
 */
#define P2PHASE(x, align)    ((x) & ((align) - 1))           /* x mod align */
#define P2ALIGN(x, align)    ((x) & -(align))                /* round down */
#define P2NPHASE(x, align)    (-(x) & ((align) - 1))         /* bytes to next boundary */
#define P2ROUNDUP(x, align)   (-(-(x) & -(align)))           /* round up */
#define P2END(x, align)     (-(~(x) & -(align)))             /* end of x's aligned block */
#define P2PHASEUP(x, align, phase)  ((phase) - (((phase) - (x)) & -(align)))
#define P2CROSS(x, y, align)    (((x) ^ (y)) > (align) - 1)  /* in different blocks? */
#define P2SAMEHIGHBIT(x, y)    (((x) ^ (y)) < ((x) & (y)))   /* same highest set bit? */
#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#define ISP2(x)    (((x) & ((x) - 1)) == 0)                  /* power of two (or 0)? */

/* beware! umem only uses these atomic adds for incrementing by 1 */
#define atomic_add_64(lvalptr, delta) umem_atomic_inc64(lvalptr)
#define atomic_add_32_nv(a, b)            umem_atomic_inc(a)

/* nanoseconds per second */
#ifndef NANOSEC
#define NANOSEC 1000000000
#endif

/*
 * issetugid() substitutes.  Windows simply reports "not set-id".
 * NOTE(review): geteuid()==0 approximates "privileged", not "set-id
 * execution"; conservative for root but not equivalent -- confirm.
 */
#ifdef _WIN32
#define issetugid()               0
#elif !HAVE_ISSETUGID
#define issetugid()       (geteuid() == 0)
#endif

#define _sysconf(a) sysconf(a)
#define __NORETURN  __attribute__ ((noreturn))   /* gcc noreturn attribute */

/* presumably selects umem's dummy pc-stack recording -- see umem sources */
#define EC_UMEM_DUMMY_PCSTACK 1
/* Thread-count stub: always claims more than one thread so umem stays in
 * its multi-threaded configuration. */
static INLINE int __nthreads(void)
{
  /* or more; just to force multi-threaded mode */
  return 2;
}
232
/* _LP64 flags a 64-bit target; SIZEOF_VOID_P comes from config.h. */
#if (SIZEOF_VOID_P == 8)
# define _LP64 1
#endif

/* beware: classic MIN/MAX evaluate their arguments twice */
#ifndef MIN
# define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif
#ifndef MAX
# define MAX(a,b) ((a) > (b) ? (a) : (b))
#endif
243
244
245 #endif
Note: See TracBrowser for help on using the browser.