/* Only use one lock mechanism. Select which one. */
#ifdef PTHREAD_LOCK
struct lock_impl {
	pthread_mutex_t mutex;
};

static inline void lock_impl_lock(struct lock_impl *lock)
{
	BUG_ON(pthread_mutex_lock(&lock->mutex));
}

static inline void lock_impl_unlock(struct lock_impl *lock)
{
	BUG_ON(pthread_mutex_unlock(&lock->mutex));
}

static inline bool lock_impl_trylock(struct lock_impl *lock)
{
	int err = pthread_mutex_trylock(&lock->mutex);

	if (!err)
		return true;
	else if (err == EBUSY)
		return false;
	BUG();
}

static inline void lock_impl_init(struct lock_impl *lock)
{
	pthread_mutex_init(&lock->mutex, NULL);
}

#define LOCK_IMPL_INITIALIZER {.mutex = PTHREAD_MUTEX_INITIALIZER}

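/*
 * Illustrative sketch (the example_ name is hypothetical, not part of the
 * original header): lock_impl_trylock() folds pthread's tri-state result
 * (0, EBUSY, or another error) into a bool, treating any other error as a
 * verification failure via BUG().
 */
static inline void example_trylock_usage(struct lock_impl *lock)
{
	if (lock_impl_trylock(lock)) {
		/* Acquired; the caller must release it. */
		lock_impl_unlock(lock);
	}
	/* Otherwise the lock was busy; the caller may retry. */
}
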
#else /* !defined(PTHREAD_LOCK) */
/* Spinlock that assumes that it always gets the lock immediately. */

struct lock_impl {
	bool locked;
};

static inline bool lock_impl_trylock(struct lock_impl *lock)
{
#ifdef RUN
	/* TODO: Should this be a test and set? */
	return __sync_bool_compare_and_swap(&lock->locked, false, true);
#else
	__CPROVER_atomic_begin();
	bool old_locked = lock->locked;
	lock->locked = true;
	__CPROVER_atomic_end();

	/* Minimal barrier to prevent accesses leaking out of the lock. */
	__CPROVER_fence("RRfence", "RWfence");

	return !old_locked;
#endif
}

static inline void lock_impl_lock(struct lock_impl *lock)
{
	/*
	 * CBMC doesn't support busy waiting, so just assume that the
	 * lock is available.
	 */
	assume(lock_impl_trylock(lock));

	/*
	 * If the lock was already held by this thread then the assumption
	 * is unsatisfiable (deadlock).
	 */
}

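/*
 * Illustrative sketch (the example_ name is hypothetical): because
 * acquisition is modeled with assume(), a self-deadlock is pruned rather
 * than reported as a hang.  Re-acquiring a lock this thread already holds
 * makes the assumption unsatisfiable, so CBMC discards that execution
 * path instead of exploring it.
 */
static inline void example_self_deadlock(struct lock_impl *lock)
{
	lock_impl_lock(lock);	/* trylock succeeds: assume(true) */
	lock_impl_lock(lock);	/* trylock fails: assume(false), path pruned */
}
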
static inline void lock_impl_unlock(struct lock_impl *lock)
{
#ifdef RUN
	BUG_ON(!__sync_bool_compare_and_swap(&lock->locked, true, false));
#else
	/* Minimal barrier to prevent accesses leaking out of the lock. */
	__CPROVER_fence("RWfence", "WWfence");

	__CPROVER_atomic_begin();
	bool old_locked = lock->locked;
	lock->locked = false;
	__CPROVER_atomic_end();

	BUG_ON(!old_locked);
#endif
}

static inline void lock_impl_init(struct lock_impl *lock)
{
	lock->locked = false;
}

#define LOCK_IMPL_INITIALIZER {.locked = false}

#endif /* !defined(PTHREAD_LOCK) */

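/*
 * Build-mode note (illustrative; the exact flags are an assumption, not
 * taken from a verified Makefile): -DPTHREAD_LOCK selects the pthread
 * implementation above, and -DRUN selects the executable (non-CBMC) code
 * paths, e.g.:
 *
 *	cc -DPTHREAD_LOCK -DRUN test.c -lpthread	// native test binary
 *	cbmc test.c					// model checking
 */
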
/*
 * Implement spinlocks using the lock mechanism. Wrap the lock to prevent
 * mixing locks of different types.
 */
typedef struct {
	struct lock_impl internal_lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED {.internal_lock = LOCK_IMPL_INITIALIZER}
#define __SPIN_LOCK_UNLOCKED(x) SPIN_LOCK_UNLOCKED
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED

static inline void spin_lock_init(spinlock_t *lock)
{
	lock_impl_init(&lock->internal_lock);
}

static inline void spin_lock(spinlock_t *lock)
{
	/*
	 * Spin locks also need to be removed in order to eliminate all
	 * memory barriers. They are only used by the write side anyway.
	 */
#ifndef NO_SYNC_SMP_MB
	preempt_disable();
	lock_impl_lock(&lock->internal_lock);
#endif
}

static inline void spin_unlock(spinlock_t *lock)
{
#ifndef NO_SYNC_SMP_MB
	lock_impl_unlock(&lock->internal_lock);
	preempt_enable();
#endif
}

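/*
 * Usage sketch (the example_ name is hypothetical): spinlock_t is only
 * usable through the spin_*() wrappers, so passing one to a raw
 * lock_impl_*() call, or mixing it with a differently wrapped lock type,
 * fails to compile instead of silently misbehaving.
 */
static inline void example_spinlock_usage(spinlock_t *sl)
{
	spin_lock(sl);
	/* ... write-side critical section ... */
	spin_unlock(sl);
}
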
/* Don't bother with interrupts */
#define spin_lock_irq(lock) spin_lock(lock)
#define spin_unlock_irq(lock) spin_unlock(lock)
#define spin_lock_irqsave(lock, flags) spin_lock(lock)
#define spin_unlock_irqrestore(lock, flags) spin_unlock(lock)

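/*
 * Illustrative sketch (the example_ name is hypothetical): the _irq and
 * _irqsave variants map straight to plain spin_lock()/spin_unlock() and
 * never touch flags, so kernel-style callers compile unchanged against
 * this model.
 */
static inline void example_irqsave_usage(spinlock_t *sl)
{
	unsigned long flags = 0;

	spin_lock_irqsave(sl, flags);
	/* ... critical section with "interrupts disabled" ... */
	spin_unlock_irqrestore(sl, flags);
	(void)flags;	/* flags is unused; the model ignores interrupts */
}
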
/*
 * This is supposed to return an int, but I think that a bool should work as
 * well.
 */
static inline bool spin_trylock(spinlock_t *lock)
{
#ifndef NO_SYNC_SMP_MB
	preempt_disable();
	return lock_impl_trylock(&lock->internal_lock);
#else
	return true;
#endif
}

struct completion {
	/* Hopefully this won't overflow. */
	unsigned int count;
};

#define COMPLETION_INITIALIZER(x) {.count = 0}
#define DECLARE_COMPLETION(x) struct completion x = COMPLETION_INITIALIZER(x)
#define DECLARE_COMPLETION_ONSTACK(x) DECLARE_COMPLETION(x)

static inline void init_completion(struct completion *c)
{
	c->count = 0;
}

static inline void wait_for_completion(struct completion *c)
{
	unsigned int prev_count = __sync_fetch_and_sub(&c->count, 1);

	assume(prev_count);
}

static inline void complete(struct completion *c)
{
	unsigned int prev_count = __sync_fetch_and_add(&c->count, 1);

	BUG_ON(prev_count == UINT_MAX);
}

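/*
 * Illustrative sketch (the example_ name is hypothetical): count acts as
 * a semaphore, so each wait_for_completion() consumes one complete().  A
 * waiter that ran before its completer would see prev_count == 0, making
 * the assume() in wait_for_completion() unsatisfiable; CBMC therefore
 * prunes interleavings where the wait overtakes the completion.
 */
static inline void example_completion_pairing(struct completion *done)
{
	complete(done);			/* count: 0 -> 1 */
	wait_for_completion(done);	/* count: 1 -> 0; prev_count == 1 */
}
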
/* This function probably isn't very useful for CBMC. */
static inline bool try_wait_for_completion(struct completion *c)
{
	BUG();
}

static inline bool completion_done(struct completion *c)
{
	return c->count;
}

/* TODO: Implement complete_all */
static inline void complete_all(struct completion *c)
{
	BUG();
}