// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 *
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
/* Out-of-line slow paths for the contended cases, defined at end of file. */
static noinline void __down(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem);
39 * down - acquire the semaphore
40 * @sem: the semaphore to be acquired
42 * Acquires the semaphore. If no more tasks are allowed to acquire the
43 * semaphore, calling this function will put the task to sleep until the
44 * semaphore is released.
46 * Use of this function is deprecated, please use down_interruptible() or
47 * down_killable() instead.
49 void down(struct semaphore *sem)
53 raw_spin_lock_irqsave(&sem->lock, flags);
54 if (likely(sem->count > 0))
58 raw_spin_unlock_irqrestore(&sem->lock, flags);
63 * down_trylock - try to acquire the semaphore, without waiting
64 * @sem: the semaphore to be acquired
66 * Try to acquire the semaphore atomically. Returns 0 if the semaphore has
67 * been acquired successfully or 1 if it it cannot be acquired.
69 * NOTE: This return value is inverted from both spin_trylock and
70 * mutex_trylock! Be careful about this when converting code.
72 * Unlike mutex_trylock, this function can be used from interrupt context,
73 * and the semaphore can be released by any task or interrupt.
75 int down_trylock(struct semaphore *sem)
80 raw_spin_lock_irqsave(&sem->lock, flags);
81 count = sem->count - 1;
82 if (likely(count >= 0))
84 raw_spin_unlock_irqrestore(&sem->lock, flags);
88 EXPORT_SYMBOL(down_trylock);
91 * down_timeout - acquire the semaphore within a specified time
92 * @sem: the semaphore to be acquired
93 * @timeout: how long to wait before failing
95 * Attempts to acquire the semaphore. If no more tasks are allowed to
96 * acquire the semaphore, calling this function will put the task to sleep.
97 * If the semaphore is not released within the specified number of jiffies,
98 * this function returns -ETIME. It returns 0 if the semaphore was acquired.
100 int down_timeout(struct semaphore *sem, long timeout)
105 raw_spin_lock_irqsave(&sem->lock, flags);
106 if (likely(sem->count > 0))
109 result = __down_timeout(sem, timeout);
110 raw_spin_unlock_irqrestore(&sem->lock, flags);
114 EXPORT_SYMBOL(down_timeout);
117 * up - release the semaphore
118 * @sem: the semaphore to release
120 * Release the semaphore. Unlike mutexes, up() may be called from any
121 * context and even by tasks which have never called down().
123 void up(struct semaphore *sem)
127 raw_spin_lock_irqsave(&sem->lock, flags);
128 if (likely(list_empty(&sem->wait_list)))
132 raw_spin_unlock_irqrestore(&sem->lock, flags);
136 /* Functions for the contended case */
138 struct semaphore_waiter {
139 struct list_head list;
140 struct task_struct *task;
145 * Because this function is inlined, the 'state' parameter will be
146 * constant, and thus optimised away by the compiler. Likewise the
147 * 'timeout' parameter for the cases without timeouts.
149 static inline int __sched __down_common(struct semaphore *sem, long state,
152 struct semaphore_waiter waiter;
154 list_add_tail(&waiter.list, &sem->wait_list);
155 waiter.task = current;
159 if (unlikely(timeout <= 0))
161 __set_current_state(state);
162 raw_spin_unlock_irq(&sem->lock);
163 timeout = schedule_timeout(timeout);
164 raw_spin_lock_irq(&sem->lock);
170 list_del(&waiter.list);
174 static noinline void __sched __down(struct semaphore *sem)
176 __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
179 static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
181 return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
184 static noinline void __sched __up(struct semaphore *sem)
186 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
187 struct semaphore_waiter, list);
188 list_del(&waiter->list);
190 wake_up_process(waiter->task);