Merge branches 'pm-core', 'pm-qos', 'pm-domains' and 'pm-opp'

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 935116723a3d4c06fa5c63369c545a8fdbb07033..ad2d9e22697b92125a643efd049bd2d3e7a54352 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -325,8 +325,6 @@ __ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
 static __always_inline void
 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-       unsigned long flags;
-
        ww_mutex_lock_acquired(lock, ctx);
 
        lock->ctx = ctx;
@@ -350,9 +348,9 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
         * Uh oh, we raced in fastpath, wake up everyone in this case,
         * so they can see the new lock->ctx.
         */
-       spin_lock_mutex(&lock->base.wait_lock, flags);
+       spin_lock(&lock->base.wait_lock);
        __ww_mutex_wakeup_for_backoff(&lock->base, ctx);
-       spin_unlock_mutex(&lock->base.wait_lock, flags);
+       spin_unlock(&lock->base.wait_lock);
 }
 
 /*
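Context for the spin_lock_mutex() -> spin_lock() conversions throughout this diff: spin_lock_mutex()/spin_unlock_mutex() were mutex-private wrappers whose CONFIG_DEBUG_MUTEXES flavour disabled interrupts around wait_lock, which is what the on-stack "flags" word was for. With that requirement gone, the stock spinlock API suffices and every flags local can be deleted, as the hunks above and below do. Roughly what the old wrappers looked like (a reconstructed sketch, not verbatim):

	/* Reconstructed sketch of the old private wrappers; not verbatim. */
	#ifdef CONFIG_DEBUG_MUTEXES
	# define spin_lock_mutex(lock, flags)			\
		do {						\
			DEBUG_LOCKS_WARN_ON(in_interrupt());	\
			local_irq_save(flags);			\
			arch_spin_lock(&(lock)->rlock.raw_lock);\
		} while (0)
	#else
	/* Non-debug builds never used flags at all. */
	# define spin_lock_mutex(lock, flags)			\
		do { spin_lock(lock); (void)(flags); } while (0)
	#endif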
@@ -740,7 +738,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
        struct mutex_waiter waiter;
-       unsigned long flags;
        bool first = false;
        struct ww_mutex *ww;
        int ret;
@@ -766,7 +763,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                return 0;
        }
 
-       spin_lock_mutex(&lock->wait_lock, flags);
+       spin_lock(&lock->wait_lock);
        /*
         * After waiting to acquire the wait_lock, try again.
         */
@@ -830,7 +827,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                                goto err;
                }
 
-               spin_unlock_mutex(&lock->wait_lock, flags);
+               spin_unlock(&lock->wait_lock);
                schedule_preempt_disabled();
 
                /*
@@ -853,9 +850,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
                        break;
 
-               spin_lock_mutex(&lock->wait_lock, flags);
+               spin_lock(&lock->wait_lock);
        }
-       spin_lock_mutex(&lock->wait_lock, flags);
+       spin_lock(&lock->wait_lock);
 acquired:
        __set_current_state(TASK_RUNNING);
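Taken together, these hunks preserve a subtle locking dance in __mutex_lock_common(): wait_lock is held at the top of the wait loop, dropped across schedule_preempt_disabled(), and re-taken either at the bottom of the loop or, when the trylock/optimistic spin succeeds and breaks out, immediately before the acquired: label. A condensed sketch of the loop's shape (error paths and waiter bookkeeping elided):

	spin_lock(&lock->wait_lock);
	/* ... enqueue waiter ... */
	for (;;) {
		if (__mutex_trylock(lock))
			goto acquired;		/* wait_lock still held */

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx,
						    use_ww_ctx, &waiter)))
			break;			/* wait_lock NOT held */

		spin_lock(&lock->wait_lock);	/* loop around locked */
	}
	spin_lock(&lock->wait_lock);		/* rejoin the locked exit path */
acquired: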
 
@@ -872,7 +869,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        if (use_ww_ctx && ww_ctx)
                ww_mutex_set_context_slowpath(ww, ww_ctx);
 
-       spin_unlock_mutex(&lock->wait_lock, flags);
+       spin_unlock(&lock->wait_lock);
        preempt_enable();
        return 0;
 
@@ -880,7 +877,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        __set_current_state(TASK_RUNNING);
        mutex_remove_waiter(lock, &waiter, current);
 err_early_backoff:
-       spin_unlock_mutex(&lock->wait_lock, flags);
+       spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
        preempt_enable();
@@ -932,6 +929,20 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 }
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 
+void __sched
+mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
+{
+       int token;
+
+       might_sleep();
+
+       token = io_schedule_prepare();
+       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
+                           subclass, NULL, _RET_IP_, NULL, 0);
+       io_schedule_finish(token);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
+
 static inline int
 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
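The new mutex_lock_io_nested() (and mutex_lock_io() further down) brackets the acquisition with io_schedule_prepare()/io_schedule_finish(), so time spent blocked on the mutex is charged to iowait. The token is simply the task's previous in_iowait value, which makes the pair safe to nest. An approximate sketch of the scheduler-side helpers this relies on (from the same patch series; not verbatim):

	int io_schedule_prepare(void)
	{
		int old_iowait = current->in_iowait;

		current->in_iowait = 1;	/* blocked time now counts as iowait */
		blk_schedule_flush_plug(current);
		return old_iowait;
	}

	void io_schedule_finish(int token)
	{
		current->in_iowait = token;	/* restore, don't clear: nests */
	}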
@@ -999,8 +1010,8 @@ EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
 {
        struct task_struct *next = NULL;
-       unsigned long owner, flags;
        DEFINE_WAKE_Q(wake_q);
+       unsigned long owner;
 
        mutex_release(&lock->dep_map, 1, ip);
 
@@ -1035,7 +1046,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
                owner = old;
        }
 
-       spin_lock_mutex(&lock->wait_lock, flags);
+       spin_lock(&lock->wait_lock);
        debug_mutex_unlock(lock);
        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
@@ -1052,7 +1063,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
        if (owner & MUTEX_FLAG_HANDOFF)
                __mutex_handoff(lock, next);
 
-       spin_unlock_mutex(&lock->wait_lock, flags);
+       spin_unlock(&lock->wait_lock);
 
        wake_up_q(&wake_q);
 }
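The unlock slowpath keeps the established wake_q discipline: the waiter to wake is picked while wait_lock is held, but the wakeup itself is issued only after the lock is dropped, keeping the spinlock hold time short. In outline:

	DEFINE_WAKE_Q(wake_q);

	spin_lock(&lock->wait_lock);
	/* select the first waiter while serialized on the wait list */
	wake_q_add(&wake_q, next);
	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);	/* wakeups run without wait_lock held */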
@@ -1102,6 +1113,16 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
+void __sched mutex_lock_io(struct mutex *lock)
+{
+       int token;
+
+       token = io_schedule_prepare();
+       mutex_lock(lock);
+       io_schedule_finish(token);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_io);
+
 static noinline void __sched
 __mutex_lock_slowpath(struct mutex *lock)
 {
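For callers, mutex_lock_io() is a drop-in replacement for mutex_lock() on paths where blocking on the mutex effectively means waiting for I/O. A hypothetical user (the lock and function names here are invented for illustration):

	static DEFINE_MUTEX(journal_mutex);	/* hypothetical lock */

	static void commit_transaction(void)
	{
		/*
		 * The current holder is typically blocked on disk I/O,
		 * so account our wait as iowait rather than idle sleep.
		 */
		mutex_lock_io(&journal_mutex);
		/* ... write out the transaction ... */
		mutex_unlock(&journal_mutex);
	}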