// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"
struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct rhashtable_iter hti;	/* rhashtable iterator */
	struct gfs2_glock *gl;		/* current glock struct */
	loff_t last_pos;		/* last position */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)
static const struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = offsetofend(struct lm_lockname, ln_type),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;
struct wait_glock_queue {
	struct lm_lockname *name;
	wait_queue_entry_t wait;
};
static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			       int sync, void *key)
{
	struct wait_glock_queue *wait_glock =
		container_of(wait, struct wait_glock_queue, wait);
	struct lm_lockname *wait_name = wait_glock->name;
	struct lm_lockname *wake_name = key;

	if (wake_name->ln_sbd != wait_name->ln_sbd ||
	    wake_name->ln_number != wait_name->ln_number ||
	    wake_name->ln_type != wait_name->ln_type)
		return 1;
	return autoremove_wake_function(wait, mode, sync, key);
}
static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);

	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}

/**
 * wake_up_glock - Wake up waiters on a glock
 * @gl: The glock
 */

static void wake_up_glock(struct gfs2_glock *gl)
{
	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}
static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}
void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	BUG_ON(atomic_read(&gl->gl_revokes));
	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	smp_mb();
	wake_up_glock(gl);
	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}
/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}
/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}
void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);

	list_del(&gl->gl_lru);
	list_add_tail(&gl->gl_lru, &lru_list);

	if (!test_bit(GLF_LRU, &gl->gl_flags)) {
		set_bit(GLF_LRU, &gl->gl_flags);
		atomic_inc(&lru_count);
	}

	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);
	if (test_bit(GLF_LRU, &gl->gl_flags)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}
/*
 * Enqueue the glock on the work queue.  Passes one glock reference on to the
 * work queue.
 */
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
{
	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
		/*
		 * We are holding the lockref spinlock, and the work was still
		 * queued above.  The queued work (glock_work_func) takes that
		 * spinlock before dropping its glock reference(s), so it
		 * cannot have dropped them in the meantime.
		 */
		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
		gl->gl_lockref.count--;
	}
}

static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
{
	spin_lock(&gl->gl_lockref.lock);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}
static void __gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/*
 * Cause the glock to be put in work queue context.
 */
void gfs2_glock_queue_put(struct gfs2_glock *gl)
{
	gfs2_glock_queue_work(gl, 0);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	__gfs2_glock_put(gl);
}
/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
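/*
 * For example (an illustration of the rules above, not additional logic):
 * with the glock in LM_ST_SHARED and a shared holder at the head of the
 * queue, a new LM_ST_SHARED request is grantable immediately, whereas an
 * LM_ST_EXCLUSIVE request must wait until it is at the head of the queue
 * and the glock itself has been moved to LM_ST_EXCLUSIVE by do_xmote().
 */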
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
	if (gh->gh_flags & GL_ASYNC) {
		struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;

		wake_up(&sdp->sd_async_glock_wait);
	}
}
/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM
 */

static void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}
/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_lockref.lock);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_lockref.lock);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}
/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}
/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_lockref.lock);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
			       gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_lockref.lock);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_lockref.lock);
}
/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
	int ret;

	if (unlikely(gfs2_withdrawn(sdp)) &&
	    target != LM_ST_UNLOCKED)
		return;
	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
		    target == LM_ST_UNLOCKED &&
		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
			finish_xmote(gl, target);
			gfs2_glock_queue_work(gl, 0);
		} else if (ret) {
			fs_err(sdp, "lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp));
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		gfs2_glock_queue_work(gl, 0);
	}

	spin_lock(&gl->gl_lockref.lock);
}
/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}
/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	gl->gl_lockref.count++;
	__gfs2_glock_queue_work(gl, 0);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	return;
}
static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	/* If someone's using this glock to create a new dinode, the block must
	   have been freed by another node, then re-used, in which case our
	   iopen callback is too late after the fact. Ignore it. */
	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
		goto out;

	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (!IS_ERR_OR_NULL(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
out:
	gfs2_glock_put(gl);
}
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	unsigned int drop_refs = 1;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_refs++;
	}
	spin_lock(&gl->gl_lockref.lock);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	if (delay) {
		/* Keep one glock reference for the work we requeue. */
		drop_refs--;
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		__gfs2_glock_queue_work(gl, delay);
	}

	/*
	 * Drop the remaining glock references manually here. (Mind that
	 * __gfs2_glock_queue_work depends on the lockref spinlock being held
	 * here as well.)
	 */
	gl->gl_lockref.count -= drop_refs;
	if (!gl->gl_lockref.count) {
		__gfs2_glock_put(gl);
		return;
	}
	spin_unlock(&gl->gl_lockref.lock);
}
static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
					    struct gfs2_glock *new)
{
	struct wait_glock_queue wait;
	wait_queue_head_t *wq = glock_waitqueue(name);
	struct gfs2_glock *gl;

	wait.name = name;
	init_wait(&wait.wait);
	wait.wait.func = glock_wake_function;

again:
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	rcu_read_lock();
	if (new) {
		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
			&new->gl_node, ht_parms);
		if (IS_ERR(gl))
			goto out;
	} else {
		gl = rhashtable_lookup_fast(&gl_hash_table,
			name, ht_parms);
	}
	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
		rcu_read_unlock();
		schedule();
		goto again;
	}
out:
	rcu_read_unlock();
	finish_wait(wq, &wait.wait);
	return gl;
}
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp;
	struct address_space *mapping;
	struct kmem_cache *cachep;
	int ret = 0;

	gl = find_insert_glock(&name, NULL);
	if (gl) {
		*glp = gl;
		return 0;
	}
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_NOFS);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_node.next = NULL;
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_ops = glops;
	gl->gl_dstamp = 0;
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->writeback_index = 0;
	}

	tmp = find_insert_glock(&name, gl);
	if (!tmp) {
		*glp = gl;
		goto out;
	}
	if (IS_ERR(tmp)) {
		ret = PTR_ERR(tmp);
		goto out_free;
	}
	*glp = tmp;

out_free:
	kfree(gl->gl_lksb.sb_lvbptr);
	kmem_cache_free(cachep, gl);
	atomic_dec(&sdp->sd_glock_disposal);

out:
	return ret;
}
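/*
 * A minimal usage sketch (illustrative only, not called from this file):
 * look up or create the inode glock for block "no_addr" and drop the
 * reference when done.  "sdp" and "no_addr" are assumed to exist in the
 * caller's context:
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_put(gl);
 */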
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = _RET_IP_;
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}
/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}
/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gfs2_holder_mark_uninitialized(gh);
	gh->gh_ip = 0;
}
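/*
 * A typical holder lifecycle (illustrative sketch): gfs2_glock_nq_init()
 * combines gfs2_holder_init() and gfs2_glock_nq(), and
 * gfs2_glock_dq_uninit() undoes both.  "ip" is an assumed struct gfs2_inode
 * from the caller's context:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	... access the inode under the shared lock ...
 *	gfs2_glock_dq_uninit(&gh);
 */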
static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
					unsigned long start_time)
{
	/* Have we waited longer than a second? */
	if (time_after(jiffies, start_time + HZ)) {
		/* Lengthen the minimum hold time. */
		gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
				       GL_GLOCK_MAX_HOLD);
	}
}
/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long start_time = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	gfs2_glock_update_hold_time(gh->gh_gl, start_time);
	return gh->gh_error;
}

static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int i;

	for (i = 0; i < num_gh; i++)
		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
			return 1;
	return 0;
}
/**
 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
 * @num_gh: the number of holders in the array
 * @ghs: the glock holder array
 *
 * Returns: 0 on success, meaning all glocks have been granted and are held.
 *          -ESTALE if the request timed out, meaning all glocks were released,
 *          and the caller should retry the operation.
 */

int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
	int i, ret = 0, timeout = 0;
	unsigned long start_time = jiffies;
	bool keep_waiting;

	might_sleep();
	/*
	 * Total up the (minimum hold time * 2) of all glocks and use that to
	 * determine the max amount of time we should wait.
	 */
	for (i = 0; i < num_gh; i++)
		timeout += ghs[i].gh_gl->gl_hold_time << 1;

wait_for_dlm:
	if (!wait_event_timeout(sdp->sd_async_glock_wait,
				!glocks_pending(num_gh, ghs), timeout))
		ret = -ESTALE; /* request timed out. */

	/*
	 * If dlm granted all our requests, we need to adjust the glock
	 * minimum hold time values according to how long we waited.
	 *
	 * If our request timed out, we need to repeatedly release any held
	 * glocks we acquired thus far to allow dlm to acquire the remaining
	 * glocks without deadlocking.  We cannot currently cancel outstanding
	 * glock acquisitions.
	 *
	 * The HIF_WAIT bit tells us which requests still need a response from
	 * dlm.
	 *
	 * If dlm sent us any errors, we return the first error we find.
	 */
	keep_waiting = false;
	for (i = 0; i < num_gh; i++) {
		/* Skip holders we have already dequeued below. */
		if (!gfs2_holder_queued(&ghs[i]))
			continue;
		/* Skip holders with a pending DLM response. */
		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) {
			keep_waiting = true;
			continue;
		}

		if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) {
			if (ret == -ESTALE)
				gfs2_glock_dq(&ghs[i]);
			else
				gfs2_glock_update_hold_time(ghs[i].gh_gl,
							    start_time);
		}
		if (!ret)
			ret = ghs[i].gh_error;
	}

	if (keep_waiting)
		goto wait_for_dlm;

	/*
	 * At this point, we've either acquired all locks or released them all.
	 */
	return ret;
}
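/*
 * Callers are expected to have queued each holder with GL_ASYNC set via
 * gfs2_glock_nq() before calling gfs2_glock_async_wait().  A minimal sketch
 * of the intended pattern (assumed caller context, not code from this file):
 *
 *	for (i = 0; i < num_gh; i++)
 *		gfs2_glock_nq(&ghs[i]);		(holders carry GL_ASYNC)
 *	error = gfs2_glock_async_wait(num_gh, ghs);
 *	if (error == -ESTALE)
 *		... all glocks were released; re-queue and wait again ...
 */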
/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately; otherwise a pending demote
 * @remote: true if this request came from a different cluster node
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		GLOCK_BUG_ON(gl, true);

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_lockref.lock);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_lockref.lock);
	}
	return;

trap_recursive:
	fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl, true);
	BUG();
}
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error = 0;

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	add_to_queue(gh);
	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
		gl->gl_lockref.count++;
		__gfs2_glock_queue_work(gl, 0);
	}
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
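/*
 * Together with GL_ASYNC this allows overlapping other work with lock
 * acquisition.  An illustrative sketch (assumed caller context;
 * do_other_work() is a hypothetical stand-in for unrelated work):
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);
 *	while (!gfs2_glock_poll(&gh))
 *		do_other_work();
 *	error = gfs2_glock_wait(&gh);
 */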
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_lockref.lock);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);

	list_del_init(&gh->gh_list);
	clear_bit(HIF_HOLDER, &gh->gh_iflags);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_lockref.lock);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_lockref.lock);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
		gfs2_glock_add_to_lru(gl);

	trace_gfs2_glock_queue(gh, 0);
	if (unlikely(!fast_path)) {
		gl->gl_lockref.count++;
		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
		    gl->gl_name.ln_type == LM_TYPE_INODE)
			delay = gl->gl_hold_time;
		__gfs2_glock_queue_work(gl, delay);
	}
	spin_unlock(&gl->gl_lockref.lock);
}
void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}
/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if A > B
 *         -1 if A < B
 *          0 if A == B
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: placeholder for the holder structure to pass back
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
				    GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}
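/*
 * An illustrative sketch of the multiple-glock API (assumed caller context):
 * the holders in "ghs" are already initialized, and gfs2_glock_nq_m() sorts
 * them into a deadlock-free acquisition order:
 *
 *	error = gfs2_glock_nq_m(num_gh, ghs);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_dq_m(num_gh, ghs);
 */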
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}

	spin_lock(&gl->gl_lockref.lock);
	handle_callback(gl, state, delay, true);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}
/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}
/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

	spin_lock(&gl->gl_lockref.lock);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_lockref.lock);
			return;
		}
	}

	gl->gl_lockref.count++;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	__gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}
static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_glock *gla, *glb;

	gla = list_entry(a, struct gfs2_glock, gl_lru);
	glb = list_entry(b, struct gfs2_glock, gl_lru);

	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
		return 1;
	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
		return -1;

	return 0;
}
/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */

static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
	struct gfs2_glock *gl;

	list_sort(NULL, list, glock_cmp);

	while(!list_empty(list)) {
		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
			list_add(&gl->gl_lru, &lru_list);
			set_bit(GLF_LRU, &gl->gl_flags);
			atomic_inc(&lru_count);
			continue;
		}
		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			spin_unlock(&gl->gl_lockref.lock);
			goto add_back_to_lru;
		}
		gl->gl_lockref.count++;
		if (demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
		__gfs2_glock_queue_work(gl, 0);
		spin_unlock(&gl->gl_lockref.lock);
		cond_resched_lock(&lru_lock);
	}
}
/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */

static long gfs2_scan_glock_lru(int nr)
{
	struct gfs2_glock *gl;
	LIST_HEAD(skipped);
	LIST_HEAD(dispose);
	long freed = 0;

	spin_lock(&lru_lock);
	while ((nr-- >= 0) && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

		/* Test for being demotable */
		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
			list_move(&gl->gl_lru, &dispose);
			atomic_dec(&lru_count);
			clear_bit(GLF_LRU, &gl->gl_flags);
			freed++;
			continue;
		}

		list_move(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	if (!list_empty(&dispose))
		gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}
static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
	.seeks = DEFAULT_SEEKS,
	.count_objects = gfs2_glock_shrink_count,
	.scan_objects = gfs2_glock_shrink_scan,
};
/**
 * glock_hash_walk - Call a function for each glock of a filesystem
 * @examiner: the function
 * @sdp: the filesystem
 *
 * Note that the function can be called multiple times on the same
 * object.  So the user must ensure that the function can cope with
 * that.
 */

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct rhashtable_iter iter;

	rhashtable_walk_enter(&gl_hash_table, &iter);

	do {
		rhashtable_walk_start(&iter);

		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
			if (gl->gl_name.ln_sbd == sdp &&
			    lockref_get_not_dead(&gl->gl_lockref))
				examiner(gl);

		rhashtable_walk_stop(&iter);
	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));

	rhashtable_walk_exit(&iter);
}
/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
		gfs2_glock_put(gl);
		return;
	}
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_queue_work(gl, 0);
}
/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
	__gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}
static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{
	spin_lock(&gl->gl_lockref.lock);
	gfs2_dump_glock(seq, gl, fsid);
	spin_unlock(&gl->gl_lockref.lock);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl, true);
}
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
	flush_workqueue(glock_workqueue);
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event_timeout(sdp->sd_glock_wait,
			   atomic_read(&sdp->sd_glock_disposal) == 0,
			   HZ * 600);
	glock_hash_walk(dump_glock_func, sdp);
}
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);

	spin_lock(&gl->gl_lockref.lock);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);
}
static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}
/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 * @fs_id_buf: pointer to file system id (if requested)
 *
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
			const char *fs_id_buf)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	rcu_read_lock();
	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       fs_id_buf, state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	rcu_read_unlock();
}
static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	*p = 0;
	return buf;
}
/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 * @fsid: If true, also dump the file system id
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 */

void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];

	memset(fs_id_buf, 0, sizeof(fs_id_buf));
	if (fsid && sdp) /* safety precaution */
		sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
		       "v:%d r:%d m:%ld\n", fs_id_buf, state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, gl),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_revokes),
		       (int)gl->gl_lockref.count, gl->gl_hold_time);

	list_for_each_entry(gh, &gl->gl_holders, gh_list)
		dump_holder(seq, gh, fs_id_buf);

	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		glops->go_dump(seq, gl, fs_id_buf);
}
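/*
 * For illustration, a glock and one holder dumped in the format described
 * above look roughly like this (all values invented):
 *
 *	G:  s:SH n:2/27bc f:q t:SH d:EX/0 a:0 v:0 r:3 m:200
 *	 H: s:SH f:H e:0 p:1398 [cat] gfs2_inode_lookup+0x11c/0x2a0
 */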
static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}
static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT]		= "srtt",
	[GFS2_LKS_SRTTVAR]	= "srttvar",
	[GFS2_LKS_SRTTB]	= "srttb",
	[GFS2_LKS_SRTTVARB]	= "srttvarb",
	[GFS2_LKS_SIRT]		= "sirt",
	[GFS2_LKS_SIRTVAR]	= "sirtvar",
	[GFS2_LKS_DCOUNT]	= "dlm",
	[GFS2_LKS_QCOUNT]	= "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_sbd *sdp = seq->private;
	loff_t pos = *(loff_t *)iter_ptr;
	unsigned index = pos >> 3;
	unsigned subindex = pos & 0x07;
	int i;

	if (index == 0 && subindex != 0)
		return 0;

	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
		   (index == 0) ? "cpu": gfs2_stype[subindex]);

	for_each_possible_cpu(i) {
		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);

		if (index == 0)
			seq_printf(seq, " %15u", i);
		else
			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
				   lkstats[index - 1].stats[subindex]);
	}
	seq_putc(seq, '\n');
	return 0;
}
int __init gfs2_glock_init(void)
{
	int i, ret;

	ret = rhashtable_init(&gl_hash_table, &ht_parms);
	if (ret < 0)
		return ret;

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue) {
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}

	ret = register_shrinker(&glock_shrinker);
	if (ret) {
		destroy_workqueue(gfs2_delete_workqueue);
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return ret;
	}

	for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(glock_wait_table + i);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	rhashtable_destroy(&gl_hash_table);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}
static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
{
	struct gfs2_glock *gl = gi->gl;

	if (gl) {
		if (n == 0)
			return;
		if (!lockref_put_not_zero(&gl->gl_lockref))
			gfs2_glock_queue_put(gl);
	}
	for (;;) {
		gl = rhashtable_walk_next(&gi->hti);
		if (IS_ERR_OR_NULL(gl)) {
			if (gl == ERR_PTR(-EAGAIN)) {
				n = 1;
				continue;
			}
			gl = NULL;
			break;
		}
		if (gl->gl_name.ln_sbd != gi->sdp)
			continue;
		if (n <= 1) {
			if (!lockref_get_not_dead(&gl->gl_lockref))
				continue;
			break;
		} else {
			if (__lockref_is_dead(&gl->gl_lockref))
				continue;
			n--;
		}
	}
	gi->gl = gl;
}
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n;

	/*
	 * We can either stay where we are, skip to the next hash table
	 * entry, or start from the beginning.
	 */
	if (*pos < gi->last_pos) {
		rhashtable_walk_exit(&gi->hti);
		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
		n = *pos + 1;
	} else {
		n = *pos - gi->last_pos;
	}

	rhashtable_walk_start(&gi->hti);

	gfs2_glock_iter_next(gi, n);
	gi->last_pos = *pos;
	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	gfs2_glock_iter_next(gi, 1);
	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
	__releases(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;

	rhashtable_walk_stop(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	dump_glock(seq, iter_ptr, false);
	return 0;
}
static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	preempt_disable();
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	(*pos)++;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}
static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
static int __gfs2_glocks_open(struct inode *inode, struct file *file,
			      const struct seq_operations *ops)
{
	int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;

		gi->gl = NULL;
		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
		/*
		 * Initially, we are "before" the first hash table entry; the
		 * first call to rhashtable_walk_next gets us the first entry.
		 */
		gi->last_pos = -1;
		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
	}
	return ret;
}

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
}
static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		gfs2_glock_put(gi->gl);
	rhashtable_walk_exit(&gi->hti);
	return seq_release_private(inode, file);
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &gfs2_sbstats_seq_ops);
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;  /* sdp */
	}
	return ret;
}
static const struct file_operations gfs2_glocks_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glocks_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_sbstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_sbstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);

	debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_glocks_fops);

	debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_glstats_fops);

	debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_sbstats_fops);
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	debugfs_remove_recursive(sdp->debugfs_dir);
	sdp->debugfs_dir = NULL;
}

void gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}