// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
        struct gfs2_sbd *sdp;           /* incore superblock           */
        struct rhashtable_iter hti;     /* rhashtable iterator         */
        struct gfs2_glock *gl;          /* current glock struct        */
        loff_t last_pos;                /* last position               */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

static const struct rhashtable_params ht_parms = {
        .nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
        .key_len = offsetofend(struct lm_lockname, ln_type),
        .key_offset = offsetof(struct gfs2_glock, gl_name),
        .head_offset = offsetof(struct gfs2_glock, gl_node),
};

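/*
 * Note: because .key_len is offsetofend(ln_type), the rhashtable key is the
 * leading portion of struct lm_lockname up to and including ln_type, so a
 * glock is effectively looked up by (superblock, lock number, lock type),
 * the same triple that glock_wake_function() below compares.
 */
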
static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {
        struct lm_lockname *name;
        wait_queue_entry_t wait;
};

static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
                               int sync, void *key)
{
        struct wait_glock_queue *wait_glock =
                container_of(wait, struct wait_glock_queue, wait);
        struct lm_lockname *wait_name = wait_glock->name;
        struct lm_lockname *wake_name = key;

        if (wake_name->ln_sbd != wait_name->ln_sbd ||
            wake_name->ln_number != wait_name->ln_number ||
            wake_name->ln_type != wait_name->ln_type)
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
}

static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
        u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);

        return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}

/**
 * wake_up_glock  -  Wake up waiters on a glock
 * @gl: Pointer to the glock
 */
static void wake_up_glock(struct gfs2_glock *gl)
{
        wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}

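/*
 * There is one wait queue per hash bucket of glock names rather than one
 * per glock; glock_wake_function() above filters out wakeups meant for a
 * different name that happened to hash to the same table entry.
 */
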
static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
        struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

        if (gl->gl_ops->go_flags & GLOF_ASPACE) {
                kmem_cache_free(gfs2_glock_aspace_cachep, gl);
        } else {
                kfree(gl->gl_lksb.sb_lvbptr);
                kmem_cache_free(gfs2_glock_cachep, gl);
        }
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        BUG_ON(atomic_read(&gl->gl_revokes));
        rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
        smp_mb();
        wake_up_glock(gl);
        call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
        if (atomic_dec_and_test(&sdp->sd_glock_disposal))
                wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
        lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gl->gl_state == LM_ST_UNLOCKED)
                return 0;
        if (!list_empty(&gl->gl_holders))
                return 0;
        if (glops->go_demote_ok)
                return glops->go_demote_ok(gl);
        return 1;
}

void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
        if (!(gl->gl_ops->go_flags & GLOF_LRU))
                return;

        spin_lock(&lru_lock);

        list_del(&gl->gl_lru);
        list_add_tail(&gl->gl_lru, &lru_list);

        if (!test_bit(GLF_LRU, &gl->gl_flags)) {
                set_bit(GLF_LRU, &gl->gl_flags);
                atomic_inc(&lru_count);
        }

        spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
        if (!(gl->gl_ops->go_flags & GLOF_LRU))
                return;

        spin_lock(&lru_lock);
        if (test_bit(GLF_LRU, &gl->gl_flags)) {
                list_del_init(&gl->gl_lru);
                atomic_dec(&lru_count);
                clear_bit(GLF_LRU, &gl->gl_flags);
        }
        spin_unlock(&lru_lock);
}

/*
 * Enqueue the glock on the work queue.  Passes one glock reference on to the
 * work queue.
 */
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
        if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
                /*
                 * We are holding the lockref spinlock, and the work was still
                 * queued above.  The queued work (glock_work_func) takes that
                 * spinlock before dropping its glock reference(s), so it
                 * cannot have dropped them in the meantime.
                 */
                GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
                gl->gl_lockref.count--;
        }
}

static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
        spin_lock(&gl->gl_lockref.lock);
        __gfs2_glock_queue_work(gl, delay);
        spin_unlock(&gl->gl_lockref.lock);
}

static void __gfs2_glock_put(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *mapping = gfs2_glock2aspace(gl);

        lockref_mark_dead(&gl->gl_lockref);

        gfs2_glock_remove_from_lru(gl);
        spin_unlock(&gl->gl_lockref.lock);
        GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
        GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
        trace_gfs2_glock_put(gl);
        sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/*
 * Cause the glock to be put in work queue context.
 */
void gfs2_glock_queue_put(struct gfs2_glock *gl)
{
        gfs2_glock_queue_work(gl, 0);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
        if (lockref_put_or_lock(&gl->gl_lockref))
                return;

        __gfs2_glock_put(gl);
}

/**
 * may_grant - check if its ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if its ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
        const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
        if ((gh->gh_state == LM_ST_EXCLUSIVE ||
             gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
                return 0;
        if (gl->gl_state == gh->gh_state)
                return 1;
        if (gh->gh_flags & GL_EXACT)
                return 0;
        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
                        return 1;
                if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
                        return 1;
        }
        if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
                return 1;
        return 0;
}

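/*
 * In short: an EX request (or a queue headed by one) is never shared, and a
 * request matching the current glock state is granted.  Failing an exact
 * match (ruled out by GL_EXACT), an SH or DF request may still be granted
 * under an EX glock state when the queue head asked for the same mode, and
 * LM_FLAG_ANY accepts any locked state.
 */
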
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_atomic();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
        if (gh->gh_flags & GL_ASYNC) {
                struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;

                wake_up(&sdp->sd_async_glock_wait);
        }
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM
 */

static void do_error(struct gfs2_glock *gl, const int ret)
{
        struct gfs2_holder *gh, *tmp;

        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (ret & LM_OUT_ERROR)
                        gh->gh_error = -EIO;
                else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        gh->gh_error = GLR_TRYFAILED;
                else
                        continue;
                list_del_init(&gh->gh_list);
                trace_gfs2_glock_queue(gh, 0);
                gfs2_holder_wake(gh);
        }
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh, *tmp;
        int ret;

restart:
        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (may_grant(gl, gh)) {
                        if (gh->gh_list.prev == &gl->gl_holders &&
                            glops->go_lock) {
                                spin_unlock(&gl->gl_lockref.lock);
                                /* FIXME: eliminate this eventually */
                                ret = glops->go_lock(gh);
                                spin_lock(&gl->gl_lockref.lock);
                                if (ret) {
                                        if (ret == 1)
                                                return 2;
                                        gh->gh_error = ret;
                                        list_del_init(&gh->gh_list);
                                        trace_gfs2_glock_queue(gh, 0);
                                        gfs2_holder_wake(gh);
                                        goto restart;
                                }
                                set_bit(HIF_HOLDER, &gh->gh_iflags);
                                trace_gfs2_promote(gh, 1);
                                gfs2_holder_wake(gh);
                                goto restart;
                        }
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        trace_gfs2_promote(gh, 0);
                        gfs2_holder_wake(gh);
                        continue;
                }
                if (gh->gh_list.prev == &gl->gl_holders)
                        return 1;
                do_error(gl, 0);
                break;
        }
        return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return gh;
        }
        return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
                if (held2)
                        gl->gl_lockref.count++;
                else
                        gl->gl_lockref.count--;
        }
        if (held1 && held2 && list_empty(&gl->gl_holders))
                clear_bit(GLF_QUEUED, &gl->gl_flags);

        if (new_state != gl->gl_target)
                /* shorten our minimum hold time */
                gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
                                       GL_GLOCK_MIN_HOLD);
        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_atomic();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh;
        unsigned state = ret & LM_OUT_ST_MASK;
        int rv;

        spin_lock(&gl->gl_lockref.lock);
        trace_gfs2_glock_state_change(gl, state);
        state_change(gl, state);
        gh = find_first_waiter(gl);

        /* Demote to UN request arrived during demote to SH or DF */
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
            state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
                gl->gl_target = LM_ST_UNLOCKED;

        /* Check for state != intended state */
        if (unlikely(state != gl->gl_target)) {
                if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
                        /* move to back of queue and try next entry */
                        if (ret & LM_OUT_CANCELED) {
                                if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
                                        list_move_tail(&gh->gh_list, &gl->gl_holders);
                                gh = find_first_waiter(gl);
                                gl->gl_target = gh->gh_state;
                                goto retry;
                        }
                        /* Some error or failed "try lock" - report it */
                        if ((ret & LM_OUT_ERROR) ||
                            (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
                                gl->gl_target = gl->gl_state;
                                do_error(gl, ret);
                                goto out;
                        }
                }
                switch(state) {
                /* Unlocked due to conversion deadlock, try again */
                case LM_ST_UNLOCKED:
retry:
                        do_xmote(gl, gh, gl->gl_target);
                        break;
                /* Conversion fails, unlock and try again */
                case LM_ST_SHARED:
                case LM_ST_DEFERRED:
                        do_xmote(gl, gh, LM_ST_UNLOCKED);
                        break;
                default: /* Everything else */
                        fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
                               gl->gl_target, state);
                        GLOCK_BUG_ON(gl, 1);
                }
                spin_unlock(&gl->gl_lockref.lock);
                return;
        }

        /* Fast path - we got what we asked for */
        if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
                gfs2_demote_wake(gl);
        if (state != LM_ST_UNLOCKED) {
                if (glops->go_xmote_bh) {
                        spin_unlock(&gl->gl_lockref.lock);
                        rv = glops->go_xmote_bh(gl, gh);
                        spin_lock(&gl->gl_lockref.lock);
                        if (rv) {
                                do_error(gl, rv);
                                goto out;
                        }
                }
                rv = do_promote(gl);
                if (rv == 2)
                        goto out_locked;
        }
out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
        int ret;

        if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) &&
            target != LM_ST_UNLOCKED)
                return;
        lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
                      LM_FLAG_PRIORITY);
        GLOCK_BUG_ON(gl, gl->gl_state == target);
        GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
        if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
            glops->go_inval) {
                set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
                do_error(gl, 0); /* Fail queued try locks */
        }
        gl->gl_req = target;
        set_bit(GLF_BLOCKING, &gl->gl_flags);
        if ((gl->gl_req == LM_ST_UNLOCKED) ||
            (gl->gl_state == LM_ST_EXCLUSIVE) ||
            (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
                clear_bit(GLF_BLOCKING, &gl->gl_flags);
        spin_unlock(&gl->gl_lockref.lock);
        if (glops->go_sync)
                glops->go_sync(gl);
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
        clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

        gfs2_glock_hold(gl);
        if (sdp->sd_lockstruct.ls_ops->lm_lock) {
                /* lock_dlm */
                ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
                if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
                    target == LM_ST_UNLOCKED &&
                    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
                        finish_xmote(gl, target);
                        gfs2_glock_queue_work(gl, 0);
                } else if (ret) {
                        fs_err(sdp, "lm_lock ret %d\n", ret);
                        GLOCK_BUG_ON(gl, !test_bit(SDF_WITHDRAWN,
                                                   &sdp->sd_flags));
                }
        } else { /* lock_nolock */
                finish_xmote(gl, target);
                gfs2_glock_queue_work(gl, 0);
        }

        spin_lock(&gl->gl_lockref.lock);
}

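/*
 * Note that without an lm_lock operation (the lock_nolock case) state
 * changes complete synchronously via finish_xmote() above; with lock_dlm
 * the reply arrives later through gfs2_glock_complete() below, which sets
 * GLF_REPLY_PENDING and lets glock_work_func() finish the transition.
 */
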
/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        if (!list_empty(&gl->gl_holders)) {
                gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return gh;
        }
        return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
        struct gfs2_holder *gh = NULL;
        int ret;

        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                return;

        GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

        if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
            gl->gl_demote_state != gl->gl_state) {
                if (find_first_holder(gl))
                        goto out_unlock;
                if (nonblock)
                        goto out_sched;
                set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
                GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
                gl->gl_target = gl->gl_demote_state;
        } else {
                if (test_bit(GLF_DEMOTE, &gl->gl_flags))
                        gfs2_demote_wake(gl);
                ret = do_promote(gl);
                if (ret == 0)
                        goto out_unlock;
                if (ret == 2)
                        goto out;
                gh = find_first_waiter(gl);
                gl->gl_target = gh->gh_state;
                if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                        do_error(gl, 0); /* Fail queued try locks */
        }
        do_xmote(gl, gh, gl->gl_target);
out:
        return;

out_sched:
        clear_bit(GLF_LOCK, &gl->gl_flags);
        smp_mb__after_atomic();
        gl->gl_lockref.count++;
        __gfs2_glock_queue_work(gl, 0);
        return;

out_unlock:
        clear_bit(GLF_LOCK, &gl->gl_flags);
        smp_mb__after_atomic();
        return;
}

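/*
 * run_queue() is the heart of the glock state machine: it either promotes
 * waiting holders directly when the glock is already in a compatible state,
 * or picks a new target state and calls do_xmote() to request it from the
 * lock module.  The GLF_LOCK bit ensures only one task runs the state
 * machine for a given glock at a time.
 */
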
static void delete_work_func(struct work_struct *work)
{
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct inode *inode;
        u64 no_addr = gl->gl_name.ln_number;

        /* If someone's using this glock to create a new dinode, the block must
           have been freed by another node, then re-used, in which case our
           iopen callback is too late after the fact. Ignore it. */
        if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
                goto out;

        inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
        if (!IS_ERR_OR_NULL(inode)) {
                d_prune_aliases(inode);
                iput(inode);
        }
out:
        gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
        unsigned long delay = 0;
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
        unsigned int drop_refs = 1;

        if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
                finish_xmote(gl, gl->gl_reply);
                drop_refs++;
        }
        spin_lock(&gl->gl_lockref.lock);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            gl->gl_state != LM_ST_UNLOCKED &&
            gl->gl_demote_state != LM_ST_EXCLUSIVE) {
                unsigned long holdtime, now = jiffies;

                holdtime = gl->gl_tchange + gl->gl_hold_time;
                if (time_before(now, holdtime))
                        delay = holdtime - now;

                if (!delay) {
                        clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
                        set_bit(GLF_DEMOTE, &gl->gl_flags);
                }
        }
        run_queue(gl, 0);
        if (delay) {
                /* Keep one glock reference for the work we requeue. */
                drop_refs--;
                if (gl->gl_name.ln_type != LM_TYPE_INODE)
                        delay = 0;
                __gfs2_glock_queue_work(gl, delay);
        }

        /*
         * Drop the remaining glock references manually here. (Mind that
         * __gfs2_glock_queue_work depends on the lockref spinlock being held
         * here as well.)
         */
        gl->gl_lockref.count -= drop_refs;
        if (!gl->gl_lockref.count) {
                __gfs2_glock_put(gl);
                return;
        }
        spin_unlock(&gl->gl_lockref.lock);
}

static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
                                            struct gfs2_glock *new)
{
        struct wait_glock_queue wait;
        wait_queue_head_t *wq = glock_waitqueue(name);
        struct gfs2_glock *gl;

        wait.name = name;
        init_wait(&wait.wait);
        wait.wait.func = glock_wake_function;

again:
        prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
        rcu_read_lock();
        if (new) {
                gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
                        &new->gl_node, ht_parms);
                if (IS_ERR(gl))
                        goto out;
        } else {
                gl = rhashtable_lookup_fast(&gl_hash_table,
                        name, ht_parms);
        }
        if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
                rcu_read_unlock();
                schedule();
                goto again;
        }
out:
        rcu_read_unlock();
        finish_wait(wq, &wait.wait);
        return gl;
}

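/*
 * The waitqueue dance above closes a race with gfs2_glock_free(): a dying
 * glock stays in the hash table until its final reference is dropped, so a
 * lookup that finds a dead entry sleeps on the per-name wait queue and
 * retries once wake_up_glock() signals that the name has been removed.
 */
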
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct super_block *s = sdp->sd_vfs;
        struct lm_lockname name = { .ln_number = number,
                                    .ln_type = glops->go_type,
                                    .ln_sbd = sdp };
        struct gfs2_glock *gl, *tmp;
        struct address_space *mapping;
        struct kmem_cache *cachep;
        int ret = 0;

        gl = find_insert_glock(&name, NULL);
        if (gl) {
                *glp = gl;
                return 0;
        }
        if (!create)
                return -ENOENT;

        if (glops->go_flags & GLOF_ASPACE)
                cachep = gfs2_glock_aspace_cachep;
        else
                cachep = gfs2_glock_cachep;
        gl = kmem_cache_alloc(cachep, GFP_NOFS);
        if (!gl)
                return -ENOMEM;

        memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

        if (glops->go_flags & GLOF_LVB) {
                gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
                if (!gl->gl_lksb.sb_lvbptr) {
                        kmem_cache_free(cachep, gl);
                        return -ENOMEM;
                }
        }

        atomic_inc(&sdp->sd_glock_disposal);
        gl->gl_node.next = NULL;
        gl->gl_flags = 0;
        gl->gl_name = name;
        gl->gl_lockref.count = 1;
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_target = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_ops = glops;
        gl->gl_dstamp = 0;
        preempt_disable();
        /* We use the global stats to estimate the initial per-glock stats */
        gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
        preempt_enable();
        gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
        gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
        gl->gl_tchange = jiffies;
        gl->gl_object = NULL;
        gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
        INIT_WORK(&gl->gl_delete, delete_work_func);

        mapping = gfs2_glock2aspace(gl);
        if (mapping) {
                mapping->a_ops = &gfs2_meta_aops;
                mapping->host = s->s_bdev->bd_inode;
                mapping->flags = 0;
                mapping_set_gfp_mask(mapping, GFP_NOFS);
                mapping->private_data = NULL;
                mapping->writeback_index = 0;
        }

        tmp = find_insert_glock(&name, gl);
        if (!tmp) {
                *glp = gl;
                goto out;
        }
        if (IS_ERR(tmp)) {
                ret = PTR_ERR(tmp);
                goto out_free;
        }
        *glp = tmp;

out_free:
        kfree(gl->gl_lksb.sb_lvbptr);
        kmem_cache_free(cachep, gl);
        atomic_dec(&sdp->sd_glock_disposal);

out:
        return ret;
}

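/*
 * Typical usage is gfs2_glock_get() followed by gfs2_holder_init() and
 * gfs2_glock_nq(); see gfs2_glock_nq_num() further down in this file,
 * which combines those steps into one convenience call.
 */
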
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = _RET_IP_;
        gh->gh_owner_pid = get_pid(task_pid(current));
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = _RET_IP_;
        put_pid(gh->gh_owner_pid);
        gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        put_pid(gh->gh_owner_pid);
        gfs2_glock_put(gh->gh_gl);
        gfs2_holder_mark_uninitialized(gh);
        gh->gh_ip = 0;
}

static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
                                        unsigned long start_time)
{
        /* Have we waited longer than a second? */
        if (time_after(jiffies, start_time + HZ)) {
                /* Lengthen the minimum hold time. */
                gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
                                       GL_GLOCK_MAX_HOLD);
        }
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        unsigned long start_time = jiffies;

        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
        gfs2_glock_update_hold_time(gh->gh_gl, start_time);
        return gh->gh_error;
}

static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
{
        int i;

        for (i = 0; i < num_gh; i++)
                if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
                        return 1;
        return 0;
}

/**
 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
 * @num_gh: the number of holders in the array
 * @ghs: the glock holder array
 *
 * Returns: 0 on success, meaning all glocks have been granted and are held.
 *          -ESTALE if the request timed out, meaning all glocks were released,
 *          and the caller should retry the operation.
 */

int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
        int i, ret = 0, timeout = 0;
        unsigned long start_time = jiffies;
        bool keep_waiting;

        might_sleep();
        /*
         * Total up the (minimum hold time * 2) of all glocks and use that to
         * determine the max amount of time we should wait.
         */
        for (i = 0; i < num_gh; i++)
                timeout += ghs[i].gh_gl->gl_hold_time << 1;

wait_for_dlm:
        if (!wait_event_timeout(sdp->sd_async_glock_wait,
                                !glocks_pending(num_gh, ghs), timeout))
                ret = -ESTALE; /* request timed out. */

        /*
         * If dlm granted all our requests, we need to adjust the glock
         * minimum hold time values according to how long we waited.
         *
         * If our request timed out, we need to repeatedly release any held
         * glocks we acquired thus far to allow dlm to acquire the remaining
         * glocks without deadlocking.  We cannot currently cancel outstanding
         * glock acquisitions.
         *
         * The HIF_WAIT bit tells us which requests still need a response from
         * dlm.
         *
         * If dlm sent us any errors, we return the first error we find.
         */
        keep_waiting = false;
        for (i = 0; i < num_gh; i++) {
                /* Skip holders we have already dequeued below. */
                if (!gfs2_holder_queued(&ghs[i]))
                        continue;
                /* Skip holders with a pending DLM response. */
                if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) {
                        keep_waiting = true;
                        continue;
                }

                if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) {
                        if (ret == -ESTALE)
                                gfs2_glock_dq(&ghs[i]);
                        else
                                gfs2_glock_update_hold_time(ghs[i].gh_gl,
                                                            start_time);
                }
                if (!ret)
                        ret = ghs[i].gh_error;
        }

        if (keep_waiting)
                goto wait_for_dlm;

        /*
         * At this point, we've either acquired all locks or released them all.
         */
        return ret;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: if nonzero, mark the demote as pending rather than immediate
 * @remote: true if this request came from another node via the lock module
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                            unsigned long delay, bool remote)
{
        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

        set_bit(bit, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                        gl->gl_demote_state != state) {
                gl->gl_demote_state = LM_ST_UNLOCKED;
        }
        if (gl->gl_ops->go_callback)
                gl->gl_ops->go_callback(gl, remote);
        trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        if (seq) {
                seq_vprintf(seq, fmt, args);
        } else {
                vaf.fmt = fmt;
                vaf.va = &args;

                pr_err("%pV", &vaf);
        }

        va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct list_head *insert_pt = NULL;
        struct gfs2_holder *gh2;
        int try_futile = 0;

        GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                GLOCK_BUG_ON(gl, true);

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        try_futile = !may_grant(gl, gh);
                if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                        goto fail;
        }

        list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
                if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
                    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
                        goto trap_recursive;
                if (try_futile &&
                    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
                        gh->gh_error = GLR_TRYFAILED;
                        gfs2_holder_wake(gh);
                        return;
                }
                if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
                        continue;
                if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
                        insert_pt = &gh2->gh_list;
        }
        set_bit(GLF_QUEUED, &gl->gl_flags);
        trace_gfs2_glock_queue(gh, 1);
        gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
        gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
        if (likely(insert_pt == NULL)) {
                list_add_tail(&gh->gh_list, &gl->gl_holders);
                if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
                        goto do_cancel;
                return;
        }
        list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
        gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
        if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
                spin_unlock(&gl->gl_lockref.lock);
                if (sdp->sd_lockstruct.ls_ops->lm_cancel)
                        sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
                spin_lock(&gl->gl_lockref.lock);
        }
        return;

trap_recursive:
        fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
        fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
        fs_err(sdp, "lock type: %d req lock state : %d\n",
               gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
        fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
        fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
        fs_err(sdp, "lock type: %d req lock state : %d\n",
               gh->gh_gl->gl_name.ln_type, gh->gh_state);
        gfs2_dump_glock(NULL, gl, true);
        BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        int error = 0;

        if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
                return -EIO;

        if (test_bit(GLF_LRU, &gl->gl_flags))
                gfs2_glock_remove_from_lru(gl);

        spin_lock(&gl->gl_lockref.lock);
        add_to_queue(gh);
        if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
                     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
                set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
                gl->gl_lockref.count++;
                __gfs2_glock_queue_work(gl, 0);
        }
        run_queue(gl, 1);
        spin_unlock(&gl->gl_lockref.lock);

        if (!(gh->gh_flags & GL_ASYNC))
                error = gfs2_glock_wait(gh);

        return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned delay = 0;
        int fast_path = 0;

        spin_lock(&gl->gl_lockref.lock);
        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, false);

        list_del_init(&gh->gh_list);
        clear_bit(HIF_HOLDER, &gh->gh_iflags);
        if (find_first_holder(gl) == NULL) {
                if (glops->go_unlock) {
                        GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
                        spin_unlock(&gl->gl_lockref.lock);
                        glops->go_unlock(gh);
                        spin_lock(&gl->gl_lockref.lock);
                        clear_bit(GLF_LOCK, &gl->gl_flags);
                }
                if (list_empty(&gl->gl_holders) &&
                    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
                    !test_bit(GLF_DEMOTE, &gl->gl_flags))
                        fast_path = 1;
        }
        if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
                gfs2_glock_add_to_lru(gl);

        trace_gfs2_glock_queue(gh, 0);
        if (unlikely(!fast_path)) {
                gl->gl_lockref.count++;
                if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
                    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
                    gl->gl_name.ln_type == LM_TYPE_INODE)
                        delay = gl->gl_hold_time;
                __gfs2_glock_queue_work(gl, delay);
        }
        spin_unlock(&gl->gl_lockref.lock);
}

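/*
 * The "fast path" above means the holder queue went empty with no demote
 * pending, so the glock can simply be left cached; otherwise the state
 * machine is kicked (with a hold-time delay for inode glocks) to process
 * any pending demote request.
 */
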
void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        gfs2_glock_dq(gh);
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, u16 flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
                                    GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        while (num_gh--)
                gfs2_glock_dq(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;

        gfs2_glock_hold(gl);
        holdtime = gl->gl_tchange + gl->gl_hold_time;
        if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
            gl->gl_name.ln_type == LM_TYPE_INODE) {
                if (time_before(now, holdtime))
                        delay = holdtime - now;
                if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
                        delay = gl->gl_hold_time;
        }

        spin_lock(&gl->gl_lockref.lock);
        handle_callback(gl, state, delay, true);
        __gfs2_glock_queue_work(gl, delay);
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
        const struct gfs2_holder *gh;

        if (gl->gl_reply & ~LM_OUT_ST_MASK)
                return 0;
        if (gl->gl_target == LM_ST_UNLOCKED)
                return 0;

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (LM_FLAG_NOEXP & gh->gh_flags)
                        return 0;
        }

        return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
        struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

        spin_lock(&gl->gl_lockref.lock);
        gl->gl_reply = ret;

        if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
                if (gfs2_should_freeze(gl)) {
                        set_bit(GLF_FROZEN, &gl->gl_flags);
                        spin_unlock(&gl->gl_lockref.lock);
                        return;
                }
        }

        gl->gl_lockref.count++;
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
        __gfs2_glock_queue_work(gl, 0);
        spin_unlock(&gl->gl_lockref.lock);
}

static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct gfs2_glock *gla, *glb;

        gla = list_entry(a, struct gfs2_glock, gl_lru);
        glb = list_entry(b, struct gfs2_glock, gl_lru);

        if (gla->gl_name.ln_number > glb->gl_name.ln_number)
                return 1;
        if (gla->gl_name.ln_number < glb->gl_name.ln_number)
                return -1;

        return 0;
}

/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */

static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
        struct gfs2_glock *gl;

        list_sort(NULL, list, glock_cmp);

        while(!list_empty(list)) {
                gl = list_entry(list->next, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
                if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
                        list_add(&gl->gl_lru, &lru_list);
                        set_bit(GLF_LRU, &gl->gl_flags);
                        atomic_inc(&lru_count);
                        continue;
                }
                if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                        spin_unlock(&gl->gl_lockref.lock);
                        goto add_back_to_lru;
                }
                gl->gl_lockref.count++;
                if (demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, false);
                WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
                __gfs2_glock_queue_work(gl, 0);
                spin_unlock(&gl->gl_lockref.lock);
                cond_resched_lock(&lru_lock);
        }
}

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */

static long gfs2_scan_glock_lru(int nr)
{
        struct gfs2_glock *gl;
        LIST_HEAD(skipped);
        LIST_HEAD(dispose);
        long freed = 0;

        spin_lock(&lru_lock);
        while ((nr-- >= 0) && !list_empty(&lru_list)) {
                gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

                /* Test for being demotable */
                if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
                        list_move(&gl->gl_lru, &dispose);
                        atomic_dec(&lru_count);
                        clear_bit(GLF_LRU, &gl->gl_flags);
                        freed++;
                        continue;
                }

                list_move(&gl->gl_lru, &skipped);
        }
        list_splice(&skipped, &lru_list);
        if (!list_empty(&dispose))
                gfs2_dispose_glock_lru(&dispose);
        spin_unlock(&lru_lock);

        return freed;
}

static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
                                            struct shrink_control *sc)
{
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;
        return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
                                             struct shrink_control *sc)
{
        return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
        .seeks = DEFAULT_SEEKS,
        .count_objects = gfs2_glock_shrink_count,
        .scan_objects = gfs2_glock_shrink_scan,
};

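/*
 * The shrinker lets memory pressure trim the glock LRU: count_objects
 * reports the LRU size and scan_objects demotes up to that many unlocked
 * glocks via gfs2_scan_glock_lru().  It is registered in gfs2_glock_init()
 * below.
 */
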
/**
 * glock_hash_walk - Call a function for each glock in the hash table
 * @examiner: the function
 * @sdp: the filesystem
 *
 * Note that the function can be called multiple times on the same
 * object.  So the user must ensure that the function can cope with
 * that.
 */

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;
        struct rhashtable_iter iter;

        rhashtable_walk_enter(&gl_hash_table, &iter);

        do {
                rhashtable_walk_start(&iter);

                while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
                        if (gl->gl_name.ln_sbd == sdp &&
                            lockref_get_not_dead(&gl->gl_lockref))
                                examiner(gl);

                rhashtable_walk_stop(&iter);
        } while (cond_resched(), gl == ERR_PTR(-EAGAIN));

        rhashtable_walk_exit(&iter);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */

static void thaw_glock(struct gfs2_glock *gl)
{
        if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
                gfs2_glock_put(gl);
                return;
        }

        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
        gfs2_glock_queue_work(gl, 0);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
        gfs2_glock_remove_from_lru(gl);

        spin_lock(&gl->gl_lockref.lock);
        if (gl->gl_state != LM_ST_UNLOCKED)
                handle_callback(gl, LM_ST_UNLOCKED, 0, false);
        __gfs2_glock_queue_work(gl, 0);
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
        glock_hash_walk(thaw_glock, sdp);
}

static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{
        spin_lock(&gl->gl_lockref.lock);
        gfs2_dump_glock(seq, gl, fsid);
        spin_unlock(&gl->gl_lockref.lock);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
        dump_glock(NULL, gl, true);
        gfs2_glock_put(gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
        set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
        flush_workqueue(glock_workqueue);
        glock_hash_walk(clear_glock, sdp);
        flush_workqueue(glock_workqueue);
        wait_event_timeout(sdp->sd_glock_wait,
                           atomic_read(&sdp->sd_glock_disposal) == 0,
                           HZ * 600);
        glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
        struct gfs2_glock *gl = ip->i_gl;
        int ret;

        ret = gfs2_truncatei_resume(ip);
        gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);

        spin_lock(&gl->gl_lockref.lock);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl, 1);
        spin_unlock(&gl->gl_lockref.lock);
}

static const char *state2str(unsigned state)
{
        switch(state) {
        case LM_ST_UNLOCKED:
                return "UN";
        case LM_ST_SHARED:
                return "SH";
        case LM_ST_DEFERRED:
                return "DF";
        case LM_ST_EXCLUSIVE:
                return "EX";
        }
        return "??";
}

static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
        char *p = buf;
        if (flags & LM_FLAG_TRY)
                *p++ = 't';
        if (flags & LM_FLAG_TRY_1CB)
                *p++ = 'T';
        if (flags & LM_FLAG_NOEXP)
                *p++ = 'e';
        if (flags & LM_FLAG_ANY)
                *p++ = 'A';
        if (flags & LM_FLAG_PRIORITY)
                *p++ = 'p';
        if (flags & GL_ASYNC)
                *p++ = 'a';
        if (flags & GL_EXACT)
                *p++ = 'E';
        if (flags & GL_NOCACHE)
                *p++ = 'c';
        if (test_bit(HIF_HOLDER, &iflags))
                *p++ = 'H';
        if (test_bit(HIF_WAIT, &iflags))
                *p++ = 'W';
        if (test_bit(HIF_FIRST, &iflags))
                *p++ = 'F';
        *p = 0;
        return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 * @fs_id_buf: pointer to file system id (if requested)
 *
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
                        const char *fs_id_buf)
{
        struct task_struct *gh_owner = NULL;
        char flags_buf[32];

        rcu_read_lock();
        if (gh->gh_owner_pid)
                gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
        gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
                       fs_id_buf, state2str(gh->gh_state),
                       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
                       gh->gh_error,
                       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
                       gh_owner ? gh_owner->comm : "(ended)",
                       (void *)gh->gh_ip);
        rcu_read_unlock();
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
        const unsigned long *gflags = &gl->gl_flags;
        char *p = buf;

        if (test_bit(GLF_LOCK, gflags))
                *p++ = 'l';
        if (test_bit(GLF_DEMOTE, gflags))
                *p++ = 'D';
        if (test_bit(GLF_PENDING_DEMOTE, gflags))
                *p++ = 'd';
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
                *p++ = 'p';
        if (test_bit(GLF_DIRTY, gflags))
                *p++ = 'y';
        if (test_bit(GLF_LFLUSH, gflags))
                *p++ = 'f';
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
                *p++ = 'i';
        if (test_bit(GLF_REPLY_PENDING, gflags))
                *p++ = 'r';
        if (test_bit(GLF_INITIAL, gflags))
                *p++ = 'I';
        if (test_bit(GLF_FROZEN, gflags))
                *p++ = 'F';
        if (test_bit(GLF_QUEUED, gflags))
                *p++ = 'q';
        if (test_bit(GLF_LRU, gflags))
                *p++ = 'L';
        if (gl->gl_object)
                *p++ = 'o';
        if (test_bit(GLF_BLOCKING, gflags))
                *p++ = 'b';
        *p = 0;
        return buf;
}

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 * @fsid: If true, also dump the file system id
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 */

void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned long long dtime;
        const struct gfs2_holder *gh;
        char gflags_buf[32];
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        char fs_id_buf[sizeof(sdp->sd_fsname) + 7];

        memset(fs_id_buf, 0, sizeof(fs_id_buf));
        if (fsid && sdp) /* safety precaution */
                sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
        dtime = jiffies - gl->gl_demote_time;
        dtime *= 1000000/HZ; /* demote time in uSec */
        if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
                dtime = 0;
        gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
                       "v:%d r:%d m:%ld\n", fs_id_buf, state2str(gl->gl_state),
                       gl->gl_name.ln_type,
                       (unsigned long long)gl->gl_name.ln_number,
                       gflags2str(gflags_buf, gl),
                       state2str(gl->gl_target),
                       state2str(gl->gl_demote_state), dtime,
                       atomic_read(&gl->gl_ail_count),
                       atomic_read(&gl->gl_revokes),
                       (int)gl->gl_lockref.count, gl->gl_hold_time);

        list_for_each_entry(gh, &gl->gl_holders, gh_list)
                dump_holder(seq, gh, fs_id_buf);

        if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
                glops->go_dump(seq, gl, fs_id_buf);
}

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
        struct gfs2_glock *gl = iter_ptr;

        seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
                   gl->gl_name.ln_type,
                   (unsigned long long)gl->gl_name.ln_number,
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
        return 0;
}

static const char *gfs2_gltype[] = {
        "type",
        "reserved",
        "nondisk",
        "inode",
        "rgrp",
        "meta",
        "iopen",
        "flock",
        "plock",
        "quota",
        "journal",
};

static const char *gfs2_stype[] = {
        [GFS2_LKS_SRTT]         = "srtt",
        [GFS2_LKS_SRTTVAR]      = "srttvar",
        [GFS2_LKS_SRTTB]        = "srttb",
        [GFS2_LKS_SRTTVARB]     = "srttvarb",
        [GFS2_LKS_SIRT]         = "sirt",
        [GFS2_LKS_SIRTVAR]      = "sirtvar",
        [GFS2_LKS_DCOUNT]       = "dlm",
        [GFS2_LKS_QCOUNT]       = "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

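/*
 * Each sbstats line is addressed by a single seq position: the glock type
 * index lives in pos >> 3 and the stat counter index in pos & 0x07, which
 * is why gfs2_sbstats_seq_show() below decodes the position that way (row
 * zero is the per-cpu header line).
 */
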
static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
        struct gfs2_sbd *sdp = seq->private;
        loff_t pos = *(loff_t *)iter_ptr;
        unsigned index = pos >> 3;
        unsigned subindex = pos & 0x07;
        int i;

        if (index == 0 && subindex != 0)
                return 0;

        seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
                   (index == 0) ? "cpu": gfs2_stype[subindex]);

        for_each_possible_cpu(i) {
                const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);

                if (index == 0)
                        seq_printf(seq, " %15u", i);
                else
                        seq_printf(seq, " %15llu", (unsigned long long)lkstats->
                                   lkstats[index - 1].stats[subindex]);
        }
        seq_putc(seq, '\n');
        return 0;
}

int __init gfs2_glock_init(void)
{
        int i, ret;

        ret = rhashtable_init(&gl_hash_table, &ht_parms);
        if (ret < 0)
                return ret;

        glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
                                          WQ_HIGHPRI | WQ_FREEZABLE, 0);
        if (!glock_workqueue) {
                rhashtable_destroy(&gl_hash_table);
                return -ENOMEM;
        }
        gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
                                                WQ_MEM_RECLAIM | WQ_FREEZABLE,
                                                0);
        if (!gfs2_delete_workqueue) {
                destroy_workqueue(glock_workqueue);
                rhashtable_destroy(&gl_hash_table);
                return -ENOMEM;
        }

        ret = register_shrinker(&glock_shrinker);
        if (ret) {
                destroy_workqueue(gfs2_delete_workqueue);
                destroy_workqueue(glock_workqueue);
                rhashtable_destroy(&gl_hash_table);
                return ret;
        }

        for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
                init_waitqueue_head(glock_wait_table + i);

        return 0;
}

void gfs2_glock_exit(void)
{
        unregister_shrinker(&glock_shrinker);
        rhashtable_destroy(&gl_hash_table);
        destroy_workqueue(glock_workqueue);
        destroy_workqueue(gfs2_delete_workqueue);
}

static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
{
        struct gfs2_glock *gl = gi->gl;

        if (gl) {
                if (n == 0)
                        return;
                if (!lockref_put_not_zero(&gl->gl_lockref))
                        gfs2_glock_queue_put(gl);
        }
        for (;;) {
                gl = rhashtable_walk_next(&gi->hti);
                if (IS_ERR_OR_NULL(gl)) {
                        if (gl == ERR_PTR(-EAGAIN)) {
                                n = 1;
                                continue;
                        }
                        gl = NULL;
                        break;
                }
                if (gl->gl_name.ln_sbd != gi->sdp)
                        continue;
                if (n <= 1) {
                        if (!lockref_get_not_dead(&gl->gl_lockref))
                                continue;
                        break;
                } else {
                        if (__lockref_is_dead(&gl->gl_lockref))
                                continue;
                        n--;
                }
        }
        gi->gl = gl;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        struct gfs2_glock_iter *gi = seq->private;
        loff_t n;

        /*
         * We can either stay where we are, skip to the next hash table
         * entry, or start from the beginning.
         */
        if (*pos < gi->last_pos) {
                rhashtable_walk_exit(&gi->hti);
                rhashtable_walk_enter(&gl_hash_table, &gi->hti);
                n = *pos + 1;
        } else {
                n = *pos - gi->last_pos;
        }

        rhashtable_walk_start(&gi->hti);

        gfs2_glock_iter_next(gi, n);
        gi->last_pos = *pos;
        return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
                                 loff_t *pos)
{
        struct gfs2_glock_iter *gi = seq->private;

        (*pos)++;
        gi->last_pos = *pos;
        gfs2_glock_iter_next(gi, 1);
        return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
        __releases(RCU)
{
        struct gfs2_glock_iter *gi = seq->private;

        rhashtable_walk_stop(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
        dump_glock(seq, iter_ptr, false);
        return 0;
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
        preempt_disable();
        if (*pos >= GFS2_NR_SBSTATS)
                return NULL;
        return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
                                   loff_t *pos)
{
        (*pos)++;
        if (*pos >= GFS2_NR_SBSTATS)
                return NULL;
        return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
        preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
        .start = gfs2_sbstats_seq_start,
        .next  = gfs2_sbstats_seq_next,
        .stop  = gfs2_sbstats_seq_stop,
        .show  = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

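/*
 * Using a larger-than-default seq_file buffer helps avoid repeated
 * fill-and-overflow retries when dumping a large glock table.
 */
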
static int __gfs2_glocks_open(struct inode *inode, struct file *file,
                              const struct seq_operations *ops)
{
        int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
        if (ret == 0) {
                struct seq_file *seq = file->private_data;
                struct gfs2_glock_iter *gi = seq->private;

                gi->sdp = inode->i_private;
                seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
                if (seq->buf)
                        seq->size = GFS2_SEQ_GOODSIZE;
                /*
                 * Initially, we are "before" the first hash table entry; the
                 * first call to rhashtable_walk_next gets us the first entry.
                 */
                gi->last_pos = -1;
                gi->gl = NULL;
                rhashtable_walk_enter(&gl_hash_table, &gi->hti);
        }
        return ret;
}

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
        return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
}

static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct gfs2_glock_iter *gi = seq->private;

        if (gi->gl)
                gfs2_glock_put(gi->gl);
        rhashtable_walk_exit(&gi->hti);
        return seq_release_private(inode, file);
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
        return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
        int ret = seq_open(file, &gfs2_sbstats_seq_ops);
        if (ret == 0) {
                struct seq_file *seq = file->private_data;
                seq->private = inode->i_private;  /* sdp */
        }
        return ret;
}

static const struct file_operations gfs2_glocks_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_glocks_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_glstats_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = gfs2_glocks_release,
};

static const struct file_operations gfs2_sbstats_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_sbstats_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
        sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);

        debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
                            &gfs2_glocks_fops);

        debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
                            &gfs2_glstats_fops);

        debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
                            &gfs2_sbstats_fops);
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
        debugfs_remove_recursive(sdp->debugfs_dir);
        sdp->debugfs_dir = NULL;
}

void gfs2_register_debugfs(void)
{
        gfs2_root = debugfs_create_dir("gfs2", NULL);
}

void gfs2_unregister_debugfs(void)
{
        debugfs_remove(gfs2_root);
        gfs2_root = NULL;
}