]> asedeno.scripts.mit.edu Git - linux.git/blob - fs/quota/dquot.c
e1a155e8db155fdd221029933daf0cac4a6bbe75
[linux.git] / fs / quota / dquot.c
1 /*
2  * Implementation of the diskquota system for the LINUX operating system. QUOTA
3  * is implemented using the BSD system call interface as the means of
4  * communication with the user level. This file contains the generic routines
5  * called by the different filesystems on allocation of an inode or block.
6  * These routines take care of the administration needed to have a consistent
7  * diskquota tracking system. The ideas of both user and group quotas are based
8  * on the Melbourne quota system as used on BSD derived systems. The internal
9  * implementation is based on one of the several variants of the LINUX
10  * inode-subsystem with added complexity of the diskquota system.
11  * 
12  * Author:      Marco van Wieringen <mvw@planets.elm.net>
13  *
14  * Fixes:   Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
15  *
16  *              Revised list management to avoid races
17  *              -- Bill Hawes, <whawes@star.net>, 9/98
18  *
19  *              Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
20  *              As the consequence the locking was moved from dquot_decr_...(),
21  *              dquot_incr_...() to calling functions.
22  *              invalidate_dquots() now writes modified dquots.
23  *              Serialized quota_off() and quota_on() for mount point.
24  *              Fixed a few bugs in grow_dquots().
25  *              Fixed deadlock in write_dquot() - we no longer account quotas on
26  *              quota files
27  *              remove_dquot_ref() moved to inode.c - it now traverses through inodes
28  *              add_dquot_ref() restarts after blocking
29  *              Added check for bogus uid and fixed check for group in quotactl.
30  *              Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
31  *
32  *              Used struct list_head instead of own list struct
33  *              Invalidation of referenced dquots is no longer possible
34  *              Improved free_dquots list management
35  *              Quota and i_blocks are now updated in one place to avoid races
36  *              Warnings are now delayed so we won't block in critical section
37  *              Write updated not to require dquot lock
38  *              Jan Kara, <jack@suse.cz>, 9/2000
39  *
40  *              Added dynamic quota structure allocation
41  *              Jan Kara <jack@suse.cz> 12/2000
42  *
43  *              Rewritten quota interface. Implemented new quota format and
44  *              formats registering.
45  *              Jan Kara, <jack@suse.cz>, 2001,2002
46  *
47  *              New SMP locking.
48  *              Jan Kara, <jack@suse.cz>, 10/2002
49  *
50  *              Added journalled quota support, fix lock inversion problems
51  *              Jan Kara, <jack@suse.cz>, 2003,2004
52  *
53  * (C) Copyright 1994 - 1997 Marco van Wieringen 
54  */
55
56 #include <linux/errno.h>
57 #include <linux/kernel.h>
58 #include <linux/fs.h>
59 #include <linux/mount.h>
60 #include <linux/mm.h>
61 #include <linux/time.h>
62 #include <linux/types.h>
63 #include <linux/string.h>
64 #include <linux/fcntl.h>
65 #include <linux/stat.h>
66 #include <linux/tty.h>
67 #include <linux/file.h>
68 #include <linux/slab.h>
69 #include <linux/sysctl.h>
70 #include <linux/init.h>
71 #include <linux/module.h>
72 #include <linux/proc_fs.h>
73 #include <linux/security.h>
74 #include <linux/sched.h>
75 #include <linux/cred.h>
76 #include <linux/kmod.h>
77 #include <linux/namei.h>
78 #include <linux/capability.h>
79 #include <linux/quotaops.h>
80 #include "../internal.h" /* ugh */
81
82 #include <linux/uaccess.h>
83
84 /*
85  * There are three quota SMP locks. dq_list_lock protects all lists with quotas
86  * and quota formats.
87  * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
88  * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
89  * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
90  * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
91  * modifications of quota state (on quotaon and quotaoff) and readers who care
92  * about latest values take it as well.
93  *
94  * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
95  *   dq_list_lock > dq_state_lock
96  *
 * Note that some things (e.g. sb pointer, type, id) don't change during
 * the life of the dquot structure and so need not be protected by a lock
99  *
 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
 * Reading a dquot pointer needs srcu_read_lock(&dquot_srcu), and
102  * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
103  * inode and before dropping dquot references to avoid use of dquots after
104  * they are freed. dq_data_lock is used to serialize the pointer setting and
105  * clearing operations.
106  * Special care needs to be taken about S_NOQUOTA inode flag (marking that
107  * inode is a quota file). Functions adding pointers from inode to dquots have
108  * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
109  * have to do all pointer modifications before dropping dq_data_lock. This makes
110  * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
111  * then drops all pointers to dquots from an inode.
112  *
113  * Each dquot has its dq_lock mutex.  Dquot is locked when it is being read to
114  * memory (or space for it is being allocated) on the first dqget(), when it is
115  * being written out, and when it is being released on the last dqput(). The
116  * allocation and release operations are serialized by the dq_lock and by
117  * checking the use count in dquot_release().
118  *
119  * Lock ordering (including related VFS locks) is the following:
120  *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
121  */
122
/* Protects quota_formats and all dquot lists (see locking comment above) */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
/* Protects quota state transitions at quotaon/quotaoff time */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
/* Protects dq_dqb usage data and mem_dqinfo; exported for filesystems */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
/* SRCU guarding dquot pointers reached through inodes (see comment above) */
DEFINE_STATIC_SRCU(dquot_srcu);
128
129 void __quota_error(struct super_block *sb, const char *func,
130                    const char *fmt, ...)
131 {
132         if (printk_ratelimit()) {
133                 va_list args;
134                 struct va_format vaf;
135
136                 va_start(args, fmt);
137
138                 vaf.fmt = fmt;
139                 vaf.va = &args;
140
141                 printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
142                        sb->s_id, func, &vaf);
143
144                 va_end(args);
145         }
146 }
147 EXPORT_SYMBOL(__quota_error);
148
#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
/* Human-readable quota type names used in warning/error messages */
static char *quotatypes[] = INITQFNAMES;
#endif
static struct quota_format_type *quota_formats;	/* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;
157
/*
 * Add a quota format to the head of the global quota_formats list.
 * The list is protected by dq_list_lock; the format becomes visible
 * to find_quota_format() as soon as the lock is dropped.
 * Always returns 0.
 */
int register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
	return 0;
}
EXPORT_SYMBOL(register_quota_format);
167
168 void unregister_quota_format(struct quota_format_type *fmt)
169 {
170         struct quota_format_type **actqf;
171
172         spin_lock(&dq_list_lock);
173         for (actqf = &quota_formats; *actqf && *actqf != fmt;
174              actqf = &(*actqf)->qf_next)
175                 ;
176         if (*actqf)
177                 *actqf = (*actqf)->qf_next;
178         spin_unlock(&dq_list_lock);
179 }
180 EXPORT_SYMBOL(unregister_quota_format);
181
/*
 * Look up a registered quota format by its format id and pin its owning
 * module.  If the format is missing (or its module is going away), try
 * to load the matching module via request_module() and search again.
 * Returns the format with qf_owner's refcount taken, or NULL.
 */
static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		/* Must drop the spinlock: request_module() sleeps */
		spin_unlock(&dq_list_lock);

		/* Map format id to a module name we can modprobe */
		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		/* Retry the lookup now that the module had a chance to load */
		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}
212
/* Drop the module reference taken by find_quota_format() */
static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}
217
218 /*
219  * Dquot List Management:
220  * The quota code uses three lists for dquot management: the inuse_list,
221  * free_dquots, and dquot_hash[] array. A single dquot structure may be
222  * on all three lists, depending on its current state.
223  *
224  * All dquots are placed to the end of inuse_list when first created, and this
225  * list is used for invalidate operation, which must look at every dquot.
226  *
227  * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
228  * and this list is searched whenever we need an available dquot.  Dquots are
229  * removed from the list as soon as they are used again, and
230  * dqstats.free_dquots gives the number of dquots on the list. When
231  * dquot is invalidated it's completely released from memory.
232  *
233  * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
235  * mechanism to locate a specific dquot.
236  */
237
/* Every allocated dquot, tail-appended; walked by invalidate_dquots() */
static LIST_HEAD(inuse_list);
/* Unreferenced dquots kept cached; reclaimed by the cache shrinker */
static LIST_HEAD(free_dquots);
/* Parameters of the dquot hash table, set up at init time */
static unsigned int dq_hash_bits, dq_hash_mask;
/* Hash table of dquots keyed by (sb, type, id) — see hashfn() */
static struct hlist_head *dquot_hash;

struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);

static qsize_t inode_get_rsv_space(struct inode *inode);
static int __dquot_initialize(struct inode *inode, int type);
248
249 static inline unsigned int
250 hashfn(const struct super_block *sb, struct kqid qid)
251 {
252         unsigned int id = from_kqid(&init_user_ns, qid);
253         int type = qid.type;
254         unsigned long tmp;
255
256         tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
257         return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
258 }
259
260 /*
261  * Following list functions expect dq_list_lock to be held
262  */
263 static inline void insert_dquot_hash(struct dquot *dquot)
264 {
265         struct hlist_head *head;
266         head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
267         hlist_add_head(&dquot->dq_hash, head);
268 }
269
/* Unhash the dquot; caller holds dq_list_lock */
static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}
274
275 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
276                                 struct kqid qid)
277 {
278         struct hlist_node *node;
279         struct dquot *dquot;
280
281         hlist_for_each (node, dquot_hash+hashent) {
282                 dquot = hlist_entry(node, struct dquot, dq_hash);
283                 if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
284                         return dquot;
285         }
286         return NULL;
287 }
288
/* Add a dquot to the tail of the free list; caller holds dq_list_lock */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}
295
296 static inline void remove_free_dquot(struct dquot *dquot)
297 {
298         if (list_empty(&dquot->dq_free))
299                 return;
300         list_del_init(&dquot->dq_free);
301         dqstats_dec(DQST_FREE_DQUOTS);
302 }
303
/* Register a new dquot on the global all-dquots list; needs dq_list_lock */
static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}
311
312 static inline void remove_inuse(struct dquot *dquot)
313 {
314         dqstats_dec(DQST_ALLOC_DQUOTS);
315         list_del(&dquot->dq_inuse);
316 }
317 /*
318  * End of list functions needing dq_list_lock
319  */
320
/*
 * Wait for any in-flight dq_lock section (dquot being read in, written
 * out, or released) to finish.  Acquiring and immediately dropping the
 * mutex is enough: once we get it, the previous holder is done.
 */
static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}
326
/* Is the dquot marked modified, i.e. awaiting write-back? */
static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}
331
/* Dirty the dquot through the filesystem's method (may journal/block) */
static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}
336
/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
		return 1;

	spin_lock(&dq_list_lock);
	/* Recheck under the lock; only the winner queues the dquot */
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_id.type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
356
357 /* Dirtify all the dquots - this can block when journalling */
358 static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
359 {
360         int ret, err, cnt;
361
362         ret = err = 0;
363         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
364                 if (dquot[cnt])
365                         /* Even in case of error we have to continue */
366                         ret = mark_dquot_dirty(dquot[cnt]);
367                 if (!err)
368                         err = ret;
369         }
370         return err;
371 }
372
373 static inline void dqput_all(struct dquot **dquot)
374 {
375         unsigned int cnt;
376
377         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
378                 dqput(dquot[cnt]);
379 }
380
381 /* This function needs dq_list_lock */
382 static inline int clear_dquot_dirty(struct dquot *dquot)
383 {
384         if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
385                 return 0;
386         list_del_init(&dquot->dq_dirty);
387         return 1;
388 }
389
/* Mark the per-type quota file info as needing write-out */
void mark_info_dirty(struct super_block *sb, int type)
{
	spin_lock(&dq_data_lock);
	sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
	spin_unlock(&dq_data_lock);
}
EXPORT_SYMBOL(mark_info_dirty);
397
/*
 *	Read dquot from disk and alloc space for it
 */

int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Read the on-disk structure unless an earlier acquire already did */
	if (!test_bit(DQ_READ_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
	if (ret < 0)
		goto out_iolock;
	/* Make sure flags update is visible after dquot has been filled */
	smp_mb__before_atomic();
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	/*
	 * Make sure flags update is visible after on-disk struct has been
	 * allocated. Paired with smp_rmb() in dqget().
	 */
	smp_mb__before_atomic();
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);
441
/*
 *	Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	spin_lock(&dq_list_lock);
	/* Someone else may have written the dquot back already */
	if (!clear_dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		goto out_lock;
	}
	spin_unlock(&dq_list_lock);
	/* Inactive dquot can be only if there was error during read/init
	 * => we have better not writing it */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_lock:
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);
468
/*
 *	Release dquot
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Check whether we are not racing with some other dqget() */
	if (atomic_read(&dquot->dq_count) > 1)
		goto out_dqlock;
	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
						dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	/* Dquot is inactive now: a future dqget() must read it back in */
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_dqlock:
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);
497
/* Default ->destroy_dquot: return the dquot to the slab cache */
void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);
503
/* Free the dquot through the filesystem's ->destroy_dquot method */
static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}
508
/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	spin_lock(&dq_list_lock);
	/* Walk every dquot ever allocated; restart whenever the lock drops */
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_id.type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			DEFINE_WAIT(wait);

			dqgrab(dquot);
			prepare_to_wait(&dquot->dq_wait_unused, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&dq_list_lock);
			/* Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			if (atomic_read(&dquot->dq_count) > 1)
				schedule();
			finish_wait(&dquot->dq_wait_unused, &wait);
			dqput(dquot);
			/* At this moment the dquot need not exist any more (it
			 * could have been reclaimed by prune_dqcache()).
			 * Hence we must restart the scan. */
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}
561
562 /* Call callback for every active dquot on given filesystem */
563 int dquot_scan_active(struct super_block *sb,
564                       int (*fn)(struct dquot *dquot, unsigned long priv),
565                       unsigned long priv)
566 {
567         struct dquot *dquot, *old_dquot = NULL;
568         int ret = 0;
569
570         WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
571
572         spin_lock(&dq_list_lock);
573         list_for_each_entry(dquot, &inuse_list, dq_inuse) {
574                 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
575                         continue;
576                 if (dquot->dq_sb != sb)
577                         continue;
578                 /* Now we have active dquot so we can just increase use count */
579                 atomic_inc(&dquot->dq_count);
580                 spin_unlock(&dq_list_lock);
581                 dqstats_inc(DQST_LOOKUPS);
582                 dqput(old_dquot);
583                 old_dquot = dquot;
584                 /*
585                  * ->release_dquot() can be racing with us. Our reference
586                  * protects us from new calls to it so just wait for any
587                  * outstanding call and recheck the DQ_ACTIVE_B after that.
588                  */
589                 wait_on_dquot(dquot);
590                 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
591                         ret = fn(dquot, priv);
592                         if (ret < 0)
593                                 goto out;
594                 }
595                 spin_lock(&dq_list_lock);
596                 /* We are safe to continue now because our dquot could not
597                  * be moved out of the inuse list while we hold the reference */
598         }
599         spin_unlock(&dq_list_lock);
600 out:
601         dqput(old_dquot);
602         return ret;
603 }
604 EXPORT_SYMBOL(dquot_scan_active);
605
/* Write all dquot structures to quota files.
 * @type == -1 means all quota types; returns the first write error seen
 * (0 on success).  Caller must hold s_umount. */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
	struct list_head *dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int err, ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		dirty = &dqopt->info[cnt].dqi_dirty_list;
		while (!list_empty(dirty)) {
			dquot = list_first_entry(dirty, struct dquot,
						 dq_dirty);
			/* Dirty and inactive can be only bad dquot... */
			if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
				clear_dquot_dirty(dquot);
				continue;
			}
			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			dqstats_inc(DQST_LOOKUPS);
			err = sb->dq_op->write_dquot(dquot);
			/* Remember only the first error */
			if (!ret && err)
				ret = err;
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	/* Flush the per-type quota file info if it was modified */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);

	return ret;
}
EXPORT_SYMBOL(dquot_writeback_dquots);
656
/* Write all dquot structures to disk and make them visible from userspace */
int dquot_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;
	/* Filesystems with hidden (system) quota files expose nothing */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		return 0;

	/* This is not very clever (and fast) but currently I don't know about
	 * any other simple way of getting quota data to disk and we must get
	 * them there for userspace to be visible... */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);
694
695 static unsigned long
696 dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
697 {
698         struct list_head *head;
699         struct dquot *dquot;
700         unsigned long freed = 0;
701
702         spin_lock(&dq_list_lock);
703         head = free_dquots.prev;
704         while (head != &free_dquots && sc->nr_to_scan) {
705                 dquot = list_entry(head, struct dquot, dq_free);
706                 remove_dquot_hash(dquot);
707                 remove_free_dquot(dquot);
708                 remove_inuse(dquot);
709                 do_destroy_dquot(dquot);
710                 sc->nr_to_scan--;
711                 freed++;
712                 head = free_dquots.prev;
713         }
714         spin_unlock(&dq_list_lock);
715         return freed;
716 }
717
718 static unsigned long
719 dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
720 {
721         return vfs_pressure_ratio(
722         percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
723 }
724
/* Shrinker letting the memory-pressure code reclaim cached free dquots */
static struct shrinker dqcache_shrinker = {
	.count_objects = dqcache_shrink_count,
	.scan_objects = dqcache_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};
730
/*
 * Put reference to dquot
 */
void dqput(struct dquot *dquot)
{
	int ret;

	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_id.type],
			    from_kqid(&init_user_ns, dquot->dq_id));
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);
we_slept:
	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot->dq_wait_unused);
		spin_unlock(&dq_list_lock);
		return;
	}
	/* Need to release dquot? */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		/* Commit dquot before releasing */
		ret = dquot->dq_sb->dq_op->write_dquot(dquot);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't write quota structure"
				    " (error %d). Quota may get out of sync!",
				    ret);
			/*
			 * We clear dirty bit anyway, so that we avoid
			 * infinite loop here
			 */
			spin_lock(&dq_list_lock);
			clear_dquot_dirty(dquot);
			spin_unlock(&dq_list_lock);
		}
		/* The lock was dropped: re-evaluate the dquot state */
		goto we_slept;
	}
	/* Clear flag in case dquot was inactive (something bad happened) */
	clear_dquot_dirty(dquot);
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		dquot->dq_sb->dq_op->release_dquot(dquot);
		goto we_slept;
	}
	/* Last reference gone: park the dquot on the free list */
	atomic_dec(&dquot->dq_count);
#ifdef CONFIG_QUOTA_DEBUG
	/* sanity check */
	BUG_ON(!list_empty(&dquot->dq_free));
#endif
	put_dquot_last(dquot);
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(dqput);
796
/* Default ->alloc_dquot: allocate a zeroed dquot from the slab cache */
struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);
802
803 static struct dquot *get_empty_dquot(struct super_block *sb, int type)
804 {
805         struct dquot *dquot;
806
807         dquot = sb->dq_op->alloc_dquot(sb, type);
808         if(!dquot)
809                 return NULL;
810
811         mutex_init(&dquot->dq_lock);
812         INIT_LIST_HEAD(&dquot->dq_free);
813         INIT_LIST_HEAD(&dquot->dq_inuse);
814         INIT_HLIST_NODE(&dquot->dq_hash);
815         INIT_LIST_HEAD(&dquot->dq_dirty);
816         init_waitqueue_head(&dquot->dq_wait_unused);
817         dquot->dq_sb = sb;
818         dquot->dq_id = make_kqid_invalid(type);
819         atomic_set(&dquot->dq_count, 1);
820
821         return dquot;
822 }
823
824 /*
825  * Get reference to dquot
826  *
827  * Locking is slightly tricky here. We are guarded from parallel quotaoff()
828  * destroying our dquot by:
829  *   a) checking for quota flags under dq_list_lock and
830  *   b) getting a reference to dquot before we release dq_list_lock
831  */
struct dquot *dqget(struct super_block *sb, struct kqid qid)
{
	unsigned int hashent = hashfn(sb, qid);
	struct dquot *dquot, *empty = NULL;

	/* Reject ids with no mapping in the sb's user namespace. */
	if (!qid_has_mapping(sb->s_user_ns, qid))
		return ERR_PTR(-EINVAL);

	if (!sb_has_quota_active(sb, qid.type))
		return ERR_PTR(-ESRCH);
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	/* Recheck under dq_state_lock - quotaoff() may have run meanwhile. */
	if (!sb_has_quota_active(sb, qid.type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		dquot = ERR_PTR(-ESRCH);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, qid);
	if (!dquot) {
		if (!empty) {
			/* Allocate outside dq_list_lock, then retry lookup. */
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, qid.type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = qid;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
	} else {
		/* Cache hit: pull off the free list if it was unreferenced. */
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_CACHE_HITS);
		dqstats_inc(DQST_LOOKUPS);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 1 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		int err;

		err = sb->dq_op->acquire_dquot(dquot);
		if (err < 0) {
			dqput(dquot);
			dquot = ERR_PTR(err);
			goto out;
		}
	}
	/*
	 * Make sure following reads see filled structure - paired with
	 * smp_mb__before_atomic() in dquot_acquire().
	 */
	smp_rmb();
#ifdef CONFIG_QUOTA_DEBUG
	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
#endif
out:
	/* Discard the preallocated structure if we lost the insert race. */
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);
908
/* Return the inode's dquot pointer array via the filesystem's hook. */
static inline struct dquot **i_dquot(struct inode *inode)
{
	return inode->i_sb->s_op->get_dquots(inode);
}
913
914 static int dqinit_needed(struct inode *inode, int type)
915 {
916         struct dquot * const *dquots;
917         int cnt;
918
919         if (IS_NOQUOTA(inode))
920                 return 0;
921
922         dquots = i_dquot(inode);
923         if (type != -1)
924                 return !dquots[type];
925         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
926                 if (!dquots[cnt])
927                         return 1;
928         return 0;
929 }
930
/* This routine is guarded by s_umount semaphore */
static void add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/* Skip inodes being torn down or set up, inodes with no open
		 * writers, and inodes that already have all needed dquots. */
		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    !atomic_read(&inode->i_writecount) ||
		    !dqinit_needed(inode, type)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

#ifdef CONFIG_QUOTA_DEBUG
		/* Reserved space before quota-on: accounting may be stale. */
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		iput(old_inode);
		__dquot_initialize(inode, type);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can be
		 * holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		old_inode = inode;
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(old_inode);

#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			"thus quota information is probably inconsistent. "
			"Please run quotacheck(8)");
	}
#endif
}
981
982 /*
983  * Remove references to dquots from inode and add dquot to list for freeing
984  * if we have the last reference to dquot
985  */
static void remove_inode_dquot_ref(struct inode *inode, int type,
				   struct list_head *tofree_head)
{
	struct dquot **dquots = i_dquot(inode);
	struct dquot *dquot = dquots[type];

	if (!dquot)
		return;

	dquots[type] = NULL;
	/* dq_free doubles as the linkage onto tofree_head here. */
	if (list_empty(&dquot->dq_free)) {
		/*
		 * The inode still has reference to dquot so it can't be in the
		 * free list
		 */
		spin_lock(&dq_list_lock);
		list_add(&dquot->dq_free, tofree_head);
		spin_unlock(&dq_list_lock);
	} else {
		/*
		 * Dquot is already in a list to put so we won't drop the last
		 * reference here.
		 */
		dqput(dquot);
	}
}
1012
1013 /*
1014  * Free list of dquots
1015  * Dquots are removed from inodes and no new references can be got so we are
1016  * the only ones holding reference
1017  */
1018 static void put_dquot_list(struct list_head *tofree_head)
1019 {
1020         struct list_head *act_head;
1021         struct dquot *dquot;
1022
1023         act_head = tofree_head->next;
1024         while (act_head != tofree_head) {
1025                 dquot = list_entry(act_head, struct dquot, dq_free);
1026                 act_head = act_head->next;
1027                 /* Remove dquot from the list so we won't have problems... */
1028                 list_del_init(&dquot->dq_free);
1029                 dqput(dquot);
1030         }
1031 }
1032
/*
 * Walk every inode of the filesystem and detach its reference to the given
 * quota type, collecting dquots whose last reference we hold on tofree_head.
 */
static void remove_dquot_ref(struct super_block *sb, int type,
		struct list_head *tofree_head)
{
	struct inode *inode;
	int reserved = 0;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 *  We have to scan also I_NEW inodes because they can already
		 *  have quota pointer initialized. Luckily, we need to touch
		 *  only quota pointers and these have separate locking
		 *  (dq_data_lock).
		 */
		spin_lock(&dq_data_lock);
		if (!IS_NOQUOTA(inode)) {
			/* Leftover reserved space means writes happened after
			 * quota was disabled; warned about below. */
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
			remove_inode_dquot_ref(inode, type, tofree_head);
		}
		spin_unlock(&dq_data_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}
1064
1065 /* Gather all references from inodes and drop them */
1066 static void drop_dquot_ref(struct super_block *sb, int type)
1067 {
1068         LIST_HEAD(tofree_head);
1069
1070         if (sb->dq_op) {
1071                 remove_dquot_ref(sb, type, &tofree_head);
1072                 synchronize_srcu(&dquot_srcu);
1073                 put_dquot_list(&tofree_head);
1074         }
1075 }
1076
1077 static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
1078 {
1079         dquot->dq_dqb.dqb_curinodes += number;
1080 }
1081
1082 static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
1083 {
1084         dquot->dq_dqb.dqb_curspace += number;
1085 }
1086
1087 static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
1088 {
1089         dquot->dq_dqb.dqb_rsvspace += number;
1090 }
1091
1092 /*
1093  * Claim reserved quota space
1094  */
1095 static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
1096 {
1097         if (dquot->dq_dqb.dqb_rsvspace < number) {
1098                 WARN_ON_ONCE(1);
1099                 number = dquot->dq_dqb.dqb_rsvspace;
1100         }
1101         dquot->dq_dqb.dqb_curspace += number;
1102         dquot->dq_dqb.dqb_rsvspace -= number;
1103 }
1104
1105 static void dquot_reclaim_reserved_space(struct dquot *dquot, qsize_t number)
1106 {
1107         if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
1108                 number = dquot->dq_dqb.dqb_curspace;
1109         dquot->dq_dqb.dqb_rsvspace += number;
1110         dquot->dq_dqb.dqb_curspace -= number;
1111 }
1112
1113 static inline
1114 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
1115 {
1116         if (dquot->dq_dqb.dqb_rsvspace >= number)
1117                 dquot->dq_dqb.dqb_rsvspace -= number;
1118         else {
1119                 WARN_ON_ONCE(1);
1120                 dquot->dq_dqb.dqb_rsvspace = 0;
1121         }
1122 }
1123
1124 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
1125 {
1126         if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1127             dquot->dq_dqb.dqb_curinodes >= number)
1128                 dquot->dq_dqb.dqb_curinodes -= number;
1129         else
1130                 dquot->dq_dqb.dqb_curinodes = 0;
1131         if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
1132                 dquot->dq_dqb.dqb_itime = (time64_t) 0;
1133         clear_bit(DQ_INODES_B, &dquot->dq_flags);
1134 }
1135
1136 static void dquot_decr_space(struct dquot *dquot, qsize_t number)
1137 {
1138         if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1139             dquot->dq_dqb.dqb_curspace >= number)
1140                 dquot->dq_dqb.dqb_curspace -= number;
1141         else
1142                 dquot->dq_dqb.dqb_curspace = 0;
1143         if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
1144                 dquot->dq_dqb.dqb_btime = (time64_t) 0;
1145         clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1146 }
1147
/*
 * A deferred quota warning: recorded while dq_data_lock is held (see
 * prepare_warning() callers) and emitted later by flush_warnings(),
 * where blocking on tty/netlink is allowed.
 */
struct dquot_warn {
	struct super_block *w_sb;	/* filesystem the warning concerns */
	struct kqid w_dq_id;		/* id of the dquot that tripped */
	short w_type;			/* QUOTA_NL_* warning type */
};
1153
1154 static int warning_issued(struct dquot *dquot, const int warntype)
1155 {
1156         int flag = (warntype == QUOTA_NL_BHARDWARN ||
1157                 warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
1158                 ((warntype == QUOTA_NL_IHARDWARN ||
1159                 warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
1160
1161         if (!flag)
1162                 return 0;
1163         return test_and_set_bit(flag, &dquot->dq_flags);
1164 }
1165
1166 #ifdef CONFIG_PRINT_QUOTA_WARNING
1167 static int flag_print_warnings = 1;
1168
1169 static int need_print_warning(struct dquot_warn *warn)
1170 {
1171         if (!flag_print_warnings)
1172                 return 0;
1173
1174         switch (warn->w_dq_id.type) {
1175                 case USRQUOTA:
1176                         return uid_eq(current_fsuid(), warn->w_dq_id.uid);
1177                 case GRPQUOTA:
1178                         return in_group_p(warn->w_dq_id.gid);
1179                 case PRJQUOTA:
1180                         return 1;
1181         }
1182         return 0;
1183 }
1184
/* Print warning to user which exceeded quota */
static void print_warning(struct dquot_warn *warn)
{
	char *msg = NULL;
	struct tty_struct *tty;
	int warntype = warn->w_type;

	/* "Back below limit" events go over netlink only, never to the tty. */
	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
		return;

	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, warn->w_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
	/* NOTE(review): every warntype that can reach this switch is covered
	 * (the *BELOW types returned above), so msg is never NULL here -
	 * re-verify if new QUOTA_NL_* types are introduced. */
	switch (warntype) {
		case QUOTA_NL_IHARDWARN:
			msg = " file limit reached.\r\n";
			break;
		case QUOTA_NL_ISOFTLONGWARN:
			msg = " file quota exceeded too long.\r\n";
			break;
		case QUOTA_NL_ISOFTWARN:
			msg = " file quota exceeded.\r\n";
			break;
		case QUOTA_NL_BHARDWARN:
			msg = " block limit reached.\r\n";
			break;
		case QUOTA_NL_BSOFTLONGWARN:
			msg = " block quota exceeded too long.\r\n";
			break;
		case QUOTA_NL_BSOFTWARN:
			msg = " block quota exceeded.\r\n";
			break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
1230 #endif
1231
1232 static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
1233                             int warntype)
1234 {
1235         if (warning_issued(dquot, warntype))
1236                 return;
1237         warn->w_type = warntype;
1238         warn->w_sb = dquot->dq_sb;
1239         warn->w_dq_id = dquot->dq_id;
1240 }
1241
1242 /*
1243  * Write warnings to the console and send warning messages over netlink.
1244  *
1245  * Note that this function can call into tty and networking code.
1246  */
1247 static void flush_warnings(struct dquot_warn *warn)
1248 {
1249         int i;
1250
1251         for (i = 0; i < MAXQUOTAS; i++) {
1252                 if (warn[i].w_type == QUOTA_NL_NOWARN)
1253                         continue;
1254 #ifdef CONFIG_PRINT_QUOTA_WARNING
1255                 print_warning(&warn[i]);
1256 #endif
1257                 quota_send_warning(warn[i].w_dq_id,
1258                                    warn[i].w_sb->s_dev, warn[i].w_type);
1259         }
1260 }
1261
1262 static int ignore_hardlimit(struct dquot *dquot)
1263 {
1264         struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
1265
1266         return capable(CAP_SYS_RESOURCE) &&
1267                (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1268                 !(info->dqi_flags & DQF_ROOT_SQUASH));
1269 }
1270
/*
 * Check whether @inodes more inodes may be charged to @dquot; prepares a
 * deferred warning in @warn and may start the soft-limit grace period.
 * Returns 0 if allowed, -EDQUOT if the allocation must be refused.
 * needs dq_data_lock
 */
static int check_idq(struct dquot *dquot, qsize_t inodes,
		     struct dquot_warn *warn)
{
	qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;

	/* Fake quotas and disabled limits always pass. */
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return 0;

	/* Hard limit exceeded -> refuse (unless privileged, see
	 * ignore_hardlimit()). */
	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
		return -EDQUOT;
	}

	/* Soft limit exceeded and the grace period has expired -> refuse. */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
		return -EDQUOT;
	}

	/* First crossing of the soft limit: warn and start the grace clock. */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
		dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
	}

	return 0;
}
1307
/*
 * Check whether @space more bytes (counting both used and reserved space)
 * may be charged to @dquot.  With @prealloc set, warnings are suppressed
 * and crossing the soft limit is refused outright.  Returns 0 or -EDQUOT.
 * needs dq_data_lock
 */
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc,
		     struct dquot_warn *warn)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;

	/* Fake quotas and disabled limits always pass. */
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return 0;

	/* Total footprint after this allocation: used + reserved + new. */
	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space;

	/* Hard limit exceeded -> refuse (unless privileged, see
	 * ignore_hardlimit()). */
	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (!prealloc)
			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
		return -EDQUOT;
	}

	/* Soft limit exceeded and the grace period has expired -> refuse. */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (!prealloc)
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
		return -EDQUOT;
	}

	/* First crossing of the soft limit. */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (!prealloc) {
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
			dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
		}
		else
			/*
			 * We don't allow preallocation to exceed softlimit so exceeding will
			 * be always printed
			 */
			return -EDQUOT;
	}

	return 0;
}
1358
1359 static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1360 {
1361         qsize_t newinodes;
1362
1363         if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1364             dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1365             !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
1366                 return QUOTA_NL_NOWARN;
1367
1368         newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1369         if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1370                 return QUOTA_NL_ISOFTBELOW;
1371         if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1372             newinodes < dquot->dq_dqb.dqb_ihardlimit)
1373                 return QUOTA_NL_IHARDBELOW;
1374         return QUOTA_NL_NOWARN;
1375 }
1376
1377 static int info_bdq_free(struct dquot *dquot, qsize_t space)
1378 {
1379         if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1380             dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
1381                 return QUOTA_NL_NOWARN;
1382
1383         if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
1384                 return QUOTA_NL_BSOFTBELOW;
1385         if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
1386             dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
1387                 return QUOTA_NL_BHARDBELOW;
1388         return QUOTA_NL_NOWARN;
1389 }
1390
1391 static int dquot_active(const struct inode *inode)
1392 {
1393         struct super_block *sb = inode->i_sb;
1394
1395         if (IS_NOQUOTA(inode))
1396                 return 0;
1397         return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
1398 }
1399
1400 /*
1401  * Initialize quota pointers in inode
1402  *
1403  * It is better to call this function outside of any transaction as it
1404  * might need a lot of space in journal for dquot structure allocation.
1405  */
/*
 * Attach dquot references of the given type (-1 for all types) to the
 * inode.  Phase 1 acquires references without locks (dqget() may do disk
 * I/O); phase 2 installs them under dq_data_lock, rechecking for races
 * with quotaoff().  Returns 0 or a negative error from dqget().
 */
static int __dquot_initialize(struct inode *inode, int type)
{
	int cnt, init_needed = 0;
	struct dquot **dquots, *got[MAXQUOTAS] = {};
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;
	int ret = 0;

	if (!dquot_active(inode))
		return 0;

	dquots = i_dquot(inode);

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct kqid qid;
		kprojid_t projid;
		int rc;
		struct dquot *dquot;

		if (type != -1 && cnt != type)
			continue;
		/*
		 * The i_dquot should have been initialized in most cases,
		 * we check it without locking here to avoid unnecessary
		 * dqget()/dqput() calls.
		 */
		if (dquots[cnt])
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		init_needed = 1;

		/* Map the quota type to the id this inode is charged under. */
		switch (cnt) {
		case USRQUOTA:
			qid = make_kqid_uid(inode->i_uid);
			break;
		case GRPQUOTA:
			qid = make_kqid_gid(inode->i_gid);
			break;
		case PRJQUOTA:
			rc = inode->i_sb->dq_op->get_projid(inode, &projid);
			if (rc)
				continue;
			qid = make_kqid_projid(projid);
			break;
		}
		dquot = dqget(sb, qid);
		if (IS_ERR(dquot)) {
			/* We raced with somebody turning quotas off... */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		got[cnt] = dquot;
	}

	/* All required i_dquot has been initialized */
	if (!init_needed)
		return 0;

	spin_lock(&dq_data_lock);
	if (IS_NOQUOTA(inode))
		goto out_lock;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!dquots[cnt]) {
			dquots[cnt] = got[cnt];
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv))
				dquot_resv_space(dquots[cnt], rsv);
		}
	}
out_lock:
	spin_unlock(&dq_data_lock);
out_put:
	/* Drop unused references */
	dqput_all(got);

	return ret;
}
1503
/* Initialize quota pointers for all quota types on the inode (-1 = all). */
int dquot_initialize(struct inode *inode)
{
	return __dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);
1509
1510 bool dquot_initialize_needed(struct inode *inode)
1511 {
1512         struct dquot **dquots;
1513         int i;
1514
1515         if (!dquot_active(inode))
1516                 return false;
1517
1518         dquots = i_dquot(inode);
1519         for (i = 0; i < MAXQUOTAS; i++)
1520                 if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
1521                         return true;
1522         return false;
1523 }
1524 EXPORT_SYMBOL(dquot_initialize_needed);
1525
1526 /*
1527  * Release all quotas referenced by inode.
1528  *
1529  * This function only be called on inode free or converting
1530  * a file to quota file, no other users for the i_dquot in
1531  * both cases, so we needn't call synchronize_srcu() after
1532  * clearing i_dquot.
1533  */
1534 static void __dquot_drop(struct inode *inode)
1535 {
1536         int cnt;
1537         struct dquot **dquots = i_dquot(inode);
1538         struct dquot *put[MAXQUOTAS];
1539
1540         spin_lock(&dq_data_lock);
1541         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1542                 put[cnt] = dquots[cnt];
1543                 dquots[cnt] = NULL;
1544         }
1545         spin_unlock(&dq_data_lock);
1546         dqput_all(put);
1547 }
1548
1549 void dquot_drop(struct inode *inode)
1550 {
1551         struct dquot * const *dquots;
1552         int cnt;
1553
1554         if (IS_NOQUOTA(inode))
1555                 return;
1556
1557         /*
1558          * Test before calling to rule out calls from proc and such
1559          * where we are not allowed to block. Note that this is
1560          * actually reliable test even without the lock - the caller
1561          * must assure that nobody can come after the DQUOT_DROP and
1562          * add quota pointers back anyway.
1563          */
1564         dquots = i_dquot(inode);
1565         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1566                 if (dquots[cnt])
1567                         break;
1568         }
1569
1570         if (cnt < MAXQUOTAS)
1571                 __dquot_drop(inode);
1572 }
1573 EXPORT_SYMBOL(dquot_drop);
1574
1575 /*
1576  * inode_reserved_space is managed internally by quota, and protected by
1577  * i_lock similar to i_blocks+i_bytes.
1578  */
/* Return the fs-provided reserved-space counter; caller must hold i_lock
 * when dereferencing it (see the wrappers below). */
static qsize_t *inode_reserved_space(struct inode * inode)
{
	/* Filesystem must explicitly define it's own method in order to use
	 * quota reservation interface */
	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
	return inode->i_sb->dq_op->get_reserved_space(inode);
}
1586
/* Increase the inode's reserved-space counter under i_lock. */
void inode_add_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) += number;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_add_rsv_space);
1594
/* Convert reserved space into used bytes: both updates under one i_lock
 * hold so the totals stay consistent. */
void inode_claim_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_claim_rsv_space);
1603
/* Convert used bytes back into reserved space; inverse of
 * inode_claim_rsv_space(), done atomically under i_lock. */
void inode_reclaim_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) += number;
	__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_reclaim_rsv_space);
1612
/* Decrease the inode's reserved-space counter under i_lock. */
void inode_sub_rsv_space(struct inode *inode, qsize_t number)
{
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) -= number;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_sub_rsv_space);
1620
/* Read the inode's reserved-space counter; 0 if the filesystem does not
 * implement the reservation interface. */
static qsize_t inode_get_rsv_space(struct inode *inode)
{
	qsize_t ret;

	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	spin_lock(&inode->i_lock);
	ret = *inode_reserved_space(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}
1632
1633 static void inode_incr_space(struct inode *inode, qsize_t number,
1634                                 int reserve)
1635 {
1636         if (reserve)
1637                 inode_add_rsv_space(inode, number);
1638         else
1639                 inode_add_bytes(inode, number);
1640 }
1641
1642 static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
1643 {
1644         if (reserve)
1645                 inode_sub_rsv_space(inode, number);
1646         else
1647                 inode_sub_bytes(inode, number);
1648 }
1649
1650 /*
1651  * This functions updates i_blocks+i_bytes fields and quota information
1652  * (together with appropriate checks).
1653  *
1654  * NOTE: We absolutely rely on the fact that caller dirties the inode
1655  * (usually helpers in quotaops.h care about this) and holds a handle for
1656  * the current transaction so that dquot write and inode write go into the
1657  * same transaction.
1658  */
1659
1660 /*
1661  * This operation can block, but only after everything is updated
1662  */
/*
 * Charge @number bytes (allocation or reservation, per DQUOT_SPACE_RESERVE)
 * to all dquots of the inode and to the inode itself.  Returns 0 or
 * -EDQUOT; with DQUOT_SPACE_NOFAIL the charge proceeds even over quota.
 * Dquot pointers are protected by dquot_srcu, counters by dq_data_lock.
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	int reserve = flags & DQUOT_SPACE_RESERVE;
	struct dquot **dquots;

	/* No quota active: only account the bytes on the inode. */
	if (!dquot_active(inode)) {
		inode_incr_space(inode, number, reserve);
		goto out;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	/* First pass: check every limit before changing anything. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		ret = check_bdq(dquots[cnt], number,
				!(flags & DQUOT_SPACE_WARN), &warn[cnt]);
		if (ret && !(flags & DQUOT_SPACE_NOFAIL)) {
			spin_unlock(&dq_data_lock);
			goto out_flush_warn;
		}
	}
	/* Second pass: all checks passed, apply the charge everywhere. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		if (reserve)
			dquot_resv_space(dquots[cnt], number);
		else
			dquot_incr_space(dquots[cnt], number);
	}
	inode_incr_space(inode, number, reserve);
	spin_unlock(&dq_data_lock);

	/* Reservations only touch in-memory counters, so skip dirtying. */
	if (reserve)
		goto out_flush_warn;
	mark_all_dquot_dirty(dquots);
out_flush_warn:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);
1712
/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(struct inode *inode)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot * const *dquots;

	if (!dquot_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	/* SRCU protects the dquot pointers against concurrent quotaoff */
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	/* First check inode limits for every active quota type... */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		ret = check_idq(dquots[cnt], 1, &warn[cnt]);
		if (ret)
			goto warn_put_all;
	}

	/* ...then charge one inode to each, so the update is atomic. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		dquot_incr_inodes(dquots[cnt], 1);
	}

warn_put_all:
	spin_unlock(&dq_data_lock);
	if (ret == 0)
		mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);
1753
/*
 * Convert in-memory reserved quotas to real consumed quotas
 */
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot **dquots;
	int cnt, index;

	if (!dquot_active(inode)) {
		inode_claim_rsv_space(inode, number);
		return 0;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	/*
	 * Claim reserved quotas to allocated quotas.  No limit checks are
	 * needed - the space was already accounted when it was reserved.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt])
			dquot_claim_reserved_space(dquots[cnt], number);
	}
	/* Update inode bytes */
	inode_claim_rsv_space(inode, number);
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	return 0;
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);
1783
1784 /*
1785  * Convert allocated space back to in-memory reserved quotas
1786  */
1787 void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
1788 {
1789         struct dquot **dquots;
1790         int cnt, index;
1791
1792         if (!dquot_active(inode)) {
1793                 inode_reclaim_rsv_space(inode, number);
1794                 return;
1795         }
1796
1797         dquots = i_dquot(inode);
1798         index = srcu_read_lock(&dquot_srcu);
1799         spin_lock(&dq_data_lock);
1800         /* Claim reserved quotas to allocated quotas */
1801         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1802                 if (dquots[cnt])
1803                         dquot_reclaim_reserved_space(dquots[cnt], number);
1804         }
1805         /* Update inode bytes */
1806         inode_reclaim_rsv_space(inode, number);
1807         spin_unlock(&dq_data_lock);
1808         mark_all_dquot_dirty(dquots);
1809         srcu_read_unlock(&dquot_srcu, index);
1810         return;
1811 }
1812 EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
1813
/*
 * This operation can block, but only after everything is updated
 */
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot **dquots;
	int reserve = flags & DQUOT_SPACE_RESERVE, index;

	/* No quota accounting? Only update i_blocks/i_bytes. */
	if (!dquot_active(inode)) {
		inode_decr_space(inode, number, reserve);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		if (!dquots[cnt])
			continue;
		/* Freeing may move usage back below a limit - note it */
		wtype = info_bdq_free(dquots[cnt], number);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquots[cnt], wtype);
		if (reserve)
			dquot_free_reserved_space(dquots[cnt], number);
		else
			dquot_decr_space(dquots[cnt], number);
	}
	inode_decr_space(inode, number, reserve);
	spin_unlock(&dq_data_lock);

	/* Reservations are in-memory only, nothing to write back */
	if (reserve)
		goto out_unlock;
	mark_all_dquot_dirty(dquots);
out_unlock:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
}
EXPORT_SYMBOL(__dquot_free_space);
1857
/*
 * This operation can block, but only after everything is updated
 */
void dquot_free_inode(struct inode *inode)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot * const *dquots;
	int index;

	if (!dquot_active(inode))
		return;

	dquots = i_dquot(inode);
	/* SRCU protects the dquot pointers against concurrent quotaoff */
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		if (!dquots[cnt])
			continue;
		/* Dropping an inode may clear a warning condition */
		wtype = info_idq_free(dquots[cnt], 1);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquots[cnt], wtype);
		dquot_decr_inodes(dquots[cnt], 1);
	}
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	/* Warnings are sent only after all locks have been dropped */
	flush_warnings(warn);
}
EXPORT_SYMBOL(dquot_free_inode);
1891
/*
 * Transfer the number of inodes and blocks from one diskquota to another.
 * On success, dquot references in transfer_to are consumed and references
 * to original dquots that need to be released are placed there. On failure,
 * references are kept untouched.
 *
 * This operation can block, but only after everything is updated
 * A transaction must be started when entering this function.
 *
 * We are holding reference on transfer_from & transfer_to, no need to
 * protect them by srcu_read_lock().
 */
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
{
	qsize_t space, cur_space;
	qsize_t rsv_space = 0;
	qsize_t inode_usage = 1;
	struct dquot *transfer_from[MAXQUOTAS] = {};
	int cnt, ret = 0;
	char is_valid[MAXQUOTAS] = {};
	struct dquot_warn warn_to[MAXQUOTAS];
	struct dquot_warn warn_from_inodes[MAXQUOTAS];
	struct dquot_warn warn_from_space[MAXQUOTAS];

	if (IS_NOQUOTA(inode))
		return 0;

	/* A filesystem may account an inode as more than one unit */
	if (inode->i_sb->dq_op->get_inode_usage) {
		ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
		if (ret)
			return ret;
	}

	/* Initialize the arrays */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		warn_to[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
	}

	spin_lock(&dq_data_lock);
	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
		spin_unlock(&dq_data_lock);
		return 0;
	}
	cur_space = inode_get_bytes(inode);
	rsv_space = inode_get_rsv_space(inode);
	space = cur_space + rsv_space;
	/* Build the transfer_from list and check the limits */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/*
		 * Skip changes for same uid or gid or for turned off quota-type.
		 */
		if (!transfer_to[cnt])
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(inode->i_sb, cnt))
			continue;
		is_valid[cnt] = 1;
		transfer_from[cnt] = i_dquot(inode)[cnt];
		ret = check_idq(transfer_to[cnt], inode_usage, &warn_to[cnt]);
		if (ret)
			goto over_quota;
		ret = check_bdq(transfer_to[cnt], space, 0, &warn_to[cnt]);
		if (ret)
			goto over_quota;
	}

	/*
	 * Finally perform the needed transfer from transfer_from to transfer_to
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!is_valid[cnt])
			continue;
		/* Due to IO error we might not have transfer_from[] structure */
		if (transfer_from[cnt]) {
			int wtype;
			wtype = info_idq_free(transfer_from[cnt], inode_usage);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_inodes[cnt],
						transfer_from[cnt], wtype);
			wtype = info_bdq_free(transfer_from[cnt], space);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_space[cnt],
						transfer_from[cnt], wtype);
			dquot_decr_inodes(transfer_from[cnt], inode_usage);
			dquot_decr_space(transfer_from[cnt], cur_space);
			dquot_free_reserved_space(transfer_from[cnt],
						  rsv_space);
		}

		dquot_incr_inodes(transfer_to[cnt], inode_usage);
		dquot_incr_space(transfer_to[cnt], cur_space);
		dquot_resv_space(transfer_to[cnt], rsv_space);

		i_dquot(inode)[cnt] = transfer_to[cnt];
	}
	spin_unlock(&dq_data_lock);

	mark_all_dquot_dirty(transfer_from);
	mark_all_dquot_dirty(transfer_to);
	/* Warnings are sent only after dq_data_lock has been dropped */
	flush_warnings(warn_to);
	flush_warnings(warn_from_inodes);
	flush_warnings(warn_from_space);
	/* Pass back references to put */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (is_valid[cnt])
			transfer_to[cnt] = transfer_from[cnt];
	return 0;
over_quota:
	spin_unlock(&dq_data_lock);
	flush_warnings(warn_to);
	return ret;
}
EXPORT_SYMBOL(__dquot_transfer);
2007
/* Wrapper for transferring ownership of an inode for uid/gid only
 * Called from FSXXX_setattr()
 */
int dquot_transfer(struct inode *inode, struct iattr *iattr)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct dquot *dquot;
	struct super_block *sb = inode->i_sb;
	int ret;

	if (!dquot_active(inode))
		return 0;

	/* Owner uid changing? Grab the dquot of the new owner. */
	if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)){
		dquot = dqget(sb, make_kqid_uid(iattr->ia_uid));
		if (IS_ERR(dquot)) {
			/* -ESRCH just means there's no quota for this id */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		transfer_to[USRQUOTA] = dquot;
	}
	/* Group changing? Same dance for the group dquot. */
	if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)){
		dquot = dqget(sb, make_kqid_gid(iattr->ia_gid));
		if (IS_ERR(dquot)) {
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		transfer_to[GRPQUOTA] = dquot;
	}
	ret = __dquot_transfer(inode, transfer_to);
out_put:
	/* On success transfer_to[] holds the old dquots to release */
	dqput_all(transfer_to);
	return ret;
}
EXPORT_SYMBOL(dquot_transfer);
2049
2050 /*
2051  * Write info of quota file to disk
2052  */
2053 int dquot_commit_info(struct super_block *sb, int type)
2054 {
2055         struct quota_info *dqopt = sb_dqopt(sb);
2056
2057         return dqopt->ops[type]->write_file_info(sb, type);
2058 }
2059 EXPORT_SYMBOL(dquot_commit_info);
2060
2061 int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
2062 {
2063         struct quota_info *dqopt = sb_dqopt(sb);
2064
2065         if (!sb_has_quota_active(sb, qid->type))
2066                 return -ESRCH;
2067         if (!dqopt->ops[qid->type]->get_next_id)
2068                 return -ENOSYS;
2069         return dqopt->ops[qid->type]->get_next_id(sb, qid);
2070 }
2071 EXPORT_SYMBOL(dquot_get_next_id);
2072
/*
 * Definitions of diskquota operations.
 * Generic callback table wiring the VFS quota hooks to the dquot_*
 * helpers implemented in this file.
 */
const struct dquot_operations dquot_operations = {
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_next_id	= dquot_get_next_id,
};
EXPORT_SYMBOL(dquot_operations);
2087
2088 /*
2089  * Generic helper for ->open on filesystems supporting disk quotas.
2090  */
2091 int dquot_file_open(struct inode *inode, struct file *file)
2092 {
2093         int error;
2094
2095         error = generic_file_open(inode, file);
2096         if (!error && (file->f_mode & FMODE_WRITE))
2097                 dquot_initialize(inode);
2098         return error;
2099 }
2100 EXPORT_SYMBOL(dquot_file_open);
2101
/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 */
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
	int cnt, ret = 0;
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *toputinode[MAXQUOTAS];

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	/* Cannot turn off usage accounting without turning off limits, or
	 * suspend quotas and simultaneously turn quotas off. */
	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
	    DQUOT_USAGE_ENABLED)))
		return -EINVAL;

	/*
	 * Skip everything if there's nothing to do. We have to do this because
	 * sometimes we are called when fill_super() failed and calling
	 * sync_fs() in such cases does no good.
	 */
	if (!sb_any_quota_loaded(sb))
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		toputinode[cnt] = NULL;
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_loaded(sb, cnt))
			continue;

		if (flags & DQUOT_SUSPENDED) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_SUSPENDED, cnt);
			spin_unlock(&dq_state_lock);
		} else {
			spin_lock(&dq_state_lock);
			dqopt->flags &= ~dquot_state_flag(flags, cnt);
			/* Turning off suspended quotas? */
			if (!sb_has_quota_loaded(sb, cnt) &&
			    sb_has_quota_suspended(sb, cnt)) {
				dqopt->flags &= ~dquot_state_flag(
							DQUOT_SUSPENDED, cnt);
				spin_unlock(&dq_state_lock);
				/* Only the saved inode reference remains -
				 * drop it and we are done for this type. */
				iput(dqopt->files[cnt]);
				dqopt->files[cnt] = NULL;
				continue;
			}
			spin_unlock(&dq_state_lock);
		}

		/* We still have to keep quota loaded? */
		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
			continue;

		/* Note: these are blocking operations */
		drop_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we
		 * should be only users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);

		/* When suspending, dqopt->files[cnt] stays set so that
		 * dquot_resume() can later pick the inode back up. */
		toputinode[cnt] = dqopt->files[cnt];
		if (!sb_has_quota_loaded(sb, cnt))
			dqopt->files[cnt] = NULL;
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}

	/* Skip syncing and setting flags if quota files are hidden */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		goto put_inodes;

	/* Sync the superblock so that buffers with quota data are written to
	 * disk (and so userspace sees correct data afterwards). */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now the quota files are just ordinary files and we can set the
	 * inode flags back. Moreover we discard the pagecache so that
	 * userspace sees the writes we did bypassing the pagecache. We
	 * must also discard the blockdev buffers so that we see the
	 * changes done by userspace on the next quotaon() */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		/* This can happen when suspending quotas on remount-ro... */
		if (toputinode[cnt] && !sb_has_quota_loaded(sb, cnt)) {
			inode_lock(toputinode[cnt]);
			toputinode[cnt]->i_flags &= ~S_NOQUOTA;
			truncate_inode_pages(&toputinode[cnt]->i_data, 0);
			inode_unlock(toputinode[cnt]);
			mark_inode_dirty_sync(toputinode[cnt]);
		}
	if (sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
put_inodes:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (toputinode[cnt]) {
			/* On remount RO, we keep the inode pointer so that we
			 * can reenable quota on the subsequent remount RW. We
			 * have to check 'flags' variable and not use sb_has_
			 * function because another quotaon / quotaoff could
			 * change global state before we got here. We refuse
			 * to suspend quotas when there is pending delete on
			 * the quota file... */
			if (!(flags & DQUOT_SUSPENDED))
				iput(toputinode[cnt]);
			else if (!toputinode[cnt]->i_nlink)
				ret = -EBUSY;
		}
	return ret;
}
EXPORT_SYMBOL(dquot_disable);
2227
2228 int dquot_quota_off(struct super_block *sb, int type)
2229 {
2230         return dquot_disable(sb, type,
2231                              DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2232 }
2233 EXPORT_SYMBOL(dquot_quota_off);
2234
/*
 *	Turn quotas on on a device
 */

/*
 * Helper function to turn quotas on when we already have the inode of
 * quota file and no quota information is loaded.
 */
static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
	unsigned int flags)
{
	struct quota_format_type *fmt = find_quota_format(format_id);
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;

	if (!fmt)
		return -ESRCH;
	/* Quota file must be a regular file on a writable fs */
	if (!S_ISREG(inode->i_mode)) {
		error = -EACCES;
		goto out_fmt;
	}
	if (IS_RDONLY(inode)) {
		error = -EROFS;
		goto out_fmt;
	}
	if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Filesystems outside of init_user_ns not yet supported */
	if (sb->s_user_ns != &init_user_ns) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_fmt;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that kernel sees
		 * changes from userspace. It is not enough to just flush
		 * the quota file since if blocksize < pagesize, invalidation
		 * of the cache could fail because of other unrelated dirty
		 * data */
		sync_filesystem(sb);
		invalidate_bdev(sb->s_bdev);
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible) Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
		/*
		 * When S_NOQUOTA is set, remove dquot references as no more
		 * references can be added
		 */
		__dquot_drop(inode);
	}

	error = -EIO;
	/* Pin the quota file inode for the lifetime of the quota state */
	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		goto out_file_flags;
	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_file_init;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0)
		goto out_file_init;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
		spin_lock(&dq_data_lock);
		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
		spin_unlock(&dq_data_lock);
	}
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	/* Attach dquots to inodes that are already in use */
	add_dquot_ref(sb, type);

	return 0;

out_file_init:
	dqopt->files[type] = NULL;
	iput(inode);
out_file_flags:
	inode_lock(inode);
	inode->i_flags &= ~S_NOQUOTA;
	inode_unlock(inode);
out_fmt:
	put_quota_format(fmt);

	return error;
}
2346
/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode;
	int ret = 0, cnt;
	unsigned int flags;

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_suspended(sb, cnt))
			continue;

		/* Take over the inode reference saved at suspend time */
		inode = dqopt->files[cnt];
		dqopt->files[cnt] = NULL;
		spin_lock(&dq_state_lock);
		/* Remember which states were enabled before suspending... */
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);

		/* ...and re-enable exactly those.  Note that only the last
		 * failure (if any) is reported to the caller. */
		flags = dquot_generic_flag(flags, cnt);
		ret = vfs_load_quota_inode(inode, cnt,
				dqopt->info[cnt].dqi_fmt_id, flags);
		iput(inode);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);
2383
2384 int dquot_quota_on(struct super_block *sb, int type, int format_id,
2385                    const struct path *path)
2386 {
2387         int error = security_quota_on(path->dentry);
2388         if (error)
2389                 return error;
2390         /* Quota file not on the same filesystem? */
2391         if (path->dentry->d_sb != sb)
2392                 error = -EXDEV;
2393         else
2394                 error = vfs_load_quota_inode(d_inode(path->dentry), type,
2395                                              format_id, DQUOT_USAGE_ENABLED |
2396                                              DQUOT_LIMITS_ENABLED);
2397         return error;
2398 }
2399 EXPORT_SYMBOL(dquot_quota_on);
2400
/*
 * More powerful function for turning on quotas allowing setting
 * of individual quota flags
 */
int dquot_enable(struct inode *inode, int type, int format_id,
		 unsigned int flags)
{
	struct super_block *sb = inode->i_sb;

	/* Just unsuspend quotas? */
	BUG_ON(flags & DQUOT_SUSPENDED);
	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	if (!flags)
		return 0;
	/* Just updating flags needed? */
	if (sb_has_quota_loaded(sb, type)) {
		/* Asking for an already-enabled state is an error */
		if (flags & DQUOT_USAGE_ENABLED &&
		    sb_has_quota_usage_enabled(sb, type))
			return -EBUSY;
		if (flags & DQUOT_LIMITS_ENABLED &&
		    sb_has_quota_limits_enabled(sb, type))
			return -EBUSY;
		spin_lock(&dq_state_lock);
		sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
		spin_unlock(&dq_state_lock);
		return 0;
	}

	/* Nothing loaded yet - do the full setup */
	return vfs_load_quota_inode(inode, type, format_id, flags);
}
EXPORT_SYMBOL(dquot_enable);
2435
2436 /*
2437  * This function is used when filesystem needs to initialize quotas
2438  * during mount time.
2439  */
2440 int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
2441                 int format_id, int type)
2442 {
2443         struct dentry *dentry;
2444         int error;
2445
2446         dentry = lookup_one_len_unlocked(qf_name, sb->s_root, strlen(qf_name));
2447         if (IS_ERR(dentry))
2448                 return PTR_ERR(dentry);
2449
2450         if (d_really_is_negative(dentry)) {
2451                 error = -ENOENT;
2452                 goto out;
2453         }
2454
2455         error = security_quota_on(dentry);
2456         if (!error)
2457                 error = vfs_load_quota_inode(d_inode(dentry), type, format_id,
2458                                 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2459
2460 out:
2461         dput(dentry);
2462         return error;
2463 }
2464 EXPORT_SYMBOL(dquot_quota_on_mount);
2465
/*
 * ->quota_enable() for filesystems that keep quotas in hidden system
 * files: only limit enforcement may be toggled at runtime; usage
 * accounting is always on while the filesystem is mounted.
 * On partial failure, enforcement already enabled here is rolled back.
 */
static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/* Accounting cannot be turned on while fs is mounted */
	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
	if (!flags)
		return -EINVAL;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!(flags & qtype_enforce_flag(type)))
			continue;
		/* Can't enforce without accounting */
		if (!sb_has_quota_usage_enabled(sb, type))
			return -EINVAL;
		ret = dquot_enable(dqopt->files[type], type,
				   dqopt->info[type].dqi_fmt_id,
				   DQUOT_LIMITS_ENABLED);
		if (ret < 0)
			goto out_err;
	}
	return 0;
out_err:
	/* Backout enforcement enablement we already did */
	for (type--; type >= 0; type--)  {
		if (flags & qtype_enforce_flag(type))
			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
	}
	/* Error code translation for better compatibility with XFS */
	if (ret == -EBUSY)
		ret = -EEXIST;
	return ret;
}
2502
/*
 * ->quota_disable() counterpart of dquot_quota_enable(): only limit
 * enforcement may be turned off; on partial failure the enforcement
 * already disabled here is re-enabled.
 */
static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/*
	 * We don't support turning off accounting via quotactl. In principle
	 * quota infrastructure can do this but filesystems don't expect
	 * userspace to be able to do it.
	 */
	if (flags &
		  (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
		return -EOPNOTSUPP;

	/* Filter out limits not enabled */
	for (type = 0; type < MAXQUOTAS; type++)
		if (!sb_has_quota_limits_enabled(sb, type))
			flags &= ~qtype_enforce_flag(type);
	/* Nothing left? (-EEXIST mirrors XFS semantics for "not enabled") */
	if (!flags)
		return -EEXIST;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (flags & qtype_enforce_flag(type)) {
			ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
			if (ret < 0)
				goto out_err;
		}
	}
	return 0;
out_err:
	/* Backout enforcement disabling we already did */
	for (type--; type >= 0; type--)  {
		if (flags & qtype_enforce_flag(type))
			dquot_enable(dqopt->files[type], type,
				     dqopt->info[type].dqi_fmt_id,
				     DQUOT_LIMITS_ENABLED);
	}
	return ret;
}
2545
2546 /* Generic routine for getting common part of quota structure */
2547 static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
2548 {
2549         struct mem_dqblk *dm = &dquot->dq_dqb;
2550
2551         memset(di, 0, sizeof(*di));
2552         spin_lock(&dq_data_lock);
2553         di->d_spc_hardlimit = dm->dqb_bhardlimit;
2554         di->d_spc_softlimit = dm->dqb_bsoftlimit;
2555         di->d_ino_hardlimit = dm->dqb_ihardlimit;
2556         di->d_ino_softlimit = dm->dqb_isoftlimit;
2557         di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
2558         di->d_ino_count = dm->dqb_curinodes;
2559         di->d_spc_timer = dm->dqb_btime;
2560         di->d_ino_timer = dm->dqb_itime;
2561         spin_unlock(&dq_data_lock);
2562 }
2563
2564 int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
2565                     struct qc_dqblk *di)
2566 {
2567         struct dquot *dquot;
2568
2569         dquot = dqget(sb, qid);
2570         if (IS_ERR(dquot))
2571                 return PTR_ERR(dquot);
2572         do_get_dqblk(dquot, di);
2573         dqput(dquot);
2574
2575         return 0;
2576 }
2577 EXPORT_SYMBOL(dquot_get_dqblk);
2578
2579 int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
2580                          struct qc_dqblk *di)
2581 {
2582         struct dquot *dquot;
2583         int err;
2584
2585         if (!sb->dq_op->get_next_id)
2586                 return -ENOSYS;
2587         err = sb->dq_op->get_next_id(sb, qid);
2588         if (err < 0)
2589                 return err;
2590         dquot = dqget(sb, *qid);
2591         if (IS_ERR(dquot))
2592                 return PTR_ERR(dquot);
2593         do_get_dqblk(dquot, di);
2594         dqput(dquot);
2595
2596         return 0;
2597 }
2598 EXPORT_SYMBOL(dquot_get_next_dqblk);
2599
/* qc_dqblk fields the generic VFS quota code knows how to set */
#define VFS_QC_MASK \
	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
	 QC_SPC_TIMER | QC_INO_TIMER)

/*
 * Generic routine for setting common part of quota structure.
 * Applies the fields selected by di->d_fieldmask to @dquot, recomputes
 * grace times and the DQ_FAKE_B state, and marks the dquot dirty.
 * Returns 0, -EINVAL for unknown fields, or -ERANGE for limits the
 * on-disk format cannot represent.
 */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	/* Set when block / inode limit state must be rechecked below */
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	if (di->d_fieldmask & ~VFS_QC_MASK)
		return -EINVAL;

	/* Refuse limits beyond what the quota format can store on disk */
	if (((di->d_fieldmask & QC_SPC_SOFT) &&
	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_SPC_HARD) &&
	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_INO_SOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
	    ((di->d_fieldmask & QC_INO_HARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
		return -ERANGE;

	spin_lock(&dq_data_lock);
	if (di->d_fieldmask & QC_SPACE) {
		/* d_space covers used + reserved; store only the used part */
		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_SOFT)
		dm->dqb_bsoftlimit = di->d_spc_softlimit;
	if (di->d_fieldmask & QC_SPC_HARD)
		dm->dqb_bhardlimit = di->d_spc_hardlimit;
	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_COUNT) {
		dm->dqb_curinodes = di->d_ino_count;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_SOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & QC_INO_HARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_TIMER) {
		dm->dqb_btime = di->d_spc_timer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_TIMER) {
		dm->dqb_itime = di->d_ino_timer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	/* Under soft limit (or no soft limit): clear grace time & warning */
	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_INO_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
	}
	/* A dquot with no limits at all is only tracked, not enforced */
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dq_data_lock);
	mark_dquot_dirty(dquot);

	return 0;
}
2696
2697 int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
2698                   struct qc_dqblk *di)
2699 {
2700         struct dquot *dquot;
2701         int rc;
2702
2703         dquot = dqget(sb, qid);
2704         if (IS_ERR(dquot)) {
2705                 rc = PTR_ERR(dquot);
2706                 goto out;
2707         }
2708         rc = do_set_dqblk(dquot, di);
2709         dqput(dquot);
2710 out:
2711         return rc;
2712 }
2713 EXPORT_SYMBOL(dquot_set_dqblk);
2714
/*
 * Generic routine for getting common part of quota file information.
 * Fills @state with per-type flags, grace times and quota-file geometry
 * for every quota type that is currently active on @sb.  Per-type data
 * is read under dq_data_lock for a consistent snapshot.  Always returns 0.
 */
int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
	struct mem_dqinfo *mi;
	struct qc_type_state *tstate;
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	memset(state, 0, sizeof(*state));
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!sb_has_quota_active(sb, type))
			continue;
		tstate = state->s_state + type;
		mi = sb_dqopt(sb)->info + type;
		tstate->flags = QCI_ACCT_ENABLED;
		spin_lock(&dq_data_lock);
		if (mi->dqi_flags & DQF_SYS_FILE)
			tstate->flags |= QCI_SYSFILE;
		if (mi->dqi_flags & DQF_ROOT_SQUASH)
			tstate->flags |= QCI_ROOT_SQUASH;
		if (sb_has_quota_limits_enabled(sb, type))
			tstate->flags |= QCI_LIMITS_ENFORCED;
		tstate->spc_timelimit = mi->dqi_bgrace;
		tstate->ino_timelimit = mi->dqi_igrace;
		tstate->ino = dqopt->files[type]->i_ino;
		tstate->blocks = dqopt->files[type]->i_blocks;
		tstate->nextents = 1;	/* We don't know... */
		spin_unlock(&dq_data_lock);
	}
	return 0;
}
EXPORT_SYMBOL(dquot_get_state);
2747
2748 /* Generic routine for setting common part of quota file information */
2749 int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
2750 {
2751         struct mem_dqinfo *mi;
2752         int err = 0;
2753
2754         if ((ii->i_fieldmask & QC_WARNS_MASK) ||
2755             (ii->i_fieldmask & QC_RT_SPC_TIMER))
2756                 return -EINVAL;
2757         if (!sb_has_quota_active(sb, type))
2758                 return -ESRCH;
2759         mi = sb_dqopt(sb)->info + type;
2760         if (ii->i_fieldmask & QC_FLAGS) {
2761                 if ((ii->i_flags & QCI_ROOT_SQUASH &&
2762                      mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
2763                         return -EINVAL;
2764         }
2765         spin_lock(&dq_data_lock);
2766         if (ii->i_fieldmask & QC_SPC_TIMER)
2767                 mi->dqi_bgrace = ii->i_spc_timelimit;
2768         if (ii->i_fieldmask & QC_INO_TIMER)
2769                 mi->dqi_igrace = ii->i_ino_timelimit;
2770         if (ii->i_fieldmask & QC_FLAGS) {
2771                 if (ii->i_flags & QCI_ROOT_SQUASH)
2772                         mi->dqi_flags |= DQF_ROOT_SQUASH;
2773                 else
2774                         mi->dqi_flags &= ~DQF_ROOT_SQUASH;
2775         }
2776         spin_unlock(&dq_data_lock);
2777         mark_info_dirty(sb, type);
2778         /* Force write to disk */
2779         sb->dq_op->write_info(sb, type);
2780         return err;
2781 }
2782 EXPORT_SYMBOL(dquot_set_dqinfo);
2783
/*
 * quotactl operations for filesystems that store quotas in hidden system
 * files (DQUOT_QUOTA_SYS_FILE); no quota_on/quota_off - quotas are
 * enabled/disabled via quota_enable/quota_disable instead.
 */
const struct quotactl_ops dquot_quotactl_sysfile_ops = {
	.quota_enable	= dquot_quota_enable,
	.quota_disable	= dquot_quota_disable,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
2795
/*
 * Sysctl handler for the quota statistics entries: fold the per-CPU
 * counter into the global dqstats.stat[] slot for this entry, then let
 * proc_dointvec() present it to userspace.
 */
static int do_proc_dqstats(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/*
	 * Each table entry's ->data points into dqstats.stat[], so pointer
	 * subtraction recovers the statistic index for this entry.
	 */
	unsigned int type = (int *)table->data - dqstats.stat;

	/* Update global table */
	dqstats.stat[type] =
			percpu_counter_sum_positive(&dqstats.counter[type]);
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
2806
/*
 * /proc/sys/fs/quota/* entries: read-only statistic counters served by
 * do_proc_dqstats(), plus (optionally) the writable "warnings" toggle.
 */
static struct ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.stat[DQST_LOOKUPS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.stat[DQST_DROPS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.stat[DQST_READS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.stat[DQST_WRITES],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.stat[DQST_CACHE_HITS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.stat[DQST_SYNCS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
	{ },
};
2875
/* /proc/sys/fs/quota directory entry */
static struct ctl_table fs_table[] = {
	{
		.procname	= "quota",
		.mode		= 0555,
		.child		= fs_dqstats_table,
	},
	{ },
};
2884
/* Root of the sysctl tree registered by dquot_init(): /proc/sys/fs */
static struct ctl_table sys_table[] = {
	{
		.procname	= "fs",
		.mode		= 0555,
		.child		= fs_table,
	},
	{ },
};
2893
/*
 * Boot-time initialization of the quota subsystem: register sysctls,
 * create the dquot slab cache and hash table, set up the per-CPU
 * statistics counters and register the cache shrinker.  Allocation
 * failures panic - quotas are core fs infrastructure and this runs at
 * fs_initcall time.
 */
static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_table(sys_table);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			NULL);

	/* order 0: a single page backs the hash table */
	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
		ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
		if (ret)
			panic("Cannot create dquot stat counters");
	}

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = 0;
	do {
		dq_hash_bits++;
	} while (nr_hash >> dq_hash_bits);
	dq_hash_bits--;	/* loop overshoots by one bit; round back down */

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));

	register_shrinker(&dqcache_shrinker);

	return 0;
}
fs_initcall(dquot_init);