/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sort.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
#include "delegation.h"
#include "nfs42.h"
#include "nfs4_fs.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
                struct list_head *free_me,
                const struct pnfs_layout_range *range,
                u32 seq);
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
                                struct list_head *tmp_list);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
                if (local->id == id)
                        goto out;
        local = NULL;
out:
        dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
        return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        spin_lock(&pnfs_spinlock);
        local = find_pnfs_driver_locked(id);
        if (local != NULL && !try_module_get(local->owner)) {
                dprintk("%s: Could not grab reference on module\n", __func__);
                local = NULL;
        }
        spin_unlock(&pnfs_spinlock);
        return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
        if (nfss->pnfs_curr_ld) {
                if (nfss->pnfs_curr_ld->clear_layoutdriver)
                        nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
                /* Decrement the MDS count. Purge the deviceid cache if zero */
                if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
                        nfs4_deviceid_purge_client(nfss->nfs_client);
                module_put(nfss->pnfs_curr_ld->owner);
        }
        nfss->pnfs_curr_ld = NULL;
}

/*
 * When the server sends a list of layout types, we choose one in the order
 * given in the list below.
 *
 * FIXME: should this list be configurable in some fashion? module param?
 *        mount option? something else?
 */
static const u32 ld_prefs[] = {
        LAYOUT_SCSI,
        LAYOUT_BLOCK_VOLUME,
        LAYOUT_OSD2_OBJECTS,
        LAYOUT_FLEX_FILES,
        LAYOUT_NFSV4_1_FILES,
        0
};

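/*
 * Comparator for sort(): a layout type that appears earlier in
 * ld_prefs[] sorts before one that appears later (or not at all);
 * types absent from ld_prefs[] compare as equal.
 */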
static int
ld_cmp(const void *e1, const void *e2)
{
        u32 ld1 = *((u32 *)e1);
        u32 ld2 = *((u32 *)e2);
        int i;

        for (i = 0; ld_prefs[i] != 0; i++) {
                if (ld1 == ld_prefs[i])
                        return -1;

                if (ld2 == ld_prefs[i])
                        return 1;
        }
        return 0;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @fsinfo: contains the array of layout types supported by the MDS.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
                      struct nfs_fsinfo *fsinfo)
{
        struct pnfs_layoutdriver_type *ld_type = NULL;
        u32 id;
        int i;

        if (fsinfo->nlayouttypes == 0)
                goto out_no_driver;
        if (!(server->nfs_client->cl_exchange_flags &
                 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
                printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
                        __func__, server->nfs_client->cl_exchange_flags);
                goto out_no_driver;
        }

        sort(fsinfo->layouttype, fsinfo->nlayouttypes,
                sizeof(*fsinfo->layouttype), ld_cmp, NULL);

        for (i = 0; i < fsinfo->nlayouttypes; i++) {
                id = fsinfo->layouttype[i];
                ld_type = find_pnfs_driver(id);
                if (!ld_type) {
                        request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
                                        id);
                        ld_type = find_pnfs_driver(id);
                }
                if (ld_type)
                        break;
        }

        if (!ld_type) {
                dprintk("%s: No pNFS module found!\n", __func__);
                goto out_no_driver;
        }

        server->pnfs_curr_ld = ld_type;
        if (ld_type->set_layoutdriver
            && ld_type->set_layoutdriver(server, mntfh)) {
                printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
                        "driver %u.\n", __func__, id);
                module_put(ld_type->owner);
                goto out_no_driver;
        }
        /* Bump the MDS count */
        atomic_inc(&server->nfs_client->cl_mds_count);

        dprintk("%s: pNFS module for %u set\n", __func__, id);
        return;

out_no_driver:
        dprintk("%s: Using NFSv4 I/O\n", __func__);
        server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        int status = -EINVAL;
        struct pnfs_layoutdriver_type *tmp;

        if (ld_type->id == 0) {
                printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
                return status;
        }
        if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
                printk(KERN_ERR "NFS: %s Layout driver must provide "
                       "alloc_lseg and free_lseg.\n", __func__);
                return status;
        }

        spin_lock(&pnfs_spinlock);
        tmp = find_pnfs_driver_locked(ld_type->id);
        if (!tmp) {
                list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
                status = 0;
                dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
                        ld_type->name);
        } else {
                printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
                        __func__, ld_type->id);
        }
        spin_unlock(&pnfs_spinlock);

        return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
        spin_lock(&pnfs_spinlock);
        list_del(&ld_type->pnfs_tblid);
        spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
        refcount_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
        return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct nfs_server *server = NFS_SERVER(lo->plh_inode);
        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

        if (!list_empty(&lo->plh_layouts)) {
                struct nfs_client *clp = server->nfs_client;

                spin_lock(&clp->cl_lock);
                list_del_init(&lo->plh_layouts);
                spin_unlock(&clp->cl_lock);
        }
        put_cred(lo->plh_lc_cred);
        return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
        dprintk("%s: freeing layout cache %p\n", __func__, lo);
        nfsi->layout = NULL;
        /* Reset MDS Threshold I/O counters */
        nfsi->write_io = 0;
        nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct inode *inode;

        if (!lo)
                return;
        inode = lo->plh_inode;
        pnfs_layoutreturn_before_put_layout_hdr(lo);

        if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
                if (!list_empty(&lo->plh_segs))
                        WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
                pnfs_detach_layout_hdr(lo);
                spin_unlock(&inode->i_lock);
                pnfs_free_layout_hdr(lo);
        }
}

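/*
 * Record a request to return (part of) the layout. If both IOMODE_READ
 * and IOMODE_RW ranges end up marked for return, the pending
 * LAYOUTRETURN is widened to IOMODE_ANY.
 */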
static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
                         u32 seq)
{
        if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
                iomode = IOMODE_ANY;
        lo->plh_return_iomode = iomode;
        set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
        if (seq != 0) {
                WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
                lo->plh_return_seq = seq;
        }
}

static void
pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
{
        struct pnfs_layout_segment *lseg;
        lo->plh_return_iomode = 0;
        lo->plh_return_seq = 0;
        clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
        list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
                if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
                        continue;
                pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
        }
}

static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
        clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
        clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
        smp_mb__after_atomic();
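        /* wake_up_bit() checks the waitqueue locklessly, so the barrier
         * above must order the bit clearing before the wakeup. */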
        wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
        rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}

static void
pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
                struct list_head *free_me)
{
        clear_bit(NFS_LSEG_ROC, &lseg->pls_flags);
        clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags))
                pnfs_lseg_dec_and_remove_zero(lseg, free_me);
        if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
                pnfs_lseg_dec_and_remove_zero(lseg, free_me);
}

/*
 * Update the seqid of a layout stateid
 */
bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
                struct pnfs_layout_range *dst_range,
                struct inode *inode)
{
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_range range = {
                .iomode = IOMODE_ANY,
                .offset = 0,
                .length = NFS4_MAX_UINT64,
        };
        bool ret = false;
        LIST_HEAD(head);
        int err;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (lo && nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
                err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
                if (err != -EBUSY) {
                        dst->seqid = lo->plh_stateid.seqid;
                        *dst_range = range;
                        ret = true;
                }
        }
        spin_unlock(&inode->i_lock);
        pnfs_free_lseg_list(&head);
        return ret;
}

/*
 * Mark a pnfs_layout_hdr and all associated layout segments as invalid
 *
 * In order to continue using the pnfs_layout_hdr, a full recovery
 * is required.
 * Note that caller must hold inode->i_lock.
 */
int
pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
                struct list_head *lseg_list)
{
        struct pnfs_layout_range range = {
                .iomode = IOMODE_ANY,
                .offset = 0,
                .length = NFS4_MAX_UINT64,
        };
        struct pnfs_layout_segment *lseg, *next;

        set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
                pnfs_clear_lseg_state(lseg, lseg_list);
        pnfs_clear_layoutreturn_info(lo);
        pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
        if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
            !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
                pnfs_clear_layoutreturn_waitbit(lo);
        return !list_empty(&lo->plh_segs);
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
        return iomode == IOMODE_RW ?
                NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
        lo->plh_retry_timestamp = jiffies;
        if (!test_and_set_bit(fail_bit, &lo->plh_flags))
                refcount_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
        if (test_and_clear_bit(fail_bit, &lo->plh_flags))
                refcount_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
        struct inode *inode = lo->plh_inode;
        struct pnfs_layout_range range = {
                .iomode = iomode,
                .offset = 0,
                .length = NFS4_MAX_UINT64,
        };
        LIST_HEAD(head);

        spin_lock(&inode->i_lock);
        pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
        pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
        spin_unlock(&inode->i_lock);
        pnfs_free_lseg_list(&head);
        dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
                        iomode == IOMODE_RW ?  "RW" : "READ");
}

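/*
 * Test whether layoutgets for @iomode are still marked as failed; once
 * PNFS_LAYOUTGET_RETRY_TIMEOUT (two minutes) has elapsed, the fail bit
 * is cleared so that layoutgets may be retried.
 */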
static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
        unsigned long start, end;
        int fail_bit = pnfs_iomode_to_fail_bit(iomode);

        if (test_bit(fail_bit, &lo->plh_flags) == 0)
                return false;
        end = jiffies;
        start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
        if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
                /* It is time to retry the failed layoutgets */
                pnfs_layout_clear_fail_bit(lo, fail_bit);
                return false;
        }
        return true;
}

static void
pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
                const struct pnfs_layout_range *range,
                const nfs4_stateid *stateid)
{
        INIT_LIST_HEAD(&lseg->pls_list);
        INIT_LIST_HEAD(&lseg->pls_lc_list);
        refcount_set(&lseg->pls_refcount, 1);
        set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
        lseg->pls_layout = lo;
        lseg->pls_range = *range;
        lseg->pls_seq = be32_to_cpu(stateid->seqid);
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
        if (lseg != NULL) {
                struct inode *inode = lseg->pls_layout->plh_inode;
                NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg);
        }
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
                struct pnfs_layout_segment *lseg)
{
        WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
        list_del_init(&lseg->pls_list);
        /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
        refcount_dec(&lo->plh_refcount);
        if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
                return;
        if (list_empty(&lo->plh_segs) &&
            !test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
            !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
                if (atomic_read(&lo->plh_outstanding) == 0)
                        set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
                clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
        }
}

static bool
pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo,
                struct pnfs_layout_segment *lseg)
{
        if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
            pnfs_layout_is_valid(lo)) {
                pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
                list_move_tail(&lseg->pls_list, &lo->plh_return_segs);
                return true;
        }
        return false;
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
        struct pnfs_layout_hdr *lo;
        struct inode *inode;

        if (!lseg)
                return;

        dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
                refcount_read(&lseg->pls_refcount),
                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));

        lo = lseg->pls_layout;
        inode = lo->plh_inode;

        if (refcount_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
                if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
                        spin_unlock(&inode->i_lock);
                        return;
                }
                pnfs_get_layout_hdr(lo);
                pnfs_layout_remove_lseg(lo, lseg);
                if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
                        lseg = NULL;
                spin_unlock(&inode->i_lock);
                pnfs_free_lseg(lseg);
                pnfs_put_layout_hdr(lo);
        }
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
                 const struct pnfs_layout_range *l2)
{
        u64 start1 = l1->offset;
        u64 end1 = pnfs_end_offset(start1, l1->length);
        u64 start2 = l2->offset;
        u64 end2 = pnfs_end_offset(start2, l2->length);

        return (start1 <= start2) && (end1 >= end2);
}

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
                struct list_head *tmp_list)
{
        if (!refcount_dec_and_test(&lseg->pls_refcount))
                return false;
        pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
        list_add(&lseg->pls_list, tmp_list);
        return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
                             struct list_head *tmp_list)
{
        int rv = 0;

        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
                /* Remove the reference keeping the lseg in the
                 * list.  It will now be removed when all
                 * outstanding io is finished.
                 */
                dprintk("%s: lseg %p ref %d\n", __func__, lseg,
                        refcount_read(&lseg->pls_refcount));
                if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
                        rv = 1;
        }
        return rv;
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
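/*
 * Illustrative example: s1 = 1 and s2 = 0xffffffff (s1 handed out just
 * after the seqid wrapped) gives (s32)(s1 - s2) == 2 > 0, so s1 is
 * correctly treated as newer. The comparison is reliable as long as
 * the two seqids are less than 2^31 apart.
 */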
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
        return (s32)(s1 - s2) > 0;
}

static bool
pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
                 const struct pnfs_layout_range *recall_range)
{
        return (recall_range->iomode == IOMODE_ANY ||
                lseg_range->iomode == recall_range->iomode) &&
               pnfs_lseg_range_intersecting(lseg_range, recall_range);
}

static bool
pnfs_match_lseg_recall(const struct pnfs_layout_segment *lseg,
                const struct pnfs_layout_range *recall_range,
                u32 seq)
{
        if (seq != 0 && pnfs_seqid_is_newer(lseg->pls_seq, seq))
                return false;
        if (recall_range == NULL)
                return true;
        return pnfs_should_free_range(&lseg->pls_range, recall_range);
}

/**
 * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
 * @lo: layout header containing the lsegs
 * @tmp_list: list head where doomed lsegs should go
 * @recall_range: optional recall range argument to match (may be NULL)
 * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
 *
 * Walk the list of lsegs in the layout header, and tear down any that should
 * be destroyed. If "recall_range" is specified then the segment must match
 * that range. If "seq" is non-zero, then only match segments that were handed
 * out at or before that sequence.
 *
 * Returns number of matching invalid lsegs remaining in list after scanning
 * it and purging them.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
                            struct list_head *tmp_list,
                            const struct pnfs_layout_range *recall_range,
                            u32 seq)
{
        struct pnfs_layout_segment *lseg, *next;
        int remaining = 0;

        dprintk("%s:Begin lo %p\n", __func__, lo);

        if (list_empty(&lo->plh_segs))
                return 0;
        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
                if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
                        dprintk("%s: freeing lseg %p iomode %d seq %u "
                                "offset %llu length %llu\n", __func__,
                                lseg, lseg->pls_range.iomode, lseg->pls_seq,
                                lseg->pls_range.offset, lseg->pls_range.length);
                        if (!mark_lseg_invalid(lseg, tmp_list))
                                remaining++;
                }
        dprintk("%s:Return %i\n", __func__, remaining);
        return remaining;
}

static void
pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
                struct list_head *free_me,
                const struct pnfs_layout_range *range,
                u32 seq)
{
        struct pnfs_layout_segment *lseg, *next;

        list_for_each_entry_safe(lseg, next, &lo->plh_return_segs, pls_list) {
                if (pnfs_match_lseg_recall(lseg, range, seq))
                        list_move_tail(&lseg->pls_list, free_me);
        }
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
        struct pnfs_layout_segment *lseg, *tmp;

        if (list_empty(free_me))
                return;

        list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
                list_del(&lseg->pls_list);
                pnfs_free_lseg(lseg);
        }
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
        struct pnfs_layout_hdr *lo;
        LIST_HEAD(tmp_list);

        spin_lock(&nfsi->vfs_inode.i_lock);
        lo = nfsi->layout;
        if (lo) {
                pnfs_get_layout_hdr(lo);
                pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
                spin_unlock(&nfsi->vfs_inode.i_lock);
                pnfs_free_lseg_list(&tmp_list);
                nfs_commit_inode(&nfsi->vfs_inode, 0);
                pnfs_put_layout_hdr(lo);
        } else
                spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
                struct list_head *layout_list)
{
        struct pnfs_layout_hdr *lo;
        bool ret = false;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
                pnfs_get_layout_hdr(lo);
                list_add(&lo->plh_bulk_destroy, layout_list);
                ret = true;
        }
        spin_unlock(&inode->i_lock);
        return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
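/*
 * Returns -EAGAIN whenever both locks had to be dropped and re-taken,
 * since the server list may then have changed under us; the caller is
 * expected to restart its walk.
 */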
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
                struct nfs_server *server,
                struct list_head *layout_list)
        __must_hold(&clp->cl_lock)
        __must_hold(RCU)
{
        struct pnfs_layout_hdr *lo, *next;
        struct inode *inode;

        list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
                if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
                    test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) ||
                    !list_empty(&lo->plh_bulk_destroy))
                        continue;
                /* If the sb is being destroyed, just bail */
                if (!nfs_sb_active(server->super))
                        break;
                inode = igrab(lo->plh_inode);
                if (inode != NULL) {
                        list_del_init(&lo->plh_layouts);
                        if (pnfs_layout_add_bulk_destroy_list(inode,
                                                layout_list))
                                continue;
                        rcu_read_unlock();
                        spin_unlock(&clp->cl_lock);
                        iput(inode);
                } else {
                        rcu_read_unlock();
                        spin_unlock(&clp->cl_lock);
                        set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
                }
                nfs_sb_deactive(server->super);
                spin_lock(&clp->cl_lock);
                rcu_read_lock();
                return -EAGAIN;
        }
        return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
                bool is_bulk_recall)
{
        struct pnfs_layout_hdr *lo;
        struct inode *inode;
        LIST_HEAD(lseg_list);
        int ret = 0;

        while (!list_empty(layout_list)) {
                lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
                                plh_bulk_destroy);
                dprintk("%s freeing layout for inode %lu\n", __func__,
                        lo->plh_inode->i_ino);
                inode = lo->plh_inode;

                pnfs_layoutcommit_inode(inode, false);

                spin_lock(&inode->i_lock);
                list_del_init(&lo->plh_bulk_destroy);
                if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
                        if (is_bulk_recall)
                                set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
                        ret = -EAGAIN;
                }
                spin_unlock(&inode->i_lock);
                pnfs_free_lseg_list(&lseg_list);
                /* Free all lsegs that are attached to commit buckets */
                nfs_commit_inode(inode, 0);
                pnfs_put_layout_hdr(lo);
                nfs_iput_and_deactive(inode);
        }
        return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
                struct nfs_fsid *fsid,
                bool is_recall)
{
        struct nfs_server *server;
        LIST_HEAD(layout_list);

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
restart:
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
                        continue;
                if (pnfs_layout_bulk_destroy_byserver_locked(clp,
                                server,
                                &layout_list) != 0)
                        goto restart;
        }
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        if (list_empty(&layout_list))
                return 0;
        return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
                bool is_recall)
{
        struct nfs_server *server;
        LIST_HEAD(layout_list);

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
restart:
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                if (pnfs_layout_bulk_destroy_byserver_locked(clp,
                                        server,
                                        &layout_list) != 0)
                        goto restart;
        }
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        if (list_empty(&layout_list))
                return 0;
        return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
        nfs4_deviceid_mark_client_invalid(clp);
        nfs4_deviceid_purge_client(clp);

        pnfs_destroy_layouts_byclid(clp, false);
}

/* Update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
                        bool update_barrier)
{
        u32 oldseq, newseq, new_barrier = 0;

        oldseq = be32_to_cpu(lo->plh_stateid.seqid);
        newseq = be32_to_cpu(new->seqid);

        if (!pnfs_layout_is_valid(lo)) {
                nfs4_stateid_copy(&lo->plh_stateid, new);
                lo->plh_barrier = newseq;
                pnfs_clear_layoutreturn_info(lo);
                clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
                return;
        }
        if (pnfs_seqid_is_newer(newseq, oldseq)) {
                nfs4_stateid_copy(&lo->plh_stateid, new);
                /*
                 * Because of wraparound, we want to keep the barrier
                 * "close" to the current seqids.
                 */
                new_barrier = newseq - atomic_read(&lo->plh_outstanding);
        }
        if (update_barrier)
                new_barrier = be32_to_cpu(new->seqid);
        else if (new_barrier == 0)
                return;
        if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
                lo->plh_barrier = new_barrier;
}
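/*
 * Illustrative example of the barrier heuristic above: with
 * newseq == 12, two LAYOUTGETs outstanding and update_barrier false,
 * the barrier becomes 10, so replies carrying seqid 11 or 12 are still
 * accepted while anything at or below 10 is treated as stale.
 */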

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
                const nfs4_stateid *stateid)
{
        u32 seqid = be32_to_cpu(stateid->seqid);

        return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}
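/*
 * In other words, a layout stateid whose seqid is not strictly newer
 * than plh_barrier is ignored, which fences off replies (e.g. from a
 * LAYOUTGET) that raced with a concurrent LAYOUTRETURN.
 */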

/* Return true if new layoutgets are currently blocked for this layout */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
{
        return lo->plh_block_lgets ||
                test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}

static struct nfs_server *
pnfs_find_server(struct inode *inode, struct nfs_open_context *ctx)
{
        struct nfs_server *server;

        if (inode) {
                server = NFS_SERVER(inode);
        } else {
                struct dentry *parent_dir = dget_parent(ctx->dentry);
                server = NFS_SERVER(parent_dir->d_inode);
                dput(parent_dir);
        }
        return server;
}

static void nfs4_free_pages(struct page **pages, size_t size)
{
        int i;

        if (!pages)
                return;

        for (i = 0; i < size; i++) {
                if (!pages[i])
                        break;
                __free_page(pages[i]);
        }
        kfree(pages);
}

static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
{
        struct page **pages;
        int i;

        pages = kmalloc_array(size, sizeof(struct page *), gfp_flags);
        if (!pages) {
                dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
                return NULL;
        }

        for (i = 0; i < size; i++) {
                pages[i] = alloc_page(gfp_flags);
                if (!pages[i]) {
                        dprintk("%s: failed to allocate page\n", __func__);
                        nfs4_free_pages(pages, i);
                        return NULL;
                }
        }

        return pages;
}

static struct nfs4_layoutget *
pnfs_alloc_init_layoutget_args(struct inode *ino,
           struct nfs_open_context *ctx,
           const nfs4_stateid *stateid,
           const struct pnfs_layout_range *range,
           gfp_t gfp_flags)
{
        struct nfs_server *server = pnfs_find_server(ino, ctx);
        size_t max_reply_sz = server->pnfs_curr_ld->max_layoutget_response;
        size_t max_pages = max_response_pages(server);
        struct nfs4_layoutget *lgp;

        dprintk("--> %s\n", __func__);

        lgp = kzalloc(sizeof(*lgp), gfp_flags);
        if (lgp == NULL)
                return NULL;

        if (max_reply_sz) {
                size_t npages = (max_reply_sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
                if (npages < max_pages)
                        max_pages = npages;
        }

        lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
        if (!lgp->args.layout.pages) {
                kfree(lgp);
                return NULL;
        }
        lgp->args.layout.pglen = max_pages * PAGE_SIZE;
        lgp->res.layoutp = &lgp->args.layout;

        /* Don't confuse uninitialised result and success */
        lgp->res.status = -NFS4ERR_DELAY;

        lgp->args.minlength = PAGE_SIZE;
        if (lgp->args.minlength > range->length)
                lgp->args.minlength = range->length;
        if (ino) {
                loff_t i_size = i_size_read(ino);

                if (range->iomode == IOMODE_READ) {
                        if (range->offset >= i_size)
                                lgp->args.minlength = 0;
                        else if (i_size - range->offset < lgp->args.minlength)
                                lgp->args.minlength = i_size - range->offset;
                }
        }
        lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
        pnfs_copy_range(&lgp->args.range, range);
        lgp->args.type = server->pnfs_curr_ld->id;
        lgp->args.inode = ino;
        lgp->args.ctx = get_nfs_open_context(ctx);
        nfs4_stateid_copy(&lgp->args.stateid, stateid);
        lgp->gfp_flags = gfp_flags;
        lgp->cred = get_cred(ctx->cred);
        return lgp;
}

void pnfs_layoutget_free(struct nfs4_layoutget *lgp)
{
        size_t max_pages = lgp->args.layout.pglen / PAGE_SIZE;

        nfs4_free_pages(lgp->args.layout.pages, max_pages);
        if (lgp->args.inode)
                pnfs_put_layout_hdr(NFS_I(lgp->args.inode)->layout);
        put_cred(lgp->cred);
        put_nfs_open_context(lgp->args.ctx);
        kfree(lgp);
}

static void pnfs_clear_layoutcommit(struct inode *inode,
                struct list_head *head)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct pnfs_layout_segment *lseg, *tmp;

        if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
                return;
        list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
                if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
                        continue;
                pnfs_lseg_dec_and_remove_zero(lseg, head);
        }
}

void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
                const nfs4_stateid *arg_stateid,
                const struct pnfs_layout_range *range,
                const nfs4_stateid *stateid)
{
        struct inode *inode = lo->plh_inode;
        LIST_HEAD(freeme);

        spin_lock(&inode->i_lock);
        if (!pnfs_layout_is_valid(lo) || !arg_stateid ||
            !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
                goto out_unlock;
        if (stateid) {
                u32 seq = be32_to_cpu(arg_stateid->seqid);

                pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
                pnfs_free_returned_lsegs(lo, &freeme, range, seq);
                pnfs_set_layout_stateid(lo, stateid, true);
        } else
                pnfs_mark_layout_stateid_invalid(lo, &freeme);
out_unlock:
        pnfs_clear_layoutreturn_waitbit(lo);
        spin_unlock(&inode->i_lock);
        pnfs_free_lseg_list(&freeme);
}

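/*
 * Decide whether this caller may issue the LAYOUTRETURN: fails if
 * LAYOUTGETs are outstanding or if another return already holds
 * NFS_LAYOUT_RETURN_LOCK. On success a reference is taken on the
 * layout header, and the stateid and iomode to return are handed back
 * to the caller; the layoutreturn completion path is responsible for
 * putting that reference.
 */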
static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
                nfs4_stateid *stateid,
                enum pnfs_iomode *iomode)
{
        /* Serialise LAYOUTGET/LAYOUTRETURN */
        if (atomic_read(&lo->plh_outstanding) != 0)
                return false;
        if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
                return false;
        set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
        pnfs_get_layout_hdr(lo);
        if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
                if (stateid != NULL) {
                        nfs4_stateid_copy(stateid, &lo->plh_stateid);
                        if (lo->plh_return_seq != 0)
                                stateid->seqid = cpu_to_be32(lo->plh_return_seq);
                }
                if (iomode != NULL)
                        *iomode = lo->plh_return_iomode;
                pnfs_clear_layoutreturn_info(lo);
                return true;
        }
        if (stateid != NULL)
                nfs4_stateid_copy(stateid, &lo->plh_stateid);
        if (iomode != NULL)
                *iomode = IOMODE_ANY;
        return true;
}

static void
pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args,
                struct pnfs_layout_hdr *lo,
                const nfs4_stateid *stateid,
                enum pnfs_iomode iomode)
{
        struct inode *inode = lo->plh_inode;

        args->layout_type = NFS_SERVER(inode)->pnfs_curr_ld->id;
        args->inode = inode;
        args->range.iomode = iomode;
        args->range.offset = 0;
        args->range.length = NFS4_MAX_UINT64;
        args->layout = lo;
        nfs4_stateid_copy(&args->stateid, stateid);
}

static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
                       enum pnfs_iomode iomode, bool sync)
{
        struct inode *ino = lo->plh_inode;
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
        struct nfs4_layoutreturn *lrp;
        int status = 0;

        lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
        if (unlikely(lrp == NULL)) {
                status = -ENOMEM;
                spin_lock(&ino->i_lock);
                pnfs_clear_layoutreturn_waitbit(lo);
                spin_unlock(&ino->i_lock);
                pnfs_put_layout_hdr(lo);
                goto out;
        }

        pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
        lrp->args.ld_private = &lrp->ld_private;
        lrp->clp = NFS_SERVER(ino)->nfs_client;
        lrp->cred = lo->plh_lc_cred;
        if (ld->prepare_layoutreturn)
                ld->prepare_layoutreturn(&lrp->args);

        status = nfs4_proc_layoutreturn(lrp, sync);
out:
        dprintk("<-- %s status: %d\n", __func__, status);
        return status;
}

/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
{
        struct pnfs_layout_segment *s;
        enum pnfs_iomode iomode;
        u32 seq;

        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
                return false;

        seq = lo->plh_return_seq;
        iomode = lo->plh_return_iomode;

        /* Defer layoutreturn until all recalled lsegs are done */
        list_for_each_entry(s, &lo->plh_segs, pls_list) {
                if (seq && pnfs_seqid_is_newer(s->pls_seq, seq))
                        continue;
                if (iomode != IOMODE_ANY && s->pls_range.iomode != iomode)
                        continue;
                if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
                        return false;
        }

        return true;
}

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct inode *inode = lo->plh_inode;

        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
                return;
        spin_lock(&inode->i_lock);
        if (pnfs_layout_need_return(lo)) {
                nfs4_stateid stateid;
                enum pnfs_iomode iomode;
                bool send;

                send = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
                spin_unlock(&inode->i_lock);
                if (send) {
                        /* Send an async layoutreturn so we don't deadlock */
                        pnfs_send_layoutreturn(lo, &stateid, iomode, false);
                }
        } else
                spin_unlock(&inode->i_lock);
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
        struct pnfs_layout_hdr *lo = NULL;
        struct nfs_inode *nfsi = NFS_I(ino);
        LIST_HEAD(tmp_list);
        nfs4_stateid stateid;
        int status = 0;
        bool send, valid_layout;

        dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
        if (!lo) {
                spin_unlock(&ino->i_lock);
                dprintk("NFS: %s no layout to return\n", __func__);
                goto out;
        }
        /* Reference matched in nfs4_layoutreturn_release */
        pnfs_get_layout_hdr(lo);
        /* Is there an outstanding layoutreturn ? */
        if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
                spin_unlock(&ino->i_lock);
                if (wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
                                        TASK_UNINTERRUPTIBLE))
                        goto out_put_layout_hdr;
                spin_lock(&ino->i_lock);
        }
        valid_layout = pnfs_layout_is_valid(lo);
        pnfs_clear_layoutcommit(ino, &tmp_list);
        pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);

        if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
                struct pnfs_layout_range range = {
                        .iomode         = IOMODE_ANY,
                        .offset         = 0,
                        .length         = NFS4_MAX_UINT64,
                };
                NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
        }

        /* Don't send a LAYOUTRETURN if list was initially empty */
        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
                        !valid_layout) {
                spin_unlock(&ino->i_lock);
                dprintk("NFS: %s no layout segments to return\n", __func__);
                goto out_put_layout_hdr;
        }

        send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
        spin_unlock(&ino->i_lock);
        if (send)
                status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
out_put_layout_hdr:
        pnfs_free_lseg_list(&tmp_list);
        pnfs_put_layout_hdr(lo);
out:
        dprintk("<-- %s status: %d\n", __func__, status);
        return status;
}

int
pnfs_commit_and_return_layout(struct inode *inode)
{
        struct pnfs_layout_hdr *lo;
        int ret;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (lo == NULL) {
                spin_unlock(&inode->i_lock);
                return 0;
        }
        pnfs_get_layout_hdr(lo);
        /* Block new layoutgets and read/write to ds */
        lo->plh_block_lgets++;
        spin_unlock(&inode->i_lock);
        filemap_fdatawait(inode->i_mapping);
        ret = pnfs_layoutcommit_inode(inode, true);
        if (ret == 0)
                ret = _pnfs_return_layout(inode);
        spin_lock(&inode->i_lock);
        lo->plh_block_lgets--;
        spin_unlock(&inode->i_lock);
        pnfs_put_layout_hdr(lo);
        return ret;
}

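/*
 * Check whether a return-on-close of the layout is possible. If so,
 * fill in @args and @res so the LAYOUTRETURN can be compounded with
 * the CLOSE, and return true; otherwise return false, possibly after
 * sending a standalone LAYOUTRETURN.
 */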
bool pnfs_roc(struct inode *ino,
                struct nfs4_layoutreturn_args *args,
                struct nfs4_layoutreturn_res *res,
                const struct cred *cred)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct nfs_open_context *ctx;
        struct nfs4_state *state;
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg, *next;
        nfs4_stateid stateid;
        enum pnfs_iomode iomode = 0;
        bool layoutreturn = false, roc = false;
        bool skip_read = false;

        if (!nfs_have_layout(ino))
                return false;
retry:
        rcu_read_lock();
        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
        if (!lo || !pnfs_layout_is_valid(lo) ||
            test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                lo = NULL;
                goto out_noroc;
        }
        pnfs_get_layout_hdr(lo);
        if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
                spin_unlock(&ino->i_lock);
                rcu_read_unlock();
                wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
                                TASK_UNINTERRUPTIBLE);
                pnfs_put_layout_hdr(lo);
                goto retry;
        }

        /* no roc if we hold a delegation */
        if (nfs4_check_delegation(ino, FMODE_READ)) {
                if (nfs4_check_delegation(ino, FMODE_WRITE))
                        goto out_noroc;
                skip_read = true;
        }

        list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
                state = ctx->state;
                if (state == NULL)
                        continue;
                /* Don't return layout if there is open file state */
                if (state->state & FMODE_WRITE)
                        goto out_noroc;
                if (state->state & FMODE_READ)
                        skip_read = true;
        }

        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
                if (skip_read && lseg->pls_range.iomode == IOMODE_READ)
                        continue;
                /* If we are sending layoutreturn, invalidate all valid lsegs */
                if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
                        continue;
                /*
                 * Note: mark lseg for return so pnfs_layout_remove_lseg
                 * doesn't invalidate the layout for us.
                 */
                set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
                if (!mark_lseg_invalid(lseg, &lo->plh_return_segs))
                        continue;
                pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
        }

        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
                goto out_noroc;

        /* ROC in two conditions:
         * 1. there are ROC lsegs
         * 2. we don't send layoutreturn
         */
        /* lo ref dropped in pnfs_roc_release() */
        layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
        /* If the creds don't match, we can't compound the layoutreturn */
        if (!layoutreturn || cred != lo->plh_lc_cred)
                goto out_noroc;

        roc = layoutreturn;
        pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
        res->lrs_present = 0;
        layoutreturn = false;

out_noroc:
        spin_unlock(&ino->i_lock);
        rcu_read_unlock();
        pnfs_layoutcommit_inode(ino, true);
        if (roc) {
                struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
                if (ld->prepare_layoutreturn)
                        ld->prepare_layoutreturn(args);
                pnfs_put_layout_hdr(lo);
                return true;
        }
        if (layoutreturn)
                pnfs_send_layoutreturn(lo, &stateid, iomode, true);
        pnfs_put_layout_hdr(lo);
        return false;
}

void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
                struct nfs4_layoutreturn_res *res,
                int ret)
{
        struct pnfs_layout_hdr *lo = args->layout;
        const nfs4_stateid *arg_stateid = NULL;
        const nfs4_stateid *res_stateid = NULL;
        struct nfs4_xdr_opaque_data *ld_private = args->ld_private;

        if (ret == 0) {
                arg_stateid = &args->stateid;
                if (res->lrs_present)
                        res_stateid = &res->stateid;
        }
        pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
                        res_stateid);
        if (ld_private && ld_private->ops && ld_private->ops->free)
                ld_private->ops->free(ld_private);
        pnfs_put_layout_hdr(lo);
        trace_nfs4_layoutreturn_on_close(args->inode, 0);
}

bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *lo;
        bool sleep = false;

        /* We might not have taken a reference on the layout header, so
         * we need to check under i_lock */
1473         spin_lock(&ino->i_lock);
1474         lo = nfsi->layout;
1475         if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
1476                 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1477                 sleep = true;
1478         }
1479         spin_unlock(&ino->i_lock);
1480         return sleep;
1481 }
1482
1483 /*
1484  * Compare two layout segments for sorting into layout cache.
1485  * We want to preferentially return RW over RO layouts, so ensure those
1486  * are seen first.
1487  */
1488 static s64
1489 pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
1490            const struct pnfs_layout_range *l2)
1491 {
1492         s64 d;
1493
1494         /* high offset > low offset */
1495         d = l1->offset - l2->offset;
1496         if (d)
1497                 return d;
1498
1499         /* short length > long length */
1500         d = l2->length - l1->length;
1501         if (d)
1502                 return d;
1503
1504         /* read > read/write */
1505         return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
1506 }
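
/*
 * Illustrative sketch (not part of the original source; values are
 * hypothetical): two segments covering the same byte range differ only
 * in iomode, so the comparator above falls through to its final test
 * and sorts the RW segment first.
 *
 *	struct pnfs_layout_range rw = {
 *		.iomode = IOMODE_RW,
 *		.offset = 0,
 *		.length = NFS4_MAX_UINT64,
 *	};
 *	struct pnfs_layout_range ro = {
 *		.iomode = IOMODE_READ,
 *		.offset = 0,
 *		.length = NFS4_MAX_UINT64,
 *	};
 *
 *	pnfs_lseg_range_cmp(&rw, &ro) < 0, so rw sorts before ro;
 *	pnfs_lseg_range_cmp(&ro, &rw) > 0, so ro sorts after rw.
 */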
1507
1508 static bool
1509 pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
1510                 const struct pnfs_layout_range *l2)
1511 {
1512         return pnfs_lseg_range_cmp(l1, l2) > 0;
1513 }
1514
1515 static bool
1516 pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
1517                 struct pnfs_layout_segment *old)
1518 {
1519         return false;
1520 }
1521
1522 void
1523 pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
1524                    struct pnfs_layout_segment *lseg,
1525                    bool (*is_after)(const struct pnfs_layout_range *,
1526                            const struct pnfs_layout_range *),
1527                    bool (*do_merge)(struct pnfs_layout_segment *,
1528                            struct pnfs_layout_segment *),
1529                    struct list_head *free_me)
1530 {
1531         struct pnfs_layout_segment *lp, *tmp;
1532
1533         dprintk("%s:Begin\n", __func__);
1534
1535         list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
1536                 if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
1537                         continue;
1538                 if (do_merge(lseg, lp)) {
1539                         mark_lseg_invalid(lp, free_me);
1540                         continue;
1541                 }
1542                 if (is_after(&lseg->pls_range, &lp->pls_range))
1543                         continue;
1544                 list_add_tail(&lseg->pls_list, &lp->pls_list);
1545                 dprintk("%s: inserted lseg %p "
1546                         "iomode %d offset %llu length %llu before "
1547                         "lp %p iomode %d offset %llu length %llu\n",
1548                         __func__, lseg, lseg->pls_range.iomode,
1549                         lseg->pls_range.offset, lseg->pls_range.length,
1550                         lp, lp->pls_range.iomode, lp->pls_range.offset,
1551                         lp->pls_range.length);
1552                 goto out;
1553         }
1554         list_add_tail(&lseg->pls_list, &lo->plh_segs);
1555         dprintk("%s: inserted lseg %p "
1556                 "iomode %d offset %llu length %llu at tail\n",
1557                 __func__, lseg, lseg->pls_range.iomode,
1558                 lseg->pls_range.offset, lseg->pls_range.length);
1559 out:
1560         pnfs_get_layout_hdr(lo);
1561
1562         dprintk("%s:Return\n", __func__);
1563 }
1564 EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);
1565
1566 static void
1567 pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
1568                    struct pnfs_layout_segment *lseg,
1569                    struct list_head *free_me)
1570 {
1571         struct inode *inode = lo->plh_inode;
1572         struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
1573
1574         if (ld->add_lseg != NULL)
1575                 ld->add_lseg(lo, lseg, free_me);
1576         else
1577                 pnfs_generic_layout_insert_lseg(lo, lseg,
1578                                 pnfs_lseg_range_is_after,
1579                                 pnfs_lseg_no_merge,
1580                                 free_me);
1581 }
1582
1583 static struct pnfs_layout_hdr *
1584 alloc_init_layout_hdr(struct inode *ino,
1585                       struct nfs_open_context *ctx,
1586                       gfp_t gfp_flags)
1587 {
1588         struct pnfs_layout_hdr *lo;
1589
1590         lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
1591         if (!lo)
1592                 return NULL;
1593         refcount_set(&lo->plh_refcount, 1);
1594         INIT_LIST_HEAD(&lo->plh_layouts);
1595         INIT_LIST_HEAD(&lo->plh_segs);
1596         INIT_LIST_HEAD(&lo->plh_return_segs);
1597         INIT_LIST_HEAD(&lo->plh_bulk_destroy);
1598         lo->plh_inode = ino;
1599         lo->plh_lc_cred = get_cred(ctx->cred);
1600         lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
1601         return lo;
1602 }
1603
1604 static struct pnfs_layout_hdr *
1605 pnfs_find_alloc_layout(struct inode *ino,
1606                        struct nfs_open_context *ctx,
1607                        gfp_t gfp_flags)
1608         __releases(&ino->i_lock)
1609         __acquires(&ino->i_lock)
1610 {
1611         struct nfs_inode *nfsi = NFS_I(ino);
1612         struct pnfs_layout_hdr *new = NULL;
1613
1614         dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
1615
1616         if (nfsi->layout != NULL)
1617                 goto out_existing;
1618         spin_unlock(&ino->i_lock);
1619         new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
1620         spin_lock(&ino->i_lock);
1621
1622         if (likely(nfsi->layout == NULL)) {     /* Won the race? */
1623                 nfsi->layout = new;
1624                 return new;
1625         } else if (new != NULL)
1626                 pnfs_free_layout_hdr(new);
1627 out_existing:
1628         pnfs_get_layout_hdr(nfsi->layout);
1629         return nfsi->layout;
1630 }
1631
1632 /*
1633  * iomode matching rules:
1634  * arg          lseg    strict match
1635  * iomode       iomode  iomode
1636  * ------       ------  ------ -----
1637  * ANY          READ    N/A    true
1638  * ANY          RW      N/A    true
1639  * RW           READ    N/A    false
1640  * RW           RW      N/A    true
1641  * READ         READ    N/A    true
1642  * READ         RW      true   false
1643  * READ         RW      false  true
1644  */
1645 static bool
1646 pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
1647                  const struct pnfs_layout_range *range,
1648                  bool strict_iomode)
1649 {
1650         struct pnfs_layout_range range1;
1651
1652         if ((range->iomode == IOMODE_RW &&
1653              ls_range->iomode != IOMODE_RW) ||
1654             (range->iomode != ls_range->iomode &&
1655              strict_iomode) ||
1656             !pnfs_lseg_range_intersecting(ls_range, range))
1657                 return false;
1658
1659         /* range1 covers only the first byte in the range */
1660         range1 = *range;
1661         range1.length = 1;
1662         return pnfs_lseg_range_contained(ls_range, &range1);
1663 }
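
/*
 * Worked example for the table above (hypothetical values, not part of
 * the original source): a READ request against a cached RW segment that
 * covers the whole file matches only when strict_iomode is false.
 *
 *	struct pnfs_layout_range lseg_rng = {
 *		.iomode = IOMODE_RW, .offset = 0, .length = NFS4_MAX_UINT64,
 *	};
 *	struct pnfs_layout_range req_rng = {
 *		.iomode = IOMODE_READ, .offset = 4096, .length = 8192,
 *	};
 *
 *	pnfs_lseg_range_match(&lseg_rng, &req_rng, false) returns true;
 *	pnfs_lseg_range_match(&lseg_rng, &req_rng, true) returns false.
 */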
1664
1665 /*
1666  * lookup range in layout
1667  */
1668 static struct pnfs_layout_segment *
1669 pnfs_find_lseg(struct pnfs_layout_hdr *lo,
1670                 struct pnfs_layout_range *range,
1671                 bool strict_iomode)
1672 {
1673         struct pnfs_layout_segment *lseg, *ret = NULL;
1674
1675         dprintk("%s:Begin\n", __func__);
1676
1677         list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
1678                 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
1679                     pnfs_lseg_range_match(&lseg->pls_range, range,
1680                                           strict_iomode)) {
1681                         ret = pnfs_get_lseg(lseg);
1682                         break;
1683                 }
1684         }
1685
1686         dprintk("%s:Return lseg %p ref %d\n",
1687                 __func__, ret, ret ? refcount_read(&ret->pls_refcount) : 0);
1688         return ret;
1689 }
1690
1691 /*
1692  * Use mdsthreshold hints set at each OPEN to determine if I/O should go
1693  * to the MDS or over pNFS
1694  *
1695  * The nfs_inode read_io and write_io fields are cumulative counters reset
1696  * when there are no layout segments. Note that in pnfs_update_layout iomode
1697  * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
1698  * WRITE request.
1699  *
1700  * A return of true means use MDS I/O.
1701  *
1702  * From rfc 5661:
1703  * If a file's size is smaller than the file size threshold, data accesses
1704  * SHOULD be sent to the metadata server.  If an I/O request has a length that
1705  * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
1706  * server.  If both file size and I/O size are provided, the client SHOULD
1707  * reach or exceed both thresholds before sending its read or write
1708  * requests to the data server.
1709  */
1710 static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
1711                                      struct inode *ino, int iomode)
1712 {
1713         struct nfs4_threshold *t = ctx->mdsthreshold;
1714         struct nfs_inode *nfsi = NFS_I(ino);
1715         loff_t fsize = i_size_read(ino);
1716         bool size = false, size_set = false, io = false, io_set = false, ret = false;
1717
1718         if (t == NULL)
1719                 return ret;
1720
1721         dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
1722                 __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
1723
1724         switch (iomode) {
1725         case IOMODE_READ:
1726                 if (t->bm & THRESHOLD_RD) {
1727                         dprintk("%s fsize %llu\n", __func__, fsize);
1728                         size_set = true;
1729                         if (fsize < t->rd_sz)
1730                                 size = true;
1731                 }
1732                 if (t->bm & THRESHOLD_RD_IO) {
1733                         dprintk("%s nfsi->read_io %llu\n", __func__,
1734                                 nfsi->read_io);
1735                         io_set = true;
1736                         if (nfsi->read_io < t->rd_io_sz)
1737                                 io = true;
1738                 }
1739                 break;
1740         case IOMODE_RW:
1741                 if (t->bm & THRESHOLD_WR) {
1742                         dprintk("%s fsize %llu\n", __func__, fsize);
1743                         size_set = true;
1744                         if (fsize < t->wr_sz)
1745                                 size = true;
1746                 }
1747                 if (t->bm & THRESHOLD_WR_IO) {
1748                         dprintk("%s nfsi->write_io %llu\n", __func__,
1749                                 nfsi->write_io);
1750                         io_set = true;
1751                         if (nfsi->write_io < t->wr_io_sz)
1752                                 io = true;
1753                 }
1754                 break;
1755         }
1756         if (size_set && io_set) {
1757                 if (size && io)
1758                         ret = true;
1759         } else if (size || io)
1760                 ret = true;
1761
1762         dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
1763         return ret;
1764 }
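
/*
 * Worked example (hypothetical values, not part of the original
 * source): suppose the OPEN reply carried an mdsthreshold with
 *
 *	t->bm = THRESHOLD_RD;	(only the read file-size hint is set)
 *	t->rd_sz = 65536;
 *
 * and i_size_read(ino) == 4096. For IOMODE_READ, size_set becomes true
 * and fsize < t->rd_sz makes size true, while io_set stays false; the
 * "size || io" branch then returns true and the read is sent to the
 * MDS rather than through pNFS.
 */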
1765
1766 static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
1767 {
1768         /*
1769          * Send any pending layoutcommit first, since it holds lseg
1770          * references that can hold up layoutreturn
1771          */
1772         pnfs_layoutcommit_inode(lo->plh_inode, false);
1773         return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
1774                                    nfs_wait_bit_killable,
1775                                    TASK_KILLABLE);
1776 }
1777
1778 static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
1779 {
1780         atomic_inc(&lo->plh_outstanding);
1781 }
1782
1783 static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
1784 {
1785         if (atomic_dec_and_test(&lo->plh_outstanding))
1786                 wake_up_var(&lo->plh_outstanding);
1787 }
1788
1789 static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
1790 {
1791         unsigned long *bitlock = &lo->plh_flags;
1792
1793         clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
1794         smp_mb__after_atomic();
1795         wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
1796 }
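
/*
 * Note (added for clarity): the clear_bit_unlock()/wake_up_bit() pair
 * above is matched by the wait_on_bit(&lo->plh_flags,
 * NFS_LAYOUT_FIRST_LAYOUTGET, ...) call in pnfs_update_layout(), which
 * serializes the first LAYOUTGET for a file per RFC 5661 errata 3208.
 */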
1797
1798 static void _add_to_server_list(struct pnfs_layout_hdr *lo,
1799                                 struct nfs_server *server)
1800 {
1801         if (list_empty(&lo->plh_layouts)) {
1802                 struct nfs_client *clp = server->nfs_client;
1803
1804                 /* The lo must be on the clp list if there is any
1805                  * chance of a CB_LAYOUTRECALL(FILE) coming in.
1806                  */
1807                 spin_lock(&clp->cl_lock);
1808                 if (list_empty(&lo->plh_layouts))
1809                         list_add_tail(&lo->plh_layouts, &server->layouts);
1810                 spin_unlock(&clp->cl_lock);
1811         }
1812 }
1813
1814 /*
1815  * The layout segment is retrieved from the server if it is not cached.
1816  * The appropriate layout segment is referenced and returned to the caller.
1817  */
1818 struct pnfs_layout_segment *
1819 pnfs_update_layout(struct inode *ino,
1820                    struct nfs_open_context *ctx,
1821                    loff_t pos,
1822                    u64 count,
1823                    enum pnfs_iomode iomode,
1824                    bool strict_iomode,
1825                    gfp_t gfp_flags)
1826 {
1827         struct pnfs_layout_range arg = {
1828                 .iomode = iomode,
1829                 .offset = pos,
1830                 .length = count,
1831         };
1832         unsigned pg_offset;
1833         struct nfs_server *server = NFS_SERVER(ino);
1834         struct nfs_client *clp = server->nfs_client;
1835         struct pnfs_layout_hdr *lo = NULL;
1836         struct pnfs_layout_segment *lseg = NULL;
1837         struct nfs4_layoutget *lgp;
1838         nfs4_stateid stateid;
1839         long timeout = 0;
1840         unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
1841         bool first;
1842
1843         if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
1844                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1845                                  PNFS_UPDATE_LAYOUT_NO_PNFS);
1846                 goto out;
1847         }
1848
1849         if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
1850                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1851                                  PNFS_UPDATE_LAYOUT_MDSTHRESH);
1852                 goto out;
1853         }
1854
1855 lookup_again:
1856         lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
1857         if (IS_ERR(lseg))
1858                 goto out;
1859         first = false;
1860         spin_lock(&ino->i_lock);
1861         lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
1862         if (lo == NULL) {
1863                 spin_unlock(&ino->i_lock);
1864                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1865                                  PNFS_UPDATE_LAYOUT_NOMEM);
1866                 goto out;
1867         }
1868
1869         /* Do we even need to bother with this? */
1870         if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1871                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1872                                  PNFS_UPDATE_LAYOUT_BULK_RECALL);
1873                 dprintk("%s matches recall, use MDS\n", __func__);
1874                 goto out_unlock;
1875         }
1876
1877         /* If LAYOUTGET already failed once, we don't try again */
1878         if (pnfs_layout_io_test_failed(lo, iomode)) {
1879                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1880                                  PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
1881                 goto out_unlock;
1882         }
1883
1884         /*
1885          * If the layout segment list is empty, but there are outstanding
1886          * layoutget calls, then they might be subject to a layoutrecall.
1887          */
1888         if (list_empty(&lo->plh_segs) &&
1889             atomic_read(&lo->plh_outstanding) != 0) {
1890                 spin_unlock(&ino->i_lock);
1891                 lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
1892                                         !atomic_read(&lo->plh_outstanding)));
1893                 if (IS_ERR(lseg) || !list_empty(&lo->plh_segs))
1894                         goto out_put_layout_hdr;
1895                 pnfs_put_layout_hdr(lo);
1896                 goto lookup_again;
1897         }
1898
1899         lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
1900         if (lseg) {
1901                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1902                                 PNFS_UPDATE_LAYOUT_FOUND_CACHED);
1903                 goto out_unlock;
1904         }
1905
1906         if (!nfs4_valid_open_stateid(ctx->state)) {
1907                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1908                                 PNFS_UPDATE_LAYOUT_INVALID_OPEN);
1909                 goto out_unlock;
1910         }
1911
1912         /*
1913          * Choose a stateid for the LAYOUTGET. If we don't have a layout
1914          * stateid, or it has been invalidated, then we must use the open
1915          * stateid.
1916          */
1917         if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
1918
1919                 /*
1920                  * The first layoutget for the file. Need to serialize per
1921                  * RFC 5661 Errata 3208.
1922                  */
1923                 if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
1924                                      &lo->plh_flags)) {
1925                         spin_unlock(&ino->i_lock);
1926                         lseg = ERR_PTR(wait_on_bit(&lo->plh_flags,
1927                                                 NFS_LAYOUT_FIRST_LAYOUTGET,
1928                                                 TASK_KILLABLE));
1929                         if (IS_ERR(lseg))
1930                                 goto out_put_layout_hdr;
1931                         pnfs_put_layout_hdr(lo);
1932                         dprintk("%s retrying\n", __func__);
1933                         goto lookup_again;
1934                 }
1935
1936                 first = true;
1937                 if (nfs4_select_rw_stateid(ctx->state,
1938                                         iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ,
1939                                         NULL, &stateid, NULL) != 0) {
1940                         trace_pnfs_update_layout(ino, pos, count,
1941                                         iomode, lo, lseg,
1942                                         PNFS_UPDATE_LAYOUT_INVALID_OPEN);
1943                         goto out_unlock;
1944                 }
1945         } else {
1946                 nfs4_stateid_copy(&stateid, &lo->plh_stateid);
1947         }
1948
1949         /*
1950          * Because we free lsegs before sending LAYOUTRETURN, we need to wait
1951          * for LAYOUTRETURN even if first is true.
1952          */
1953         if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
1954                 spin_unlock(&ino->i_lock);
1955                 dprintk("%s wait for layoutreturn\n", __func__);
1956                 lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
1957                 if (!IS_ERR(lseg)) {
1958                         if (first)
1959                                 pnfs_clear_first_layoutget(lo);
1960                         pnfs_put_layout_hdr(lo);
1961                         dprintk("%s retrying\n", __func__);
1962                         trace_pnfs_update_layout(ino, pos, count, iomode, lo,
1963                                         lseg, PNFS_UPDATE_LAYOUT_RETRY);
1964                         goto lookup_again;
1965                 }
1966                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1967                                 PNFS_UPDATE_LAYOUT_RETURN);
1968                 goto out_put_layout_hdr;
1969         }
1970
1971         if (pnfs_layoutgets_blocked(lo)) {
1972                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1973                                 PNFS_UPDATE_LAYOUT_BLOCKED);
1974                 goto out_unlock;
1975         }
1976         nfs_layoutget_begin(lo);
1977         spin_unlock(&ino->i_lock);
1978
1979         _add_to_server_list(lo, server);
1980
1981         pg_offset = arg.offset & ~PAGE_MASK;
1982         if (pg_offset) {
1983                 arg.offset -= pg_offset;
1984                 arg.length += pg_offset;
1985         }
1986         if (arg.length != NFS4_MAX_UINT64)
1987                 arg.length = PAGE_ALIGN(arg.length);
1988
1989         lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &stateid, &arg, gfp_flags);
1990         if (!lgp) {
1991                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL,
1992                                          PNFS_UPDATE_LAYOUT_NOMEM);
1993                 nfs_layoutget_end(lo);
1994                 goto out_put_layout_hdr;
1995         }
1996
1997         lseg = nfs4_proc_layoutget(lgp, &timeout);
1998         trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1999                                  PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
2000         nfs_layoutget_end(lo);
2001         if (IS_ERR(lseg)) {
2002                 switch (PTR_ERR(lseg)) {
2003                 case -EBUSY:
2004                         if (time_after(jiffies, giveup))
2005                                 lseg = NULL;
2006                         break;
2007                 case -ERECALLCONFLICT:
2008                 case -EAGAIN:
2009                         break;
2010                 default:
2011                         if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
2012                                 pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
2013                                 lseg = NULL;
2014                         }
2015                         goto out_put_layout_hdr;
2016                 }
2017                 if (lseg) {
2018                         if (first)
2019                                 pnfs_clear_first_layoutget(lo);
2020                         trace_pnfs_update_layout(ino, pos, count,
2021                                 iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
2022                         pnfs_put_layout_hdr(lo);
2023                         goto lookup_again;
2024                 }
2025         } else {
2026                 pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
2027         }
2028
2029 out_put_layout_hdr:
2030         if (first)
2031                 pnfs_clear_first_layoutget(lo);
2032         pnfs_put_layout_hdr(lo);
2033 out:
2034         dprintk("%s: inode %s/%llu pNFS layout segment %s for "
2035                         "(%s, offset: %llu, length: %llu)\n",
2036                         __func__, ino->i_sb->s_id,
2037                         (unsigned long long)NFS_FILEID(ino),
2038                         IS_ERR_OR_NULL(lseg) ? "not found" : "found",
2039                         iomode == IOMODE_RW ? "read/write" : "read-only",
2040                         (unsigned long long)pos,
2041                         (unsigned long long)count);
2042         return lseg;
2043 out_unlock:
2044         spin_unlock(&ino->i_lock);
2045         goto out_put_layout_hdr;
2046 }
2047 EXPORT_SYMBOL_GPL(pnfs_update_layout);
2048
2049 static bool
2050 pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
2051 {
2052         switch (range->iomode) {
2053         case IOMODE_READ:
2054         case IOMODE_RW:
2055                 break;
2056         default:
2057                 return false;
2058         }
2059         if (range->offset == NFS4_MAX_UINT64)
2060                 return false;
2061         if (range->length == 0)
2062                 return false;
2063         if (range->length != NFS4_MAX_UINT64 &&
2064             range->length > NFS4_MAX_UINT64 - range->offset)
2065                 return false;
2066         return true;
2067 }
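
/*
 * Examples (hypothetical values): a range of { IOMODE_READ,
 * .offset = 4096, .length = NFS4_MAX_UINT64 } passes the checks above,
 * while a zero-length range, an offset of NFS4_MAX_UINT64, or a range
 * whose offset + length would overflow past NFS4_MAX_UINT64 is
 * rejected.
 */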
2068
2069 static struct pnfs_layout_hdr *
2070 _pnfs_grab_empty_layout(struct inode *ino, struct nfs_open_context *ctx)
2071 {
2072         struct pnfs_layout_hdr *lo;
2073
2074         spin_lock(&ino->i_lock);
2075         lo = pnfs_find_alloc_layout(ino, ctx, GFP_KERNEL);
2076         if (!lo)
2077                 goto out_unlock;
2078         if (!test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
2079                 goto out_unlock;
2080         if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
2081                 goto out_unlock;
2082         if (pnfs_layoutgets_blocked(lo))
2083                 goto out_unlock;
2084         if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags))
2085                 goto out_unlock;
2086         nfs_layoutget_begin(lo);
2087         spin_unlock(&ino->i_lock);
2088         _add_to_server_list(lo, NFS_SERVER(ino));
2089         return lo;
2090
2091 out_unlock:
2092         spin_unlock(&ino->i_lock);
2093         pnfs_put_layout_hdr(lo);
2094         return NULL;
2095 }
2096
2097 extern const nfs4_stateid current_stateid;
2098
2099 static void _lgopen_prepare_attached(struct nfs4_opendata *data,
2100                                      struct nfs_open_context *ctx)
2101 {
2102         struct inode *ino = data->dentry->d_inode;
2103         struct pnfs_layout_range rng = {
2104                 .iomode = (data->o_arg.fmode & FMODE_WRITE) ?
2105                           IOMODE_RW : IOMODE_READ,
2106                 .offset = 0,
2107                 .length = NFS4_MAX_UINT64,
2108         };
2109         struct nfs4_layoutget *lgp;
2110         struct pnfs_layout_hdr *lo;
2111
2112         /* Heuristic: don't send layoutget if we have cached data */
2113         if (rng.iomode == IOMODE_READ &&
2114            (i_size_read(ino) == 0 || ino->i_mapping->nrpages != 0))
2115                 return;
2116
2117         lo = _pnfs_grab_empty_layout(ino, ctx);
2118         if (!lo)
2119                 return;
2120         lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid,
2121                                              &rng, GFP_KERNEL);
2122         if (!lgp) {
2123                 pnfs_clear_first_layoutget(lo);
2124                 pnfs_put_layout_hdr(lo);
2125                 return;
2126         }
2127         data->lgp = lgp;
2128         data->o_arg.lg_args = &lgp->args;
2129         data->o_res.lg_res = &lgp->res;
2130 }
2131
2132 static void _lgopen_prepare_floating(struct nfs4_opendata *data,
2133                                      struct nfs_open_context *ctx)
2134 {
2135         struct pnfs_layout_range rng = {
2136                 .iomode = (data->o_arg.fmode & FMODE_WRITE) ?
2137                           IOMODE_RW : IOMODE_READ,
2138                 .offset = 0,
2139                 .length = NFS4_MAX_UINT64,
2140         };
2141         struct nfs4_layoutget *lgp;
2142
2143         lgp = pnfs_alloc_init_layoutget_args(NULL, ctx, &current_stateid,
2144                                              &rng, GFP_KERNEL);
2145         if (!lgp)
2146                 return;
2147         data->lgp = lgp;
2148         data->o_arg.lg_args = &lgp->args;
2149         data->o_res.lg_res = &lgp->res;
2150 }
2151
2152 void pnfs_lgopen_prepare(struct nfs4_opendata *data,
2153                          struct nfs_open_context *ctx)
2154 {
2155         struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
2156
2157         if (!(pnfs_enabled_sb(server) &&
2158               server->pnfs_curr_ld->flags & PNFS_LAYOUTGET_ON_OPEN))
2159                 return;
2160         /* Could check on max_ops, but currently hardcoded high enough */
2161         if (!nfs_server_capable(data->dir->d_inode, NFS_CAP_LGOPEN))
2162                 return;
2163         if (data->state)
2164                 _lgopen_prepare_attached(data, ctx);
2165         else
2166                 _lgopen_prepare_floating(data, ctx);
2167 }
2168
2169 void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp,
2170                        struct nfs_open_context *ctx)
2171 {
2172         struct pnfs_layout_hdr *lo;
2173         struct pnfs_layout_segment *lseg;
2174         struct nfs_server *srv = NFS_SERVER(ino);
2175         u32 iomode;
2176
2177         if (!lgp)
2178                 return;
2179         dprintk("%s: entered with status %i\n", __func__, lgp->res.status);
2180         if (lgp->res.status) {
2181                 switch (lgp->res.status) {
2182                 default:
2183                         break;
2184                 /*
2185                  * Halt lgopen attempts if the server doesn't recognise
2186                  * the "current stateid" value, the layout type, or the
2187                  * layoutget operation as being valid.
2188                  * Also if it complains about too many ops in the compound
2189                  * or about the request/reply being too big.
2190                  */
2191                 case -NFS4ERR_BAD_STATEID:
2192                 case -NFS4ERR_NOTSUPP:
2193                 case -NFS4ERR_REP_TOO_BIG:
2194                 case -NFS4ERR_REP_TOO_BIG_TO_CACHE:
2195                 case -NFS4ERR_REQ_TOO_BIG:
2196                 case -NFS4ERR_TOO_MANY_OPS:
2197                 case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
2198                         srv->caps &= ~NFS_CAP_LGOPEN;
2199                 }
2200                 return;
2201         }
2202         if (!lgp->args.inode) {
2203                 lo = _pnfs_grab_empty_layout(ino, ctx);
2204                 if (!lo)
2205                         return;
2206                 lgp->args.inode = ino;
2207         } else
2208                 lo = NFS_I(lgp->args.inode)->layout;
2209
2210         lseg = pnfs_layout_process(lgp);
2211         if (!IS_ERR(lseg)) {
2212                 iomode = lgp->args.range.iomode;
2213                 pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
2214                 pnfs_put_lseg(lseg);
2215         }
2216 }
2217
2218 void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
2219 {
2220         if (lgp != NULL) {
2221                 struct inode *inode = lgp->args.inode;
2222                 if (inode) {
2223                         struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
2224                         pnfs_clear_first_layoutget(lo);
2225                         nfs_layoutget_end(lo);
2226                 }
2227                 pnfs_layoutget_free(lgp);
2228         }
2229 }
2230
2231 struct pnfs_layout_segment *
2232 pnfs_layout_process(struct nfs4_layoutget *lgp)
2233 {
2234         struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
2235         struct nfs4_layoutget_res *res = &lgp->res;
2236         struct pnfs_layout_segment *lseg;
2237         struct inode *ino = lo->plh_inode;
2238         LIST_HEAD(free_me);
2239
2240         if (!pnfs_sanity_check_layout_range(&res->range))
2241                 return ERR_PTR(-EINVAL);
2242
2243         /* Inject layout blob into I/O device driver */
2244         lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
2245         if (IS_ERR_OR_NULL(lseg)) {
2246                 if (!lseg)
2247                         lseg = ERR_PTR(-ENOMEM);
2248
2249                 dprintk("%s: Could not allocate layout: error %ld\n",
2250                        __func__, PTR_ERR(lseg));
2251                 return lseg;
2252         }
2253
2254         pnfs_init_lseg(lo, lseg, &res->range, &res->stateid);
2255
2256         spin_lock(&ino->i_lock);
2257         if (pnfs_layoutgets_blocked(lo)) {
2258                 dprintk("%s forget reply due to state\n", __func__);
2259                 goto out_forget;
2260         }
2261
2262         if (!pnfs_layout_is_valid(lo)) {
2263                 /* We have a completely new layout */
2264                 pnfs_set_layout_stateid(lo, &res->stateid, true);
2265         } else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
2266                 /* existing state ID, make sure the sequence number matches. */
2267                 if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
2268                         dprintk("%s forget reply due to sequence\n", __func__);
2269                         goto out_forget;
2270                 }
2271                 pnfs_set_layout_stateid(lo, &res->stateid, false);
2272         } else {
2273                 /*
2274                  * We got an entirely new state ID.  Mark all segments for the
2275                  * inode invalid, and retry the layoutget
2276                  */
2277                 pnfs_mark_layout_stateid_invalid(lo, &free_me);
2278                 goto out_forget;
2279         }
2280
2281         pnfs_get_lseg(lseg);
2282         pnfs_layout_insert_lseg(lo, lseg, &free_me);
2283
2284
2285         if (res->return_on_close)
2286                 set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
2287
2288         spin_unlock(&ino->i_lock);
2289         pnfs_free_lseg_list(&free_me);
2290         return lseg;
2291
2292 out_forget:
2293         spin_unlock(&ino->i_lock);
2294         lseg->pls_layout = lo;
2295         NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
2296         return ERR_PTR(-EAGAIN);
2297 }
2298
2299 static int
2300 mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg,
2301                 struct list_head *tmp_list)
2302 {
2303         if (!mark_lseg_invalid(lseg, tmp_list))
2304                 return 0;
2305         pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg);
2306         return 1;
2307 }
2308
2309 /**
2310  * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
2311  * @lo: pointer to layout header
2312  * @tmp_list: list header to be used with pnfs_free_lseg_list()
2313  * @return_range: describe layout segment ranges to be returned
2314  * @seq: stateid seqid to match
2315  *
2316  * This function is mainly intended for use by layoutrecall. It attempts
2317  * to free the layout segment immediately, or else to mark it for return
2318  * as soon as its reference count drops to zero.
2319  *
2320  * Returns:
2321  * - 0: a layoutreturn needs to be scheduled.
2322  * - EBUSY: there are layout segments that are still in use.
2323  * - ENOENT: there are no layout segments that need to be returned.
2324  */
2325 int
2326 pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
2327                                 struct list_head *tmp_list,
2328                                 const struct pnfs_layout_range *return_range,
2329                                 u32 seq)
2330 {
2331         struct pnfs_layout_segment *lseg, *next;
2332         int remaining = 0;
2333
2334         dprintk("%s:Begin lo %p\n", __func__, lo);
2335
2336         assert_spin_locked(&lo->plh_inode->i_lock);
2337
2338         list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
2339                 if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
2340                         dprintk("%s: marking lseg %p iomode %d "
2341                                 "offset %llu length %llu\n", __func__,
2342                                 lseg, lseg->pls_range.iomode,
2343                                 lseg->pls_range.offset,
2344                                 lseg->pls_range.length);
2345                         if (mark_lseg_invalid_or_return(lseg, tmp_list))
2346                                 continue;
2347                         remaining++;
2348                         set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
2349                 }
2350
2351         if (remaining) {
2352                 pnfs_set_plh_return_info(lo, return_range->iomode, seq);
2353                 return -EBUSY;
2354         }
2355
2356         if (!list_empty(&lo->plh_return_segs)) {
2357                 pnfs_set_plh_return_info(lo, return_range->iomode, seq);
2358                 return 0;
2359         }
2360
2361         return -ENOENT;
2362 }
2363
2364 void pnfs_error_mark_layout_for_return(struct inode *inode,
2365                                        struct pnfs_layout_segment *lseg)
2366 {
2367         struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
2368         struct pnfs_layout_range range = {
2369                 .iomode = lseg->pls_range.iomode,
2370                 .offset = 0,
2371                 .length = NFS4_MAX_UINT64,
2372         };
2373         bool return_now = false;
2374
2375         spin_lock(&inode->i_lock);
2376         if (!pnfs_layout_is_valid(lo)) {
2377                 spin_unlock(&inode->i_lock);
2378                 return;
2379         }
2380         pnfs_set_plh_return_info(lo, range.iomode, 0);
2381         /*
2382          * mark all matching lsegs so that we are sure to have no live
2383          * segments at hand when sending layoutreturn. See pnfs_put_lseg()
2384          * for how it works.
2385          */
2386         if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0) != -EBUSY) {
2387                 nfs4_stateid stateid;
2388                 enum pnfs_iomode iomode;
2389
2390                 return_now = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
2391                 spin_unlock(&inode->i_lock);
2392                 if (return_now)
2393                         pnfs_send_layoutreturn(lo, &stateid, iomode, false);
2394         } else {
2395                 spin_unlock(&inode->i_lock);
2396                 nfs_commit_inode(inode, 0);
2397         }
2398 }
2399 EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
2400
2401 void
2402 pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
2403 {
2404         if (pgio->pg_lseg == NULL ||
2405             test_bit(NFS_LSEG_VALID, &pgio->pg_lseg->pls_flags))
2406                 return;
2407         pnfs_put_lseg(pgio->pg_lseg);
2408         pgio->pg_lseg = NULL;
2409 }
2410 EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
2411
2412 /*
2413  * Check for any intersection between the request and the pgio->pg_lseg,
2414  * and if none, put this pgio->pg_lseg away.
2415  */
2416 static void
2417 pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
2418 {
2419         if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
2420                 pnfs_put_lseg(pgio->pg_lseg);
2421                 pgio->pg_lseg = NULL;
2422         }
2423 }
2424
2425 void
2426 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
2427 {
2428         u64 rd_size = req->wb_bytes;
2429
2430         pnfs_generic_pg_check_layout(pgio);
2431         pnfs_generic_pg_check_range(pgio, req);
2432         if (pgio->pg_lseg == NULL) {
2433                 if (pgio->pg_dreq == NULL)
2434                         rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
2435                 else
2436                         rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
2437
2438                 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
2439                                                    nfs_req_openctx(req),
2440                                                    req_offset(req),
2441                                                    rd_size,
2442                                                    IOMODE_READ,
2443                                                    false,
2444                                                    GFP_KERNEL);
2445                 if (IS_ERR(pgio->pg_lseg)) {
2446                         pgio->pg_error = PTR_ERR(pgio->pg_lseg);
2447                         pgio->pg_lseg = NULL;
2448                         return;
2449                 }
2450         }
2451         /* If no lseg, fall back to read through mds */
2452         if (pgio->pg_lseg == NULL)
2453                 nfs_pageio_reset_read_mds(pgio);
2454
2455 }
2456 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
2457
2458 void
2459 pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
2460                            struct nfs_page *req, u64 wb_size)
2461 {
2462         pnfs_generic_pg_check_layout(pgio);
2463         pnfs_generic_pg_check_range(pgio, req);
2464         if (pgio->pg_lseg == NULL) {
2465                 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
2466                                                    nfs_req_openctx(req),
2467                                                    req_offset(req),
2468                                                    wb_size,
2469                                                    IOMODE_RW,
2470                                                    false,
2471                                                    GFP_NOFS);
2472                 if (IS_ERR(pgio->pg_lseg)) {
2473                         pgio->pg_error = PTR_ERR(pgio->pg_lseg);
2474                         pgio->pg_lseg = NULL;
2475                         return;
2476                 }
2477         }
2478         /* If no lseg, fall back to write through mds */
2479         if (pgio->pg_lseg == NULL)
2480                 nfs_pageio_reset_write_mds(pgio);
2481 }
2482 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
2483
2484 void
2485 pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
2486 {
2487         if (desc->pg_lseg) {
2488                 pnfs_put_lseg(desc->pg_lseg);
2489                 desc->pg_lseg = NULL;
2490         }
2491 }
2492 EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
2493
2494 /*
2495  * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
2496  * of bytes (maximum @req->wb_bytes) that can be coalesced.
2497  */
2498 size_t
2499 pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
2500                      struct nfs_page *prev, struct nfs_page *req)
2501 {
2502         unsigned int size;
2503         u64 seg_end, req_start, seg_left;
2504
2505         size = nfs_generic_pg_test(pgio, prev, req);
2506         if (!size)
2507                 return 0;
2508
2509         /*
2510          * 'size' contains the number of bytes left in the current page (up
2511          * to the original size asked for in @req->wb_bytes).
2512          *
2513          * Calculate how many bytes are left in the layout segment
2514          * and if there are less bytes than 'size', return that instead.
2515          *
2516          * Please also note that 'seg_end' is actually the offset of the
2517          * first byte that lies outside the pnfs_layout_range. FIXME?
2518          *
2519          */
2520         if (pgio->pg_lseg) {
2521                 seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
2522                                      pgio->pg_lseg->pls_range.length);
2523                 req_start = req_offset(req);
2524
2525                 /* start of request is past the last byte of this segment */
2526                 if (req_start >= seg_end)
2527                         return 0;
2528
2529                 /* adjust 'size' iff there are fewer bytes left in the
2530                  * segment than what nfs_generic_pg_test returned */
2531                 seg_left = seg_end - req_start;
2532                 if (seg_left < size)
2533                         size = (unsigned int)seg_left;
2534         }
2535
2536         return size;
2537 }
2538 EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
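
/*
 * Worked example for pnfs_generic_pg_test() (hypothetical values): with
 * a layout segment covering offset 0, length 16384, and a request
 * starting at offset 12288 for which nfs_generic_pg_test() allowed
 * 8192 bytes,
 *
 *	seg_end  = pnfs_end_offset(0, 16384), i.e. 16384;
 *	seg_left = 16384 - 12288 = 4096;
 *
 * so only 4096 bytes are coalesced. A request starting at or beyond
 * offset 16384 would not be coalesced at all (the function returns 0).
 */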
2539
2540 int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
2541 {
2542         struct nfs_pageio_descriptor pgio;
2543
2544         /* Resend all requests through the MDS */
2545         nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
2546                               hdr->completion_ops);
2547         set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
2548         return nfs_pageio_resend(&pgio, hdr);
2549 }
2550 EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
2551
2552 static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
2553 {
2554
2555         dprintk("pnfs write error = %d\n", hdr->pnfs_error);
2556         if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
2557             PNFS_LAYOUTRET_ON_ERROR) {
2558                 pnfs_return_layout(hdr->inode);
2559         }
2560         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
2561                 hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
2562 }
2563
2564 /*
2565  * Called by non-RPC-based layout drivers
2566  */
2567 void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
2568 {
2569         if (likely(!hdr->pnfs_error)) {
2570                 pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
2571                                 hdr->mds_offset + hdr->res.count);
2572                 hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
2573         }
2574         trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
2575         if (unlikely(hdr->pnfs_error))
2576                 pnfs_ld_handle_write_error(hdr);
2577         hdr->mds_ops->rpc_release(hdr);
2578 }
2579 EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
2580
2581 static void
2582 pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
2583                 struct nfs_pgio_header *hdr)
2584 {
2585         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2586
2587         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2588                 list_splice_tail_init(&hdr->pages, &mirror->pg_list);
2589                 nfs_pageio_reset_write_mds(desc);
2590                 mirror->pg_recoalesce = 1;
2591         }
2592         hdr->completion_ops->completion(hdr);
2593 }
2594
2595 static enum pnfs_try_status
2596 pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
2597                         const struct rpc_call_ops *call_ops,
2598                         struct pnfs_layout_segment *lseg,
2599                         int how)
2600 {
2601         struct inode *inode = hdr->inode;
2602         enum pnfs_try_status trypnfs;
2603         struct nfs_server *nfss = NFS_SERVER(inode);
2604
2605         hdr->mds_ops = call_ops;
2606
2607         dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
2608                 inode->i_ino, hdr->args.count, hdr->args.offset, how);
2609         trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
2610         if (trypnfs != PNFS_NOT_ATTEMPTED)
2611                 nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
2612         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
2613         return trypnfs;
2614 }
2615
2616 static void
2617 pnfs_do_write(struct nfs_pageio_descriptor *desc,
2618               struct nfs_pgio_header *hdr, int how)
2619 {
2620         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
2621         struct pnfs_layout_segment *lseg = desc->pg_lseg;
2622         enum pnfs_try_status trypnfs;
2623
2624         trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
2625         switch (trypnfs) {
2626         case PNFS_NOT_ATTEMPTED:
2627                 pnfs_write_through_mds(desc, hdr);
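                /* Fall through */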
2628         case PNFS_ATTEMPTED:
2629                 break;
2630         case PNFS_TRY_AGAIN:
2631                 /* cleanup hdr and prepare to redo pnfs */
2632                 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2633                         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2634                         list_splice_init(&hdr->pages, &mirror->pg_list);
2635                         mirror->pg_recoalesce = 1;
2636                 }
2637                 hdr->mds_ops->rpc_release(hdr);
2638         }
2639 }
2640
2641 static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
2642 {
2643         pnfs_put_lseg(hdr->lseg);
2644         nfs_pgio_header_free(hdr);
2645 }
2646
2647 int
2648 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
2649 {
2650         struct nfs_pgio_header *hdr;
2651         int ret;
2652
2653         hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
2654         if (!hdr) {
2655                 desc->pg_error = -ENOMEM;
2656                 return desc->pg_error;
2657         }
2658         nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
2659
2660         hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
2661         ret = nfs_generic_pgio(desc, hdr);
2662         if (!ret)
2663                 pnfs_do_write(desc, hdr, desc->pg_ioflags);
2664
2665         return ret;
2666 }
2667 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
2668
2669 int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
2670 {
2671         struct nfs_pageio_descriptor pgio;
2672
2673         /* Resend all requests through the MDS */
2674         nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
2675         return nfs_pageio_resend(&pgio, hdr);
2676 }
2677 EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
2678
2679 static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
2680 {
2681         dprintk("pnfs read error = %d\n", hdr->pnfs_error);
2682         if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
2683             PNFS_LAYOUTRET_ON_ERROR) {
2684                 pnfs_return_layout(hdr->inode);
2685         }
2686         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
2687                 hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
2688 }
2689
2690 /*
2691  * Called by non-RPC-based layout drivers
2692  */
2693 void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
2694 {
2695         if (likely(!hdr->pnfs_error))
2696                 hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
2697         trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
2698         if (unlikely(hdr->pnfs_error))
2699                 pnfs_ld_handle_read_error(hdr);
2700         hdr->mds_ops->rpc_release(hdr);
2701 }
2702 EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
2703
2704 static void
2705 pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
2706                 struct nfs_pgio_header *hdr)
2707 {
2708         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2709
2710         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2711                 list_splice_tail_init(&hdr->pages, &mirror->pg_list);
2712                 nfs_pageio_reset_read_mds(desc);
2713                 mirror->pg_recoalesce = 1;
2714         }
2715         hdr->completion_ops->completion(hdr);
2716 }
2717
2718 /*
2719  * Call the appropriate parallel I/O subsystem read function.
2720  */
2721 static enum pnfs_try_status
2722 pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
2723                        const struct rpc_call_ops *call_ops,
2724                        struct pnfs_layout_segment *lseg)
2725 {
2726         struct inode *inode = hdr->inode;
2727         struct nfs_server *nfss = NFS_SERVER(inode);
2728         enum pnfs_try_status trypnfs;
2729
2730         hdr->mds_ops = call_ops;
2731
2732         dprintk("%s: Reading ino:%lu %u@%llu\n",
2733                 __func__, inode->i_ino, hdr->args.count, hdr->args.offset);
2734
2735         trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
2736         if (trypnfs != PNFS_NOT_ATTEMPTED)
2737                 nfs_inc_stats(inode, NFSIOS_PNFS_READ);
2738         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
2739         return trypnfs;
2740 }
2741
2742 /* Resend all requests through pnfs. */
2743 void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
2744 {
2745         struct nfs_pageio_descriptor pgio;
2746
2747         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2748                 /* Prevent deadlocks with layoutreturn! */
2749                 pnfs_put_lseg(hdr->lseg);
2750                 hdr->lseg = NULL;
2751
2752                 nfs_pageio_init_read(&pgio, hdr->inode, false,
2753                                         hdr->completion_ops);
2754                 hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
2755         }
2756 }
2757 EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);
2758
2759 static void
2760 pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
2761 {
2762         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
2763         struct pnfs_layout_segment *lseg = desc->pg_lseg;
2764         enum pnfs_try_status trypnfs;
2765
2766         trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
2767         switch (trypnfs) {
2768         case PNFS_NOT_ATTEMPTED:
2769                 pnfs_read_through_mds(desc, hdr);
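                /* Fall through */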
2770         case PNFS_ATTEMPTED:
2771                 break;
2772         case PNFS_TRY_AGAIN:
2773                 /* cleanup hdr and prepare to redo pnfs */
2774                 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2775                         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2776                         list_splice_init(&hdr->pages, &mirror->pg_list);
2777                         mirror->pg_recoalesce = 1;
2778                 }
2779                 hdr->mds_ops->rpc_release(hdr);
2780         }
2781 }
2782
2783 static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
2784 {
2785         pnfs_put_lseg(hdr->lseg);
2786         nfs_pgio_header_free(hdr);
2787 }
2788
2789 int
2790 pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
2791 {
2792         struct nfs_pgio_header *hdr;
2793         int ret;
2794
2795         hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
2796         if (!hdr) {
2797                 desc->pg_error = -ENOMEM;
2798                 return desc->pg_error;
2799         }
2800         nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
2801         hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
2802         ret = nfs_generic_pgio(desc, hdr);
2803         if (!ret)
2804                 pnfs_do_read(desc, hdr);
2805         return ret;
2806 }
2807 EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
2808
2809 static void pnfs_clear_layoutcommitting(struct inode *inode)
2810 {
2811         unsigned long *bitlock = &NFS_I(inode)->flags;
2812
2813         clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
2814         smp_mb__after_atomic();
2815         wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
2816 }
2817
2818 /*
2819  * There can be multiple RW segments.
2820  */
2821 static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
2822 {
2823         struct pnfs_layout_segment *lseg;
2824
2825         list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
2826                 if (lseg->pls_range.iomode == IOMODE_RW &&
2827                     test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
2828                         list_add(&lseg->pls_lc_list, listp);
2829         }
2830 }
2831
2832 static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
2833 {
2834         struct pnfs_layout_segment *lseg, *tmp;
2835
2836         /* Matched by references in pnfs_set_layoutcommit */
2837         list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
2838                 list_del_init(&lseg->pls_lc_list);
2839                 pnfs_put_lseg(lseg);
2840         }
2841
2842         pnfs_clear_layoutcommitting(inode);
2843 }
2844
2845 void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
2846 {
2847         pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
2848 }
2849 EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
2850
2851 void
2852 pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
2853                 loff_t end_pos)
2854 {
2855         struct nfs_inode *nfsi = NFS_I(inode);
2856         bool mark_as_dirty = false;
2857
2858         spin_lock(&inode->i_lock);
2859         if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
2860                 nfsi->layout->plh_lwb = end_pos;
2861                 mark_as_dirty = true;
2862                 dprintk("%s: Set layoutcommit for inode %lu ",
2863                         __func__, inode->i_ino);
2864         } else if (end_pos > nfsi->layout->plh_lwb)
2865                 nfsi->layout->plh_lwb = end_pos;
2866         if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
2867                 /* references matched in nfs4_layoutcommit_release */
2868                 pnfs_get_lseg(lseg);
2869         }
2870         spin_unlock(&inode->i_lock);
2871         dprintk("%s: lseg %p end_pos %llu\n",
2872                 __func__, lseg, nfsi->layout->plh_lwb);
2873
2874         /* If pnfs_layoutcommit_inode() runs between the inode locks, the next
2875          * call will be a no-op because NFS_INO_LAYOUTCOMMIT will not be set */
2876         if (mark_as_dirty)
2877                 mark_inode_dirty_sync(inode);
2878 }
2879 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
        struct nfs_server *nfss = NFS_SERVER(data->args.inode);

        if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
                nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
        pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off and either a COMMIT is sent to a data server or WRITEs to a
 * data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
        struct nfs4_layoutcommit_data *data;
        struct nfs_inode *nfsi = NFS_I(inode);
        loff_t end_pos;
        int status;

        if (!pnfs_layoutcommit_outstanding(inode))
                return 0;

        dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

        status = -EAGAIN;
        if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
                if (!sync)
                        goto out;
                status = wait_on_bit_lock_action(&nfsi->flags,
                                NFS_INO_LAYOUTCOMMITTING,
                                nfs_wait_bit_killable,
                                TASK_KILLABLE);
                if (status)
                        goto out;
        }

        status = -ENOMEM;
        /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
        data = kzalloc(sizeof(*data), GFP_NOFS);
        if (!data)
                goto clear_layoutcommitting;

        status = 0;
        spin_lock(&inode->i_lock);
        if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
                goto out_unlock;

        INIT_LIST_HEAD(&data->lseg_list);
        pnfs_list_write_lseg(inode, &data->lseg_list);

        end_pos = nfsi->layout->plh_lwb;

        nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
        spin_unlock(&inode->i_lock);

        data->args.inode = inode;
        data->cred = get_cred(nfsi->layout->plh_lc_cred);
        nfs_fattr_init(&data->fattr);
        data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
        data->res.fattr = &data->fattr;
        if (end_pos != 0)
                data->args.lastbytewritten = end_pos - 1;
        else
                data->args.lastbytewritten = U64_MAX;
        data->res.server = NFS_SERVER(inode);

        if (ld->prepare_layoutcommit) {
                status = ld->prepare_layoutcommit(&data->args);
                if (status) {
                        put_cred(data->cred);
                        spin_lock(&inode->i_lock);
                        set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
                        if (end_pos > nfsi->layout->plh_lwb)
                                nfsi->layout->plh_lwb = end_pos;
                        goto out_unlock;
                }
        }

        status = nfs4_proc_layoutcommit(data, sync);
out:
        if (status)
                mark_inode_dirty_sync(inode);
        dprintk("<-- %s status %d\n", __func__, status);
        return status;
out_unlock:
        spin_unlock(&inode->i_lock);
        kfree(data);
clear_layoutcommitting:
        pnfs_clear_layoutcommitting(inode);
        goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);

int
pnfs_generic_sync(struct inode *inode, bool datasync)
{
        return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_generic_sync);
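
/*
 * Illustration only (not part of this file): a layout driver opts into
 * "fsync sends LAYOUTCOMMIT" semantics by plugging pnfs_generic_sync()
 * into the ->sync slot of its pnfs_layoutdriver_type, as e.g. the block
 * layout driver does; the NFS fsync path then reaches it through the
 * driver's sync op.  example_layout_type is a hypothetical name and the
 * remaining mandatory ops are omitted.
 */
#if 0
static struct pnfs_layoutdriver_type example_layout_type = {
        .name = "LAYOUT_EXAMPLE",
        /* ... other mandatory ops elided ... */
        .sync = pnfs_generic_sync,
};
#endif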
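
/*
 * Allocate a zeroed nfs4_threshold, used to receive the mdsthreshold
 * attribute on OPEN.  The caller owns the returned memory; returns
 * NULL on allocation failure.
 */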
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
        struct nfs4_threshold *thp;

        thp = kzalloc(sizeof(*thp), GFP_NOFS);
        if (!thp) {
                dprintk("%s mdsthreshold allocation failed\n", __func__);
                return NULL;
        }
        return thp;
}

#if IS_ENABLED(CONFIG_NFS_V4_2)
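/*
 * Send a LAYOUTSTATS report for @inode, provided pNFS is enabled on the
 * superblock, the layout driver implements ->prepare_layoutstats and
 * the server advertises NFS_CAP_LAYOUTSTATS.  The NFS_INO_LAYOUTSTATS
 * bit serializes reports, so at most one is in flight per inode.
 */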
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs42_layoutstat_data *data;
        struct pnfs_layout_hdr *hdr;
        int status = 0;

        if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
                goto out;

        if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
                goto out;

        if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
                goto out;

        spin_lock(&inode->i_lock);
        if (!NFS_I(inode)->layout) {
                spin_unlock(&inode->i_lock);
                goto out_clear_layoutstats;
        }
        hdr = NFS_I(inode)->layout;
        pnfs_get_layout_hdr(hdr);
        spin_unlock(&inode->i_lock);

        data = kzalloc(sizeof(*data), gfp_flags);
        if (!data) {
                status = -ENOMEM;
                goto out_put;
        }

        data->args.fh = NFS_FH(inode);
        data->args.inode = inode;
        status = ld->prepare_layoutstats(&data->args);
        if (status)
                goto out_free;

        status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
        dprintk("%s returns %d\n", __func__, status);
        return status;

out_free:
        kfree(data);
out_put:
        pnfs_put_layout_hdr(hdr);
out_clear_layoutstats:
        smp_mb__before_atomic();
        clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
        smp_mb__after_atomic();
        goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif
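
/*
 * Interval, in seconds, at which layout drivers report layoutstats
 * (0 selects the driver's built-in default).  Writable at runtime;
 * assuming pnfs.c is built into nfsv4.ko, the knob appears as
 * /sys/module/nfsv4/parameters/layoutstats_timer.
 */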
unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
EXPORT_SYMBOL_GPL(layoutstats_timer);