/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

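/*
 * Default mid callback: wake the task that queued the request and is
 * sleeping in wait_for_response(). callback_data holds the task_struct.
 */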
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

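/*
 * Allocate a mid_q_entry for the request in @smb_buffer and initialize it
 * for synchronous use: the default callback simply wakes the current task.
 * Returns NULL if @server is NULL. The entry is freed via its refcount once
 * the response has been handled.
 */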
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
                                               refcount);

        mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

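/*
 * Release the response buffer and drop the mid's refcount. With
 * CONFIG_CIFS_STATS2 this is also where per-command round-trip times are
 * accumulated and slow responses (beyond slow_rsp_threshold seconds) are
 * counted and traced.
 */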
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
        struct TCP_Server_Info *server = midEntry->server;
#endif
        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (now < midEntry->when_alloc)
                cifs_dbg(VFS, "invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * Commands taking longer than one second (the default) can be an
         * indication that something is wrong, unless it is quite a slow link
         * or a very busy server. Note that this calculation is unlikely or
         * impossible to wrap as long as slow_rsp_threshold is not set way
         * above the recommended maximum value (32767, i.e. 9 hours), and it
         * is generally harmless even if wrong since it only affects debug
         * counters - so leave the calculation as a simple comparison rather
         * than doing multiple conversions and overflow checks.
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so can not be negative below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug(" CIFS slow rsp: cmd %d mid %llu",
                               midEntry->command, midEntry->mid);
                        cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
                               now - midEntry->when_alloc,
                               now - midEntry->when_sent,
                               now - midEntry->when_received);
                }
        }
#endif
        cifs_mid_q_entry_release(midEntry);
}

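/*
 * Unlink the mid from the server's pending_mid_q, mark it MID_DELETED so
 * the demultiplex thread will not touch it again, then release it.
 */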
void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        list_del_init(&mid->qhead);
        mid->mid_flags |= MID_DELETED;
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg->msg_namelen = sizeof(struct sockaddr);
        smb_msg->msg_control = NULL;
        smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking send we have to try more
                 * times, but wait increasing amounts of time to allow the
                 * socket to clear. The overall time we wait in either case
                 * to send on the socket is about 15 seconds. Similarly we
                 * wait for 15 seconds for a response from the server in
                 * SendReceive[2] for most types of requests (except SMB
                 * writes past the end of file, which can be slow, and
                 * blocking lock operations). NFS waits slightly longer
                 * than CIFS, but this can make it take longer for
                 * nonresponsive servers to be detected, and 15 seconds
                 * is more than enough time for modern networks to send
                 * a packet. In most cases if we fail to send after the
                 * retries we will kill the socket and reconnect, which
                 * may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /*
                         * This should never happen; letting the socket
                         * clear before retrying is our only obvious option
                         * here.
                         */
                        cifs_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}

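/*
 * Return the total number of bytes @rqst will occupy on the wire: the sum
 * of the iov array plus any attached page array. For SMB2+ (no RFC1001
 * preamble) a leading 4-byte length iov is excluded from the count.
 */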
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}

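/*
 * Send one or more requests on the socket (or via smbdirect if RDMA is in
 * use). For SMB2+ a 4-byte RFC1002 length marker is generated and sent
 * first. Signals are blocked for the duration of the send so that a send
 * cannot be interrupted partway through; a partial send marks the session
 * for reconnect. Must be called with srv_mutex held.
 */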
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc = 0;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        int val = 1;
        __be32 rfc1002_marker;

        if (cifs_rdma_enabled(server) && server->smbd_conn) {
                rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        if (ssocket == NULL)
                return -EAGAIN;

        if (signal_pending(current)) {
                cifs_dbg(FYI, "signal is pending before sending any data\n");
                return -EINTR;
        }

        /* cork the socket */
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects, thus increasing
         * latency of system calls and overloading a server with unnecessary
         * requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate a rfc1002 marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If a signal is pending but we have already sent the whole packet
         * to the server, we need to return success to allow the
         * corresponding mid entry to be kept in the pending requests queue,
         * thus allowing the client to handle responses from the server.
         *
         * If only part of the packet has been sent, there is no need to
         * hide the interrupt because the session will be reconnected
         * anyway, so there won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -EINTR;
        }

        /* uncork it */
        val = 0;
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB.
                 */
                server->tcpStatus = CifsNeedReconnect;
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}

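/*
 * Send @num_rqst requests, encrypting them first when CIFS_TRANSFORM_REQ is
 * set in @flags. Encryption prepends a transform header and hands the whole
 * chain to the server's init_transform_rq callback before sending.
 */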
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
        memset(&tr_hdr, 0, sizeof(tr_hdr));

        iov.iov_base = &tr_hdr;
        iov.iov_len = sizeof(tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        if (!server->ops->init_transform_rq) {
                cifs_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                return rc;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
        return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

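/*
 * Block until @num_credits credits are available on @server, or until
 * @timeout milliseconds elapse (a negative timeout waits forever).
 * CIFS_NON_BLOCKING requests (e.g. oplock break acks) are never held up.
 * For normal single-credit requests the last MAX_COMPOUND credits are
 * reserved for compound requests so compounds cannot be starved. On
 * success the credits are charged against the server and *@instance
 * records the reconnect instance they belong to.
 */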
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        int rc;
        int *credits;
        int optype;
        long int t;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                *credits -= 1;
                *instance = server->reconnect_instance;
                spin_unlock(&server->req_lock);
                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        spin_unlock(&server->req_lock);
                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                trace_smb3_credit_timeout(server->CurrentMid,
                                        server->hostname, num_credits);
                                cifs_dbg(VFS, "wait timed out after %d ms\n",
                                         timeout);
                                return -ENOTSUPP;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning the CPU, block this thread until
                         * there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * credits in flight to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);
                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        trace_smb3_credit_timeout(
                                                server->CurrentMid,
                                                server->hostname, num_credits);
                                        cifs_dbg(VFS, "wait timed out after %d ms\n",
                                                 timeout);
                                        return -ENOTSUPP;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Can not count locking commands against total
                         * as they are allowed to block on server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                *instance = server->reconnect_instance;
                        }
                        spin_unlock(&server->req_lock);
                        break;
                }
        }
        return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        if (*credits < num) {
                /*
                 * Return immediately if not too many requests in flight since
                 * we will likely be stuck on waiting for credits.
                 */
                if (server->in_flight < num - *credits) {
                        spin_unlock(&server->req_lock);
                        return -ENOTSUPP;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags,
                                     instance);
}

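/*
 * Trivial wait_mtu_credits implementation for dialects without MTU
 * crediting: grant the full @size and return a zero-value credit tagged
 * with the current reconnect instance.
 */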
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting) {
                return -ENOENT;
        }

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

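/*
 * Sleep (freezable, killable) until the demultiplex thread moves the mid
 * out of MID_REQUEST_SUBMITTED, i.e. a response arrived or the mid failed.
 */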
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                    midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

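/*
 * Allocate and sign a mid for @rqst. The first iov must be the 4-byte
 * RFC1001 length, immediately followed by the SMB header in the second iov.
 */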
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

/*
 * Send an SMB request. No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait,
 * and whether to log the NT STATUS code (error) before mapping it to a
 * POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

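/*
 * Translate the final state of a synchronously-waited-for mid into an
 * errno and release the mid. A mid in an unexpected state is unlinked from
 * the pending queue before being freed.
 */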
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                list_del_init(&mid->qhead);
                cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* convert the length into a more usable form */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_smb_to_linux_error(mid->resp_buf, log_error);
}

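/*
 * Allocate, queue and sign a mid for a synchronous request. As with the
 * async variant, the first iov must be the 4-byte RFC1001 length followed
 * contiguously by the SMB header.
 */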
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}

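/*
 * Send a chain of @num_rqst requests as a single compound and wait for all
 * the responses. Credits are acquired up front, each mid gets a callback
 * that returns its credits when its response arrives, and only the last
 * mid wakes this thread. Response buffers are returned in @resp_iov with
 * their types in @resp_buf_type.
 */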
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        char *buf;
        struct TCP_Server_Info *server;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if ((ses == NULL) || (ses->server == NULL)) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        server = ses->server;
        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Wait for all the requests to become available.
         * This approach still leaves the possibility to be stuck waiting for
         * credits if the server doesn't grant credits to the outstanding
         * requests and if the client is completely idle, not generating any
         * other requests.
         * This can be handled by the eventual session reconnect.
         */
        rc = wait_for_compound_request(server, num_rqst, flags,
                                       &instance);
        if (rc)
                return rc;

        for (i = 0; i < num_rqst; i++) {
                credits[i].value = 1;
                credits[i].instance = instance;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&server->srv_mutex);

        /*
         * All the parts of the compound chain must use credits obtained from
         * the same session. We can not use credits obtained from the previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let callers
         * handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                for (j = 0; j < num_rqst; j++)
                        add_credits(server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = server->ops->setup_request(ses, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(server, i);
                        for (j = 0; j < i; j++)
                                cifs_delete_mid(midQ[j]);
                        mutex_unlock(&server->srv_mutex);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, num_rqst, rqst, flags);
        cifs_in_send_dec(server);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(server, num_rqst);
                server->sequence_number -= 2;
        }

        mutex_unlock(&server->srv_mutex);

        /*
         * If sending failed for some reason, or it is an oplock break that we
         * will not receive a response to, return the credits back.
         */
        if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
                for (i = 0; i < num_rqst; i++)
                        add_credits(server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
                smb311_update_preauth_hash(ses, rqst[0].rq_iov,
                                           rqst[0].rq_nvec);

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(server, &rqst[i], midQ[i]);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        server->vals->header_preamble_size;

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = server->ops->check_receive(midQ[i], server,
                                                flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by cifs_delete_mid */
                if ((flags & CIFS_NO_RSP_BUF) == 0)
                        midQ[i]->resp_buf = NULL;

        }

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                smb311_update_preauth_hash(ses, &iov, 1);
        }

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        cifs_delete_mid(midQ[i]);
        }

        return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
                                  resp_iov);
}

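/*
 * Build an smb_rqst from a caller-supplied iov array whose first vector
 * starts with the 4-byte RFC1001 length, splitting that length into its
 * own iov, then send the request and wait for the response.
 */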
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is an RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int flags)
{
        int rc = 0;
        struct mid_q_entry *midQ;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        struct cifs_credits credits = { .value = 1, .instance = 0 };

        if (ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        if (ses->server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, flags, &credits.instance);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this
         * socket and avoid races inside tcp sendmsg code that could cause
         * corruption of smb data.
         */
        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                /* Update # of requests on wire to server */
                add_credits(ses->server, &credits, 0);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                goto out;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;

        cifs_in_send_inc(ses->server);
        rc = smb_send(ses->server, in_buf, len);
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                ses->server->sequence_number -= 2;

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0)
                goto out;

        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
                send_cancel(ses->server, &rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(ses->server, &credits, 0);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
                add_credits(ses->server, &credits, 0);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        cifs_delete_mid(midQ);
        add_credits(ses->server, &credits, 0);

        return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf,
                        struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /*
         * We just modify the current in_buf to change
         * the type of lock from LOCKING_ANDX_SHARED_LOCK
         * or LOCKING_ANDX_EXCLUSIVE_LOCK to
         * LOCKING_ANDX_CANCEL_LOCK.
         */
        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = get_next_mid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
}

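/*
 * Synchronous send/receive for blocking lock requests: the wait for the
 * response is interruptible, and if a signal arrives while the lock is
 * still pending the lock is cancelled (via NT_CANCEL for POSIX locks or
 * LOCKINGX_CANCEL_LOCK for Windows locks) and the system call may be
 * restarted once the response finally arrives.
 */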
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        unsigned int instance;

        if (tcon == NULL || tcon->ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        ses = tcon->ses;

        if (ses->server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, &instance);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this
         * socket and avoid races inside tcp sendmsg code that could cause
         * corruption of smb data.
         */
        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                cifs_delete_mid(midQ);
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(ses->server);
        rc = smb_send(ses->server, in_buf, len);
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                ses->server->sequence_number -= 2;

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0) {
                cifs_delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(ses->server->response_q,
                (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
                ((ses->server->tcpStatus != CifsGood) &&
                 (ses->server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal ? */
        if ((rc == -ERESTARTSYS) &&
                (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
                ((ses->server->tcpStatus == CifsGood) ||
                 (ses->server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /*
                         * POSIX lock. We send an NT_CANCEL SMB to cause the
                         * blocking lock to return.
                         */
                        rc = send_cancel(ses->server, &rqst, midQ);
                        if (rc) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /*
                         * Windows lock. We send a LOCKINGX_CANCEL_LOCK
                         * to cause the blocking lock to return.
                         */
                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /*
                         * If we get -ENOLCK back, the lock may have already
                         * been removed. Don't exit in this case.
                         */
                        if (rc && rc != -ENOLCK) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(ses->server, midQ);
                if (rc) {
                        send_cancel(ses->server, &rqst, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        cifs_delete_mid(midQ);
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}