]> asedeno.scripts.mit.edu Git - linux.git/blob - fs/cifs/transport.c
media: v4l2-device.h: Explicitly compare grp{id,mask} to zero in v4l2_device macros
[linux.git] / fs / cifs / transport.c
1 /*
2  *   fs/cifs/transport.c
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *   Jeremy Allison (jra@samba.org) 2006.
7  *
8  *   This library is free software; you can redistribute it and/or modify
9  *   it under the terms of the GNU Lesser General Public License as published
10  *   by the Free Software Foundation; either version 2.1 of the License, or
11  *   (at your option) any later version.
12  *
13  *   This library is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
16  *   the GNU Lesser General Public License for more details.
17  *
18  *   You should have received a copy of the GNU Lesser General Public License
19  *   along with this library; if not, write to the Free Software
20  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21  */
22
23 #include <linux/fs.h>
24 #include <linux/list.h>
25 #include <linux/gfp.h>
26 #include <linux/wait.h>
27 #include <linux/net.h>
28 #include <linux/delay.h>
29 #include <linux/freezer.h>
30 #include <linux/tcp.h>
31 #include <linux/bvec.h>
32 #include <linux/highmem.h>
33 #include <linux/uaccess.h>
34 #include <asm/processor.h>
35 #include <linux/mempool.h>
36 #include <linux/sched/signal.h>
37 #include "cifspdu.h"
38 #include "cifsglob.h"
39 #include "cifsproto.h"
40 #include "cifs_debug.h"
41 #include "smb2proto.h"
42 #include "smbdirect.h"
43
44 /* Max number of iovectors we can use off the stack when sending requests. */
45 #define CIFS_MAX_IOV_SIZE 8
46
47 void
48 cifs_wake_up_task(struct mid_q_entry *mid)
49 {
50         wake_up_process(mid->callback_data);
51 }
52
53 struct mid_q_entry *
54 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
55 {
56         struct mid_q_entry *temp;
57
58         if (server == NULL) {
59                 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
60                 return NULL;
61         }
62
63         temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
64         memset(temp, 0, sizeof(struct mid_q_entry));
65         kref_init(&temp->refcount);
66         temp->mid = get_mid(smb_buffer);
67         temp->pid = current->pid;
68         temp->command = cpu_to_le16(smb_buffer->Command);
69         cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
70         /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
71         /* when mid allocated can be before when sent */
72         temp->when_alloc = jiffies;
73         temp->server = server;
74
75         /*
76          * The default is for the mid to be synchronous, so the
77          * default callback just wakes up the current task.
78          */
79         temp->callback = cifs_wake_up_task;
80         temp->callback_data = current;
81
82         atomic_inc(&midCount);
83         temp->mid_state = MID_REQUEST_ALLOCATED;
84         return temp;
85 }
86
/*
 * Final kref release for a mid queue entry: let the protocol layer clean
 * up a cancelled-but-answered request, free the response buffer, update
 * the optional per-command statistics, and return the entry to the
 * mempool.  Invoked under GlobalMid_Lock via cifs_mid_q_entry_release().
 */
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	/*
	 * A request the waiter gave up on may still have received a
	 * response; give the protocol-specific handler a chance to act
	 * on it before the buffer is freed.
	 */
	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry->resp_buf, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	/* track fastest/slowest/cumulative round-trip time per command */
	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calc is unlikely or impossible to wrap
	 * as long as slow_rsp_threshold is not set way above recommended max
	 * value (32767 ie 9 hours) and is generally harmless even if wrong
	 * since only affects debug counters - so leaving the calc as simple
	 * comparison rather than doing multiple conversions and overflow
	 * checks
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif

	mempool_free(midEntry, cifs_mid_poolp);
}
164
/*
 * Drop a reference on @midEntry; the final put frees it via
 * _cifs_mid_q_entry_release().  The put is done under GlobalMid_Lock —
 * presumably to serialize release against pending-mid-queue users;
 * NOTE(review): confirm against the lookup paths before changing.
 */
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}
171
/* Thin wrapper: "deleting" a mid is just dropping its reference. */
void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}
176
/*
 * Unlink @mid from its pending queue (if still linked) and drop the
 * reference.  The MID_DELETED flag prevents a double list_del when
 * another path has already removed the entry from the queue.
 */
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
189
190 /*
191  * smb_send_kvec - send an array of kvecs to the server
192  * @server:     Server to send the data to
193  * @smb_msg:    Message to send
194  * @sent:       amount of data sent on socket is stored here
195  *
196  * Our basic "send data to server" function. Should be called with srv_mutex
197  * held. The caller is responsible for handling the results.
198  */
199 static int
200 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
201               size_t *sent)
202 {
203         int rc = 0;
204         int retries = 0;
205         struct socket *ssocket = server->ssocket;
206
207         *sent = 0;
208
209         smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
210         smb_msg->msg_namelen = sizeof(struct sockaddr);
211         smb_msg->msg_control = NULL;
212         smb_msg->msg_controllen = 0;
213         if (server->noblocksnd)
214                 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
215         else
216                 smb_msg->msg_flags = MSG_NOSIGNAL;
217
218         while (msg_data_left(smb_msg)) {
219                 /*
220                  * If blocking send, we try 3 times, since each can block
221                  * for 5 seconds. For nonblocking  we have to try more
222                  * but wait increasing amounts of time allowing time for
223                  * socket to clear.  The overall time we wait in either
224                  * case to send on the socket is about 15 seconds.
225                  * Similarly we wait for 15 seconds for a response from
226                  * the server in SendReceive[2] for the server to send
227                  * a response back for most types of requests (except
228                  * SMB Write past end of file which can be slow, and
229                  * blocking lock operations). NFS waits slightly longer
230                  * than CIFS, but this can make it take longer for
231                  * nonresponsive servers to be detected and 15 seconds
232                  * is more than enough time for modern networks to
233                  * send a packet.  In most cases if we fail to send
234                  * after the retries we will kill the socket and
235                  * reconnect which may clear the network problem.
236                  */
237                 rc = sock_sendmsg(ssocket, smb_msg);
238                 if (rc == -EAGAIN) {
239                         retries++;
240                         if (retries >= 14 ||
241                             (!server->noblocksnd && (retries > 2))) {
242                                 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
243                                          ssocket);
244                                 return -EAGAIN;
245                         }
246                         msleep(1 << retries);
247                         continue;
248                 }
249
250                 if (rc < 0)
251                         return rc;
252
253                 if (rc == 0) {
254                         /* should never happen, letting socket clear before
255                            retrying is our only obvious option here */
256                         cifs_server_dbg(VFS, "tcp sent no data\n");
257                         msleep(500);
258                         continue;
259                 }
260
261                 /* send was at least partially successful */
262                 *sent += rc;
263                 retries = 0; /* in case we get ENOSPC on the next send */
264         }
265         return 0;
266 }
267
268 unsigned long
269 smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
270 {
271         unsigned int i;
272         struct kvec *iov;
273         int nvec;
274         unsigned long buflen = 0;
275
276         if (server->vals->header_preamble_size == 0 &&
277             rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
278                 iov = &rqst->rq_iov[1];
279                 nvec = rqst->rq_nvec - 1;
280         } else {
281                 iov = rqst->rq_iov;
282                 nvec = rqst->rq_nvec;
283         }
284
285         /* total up iov array first */
286         for (i = 0; i < nvec; i++)
287                 buflen += iov[i].iov_len;
288
289         /*
290          * Add in the page array if there is one. The caller needs to make
291          * sure rq_offset and rq_tailsz are set correctly. If a buffer of
292          * multiple pages ends at page boundary, rq_tailsz needs to be set to
293          * PAGE_SIZE.
294          */
295         if (rqst->rq_npages) {
296                 if (rqst->rq_npages == 1)
297                         buflen += rqst->rq_tailsz;
298                 else {
299                         /*
300                          * If there is more than one page, calculate the
301                          * buffer length based on rq_offset and rq_tailsz
302                          */
303                         buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
304                                         rqst->rq_offset;
305                         buflen += rqst->rq_tailsz;
306                 }
307         }
308
309         return buflen;
310 }
311
/*
 * Send @num_rqst requests as a single on-the-wire message: cork the
 * socket, send the 4-byte RFC1002 length marker (SMB2+ only), then each
 * request's iovecs followed by its page array, then uncork.  All signals
 * are blocked for the duration of the send so a signal cannot cause a
 * partial send (and hence a needless session reconnect).  For RDMA
 * (smbdirect) connections the whole job is delegated to smbd_send().
 * Should be called with srv_mutex held (see smb_send_kvec).
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (signal_pending(current)) {
		cifs_dbg(FYI, "signal is pending before sending any data\n");
		return -EINTR;
	}

	/* cork the socket so the pieces below go out as one segment stream */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	/* total payload length, needed for the RFC1002 marker below */
	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		/* include the marker so the partial-send check below matches */
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -EINTR;
	}

	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
460
461 static int
462 smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
463               struct smb_rqst *rqst, int flags)
464 {
465         struct kvec iov;
466         struct smb2_transform_hdr tr_hdr;
467         struct smb_rqst cur_rqst[MAX_COMPOUND];
468         int rc;
469
470         if (!(flags & CIFS_TRANSFORM_REQ))
471                 return __smb_send_rqst(server, num_rqst, rqst);
472
473         if (num_rqst > MAX_COMPOUND - 1)
474                 return -ENOMEM;
475
476         memset(&cur_rqst[0], 0, sizeof(cur_rqst));
477         memset(&iov, 0, sizeof(iov));
478         memset(&tr_hdr, 0, sizeof(tr_hdr));
479
480         iov.iov_base = &tr_hdr;
481         iov.iov_len = sizeof(tr_hdr);
482         cur_rqst[0].rq_iov = &iov;
483         cur_rqst[0].rq_nvec = 1;
484
485         if (!server->ops->init_transform_rq) {
486                 cifs_server_dbg(VFS, "Encryption requested but transform "
487                                 "callback is missing\n");
488                 return -EIO;
489         }
490
491         rc = server->ops->init_transform_rq(server, num_rqst + 1,
492                                             &cur_rqst[0], rqst);
493         if (rc)
494                 return rc;
495
496         rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
497         smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
498         return rc;
499 }
500
501 int
502 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
503          unsigned int smb_buf_length)
504 {
505         struct kvec iov[2];
506         struct smb_rqst rqst = { .rq_iov = iov,
507                                  .rq_nvec = 2 };
508
509         iov[0].iov_base = smb_buffer;
510         iov[0].iov_len = 4;
511         iov[1].iov_base = (char *)smb_buffer + 4;
512         iov[1].iov_len = smb_buf_length;
513
514         return __smb_send_rqst(server, 1, &rqst);
515 }
516
/*
 * Reserve @num_credits send credits on @server, blocking (killable)
 * until they are available, the timeout expires, or the server exits.
 *
 * @timeout:  max wait in milliseconds; negative means wait forever.
 * @flags:    CIFS_OP_MASK selects which credit pool; CIFS_NON_BLOCKING
 *            (e.g. oplock breaks) bypasses the wait entirely.
 * @instance: on success, set to the reconnect instance the credits
 *            belong to so the caller can detect an intervening reconnect.
 *
 * Returns 0 on success; -EAGAIN (echo with no credits), -ENOENT (server
 * exiting), -ERESTARTSYS (fatal signal), or -ENOTSUPP (timed out).
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	int rc;
	int *credits;
	int optype;
	long int t;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			/* not enough credits: drop the lock and sleep */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
					 timeout);
				return -ENOTSUPP;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this is we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						 timeout);
					return -ENOTSUPP;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
633
/* Reserve a single credit, waiting indefinitely (negative timeout). */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags, instance);
}
641
/*
 * Reserve @num credits for a compound request, waiting up to 60 seconds.
 * If the server is short of credits and fewer than the shortfall are in
 * flight (so no replies are due to replenish the pool soon), fail fast
 * with -ENOTSUPP instead of blocking.
 */
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	if (*credits < num) {
		/*
		 * Return immediately if not too many requests in flight since
		 * we will likely be stuck on waiting for credits.
		 */
		if (server->in_flight < num - *credits) {
			spin_unlock(&server->req_lock);
			return -ENOTSUPP;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
666
667 int
668 cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
669                       unsigned int *num, struct cifs_credits *credits)
670 {
671         *num = size;
672         credits->value = 0;
673         credits->instance = server->reconnect_instance;
674         return 0;
675 }
676
677 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
678                         struct mid_q_entry **ppmidQ)
679 {
680         if (ses->server->tcpStatus == CifsExiting) {
681                 return -ENOENT;
682         }
683
684         if (ses->server->tcpStatus == CifsNeedReconnect) {
685                 cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
686                 return -EAGAIN;
687         }
688
689         if (ses->status == CifsNew) {
690                 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
691                         (in_buf->Command != SMB_COM_NEGOTIATE))
692                         return -EAGAIN;
693                 /* else ok - we are setting up session */
694         }
695
696         if (ses->status == CifsExiting) {
697                 /* check if SMB session is bad because we are setting it up */
698                 if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
699                         return -EAGAIN;
700                 /* else ok - we are shutting down session */
701         }
702
703         *ppmidQ = AllocMidQEntry(in_buf, ses->server);
704         if (*ppmidQ == NULL)
705                 return -ENOMEM;
706         spin_lock(&GlobalMid_Lock);
707         list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
708         spin_unlock(&GlobalMid_Lock);
709         return 0;
710 }
711
712 static int
713 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
714 {
715         int error;
716
717         error = wait_event_freezekillable_unsafe(server->response_q,
718                                     midQ->mid_state != MID_REQUEST_SUBMITTED);
719         if (error < 0)
720                 return -ERESTARTSYS;
721
722         return 0;
723 }
724
725 struct mid_q_entry *
726 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
727 {
728         int rc;
729         struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
730         struct mid_q_entry *mid;
731
732         if (rqst->rq_iov[0].iov_len != 4 ||
733             rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
734                 return ERR_PTR(-EIO);
735
736         /* enable signing if server requires it */
737         if (server->sign)
738                 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
739
740         mid = AllocMidQEntry(hdr, server);
741         if (mid == NULL)
742                 return ERR_PTR(-ENOMEM);
743
744         rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
745         if (rc) {
746                 DeleteMidQEntry(mid);
747                 return ERR_PTR(rc);
748         }
749
750         return mid;
751 }
752
753 /*
754  * Send a SMB request and set the callback function in the mid to handle
755  * the result. Caller is responsible for dealing with timeouts.
756  */
757 int
758 cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
759                 mid_receive_t *receive, mid_callback_t *callback,
760                 mid_handle_t *handle, void *cbdata, const int flags,
761                 const struct cifs_credits *exist_credits)
762 {
763         int rc;
764         struct mid_q_entry *mid;
765         struct cifs_credits credits = { .value = 0, .instance = 0 };
766         unsigned int instance;
767         int optype;
768
769         optype = flags & CIFS_OP_MASK;
770
771         if ((flags & CIFS_HAS_CREDITS) == 0) {
772                 rc = wait_for_free_request(server, flags, &instance);
773                 if (rc)
774                         return rc;
775                 credits.value = 1;
776                 credits.instance = instance;
777         } else
778                 instance = exist_credits->instance;
779
780         mutex_lock(&server->srv_mutex);
781
782         /*
783          * We can't use credits obtained from the previous session to send this
784          * request. Check if there were reconnects after we obtained credits and
785          * return -EAGAIN in such cases to let callers handle it.
786          */
787         if (instance != server->reconnect_instance) {
788                 mutex_unlock(&server->srv_mutex);
789                 add_credits_and_wake_if(server, &credits, optype);
790                 return -EAGAIN;
791         }
792
793         mid = server->ops->setup_async_request(server, rqst);
794         if (IS_ERR(mid)) {
795                 mutex_unlock(&server->srv_mutex);
796                 add_credits_and_wake_if(server, &credits, optype);
797                 return PTR_ERR(mid);
798         }
799
800         mid->receive = receive;
801         mid->callback = callback;
802         mid->callback_data = cbdata;
803         mid->handle = handle;
804         mid->mid_state = MID_REQUEST_SUBMITTED;
805
806         /* put it on the pending_mid_q */
807         spin_lock(&GlobalMid_Lock);
808         list_add_tail(&mid->qhead, &server->pending_mid_q);
809         spin_unlock(&GlobalMid_Lock);
810
811         /*
812          * Need to store the time in mid before calling I/O. For call_async,
813          * I/O response may come back and free the mid entry on another thread.
814          */
815         cifs_save_when_sent(mid);
816         cifs_in_send_inc(server);
817         rc = smb_send_rqst(server, 1, rqst, flags);
818         cifs_in_send_dec(server);
819
820         if (rc < 0) {
821                 revert_current_mid(server, mid->credits);
822                 server->sequence_number -= 2;
823                 cifs_delete_mid(mid);
824         }
825
826         mutex_unlock(&server->srv_mutex);
827
828         if (rc == 0)
829                 return 0;
830
831         add_credits_and_wake_if(server, &credits, optype);
832         return rc;
833 }
834
835 /*
836  *
837  * Send an SMB Request.  No response info (other than return code)
838  * needs to be parsed.
839  *
840  * flags indicate the type of request buffer and how long to wait
841  * and whether to log NT STATUS code (error) before mapping it to POSIX error
842  *
843  */
844 int
845 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
846                  char *in_buf, int flags)
847 {
848         int rc;
849         struct kvec iov[1];
850         struct kvec rsp_iov;
851         int resp_buf_type;
852
853         iov[0].iov_base = in_buf;
854         iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
855         flags |= CIFS_NO_RSP_BUF;
856         rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
857         cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
858
859         return rc;
860 }
861
/*
 * Translate a mid's final state into an errno after the wait is over.
 * Returns 0 for MID_RESPONSE_RECEIVED without freeing the mid (the caller
 * still needs its response buffer); for every other state the mid is
 * freed via DeleteMidQEntry() and a negative error is returned.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		/* success - mid stays alive for the caller */
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/* unexpected state: dequeue the mid here if not already done */
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
898
899 static inline int
900 send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
901             struct mid_q_entry *mid)
902 {
903         return server->ops->send_cancel ?
904                                 server->ops->send_cancel(server, rqst, mid) : 0;
905 }
906
907 int
908 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
909                    bool log_error)
910 {
911         unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
912
913         dump_smb(mid->resp_buf, min_t(u32, 92, len));
914
915         /* convert the length into a more usable form */
916         if (server->sign) {
917                 struct kvec iov[2];
918                 int rc = 0;
919                 struct smb_rqst rqst = { .rq_iov = iov,
920                                          .rq_nvec = 2 };
921
922                 iov[0].iov_base = mid->resp_buf;
923                 iov[0].iov_len = 4;
924                 iov[1].iov_base = (char *)mid->resp_buf + 4;
925                 iov[1].iov_len = len - 4;
926                 /* FIXME: add code to kill session */
927                 rc = cifs_verify_signature(&rqst, server,
928                                            mid->sequence_number);
929                 if (rc)
930                         cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
931                                  rc);
932         }
933
934         /* BB special case reconnect tid and uid here? */
935         return map_smb_to_linux_error(mid->resp_buf, log_error);
936 }
937
938 struct mid_q_entry *
939 cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
940                    struct smb_rqst *rqst)
941 {
942         int rc;
943         struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
944         struct mid_q_entry *mid;
945
946         if (rqst->rq_iov[0].iov_len != 4 ||
947             rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
948                 return ERR_PTR(-EIO);
949
950         rc = allocate_mid(ses, hdr, &mid);
951         if (rc)
952                 return ERR_PTR(rc);
953         rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
954         if (rc) {
955                 cifs_delete_mid(mid);
956                 return ERR_PTR(rc);
957         }
958         return mid;
959 }
960
961 static void
962 cifs_compound_callback(struct mid_q_entry *mid)
963 {
964         struct TCP_Server_Info *server = mid->server;
965         struct cifs_credits credits;
966
967         credits.value = server->ops->get_credits(mid);
968         credits.instance = server->reconnect_instance;
969
970         add_credits(server, &credits, mid->optype);
971 }
972
/*
 * Callback for the last PDU of a compound chain: collect credits like
 * every other PDU, then wake the thread waiting in compound_send_recv().
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
979
/*
 * Callback for a mid whose waiter gave up: collect the granted credits
 * and free the mid here, since nobody will look at the response.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
986
/*
 * Send a compound chain of @num_rqst SMB requests and wait for all of
 * their responses.
 *
 * One credit per request is obtained up front; signing and sending happen
 * under srv_mutex so signing order matches wire order.  On success each
 * resp_iov[i]/resp_buf_type[i] describes the i-th response buffer, which
 * the caller owns unless CIFS_NO_RSP_BUF was passed.  Returns 0 or a
 * negative errno.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;
	struct TCP_Server_Info *server;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	/*
	 * Pick the channel to send on: round-robin over the session's
	 * channels, unless we are binding a new channel, in which case the
	 * session's primary server is used.
	 */
	if (!ses->binding) {
		uint index = 0;

		if (ses->chan_count > 1) {
			index = (uint)atomic_inc_return(&ses->chan_seq);
			index %= ses->chan_count;
		}
		server = ses->chans[index].server;
	} else {
		server = cifs_ses_server(ses);
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All parts of the compound chain must use credits obtained from the
	 * same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo mid consumption and free already-created mids */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* undo mid and sequence number consumption of the failed send */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/*
		 * Interrupted: cancel this and all later requests.  A mid
		 * still in MID_REQUEST_SUBMITTED gets the cancelled callback
		 * so it frees itself when the response eventually arrives;
		 * its local credit is zeroed since the callback collects the
		 * server-granted credits instead.
		 */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}
1215
/*
 * Send a single request and wait for its response: simply a compound
 * chain of length one.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}
1224
1225 int
1226 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1227              struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1228              const int flags, struct kvec *resp_iov)
1229 {
1230         struct smb_rqst rqst;
1231         struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1232         int rc;
1233
1234         if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1235                 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1236                                         GFP_KERNEL);
1237                 if (!new_iov) {
1238                         /* otherwise cifs_send_recv below sets resp_buf_type */
1239                         *resp_buf_type = CIFS_NO_BUFFER;
1240                         return -ENOMEM;
1241                 }
1242         } else
1243                 new_iov = s_iov;
1244
1245         /* 1st iov is a RFC1001 length followed by the rest of the packet */
1246         memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1247
1248         new_iov[0].iov_base = new_iov[1].iov_base;
1249         new_iov[0].iov_len = 4;
1250         new_iov[1].iov_base += 4;
1251         new_iov[1].iov_len -= 4;
1252
1253         memset(&rqst, 0, sizeof(struct smb_rqst));
1254         rqst.rq_iov = new_iov;
1255         rqst.rq_nvec = n_vec + 1;
1256
1257         rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
1258         if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1259                 kfree(new_iov);
1260         return rc;
1261 }
1262
/*
 * Synchronously send one legacy (SMB1) request and copy the response into
 * @out_buf.  Takes a single credit, signs and sends under srv_mutex, waits
 * for the response, and maps the SMB status to a POSIX error.  On success
 * *pbytes_returned holds the response length read from the RFC1002 header.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* undo the sequence numbers consumed by the failed send */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		/*
		 * Interrupted: try to cancel.  If the request is still
		 * in flight, hand the mid to the demultiplex thread to
		 * free when the response arrives and return right away.
		 */
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		/* mid already freed by cifs_sync_mid_result */
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1374
1375 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1376    blocking lock to return. */
1377
1378 static int
1379 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1380                         struct smb_hdr *in_buf,
1381                         struct smb_hdr *out_buf)
1382 {
1383         int bytes_returned;
1384         struct cifs_ses *ses = tcon->ses;
1385         LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1386
1387         /* We just modify the current in_buf to change
1388            the type of lock from LOCKING_ANDX_SHARED_LOCK
1389            or LOCKING_ANDX_EXCLUSIVE_LOCK to
1390            LOCKING_ANDX_CANCEL_LOCK. */
1391
1392         pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1393         pSMB->Timeout = 0;
1394         pSMB->hdr.Mid = get_next_mid(ses->server);
1395
1396         return SendReceive(xid, ses, in_buf, out_buf,
1397                         &bytes_returned, 0);
1398 }
1399
/*
 * Send a blocking SMB1 lock request and wait for its response.  If the
 * wait is interrupted by a signal, the lock is cancelled on the server
 * (NT_CANCEL for a Trans2/POSIX lock, LOCKINGX_CANCEL_LOCK otherwise) and
 * we then wait for the final response; -EACCES for a cancelled lock is
 * turned into -ERESTARTSYS so the system call can be restarted.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* undo the sequence numbers consumed by the failed send */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			/* still no reply: hand the mid to the demultiplex
			   thread to free when the response arrives */
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}