2 * Copyright (C) 2017, Microsoft Corporation.
4 * Author(s): Long Li <longli@microsoft.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
14 * the GNU General Public License for more details.
16 #include <linux/module.h>
17 #include <linux/highmem.h>
18 #include "smbdirect.h"
19 #include "cifs_debug.h"
20 #include "cifsproto.h"
21 #include "smb2proto.h"
23 static struct smbd_response *get_empty_queue_buffer(
24 struct smbd_connection *info);
25 static struct smbd_response *get_receive_buffer(
26 struct smbd_connection *info);
27 static void put_receive_buffer(
28 struct smbd_connection *info,
29 struct smbd_response *response);
30 static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
31 static void destroy_receive_buffers(struct smbd_connection *info);
33 static void put_empty_packet(
34 struct smbd_connection *info, struct smbd_response *response);
35 static void enqueue_reassembly(
36 struct smbd_connection *info,
37 struct smbd_response *response, int data_length);
38 static struct smbd_response *_get_first_reassembly(
39 struct smbd_connection *info);
41 static int smbd_post_recv(
42 struct smbd_connection *info,
43 struct smbd_response *response);
45 static int smbd_post_send_empty(struct smbd_connection *info);
46 static int smbd_post_send_data(
47 struct smbd_connection *info,
48 struct kvec *iov, int n_vec, int remaining_data_length);
49 static int smbd_post_send_page(struct smbd_connection *info,
50 struct page *page, unsigned long offset,
51 size_t size, int remaining_data_length);
53 static void destroy_mr_list(struct smbd_connection *info);
54 static int allocate_mr_list(struct smbd_connection *info);
56 /* SMBD version number */
57 #define SMBD_V1 0x0100
59 /* Port numbers for SMBD transport */
61 #define SMBD_PORT 5445
63 /* Address lookup and resolve timeout in ms */
64 #define RDMA_RESOLVE_TIMEOUT 5000
66 /* SMBD negotiation timeout in seconds */
67 #define SMBD_NEGOTIATE_TIMEOUT 120
69 /* SMBD minimum receive size and fragmented size, as defined in [MS-SMBD] */
70 #define SMBD_MIN_RECEIVE_SIZE 128
71 #define SMBD_MIN_FRAGMENTED_SIZE 131072
74 * Default maximum number of outstanding RDMA read/write operations per connection
75 * This value may be decreased during QP creation, subject to hardware limits
77 #define SMBD_CM_RESPONDER_RESOURCES 32
79 /* Maximum number of retries on data transfer operations */
80 #define SMBD_CM_RETRY 6
81 /* No need to retry on Receiver Not Ready since SMBD manages credits */
82 #define SMBD_CM_RNR_RETRY 0
85 * User configurable initial values per SMBD transport connection
86 * as defined in [MS-SMBD] 3.1.1.1
87 * These may change after SMBD negotiation
89 /* The local peer's maximum number of credits to grant to the peer */
90 int smbd_receive_credit_max = 255;
92 /* The number of send credits the local peer requests from the remote peer */
93 int smbd_send_credit_target = 255;
95 /* The maximum single-message size that can be sent to the remote peer */
96 int smbd_max_send_size = 1364;
98 /* The maximum fragmented upper-layer payload receive size supported */
99 int smbd_max_fragmented_recv_size = 1024 * 1024;
101 /* The maximum single-message size which can be received */
102 int smbd_max_receive_size = 8192;
104 /* The idle timeout, in seconds, after which a keepalive message is sent */
105 int smbd_keep_alive_interval = 120;
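/*
 * Note: these are per-connection starting values. After SMBD negotiation,
 * max_send_size is capped by the peer's advertised max_receive_size and
 * max_receive_size is adjusted to the peer's preferred_send_size; see
 * process_negotiation_response() below.
 */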
108 * User configurable initial values for RDMA transport
109 * The actual values used may be lower and are limited to hardware capabilities
111 /* Default maximum number of SGEs in an RDMA write/read */
112 int smbd_max_frmr_depth = 2048;
114 /* If the payload is smaller than this many bytes, use RDMA send/recv instead of read/write */
115 int rdma_readwrite_threshold = 4096;
117 /* Transport logging functions
118 * Logging is organized into classes; they can be OR'ed together to select what
119 * is logged via the module parameter smbd_logging_class,
120 * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and log_rdma_event() messages
123 #define LOG_OUTGOING 0x1
124 #define LOG_INCOMING 0x2
126 #define LOG_WRITE 0x8
127 #define LOG_RDMA_SEND 0x10
128 #define LOG_RDMA_RECV 0x20
129 #define LOG_KEEP_ALIVE 0x40
130 #define LOG_RDMA_EVENT 0x80
131 #define LOG_RDMA_MR 0x100
132 static unsigned int smbd_logging_class;
133 module_param(smbd_logging_class, uint, 0644);
134 MODULE_PARM_DESC(smbd_logging_class,
135 "Logging class for SMBD transport 0x0 to 0x100");
139 static unsigned int smbd_logging_level = ERR;
140 module_param(smbd_logging_level, uint, 0644);
141 MODULE_PARM_DESC(smbd_logging_level,
142 "Logging level for SMBD transport, 0 (default): error, 1: info");
144 #define log_rdma(level, class, fmt, args...) \
146 if (level <= smbd_logging_level || class & smbd_logging_class) \
147 cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\
150 #define log_outgoing(level, fmt, args...) \
151 log_rdma(level, LOG_OUTGOING, fmt, ##args)
152 #define log_incoming(level, fmt, args...) \
153 log_rdma(level, LOG_INCOMING, fmt, ##args)
154 #define log_read(level, fmt, args...) log_rdma(level, LOG_READ, fmt, ##args)
155 #define log_write(level, fmt, args...) log_rdma(level, LOG_WRITE, fmt, ##args)
156 #define log_rdma_send(level, fmt, args...) \
157 log_rdma(level, LOG_RDMA_SEND, fmt, ##args)
158 #define log_rdma_recv(level, fmt, args...) \
159 log_rdma(level, LOG_RDMA_RECV, fmt, ##args)
160 #define log_keep_alive(level, fmt, args...) \
161 log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args)
162 #define log_rdma_event(level, fmt, args...) \
163 log_rdma(level, LOG_RDMA_EVENT, fmt, ##args)
164 #define log_rdma_mr(level, fmt, args...) \
165 log_rdma(level, LOG_RDMA_MR, fmt, ##args)
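/*
 * Example: setting smbd_logging_class=0x30 (LOG_RDMA_SEND | LOG_RDMA_RECV)
 * makes log_rdma_send() and log_rdma_recv() messages print regardless of
 * smbd_logging_level, because log_rdma() checks the class mask independently
 * of the level check.
 */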
167 static void smbd_disconnect_rdma_work(struct work_struct *work)
169 struct smbd_connection *info =
170 container_of(work, struct smbd_connection, disconnect_work);
172 if (info->transport_status == SMBD_CONNECTED) {
173 info->transport_status = SMBD_DISCONNECTING;
174 rdma_disconnect(info->id);
178 static void smbd_disconnect_rdma_connection(struct smbd_connection *info)
180 queue_work(info->workqueue, &info->disconnect_work);
183 /* Upcall from RDMA CM */
184 static int smbd_conn_upcall(
185 struct rdma_cm_id *id, struct rdma_cm_event *event)
187 struct smbd_connection *info = id->context;
189 log_rdma_event(INFO, "event=%d status=%d\n",
190 event->event, event->status);
192 switch (event->event) {
193 case RDMA_CM_EVENT_ADDR_RESOLVED:
194 case RDMA_CM_EVENT_ROUTE_RESOLVED:
196 complete(&info->ri_done);
199 case RDMA_CM_EVENT_ADDR_ERROR:
200 info->ri_rc = -EHOSTUNREACH;
201 complete(&info->ri_done);
204 case RDMA_CM_EVENT_ROUTE_ERROR:
205 info->ri_rc = -ENETUNREACH;
206 complete(&info->ri_done);
209 case RDMA_CM_EVENT_ESTABLISHED:
210 log_rdma_event(INFO, "connected event=%d\n", event->event);
211 info->transport_status = SMBD_CONNECTED;
212 wake_up_interruptible(&info->conn_wait);
215 case RDMA_CM_EVENT_CONNECT_ERROR:
216 case RDMA_CM_EVENT_UNREACHABLE:
217 case RDMA_CM_EVENT_REJECTED:
218 log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
219 info->transport_status = SMBD_DISCONNECTED;
220 wake_up_interruptible(&info->conn_wait);
223 case RDMA_CM_EVENT_DEVICE_REMOVAL:
224 case RDMA_CM_EVENT_DISCONNECTED:
225 /* This happens when we fail the negotiation */
226 if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
227 info->transport_status = SMBD_DISCONNECTED;
228 wake_up(&info->conn_wait);
232 info->transport_status = SMBD_DISCONNECTED;
233 wake_up_interruptible(&info->disconn_wait);
234 wake_up_interruptible(&info->wait_reassembly_queue);
235 wake_up_interruptible_all(&info->wait_send_queue);
245 /* Upcall from RDMA QP */
247 smbd_qp_async_error_upcall(struct ib_event *event, void *context)
249 struct smbd_connection *info = context;
251 log_rdma_event(ERR, "%s on device %s info %p\n",
252 ib_event_msg(event->event), event->device->name, info);
254 switch (event->event) {
255 case IB_EVENT_CQ_ERR:
256 case IB_EVENT_QP_FATAL:
257 smbd_disconnect_rdma_connection(info);
264 static inline void *smbd_request_payload(struct smbd_request *request)
266 return (void *)request->packet;
269 static inline void *smbd_response_payload(struct smbd_response *response)
271 return (void *)response->packet;
274 /* Called when an RDMA send is done */
275 static void send_done(struct ib_cq *cq, struct ib_wc *wc)
278 struct smbd_request *request =
279 container_of(wc->wr_cqe, struct smbd_request, cqe);
281 log_rdma_send(INFO, "smbd_request %p completed wc->status=%d\n",
282 request, wc->status);
284 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
285 log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
286 wc->status, wc->opcode);
287 smbd_disconnect_rdma_connection(request->info);
290 for (i = 0; i < request->num_sge; i++)
291 ib_dma_unmap_single(request->info->id->device,
292 request->sge[i].addr,
293 request->sge[i].length,
296 if (request->has_payload) {
297 if (atomic_dec_and_test(&request->info->send_payload_pending))
298 wake_up(&request->info->wait_send_payload_pending);
300 if (atomic_dec_and_test(&request->info->send_pending))
301 wake_up(&request->info->wait_send_pending);
304 mempool_free(request, request->info->request_mempool);
307 static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
309 log_rdma_event(INFO, "resp message min_version %u max_version %u "
310 "negotiated_version %u credits_requested %u "
311 "credits_granted %u status %u max_readwrite_size %u "
312 "preferred_send_size %u max_receive_size %u "
313 "max_fragmented_size %u\n",
314 resp->min_version, resp->max_version, resp->negotiated_version,
315 resp->credits_requested, resp->credits_granted, resp->status,
316 resp->max_readwrite_size, resp->preferred_send_size,
317 resp->max_receive_size, resp->max_fragmented_size);
321 * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
322 * response, packet_length: the negotiation response message
323 * return value: true if negotiation succeeded, false otherwise
325 static bool process_negotiation_response(
326 struct smbd_response *response, int packet_length)
328 struct smbd_connection *info = response->info;
329 struct smbd_negotiate_resp *packet = smbd_response_payload(response);
331 if (packet_length < sizeof(struct smbd_negotiate_resp)) {
333 "error: packet_length=%d\n", packet_length);
337 if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
338 log_rdma_event(ERR, "error: negotiated_version=%x\n",
339 le16_to_cpu(packet->negotiated_version));
342 info->protocol = le16_to_cpu(packet->negotiated_version);
344 if (packet->credits_requested == 0) {
345 log_rdma_event(ERR, "error: credits_requested==0\n");
348 info->receive_credit_target = le16_to_cpu(packet->credits_requested);
350 if (packet->credits_granted == 0) {
351 log_rdma_event(ERR, "error: credits_granted==0\n");
354 atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));
356 atomic_set(&info->receive_credits, 0);
358 if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
359 log_rdma_event(ERR, "error: preferred_send_size=%d\n",
360 le32_to_cpu(packet->preferred_send_size));
363 info->max_receive_size = le32_to_cpu(packet->preferred_send_size);
365 if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
366 log_rdma_event(ERR, "error: max_receive_size=%d\n",
367 le32_to_cpu(packet->max_receive_size));
370 info->max_send_size = min_t(int, info->max_send_size,
371 le32_to_cpu(packet->max_receive_size));
373 if (le32_to_cpu(packet->max_fragmented_size) <
374 SMBD_MIN_FRAGMENTED_SIZE) {
375 log_rdma_event(ERR, "error: max_fragmented_size=%d\n",
376 le32_to_cpu(packet->max_fragmented_size));
379 info->max_fragmented_send_size =
380 le32_to_cpu(packet->max_fragmented_size);
381 info->rdma_readwrite_threshold =
382 rdma_readwrite_threshold > info->max_fragmented_send_size ?
383 info->max_fragmented_send_size :
384 rdma_readwrite_threshold;
387 info->max_readwrite_size = min_t(u32,
388 le32_to_cpu(packet->max_readwrite_size),
389 info->max_frmr_depth * PAGE_SIZE);
390 info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;
396 * Check and schedule sending an immediate packet
397 * This is used to extend credits to the remote peer to keep the transport busy
399 static void check_and_send_immediate(struct smbd_connection *info)
401 if (info->transport_status != SMBD_CONNECTED)
404 info->send_immediate = true;
407 * Promptly send a packet if our peer is running low on receive credits
410 if (atomic_read(&info->receive_credits) <
411 info->receive_credit_target - 1)
413 info->workqueue, &info->send_immediate_work, 0);
416 static void smbd_post_send_credits(struct work_struct *work)
419 int use_receive_queue = 1;
421 struct smbd_response *response;
422 struct smbd_connection *info =
423 container_of(work, struct smbd_connection,
424 post_send_credits_work);
426 if (info->transport_status != SMBD_CONNECTED) {
427 wake_up(&info->wait_receive_queues);
431 if (info->receive_credit_target >
432 atomic_read(&info->receive_credits)) {
434 if (use_receive_queue)
435 response = get_receive_buffer(info);
437 response = get_empty_queue_buffer(info);
439 /* now switch to the empty packet queue */
440 if (use_receive_queue) {
441 use_receive_queue = 0;
447 response->type = SMBD_TRANSFER_DATA;
448 response->first_segment = false;
449 rc = smbd_post_recv(info, response);
452 "post_recv failed rc=%d\n", rc);
453 put_receive_buffer(info, response);
461 spin_lock(&info->lock_new_credits_offered);
462 info->new_credits_offered += ret;
463 spin_unlock(&info->lock_new_credits_offered);
465 atomic_add(ret, &info->receive_credits);
467 /* Check if we can post a new receive and grant credits to the peer */
468 check_and_send_immediate(info);
471 static void smbd_recv_done_work(struct work_struct *work)
473 struct smbd_connection *info =
474 container_of(work, struct smbd_connection, recv_done_work);
477 * We may have new send credits granted from the remote peer
478 * If any sender is blocked on lack of credits, unblock it
480 if (atomic_read(&info->send_credits))
481 wake_up_interruptible(&info->wait_send_queue);
484 * Check if we need to send something to the remote peer to
485 * grant more credits or respond to a KEEP_ALIVE packet
487 check_and_send_immediate(info);
490 /* Called from softirq, when recv is done */
491 static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
493 struct smbd_data_transfer *data_transfer;
494 struct smbd_response *response =
495 container_of(wc->wr_cqe, struct smbd_response, cqe);
496 struct smbd_connection *info = response->info;
499 log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d "
500 "byte_len=%d pkey_index=%x\n",
501 response, response->type, wc->status, wc->opcode,
502 wc->byte_len, wc->pkey_index);
504 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
505 log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
506 wc->status, wc->opcode);
507 smbd_disconnect_rdma_connection(info);
511 ib_dma_sync_single_for_cpu(
514 response->sge.length,
517 switch (response->type) {
518 /* SMBD negotiation response */
519 case SMBD_NEGOTIATE_RESP:
520 dump_smbd_negotiate_resp(smbd_response_payload(response));
521 info->full_packet_received = true;
522 info->negotiate_done =
523 process_negotiation_response(response, wc->byte_len);
524 complete(&info->negotiate_completion);
527 /* SMBD data transfer packet */
528 case SMBD_TRANSFER_DATA:
529 data_transfer = smbd_response_payload(response);
530 data_length = le32_to_cpu(data_transfer->data_length);
533 * If this is a packet with data payload, place the data in the
534 * reassembly queue and wake up the reading thread
537 if (info->full_packet_received)
538 response->first_segment = true;
540 if (le32_to_cpu(data_transfer->remaining_data_length))
541 info->full_packet_received = false;
543 info->full_packet_received = true;
550 put_empty_packet(info, response);
553 wake_up_interruptible(&info->wait_reassembly_queue);
555 atomic_dec(&info->receive_credits);
556 info->receive_credit_target =
557 le16_to_cpu(data_transfer->credits_requested);
558 atomic_add(le16_to_cpu(data_transfer->credits_granted),
559 &info->send_credits);
561 log_incoming(INFO, "data flags %d data_offset %d "
562 "data_length %d remaining_data_length %d\n",
563 le16_to_cpu(data_transfer->flags),
564 le32_to_cpu(data_transfer->data_offset),
565 le32_to_cpu(data_transfer->data_length),
566 le32_to_cpu(data_transfer->remaining_data_length));
568 /* Send a KEEP_ALIVE response right away if requested */
569 info->keep_alive_requested = KEEP_ALIVE_NONE;
570 if (le16_to_cpu(data_transfer->flags) &
571 SMB_DIRECT_RESPONSE_REQUESTED) {
572 info->keep_alive_requested = KEEP_ALIVE_PENDING;
575 queue_work(info->workqueue, &info->recv_done_work);
580 "unexpected response type=%d\n", response->type);
584 put_receive_buffer(info, response);
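/* Create an RDMA CM ID for this connection and resolve the destination address and route */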
587 static struct rdma_cm_id *smbd_create_id(
588 struct smbd_connection *info,
589 struct sockaddr *dstaddr, int port)
591 struct rdma_cm_id *id;
595 id = rdma_create_id(&init_net, smbd_conn_upcall, info,
596 RDMA_PS_TCP, IB_QPT_RC);
599 log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc);
603 if (dstaddr->sa_family == AF_INET6)
604 sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port;
606 sport = &((struct sockaddr_in *)dstaddr)->sin_port;
608 *sport = htons(port);
610 init_completion(&info->ri_done);
611 info->ri_rc = -ETIMEDOUT;
613 rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr,
614 RDMA_RESOLVE_TIMEOUT);
616 log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
619 wait_for_completion_interruptible_timeout(
620 &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
623 log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
627 info->ri_rc = -ETIMEDOUT;
628 rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
630 log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
633 wait_for_completion_interruptible_timeout(
634 &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
637 log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
649 * Test if FRWR (Fast Registration Work Requests) is supported on the device
650 * This implementation requires FRWR for RDMA read/write
651 * return value: true if it is supported
653 static bool frwr_is_supported(struct ib_device_attr *attrs)
655 if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
657 if (attrs->max_fast_reg_page_list_len == 0)
662 static int smbd_ia_open(
663 struct smbd_connection *info,
664 struct sockaddr *dstaddr, int port)
668 info->id = smbd_create_id(info, dstaddr, port);
669 if (IS_ERR(info->id)) {
670 rc = PTR_ERR(info->id);
674 if (!frwr_is_supported(&info->id->device->attrs)) {
676 "Fast Registration Work Requests "
677 "(FRWR) is not supported\n");
679 "Device capability flags = %llx "
680 "max_fast_reg_page_list_len = %u\n",
681 info->id->device->attrs.device_cap_flags,
682 info->id->device->attrs.max_fast_reg_page_list_len);
683 rc = -EPROTONOSUPPORT;
686 info->max_frmr_depth = min_t(int,
688 info->id->device->attrs.max_fast_reg_page_list_len);
689 info->mr_type = IB_MR_TYPE_MEM_REG;
690 if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
691 info->mr_type = IB_MR_TYPE_SG_GAPS;
693 info->pd = ib_alloc_pd(info->id->device, 0);
694 if (IS_ERR(info->pd)) {
695 rc = PTR_ERR(info->pd);
696 log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
703 rdma_destroy_id(info->id);
711 * Send a negotiation request message to the peer
712 * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
713 * After negotiation, the transport is connected and ready for
714 * carrying upper layer SMB payload
716 static int smbd_post_send_negotiate_req(struct smbd_connection *info)
718 struct ib_send_wr send_wr;
720 struct smbd_request *request;
721 struct smbd_negotiate_req *packet;
723 request = mempool_alloc(info->request_mempool, GFP_KERNEL);
727 request->info = info;
729 packet = smbd_request_payload(request);
730 packet->min_version = cpu_to_le16(SMBD_V1);
731 packet->max_version = cpu_to_le16(SMBD_V1);
732 packet->reserved = 0;
733 packet->credits_requested = cpu_to_le16(info->send_credit_target);
734 packet->preferred_send_size = cpu_to_le32(info->max_send_size);
735 packet->max_receive_size = cpu_to_le32(info->max_receive_size);
736 packet->max_fragmented_size =
737 cpu_to_le32(info->max_fragmented_recv_size);
739 request->num_sge = 1;
740 request->sge[0].addr = ib_dma_map_single(
741 info->id->device, (void *)packet,
742 sizeof(*packet), DMA_TO_DEVICE);
743 if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
745 goto dma_mapping_failed;
748 request->sge[0].length = sizeof(*packet);
749 request->sge[0].lkey = info->pd->local_dma_lkey;
751 ib_dma_sync_single_for_device(
752 info->id->device, request->sge[0].addr,
753 request->sge[0].length, DMA_TO_DEVICE);
755 request->cqe.done = send_done;
758 send_wr.wr_cqe = &request->cqe;
759 send_wr.sg_list = request->sge;
760 send_wr.num_sge = request->num_sge;
761 send_wr.opcode = IB_WR_SEND;
762 send_wr.send_flags = IB_SEND_SIGNALED;
764 log_rdma_send(INFO, "sge addr=%llx length=%x lkey=%x\n",
765 request->sge[0].addr,
766 request->sge[0].length, request->sge[0].lkey);
768 request->has_payload = false;
769 atomic_inc(&info->send_pending);
770 rc = ib_post_send(info->id->qp, &send_wr, NULL);
774 /* if we reach here, post send failed */
775 log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
776 atomic_dec(&info->send_pending);
777 ib_dma_unmap_single(info->id->device, request->sge[0].addr,
778 request->sge[0].length, DMA_TO_DEVICE);
780 smbd_disconnect_rdma_connection(info);
783 mempool_free(request, info->request_mempool);
788 * Extend credits to the remote peer
789 * This implements [MS-SMBD] 3.1.5.9
790 * The idea is that we should extend credits to the remote peer as soon as
791 * it is allowed, to maintain data flow. We allocate as many receive
792 * buffers as possible, and extend the receive credits to the remote peer
793 * return value: the new credits being granted.
795 static int manage_credits_prior_sending(struct smbd_connection *info)
799 spin_lock(&info->lock_new_credits_offered);
800 new_credits = info->new_credits_offered;
801 info->new_credits_offered = 0;
802 spin_unlock(&info->lock_new_credits_offered);
808 * Check if we need to send a KEEP_ALIVE message
809 * The idle connection timer triggers a KEEP_ALIVE message when it expires
810 * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flag to have the peer send back a response
813 * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
816 static int manage_keep_alive_before_sending(struct smbd_connection *info)
818 if (info->keep_alive_requested == KEEP_ALIVE_PENDING) {
819 info->keep_alive_requested = KEEP_ALIVE_SENT;
826 * Build and prepare the SMBD packet header
827 * This function waits for available send credits and builds an SMBD packet
828 * header. The caller may then optionally append a payload to the packet after the header
831 * size: the size of the payload
832 * remaining_data_length: remaining data to send if this is part of a
835 * request_out: the request allocated from this function
836 * return values: 0 on success, otherwise actual error code returned
838 static int smbd_create_header(struct smbd_connection *info,
839 int size, int remaining_data_length,
840 struct smbd_request **request_out)
842 struct smbd_request *request;
843 struct smbd_data_transfer *packet;
847 /* Wait for send credits. A SMBD packet needs one credit */
848 rc = wait_event_interruptible(info->wait_send_queue,
849 atomic_read(&info->send_credits) > 0 ||
850 info->transport_status != SMBD_CONNECTED);
854 if (info->transport_status != SMBD_CONNECTED) {
855 log_outgoing(ERR, "disconnected not sending\n");
858 atomic_dec(&info->send_credits);
860 request = mempool_alloc(info->request_mempool, GFP_KERNEL);
866 request->info = info;
868 /* Fill in the packet header */
869 packet = smbd_request_payload(request);
870 packet->credits_requested = cpu_to_le16(info->send_credit_target);
871 packet->credits_granted =
872 cpu_to_le16(manage_credits_prior_sending(info));
873 info->send_immediate = false;
876 if (manage_keep_alive_before_sending(info))
877 packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
879 packet->reserved = 0;
881 packet->data_offset = 0;
883 packet->data_offset = cpu_to_le32(24);
884 packet->data_length = cpu_to_le32(size);
885 packet->remaining_data_length = cpu_to_le32(remaining_data_length);
888 log_outgoing(INFO, "credits_requested=%d credits_granted=%d "
889 "data_offset=%d data_length=%d remaining_data_length=%d\n",
890 le16_to_cpu(packet->credits_requested),
891 le16_to_cpu(packet->credits_granted),
892 le32_to_cpu(packet->data_offset),
893 le32_to_cpu(packet->data_length),
894 le32_to_cpu(packet->remaining_data_length));
896 /* Map the packet to DMA */
897 header_length = sizeof(struct smbd_data_transfer);
898 /* If this is a packet without payload, don't send padding */
900 header_length = offsetof(struct smbd_data_transfer, padding);
902 request->num_sge = 1;
903 request->sge[0].addr = ib_dma_map_single(info->id->device,
907 if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
908 mempool_free(request, info->request_mempool);
913 request->sge[0].length = header_length;
914 request->sge[0].lkey = info->pd->local_dma_lkey;
916 *request_out = request;
920 atomic_inc(&info->send_credits);
924 static void smbd_destroy_header(struct smbd_connection *info,
925 struct smbd_request *request)
928 ib_dma_unmap_single(info->id->device,
929 request->sge[0].addr,
930 request->sge[0].length,
932 mempool_free(request, info->request_mempool);
933 atomic_inc(&info->send_credits);
936 /* Post the send request */
937 static int smbd_post_send(struct smbd_connection *info,
938 struct smbd_request *request, bool has_payload)
940 struct ib_send_wr send_wr;
943 for (i = 0; i < request->num_sge; i++) {
945 "rdma_request sge[%d] addr=%llu length=%u\n",
946 i, request->sge[i].addr, request->sge[i].length);
947 ib_dma_sync_single_for_device(
949 request->sge[i].addr,
950 request->sge[i].length,
954 request->cqe.done = send_done;
957 send_wr.wr_cqe = &request->cqe;
958 send_wr.sg_list = request->sge;
959 send_wr.num_sge = request->num_sge;
960 send_wr.opcode = IB_WR_SEND;
961 send_wr.send_flags = IB_SEND_SIGNALED;
964 request->has_payload = true;
965 atomic_inc(&info->send_payload_pending);
967 request->has_payload = false;
968 atomic_inc(&info->send_pending);
971 rc = ib_post_send(info->id->qp, &send_wr, NULL);
973 log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
975 if (atomic_dec_and_test(&info->send_payload_pending))
976 wake_up(&info->wait_send_payload_pending);
978 if (atomic_dec_and_test(&info->send_pending))
979 wake_up(&info->wait_send_pending);
981 smbd_disconnect_rdma_connection(info);
983 /* Reset timer for idle connection after packet is sent */
984 mod_delayed_work(info->workqueue, &info->idle_timer_work,
985 info->keep_alive_interval*HZ);
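/*
 * Post a send described by a scatterlist: sge[0] carries the SMBD header
 * built by smbd_create_header(), and each scatterlist entry is mapped into
 * the following sge[] slots as payload.
 */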
990 static int smbd_post_send_sgl(struct smbd_connection *info,
991 struct scatterlist *sgl, int data_length, int remaining_data_length)
995 struct smbd_request *request;
996 struct scatterlist *sg;
998 rc = smbd_create_header(
999 info, data_length, remaining_data_length, &request);
1003 num_sgs = sgl ? sg_nents(sgl) : 0;
1004 for_each_sg(sgl, sg, num_sgs, i) {
1005 request->sge[i+1].addr =
1006 ib_dma_map_page(info->id->device, sg_page(sg),
1007 sg->offset, sg->length, DMA_BIDIRECTIONAL);
1008 if (ib_dma_mapping_error(
1009 info->id->device, request->sge[i+1].addr)) {
1011 request->sge[i+1].addr = 0;
1012 goto dma_mapping_failure;
1014 request->sge[i+1].length = sg->length;
1015 request->sge[i+1].lkey = info->pd->local_dma_lkey;
1019 rc = smbd_post_send(info, request, data_length);
1023 dma_mapping_failure:
1024 for (i = 1; i < request->num_sge; i++)
1025 if (request->sge[i].addr)
1026 ib_dma_unmap_single(info->id->device,
1027 request->sge[i].addr,
1028 request->sge[i].length,
1030 smbd_destroy_header(info, request);
1036 * page: the page to send
1037 * offset: offset in the page to send
1038 * size: length in the page to send
1039 * remaining_data_length: remaining data to send in this payload
1041 static int smbd_post_send_page(struct smbd_connection *info, struct page *page,
1042 unsigned long offset, size_t size, int remaining_data_length)
1044 struct scatterlist sgl;
1046 sg_init_table(&sgl, 1);
1047 sg_set_page(&sgl, page, size, offset);
1049 return smbd_post_send_sgl(info, &sgl, size, remaining_data_length);
1053 * Send an empty message
1054 * An empty message is used to extend credits to the peer for keepalive
1055 * while there is no upper layer payload to send at the time
1057 static int smbd_post_send_empty(struct smbd_connection *info)
1059 info->count_send_empty++;
1060 return smbd_post_send_sgl(info, NULL, 0, 0);
1064 * Send a data buffer
1065 * iov: the iov array describing the data buffers
1066 * n_vec: number of entries in the iov array
1067 * remaining_data_length: remaining data to send following this packet
1068 * in a segmented SMBD packet
1070 static int smbd_post_send_data(
1071 struct smbd_connection *info, struct kvec *iov, int n_vec,
1072 int remaining_data_length)
1075 u32 data_length = 0;
1076 struct scatterlist sgl[SMBDIRECT_MAX_SGE];
1078 if (n_vec > SMBDIRECT_MAX_SGE) {
1079 cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
1083 sg_init_table(sgl, n_vec);
1084 for (i = 0; i < n_vec; i++) {
1085 data_length += iov[i].iov_len;
1086 sg_set_buf(&sgl[i], iov[i].iov_base, iov[i].iov_len);
1089 return smbd_post_send_sgl(info, sgl, data_length, remaining_data_length);
1093 * Post a receive request to the transport
1094 * The remote peer can only send data when a receive request is posted
1095 * The interaction is controlled by the send/receive credit system
1097 static int smbd_post_recv(
1098 struct smbd_connection *info, struct smbd_response *response)
1100 struct ib_recv_wr recv_wr;
1103 response->sge.addr = ib_dma_map_single(
1104 info->id->device, response->packet,
1105 info->max_receive_size, DMA_FROM_DEVICE);
1106 if (ib_dma_mapping_error(info->id->device, response->sge.addr))
1109 response->sge.length = info->max_receive_size;
1110 response->sge.lkey = info->pd->local_dma_lkey;
1112 response->cqe.done = recv_done;
1114 recv_wr.wr_cqe = &response->cqe;
1115 recv_wr.next = NULL;
1116 recv_wr.sg_list = &response->sge;
1117 recv_wr.num_sge = 1;
1119 rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
1121 ib_dma_unmap_single(info->id->device, response->sge.addr,
1122 response->sge.length, DMA_FROM_DEVICE);
1123 smbd_disconnect_rdma_connection(info);
1124 log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
1130 /* Perform SMBD negotiation according to [MS-SMBD] 3.1.5.2 */
1131 static int smbd_negotiate(struct smbd_connection *info)
1134 struct smbd_response *response = get_receive_buffer(info);
1136 response->type = SMBD_NEGOTIATE_RESP;
1137 rc = smbd_post_recv(info, response);
1138 log_rdma_event(INFO,
1139 "smbd_post_recv rc=%d iov.addr=%llx iov.length=%x "
1141 rc, response->sge.addr,
1142 response->sge.length, response->sge.lkey);
1146 init_completion(&info->negotiate_completion);
1147 info->negotiate_done = false;
1148 rc = smbd_post_send_negotiate_req(info);
1152 rc = wait_for_completion_interruptible_timeout(
1153 &info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);
1154 log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc);
1156 if (info->negotiate_done)
1161 else if (rc == -ERESTARTSYS)
1169 static void put_empty_packet(
1170 struct smbd_connection *info, struct smbd_response *response)
1172 spin_lock(&info->empty_packet_queue_lock);
1173 list_add_tail(&response->list, &info->empty_packet_queue);
1174 info->count_empty_packet_queue++;
1175 spin_unlock(&info->empty_packet_queue_lock);
1177 queue_work(info->workqueue, &info->post_send_credits_work);
1181 * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
1182 * This is a queue for reassembling upper layer payload and presenting it to
1183 * the upper layer. All incoming payloads go to the reassembly queue, regardless
1184 * of whether reassembly is required. The upper layer code reads from the queue
1185 * for all incoming payloads.
1186 * Put a received packet on the reassembly queue
1187 * response: the packet received
1188 * data_length: the size of payload in this packet
1190 static void enqueue_reassembly(
1191 struct smbd_connection *info,
1192 struct smbd_response *response,
1195 spin_lock(&info->reassembly_queue_lock);
1196 list_add_tail(&response->list, &info->reassembly_queue);
1197 info->reassembly_queue_length++;
1199 * Make sure reassembly_data_length is updated after list and
1200 * reassembly_queue_length are updated. On the dequeue side
1201 * reassembly_data_length is checked without a lock to determine
1202 * if reassembly_queue_length and the list are up to date
1205 info->reassembly_data_length += data_length;
1206 spin_unlock(&info->reassembly_queue_lock);
1207 info->count_reassembly_queue++;
1208 info->count_enqueue_reassembly_queue++;
1212 * Get the first entry at the front of reassembly queue
1213 * Caller is responsible for locking
1214 * return value: the first entry if any, NULL if queue is empty
1216 static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
1218 struct smbd_response *ret = NULL;
1220 if (!list_empty(&info->reassembly_queue)) {
1221 ret = list_first_entry(
1222 &info->reassembly_queue,
1223 struct smbd_response, list);
1228 static struct smbd_response *get_empty_queue_buffer(
1229 struct smbd_connection *info)
1231 struct smbd_response *ret = NULL;
1232 unsigned long flags;
1234 spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
1235 if (!list_empty(&info->empty_packet_queue)) {
1236 ret = list_first_entry(
1237 &info->empty_packet_queue,
1238 struct smbd_response, list);
1239 list_del(&ret->list);
1240 info->count_empty_packet_queue--;
1242 spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);
1248 * Get a receive buffer
1249 * For each remote send, we need to post a receive. The receive buffers are
1250 * pre-allocated.
1251 * return value: the receive buffer, NULL if none is available
1253 static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
1255 struct smbd_response *ret = NULL;
1256 unsigned long flags;
1258 spin_lock_irqsave(&info->receive_queue_lock, flags);
1259 if (!list_empty(&info->receive_queue)) {
1260 ret = list_first_entry(
1261 &info->receive_queue,
1262 struct smbd_response, list);
1263 list_del(&ret->list);
1264 info->count_receive_queue--;
1265 info->count_get_receive_buffer++;
1267 spin_unlock_irqrestore(&info->receive_queue_lock, flags);
1273 * Return a receive buffer
1274 * Upon return of a receive buffer, we can post a new receive and extend
1275 * more receive credits to the remote peer. This is done immediately after a
1276 * receive buffer is returned.
1278 static void put_receive_buffer(
1279 struct smbd_connection *info, struct smbd_response *response)
1281 unsigned long flags;
1283 ib_dma_unmap_single(info->id->device, response->sge.addr,
1284 response->sge.length, DMA_FROM_DEVICE);
1286 spin_lock_irqsave(&info->receive_queue_lock, flags);
1287 list_add_tail(&response->list, &info->receive_queue);
1288 info->count_receive_queue++;
1289 info->count_put_receive_buffer++;
1290 spin_unlock_irqrestore(&info->receive_queue_lock, flags);
1292 queue_work(info->workqueue, &info->post_send_credits_work);
1295 /* Preallocate all receive buffers on transport establishment */
1296 static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
1299 struct smbd_response *response;
1301 INIT_LIST_HEAD(&info->reassembly_queue);
1302 spin_lock_init(&info->reassembly_queue_lock);
1303 info->reassembly_data_length = 0;
1304 info->reassembly_queue_length = 0;
1306 INIT_LIST_HEAD(&info->receive_queue);
1307 spin_lock_init(&info->receive_queue_lock);
1308 info->count_receive_queue = 0;
1310 INIT_LIST_HEAD(&info->empty_packet_queue);
1311 spin_lock_init(&info->empty_packet_queue_lock);
1312 info->count_empty_packet_queue = 0;
1314 init_waitqueue_head(&info->wait_receive_queues);
1316 for (i = 0; i < num_buf; i++) {
1317 response = mempool_alloc(info->response_mempool, GFP_KERNEL);
1319 goto allocate_failed;
1321 response->info = info;
1322 list_add_tail(&response->list, &info->receive_queue);
1323 info->count_receive_queue++;
1329 while (!list_empty(&info->receive_queue)) {
1330 response = list_first_entry(
1331 &info->receive_queue,
1332 struct smbd_response, list);
1333 list_del(&response->list);
1334 info->count_receive_queue--;
1336 mempool_free(response, info->response_mempool);
1341 static void destroy_receive_buffers(struct smbd_connection *info)
1343 struct smbd_response *response;
1345 while ((response = get_receive_buffer(info)))
1346 mempool_free(response, info->response_mempool);
1348 while ((response = get_empty_queue_buffer(info)))
1349 mempool_free(response, info->response_mempool);
1353 * Check and send an immediate or keep alive packet
1354 * The conditions for sending those packets are defined in [MS-SMBD] 3.1.1.1
1355 * Connection.KeepaliveRequested and Connection.SendImmediate
1356 * The idea is to extend credits to the server as soon as they become available
1358 static void send_immediate_work(struct work_struct *work)
1360 struct smbd_connection *info = container_of(
1361 work, struct smbd_connection,
1362 send_immediate_work.work);
1364 if (info->keep_alive_requested == KEEP_ALIVE_PENDING ||
1365 info->send_immediate) {
1366 log_keep_alive(INFO, "send an empty message\n");
1367 smbd_post_send_empty(info);
1371 /* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
1372 static void idle_connection_timer(struct work_struct *work)
1374 struct smbd_connection *info = container_of(
1375 work, struct smbd_connection,
1376 idle_timer_work.work);
1378 if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
1380 "error status info->keep_alive_requested=%d\n",
1381 info->keep_alive_requested);
1382 smbd_disconnect_rdma_connection(info);
1386 log_keep_alive(INFO, "about to send an empty idle message\n");
1387 smbd_post_send_empty(info);
1389 /* Setup the next idle timeout work */
1390 queue_delayed_work(info->workqueue, &info->idle_timer_work,
1391 info->keep_alive_interval*HZ);
1395 * Destroy the transport and related RDMA and memory resources
1396 * Need to go through all the pending counters and make sure no one is using
1397 * the transport while it is being destroyed
1399 void smbd_destroy(struct TCP_Server_Info *server)
1401 struct smbd_connection *info = server->smbd_conn;
1402 struct smbd_response *response;
1403 unsigned long flags;
1406 log_rdma_event(INFO, "rdma session already destroyed\n");
1410 log_rdma_event(INFO, "destroying rdma session\n");
1411 if (info->transport_status != SMBD_DISCONNECTED) {
1412 rdma_disconnect(server->smbd_conn->id);
1413 log_rdma_event(INFO, "wait for transport being disconnected\n");
1414 wait_event_interruptible(
1416 info->transport_status == SMBD_DISCONNECTED);
1419 log_rdma_event(INFO, "destroying qp\n");
1420 ib_drain_qp(info->id->qp);
1421 rdma_destroy_qp(info->id);
1423 log_rdma_event(INFO, "cancelling idle timer\n");
1424 cancel_delayed_work_sync(&info->idle_timer_work);
1425 log_rdma_event(INFO, "cancelling send immediate work\n");
1426 cancel_delayed_work_sync(&info->send_immediate_work);
1428 log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
1429 wait_event(info->wait_send_pending,
1430 atomic_read(&info->send_pending) == 0);
1431 wait_event(info->wait_send_payload_pending,
1432 atomic_read(&info->send_payload_pending) == 0);
1434 /* It's not possible for the upper layer to get to reassembly */
1435 log_rdma_event(INFO, "drain the reassembly queue\n");
1437 spin_lock_irqsave(&info->reassembly_queue_lock, flags);
1438 response = _get_first_reassembly(info);
1440 list_del(&response->list);
1441 spin_unlock_irqrestore(
1442 &info->reassembly_queue_lock, flags);
1443 put_receive_buffer(info, response);
1445 spin_unlock_irqrestore(
1446 &info->reassembly_queue_lock, flags);
1448 info->reassembly_data_length = 0;
1450 log_rdma_event(INFO, "free receive buffers\n");
1451 wait_event(info->wait_receive_queues,
1452 info->count_receive_queue + info->count_empty_packet_queue
1453 == info->receive_credit_max);
1454 destroy_receive_buffers(info);
1457 * For performance reasons, memory registration and deregistration
1458 * are not locked by srv_mutex. It is possible some processes are
1459 * blocked on transport srv_mutex while holding memory registration.
1460 * Release the transport srv_mutex to allow them to hit the failure
1461 * path when sending data, and then release memory registrations.
1463 log_rdma_event(INFO, "freeing mr list\n");
1464 wake_up_interruptible_all(&info->wait_mr);
1465 while (atomic_read(&info->mr_used_count)) {
1466 mutex_unlock(&server->srv_mutex);
1468 mutex_lock(&server->srv_mutex);
1470 destroy_mr_list(info);
1472 ib_free_cq(info->send_cq);
1473 ib_free_cq(info->recv_cq);
1474 ib_dealloc_pd(info->pd);
1475 rdma_destroy_id(info->id);
1478 mempool_destroy(info->request_mempool);
1479 kmem_cache_destroy(info->request_cache);
1481 mempool_destroy(info->response_mempool);
1482 kmem_cache_destroy(info->response_cache);
1484 info->transport_status = SMBD_DESTROYED;
1486 destroy_workqueue(info->workqueue);
1491 * Reconnect this SMBD connection, called from upper layer
1492 * return value: 0 on success, or actual error code
1494 int smbd_reconnect(struct TCP_Server_Info *server)
1496 log_rdma_event(INFO, "reconnecting rdma session\n");
1498 if (!server->smbd_conn) {
1499 log_rdma_event(INFO, "rdma session already destroyed\n");
1504 * This is possible if the transport is disconnected and we haven't received
1505 * notification from RDMA, but the upper layer has detected a timeout
1507 if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
1508 log_rdma_event(INFO, "disconnecting transport\n");
1509 smbd_destroy(server);
1513 log_rdma_event(INFO, "creating rdma session\n");
1514 server->smbd_conn = smbd_get_connection(
1515 server, (struct sockaddr *) &server->dstaddr);
1516 log_rdma_event(INFO, "created rdma session info=%p\n",
1519 return server->smbd_conn ? 0 : -ENOENT;
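/* Free the receive buffers, workqueue, mempools and slab caches of a connection */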
1522 static void destroy_caches_and_workqueue(struct smbd_connection *info)
1524 destroy_receive_buffers(info);
1525 destroy_workqueue(info->workqueue);
1526 mempool_destroy(info->response_mempool);
1527 kmem_cache_destroy(info->response_cache);
1528 mempool_destroy(info->request_mempool);
1529 kmem_cache_destroy(info->request_cache);
1532 #define MAX_NAME_LEN 80
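/*
 * Allocate the request/response slab caches and mempools, the per-connection
 * workqueue, and preallocate the receive buffers.
 */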
1533 static int allocate_caches_and_workqueue(struct smbd_connection *info)
1535 char name[MAX_NAME_LEN];
1538 scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
1539 info->request_cache =
1542 sizeof(struct smbd_request) +
1543 sizeof(struct smbd_data_transfer),
1544 0, SLAB_HWCACHE_ALIGN, NULL);
1545 if (!info->request_cache)
1548 info->request_mempool =
1549 mempool_create(info->send_credit_target, mempool_alloc_slab,
1550 mempool_free_slab, info->request_cache);
1551 if (!info->request_mempool)
1554 scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
1555 info->response_cache =
1558 sizeof(struct smbd_response) +
1559 info->max_receive_size,
1560 0, SLAB_HWCACHE_ALIGN, NULL);
1561 if (!info->response_cache)
1564 info->response_mempool =
1565 mempool_create(info->receive_credit_max, mempool_alloc_slab,
1566 mempool_free_slab, info->response_cache);
1567 if (!info->response_mempool)
1570 scnprintf(name, MAX_NAME_LEN, "smbd_%p", info);
1571 info->workqueue = create_workqueue(name);
1572 if (!info->workqueue)
1575 rc = allocate_receive_buffers(info, info->receive_credit_max);
1577 log_rdma_event(ERR, "failed to allocate receive buffers\n");
1584 destroy_workqueue(info->workqueue);
1586 mempool_destroy(info->response_mempool);
1588 kmem_cache_destroy(info->response_cache);
1590 mempool_destroy(info->request_mempool);
1592 kmem_cache_destroy(info->request_cache);
1596 /* Create a SMBD connection, called by upper layer */
1597 static struct smbd_connection *_smbd_get_connection(
1598 struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port)
1601 struct smbd_connection *info;
1602 struct rdma_conn_param conn_param;
1603 struct ib_qp_init_attr qp_attr;
1604 struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
1605 struct ib_port_immutable port_immutable;
1608 info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
1612 info->transport_status = SMBD_CONNECTING;
1613 rc = smbd_ia_open(info, dstaddr, port);
1615 log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
1616 goto create_id_failed;
1619 if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
1620 smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
1622 "consider lowering send_credit_target = %d. "
1623 "Possible CQE overrun, device "
1624 "reporting max_cpe %d max_qp_wr %d\n",
1625 smbd_send_credit_target,
1626 info->id->device->attrs.max_cqe,
1627 info->id->device->attrs.max_qp_wr);
1631 if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
1632 smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
1634 "consider lowering receive_credit_max = %d. "
1635 "Possible CQE overrun, device "
1636 "reporting max_cpe %d max_qp_wr %d\n",
1637 smbd_receive_credit_max,
1638 info->id->device->attrs.max_cqe,
1639 info->id->device->attrs.max_qp_wr);
1643 info->receive_credit_max = smbd_receive_credit_max;
1644 info->send_credit_target = smbd_send_credit_target;
1645 info->max_send_size = smbd_max_send_size;
1646 info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
1647 info->max_receive_size = smbd_max_receive_size;
1648 info->keep_alive_interval = smbd_keep_alive_interval;
1650 if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SGE) {
1652 "warning: device max_send_sge = %d too small\n",
1653 info->id->device->attrs.max_send_sge);
1654 log_rdma_event(ERR, "Queue Pair creation may fail\n");
1656 if (info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_SGE) {
1658 "warning: device max_recv_sge = %d too small\n",
1659 info->id->device->attrs.max_recv_sge);
1660 log_rdma_event(ERR, "Queue Pair creation may fail\n");
1663 info->send_cq = NULL;
1664 info->recv_cq = NULL;
1665 info->send_cq = ib_alloc_cq(info->id->device, info,
1666 info->send_credit_target, 0, IB_POLL_SOFTIRQ);
1667 if (IS_ERR(info->send_cq)) {
1668 info->send_cq = NULL;
1669 goto alloc_cq_failed;
1672 info->recv_cq = ib_alloc_cq(info->id->device, info,
1673 info->receive_credit_max, 0, IB_POLL_SOFTIRQ);
1674 if (IS_ERR(info->recv_cq)) {
1675 info->recv_cq = NULL;
1676 goto alloc_cq_failed;
1679 memset(&qp_attr, 0, sizeof(qp_attr));
1680 qp_attr.event_handler = smbd_qp_async_error_upcall;
1681 qp_attr.qp_context = info;
1682 qp_attr.cap.max_send_wr = info->send_credit_target;
1683 qp_attr.cap.max_recv_wr = info->receive_credit_max;
1684 qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SGE;
1685 qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_SGE;
1686 qp_attr.cap.max_inline_data = 0;
1687 qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
1688 qp_attr.qp_type = IB_QPT_RC;
1689 qp_attr.send_cq = info->send_cq;
1690 qp_attr.recv_cq = info->recv_cq;
1691 qp_attr.port_num = ~0;
1693 rc = rdma_create_qp(info->id, info->pd, &qp_attr);
1695 log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
1696 goto create_qp_failed;
1699 memset(&conn_param, 0, sizeof(conn_param));
1700 conn_param.initiator_depth = 0;
1702 conn_param.responder_resources =
1703 info->id->device->attrs.max_qp_rd_atom
1704 < SMBD_CM_RESPONDER_RESOURCES ?
1705 info->id->device->attrs.max_qp_rd_atom :
1706 SMBD_CM_RESPONDER_RESOURCES;
1707 info->responder_resources = conn_param.responder_resources;
1708 log_rdma_mr(INFO, "responder_resources=%d\n",
1709 info->responder_resources);
1711 /* Need to send IRD/ORD in private data for iWARP */
1712 info->id->device->ops.get_port_immutable(
1713 info->id->device, info->id->port_num, &port_immutable);
1714 if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
1715 ird_ord_hdr[0] = info->responder_resources;
1717 conn_param.private_data = ird_ord_hdr;
1718 conn_param.private_data_len = sizeof(ird_ord_hdr);
1720 conn_param.private_data = NULL;
1721 conn_param.private_data_len = 0;
1724 conn_param.retry_count = SMBD_CM_RETRY;
1725 conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
1726 conn_param.flow_control = 0;
1728 log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
1729 &addr_in->sin_addr, port);
1731 init_waitqueue_head(&info->conn_wait);
1732 init_waitqueue_head(&info->disconn_wait);
1733 init_waitqueue_head(&info->wait_reassembly_queue);
1734 rc = rdma_connect(info->id, &conn_param);
1736 log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
1737 goto rdma_connect_failed;
1740 wait_event_interruptible(
1741 info->conn_wait, info->transport_status != SMBD_CONNECTING);
1743 if (info->transport_status != SMBD_CONNECTED) {
1744 log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
1745 goto rdma_connect_failed;
1748 log_rdma_event(INFO, "rdma_connect connected\n");
1750 rc = allocate_caches_and_workqueue(info);
1752 log_rdma_event(ERR, "cache allocation failed\n");
1753 goto allocate_cache_failed;
1756 init_waitqueue_head(&info->wait_send_queue);
1757 INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
1758 INIT_DELAYED_WORK(&info->send_immediate_work, send_immediate_work);
1759 queue_delayed_work(info->workqueue, &info->idle_timer_work,
1760 info->keep_alive_interval*HZ);
1762 init_waitqueue_head(&info->wait_send_pending);
1763 atomic_set(&info->send_pending, 0);
1765 init_waitqueue_head(&info->wait_send_payload_pending);
1766 atomic_set(&info->send_payload_pending, 0);
1768 INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
1769 INIT_WORK(&info->recv_done_work, smbd_recv_done_work);
1770 INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
1771 info->new_credits_offered = 0;
1772 spin_lock_init(&info->lock_new_credits_offered);
1774 rc = smbd_negotiate(info);
1776 log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc);
1777 goto negotiation_failed;
1780 rc = allocate_mr_list(info);
1782 log_rdma_mr(ERR, "memory registration allocation failed\n");
1783 goto allocate_mr_failed;
1789 /* At this point, we need a full transport shutdown */
1790 smbd_destroy(server);
1794 cancel_delayed_work_sync(&info->idle_timer_work);
1795 destroy_caches_and_workqueue(info);
1796 info->transport_status = SMBD_NEGOTIATE_FAILED;
1797 init_waitqueue_head(&info->conn_wait);
1798 rdma_disconnect(info->id);
1799 wait_event(info->conn_wait,
1800 info->transport_status == SMBD_DISCONNECTED);
1802 allocate_cache_failed:
1803 rdma_connect_failed:
1804 rdma_destroy_qp(info->id);
1809 ib_free_cq(info->send_cq);
1811 ib_free_cq(info->recv_cq);
1814 ib_dealloc_pd(info->pd);
1815 rdma_destroy_id(info->id);
1822 struct smbd_connection *smbd_get_connection(
1823 struct TCP_Server_Info *server, struct sockaddr *dstaddr)
1825 struct smbd_connection *ret;
1826 int port = SMBD_PORT;
1829 ret = _smbd_get_connection(server, dstaddr, port);
1831 /* Try SMB_PORT if SMBD_PORT doesn't work */
1832 if (!ret && port == SMBD_PORT) {
1840 * Receive data from the receive reassembly queue
1841 * All the incoming data packets are placed in the reassembly queue
1842 * buf: the buffer to read data into
1843 * size: the length of data to read
1844 * return value: actual data read
1845 * Note: this implementation copies the data from the reassembly queue to receive
1846 * buffers used by the upper layer. This is not the optimal code path. A better way
1847 * to do it is to not have the upper layer allocate its receive buffers but rather
1848 * borrow the buffer from the reassembly queue, and return it after the data is
1849 * consumed. But this will require more changes to the upper layer code, and it also
1850 * needs to consider packet boundaries while they are still being reassembled.
1852 static int smbd_recv_buf(struct smbd_connection *info, char *buf,
1855 struct smbd_response *response;
1856 struct smbd_data_transfer *data_transfer;
1857 int to_copy, to_read, data_read, offset;
1858 u32 data_length, remaining_data_length, data_offset;
1863 * No need to hold the reassembly queue lock all the time as we are
1864 * the only one reading from the front of the queue. The transport
1865 * may add more entries to the back of the queue at the same time
1867 log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
1868 info->reassembly_data_length);
1869 if (info->reassembly_data_length >= size) {
1871 int queue_removed = 0;
1874 * Need to make sure reassembly_data_length is read before
1875 * reading reassembly_queue_length and calling
1876 * _get_first_reassembly. This call is lock free
1877 * as we never read at the end of the queue, which is being
1878 * updated in SOFTIRQ context as more data is received
1881 queue_length = info->reassembly_queue_length;
1884 offset = info->first_entry_offset;
1885 while (data_read < size) {
1886 response = _get_first_reassembly(info);
1887 data_transfer = smbd_response_payload(response);
1888 data_length = le32_to_cpu(data_transfer->data_length);
1889 remaining_data_length =
1891 data_transfer->remaining_data_length);
1892 data_offset = le32_to_cpu(data_transfer->data_offset);
1895 * The upper layer expects RFC1002 length at the
1896 * beginning of the payload. Return it to indicate
1897 * the total length of the packet. This minimizes the
1898 * change to upper layer packet processing logic. This
1899 * will eventually be removed when an intermediate
1900 * transport layer is added
1902 if (response->first_segment && size == 4) {
1903 unsigned int rfc1002_len =
1904 data_length + remaining_data_length;
1905 *((__be32 *)buf) = cpu_to_be32(rfc1002_len);
1907 response->first_segment = false;
1908 log_read(INFO, "returning rfc1002 length %d\n",
1910 goto read_rfc1002_done;
1913 to_copy = min_t(int, data_length - offset, to_read);
1916 (char *)data_transfer + data_offset + offset,
1919 /* move on to the next buffer? */
1920 if (to_copy == data_length - offset) {
1923 * No need to lock if we are not at the
1927 list_del(&response->list);
1930 &info->reassembly_queue_lock);
1931 list_del(&response->list);
1933 &info->reassembly_queue_lock);
1936 info->count_reassembly_queue--;
1937 info->count_dequeue_reassembly_queue++;
1938 put_receive_buffer(info, response);
1940 log_read(INFO, "put_receive_buffer offset=0\n");
1945 data_read += to_copy;
1947 log_read(INFO, "_get_first_reassembly memcpy %d bytes "
1948 "data_transfer_length-offset=%d after that "
1949 "to_read=%d data_read=%d offset=%d\n",
1950 to_copy, data_length - offset,
1951 to_read, data_read, offset);
1954 spin_lock_irq(&info->reassembly_queue_lock);
1955 info->reassembly_data_length -= data_read;
1956 info->reassembly_queue_length -= queue_removed;
1957 spin_unlock_irq(&info->reassembly_queue_lock);
1959 info->first_entry_offset = offset;
1960 log_read(INFO, "returning to thread data_read=%d "
1961 "reassembly_data_length=%d first_entry_offset=%d\n",
1962 data_read, info->reassembly_data_length,
1963 info->first_entry_offset);
1968 log_read(INFO, "wait_event on more data\n");
1969 rc = wait_event_interruptible(
1970 info->wait_reassembly_queue,
1971 info->reassembly_data_length >= size ||
1972 info->transport_status != SMBD_CONNECTED);
1973 /* Don't return any data if interrupted */
1977 if (info->transport_status != SMBD_CONNECTED) {
1978 log_read(ERR, "disconnected\n");
1986 * Receive a page from receive reassembly queue
1987 * page: the page to read data into
1988 * to_read: the length of data to read
1989 * return value: actual data read
1991 static int smbd_recv_page(struct smbd_connection *info,
1992 struct page *page, unsigned int page_offset,
1993 unsigned int to_read)
1999 /* make sure we have the page ready for read */
2000 ret = wait_event_interruptible(
2001 info->wait_reassembly_queue,
2002 info->reassembly_data_length >= to_read ||
2003 info->transport_status != SMBD_CONNECTED);
2007 /* now we can read from reassembly queue and not sleep */
2008 page_address = kmap_atomic(page);
2009 to_address = (char *) page_address + page_offset;
2011 log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
2012 page, to_address, to_read);
2014 ret = smbd_recv_buf(info, to_address, to_read);
2015 kunmap_atomic(page_address);
2021 * Receive data from transport
2022 * msg: a msghdr pointing to the buffer, can be ITER_KVEC or ITER_BVEC
2023 * return: total bytes read, or 0. SMB Direct will not do partial read.
2025 int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
2029 unsigned int to_read, page_offset;
2032 if (iov_iter_rw(&msg->msg_iter) == WRITE) {
2033 /* It's a bug in the upper layer to get here */
2034 cifs_dbg(VFS, "CIFS: invalid msg iter dir %u\n",
2035 iov_iter_rw(&msg->msg_iter));
2040 switch (iov_iter_type(&msg->msg_iter)) {
2042 buf = msg->msg_iter.kvec->iov_base;
2043 to_read = msg->msg_iter.kvec->iov_len;
2044 rc = smbd_recv_buf(info, buf, to_read);
2048 page = msg->msg_iter.bvec->bv_page;
2049 page_offset = msg->msg_iter.bvec->bv_offset;
2050 to_read = msg->msg_iter.bvec->bv_len;
2051 rc = smbd_recv_page(info, page, page_offset, to_read);
		/* It's a bug in the upper layer to get here */
2056 cifs_dbg(VFS, "CIFS: invalid msg type %d\n",
2057 iov_iter_type(&msg->msg_iter));
2062 /* SMBDirect will read it all or nothing */
2064 msg->msg_iter.count = 0;
/*
 * Send data to the transport
 * Each rqst is transported as an SMBDirect payload
 * rqst: the data to write
 * return value: 0 if successfully written, otherwise error code
 */
2074 int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
2076 struct smbd_connection *info = server->smbd_conn;
2080 unsigned int buflen, remaining_data_length;
	int max_iov_size =
		info->max_send_size - sizeof(struct smbd_data_transfer);
2087 if (info->transport_status != SMBD_CONNECTED) {
	/*
	 * Skip the RFC1002 length defined in MS-SMB2 section 2.1.
	 * It is used only for TCP transport in the iov[0].
	 * In the future we may want to add a transport layer under the
	 * protocol layer, so this will only be issued to the TCP transport.
	 */
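	/*
	 * rq_iov[0] therefore carries only the 4-byte length placeholder and
	 * is not transmitted over SMBD; the payload proper starts at
	 * rq_iov[1] (see iov = &rqst->rq_iov[1] below).
	 */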
2099 if (rqst->rq_iov[0].iov_len != 4) {
2100 log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
2105 * Add in the page array if there is one. The caller needs to set
2106 * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
2107 * ends at page boundary
2109 buflen = smb_rqst_len(server, rqst);
2111 if (buflen + sizeof(struct smbd_data_transfer) >
2112 info->max_fragmented_send_size) {
2113 log_write(ERR, "payload size %d > max size %d\n",
2114 buflen, info->max_fragmented_send_size);
2119 iov = &rqst->rq_iov[1];
2121 cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
2122 for (i = 0; i < rqst->rq_nvec-1; i++)
2123 dump_smb(iov[i].iov_base, iov[i].iov_len);
2125 remaining_data_length = buflen;
2127 log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
2128 "rq_tailsz=%d buflen=%d\n",
2129 rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
2130 rqst->rq_tailsz, buflen);
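	/*
	 * Packing strategy for the iov array: accumulate consecutive iovs
	 * until adding one more would exceed max_iov_size (max_send_size
	 * minus the smbd_data_transfer header), then send the batch with
	 * smbd_post_send_data(). A single iov larger than max_iov_size is
	 * split into nvecs chunks and each chunk is sent on its own.
	 * remaining_data_length tells the peer how much payload is still to
	 * come after each send.
	 */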
2132 start = i = iov[0].iov_len ? 0 : 1;
2135 buflen += iov[i].iov_len;
2136 if (buflen > max_iov_size) {
2138 remaining_data_length -=
2139 (buflen-iov[i].iov_len);
				log_write(INFO, "sending iov[] from start=%d i=%d nvecs=%d remaining_data_length=%d\n",
					start, i, i - start,
					remaining_data_length);
2145 rc = smbd_post_send_data(
2146 info, &iov[start], i-start,
2147 remaining_data_length);
2151 /* iov[start] is too big, break it */
2152 nvecs = (buflen+max_iov_size-1)/max_iov_size;
2153 log_write(INFO, "iov[%d] iov_base=%p buflen=%d"
2154 " break to %d vectors\n",
					start, iov[start].iov_base,
					buflen, nvecs);
				for (j = 0; j < nvecs; j++) {
					vec.iov_base =
						(char *)iov[start].iov_base +
						j * max_iov_size;
					vec.iov_len = max_iov_size;
					if (j == nvecs - 1)
						vec.iov_len =
							buflen -
							max_iov_size * (nvecs - 1);
					remaining_data_length -= vec.iov_len;
					log_write(INFO,
						"sending vec j=%d iov_base=%p iov_len=%zu remaining_data_length=%d\n",
						j, vec.iov_base, vec.iov_len,
						remaining_data_length);
					rc = smbd_post_send_data(
						info, &vec, 1,
						remaining_data_length);
2180 if (i == rqst->rq_nvec-1)
2187 if (i == rqst->rq_nvec-1) {
2188 /* send out all remaining vecs */
2189 remaining_data_length -= buflen;
2191 "sending iov[] from start=%d i=%d "
2192 "nvecs=%d remaining_data_length=%d\n",
2194 remaining_data_length);
2195 rc = smbd_post_send_data(info, &iov[start],
2196 i-start, remaining_data_length);
2202 log_write(INFO, "looping i=%d buflen=%d\n", i, buflen);
2205 /* now sending pages if there are any */
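	/*
	 * Pages are handled the same way: rqst_page_get_length() gives each
	 * page's length and offset, and a page longer than max_iov_size is
	 * split into nvecs chunks, each posted with smbd_post_send_page().
	 */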
2206 for (i = 0; i < rqst->rq_npages; i++) {
2207 unsigned int offset;
2209 rqst_page_get_length(rqst, i, &buflen, &offset);
2210 nvecs = (buflen + max_iov_size - 1) / max_iov_size;
		log_write(INFO, "sending pages buflen=%d nvecs=%d\n",
			buflen, nvecs);
2213 for (j = 0; j < nvecs; j++) {
2214 size = max_iov_size;
			if (j == nvecs - 1)
				size = buflen - j * max_iov_size;
2217 remaining_data_length -= size;
2218 log_write(INFO, "sending pages i=%d offset=%d size=%d"
2219 " remaining_data_length=%d\n",
2220 i, j*max_iov_size+offset, size,
2221 remaining_data_length);
2222 rc = smbd_post_send_page(
2223 info, rqst->rq_pages[i],
2224 j*max_iov_size + offset,
2225 size, remaining_data_length);
	/*
	 * As an optimization, we don't wait for individual I/O to finish
	 * before sending the next one.
	 * Send them all and wait for the pending send count to get to 0;
	 * that means all the I/Os have gone out and we are good to return.
	 */
2239 wait_event(info->wait_send_payload_pending,
2240 atomic_read(&info->send_payload_pending) == 0);
static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (wc->status) {
		struct smbd_mr *mr = container_of(wc->wr_cqe, struct smbd_mr, cqe);
		log_rdma_mr(ERR, "status=%d\n", wc->status);
		smbd_disconnect_rdma_connection(mr->conn);
	}
}
/*
 * The work queue function that recovers MRs
 * We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used
 * again. Both calls are slow, so finish them in a workqueue. This will not
 * block the I/O path.
 * There is one workqueue that recovers MRs; there is no need to lock as the
 * I/O requests calling smbd_register_mr will never update the links in
 * mr_list.
 */
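/*
 * MR lifecycle, for reference: allocate_mr_list() creates MRs in MR_READY;
 * get_mr() hands one out as MR_REGISTERED; after I/O it becomes
 * MR_INVALIDATED (locally or remotely invalidated) or MR_ERROR, and this
 * work function returns it to MR_READY.
 */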
2267 static void smbd_mr_recovery_work(struct work_struct *work)
2269 struct smbd_connection *info =
2270 container_of(work, struct smbd_connection, mr_recovery_work);
2271 struct smbd_mr *smbdirect_mr;
2274 list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
		if (smbdirect_mr->state == MR_INVALIDATED)
			ib_dma_unmap_sg(
				info->id->device, smbdirect_mr->sgl,
				smbdirect_mr->sgl_count,
				smbdirect_mr->dir);
2280 else if (smbdirect_mr->state == MR_ERROR) {
2282 /* recover this MR entry */
2283 rc = ib_dereg_mr(smbdirect_mr->mr);
2286 "ib_dereg_mr failed rc=%x\n",
2288 smbd_disconnect_rdma_connection(info);
2292 smbdirect_mr->mr = ib_alloc_mr(
2293 info->pd, info->mr_type,
2294 info->max_frmr_depth);
2295 if (IS_ERR(smbdirect_mr->mr)) {
2297 "ib_alloc_mr failed mr_type=%x "
2298 "max_frmr_depth=%x\n",
2300 info->max_frmr_depth);
				smbd_disconnect_rdma_connection(info);
				continue;
			}
		} else
			/* This MR is being used, don't recover it */
			continue;
2308 smbdirect_mr->state = MR_READY;
		/* smbdirect_mr->state is updated by this function
		 * and is read and updated by I/O issuing CPUs trying
		 * to get an MR, the call to atomic_inc_return
		 * implies a memory barrier and guarantees this
		 * value is updated before waking up any calls to
		 * get_mr() from the I/O issuing CPUs
		 */
2317 if (atomic_inc_return(&info->mr_ready_count) == 1)
2318 wake_up_interruptible(&info->wait_mr);
2322 static void destroy_mr_list(struct smbd_connection *info)
2324 struct smbd_mr *mr, *tmp;
2326 cancel_work_sync(&info->mr_recovery_work);
2327 list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
2328 if (mr->state == MR_INVALIDATED)
2329 ib_dma_unmap_sg(info->id->device, mr->sgl,
2330 mr->sgl_count, mr->dir);
		ib_dereg_mr(mr->mr);
		kfree(mr->sgl);
		kfree(mr);
	}
/*
 * Allocate MRs used for RDMA read/write
 * The number of MRs will not exceed hardware capability in responder_resources
 * All MRs are kept in mr_list. The MR can be recovered after it's used
 * Recovery is done in smbd_mr_recovery_work. The content of list entry changes
 * as MRs are used and recovered for I/O, but the list links will not change
 */
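/*
 * Each connection pre-allocates 2 * responder_resources MRs (see the loop
 * below), and each MR can map up to max_frmr_depth pages per registration.
 */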
2344 static int allocate_mr_list(struct smbd_connection *info)
2347 struct smbd_mr *smbdirect_mr, *tmp;
2349 INIT_LIST_HEAD(&info->mr_list);
2350 init_waitqueue_head(&info->wait_mr);
2351 spin_lock_init(&info->mr_list_lock);
2352 atomic_set(&info->mr_ready_count, 0);
2353 atomic_set(&info->mr_used_count, 0);
2354 init_waitqueue_head(&info->wait_for_mr_cleanup);
2355 /* Allocate more MRs (2x) than hardware responder_resources */
2356 for (i = 0; i < info->responder_resources * 2; i++) {
2357 smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
2360 smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
2361 info->max_frmr_depth);
2362 if (IS_ERR(smbdirect_mr->mr)) {
2363 log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x "
2364 "max_frmr_depth=%x\n",
2365 info->mr_type, info->max_frmr_depth);
2368 smbdirect_mr->sgl = kcalloc(
2369 info->max_frmr_depth,
			sizeof(struct scatterlist),
			GFP_KERNEL);
2372 if (!smbdirect_mr->sgl) {
2373 log_rdma_mr(ERR, "failed to allocate sgl\n");
2374 ib_dereg_mr(smbdirect_mr->mr);
2377 smbdirect_mr->state = MR_READY;
2378 smbdirect_mr->conn = info;
2380 list_add_tail(&smbdirect_mr->list, &info->mr_list);
2381 atomic_inc(&info->mr_ready_count);
2383 INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
2387 kfree(smbdirect_mr);
2389 list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
2390 ib_dereg_mr(smbdirect_mr->mr);
2391 kfree(smbdirect_mr->sgl);
2392 kfree(smbdirect_mr);
/*
 * Get an MR from mr_list. This function waits until there is at least one
 * MR available in the list. It may access the list while the
 * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock
 * as they never modify the same places. However, there may be several CPUs
 * issuing I/O and trying to get an MR at the same time; mr_list_lock is used
 * to protect against that.
 */
2405 static struct smbd_mr *get_mr(struct smbd_connection *info)
2407 struct smbd_mr *ret;
2410 rc = wait_event_interruptible(info->wait_mr,
2411 atomic_read(&info->mr_ready_count) ||
2412 info->transport_status != SMBD_CONNECTED);
	if (rc) {
		log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
		return NULL;
	}
2418 if (info->transport_status != SMBD_CONNECTED) {
2419 log_rdma_mr(ERR, "info->transport_status=%x\n",
			info->transport_status);
		return NULL;
	}
2424 spin_lock(&info->mr_list_lock);
2425 list_for_each_entry(ret, &info->mr_list, list) {
2426 if (ret->state == MR_READY) {
2427 ret->state = MR_REGISTERED;
2428 spin_unlock(&info->mr_list_lock);
2429 atomic_dec(&info->mr_ready_count);
			atomic_inc(&info->mr_used_count);
			return ret;
		}
	}
2435 spin_unlock(&info->mr_list_lock);
	/*
	 * It is possible that we could fail to get an MR because other
	 * processes may try to acquire one at the same time. If this is the
	 * case, retry it.
	 */
/*
 * Register memory for RDMA read/write
 * pages[]: the list of pages to register memory with
 * num_pages: the number of pages to register
 * tailsz: if non-zero, the bytes to register in the last page
 * writing: true if this is an RDMA write (SMB read), false for RDMA read
 * need_invalidate: true if this MR needs to be locally invalidated after I/O
 * return value: the MR registered, NULL if failed.
 */
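/*
 * Typical use (SMB2 read/write RDMA offload): the caller registers the data
 * pages, advertises the resulting ib_mr's rkey, iova and length to the server
 * as a Buffer Descriptor V1 in the SMB2 READ/WRITE request, and calls
 * smbd_deregister_mr() once the server has completed the RDMA transfer.
 */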
2452 struct smbd_mr *smbd_register_mr(
2453 struct smbd_connection *info, struct page *pages[], int num_pages,
2454 int offset, int tailsz, bool writing, bool need_invalidate)
2456 struct smbd_mr *smbdirect_mr;
2458 enum dma_data_direction dir;
2459 struct ib_reg_wr *reg_wr;
2461 if (num_pages > info->max_frmr_depth) {
2462 log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
			num_pages, info->max_frmr_depth);
		return NULL;
	}
2467 smbdirect_mr = get_mr(info);
2468 if (!smbdirect_mr) {
		log_rdma_mr(ERR, "get_mr returning NULL\n");
		return NULL;
	}
2472 smbdirect_mr->need_invalidate = need_invalidate;
2473 smbdirect_mr->sgl_count = num_pages;
2474 sg_init_table(smbdirect_mr->sgl, num_pages);
2476 log_rdma_mr(INFO, "num_pages=0x%x offset=0x%x tailsz=0x%x\n",
2477 num_pages, offset, tailsz);
2479 if (num_pages == 1) {
2480 sg_set_page(&smbdirect_mr->sgl[0], pages[0], tailsz, offset);
2481 goto skip_multiple_pages;
2484 /* We have at least two pages to register */
	sg_set_page(
		&smbdirect_mr->sgl[0], pages[0], PAGE_SIZE - offset, offset);
	i = 1;
2488 while (i < num_pages - 1) {
		sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
		i++;
	}
2492 sg_set_page(&smbdirect_mr->sgl[i], pages[i],
2493 tailsz ? tailsz : PAGE_SIZE, 0);
2495 skip_multiple_pages:
2496 dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2497 smbdirect_mr->dir = dir;
2498 rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);
	if (!rc) {
		log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
			num_pages, dir, rc);
		goto dma_map_error;
	}
	rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
		NULL, PAGE_SIZE);
2507 if (rc != num_pages) {
2509 "ib_map_mr_sg failed rc = %d num_pages = %x\n",
2514 ib_update_fast_reg_key(smbdirect_mr->mr,
2515 ib_inc_rkey(smbdirect_mr->mr->rkey));
2516 reg_wr = &smbdirect_mr->wr;
2517 reg_wr->wr.opcode = IB_WR_REG_MR;
2518 smbdirect_mr->cqe.done = register_mr_done;
2519 reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;
2520 reg_wr->wr.num_sge = 0;
2521 reg_wr->wr.send_flags = IB_SEND_SIGNALED;
2522 reg_wr->mr = smbdirect_mr->mr;
2523 reg_wr->key = smbdirect_mr->mr->rkey;
2524 reg_wr->access = writing ?
2525 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
2526 IB_ACCESS_REMOTE_READ;
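	/*
	 * Access flags: when the remote peer will RDMA-write into this buffer
	 * (an SMB read), grant REMOTE_WRITE plus the LOCAL_WRITE it requires;
	 * when the peer will RDMA-read from it (an SMB write), REMOTE_READ is
	 * sufficient.
	 */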
	/*
	 * There is no need to wait for completion of ib_post_send
	 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
	 * on the next ib_post_send when we actually send I/O to the remote peer
	 */
	rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
	if (!rc)
		return smbdirect_mr;
	log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n",
		rc, reg_wr->key);

	/* If all failed, attempt to recover this MR by setting it MR_ERROR */
2542 ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl,
2543 smbdirect_mr->sgl_count, smbdirect_mr->dir);
2546 smbdirect_mr->state = MR_ERROR;
2547 if (atomic_dec_and_test(&info->mr_used_count))
2548 wake_up(&info->wait_for_mr_cleanup);
2550 smbd_disconnect_rdma_connection(info);
2555 static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
2557 struct smbd_mr *smbdirect_mr;
	smbdirect_mr = container_of(wc->wr_cqe, struct smbd_mr, cqe);
2562 smbdirect_mr->state = MR_INVALIDATED;
2563 if (wc->status != IB_WC_SUCCESS) {
2564 log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status);
2565 smbdirect_mr->state = MR_ERROR;
2567 complete(&smbdirect_mr->invalidate_done);
/*
 * Deregister an MR after I/O is done
 * This function may wait if remote invalidation is not used
 * and we have to locally invalidate the buffer to prevent the data from being
 * modified by the remote peer after the upper layer consumes it
 */
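/*
 * Two paths through this function: if the server did not remotely invalidate
 * the MR (need_invalidate is set), post IB_WR_LOCAL_INV and wait for
 * local_inv_done() to complete it; otherwise just mark the MR MR_INVALIDATED.
 * Either way the MR is then handed to mr_recovery_work for reuse.
 */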
2576 int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
2578 struct ib_send_wr *wr;
2579 struct smbd_connection *info = smbdirect_mr->conn;
2582 if (smbdirect_mr->need_invalidate) {
2583 /* Need to finish local invalidation before returning */
2584 wr = &smbdirect_mr->inv_wr;
2585 wr->opcode = IB_WR_LOCAL_INV;
2586 smbdirect_mr->cqe.done = local_inv_done;
2587 wr->wr_cqe = &smbdirect_mr->cqe;
2589 wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey;
2590 wr->send_flags = IB_SEND_SIGNALED;
2592 init_completion(&smbdirect_mr->invalidate_done);
2593 rc = ib_post_send(info->id->qp, wr, NULL);
2595 log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
2596 smbd_disconnect_rdma_connection(info);
2599 wait_for_completion(&smbdirect_mr->invalidate_done);
		smbdirect_mr->need_invalidate = false;
	} else
		/*
		 * For remote invalidation, just set it to MR_INVALIDATED
		 * and defer to mr_recovery_work to recover the MR for next use
		 */
2606 smbdirect_mr->state = MR_INVALIDATED;
	/*
	 * Schedule the work to do MR recovery for future I/Os
	 * MR recovery is slow and we don't want it to block the current I/O
	 */
2612 queue_work(info->workqueue, &info->mr_recovery_work);
2615 if (atomic_dec_and_test(&info->mr_used_count))
2616 wake_up(&info->wait_for_mr_cleanup);