// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017, Microsoft Corporation.
 *
 * Author(s): Long Li <longli@microsoft.com>
 */
#include <linux/module.h>
#include <linux/highmem.h>
#include "smbdirect.h"
#include "cifs_debug.h"
#include "cifsproto.h"
#include "smb2proto.h"
static struct smbd_response *get_empty_queue_buffer(
		struct smbd_connection *info);
static struct smbd_response *get_receive_buffer(
		struct smbd_connection *info);
static void put_receive_buffer(
		struct smbd_connection *info,
		struct smbd_response *response);
static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
static void destroy_receive_buffers(struct smbd_connection *info);

static void put_empty_packet(
		struct smbd_connection *info, struct smbd_response *response);
static void enqueue_reassembly(
		struct smbd_connection *info,
		struct smbd_response *response, int data_length);
static struct smbd_response *_get_first_reassembly(
		struct smbd_connection *info);

static int smbd_post_recv(
		struct smbd_connection *info,
		struct smbd_response *response);

static int smbd_post_send_empty(struct smbd_connection *info);
static int smbd_post_send_data(
		struct smbd_connection *info,
		struct kvec *iov, int n_vec, int remaining_data_length);
static int smbd_post_send_page(struct smbd_connection *info,
		struct page *page, unsigned long offset,
		size_t size, int remaining_data_length);

static void destroy_mr_list(struct smbd_connection *info);
static int allocate_mr_list(struct smbd_connection *info);
/* SMBD version number */
#define SMBD_V1	0x0100

/* Port numbers for SMBD transport */
#define SMB_PORT	445
#define SMBD_PORT	5445

/* Address lookup and resolve timeout in ms */
#define RDMA_RESOLVE_TIMEOUT	5000

/* SMBD negotiation timeout in seconds */
#define SMBD_NEGOTIATE_TIMEOUT	120

/* SMBD minimum receive size and fragmented size defined in [MS-SMBD] */
#define SMBD_MIN_RECEIVE_SIZE		128
#define SMBD_MIN_FRAGMENTED_SIZE	131072
/*
 * Default maximum number of RDMA read/write outstanding on this connection
 * This value may be decreased during QP creation if the hardware limit is
 * lower
 */
#define SMBD_CM_RESPONDER_RESOURCES	32

/* Maximum number of retries on data transfer operations */
#define SMBD_CM_RETRY			6
/* No need to retry on Receiver Not Ready since SMBD manages credits */
#define SMBD_CM_RNR_RETRY		0
/*
 * User configurable initial values per SMBD transport connection
 * as defined in [MS-SMBD] 3.1.1.1
 * Those may change after an SMBD negotiation
 */
/* The local peer's maximum number of credits to grant to the peer */
int smbd_receive_credit_max = 255;

/* The remote peer's credit request of local peer */
int smbd_send_credit_target = 255;

/* The maximum single message size that can be sent to the remote peer */
int smbd_max_send_size = 1364;

/* The maximum fragmented upper-layer payload receive size supported */
int smbd_max_fragmented_recv_size = 1024 * 1024;

/* The maximum single-message size which can be received */
int smbd_max_receive_size = 8192;

/* The timeout to initiate send of a keepalive message on idle */
int smbd_keep_alive_interval = 120;
/*
 * User configurable initial values for RDMA transport
 * The actual values used may be lower and are limited to hardware capabilities
 */
/* Default maximum number of SGEs in a RDMA write/read */
int smbd_max_frmr_depth = 2048;

/* If the payload is smaller than this many bytes, use RDMA send/recv
 * rather than RDMA read/write */
int rdma_readwrite_threshold = 4096;
/* Transport logging functions
 * Logging is defined as classes. They can be OR'ed to define the actual
 * logging level via module parameter smbd_logging_class
 * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and
 * log_rdma_event()
 */
#define LOG_OUTGOING			0x1
#define LOG_INCOMING			0x2
#define LOG_READ			0x4
#define LOG_WRITE			0x8
#define LOG_RDMA_SEND			0x10
#define LOG_RDMA_RECV			0x20
#define LOG_KEEP_ALIVE			0x40
#define LOG_RDMA_EVENT			0x80
#define LOG_RDMA_MR			0x100
static unsigned int smbd_logging_class;
module_param(smbd_logging_class, uint, 0644);
MODULE_PARM_DESC(smbd_logging_class,
	"Logging class for SMBD transport 0x0 to 0x100");

#define ERR		0x0
#define INFO		0x1
static unsigned int smbd_logging_level = ERR;
module_param(smbd_logging_level, uint, 0644);
MODULE_PARM_DESC(smbd_logging_level,
	"Logging level for SMBD transport, 0 (default): error, 1: info");
#define log_rdma(level, class, fmt, args...)				\
do {									\
	if (level <= smbd_logging_level || class & smbd_logging_class)	\
		cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\
} while (0)

#define log_outgoing(level, fmt, args...) \
		log_rdma(level, LOG_OUTGOING, fmt, ##args)
#define log_incoming(level, fmt, args...) \
		log_rdma(level, LOG_INCOMING, fmt, ##args)
#define log_read(level, fmt, args...)	log_rdma(level, LOG_READ, fmt, ##args)
#define log_write(level, fmt, args...)	log_rdma(level, LOG_WRITE, fmt, ##args)
#define log_rdma_send(level, fmt, args...) \
		log_rdma(level, LOG_RDMA_SEND, fmt, ##args)
#define log_rdma_recv(level, fmt, args...) \
		log_rdma(level, LOG_RDMA_RECV, fmt, ##args)
#define log_keep_alive(level, fmt, args...) \
		log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args)
#define log_rdma_event(level, fmt, args...) \
		log_rdma(level, LOG_RDMA_EVENT, fmt, ##args)
#define log_rdma_mr(level, fmt, args...) \
		log_rdma(level, LOG_RDMA_MR, fmt, ##args)
static void smbd_disconnect_rdma_work(struct work_struct *work)
{
	struct smbd_connection *info =
		container_of(work, struct smbd_connection, disconnect_work);

	if (info->transport_status == SMBD_CONNECTED) {
		info->transport_status = SMBD_DISCONNECTING;
		rdma_disconnect(info->id);
	}
}

static void smbd_disconnect_rdma_connection(struct smbd_connection *info)
{
	queue_work(info->workqueue, &info->disconnect_work);
}
/* Upcall from RDMA CM */
static int smbd_conn_upcall(
		struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct smbd_connection *info = id->context;

	log_rdma_event(INFO, "event=%d status=%d\n",
		event->event, event->status);

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		info->ri_rc = 0;
		complete(&info->ri_done);
		break;

	case RDMA_CM_EVENT_ADDR_ERROR:
		info->ri_rc = -EHOSTUNREACH;
		complete(&info->ri_done);
		break;

	case RDMA_CM_EVENT_ROUTE_ERROR:
		info->ri_rc = -ENETUNREACH;
		complete(&info->ri_done);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		log_rdma_event(INFO, "connected event=%d\n", event->event);
		info->transport_status = SMBD_CONNECTED;
		wake_up_interruptible(&info->conn_wait);
		break;

	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
		info->transport_status = SMBD_DISCONNECTED;
		wake_up_interruptible(&info->conn_wait);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_DISCONNECTED:
		/* This happens when we fail the negotiation */
		if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
			info->transport_status = SMBD_DISCONNECTED;
			wake_up(&info->conn_wait);
			break;
		}

		info->transport_status = SMBD_DISCONNECTED;
		wake_up_interruptible(&info->disconn_wait);
		wake_up_interruptible(&info->wait_reassembly_queue);
		wake_up_interruptible_all(&info->wait_send_queue);
		break;

	default:
		break;
	}

	return 0;
}
/* Upcall from RDMA QP */
static void
smbd_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct smbd_connection *info = context;

	log_rdma_event(ERR, "%s on device %s info %p\n",
		ib_event_msg(event->event), event->device->name, info);

	switch (event->event) {
	case IB_EVENT_CQ_ERR:
	case IB_EVENT_QP_FATAL:
		smbd_disconnect_rdma_connection(info);
		break;

	default:
		break;
	}
}
static inline void *smbd_request_payload(struct smbd_request *request)
{
	return (void *)request->packet;
}

static inline void *smbd_response_payload(struct smbd_response *response)
{
	return (void *)response->packet;
}
/* Called when a RDMA send is done */
static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	int i;
	struct smbd_request *request =
		container_of(wc->wr_cqe, struct smbd_request, cqe);

	log_rdma_send(INFO, "smbd_request %p completed wc->status=%d\n",
		request, wc->status);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
		log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
			wc->status, wc->opcode);
		smbd_disconnect_rdma_connection(request->info);
	}

	for (i = 0; i < request->num_sge; i++)
		ib_dma_unmap_single(request->info->id->device,
			request->sge[i].addr,
			request->sge[i].length,
			DMA_TO_DEVICE);

	if (request->has_payload) {
		if (atomic_dec_and_test(&request->info->send_payload_pending))
			wake_up(&request->info->wait_send_payload_pending);
	} else {
		if (atomic_dec_and_test(&request->info->send_pending))
			wake_up(&request->info->wait_send_pending);
	}

	mempool_free(request, request->info->request_mempool);
}
static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
{
	log_rdma_event(INFO, "resp message min_version %u max_version %u "
		"negotiated_version %u credits_requested %u "
		"credits_granted %u status %u max_readwrite_size %u "
		"preferred_send_size %u max_receive_size %u "
		"max_fragmented_size %u\n",
		resp->min_version, resp->max_version, resp->negotiated_version,
		resp->credits_requested, resp->credits_granted, resp->status,
		resp->max_readwrite_size, resp->preferred_send_size,
		resp->max_receive_size, resp->max_fragmented_size);
}
/*
 * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
 * response, packet_length: the negotiation response message
 * return value: true if negotiation is a success, false if failed
 */
static bool process_negotiation_response(
		struct smbd_response *response, int packet_length)
{
	struct smbd_connection *info = response->info;
	struct smbd_negotiate_resp *packet = smbd_response_payload(response);

	if (packet_length < sizeof(struct smbd_negotiate_resp)) {
		log_rdma_event(ERR,
			"error: packet_length=%d\n", packet_length);
		return false;
	}

	if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
		log_rdma_event(ERR, "error: negotiated_version=%x\n",
			le16_to_cpu(packet->negotiated_version));
		return false;
	}
	info->protocol = le16_to_cpu(packet->negotiated_version);

	if (packet->credits_requested == 0) {
		log_rdma_event(ERR, "error: credits_requested==0\n");
		return false;
	}
	info->receive_credit_target = le16_to_cpu(packet->credits_requested);

	if (packet->credits_granted == 0) {
		log_rdma_event(ERR, "error: credits_granted==0\n");
		return false;
	}
	atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));

	atomic_set(&info->receive_credits, 0);

	if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
		log_rdma_event(ERR, "error: preferred_send_size=%d\n",
			le32_to_cpu(packet->preferred_send_size));
		return false;
	}
	info->max_receive_size = le32_to_cpu(packet->preferred_send_size);

	if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
		log_rdma_event(ERR, "error: max_receive_size=%d\n",
			le32_to_cpu(packet->max_receive_size));
		return false;
	}
	info->max_send_size = min_t(int, info->max_send_size,
		le32_to_cpu(packet->max_receive_size));

	if (le32_to_cpu(packet->max_fragmented_size) <
			SMBD_MIN_FRAGMENTED_SIZE) {
		log_rdma_event(ERR, "error: max_fragmented_size=%d\n",
			le32_to_cpu(packet->max_fragmented_size));
		return false;
	}
	info->max_fragmented_send_size =
		le32_to_cpu(packet->max_fragmented_size);
	info->rdma_readwrite_threshold =
		rdma_readwrite_threshold > info->max_fragmented_send_size ?
		info->max_fragmented_send_size :
		rdma_readwrite_threshold;

	info->max_readwrite_size = min_t(u32,
			le32_to_cpu(packet->max_readwrite_size),
			info->max_frmr_depth * PAGE_SIZE);
	info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;

	return true;
}
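/*
 * Worked example (hypothetical peer values): if the peer requests 255
 * credits, grants 255 credits, and advertises max_receive_size=8192 and
 * max_fragmented_size=1048576, then after a successful negotiation
 * send_credits=255, receive_credit_target=255,
 * max_send_size=min(1364, 8192)=1364 and max_fragmented_send_size=1048576.
 */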
/*
 * Check and schedule to send an immediate packet
 * This is used to extend credits to remote peer to keep the transport busy
 */
static void check_and_send_immediate(struct smbd_connection *info)
{
	if (info->transport_status != SMBD_CONNECTED)
		return;

	info->send_immediate = true;

	/*
	 * Promptly send a packet if our peer is running low on receive
	 * credits
	 */
	if (atomic_read(&info->receive_credits) <
		info->receive_credit_target - 1)
		queue_delayed_work(
			info->workqueue, &info->send_immediate_work, 0);
}
static void smbd_post_send_credits(struct work_struct *work)
{
	int ret = 0;
	int use_receive_queue = 1;
	int rc;
	struct smbd_response *response;
	struct smbd_connection *info =
		container_of(work, struct smbd_connection,
			post_send_credits_work);

	if (info->transport_status != SMBD_CONNECTED) {
		wake_up(&info->wait_receive_queues);
		return;
	}

	if (info->receive_credit_target >
		atomic_read(&info->receive_credits)) {
		while (true) {
			if (use_receive_queue)
				response = get_receive_buffer(info);
			else
				response = get_empty_queue_buffer(info);
			if (!response) {
				/* now switch to empty packet queue */
				if (use_receive_queue) {
					use_receive_queue = 0;
					continue;
				} else
					break;
			}

			response->type = SMBD_TRANSFER_DATA;
			response->first_segment = false;
			rc = smbd_post_recv(info, response);
			if (rc) {
				log_rdma_recv(ERR,
					"post_recv failed rc=%d\n", rc);
				put_receive_buffer(info, response);
				break;
			}

			ret++;
		}
	}

	spin_lock(&info->lock_new_credits_offered);
	info->new_credits_offered += ret;
	spin_unlock(&info->lock_new_credits_offered);

	atomic_add(ret, &info->receive_credits);

	/* Check if we can post new receive and grant credits to peer */
	check_and_send_immediate(info);
}
static void smbd_recv_done_work(struct work_struct *work)
{
	struct smbd_connection *info =
		container_of(work, struct smbd_connection, recv_done_work);

	/*
	 * We may have new send credits granted from remote peer
	 * If any sender is blocked on lack of credits, unblock it
	 */
	if (atomic_read(&info->send_credits))
		wake_up_interruptible(&info->wait_send_queue);

	/*
	 * Check if we need to send something to remote peer to
	 * grant more credits or respond to KEEP_ALIVE packet
	 */
	check_and_send_immediate(info);
}
/* Called from softirq, when recv is done */
static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbd_data_transfer *data_transfer;
	struct smbd_response *response =
		container_of(wc->wr_cqe, struct smbd_response, cqe);
	struct smbd_connection *info = response->info;
	int data_length = 0;

	log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d "
		"byte_len=%d pkey_index=%x\n",
		response, response->type, wc->status, wc->opcode,
		wc->byte_len, wc->pkey_index);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
		log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
			wc->status, wc->opcode);
		smbd_disconnect_rdma_connection(info);
		goto error;
	}

	ib_dma_sync_single_for_cpu(
		wc->qp->device,
		response->sge.addr,
		response->sge.length,
		DMA_FROM_DEVICE);

	switch (response->type) {
	/* SMBD negotiation response */
	case SMBD_NEGOTIATE_RESP:
		dump_smbd_negotiate_resp(smbd_response_payload(response));
		info->full_packet_received = true;
		info->negotiate_done =
			process_negotiation_response(response, wc->byte_len);
		complete(&info->negotiate_completion);
		break;

	/* SMBD data transfer packet */
	case SMBD_TRANSFER_DATA:
		data_transfer = smbd_response_payload(response);
		data_length = le32_to_cpu(data_transfer->data_length);

		/*
		 * If this is a packet with data payload place the data in
		 * reassembly queue and wake up the reading thread
		 */
		if (data_length) {
			if (info->full_packet_received)
				response->first_segment = true;

			if (le32_to_cpu(data_transfer->remaining_data_length))
				info->full_packet_received = false;
			else
				info->full_packet_received = true;

			enqueue_reassembly(
				info,
				response,
				data_length);
		} else
			put_empty_packet(info, response);

		if (data_length)
			wake_up_interruptible(&info->wait_reassembly_queue);

		atomic_dec(&info->receive_credits);
		info->receive_credit_target =
			le16_to_cpu(data_transfer->credits_requested);
		atomic_add(le16_to_cpu(data_transfer->credits_granted),
			&info->send_credits);

		log_incoming(INFO, "data flags %d data_offset %d "
			"data_length %d remaining_data_length %d\n",
			le16_to_cpu(data_transfer->flags),
			le32_to_cpu(data_transfer->data_offset),
			le32_to_cpu(data_transfer->data_length),
			le32_to_cpu(data_transfer->remaining_data_length));

		/* Send a KEEP_ALIVE response right away if requested */
		info->keep_alive_requested = KEEP_ALIVE_NONE;
		if (le16_to_cpu(data_transfer->flags) &
				SMB_DIRECT_RESPONSE_REQUESTED) {
			info->keep_alive_requested = KEEP_ALIVE_PENDING;
		}

		queue_work(info->workqueue, &info->recv_done_work);
		return;

	default:
		log_rdma_recv(ERR,
			"unexpected response type=%d\n", response->type);
	}

error:
	put_receive_buffer(info, response);
}
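/*
 * Note: recv_done() executes in completion (softirq) context, so it only
 * does credit accounting and queueing here; waking blocked senders and
 * granting new credits happen later in smbd_recv_done_work() on the
 * workqueue.
 */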
static struct rdma_cm_id *smbd_create_id(
		struct smbd_connection *info,
		struct sockaddr *dstaddr, int port)
{
	struct rdma_cm_id *id;
	int rc;
	__be16 *sport;

	id = rdma_create_id(&init_net, smbd_conn_upcall, info,
		RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc);
		return id;
	}

	if (dstaddr->sa_family == AF_INET6)
		sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port;
	else
		sport = &((struct sockaddr_in *)dstaddr)->sin_port;

	*sport = htons(port);

	init_completion(&info->ri_done);
	info->ri_rc = -ETIMEDOUT;

	rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr,
		RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(
		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	rc = info->ri_rc;
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
		goto out;
	}

	info->ri_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(
		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	rc = info->ri_rc;
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
		goto out;
	}

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}
/*
 * Test if FRWR (Fast Registration Work Requests) is supported on the device
 * This implementation requires FRWR on RDMA read/write
 * return value: true if it is supported
 */
static bool frwr_is_supported(struct ib_device_attr *attrs)
{
	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		return false;
	if (attrs->max_fast_reg_page_list_len == 0)
		return false;
	return true;
}
static int smbd_ia_open(
		struct smbd_connection *info,
		struct sockaddr *dstaddr, int port)
{
	int rc;

	info->id = smbd_create_id(info, dstaddr, port);
	if (IS_ERR(info->id)) {
		rc = PTR_ERR(info->id);
		goto out1;
	}

	if (!frwr_is_supported(&info->id->device->attrs)) {
		log_rdma_event(ERR,
			"Fast Registration Work Requests "
			"(FRWR) is not supported\n");
		log_rdma_event(ERR,
			"Device capability flags = %llx "
			"max_fast_reg_page_list_len = %u\n",
			info->id->device->attrs.device_cap_flags,
			info->id->device->attrs.max_fast_reg_page_list_len);
		rc = -EPROTONOSUPPORT;
		goto out2;
	}
	info->max_frmr_depth = min_t(int,
		smbd_max_frmr_depth,
		info->id->device->attrs.max_fast_reg_page_list_len);
	info->mr_type = IB_MR_TYPE_MEM_REG;
	if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		info->mr_type = IB_MR_TYPE_SG_GAPS;

	info->pd = ib_alloc_pd(info->id->device, 0);
	if (IS_ERR(info->pd)) {
		rc = PTR_ERR(info->pd);
		log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
		goto out2;
	}

	return 0;

out2:
	rdma_destroy_id(info->id);
	info->id = NULL;

out1:
	return rc;
}
/*
 * Send a negotiation request message to the peer
 * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
 * After negotiation, the transport is connected and ready for
 * carrying upper layer SMB payload
 */
static int smbd_post_send_negotiate_req(struct smbd_connection *info)
{
	struct ib_send_wr send_wr;
	int rc = -ENOMEM;
	struct smbd_request *request;
	struct smbd_negotiate_req *packet;

	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
	if (!request)
		return rc;

	request->info = info;

	packet = smbd_request_payload(request);
	packet->min_version = cpu_to_le16(SMBD_V1);
	packet->max_version = cpu_to_le16(SMBD_V1);
	packet->reserved = 0;
	packet->credits_requested = cpu_to_le16(info->send_credit_target);
	packet->preferred_send_size = cpu_to_le32(info->max_send_size);
	packet->max_receive_size = cpu_to_le32(info->max_receive_size);
	packet->max_fragmented_size =
		cpu_to_le32(info->max_fragmented_recv_size);

	request->num_sge = 1;
	request->sge[0].addr = ib_dma_map_single(
				info->id->device, (void *)packet,
				sizeof(*packet), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
		rc = -EIO;
		goto dma_mapping_failed;
	}

	request->sge[0].length = sizeof(*packet);
	request->sge[0].lkey = info->pd->local_dma_lkey;

	ib_dma_sync_single_for_device(
		info->id->device, request->sge[0].addr,
		request->sge[0].length, DMA_TO_DEVICE);

	request->cqe.done = send_done;

	send_wr.next = NULL;
	send_wr.wr_cqe = &request->cqe;
	send_wr.sg_list = request->sge;
	send_wr.num_sge = request->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	log_rdma_send(INFO, "sge addr=%llx length=%x lkey=%x\n",
		request->sge[0].addr,
		request->sge[0].length, request->sge[0].lkey);

	request->has_payload = false;
	atomic_inc(&info->send_pending);
	rc = ib_post_send(info->id->qp, &send_wr, NULL);
	if (!rc)
		return 0;

	/* if we reach here, post send failed */
	log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
	atomic_dec(&info->send_pending);
	ib_dma_unmap_single(info->id->device, request->sge[0].addr,
		request->sge[0].length, DMA_TO_DEVICE);

	smbd_disconnect_rdma_connection(info);

dma_mapping_failed:
	mempool_free(request, info->request_mempool);
	return rc;
}
/*
 * Extend the credits to remote peer
 * This implements [MS-SMBD] 3.1.5.9
 * The idea is that we should extend credits to remote peer as quickly as
 * it's allowed, to maintain data flow. We allocate as much receive
 * buffer as possible, and extend the receive credits to remote peer
 * return value: the new credits being granted.
 */
static int manage_credits_prior_sending(struct smbd_connection *info)
{
	int new_credits;

	spin_lock(&info->lock_new_credits_offered);
	new_credits = info->new_credits_offered;
	info->new_credits_offered = 0;
	spin_unlock(&info->lock_new_credits_offered);

	return new_credits;
}
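/*
 * Worked example: if post_send_credits_work reposted 10 receive buffers
 * since the last send, new_credits_offered is 10; the next outgoing packet
 * carries credits_granted=10 and the counter is reset under the lock, so
 * the same credits are never granted twice.
 */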
/*
 * Check if we need to send a KEEP_ALIVE message
 * The idle connection timer triggers a KEEP_ALIVE message when it expires
 * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flag to have peer send
 * back a response
 * return value:
 * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
 * 0 otherwise
 */
static int manage_keep_alive_before_sending(struct smbd_connection *info)
{
	if (info->keep_alive_requested == KEEP_ALIVE_PENDING) {
		info->keep_alive_requested = KEEP_ALIVE_SENT;
		return 1;
	}
	return 0;
}
/*
 * Build and prepare the SMBD packet header
 * This function waits for available send credits and builds an SMBD packet
 * header. The caller may then optionally append a payload to the packet
 * after the header
 * input values
 * size: the size of the payload
 * remaining_data_length: remaining data to send if this is part of a
 * fragmented packet
 * output values
 * request_out: the request allocated from this function
 * return values: 0 on success, otherwise actual error code returned
 */
static int smbd_create_header(struct smbd_connection *info,
		int size, int remaining_data_length,
		struct smbd_request **request_out)
{
	struct smbd_request *request;
	struct smbd_data_transfer *packet;
	int header_length;
	int rc;

	/* Wait for send credits. A SMBD packet needs one credit */
	rc = wait_event_interruptible(info->wait_send_queue,
		atomic_read(&info->send_credits) > 0 ||
		info->transport_status != SMBD_CONNECTED);
	if (rc)
		return rc;

	if (info->transport_status != SMBD_CONNECTED) {
		log_outgoing(ERR, "disconnected not sending\n");
		return -EAGAIN;
	}
	atomic_dec(&info->send_credits);

	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
	if (!request) {
		rc = -ENOMEM;
		goto err;
	}

	request->info = info;

	/* Fill in the packet header */
	packet = smbd_request_payload(request);
	packet->credits_requested = cpu_to_le16(info->send_credit_target);
	packet->credits_granted =
		cpu_to_le16(manage_credits_prior_sending(info));
	info->send_immediate = false;

	packet->flags = 0;
	if (manage_keep_alive_before_sending(info))
		packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);

	packet->reserved = 0;
	if (!size)
		packet->data_offset = 0;
	else
		packet->data_offset = cpu_to_le32(24);
	packet->data_length = cpu_to_le32(size);
	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
	packet->padding = 0;

	log_outgoing(INFO, "credits_requested=%d credits_granted=%d "
		"data_offset=%d data_length=%d remaining_data_length=%d\n",
		le16_to_cpu(packet->credits_requested),
		le16_to_cpu(packet->credits_granted),
		le32_to_cpu(packet->data_offset),
		le32_to_cpu(packet->data_length),
		le32_to_cpu(packet->remaining_data_length));

	/* Map the packet to DMA */
	header_length = sizeof(struct smbd_data_transfer);
	/* If this is a packet without payload, don't send padding */
	if (!size)
		header_length = offsetof(struct smbd_data_transfer, padding);

	request->num_sge = 1;
	request->sge[0].addr = ib_dma_map_single(info->id->device,
						 (void *)packet,
						 header_length,
						 DMA_TO_DEVICE);
	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
		mempool_free(request, info->request_mempool);
		rc = -EIO;
		goto err;
	}

	request->sge[0].length = header_length;
	request->sge[0].lkey = info->pd->local_dma_lkey;

	*request_out = request;
	return 0;

err:
	atomic_inc(&info->send_credits);
	return rc;
}

static void smbd_destroy_header(struct smbd_connection *info,
		struct smbd_request *request)
{
	ib_dma_unmap_single(info->id->device,
		request->sge[0].addr,
		request->sge[0].length,
		DMA_TO_DEVICE);
	mempool_free(request, info->request_mempool);
	atomic_inc(&info->send_credits);
}
/* Post the send request */
static int smbd_post_send(struct smbd_connection *info,
		struct smbd_request *request, bool has_payload)
{
	struct ib_send_wr send_wr;
	int rc, i;

	for (i = 0; i < request->num_sge; i++) {
		log_rdma_send(INFO,
			"rdma_request sge[%d] addr=%llu length=%u\n",
			i, request->sge[i].addr, request->sge[i].length);
		ib_dma_sync_single_for_device(
			info->id->device,
			request->sge[i].addr,
			request->sge[i].length,
			DMA_TO_DEVICE);
	}

	request->cqe.done = send_done;

	send_wr.next = NULL;
	send_wr.wr_cqe = &request->cqe;
	send_wr.sg_list = request->sge;
	send_wr.num_sge = request->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	if (has_payload) {
		request->has_payload = true;
		atomic_inc(&info->send_payload_pending);
	} else {
		request->has_payload = false;
		atomic_inc(&info->send_pending);
	}

	rc = ib_post_send(info->id->qp, &send_wr, NULL);
	if (rc) {
		log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
		if (has_payload) {
			if (atomic_dec_and_test(&info->send_payload_pending))
				wake_up(&info->wait_send_payload_pending);
		} else {
			if (atomic_dec_and_test(&info->send_pending))
				wake_up(&info->wait_send_pending);
		}
		smbd_disconnect_rdma_connection(info);
		rc = -EAGAIN;
	} else
		/* Reset timer for idle connection after packet is sent */
		mod_delayed_work(info->workqueue, &info->idle_timer_work,
			info->keep_alive_interval*HZ);

	return rc;
}
static int smbd_post_send_sgl(struct smbd_connection *info,
	struct scatterlist *sgl, int data_length, int remaining_data_length)
{
	int num_sgs;
	int i, rc;
	struct smbd_request *request;
	struct scatterlist *sg;

	rc = smbd_create_header(
		info, data_length, remaining_data_length, &request);
	if (rc)
		return rc;

	num_sgs = sgl ? sg_nents(sgl) : 0;
	for_each_sg(sgl, sg, num_sgs, i) {
		request->sge[i+1].addr =
			ib_dma_map_page(info->id->device, sg_page(sg),
			       sg->offset, sg->length, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(
				info->id->device, request->sge[i+1].addr)) {
			rc = -EIO;
			request->sge[i+1].addr = 0;
			goto dma_mapping_failure;
		}
		request->sge[i+1].length = sg->length;
		request->sge[i+1].lkey = info->pd->local_dma_lkey;
		request->num_sge++;
	}

	rc = smbd_post_send(info, request, data_length);
	if (!rc)
		return 0;

dma_mapping_failure:
	for (i = 1; i < request->num_sge; i++)
		if (request->sge[i].addr)
			ib_dma_unmap_single(info->id->device,
					    request->sge[i].addr,
					    request->sge[i].length,
					    DMA_TO_DEVICE);
	smbd_destroy_header(info, request);
	return rc;
}
/*
 * Send a page
 * page: the page to send
 * offset: offset in the page to send
 * size: length in the page to send
 * remaining_data_length: remaining data to send in this payload
 */
static int smbd_post_send_page(struct smbd_connection *info, struct page *page,
		unsigned long offset, size_t size, int remaining_data_length)
{
	struct scatterlist sgl;

	sg_init_table(&sgl, 1);
	sg_set_page(&sgl, page, size, offset);

	return smbd_post_send_sgl(info, &sgl, size, remaining_data_length);
}
/*
 * Send an empty message
 * Empty message is used to extend credits to the peer for keepalive
 * while there is no upper layer payload to send at the time
 */
static int smbd_post_send_empty(struct smbd_connection *info)
{
	info->count_send_empty++;
	return smbd_post_send_sgl(info, NULL, 0, 0);
}
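/*
 * Note: an empty message still consumes a send credit and carries the
 * credits_granted/credits_requested header fields, which is how credits
 * are extended to the peer when no upper layer payload is flowing.
 */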
/*
 * Send a data buffer
 * iov: the iov array describing the data buffers
 * n_vec: number of entries in the iov array
 * remaining_data_length: remaining data to send following this packet
 * in segmented SMBD packet
 */
static int smbd_post_send_data(
	struct smbd_connection *info, struct kvec *iov, int n_vec,
	int remaining_data_length)
{
	int i;
	u32 data_length = 0;
	struct scatterlist sgl[SMBDIRECT_MAX_SGE];

	if (n_vec > SMBDIRECT_MAX_SGE) {
		cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
		return -EINVAL;
	}

	sg_init_table(sgl, n_vec);
	for (i = 0; i < n_vec; i++) {
		data_length += iov[i].iov_len;
		sg_set_buf(&sgl[i], iov[i].iov_base, iov[i].iov_len);
	}

	return smbd_post_send_sgl(info, sgl, data_length, remaining_data_length);
}
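/*
 * Usage sketch (hypothetical caller, not part of this file): sending a
 * 24-byte header and a 512-byte body as a single, final SMBD fragment:
 *
 *	struct kvec iov[2] = {
 *		{ .iov_base = hdr,  .iov_len = 24  },
 *		{ .iov_base = body, .iov_len = 512 },
 *	};
 *	rc = smbd_post_send_data(info, iov, 2, 0);
 *
 * remaining_data_length is 0 because nothing follows this packet.
 */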
/*
 * Post a receive request to the transport
 * The remote peer can only send data when a receive request is posted
 * The interaction is controlled by the send/receive credit system
 */
static int smbd_post_recv(
		struct smbd_connection *info, struct smbd_response *response)
{
	struct ib_recv_wr recv_wr;
	int rc = -EIO;

	response->sge.addr = ib_dma_map_single(
				info->id->device, response->packet,
				info->max_receive_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(info->id->device, response->sge.addr))
		return rc;

	response->sge.length = info->max_receive_size;
	response->sge.lkey = info->pd->local_dma_lkey;

	response->cqe.done = recv_done;

	recv_wr.wr_cqe = &response->cqe;
	recv_wr.next = NULL;
	recv_wr.sg_list = &response->sge;
	recv_wr.num_sge = 1;

	rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
	if (rc) {
		ib_dma_unmap_single(info->id->device, response->sge.addr,
			response->sge.length, DMA_FROM_DEVICE);
		smbd_disconnect_rdma_connection(info);
		log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
	}

	return rc;
}
/* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
static int smbd_negotiate(struct smbd_connection *info)
{
	int rc;
	struct smbd_response *response = get_receive_buffer(info);

	response->type = SMBD_NEGOTIATE_RESP;
	rc = smbd_post_recv(info, response);
	log_rdma_event(INFO,
		"smbd_post_recv rc=%d iov.addr=%llx iov.length=%x "
		"iov.lkey=%x\n",
		rc, response->sge.addr,
		response->sge.length, response->sge.lkey);
	if (rc)
		return rc;

	init_completion(&info->negotiate_completion);
	info->negotiate_done = false;
	rc = smbd_post_send_negotiate_req(info);
	if (rc)
		return rc;

	rc = wait_for_completion_interruptible_timeout(
		&info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);
	log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc);

	if (info->negotiate_done)
		return 0;

	if (rc == 0)
		rc = -ETIMEDOUT;
	else if (rc == -ERESTARTSYS)
		rc = -EINTR;
	else
		rc = -ENOTCONN;

	return rc;
}
static void put_empty_packet(
		struct smbd_connection *info, struct smbd_response *response)
{
	spin_lock(&info->empty_packet_queue_lock);
	list_add_tail(&response->list, &info->empty_packet_queue);
	info->count_empty_packet_queue++;
	spin_unlock(&info->empty_packet_queue_lock);

	queue_work(info->workqueue, &info->post_send_credits_work);
}
/*
 * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
 * This is a queue for reassembling upper layer payload and presenting it to
 * the upper layer. All incoming payloads go to the reassembly queue,
 * regardless of whether reassembly is required. The upper layer code reads
 * from the queue for all incoming payloads.
 * Put a received packet to the reassembly queue
 * response: the packet received
 * data_length: the size of payload in this packet
 */
static void enqueue_reassembly(
	struct smbd_connection *info,
	struct smbd_response *response,
	int data_length)
{
	spin_lock(&info->reassembly_queue_lock);
	list_add_tail(&response->list, &info->reassembly_queue);
	info->reassembly_queue_length++;
	/*
	 * Make sure reassembly_data_length is updated after list and
	 * reassembly_queue_length are updated. On the dequeue side
	 * reassembly_data_length is checked without a lock to determine
	 * if reassembly_queue_length and list is up to date
	 */
	virt_wmb();
	info->reassembly_data_length += data_length;
	spin_unlock(&info->reassembly_queue_lock);
	info->count_reassembly_queue++;
	info->count_enqueue_reassembly_queue++;
}
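/*
 * Note: the virt_wmb() in enqueue_reassembly() pairs with the virt_rmb()
 * in smbd_recv_buf(), which checks reassembly_data_length without taking
 * the lock before walking the queue.
 */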
/*
 * Get the first entry at the front of reassembly queue
 * Caller is responsible for locking
 * return value: the first entry if any, NULL if queue is empty
 */
static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
{
	struct smbd_response *ret = NULL;

	if (!list_empty(&info->reassembly_queue)) {
		ret = list_first_entry(
			&info->reassembly_queue,
			struct smbd_response, list);
	}
	return ret;
}
static struct smbd_response *get_empty_queue_buffer(
		struct smbd_connection *info)
{
	struct smbd_response *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
	if (!list_empty(&info->empty_packet_queue)) {
		ret = list_first_entry(
			&info->empty_packet_queue,
			struct smbd_response, list);
		list_del(&ret->list);
		info->count_empty_packet_queue--;
	}
	spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);

	return ret;
}
/*
 * Get a receive buffer
 * For each remote send, we need to post a receive. The receive buffers are
 * pre-allocated.
 * return value: the receive buffer, NULL if none is available
 */
static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
{
	struct smbd_response *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&info->receive_queue_lock, flags);
	if (!list_empty(&info->receive_queue)) {
		ret = list_first_entry(
			&info->receive_queue,
			struct smbd_response, list);
		list_del(&ret->list);
		info->count_receive_queue--;
		info->count_get_receive_buffer++;
	}
	spin_unlock_irqrestore(&info->receive_queue_lock, flags);

	return ret;
}
/*
 * Return a receive buffer
 * Upon returning of a receive buffer, we can post new receive and extend
 * more receive credits to remote peer. This is done immediately after a
 * receive buffer is returned.
 */
static void put_receive_buffer(
	struct smbd_connection *info, struct smbd_response *response)
{
	unsigned long flags;

	ib_dma_unmap_single(info->id->device, response->sge.addr,
		response->sge.length, DMA_FROM_DEVICE);

	spin_lock_irqsave(&info->receive_queue_lock, flags);
	list_add_tail(&response->list, &info->receive_queue);
	info->count_receive_queue++;
	info->count_put_receive_buffer++;
	spin_unlock_irqrestore(&info->receive_queue_lock, flags);

	queue_work(info->workqueue, &info->post_send_credits_work);
}
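/*
 * Note: queueing post_send_credits_work here closes the credit loop: the
 * returned buffer is reposted as a receive by smbd_post_send_credits(),
 * which then offers the new credit to the peer via
 * check_and_send_immediate().
 */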
/* Preallocate all receive buffers on transport establishment */
static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
{
	int i;
	struct smbd_response *response;

	INIT_LIST_HEAD(&info->reassembly_queue);
	spin_lock_init(&info->reassembly_queue_lock);
	info->reassembly_data_length = 0;
	info->reassembly_queue_length = 0;

	INIT_LIST_HEAD(&info->receive_queue);
	spin_lock_init(&info->receive_queue_lock);
	info->count_receive_queue = 0;

	INIT_LIST_HEAD(&info->empty_packet_queue);
	spin_lock_init(&info->empty_packet_queue_lock);
	info->count_empty_packet_queue = 0;

	init_waitqueue_head(&info->wait_receive_queues);

	for (i = 0; i < num_buf; i++) {
		response = mempool_alloc(info->response_mempool, GFP_KERNEL);
		if (!response)
			goto allocate_failed;

		response->info = info;
		list_add_tail(&response->list, &info->receive_queue);
		info->count_receive_queue++;
	}

	return 0;

allocate_failed:
	while (!list_empty(&info->receive_queue)) {
		response = list_first_entry(
				&info->receive_queue,
				struct smbd_response, list);
		list_del(&response->list);
		info->count_receive_queue--;

		mempool_free(response, info->response_mempool);
	}
	return -ENOMEM;
}
static void destroy_receive_buffers(struct smbd_connection *info)
{
	struct smbd_response *response;

	while ((response = get_receive_buffer(info)))
		mempool_free(response, info->response_mempool);

	while ((response = get_empty_queue_buffer(info)))
		mempool_free(response, info->response_mempool);
}
/*
 * Check and send an immediate or keep alive packet
 * The conditions for sending those packets are defined in [MS-SMBD] 3.1.1.1
 * Connection.KeepaliveRequested and Connection.SendImmediate
 * The idea is to extend credits to the server as soon as they become
 * available
 */
static void send_immediate_work(struct work_struct *work)
{
	struct smbd_connection *info = container_of(
					work, struct smbd_connection,
					send_immediate_work.work);

	if (info->keep_alive_requested == KEEP_ALIVE_PENDING ||
	    info->send_immediate) {
		log_keep_alive(INFO, "send an empty message\n");
		smbd_post_send_empty(info);
	}
}
/* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
static void idle_connection_timer(struct work_struct *work)
{
	struct smbd_connection *info = container_of(
					work, struct smbd_connection,
					idle_timer_work.work);

	if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
		log_keep_alive(ERR,
			"error status info->keep_alive_requested=%d\n",
			info->keep_alive_requested);
		smbd_disconnect_rdma_connection(info);
		return;
	}

	log_keep_alive(INFO, "about to send an empty idle message\n");
	smbd_post_send_empty(info);

	/* Setup the next idle timeout work */
	queue_delayed_work(info->workqueue, &info->idle_timer_work,
		info->keep_alive_interval*HZ);
}
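/*
 * Note: receiving any packet resets keep_alive_requested to KEEP_ALIVE_NONE
 * in recv_done(), so the idle timer firing while it is still
 * KEEP_ALIVE_PENDING or KEEP_ALIVE_SENT means the peer has been silent for
 * a whole keep_alive_interval and the connection is treated as dead.
 */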
/*
 * Destroy the transport and related RDMA and memory resources
 * Need to go through all the pending counters and make sure no one is using
 * the transport while it is destroyed
 */
void smbd_destroy(struct TCP_Server_Info *server)
{
	struct smbd_connection *info = server->smbd_conn;
	struct smbd_response *response;
	unsigned long flags;

	if (!info) {
		log_rdma_event(INFO, "rdma session already destroyed\n");
		return;
	}

	log_rdma_event(INFO, "destroying rdma session\n");
	if (info->transport_status != SMBD_DISCONNECTED) {
		rdma_disconnect(server->smbd_conn->id);
		log_rdma_event(INFO, "wait for transport being disconnected\n");
		wait_event_interruptible(
			info->disconn_wait,
			info->transport_status == SMBD_DISCONNECTED);
	}

	log_rdma_event(INFO, "destroying qp\n");
	ib_drain_qp(info->id->qp);
	rdma_destroy_qp(info->id);

	log_rdma_event(INFO, "cancelling idle timer\n");
	cancel_delayed_work_sync(&info->idle_timer_work);
	log_rdma_event(INFO, "cancelling send immediate work\n");
	cancel_delayed_work_sync(&info->send_immediate_work);

	log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
	wait_event(info->wait_send_pending,
		atomic_read(&info->send_pending) == 0);
	wait_event(info->wait_send_payload_pending,
		atomic_read(&info->send_payload_pending) == 0);

	/* It's not possible for upper layer to get to reassembly */
	log_rdma_event(INFO, "drain the reassembly queue\n");
	do {
		spin_lock_irqsave(&info->reassembly_queue_lock, flags);
		response = _get_first_reassembly(info);
		if (response) {
			list_del(&response->list);
			spin_unlock_irqrestore(
				&info->reassembly_queue_lock, flags);
			put_receive_buffer(info, response);
		} else
			spin_unlock_irqrestore(
				&info->reassembly_queue_lock, flags);
	} while (response);
	info->reassembly_data_length = 0;

	log_rdma_event(INFO, "free receive buffers\n");
	wait_event(info->wait_receive_queues,
		info->count_receive_queue + info->count_empty_packet_queue
			== info->receive_credit_max);
	destroy_receive_buffers(info);

	/*
	 * For performance reasons, memory registration and deregistration
	 * are not locked by srv_mutex. It is possible some processes are
	 * blocked on transport srv_mutex while holding memory registration.
	 * Release the transport srv_mutex to allow them to hit the failure
	 * path when sending data, and then release memory registrations.
	 */
	log_rdma_event(INFO, "freeing mr list\n");
	wake_up_interruptible_all(&info->wait_mr);
	while (atomic_read(&info->mr_used_count)) {
		mutex_unlock(&server->srv_mutex);
		msleep(1000);
		mutex_lock(&server->srv_mutex);
	}
	destroy_mr_list(info);

	ib_free_cq(info->send_cq);
	ib_free_cq(info->recv_cq);
	ib_dealloc_pd(info->pd);
	rdma_destroy_id(info->id);

	/* free mempools */
	mempool_destroy(info->request_mempool);
	kmem_cache_destroy(info->request_cache);

	mempool_destroy(info->response_mempool);
	kmem_cache_destroy(info->response_cache);

	info->transport_status = SMBD_DESTROYED;

	destroy_workqueue(info->workqueue);
	log_rdma_event(INFO, "rdma session destroyed\n");
	kfree(info);
}
/*
 * Reconnect this SMBD connection, called from upper layer
 * return value: 0 on success, or actual error code
 */
int smbd_reconnect(struct TCP_Server_Info *server)
{
	log_rdma_event(INFO, "reconnecting rdma session\n");

	if (!server->smbd_conn) {
		log_rdma_event(INFO, "rdma session already destroyed\n");
		goto create_conn;
	}

	/*
	 * This is possible if transport is disconnected and we haven't
	 * received notification from RDMA, but upper layer has detected
	 * timeout
	 */
	if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
		log_rdma_event(INFO, "disconnecting transport\n");
		smbd_destroy(server);
	}

create_conn:
	log_rdma_event(INFO, "creating rdma session\n");
	server->smbd_conn = smbd_get_connection(
		server, (struct sockaddr *) &server->dstaddr);
	log_rdma_event(INFO, "created rdma session info=%p\n",
		server->smbd_conn);

	return server->smbd_conn ? 0 : -ENOENT;
}
static void destroy_caches_and_workqueue(struct smbd_connection *info)
{
	destroy_receive_buffers(info);
	destroy_workqueue(info->workqueue);
	mempool_destroy(info->response_mempool);
	kmem_cache_destroy(info->response_cache);
	mempool_destroy(info->request_mempool);
	kmem_cache_destroy(info->request_cache);
}
#define MAX_NAME_LEN	80
static int allocate_caches_and_workqueue(struct smbd_connection *info)
{
	char name[MAX_NAME_LEN];
	int rc;

	scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
	info->request_cache =
		kmem_cache_create(
			name,
			sizeof(struct smbd_request) +
				sizeof(struct smbd_data_transfer),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!info->request_cache)
		return -ENOMEM;

	info->request_mempool =
		mempool_create(info->send_credit_target, mempool_alloc_slab,
			mempool_free_slab, info->request_cache);
	if (!info->request_mempool)
		goto out1;

	scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
	info->response_cache =
		kmem_cache_create(
			name,
			sizeof(struct smbd_response) +
				info->max_receive_size,
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!info->response_cache)
		goto out2;

	info->response_mempool =
		mempool_create(info->receive_credit_max, mempool_alloc_slab,
			mempool_free_slab, info->response_cache);
	if (!info->response_mempool)
		goto out3;

	scnprintf(name, MAX_NAME_LEN, "smbd_%p", info);
	info->workqueue = create_workqueue(name);
	if (!info->workqueue)
		goto out4;

	rc = allocate_receive_buffers(info, info->receive_credit_max);
	if (rc) {
		log_rdma_event(ERR, "failed to allocate receive buffers\n");
		goto out5;
	}

	return 0;

out5:
	destroy_workqueue(info->workqueue);
out4:
	mempool_destroy(info->response_mempool);
out3:
	kmem_cache_destroy(info->response_cache);
out2:
	mempool_destroy(info->request_mempool);
out1:
	kmem_cache_destroy(info->request_cache);
	return -ENOMEM;
}
/* Create a SMBD connection, called by upper layer */
static struct smbd_connection *_smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port)
{
	int rc;
	struct smbd_connection *info;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
	struct ib_port_immutable port_immutable;
	u8 ird_ord_hdr[2];

	info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
	if (!info)
		return NULL;

	info->transport_status = SMBD_CONNECTING;
	rc = smbd_ia_open(info, dstaddr, port);
	if (rc) {
		log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
		goto create_id_failed;
	}

	if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
	    smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
		log_rdma_event(ERR,
			"consider lowering send_credit_target = %d. "
			"Possible CQE overrun, device "
			"reporting max_cqe %d max_qp_wr %d\n",
			smbd_send_credit_target,
			info->id->device->attrs.max_cqe,
			info->id->device->attrs.max_qp_wr);
		goto config_failed;
	}

	if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
	    smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
		log_rdma_event(ERR,
			"consider lowering receive_credit_max = %d. "
			"Possible CQE overrun, device "
			"reporting max_cqe %d max_qp_wr %d\n",
			smbd_receive_credit_max,
			info->id->device->attrs.max_cqe,
			info->id->device->attrs.max_qp_wr);
		goto config_failed;
	}

	info->receive_credit_max = smbd_receive_credit_max;
	info->send_credit_target = smbd_send_credit_target;
	info->max_send_size = smbd_max_send_size;
	info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
	info->max_receive_size = smbd_max_receive_size;
	info->keep_alive_interval = smbd_keep_alive_interval;

	if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SGE) {
		log_rdma_event(ERR,
			"warning: device max_send_sge = %d too small\n",
			info->id->device->attrs.max_send_sge);
		log_rdma_event(ERR, "Queue Pair creation may fail\n");
	}
	if (info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_SGE) {
		log_rdma_event(ERR,
			"warning: device max_recv_sge = %d too small\n",
			info->id->device->attrs.max_recv_sge);
		log_rdma_event(ERR, "Queue Pair creation may fail\n");
	}

	info->send_cq = NULL;
	info->recv_cq = NULL;
	info->send_cq = ib_alloc_cq(info->id->device, info,
			info->send_credit_target, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(info->send_cq)) {
		info->send_cq = NULL;
		goto alloc_cq_failed;
	}

	info->recv_cq = ib_alloc_cq(info->id->device, info,
			info->receive_credit_max, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(info->recv_cq)) {
		info->recv_cq = NULL;
		goto alloc_cq_failed;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.event_handler = smbd_qp_async_error_upcall;
	qp_attr.qp_context = info;
	qp_attr.cap.max_send_wr = info->send_credit_target;
	qp_attr.cap.max_recv_wr = info->receive_credit_max;
	qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SGE;
	qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_SGE;
	qp_attr.cap.max_inline_data = 0;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = info->send_cq;
	qp_attr.recv_cq = info->recv_cq;
	qp_attr.port_num = ~0;

	rc = rdma_create_qp(info->id, info->pd, &qp_attr);
	if (rc) {
		log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
		goto create_qp_failed;
	}

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.initiator_depth = 0;

	conn_param.responder_resources =
		info->id->device->attrs.max_qp_rd_atom
			< SMBD_CM_RESPONDER_RESOURCES ?
		info->id->device->attrs.max_qp_rd_atom :
		SMBD_CM_RESPONDER_RESOURCES;
	info->responder_resources = conn_param.responder_resources;
	log_rdma_mr(INFO, "responder_resources=%d\n",
		info->responder_resources);

	/* Need to send IRD/ORD in private data for iWARP */
	info->id->device->ops.get_port_immutable(
		info->id->device, info->id->port_num, &port_immutable);
	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
		ird_ord_hdr[0] = info->responder_resources;
		ird_ord_hdr[1] = 1;
		conn_param.private_data = ird_ord_hdr;
		conn_param.private_data_len = sizeof(ird_ord_hdr);
	} else {
		conn_param.private_data = NULL;
		conn_param.private_data_len = 0;
	}

	conn_param.retry_count = SMBD_CM_RETRY;
	conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
	conn_param.flow_control = 0;

	log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
		&addr_in->sin_addr, port);

	init_waitqueue_head(&info->conn_wait);
	init_waitqueue_head(&info->disconn_wait);
	init_waitqueue_head(&info->wait_reassembly_queue);
	rc = rdma_connect(info->id, &conn_param);
	if (rc) {
		log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
		goto rdma_connect_failed;
	}

	wait_event_interruptible(
		info->conn_wait, info->transport_status != SMBD_CONNECTING);

	if (info->transport_status != SMBD_CONNECTED) {
		log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
		goto rdma_connect_failed;
	}

	log_rdma_event(INFO, "rdma_connect connected\n");

	rc = allocate_caches_and_workqueue(info);
	if (rc) {
		log_rdma_event(ERR, "cache allocation failed\n");
		goto allocate_cache_failed;
	}

	init_waitqueue_head(&info->wait_send_queue);
	INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
	INIT_DELAYED_WORK(&info->send_immediate_work, send_immediate_work);
	queue_delayed_work(info->workqueue, &info->idle_timer_work,
		info->keep_alive_interval*HZ);

	init_waitqueue_head(&info->wait_send_pending);
	atomic_set(&info->send_pending, 0);

	init_waitqueue_head(&info->wait_send_payload_pending);
	atomic_set(&info->send_payload_pending, 0);

	INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
	INIT_WORK(&info->recv_done_work, smbd_recv_done_work);
	INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
	info->new_credits_offered = 0;
	spin_lock_init(&info->lock_new_credits_offered);

	rc = smbd_negotiate(info);
	if (rc) {
		log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc);
		goto negotiation_failed;
	}

	rc = allocate_mr_list(info);
	if (rc) {
		log_rdma_mr(ERR, "memory registration allocation failed\n");
		goto allocate_mr_failed;
	}

	return info;

allocate_mr_failed:
	/* At this point, we need a full transport shutdown */
	smbd_destroy(server);
	return NULL;

negotiation_failed:
	cancel_delayed_work_sync(&info->idle_timer_work);
	destroy_caches_and_workqueue(info);
	info->transport_status = SMBD_NEGOTIATE_FAILED;
	init_waitqueue_head(&info->conn_wait);
	rdma_disconnect(info->id);
	wait_event(info->conn_wait,
		info->transport_status == SMBD_DISCONNECTED);

allocate_cache_failed:
rdma_connect_failed:
	rdma_destroy_qp(info->id);

create_qp_failed:
alloc_cq_failed:
	if (info->send_cq)
		ib_free_cq(info->send_cq);
	if (info->recv_cq)
		ib_free_cq(info->recv_cq);

config_failed:
	ib_dealloc_pd(info->pd);
	rdma_destroy_id(info->id);

create_id_failed:
	kfree(info);
	return NULL;
}
struct smbd_connection *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr)
{
	struct smbd_connection *ret;
	int port = SMBD_PORT;

try_again:
	ret = _smbd_get_connection(server, dstaddr, port);

	/* Try SMB_PORT if SMBD_PORT doesn't work */
	if (!ret && port == SMBD_PORT) {
		port = SMB_PORT;
		goto try_again;
	}
	return ret;
}
/*
 * Receive data from receive reassembly queue
 * All the incoming data packets are placed in reassembly queue
 * buf: the buffer to read data into
 * size: the length of data to read
 * return value: actual data read
 * Note: this implementation copies the data from reassembly queue to receive
 * buffers used by upper layer. This is not the optimal code path. A better way
 * to do it is to not have upper layer allocate its receive buffers but rather
 * borrow the buffer from reassembly queue, and return it after data is
 * consumed. But this will require more changes to upper layer code, and also
 * need to consider packet boundaries while they are still being reassembled.
 */
static int smbd_recv_buf(struct smbd_connection *info, char *buf,
		unsigned int size)
{
	struct smbd_response *response;
	struct smbd_data_transfer *data_transfer;
	int to_copy, to_read, data_read, offset;
	u32 data_length, remaining_data_length, data_offset;
	int rc;

again:
	/*
	 * No need to hold the reassembly queue lock all the time as we are
	 * the only one reading from the front of the queue. The transport
	 * may add more entries to the back of the queue at the same time
	 */
	log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
		info->reassembly_data_length);
	if (info->reassembly_data_length >= size) {
		int queue_length;
		int queue_removed = 0;

		/*
		 * Need to make sure reassembly_data_length is read before
		 * reading reassembly_queue_length and calling
		 * _get_first_reassembly. This call is lock free
		 * as we never read at the end of the queue which is being
		 * updated in SOFTIRQ as more data is received
		 */
		virt_rmb();
		queue_length = info->reassembly_queue_length;
		data_read = 0;
		to_read = size;
		offset = info->first_entry_offset;
		while (data_read < size) {
			response = _get_first_reassembly(info);
			data_transfer = smbd_response_payload(response);
			data_length = le32_to_cpu(data_transfer->data_length);
			remaining_data_length =
				le32_to_cpu(
					data_transfer->remaining_data_length);
			data_offset = le32_to_cpu(data_transfer->data_offset);

			/*
			 * The upper layer expects RFC1002 length at the
			 * beginning of the payload. Return it to indicate
			 * the total length of the packet. This minimizes the
			 * change to upper layer packet processing logic. This
			 * will eventually be removed when an intermediate
			 * transport layer is added
			 */
			if (response->first_segment && size == 4) {
				unsigned int rfc1002_len =
					data_length + remaining_data_length;
				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
				data_read = 4;
				response->first_segment = false;
				log_read(INFO, "returning rfc1002 length %d\n",
					rfc1002_len);
				goto read_rfc1002_done;
			}

			to_copy = min_t(int, data_length - offset, to_read);
			memcpy(
				buf + data_read,
				(char *)data_transfer + data_offset + offset,
				to_copy);

			/* move on to the next buffer? */
			if (to_copy == data_length - offset) {
				queue_length--;
				/*
				 * No need to lock if we are not at the
				 * end of the queue
				 */
				if (queue_length)
					list_del(&response->list);
				else {
					spin_lock_irq(
						&info->reassembly_queue_lock);
					list_del(&response->list);
					spin_unlock_irq(
						&info->reassembly_queue_lock);
				}
				queue_removed++;
				info->count_reassembly_queue--;
				info->count_dequeue_reassembly_queue++;
				put_receive_buffer(info, response);
				offset = 0;
				log_read(INFO, "put_receive_buffer offset=0\n");
			} else
				offset += to_copy;

			to_read -= to_copy;
			data_read += to_copy;

			log_read(INFO, "_get_first_reassembly memcpy %d bytes "
				"data_transfer_length-offset=%d after that "
				"to_read=%d data_read=%d offset=%d\n",
				to_copy, data_length - offset,
				to_read, data_read, offset);
		}

		spin_lock_irq(&info->reassembly_queue_lock);
		info->reassembly_data_length -= data_read;
		info->reassembly_queue_length -= queue_removed;
		spin_unlock_irq(&info->reassembly_queue_lock);

		info->first_entry_offset = offset;
		log_read(INFO, "returning to thread data_read=%d "
			"reassembly_data_length=%d first_entry_offset=%d\n",
			data_read, info->reassembly_data_length,
			info->first_entry_offset);
read_rfc1002_done:
		return data_read;
	}

	log_read(INFO, "wait_event on more data\n");
	rc = wait_event_interruptible(
		info->wait_reassembly_queue,
		info->reassembly_data_length >= size ||
			info->transport_status != SMBD_CONNECTED);
	/* Don't return any data if interrupted */
	if (rc)
		return rc;

	if (info->transport_status != SMBD_CONNECTED) {
		log_read(ERR, "disconnected\n");
		return -ECONNABORTED;
	}

	goto again;
}
/*
 * Receive a page from receive reassembly queue
 * page: the page to read data into
 * page_offset: the offset into the page to start reading at
 * to_read: the length of data to read
 * return value: actual data read
 */
static int smbd_recv_page(struct smbd_connection *info,
		struct page *page, unsigned int page_offset,
		unsigned int to_read)
{
	int ret;
	char *to_address;
	void *page_address;

	/* make sure we have the page ready for read */
	ret = wait_event_interruptible(
		info->wait_reassembly_queue,
		info->reassembly_data_length >= to_read ||
			info->transport_status != SMBD_CONNECTED);
	if (ret)
		return ret;

	/* now we can read from reassembly queue and not sleep */
	page_address = kmap_atomic(page);
	to_address = (char *) page_address + page_offset;

	log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
		page, to_address, to_read);

	ret = smbd_recv_buf(info, to_address, to_read);
	kunmap_atomic(page_address);

	return ret;
}
/*
 * Receive data from transport
 * msg: a msghdr pointing to the buffer, can be ITER_KVEC or ITER_BVEC
 * return: total bytes read, or 0. SMB Direct will not do a partial read.
 */
int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
{
	char *buf;
	struct page *page;
	unsigned int to_read, page_offset;
	int rc;

	if (iov_iter_rw(&msg->msg_iter) == WRITE) {
		/* It's a bug in upper layer to get there */
		cifs_dbg(VFS, "CIFS: invalid msg iter dir %u\n",
			 iov_iter_rw(&msg->msg_iter));
		rc = -EINVAL;
		goto out;
	}

	switch (iov_iter_type(&msg->msg_iter)) {
	case ITER_KVEC:
		buf = msg->msg_iter.kvec->iov_base;
		to_read = msg->msg_iter.kvec->iov_len;
		rc = smbd_recv_buf(info, buf, to_read);
		break;

	case ITER_BVEC:
		page = msg->msg_iter.bvec->bv_page;
		page_offset = msg->msg_iter.bvec->bv_offset;
		to_read = msg->msg_iter.bvec->bv_len;
		rc = smbd_recv_page(info, page, page_offset, to_read);
		break;

	default:
		/* It's a bug in upper layer to get there */
		cifs_dbg(VFS, "CIFS: invalid msg type %d\n",
			 iov_iter_type(&msg->msg_iter));
		rc = -EINVAL;
	}

out:
	/* SMBDirect will read it all or nothing */
	if (rc > 0)
		msg->msg_iter.count = 0;
	return rc;
}
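/*
 * Hypothetical caller sketch (illustrative only, not part of this file):
 * pulling the 4-byte RFC1002 length header off the transport through the
 * ITER_KVEC path above. Variable names are made up for the example.
 *
 *	struct kvec iov = { .iov_base = hdr_buf, .iov_len = 4 };
 *	struct msghdr msg = { };
 *
 *	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, 4);
 *	rc = smbd_recv(server->smbd_conn, &msg);
 *
 * On success rc is 4; smbd_recv never returns a partial read.
 */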
/*
 * Send data to transport
 * Each rqst is transported as an SMBDirect payload
 * rqst: the data to write
 * return value: 0 if successfully written, otherwise error code
 */
int smbd_send(struct TCP_Server_Info *server,
	int num_rqst, struct smb_rqst *rqst_array)
{
	struct smbd_connection *info = server->smbd_conn;
	struct kvec vec;
	int nvecs;
	int size;
	unsigned int buflen, remaining_data_length;
	int start, i, j;
	int max_iov_size =
		info->max_send_size - sizeof(struct smbd_data_transfer);
	struct kvec *iov;
	int rc;
	struct smb_rqst *rqst;
	int rqst_idx;

	if (info->transport_status != SMBD_CONNECTED) {
		rc = -EAGAIN;
		goto done;
	}

	/*
	 * Add in the page array if there is one. The caller needs to set
	 * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
	 * ends at page boundary
	 */
	remaining_data_length = 0;
	for (i = 0; i < num_rqst; i++)
		remaining_data_length += smb_rqst_len(server, &rqst_array[i]);

	if (remaining_data_length + sizeof(struct smbd_data_transfer) >
		info->max_fragmented_send_size) {
		log_write(ERR, "payload size %d > max size %d\n",
			remaining_data_length, info->max_fragmented_send_size);
		rc = -EINVAL;
		goto done;
	}

	log_write(INFO, "num_rqst=%d total length=%u\n",
			num_rqst, remaining_data_length);
	rqst_idx = 0;
next_rqst:
	rqst = &rqst_array[rqst_idx];
	iov = rqst->rq_iov;

	cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n",
		rqst_idx, smb_rqst_len(server, rqst));
	for (i = 0; i < rqst->rq_nvec; i++)
		dump_smb(iov[i].iov_base, iov[i].iov_len);

	log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d rq_tailsz=%d buflen=%lu\n",
		rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
		rqst->rq_tailsz, smb_rqst_len(server, rqst));

	start = i = 0;
	buflen = 0;
	while (true) {
		buflen += iov[i].iov_len;
		if (buflen > max_iov_size) {
			if (i > start) {
				remaining_data_length -=
					(buflen-iov[i].iov_len);
				log_write(INFO, "sending iov[] from start=%d i=%d nvecs=%d remaining_data_length=%d\n",
					start, i, i - start,
					remaining_data_length);
				rc = smbd_post_send_data(
					info, &iov[start], i-start,
					remaining_data_length);
				if (rc)
					goto done;
			} else {
				/* iov[start] is too big, break it */
				nvecs = (buflen+max_iov_size-1)/max_iov_size;
				log_write(INFO, "iov[%d] iov_base=%p buflen=%d break to %d vectors\n",
					start, iov[start].iov_base,
					buflen, nvecs);
				for (j = 0; j < nvecs; j++) {
					vec.iov_base =
						(char *)iov[start].iov_base +
						j*max_iov_size;
					vec.iov_len = max_iov_size;
					if (j == nvecs-1)
						vec.iov_len =
							buflen -
							max_iov_size*(nvecs-1);
					remaining_data_length -= vec.iov_len;
					log_write(INFO, "sending vec j=%d iov_base=%p iov_len=%zu remaining_data_length=%d\n",
						j, vec.iov_base, vec.iov_len,
						remaining_data_length);
					rc = smbd_post_send_data(
						info, &vec, 1,
						remaining_data_length);
					if (rc)
						goto done;
				}
				i++;
				if (i == rqst->rq_nvec)
					break;
			}
			start = i;
			buflen = 0;
		} else {
			i++;
			if (i == rqst->rq_nvec) {
				/* send out all remaining vecs */
				remaining_data_length -= buflen;
				log_write(INFO, "sending iov[] from start=%d i=%d nvecs=%d remaining_data_length=%d\n",
					start, i, i - start,
					remaining_data_length);
				rc = smbd_post_send_data(info, &iov[start],
					i-start, remaining_data_length);
				if (rc)
					goto done;
				break;
			}
		}
		log_write(INFO, "looping i=%d buflen=%d\n", i, buflen);
	}
	/* now sending pages if there are any */
	for (i = 0; i < rqst->rq_npages; i++) {
		unsigned int offset;

		rqst_page_get_length(rqst, i, &buflen, &offset);
		nvecs = (buflen + max_iov_size - 1) / max_iov_size;
		log_write(INFO, "sending pages buflen=%d nvecs=%d\n",
			buflen, nvecs);
		for (j = 0; j < nvecs; j++) {
			size = max_iov_size;
			if (j == nvecs-1)
				size = buflen - j*max_iov_size;
			remaining_data_length -= size;
			log_write(INFO, "sending pages i=%d offset=%d size=%d remaining_data_length=%d\n",
				i, j*max_iov_size+offset, size,
				remaining_data_length);
			rc = smbd_post_send_page(
				info, rqst->rq_pages[i],
				j*max_iov_size + offset,
				size, remaining_data_length);
			if (rc)
				goto done;
		}
	}

	rqst_idx++;
	if (rqst_idx < num_rqst)
		goto next_rqst;

done:
	/*
	 * As an optimization, we don't wait for individual I/O to finish
	 * before sending the next one.
	 * Send them all and wait for the pending send count to reach 0
	 * to know all the I/Os have been posted and we can return
	 */
	wait_event(info->wait_send_payload_pending,
		atomic_read(&info->send_payload_pending) == 0);

	return rc;
}
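/*
 * Worked example of the fragmentation arithmetic above (illustrative,
 * assuming the default smbd_max_send_size of 1364 and a 24-byte
 * struct smbd_data_transfer header):
 *
 *	max_iov_size = 1364 - 24 = 1340
 *
 * A single 4096-byte iovec then cannot fit in one send; it is broken
 * into nvecs = (4096 + 1340 - 1) / 1340 = 4 vectors, the first three
 * carrying 1340 bytes each and the last carrying the remaining
 * 4096 - 3 * 1340 = 76 bytes. remaining_data_length in each packet
 * tells the peer how much of the fragmented payload is still to come.
 */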
static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbd_mr *mr;
	struct ib_cqe *cqe;

	if (wc->status) {
		log_rdma_mr(ERR, "status=%d\n", wc->status);
		cqe = wc->wr_cqe;
		mr = container_of(cqe, struct smbd_mr, cqe);
		smbd_disconnect_rdma_connection(mr->conn);
	}
}
/*
 * The work queue function that recovers MRs
 * We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used
 * again. Both calls are slow, so finish them in a workqueue. This will not
 * block the I/O path.
 * There is one workqueue that recovers MRs, there is no need to lock as the
 * I/O requests calling smbd_register_mr will never update the links in the
 * mr_list.
 */
static void smbd_mr_recovery_work(struct work_struct *work)
{
	struct smbd_connection *info =
		container_of(work, struct smbd_connection, mr_recovery_work);
	struct smbd_mr *smbdirect_mr;
	int rc;

	list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
		if (smbdirect_mr->state == MR_INVALIDATED)
			ib_dma_unmap_sg(
				info->id->device, smbdirect_mr->sgl,
				smbdirect_mr->sgl_count,
				smbdirect_mr->dir);
		else if (smbdirect_mr->state == MR_ERROR) {

			/* recover this MR entry */
			rc = ib_dereg_mr(smbdirect_mr->mr);
			if (rc) {
				log_rdma_mr(ERR,
					"ib_dereg_mr failed rc=%x\n",
					rc);
				smbd_disconnect_rdma_connection(info);
				continue;
			}

			smbdirect_mr->mr = ib_alloc_mr(
				info->pd, info->mr_type,
				info->max_frmr_depth);
			if (IS_ERR(smbdirect_mr->mr)) {
				log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
					info->mr_type,
					info->max_frmr_depth);
				smbd_disconnect_rdma_connection(info);
				continue;
			}
		} else
			/* This MR is being used, don't recover it */
			continue;

		smbdirect_mr->state = MR_READY;

		/* smbdirect_mr->state is updated by this function
		 * and is read and updated by I/O issuing CPUs trying
		 * to get a MR, the call to atomic_inc_return
		 * implies a memory barrier and guarantees this
		 * value is updated before waking up any calls to
		 * get_mr() from the I/O issuing CPUs
		 */
		if (atomic_inc_return(&info->mr_ready_count) == 1)
			wake_up_interruptible(&info->wait_mr);
	}
}
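/*
 * Producer/consumer pairing around mr_ready_count (illustrative
 * summary): the recovery work above is the only producer of MR_READY
 * entries, and I/O issuing CPUs in get_mr() are the consumers:
 *
 *	recovery work			get_mr()
 *	-------------			--------
 *	state = MR_READY		wait_event_interruptible(wait_mr,
 *	atomic_inc_return(...)			mr_ready_count || ...)
 *	wake_up_interruptible(...)	atomic_dec(&mr_ready_count)
 *
 * The full barrier in atomic_inc_return() orders the MR_READY store
 * before the counter update that the waiter tests.
 */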
static void destroy_mr_list(struct smbd_connection *info)
{
	struct smbd_mr *mr, *tmp;

	cancel_work_sync(&info->mr_recovery_work);
	list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
		if (mr->state == MR_INVALIDATED)
			ib_dma_unmap_sg(info->id->device, mr->sgl,
				mr->sgl_count, mr->dir);
		ib_dereg_mr(mr->mr);
		kfree(mr->sgl);
		kfree(mr);
	}
}
/*
 * Allocate MRs used for RDMA read/write
 * The number of MRs will not exceed hardware capability in responder_resources
 * All MRs are kept in mr_list. A MR can be recovered after it's used
 * Recovery is done in smbd_mr_recovery_work. The content of a list entry
 * changes as MRs are used and recovered for I/O, but the list links do not
 */
static int allocate_mr_list(struct smbd_connection *info)
{
	int i;
	struct smbd_mr *smbdirect_mr, *tmp;

	INIT_LIST_HEAD(&info->mr_list);
	init_waitqueue_head(&info->wait_mr);
	spin_lock_init(&info->mr_list_lock);
	atomic_set(&info->mr_ready_count, 0);
	atomic_set(&info->mr_used_count, 0);
	init_waitqueue_head(&info->wait_for_mr_cleanup);
	/* Allocate more MRs (2x) than hardware responder_resources */
	for (i = 0; i < info->responder_resources * 2; i++) {
		smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
		if (!smbdirect_mr)
			goto out;
		smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
					info->max_frmr_depth);
		if (IS_ERR(smbdirect_mr->mr)) {
			log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
				info->mr_type, info->max_frmr_depth);
			goto out;
		}
		smbdirect_mr->sgl = kcalloc(
					info->max_frmr_depth,
					sizeof(struct scatterlist),
					GFP_KERNEL);
		if (!smbdirect_mr->sgl) {
			log_rdma_mr(ERR, "failed to allocate sgl\n");
			ib_dereg_mr(smbdirect_mr->mr);
			goto out;
		}
		smbdirect_mr->state = MR_READY;
		smbdirect_mr->conn = info;

		list_add_tail(&smbdirect_mr->list, &info->mr_list);
		atomic_inc(&info->mr_ready_count);
	}
	INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
	return 0;

out:
	kfree(smbdirect_mr);

	list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
		ib_dereg_mr(smbdirect_mr->mr);
		kfree(smbdirect_mr->sgl);
		kfree(smbdirect_mr);
	}
	return -ENOMEM;
}
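/*
 * Sizing example for the MR pool above (illustrative, using the default
 * tunables from the top of this file): with responder_resources capped
 * at SMBD_CM_RESPONDER_RESOURCES (32), the loop creates 32 * 2 = 64 MRs,
 * each carrying an sgl of up to smbd_max_frmr_depth (2048) scatterlist
 * entries. Lower hardware limits shrink both factors.
 */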
/*
 * Get a MR from mr_list. This function waits until there is at least one
 * MR available in the list. It may access the list while the
 * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock
 * as they never modify the same places. However, there may be several CPUs
 * issuing I/O trying to get a MR at the same time, and mr_list_lock is used
 * to serialize them.
 */
static struct smbd_mr *get_mr(struct smbd_connection *info)
{
	struct smbd_mr *ret;
	int rc;
again:
	rc = wait_event_interruptible(info->wait_mr,
		atomic_read(&info->mr_ready_count) ||
		info->transport_status != SMBD_CONNECTED);
	if (rc) {
		log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
		return NULL;
	}

	if (info->transport_status != SMBD_CONNECTED) {
		log_rdma_mr(ERR, "info->transport_status=%x\n",
			info->transport_status);
		return NULL;
	}

	spin_lock(&info->mr_list_lock);
	list_for_each_entry(ret, &info->mr_list, list) {
		if (ret->state == MR_READY) {
			ret->state = MR_REGISTERED;
			spin_unlock(&info->mr_list_lock);
			atomic_dec(&info->mr_ready_count);
			atomic_inc(&info->mr_used_count);
			return ret;
		}
	}

	spin_unlock(&info->mr_list_lock);
	/*
	 * It is possible that we could fail to get a MR because other
	 * processes may try to acquire a MR at the same time. If this is
	 * the case, retry it.
	 */
	goto again;
}
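/*
 * Summary of the MR lifecycle implied by the functions above and below
 * (illustrative, using the state names from this file):
 *
 *	MR_READY --get_mr()--> MR_REGISTERED
 *	MR_REGISTERED --smbd_deregister_mr()--> MR_INVALIDATED
 *	                                        (or MR_ERROR on failure)
 *	MR_INVALIDATED/MR_ERROR --smbd_mr_recovery_work()--> MR_READY
 *
 * Only MR_READY entries are handed out for I/O; invalidated or failed
 * entries wait for the recovery work to unmap or reallocate them.
 */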
/*
 * Register memory for RDMA read/write
 * pages[]: the list of pages to register memory with
 * num_pages: the number of pages to register
 * offset: the offset into the first page
 * tailsz: if non-zero, the bytes to register in the last page
 * writing: true if this is a RDMA write (SMB read), false for RDMA read
 * need_invalidate: true if this MR needs to be locally invalidated after I/O
 * return value: the MR registered, NULL if failed.
 */
struct smbd_mr *smbd_register_mr(
	struct smbd_connection *info, struct page *pages[], int num_pages,
	int offset, int tailsz, bool writing, bool need_invalidate)
{
	struct smbd_mr *smbdirect_mr;
	int rc, i;
	enum dma_data_direction dir;
	struct ib_reg_wr *reg_wr;

	if (num_pages > info->max_frmr_depth) {
		log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
			num_pages, info->max_frmr_depth);
		return NULL;
	}

	smbdirect_mr = get_mr(info);
	if (!smbdirect_mr) {
		log_rdma_mr(ERR, "get_mr returning NULL\n");
		return NULL;
	}
	smbdirect_mr->need_invalidate = need_invalidate;
	smbdirect_mr->sgl_count = num_pages;
	sg_init_table(smbdirect_mr->sgl, num_pages);

	log_rdma_mr(INFO, "num_pages=0x%x offset=0x%x tailsz=0x%x\n",
			num_pages, offset, tailsz);

	if (num_pages == 1) {
		sg_set_page(&smbdirect_mr->sgl[0], pages[0], tailsz, offset);
		goto skip_multiple_pages;
	}

	/* We have at least two pages to register */
	sg_set_page(
		&smbdirect_mr->sgl[0], pages[0], PAGE_SIZE - offset, offset);
	i = 1;
	while (i < num_pages - 1) {
		sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
		i++;
	}
	sg_set_page(&smbdirect_mr->sgl[i], pages[i],
		tailsz ? tailsz : PAGE_SIZE, 0);

skip_multiple_pages:
	dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	smbdirect_mr->dir = dir;
	rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);
	if (!rc) {
		log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
			num_pages, dir, rc);
		goto dma_map_error;
	}

	rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
		NULL, PAGE_SIZE);
	if (rc != num_pages) {
		log_rdma_mr(ERR,
			"ib_map_mr_sg failed rc = %d num_pages = %x\n",
			rc, num_pages);
		goto map_mr_error;
	}

	ib_update_fast_reg_key(smbdirect_mr->mr,
		ib_inc_rkey(smbdirect_mr->mr->rkey));
	reg_wr = &smbdirect_mr->wr;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	smbdirect_mr->cqe.done = register_mr_done;
	reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = IB_SEND_SIGNALED;
	reg_wr->mr = smbdirect_mr->mr;
	reg_wr->key = smbdirect_mr->mr->rkey;
	reg_wr->access = writing ?
		IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
		IB_ACCESS_REMOTE_READ;

	/*
	 * There is no need to wait for completion of ib_post_send
	 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
	 * on the next ib_post_send when we actually send I/O to the remote peer
	 */
	rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
	if (!rc)
		return smbdirect_mr;

	log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n",
		rc, reg_wr->key);

	/* If all failed, attempt to recover this MR by setting it MR_ERROR */
map_mr_error:
	ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl,
		smbdirect_mr->sgl_count, smbdirect_mr->dir);

dma_map_error:
	smbdirect_mr->state = MR_ERROR;
	if (atomic_dec_and_test(&info->mr_used_count))
		wake_up(&info->wait_for_mr_cleanup);

	smbd_disconnect_rdma_connection(info);

	return NULL;
}
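/*
 * Hypothetical caller sketch (illustrative only, error handling elided):
 * registering a page array so the peer can RDMA-write an SMB read
 * response directly into it, then advertising the region in the request.
 * The v1 descriptor field names follow the SMB2 Buffer Descriptor V1
 * layout and are assumptions of this sketch, not code in this file.
 *
 *	mr = smbd_register_mr(server->smbd_conn, pages, npages,
 *			      first_page_offset, tailsz,
 *			      true, need_invalidate);
 *	if (!mr)
 *		return -EAGAIN;
 *	v1->offset = cpu_to_le64(mr->mr->iova);
 *	v1->token = cpu_to_le32(mr->mr->rkey);
 *	v1->length = cpu_to_le32(io_length);
 *	(issue the SMB request and wait for the response)
 *	smbd_deregister_mr(mr);
 */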
static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbd_mr *smbdirect_mr;
	struct ib_cqe *cqe;

	cqe = wc->wr_cqe;
	smbdirect_mr = container_of(cqe, struct smbd_mr, cqe);
	smbdirect_mr->state = MR_INVALIDATED;
	if (wc->status != IB_WC_SUCCESS) {
		log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status);
		smbdirect_mr->state = MR_ERROR;
	}
	complete(&smbdirect_mr->invalidate_done);
}
/*
 * Deregister a MR after I/O is done
 * This function may wait if remote invalidation is not used
 * and we have to locally invalidate the buffer to prevent the data from
 * being modified by the remote peer after the upper layer consumes it
 */
int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
{
	struct ib_send_wr *wr;
	struct smbd_connection *info = smbdirect_mr->conn;
	int rc = 0;

	if (smbdirect_mr->need_invalidate) {
		/* Need to finish local invalidation before returning */
		wr = &smbdirect_mr->inv_wr;
		wr->opcode = IB_WR_LOCAL_INV;
		smbdirect_mr->cqe.done = local_inv_done;
		wr->wr_cqe = &smbdirect_mr->cqe;
		wr->num_sge = 0;
		wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey;
		wr->send_flags = IB_SEND_SIGNALED;

		init_completion(&smbdirect_mr->invalidate_done);
		rc = ib_post_send(info->id->qp, wr, NULL);
		if (rc) {
			log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
			smbd_disconnect_rdma_connection(info);
			goto done;
		}
		wait_for_completion(&smbdirect_mr->invalidate_done);
		smbdirect_mr->need_invalidate = false;
	} else
		/*
		 * For remote invalidation, just set it to MR_INVALIDATED
		 * and defer to mr_recovery_work to recover the MR for next use
		 */
		smbdirect_mr->state = MR_INVALIDATED;

	/*
	 * Schedule the work to do MR recovery for future I/Os
	 * MR recovery is slow and we don't want it to block the current I/O
	 */
	queue_work(info->workqueue, &info->mr_recovery_work);

done:
	if (atomic_dec_and_test(&info->mr_used_count))
		wake_up(&info->wait_for_mr_cleanup);

	return rc;
}