/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include <rdma/opa_addr.h>

#include "core_priv.h"
#define IB_SA_LOCAL_SVC_TIMEOUT_MIN     100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX     200000
#define IB_SA_CPI_MAX_RETRY_CNT         3
#define IB_SA_CPI_RETRY_WAIT            1000 /* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
    struct ib_ah *ah;
    struct kref   ref;
    u16           pkey_index;
    u8            src_path_mask;
};
enum rdma_class_port_info_type {
    RDMA_CLASS_PORT_INFO_IB,
    RDMA_CLASS_PORT_INFO_OPA
};
struct rdma_class_port_info {
    enum rdma_class_port_info_type type;
    union {
        struct ib_class_port_info ib;
        struct opa_class_port_info opa;
    };
};
struct ib_sa_classport_cache {
    bool valid;
    int retry_cnt;
    struct rdma_class_port_info data;
};
struct ib_sa_port {
    struct ib_mad_agent *agent;
    struct ib_sa_sm_ah  *sm_ah;
    struct work_struct   update_task;
    struct ib_sa_classport_cache classport_info;
    struct delayed_work ib_cpi_work;
    spinlock_t classport_lock; /* protects class port info set */
    spinlock_t ah_lock;
    u8         port_num;
};
struct ib_sa_device {
    int start_port, end_port;
    struct ib_event_handler event_handler;
    struct ib_sa_port port[0];
};
struct ib_sa_query {
    void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
    void (*release)(struct ib_sa_query *);
    struct ib_sa_client    *client;
    struct ib_sa_port      *port;
    struct ib_mad_send_buf *mad_buf;
    struct ib_sa_sm_ah     *sm_ah;
    int                     id;
    u32                     flags;
    struct list_head        list; /* Local svc request list */
    u32                     seq; /* Local svc request sequence number */
    unsigned long           timeout; /* Local svc timeout */
    u8                      path_use; /* How will the pathrecord be used */
};
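/*
 * Per-query flag bits:
 * IB_SA_ENABLE_LOCAL_SERVICE - try the netlink local service (a
 *      userspace resolver) before sending the MAD to the SA.
 * IB_SA_CANCEL - the query was canceled while on the netlink request
 *      list; the timeout routine completes it.
 * IB_SA_QUERY_OPA - the query is sent with the OPA management base
 *      version rather than the IB one.
 */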
#define IB_SA_ENABLE_LOCAL_SERVICE  0x00000001
#define IB_SA_CANCEL                0x00000002
#define IB_SA_QUERY_OPA             0x00000004
struct ib_sa_service_query {
    void (*callback)(int, struct ib_sa_service_rec *, void *);
    void *context;
    struct ib_sa_query sa_query;
};
struct ib_sa_path_query {
    void (*callback)(int, struct sa_path_rec *, void *);
    void *context;
    struct ib_sa_query sa_query;
    struct sa_path_rec *conv_pr;
};
struct ib_sa_guidinfo_query {
    void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
    void *context;
    struct ib_sa_query sa_query;
};
struct ib_sa_classport_info_query {
    void (*callback)(void *);
    void *context;
    struct ib_sa_query sa_query;
};
struct ib_sa_mcmember_query {
    void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
    void *context;
    struct ib_sa_query sa_query;
};
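/*
 * State for resolving path queries through the RDMA_NL_LS netlink
 * multicast group: pending requests wait on ib_nl_request_list, tagged
 * with a sequence number that is matched against nlmsg_seq in the
 * response, while ib_nl_timed_work falls back to sending the MAD to the
 * SA when a request times out.
 */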
static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
    [LS_NLA_TYPE_PATH_RECORD] = {.type = NLA_BINARY,
                                 .len = sizeof(struct ib_path_rec_data)},
    [LS_NLA_TYPE_TIMEOUT]     = {.type = NLA_U32},
    [LS_NLA_TYPE_SERVICE_ID]  = {.type = NLA_U64},
    [LS_NLA_TYPE_DGID]        = {.type = NLA_BINARY,
                                 .len = sizeof(struct rdma_nla_ls_gid)},
    [LS_NLA_TYPE_SGID]        = {.type = NLA_BINARY,
                                 .len = sizeof(struct rdma_nla_ls_gid)},
    [LS_NLA_TYPE_TCLASS]      = {.type = NLA_U8},
    [LS_NLA_TYPE_PKEY]        = {.type = NLA_U16},
    [LS_NLA_TYPE_QOS_CLASS]   = {.type = NLA_U16},
};
static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
    .name   = "sa",
    .add    = ib_sa_add_one,
    .remove = ib_sa_remove_one
};
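/*
 * Outstanding queries are tracked in query_idr so they can be looked up
 * (and canceled) by ID. MAD transaction IDs combine the agent's hi_tid
 * with the tid counter below, which is seeded with random bytes in
 * ib_sa_init().
 */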
static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;
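/*
 * The ib_field tables below drive ib_pack()/ib_unpack(): each entry maps
 * one structure member to its offset and width within the on-wire SA
 * attribute layout.
 */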
#define PATH_REC_FIELD(field) \
    .struct_offset_bytes = offsetof(struct sa_path_rec, field), \
    .struct_size_bytes = sizeof((struct sa_path_rec *)0)->field, \
    .field_name = "sa_path_rec:" #field
static const struct ib_field path_rec_table[] = {
    { PATH_REC_FIELD(service_id),
    { PATH_REC_FIELD(dgid),
    { PATH_REC_FIELD(sgid),
    { PATH_REC_FIELD(ib.dlid),
    { PATH_REC_FIELD(ib.slid),
    { PATH_REC_FIELD(ib.raw_traffic),
    { PATH_REC_FIELD(flow_label),
    { PATH_REC_FIELD(hop_limit),
    { PATH_REC_FIELD(traffic_class),
    { PATH_REC_FIELD(reversible),
    { PATH_REC_FIELD(numb_path),
    { PATH_REC_FIELD(pkey),
    { PATH_REC_FIELD(qos_class),
    { PATH_REC_FIELD(sl),
    { PATH_REC_FIELD(mtu_selector),
    { PATH_REC_FIELD(mtu),
    { PATH_REC_FIELD(rate_selector),
    { PATH_REC_FIELD(rate),
    { PATH_REC_FIELD(packet_life_time_selector),
    { PATH_REC_FIELD(packet_life_time),
    { PATH_REC_FIELD(preference),
#define OPA_PATH_REC_FIELD(field) \
    .struct_offset_bytes = \
        offsetof(struct sa_path_rec, field), \
    .struct_size_bytes = \
        sizeof((struct sa_path_rec *)0)->field, \
    .field_name = "sa_path_rec:" #field
static const struct ib_field opa_path_rec_table[] = {
    { OPA_PATH_REC_FIELD(service_id),
    { OPA_PATH_REC_FIELD(dgid),
    { OPA_PATH_REC_FIELD(sgid),
    { OPA_PATH_REC_FIELD(opa.dlid),
    { OPA_PATH_REC_FIELD(opa.slid),
    { OPA_PATH_REC_FIELD(opa.raw_traffic),
    { OPA_PATH_REC_FIELD(flow_label),
    { OPA_PATH_REC_FIELD(hop_limit),
    { OPA_PATH_REC_FIELD(traffic_class),
    { OPA_PATH_REC_FIELD(reversible),
    { OPA_PATH_REC_FIELD(numb_path),
    { OPA_PATH_REC_FIELD(pkey),
    { OPA_PATH_REC_FIELD(opa.l2_8B),
    { OPA_PATH_REC_FIELD(opa.l2_10B),
    { OPA_PATH_REC_FIELD(opa.l2_9B),
    { OPA_PATH_REC_FIELD(opa.l2_16B),
    { OPA_PATH_REC_FIELD(opa.qos_type),
    { OPA_PATH_REC_FIELD(opa.qos_priority),
    { OPA_PATH_REC_FIELD(sl),
    { OPA_PATH_REC_FIELD(mtu_selector),
    { OPA_PATH_REC_FIELD(mtu),
    { OPA_PATH_REC_FIELD(rate_selector),
    { OPA_PATH_REC_FIELD(rate),
    { OPA_PATH_REC_FIELD(packet_life_time_selector),
    { OPA_PATH_REC_FIELD(packet_life_time),
    { OPA_PATH_REC_FIELD(preference),
#define MCMEMBER_REC_FIELD(field) \
    .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \
    .struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
    .field_name = "sa_mcmember_rec:" #field
static const struct ib_field mcmember_rec_table[] = {
    { MCMEMBER_REC_FIELD(mgid),
    { MCMEMBER_REC_FIELD(port_gid),
    { MCMEMBER_REC_FIELD(qkey),
    { MCMEMBER_REC_FIELD(mlid),
    { MCMEMBER_REC_FIELD(mtu_selector),
    { MCMEMBER_REC_FIELD(mtu),
    { MCMEMBER_REC_FIELD(traffic_class),
    { MCMEMBER_REC_FIELD(pkey),
    { MCMEMBER_REC_FIELD(rate_selector),
    { MCMEMBER_REC_FIELD(rate),
    { MCMEMBER_REC_FIELD(packet_life_time_selector),
    { MCMEMBER_REC_FIELD(packet_life_time),
    { MCMEMBER_REC_FIELD(sl),
    { MCMEMBER_REC_FIELD(flow_label),
    { MCMEMBER_REC_FIELD(hop_limit),
    { MCMEMBER_REC_FIELD(scope),
    { MCMEMBER_REC_FIELD(join_state),
    { MCMEMBER_REC_FIELD(proxy_join),
#define SERVICE_REC_FIELD(field) \
    .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
    .struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \
    .field_name = "sa_service_rec:" #field
static const struct ib_field service_rec_table[] = {
    { SERVICE_REC_FIELD(id),
    { SERVICE_REC_FIELD(gid),
    { SERVICE_REC_FIELD(pkey),
    { SERVICE_REC_FIELD(lease),
    { SERVICE_REC_FIELD(key),
    { SERVICE_REC_FIELD(name),
    { SERVICE_REC_FIELD(data8),
    { SERVICE_REC_FIELD(data16),
    { SERVICE_REC_FIELD(data32),
    { SERVICE_REC_FIELD(data64),
#define CLASSPORTINFO_REC_FIELD(field) \
    .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
    .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \
    .field_name = "ib_class_port_info:" #field
static const struct ib_field ib_classport_info_rec_table[] = {
    { CLASSPORTINFO_REC_FIELD(base_version),
    { CLASSPORTINFO_REC_FIELD(class_version),
    { CLASSPORTINFO_REC_FIELD(capability_mask),
    { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
    { CLASSPORTINFO_REC_FIELD(redirect_gid),
    { CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
    { CLASSPORTINFO_REC_FIELD(redirect_lid),
    { CLASSPORTINFO_REC_FIELD(redirect_pkey),
    { CLASSPORTINFO_REC_FIELD(redirect_qp),
    { CLASSPORTINFO_REC_FIELD(redirect_qkey),
    { CLASSPORTINFO_REC_FIELD(trap_gid),
    { CLASSPORTINFO_REC_FIELD(trap_tcslfl),
    { CLASSPORTINFO_REC_FIELD(trap_lid),
    { CLASSPORTINFO_REC_FIELD(trap_pkey),
    { CLASSPORTINFO_REC_FIELD(trap_hlqp),
    { CLASSPORTINFO_REC_FIELD(trap_qkey),
#define OPA_CLASSPORTINFO_REC_FIELD(field) \
    .struct_offset_bytes = \
        offsetof(struct opa_class_port_info, field), \
    .struct_size_bytes = \
        sizeof((struct opa_class_port_info *)0)->field, \
    .field_name = "opa_class_port_info:" #field
static const struct ib_field opa_classport_info_rec_table[] = {
    { OPA_CLASSPORTINFO_REC_FIELD(base_version),
    { OPA_CLASSPORTINFO_REC_FIELD(class_version),
    { OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
    { OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
    { OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
    { OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
    { OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
    { OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
    { OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
    { OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
    { OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
    { OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
    { OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
    { OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
    { OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
    { OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
    { OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
#define GUIDINFO_REC_FIELD(field) \
    .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
    .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
    .field_name = "sa_guidinfo_rec:" #field
static const struct ib_field guidinfo_rec_table[] = {
    { GUIDINFO_REC_FIELD(lid),
    { GUIDINFO_REC_FIELD(block_num),
    { GUIDINFO_REC_FIELD(res1),
    { GUIDINFO_REC_FIELD(res2),
    { GUIDINFO_REC_FIELD(guid_info_list),
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
    query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
    return (query->flags & IB_SA_CANCEL);
}
static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
                                     struct ib_sa_query *query)
{
    struct sa_path_rec *sa_rec = query->mad_buf->context[1];
    struct ib_sa_mad *mad = query->mad_buf->mad;
    ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
    u16 val16;
    u64 val64;
    struct rdma_ls_resolve_header *header;

    query->mad_buf->context[1] = NULL;

    /* Construct the family header first */
    header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
    memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
           LS_DEVICE_NAME_MAX);
    header->port_num = query->port->port_num;

    if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
        sa_rec->reversible != 0)
        query->path_use = LS_RESOLVE_PATH_USE_GMP;
    else
        query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;

    header->path_use = query->path_use;

    /* Now build the attributes */
    if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
        val64 = be64_to_cpu(sa_rec->service_id);
        nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
                sizeof(val64), &val64);
    }
    if (comp_mask & IB_SA_PATH_REC_DGID)
        nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
                sizeof(sa_rec->dgid), &sa_rec->dgid);
    if (comp_mask & IB_SA_PATH_REC_SGID)
        nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
                sizeof(sa_rec->sgid), &sa_rec->sgid);
    if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
        nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
                sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

    if (comp_mask & IB_SA_PATH_REC_PKEY) {
        val16 = be16_to_cpu(sa_rec->pkey);
        nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
                sizeof(val16), &val16);
    }
    if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
        val16 = be16_to_cpu(sa_rec->qos_class);
        nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
                sizeof(val16), &val16);
    }
}
static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
    int len = 0;

    if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
        len += nla_total_size(sizeof(u64));
    if (comp_mask & IB_SA_PATH_REC_DGID)
        len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
    if (comp_mask & IB_SA_PATH_REC_SGID)
        len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
    if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
        len += nla_total_size(sizeof(u8));
    if (comp_mask & IB_SA_PATH_REC_PKEY)
        len += nla_total_size(sizeof(u16));
    if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
        len += nla_total_size(sizeof(u16));

    /*
     * Make sure that at least some of the required comp_mask bits are
     * set.
     */
    if (WARN_ON(len == 0))
        return len;

    /* Add the family header */
    len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

    return len;
}
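/*
 * Build and multicast a netlink resolve request for this query: an nlmsg
 * header carrying RDMA_NL_LS / RDMA_NL_LS_OP_RESOLVE, then the
 * rdma_ls_resolve_header naming the device and port, then one attribute
 * per comp_mask bit that is set in the MAD.
 */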
static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
    struct sk_buff *skb = NULL;
    struct nlmsghdr *nlh;
    void *data;
    struct ib_sa_mad *mad;
    int len;

    mad = query->mad_buf->mad;
    len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
    if (len <= 0)
        return -EMSGSIZE;

    skb = nlmsg_new(len, gfp_mask);
    if (!skb)
        return -ENOMEM;

    /* Put nlmsg header only for now */
    data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
                        RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
    if (!data) {
        nlmsg_free(skb);
        return -EMSGSIZE;
    }

    /* Add attributes */
    ib_nl_set_path_rec_attrs(skb, query);

    /* Repair the nlmsg header length */
    nlmsg_end(skb, nlh);

    return rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
}
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
    unsigned long flags;
    unsigned long delay;
    int ret;

    INIT_LIST_HEAD(&query->list);
    query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

    /* Put the request on the list first.*/
    spin_lock_irqsave(&ib_nl_request_lock, flags);
    delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
    query->timeout = delay + jiffies;
    list_add_tail(&query->list, &ib_nl_request_list);
    /* Start the timeout if this is the only request */
    if (ib_nl_request_list.next == &query->list)
        queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
    spin_unlock_irqrestore(&ib_nl_request_lock, flags);

    ret = ib_nl_send_msg(query, gfp_mask);
    if (ret) {
        ret = -EIO;
        /* Remove the request */
        spin_lock_irqsave(&ib_nl_request_lock, flags);
        list_del(&query->list);
        spin_unlock_irqrestore(&ib_nl_request_lock, flags);
    }

    return ret;
}
static int ib_nl_cancel_request(struct ib_sa_query *query)
{
    unsigned long flags;
    struct ib_sa_query *wait_query;
    int found = 0;

    spin_lock_irqsave(&ib_nl_request_lock, flags);
    list_for_each_entry(wait_query, &ib_nl_request_list, list) {
        /* Let the timeout routine take care of the callback */
        if (query == wait_query) {
            query->flags |= IB_SA_CANCEL;
            query->timeout = jiffies;
            list_move(&query->list, &ib_nl_request_list);
            found = 1;
            mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
            break;
        }
    }
    spin_unlock_irqrestore(&ib_nl_request_lock, flags);

    return found;
}
static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc);
static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
                                           const struct nlmsghdr *nlh)
{
    struct ib_mad_send_wc mad_send_wc;
    struct ib_sa_mad *mad = NULL;
    const struct nlattr *head, *curr;
    struct ib_path_rec_data *rec;
    int len, rem;
    u32 mask = 0;
    int status = -EIO;

    if (query->callback) {
        head = (const struct nlattr *) nlmsg_data(nlh);
        len = nlmsg_len(nlh);
        switch (query->path_use) {
        case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
            mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
            break;

        case LS_RESOLVE_PATH_USE_ALL:
        case LS_RESOLVE_PATH_USE_GMP:
        default:
            mask = IB_PATH_PRIMARY | IB_PATH_GMP |
                   IB_PATH_BIDIRECTIONAL;
            break;
        }
        nla_for_each_attr(curr, head, len, rem) {
            if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
                rec = nla_data(curr);
                /*
                 * Get the first one. In the future, we may
                 * need to get up to 6 pathrecords.
                 */
                if ((rec->flags & mask) == mask) {
                    mad = query->mad_buf->mad;
                    mad->mad_hdr.method |=
                        IB_MGMT_METHOD_RESP;
                    memcpy(mad->data, rec->path_rec,
                           sizeof(rec->path_rec));
                    status = 0;
                    break;
                }
            }
        }
        query->callback(query, status, mad);
    }

    mad_send_wc.send_buf = query->mad_buf;
    mad_send_wc.status = IB_WC_SUCCESS;
    send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}
static void ib_nl_request_timeout(struct work_struct *work)
{
    unsigned long flags;
    struct ib_sa_query *query;
    unsigned long delay;
    struct ib_mad_send_wc mad_send_wc;
    int ret;

    spin_lock_irqsave(&ib_nl_request_lock, flags);
    while (!list_empty(&ib_nl_request_list)) {
        query = list_entry(ib_nl_request_list.next,
                           struct ib_sa_query, list);

        if (time_after(query->timeout, jiffies)) {
            delay = query->timeout - jiffies;
            if ((long)delay <= 0)
                delay = 1;
            queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
            break;
        }

        list_del(&query->list);
        ib_sa_disable_local_svc(query);
        /* Hold the lock to protect against query cancellation */
        if (ib_sa_query_cancelled(query))
            ret = -1;
        else
            ret = ib_post_send_mad(query->mad_buf, NULL);
        if (ret) {
            mad_send_wc.send_buf = query->mad_buf;
            mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
            spin_unlock_irqrestore(&ib_nl_request_lock, flags);
            send_handler(query->port->agent, &mad_send_wc);
            spin_lock_irqsave(&ib_nl_request_lock, flags);
        }
    }
    spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
int ib_nl_handle_set_timeout(struct sk_buff *skb,
                             struct nlmsghdr *nlh,
                             struct netlink_ext_ack *extack)
{
    int timeout, delta, abs_delta;
    const struct nlattr *attr;
    unsigned long flags;
    struct ib_sa_query *query;
    long delay = 0;
    struct nlattr *tb[LS_NLA_TYPE_MAX];
    int ret;

    if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
        !(NETLINK_CB(skb).sk))
        return -EPERM;

    ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
                    nlmsg_len(nlh), ib_nl_policy, NULL);
    attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
    if (ret || !attr)
        goto settimeout_out;

    timeout = *(int *) nla_data(attr);
    if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
        timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
    if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
        timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

    delta = timeout - sa_local_svc_timeout_ms;
    if (delta < 0)
        abs_delta = -delta;
    else
        abs_delta = delta;

    if (delta != 0) {
        spin_lock_irqsave(&ib_nl_request_lock, flags);
        sa_local_svc_timeout_ms = timeout;
        list_for_each_entry(query, &ib_nl_request_list, list) {
            if (delta < 0 && abs_delta > query->timeout)
                query->timeout = 0;
            else
                query->timeout += delta;

            /* Get the new delay from the first entry */
            if (!delay) {
                delay = query->timeout - jiffies;
                if (delay <= 0)
                    delay = 1;
            }
        }
        if (delay)
            mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
                             (unsigned long)delay);
        spin_unlock_irqrestore(&ib_nl_request_lock, flags);
    }

settimeout_out:
    return skb->len;
}
static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
    struct nlattr *tb[LS_NLA_TYPE_MAX];
    int ret;

    if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
        return 0;

    ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
                    nlmsg_len(nlh), ib_nl_policy, NULL);
    if (ret)
        return 0;

    return 1;
}
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
                              struct nlmsghdr *nlh,
                              struct netlink_ext_ack *extack)
{
    unsigned long flags;
    struct ib_sa_query *query;
    struct ib_mad_send_buf *send_buf;
    struct ib_mad_send_wc mad_send_wc;
    int found = 0;
    int ret;

    if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
        !(NETLINK_CB(skb).sk))
        return -EPERM;

    spin_lock_irqsave(&ib_nl_request_lock, flags);
    list_for_each_entry(query, &ib_nl_request_list, list) {
        /*
         * If the query is cancelled, let the timeout routine
         * take care of it.
         */
        if (nlh->nlmsg_seq == query->seq) {
            found = !ib_sa_query_cancelled(query);
            if (found)
                list_del(&query->list);
            break;
        }
    }

    if (!found) {
        spin_unlock_irqrestore(&ib_nl_request_lock, flags);
        goto resp_out;
    }

    send_buf = query->mad_buf;

    if (!ib_nl_is_good_resolve_resp(nlh)) {
        /* if the result is a failure, send out the packet via IB */
        ib_sa_disable_local_svc(query);
        ret = ib_post_send_mad(query->mad_buf, NULL);
        spin_unlock_irqrestore(&ib_nl_request_lock, flags);
        if (ret) {
            mad_send_wc.send_buf = send_buf;
            mad_send_wc.status = IB_WC_GENERAL_ERR;
            send_handler(query->port->agent, &mad_send_wc);
        }
    } else {
        spin_unlock_irqrestore(&ib_nl_request_lock, flags);
        ib_nl_process_good_resolve_rsp(query, nlh);
    }

resp_out:
    return skb->len;
}
static void free_sm_ah(struct kref *kref)
{
    struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

    rdma_destroy_ah(sm_ah->ah);
    kfree(sm_ah);
}
void ib_sa_register_client(struct ib_sa_client *client)
{
    atomic_set(&client->users, 1);
    init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
    ib_sa_client_put(client);
    wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id: ID of query to cancel
 * @query: query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
    unsigned long flags;
    struct ib_mad_agent *agent;
    struct ib_mad_send_buf *mad_buf;

    spin_lock_irqsave(&idr_lock, flags);
    if (idr_find(&query_idr, id) != query) {
        spin_unlock_irqrestore(&idr_lock, flags);
        return;
    }
    agent = query->port->agent;
    mad_buf = query->mad_buf;
    spin_unlock_irqrestore(&idr_lock, flags);

    /*
     * If the query is still on the netlink request list, schedule
     * it to be cancelled by the timeout routine. Otherwise, it has been
     * sent to the MAD layer and has to be cancelled from there.
     */
    if (!ib_nl_cancel_request(query))
        ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
    struct ib_sa_device *sa_dev;
    struct ib_sa_port *port;
    unsigned long flags;
    u8 src_path_mask;

    sa_dev = ib_get_client_data(device, &sa_client);
    if (!sa_dev)
        return 0x7f;

    port = &sa_dev->port[port_num - sa_dev->start_port];
    spin_lock_irqsave(&port->ah_lock, flags);
    src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
    spin_unlock_irqrestore(&port->ah_lock, flags);

    return src_path_mask;
}
static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
                                   struct sa_path_rec *rec,
                                   struct rdma_ah_attr *ah_attr,
                                   const struct ib_gid_attr *gid_attr)
{
    enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);

    if (!gid_attr) {
        gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
                                         port_num, NULL);
        if (IS_ERR(gid_attr))
            return PTR_ERR(gid_attr);
    } else
        rdma_hold_gid_attr(gid_attr);

    rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
                            be32_to_cpu(rec->flow_label),
                            rec->hop_limit, rec->traffic_class,
                            gid_attr);
    return 0;
}
/**
 * ib_init_ah_attr_from_path - Initialize address handle attributes based on
 *   an SA path record.
 * @device: Device associated with the ah attributes to initialize.
 * @port_num: Port on the specified device.
 * @rec: path record entry to use for ah attributes initialization.
 * @ah_attr: address handle attributes to initialize from the path record.
 * @sgid_attr: SGID attribute to consider during initialization.
 *
 * When ib_init_ah_attr_from_path() returns success,
 * (a) for the IB link layer it optionally contains a reference to the SGID
 *     attribute when a GRH is present;
 * (b) for the RoCE link layer it always contains a reference to the SGID
 *     attribute.
 * The caller must invoke rdma_destroy_ah_attr() to release references to
 * SGID attributes which are initialized using ib_init_ah_attr_from_path().
 */
int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
                              struct sa_path_rec *rec,
                              struct rdma_ah_attr *ah_attr,
                              const struct ib_gid_attr *gid_attr)
{
    int ret = 0;

    memset(ah_attr, 0, sizeof(*ah_attr));
    ah_attr->type = rdma_ah_find_type(device, port_num);
    rdma_ah_set_sl(ah_attr, rec->sl);
    rdma_ah_set_port_num(ah_attr, port_num);
    rdma_ah_set_static_rate(ah_attr, rec->rate);

    if (sa_path_is_roce(rec)) {
        ret = roce_resolve_route_from_path(rec, gid_attr);
        if (ret)
            return ret;

        memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
    } else {
        rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
        if (sa_path_is_opa(rec) &&
            rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
            rdma_ah_set_make_grd(ah_attr, true);

        rdma_ah_set_path_bits(ah_attr,
                              be32_to_cpu(sa_path_get_slid(rec)) &
                              get_src_path_mask(device, port_num));
    }

    if (rec->hop_limit > 0 || sa_path_is_roce(rec))
        ret = init_ah_attr_grh_fields(device, port_num,
                                      rec, ah_attr, gid_attr);
    return ret;
}
EXPORT_SYMBOL(ib_init_ah_attr_from_path);
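/*
 * Illustrative usage sketch (not taken from a specific in-tree caller):
 * after a successful path record query, a consumer would typically build
 * AH attributes and then release the SGID attribute reference:
 *
 *      struct rdma_ah_attr ah_attr;
 *
 *      if (!ib_init_ah_attr_from_path(device, port_num, rec, &ah_attr,
 *                                     NULL)) {
 *              ... create an AH or modify a QP using ah_attr ...
 *              rdma_destroy_ah_attr(&ah_attr);
 *      }
 */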
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
    struct rdma_ah_attr ah_attr;
    unsigned long flags;

    spin_lock_irqsave(&query->port->ah_lock, flags);
    if (!query->port->sm_ah) {
        spin_unlock_irqrestore(&query->port->ah_lock, flags);
        return -EAGAIN;
    }
    kref_get(&query->port->sm_ah->ref);
    query->sm_ah = query->port->sm_ah;
    spin_unlock_irqrestore(&query->port->ah_lock, flags);

    /*
     * Always check if sm_ah has valid dlid assigned,
     * before querying for class port info
     */
    if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
        !rdma_is_valid_unicast_lid(&ah_attr)) {
        kref_put(&query->sm_ah->ref, free_sm_ah);
        return -EAGAIN;
    }
    query->mad_buf = ib_create_send_mad(query->port->agent, 1,
                                        query->sm_ah->pkey_index,
                                        0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
                                        gfp_mask,
                                        ((query->flags & IB_SA_QUERY_OPA) ?
                                         OPA_MGMT_BASE_VERSION :
                                         IB_MGMT_BASE_VERSION));
    if (IS_ERR(query->mad_buf)) {
        kref_put(&query->sm_ah->ref, free_sm_ah);
        return -ENOMEM;
    }

    query->mad_buf->ah = query->sm_ah->ah;

    return 0;
}
static void free_mad(struct ib_sa_query *query)
{
    ib_free_send_mad(query->mad_buf);
    kref_put(&query->sm_ah->ref, free_sm_ah);
}
static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{
    struct ib_sa_mad *mad = query->mad_buf->mad;
    unsigned long flags;

    memset(mad, 0, sizeof *mad);

    if (query->flags & IB_SA_QUERY_OPA) {
        mad->mad_hdr.base_version  = OPA_MGMT_BASE_VERSION;
        mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
    } else {
        mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
        mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
    }
    mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
    spin_lock_irqsave(&tid_lock, flags);
    mad->mad_hdr.tid =
        cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
    spin_unlock_irqrestore(&tid_lock, flags);
}
static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
                    gfp_t gfp_mask)
{
    bool preload = gfpflags_allow_blocking(gfp_mask);
    unsigned long flags;
    int ret, id;

    if (preload)
        idr_preload(gfp_mask);
    spin_lock_irqsave(&idr_lock, flags);

    id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

    spin_unlock_irqrestore(&idr_lock, flags);
    if (preload)
        idr_preload_end();
    if (id < 0)
        return id;

    query->mad_buf->timeout_ms = timeout_ms;
    query->mad_buf->context[0] = query;
    query->id = id;

    if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
        (!(query->flags & IB_SA_QUERY_OPA))) {
        if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
            if (!ib_nl_make_request(query, gfp_mask))
                return id;
        }
        ib_sa_disable_local_svc(query);
    }

    ret = ib_post_send_mad(query->mad_buf, NULL);
    if (ret) {
        spin_lock_irqsave(&idr_lock, flags);
        idr_remove(&query_idr, id);
        spin_unlock_irqrestore(&idr_lock, flags);
    }

    /*
     * It's not safe to dereference query any more, because the
     * send may already have completed and freed the query in
     * another context.
     */
    return ret ? ret : id;
}
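/*
 * Marshalling helpers between struct sa_path_rec and the on-wire
 * PathRecord layout carried inside SA MADs.
 */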
void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
{
    ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
{
    ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);
static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
                                         struct ib_device *device,
                                         u8 port_num)
{
    struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
    struct ib_sa_port *port;
    unsigned long flags;
    bool ret = false;

    if (!sa_dev)
        return ret;

    port = &sa_dev->port[port_num - sa_dev->start_port];
    spin_lock_irqsave(&port->classport_lock, flags);
    if (!port->classport_info.valid)
        goto ret;

    if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
        ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
              OPA_CLASS_PORT_INFO_PR_SUPPORT;
ret:
    spin_unlock_irqrestore(&port->classport_lock, flags);
    return ret;
}
enum opa_pr_supported {
    PR_NOT_SUPPORTED,
    PR_OPA_SUPPORTED,
    PR_IB_SUPPORTED
};

/*
 * Check if the current PR query can be an OPA query.
 * Returns PR_NOT_SUPPORTED if a path record query is not
 * possible, PR_OPA_SUPPORTED if an OPA path record query
 * is possible and PR_IB_SUPPORTED if an IB path record
 * query is possible.
 */
static int opa_pr_query_possible(struct ib_sa_client *client,
                                 struct ib_device *device,
                                 u8 port_num,
                                 struct sa_path_rec *rec)
{
    struct ib_port_attr port_attr;

    if (ib_query_port(device, port_num, &port_attr))
        return PR_NOT_SUPPORTED;

    if (ib_sa_opa_pathrecord_support(client, device, port_num))
        return PR_OPA_SUPPORTED;

    if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
        return PR_NOT_SUPPORTED;
    else
        return PR_IB_SUPPORTED;
}
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
                                    int status,
                                    struct ib_sa_mad *mad)
{
    struct ib_sa_path_query *query =
        container_of(sa_query, struct ib_sa_path_query, sa_query);

    if (mad) {
        struct sa_path_rec rec;

        if (sa_query->flags & IB_SA_QUERY_OPA) {
            ib_unpack(opa_path_rec_table,
                      ARRAY_SIZE(opa_path_rec_table),
                      mad->data, &rec);
            rec.rec_type = SA_PATH_REC_TYPE_OPA;
            query->callback(status, &rec, query->context);
        } else {
            ib_unpack(path_rec_table,
                      ARRAY_SIZE(path_rec_table),
                      mad->data, &rec);
            rec.rec_type = SA_PATH_REC_TYPE_IB;
            sa_path_set_dmac_zero(&rec);

            if (query->conv_pr) {
                struct sa_path_rec opa;

                memset(&opa, 0, sizeof(struct sa_path_rec));
                sa_convert_path_ib_to_opa(&opa, &rec);
                query->callback(status, &opa, query->context);
            } else {
                query->callback(status, &rec, query->context);
            }
        }
    } else
        query->callback(status, NULL, query->context);
}
static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
    struct ib_sa_path_query *query =
        container_of(sa_query, struct ib_sa_path_query, sa_query);

    kfree(query->conv_pr);
    kfree(query);
}
/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client: SA client
 * @device: device to send query on
 * @port_num: port number to send query on
 * @rec: Path Record to send in query
 * @comp_mask: component mask to send in query
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when query completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
                       struct ib_device *device, u8 port_num,
                       struct sa_path_rec *rec,
                       ib_sa_comp_mask comp_mask,
                       unsigned long timeout_ms, gfp_t gfp_mask,
                       void (*callback)(int status,
                                        struct sa_path_rec *resp,
                                        void *context),
                       void *context,
                       struct ib_sa_query **sa_query)
{
    struct ib_sa_path_query *query;
    struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
    struct ib_sa_port *port;
    struct ib_mad_agent *agent;
    struct ib_sa_mad *mad;
    enum opa_pr_supported status;
    int ret;

    if (!sa_dev)
        return -ENODEV;

    if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
        (rec->rec_type != SA_PATH_REC_TYPE_OPA))
        return -EINVAL;

    port = &sa_dev->port[port_num - sa_dev->start_port];
    agent = port->agent;

    query = kzalloc(sizeof(*query), gfp_mask);
    if (!query)
        return -ENOMEM;

    query->sa_query.port = port;
    if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
        status = opa_pr_query_possible(client, device, port_num, rec);
        if (status == PR_NOT_SUPPORTED) {
            ret = -EINVAL;
            goto err1;
        } else if (status == PR_OPA_SUPPORTED) {
            query->sa_query.flags |= IB_SA_QUERY_OPA;
        } else {
            query->conv_pr =
                kmalloc(sizeof(*query->conv_pr), gfp_mask);
            if (!query->conv_pr) {
                ret = -ENOMEM;
                goto err1;
            }
        }
    }

    ret = alloc_mad(&query->sa_query, gfp_mask);
    if (ret)
        goto err2;

    ib_sa_client_get(client);
    query->sa_query.client = client;
    query->callback        = callback;
    query->context         = context;

    mad = query->sa_query.mad_buf->mad;
    init_mad(&query->sa_query, agent);

    query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
    query->sa_query.release  = ib_sa_path_rec_release;
    mad->mad_hdr.method      = IB_MGMT_METHOD_GET;
    mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_PATH_REC);
    mad->sa_hdr.comp_mask    = comp_mask;

    if (query->sa_query.flags & IB_SA_QUERY_OPA) {
        ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
                rec, mad->data);
    } else if (query->conv_pr) {
        sa_convert_path_opa_to_ib(query->conv_pr, rec);
        ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
                query->conv_pr, mad->data);
    } else {
        ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
                rec, mad->data);
    }

    *sa_query = &query->sa_query;

    query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
    query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
                                          query->conv_pr : rec;

    ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
    if (ret < 0)
        goto err3;

    return ret;

err3:
    *sa_query = NULL;
    ib_sa_client_put(query->sa_query.client);
    free_mad(&query->sa_query);
err2:
    kfree(query->conv_pr);
err1:
    kfree(query);
    return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
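/*
 * Illustrative usage sketch (hypothetical caller; names are made up):
 *
 *      static void path_done(int status, struct sa_path_rec *resp,
 *                            void *ctx)
 *      {
 *              if (!status)
 *                      ... copy *resp out; it is not valid after return ...
 *      }
 *
 *      id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *                              IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *                              1000, GFP_KERNEL, path_done, ctx, &query);
 *      if (id < 0)
 *              ... error ...
 *      ... ib_sa_cancel_query(id, query) may be used to cancel ...
 */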
static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
                                       int status,
                                       struct ib_sa_mad *mad)
{
    struct ib_sa_service_query *query =
        container_of(sa_query, struct ib_sa_service_query, sa_query);

    if (mad) {
        struct ib_sa_service_rec rec;

        ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
                  mad->data, &rec);
        query->callback(status, &rec, query->context);
    } else
        query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
    kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}
/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client: SA client
 * @device: device to send request on
 * @port_num: port number to send request on
 * @method: SA method - should be get, set, or delete
 * @rec: Service Record to send in request
 * @comp_mask: component mask to send in request
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when request completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
                            struct ib_device *device, u8 port_num, u8 method,
                            struct ib_sa_service_rec *rec,
                            ib_sa_comp_mask comp_mask,
                            unsigned long timeout_ms, gfp_t gfp_mask,
                            void (*callback)(int status,
                                             struct ib_sa_service_rec *resp,
                                             void *context),
                            void *context,
                            struct ib_sa_query **sa_query)
{
    struct ib_sa_service_query *query;
    struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
    struct ib_sa_port *port;
    struct ib_mad_agent *agent;
    struct ib_sa_mad *mad;
    int ret;

    if (!sa_dev)
        return -ENODEV;

    port = &sa_dev->port[port_num - sa_dev->start_port];
    agent = port->agent;

    if (method != IB_MGMT_METHOD_GET &&
        method != IB_MGMT_METHOD_SET &&
        method != IB_SA_METHOD_DELETE)
        return -EINVAL;

    query = kzalloc(sizeof(*query), gfp_mask);
    if (!query)
        return -ENOMEM;

    query->sa_query.port = port;
    ret = alloc_mad(&query->sa_query, gfp_mask);
    if (ret)
        goto err1;

    ib_sa_client_get(client);
    query->sa_query.client = client;
    query->callback        = callback;
    query->context         = context;

    mad = query->sa_query.mad_buf->mad;
    init_mad(&query->sa_query, agent);

    query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
    query->sa_query.release  = ib_sa_service_rec_release;
    mad->mad_hdr.method      = method;
    mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
    mad->sa_hdr.comp_mask    = comp_mask;

    ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
            rec, mad->data);

    *sa_query = &query->sa_query;

    ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
    if (ret < 0)
        goto err2;

    return ret;

err2:
    *sa_query = NULL;
    ib_sa_client_put(query->sa_query.client);
    free_mad(&query->sa_query);
err1:
    kfree(query);
    return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
                                        int status,
                                        struct ib_sa_mad *mad)
{
    struct ib_sa_mcmember_query *query =
        container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

    if (mad) {
        struct ib_sa_mcmember_rec rec;

        ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
                  mad->data, &rec);
        query->callback(status, &rec, query->context);
    } else
        query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
    kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
                             struct ib_device *device, u8 port_num,
                             u8 method,
                             struct ib_sa_mcmember_rec *rec,
                             ib_sa_comp_mask comp_mask,
                             unsigned long timeout_ms, gfp_t gfp_mask,
                             void (*callback)(int status,
                                              struct ib_sa_mcmember_rec *resp,
                                              void *context),
                             void *context,
                             struct ib_sa_query **sa_query)
{
    struct ib_sa_mcmember_query *query;
    struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
    struct ib_sa_port *port;
    struct ib_mad_agent *agent;
    struct ib_sa_mad *mad;
    int ret;

    if (!sa_dev)
        return -ENODEV;

    port = &sa_dev->port[port_num - sa_dev->start_port];
    agent = port->agent;

    query = kzalloc(sizeof(*query), gfp_mask);
    if (!query)
        return -ENOMEM;

    query->sa_query.port = port;
    ret = alloc_mad(&query->sa_query, gfp_mask);
    if (ret)
        goto err1;

    ib_sa_client_get(client);
    query->sa_query.client = client;
    query->callback        = callback;
    query->context         = context;

    mad = query->sa_query.mad_buf->mad;
    init_mad(&query->sa_query, agent);

    query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
    query->sa_query.release  = ib_sa_mcmember_rec_release;
    mad->mad_hdr.method      = method;
    mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
    mad->sa_hdr.comp_mask    = comp_mask;

    ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
            rec, mad->data);

    *sa_query = &query->sa_query;

    ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
    if (ret < 0)
        goto err2;

    return ret;

err2:
    *sa_query = NULL;
    ib_sa_client_put(query->sa_query.client);
    free_mad(&query->sa_query);
err1:
    kfree(query);
    return ret;
}
/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
                                        int status,
                                        struct ib_sa_mad *mad)
{
    struct ib_sa_guidinfo_query *query =
        container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

    if (mad) {
        struct ib_sa_guidinfo_rec rec;

        ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
                  mad->data, &rec);
        query->callback(status, &rec, query->context);
    } else
        query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
    kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
                              struct ib_device *device, u8 port_num,
                              struct ib_sa_guidinfo_rec *rec,
                              ib_sa_comp_mask comp_mask, u8 method,
                              unsigned long timeout_ms, gfp_t gfp_mask,
                              void (*callback)(int status,
                                               struct ib_sa_guidinfo_rec *resp,
                                               void *context),
                              void *context,
                              struct ib_sa_query **sa_query)
{
    struct ib_sa_guidinfo_query *query;
    struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
    struct ib_sa_port *port;
    struct ib_mad_agent *agent;
    struct ib_sa_mad *mad;
    int ret;

    if (!sa_dev)
        return -ENODEV;

    if (method != IB_MGMT_METHOD_GET &&
        method != IB_MGMT_METHOD_SET &&
        method != IB_SA_METHOD_DELETE) {
        return -EINVAL;
    }

    port = &sa_dev->port[port_num - sa_dev->start_port];
    agent = port->agent;

    query = kzalloc(sizeof(*query), gfp_mask);
    if (!query)
        return -ENOMEM;

    query->sa_query.port = port;
    ret = alloc_mad(&query->sa_query, gfp_mask);
    if (ret)
        goto err1;

    ib_sa_client_get(client);
    query->sa_query.client = client;
    query->callback        = callback;
    query->context         = context;

    mad = query->sa_query.mad_buf->mad;
    init_mad(&query->sa_query, agent);

    query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
    query->sa_query.release  = ib_sa_guidinfo_rec_release;

    mad->mad_hdr.method   = method;
    mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
    mad->sa_hdr.comp_mask = comp_mask;

    ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
            mad->data);

    *sa_query = &query->sa_query;

    ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
    if (ret < 0)
        goto err2;

    return ret;

err2:
    *sa_query = NULL;
    ib_sa_client_put(query->sa_query.client);
    free_mad(&query->sa_query);
err1:
    kfree(query);
    return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
                                    struct ib_device *device,
                                    u8 port_num)
{
    struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
    struct ib_sa_port *port;
    bool ret = false;
    unsigned long flags;

    if (!sa_dev)
        return ret;

    port = &sa_dev->port[port_num - sa_dev->start_port];

    spin_lock_irqsave(&port->classport_lock, flags);
    if ((port->classport_info.valid) &&
        (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB))
        ret = ib_get_cpi_capmask2(&port->classport_info.data.ib)
              & IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
    spin_unlock_irqrestore(&port->classport_lock, flags);
    return ret;
}
EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support);
struct ib_classport_info_context {
    struct completion  done;
    struct ib_sa_query *sa_query;
};

static void ib_classportinfo_cb(void *context)
{
    struct ib_classport_info_context *cb_ctx = context;

    complete(&cb_ctx->done);
}
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
                                              int status,
                                              struct ib_sa_mad *mad)
{
    unsigned long flags;
    struct ib_sa_classport_info_query *query =
        container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
    struct ib_sa_classport_cache *info = &sa_query->port->classport_info;

    if (mad) {
        if (sa_query->flags & IB_SA_QUERY_OPA) {
            struct opa_class_port_info rec;

            ib_unpack(opa_classport_info_rec_table,
                      ARRAY_SIZE(opa_classport_info_rec_table),
                      mad->data, &rec);

            spin_lock_irqsave(&sa_query->port->classport_lock,
                              flags);
            if (!status && !info->valid) {
                memcpy(&info->data.opa, &rec,
                       sizeof(info->data.opa));

                info->valid = true;
                info->data.type = RDMA_CLASS_PORT_INFO_OPA;
            }
            spin_unlock_irqrestore(&sa_query->port->classport_lock,
                                   flags);

        } else {
            struct ib_class_port_info rec;

            ib_unpack(ib_classport_info_rec_table,
                      ARRAY_SIZE(ib_classport_info_rec_table),
                      mad->data, &rec);

            spin_lock_irqsave(&sa_query->port->classport_lock,
                              flags);
            if (!status && !info->valid) {
                memcpy(&info->data.ib, &rec,
                       sizeof(info->data.ib));

                info->valid = true;
                info->data.type = RDMA_CLASS_PORT_INFO_IB;
            }
            spin_unlock_irqrestore(&sa_query->port->classport_lock,
                                   flags);
        }
    }
    query->callback(query->context);
}
static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
{
    kfree(container_of(sa_query, struct ib_sa_classport_info_query,
                       sa_query));
}
static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
                                          unsigned long timeout_ms,
                                          void (*callback)(void *context),
                                          void *context,
                                          struct ib_sa_query **sa_query)
{
    struct ib_mad_agent *agent;
    struct ib_sa_classport_info_query *query;
    struct ib_sa_mad *mad;
    gfp_t gfp_mask = GFP_KERNEL;
    int ret;

    agent = port->agent;

    query = kzalloc(sizeof(*query), gfp_mask);
    if (!query)
        return -ENOMEM;

    query->sa_query.port = port;
    query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
                                             port->port_num) ?
                             IB_SA_QUERY_OPA : 0;
    ret = alloc_mad(&query->sa_query, gfp_mask);
    if (ret)
        goto err_free;

    query->callback = callback;
    query->context = context;

    mad = query->sa_query.mad_buf->mad;
    init_mad(&query->sa_query, agent);

    query->sa_query.callback = ib_sa_classport_info_rec_callback;
    query->sa_query.release  = ib_sa_classport_info_rec_release;
    mad->mad_hdr.method   = IB_MGMT_METHOD_GET;
    mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
    mad->sa_hdr.comp_mask = 0;
    *sa_query = &query->sa_query;

    ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
    if (ret < 0)
        goto err_mad;

    return ret;

err_mad:
    free_mad(&query->sa_query);
err_free:
    kfree(query);
    return ret;
}
static void update_ib_cpi(struct work_struct *work)
{
    struct ib_sa_port *port =
        container_of(work, struct ib_sa_port, ib_cpi_work.work);
    struct ib_classport_info_context *cb_context;
    unsigned long flags;
    int ret;

    /* If the classport info is valid, nothing
     * to do here.
     */
    spin_lock_irqsave(&port->classport_lock, flags);
    if (port->classport_info.valid) {
        spin_unlock_irqrestore(&port->classport_lock, flags);
        return;
    }
    spin_unlock_irqrestore(&port->classport_lock, flags);

    cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
    if (!cb_context)
        goto err_nomem;

    init_completion(&cb_context->done);

    ret = ib_sa_classport_info_rec_query(port, 3000,
                                         ib_classportinfo_cb, cb_context,
                                         &cb_context->sa_query);
    if (ret < 0)
        goto free_cb_err;
    wait_for_completion(&cb_context->done);
free_cb_err:
    kfree(cb_context);
    spin_lock_irqsave(&port->classport_lock, flags);

    /* If the classport info is still not valid, the query should have
     * failed for some reason. Retry issuing the query
     */
    if (!port->classport_info.valid) {
        port->classport_info.retry_cnt++;
        if (port->classport_info.retry_cnt <=
            IB_SA_CPI_MAX_RETRY_CNT) {
            unsigned long delay =
                msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

            queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
        }
    }
    spin_unlock_irqrestore(&port->classport_lock, flags);

err_nomem:
    return;
}
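/*
 * Send-side completion for an SA MAD: runs on timeout, flush, or after a
 * response arrived (in which case recv_handler already invoked the user
 * callback). Drops the query ID, the MAD buffer, and the query itself.
 */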
static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc)
{
    struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
    unsigned long flags;

    if (query->callback)
        switch (mad_send_wc->status) {
        case IB_WC_SUCCESS:
            /* No callback -- already got recv */
            break;
        case IB_WC_RESP_TIMEOUT_ERR:
            query->callback(query, -ETIMEDOUT, NULL);
            break;
        case IB_WC_WR_FLUSH_ERR:
            query->callback(query, -EINTR, NULL);
            break;
        default:
            query->callback(query, -EIO, NULL);
            break;
        }

    spin_lock_irqsave(&idr_lock, flags);
    idr_remove(&query_idr, query->id);
    spin_unlock_irqrestore(&idr_lock, flags);

    free_mad(query);
    if (query->client)
        ib_sa_client_put(query->client);
    query->release(query);
}
static void recv_handler(struct ib_mad_agent *mad_agent,
                         struct ib_mad_send_buf *send_buf,
                         struct ib_mad_recv_wc *mad_recv_wc)
{
    struct ib_sa_query *query;

    if (!send_buf)
        return;

    query = send_buf->context[0];
    if (query->callback) {
        if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
            query->callback(query,
                            mad_recv_wc->recv_buf.mad->mad_hdr.status ?
                            -EINVAL : 0,
                            (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
        else
            query->callback(query, -EIO, NULL);
    }

    ib_free_recv_mad(mad_recv_wc);
}
static void update_sm_ah(struct work_struct *work)
{
    struct ib_sa_port *port =
        container_of(work, struct ib_sa_port, update_task);
    struct ib_sa_sm_ah *new_ah;
    struct ib_port_attr port_attr;
    struct rdma_ah_attr ah_attr;
    bool grh_required;

    if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
        pr_warn("Couldn't query port\n");
        return;
    }

    new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
    if (!new_ah)
        return;

    kref_init(&new_ah->ref);
    new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

    new_ah->pkey_index = 0;
    if (ib_find_pkey(port->agent->device, port->port_num,
                     IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
        pr_err("Couldn't find index for default PKey\n");

    memset(&ah_attr, 0, sizeof(ah_attr));
    ah_attr.type = rdma_ah_find_type(port->agent->device,
                                     port->port_num);
    rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
    rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
    rdma_ah_set_port_num(&ah_attr, port->port_num);

    grh_required = rdma_is_grh_required(port->agent->device,
                                        port->port_num);

    /*
     * The OPA sm_lid of 0xFFFF needs special handling so that it can be
     * differentiated from a permissive LID of 0xFFFF. We set the
     * grh_required flag here so the SA can program the DGID in the
     * address handle appropriately
     */
    if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
        (grh_required ||
         port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
        rdma_ah_set_make_grd(&ah_attr, true);

    if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
        rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
        rdma_ah_set_subnet_prefix(&ah_attr,
                                  cpu_to_be64(port_attr.subnet_prefix));
        rdma_ah_set_interface_id(&ah_attr,
                                 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
    }

    new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr);
    if (IS_ERR(new_ah->ah)) {
        pr_warn("Couldn't create new SM AH\n");
        kfree(new_ah);
        return;
    }

    spin_lock_irq(&port->ah_lock);
    if (port->sm_ah)
        kref_put(&port->sm_ah->ref, free_sm_ah);
    port->sm_ah = new_ah;
    spin_unlock_irq(&port->ah_lock);
}
static void ib_sa_event(struct ib_event_handler *handler,
                        struct ib_event *event)
{
    if (event->event == IB_EVENT_PORT_ERR    ||
        event->event == IB_EVENT_PORT_ACTIVE ||
        event->event == IB_EVENT_LID_CHANGE  ||
        event->event == IB_EVENT_PKEY_CHANGE ||
        event->event == IB_EVENT_SM_CHANGE   ||
        event->event == IB_EVENT_CLIENT_REREGISTER) {
        unsigned long flags;
        struct ib_sa_device *sa_dev =
            container_of(handler, typeof(*sa_dev), event_handler);
        u8 port_num = event->element.port_num - sa_dev->start_port;
        struct ib_sa_port *port = &sa_dev->port[port_num];

        if (!rdma_cap_ib_sa(handler->device, port->port_num))
            return;

        spin_lock_irqsave(&port->ah_lock, flags);
        if (port->sm_ah)
            kref_put(&port->sm_ah->ref, free_sm_ah);
        port->sm_ah = NULL;
        spin_unlock_irqrestore(&port->ah_lock, flags);

        if (event->event == IB_EVENT_SM_CHANGE ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_LID_CHANGE ||
            event->event == IB_EVENT_PORT_ACTIVE) {
            unsigned long delay =
                msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

            spin_lock_irqsave(&port->classport_lock, flags);
            port->classport_info.valid = false;
            port->classport_info.retry_cnt = 0;
            spin_unlock_irqrestore(&port->classport_lock, flags);
            queue_delayed_work(ib_wq,
                               &port->ib_cpi_work, delay);
        }
        queue_work(ib_wq, &sa_dev->port[port_num].update_task);
    }
}
static void ib_sa_add_one(struct ib_device *device)
{
    struct ib_sa_device *sa_dev;
    int s, e, i;
    int count = 0;

    s = rdma_start_port(device);
    e = rdma_end_port(device);

    sa_dev = kzalloc(sizeof *sa_dev +
                     (e - s + 1) * sizeof (struct ib_sa_port),
                     GFP_KERNEL);
    if (!sa_dev)
        return;

    sa_dev->start_port = s;
    sa_dev->end_port   = e;

    for (i = 0; i <= e - s; ++i) {
        spin_lock_init(&sa_dev->port[i].ah_lock);
        if (!rdma_cap_ib_sa(device, i + 1))
            continue;

        sa_dev->port[i].sm_ah    = NULL;
        sa_dev->port[i].port_num = i + s;

        spin_lock_init(&sa_dev->port[i].classport_lock);
        sa_dev->port[i].classport_info.valid = false;

        sa_dev->port[i].agent =
            ib_register_mad_agent(device, i + s, IB_QPT_GSI,
                                  NULL, 0, send_handler,
                                  recv_handler, sa_dev, 0);
        if (IS_ERR(sa_dev->port[i].agent))
            goto err;

        INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
        INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
                          update_ib_cpi);

        count++;
    }

    if (!count)
        goto free;

    ib_set_client_data(device, &sa_client, sa_dev);

    /*
     * We register our event handler after everything is set up,
     * and then update our cached info after the event handler is
     * registered to avoid any problems if a port changes state
     * during our initialization.
     */

    INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
    ib_register_event_handler(&sa_dev->event_handler);

    for (i = 0; i <= e - s; ++i) {
        if (rdma_cap_ib_sa(device, i + 1))
            update_sm_ah(&sa_dev->port[i].update_task);
    }

    return;

err:
    while (--i >= 0) {
        if (rdma_cap_ib_sa(device, i + 1))
            ib_unregister_mad_agent(sa_dev->port[i].agent);
    }
free:
    kfree(sa_dev);
    return;
}
static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
    struct ib_sa_device *sa_dev = client_data;
    int i;

    if (!sa_dev)
        return;

    ib_unregister_event_handler(&sa_dev->event_handler);
    flush_workqueue(ib_wq);

    for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
        if (rdma_cap_ib_sa(device, i + 1)) {
            cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
            ib_unregister_mad_agent(sa_dev->port[i].agent);
            if (sa_dev->port[i].sm_ah)
                kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
        }
    }

    kfree(sa_dev);
}
int ib_sa_init(void)
{
    int ret;

    get_random_bytes(&tid, sizeof tid);

    atomic_set(&ib_nl_sa_request_seq, 0);

    ret = ib_register_client(&sa_client);
    if (ret) {
        pr_err("Couldn't register ib_sa client\n");
        goto err1;
    }

    ret = mcast_init();
    if (ret) {
        pr_err("Couldn't initialize multicast handling\n");
        goto err2;
    }

    ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
    if (!ib_nl_wq) {
        ret = -ENOMEM;
        goto err3;
    }

    INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

    return 0;

err3:
    mcast_cleanup();
err2:
    ib_unregister_client(&sa_client);
err1:
    return ret;
}
void ib_sa_cleanup(void)
{
    cancel_delayed_work(&ib_nl_timed_work);
    flush_workqueue(ib_nl_wq);
    destroy_workqueue(ib_nl_wq);
    mcast_cleanup();
    ib_unregister_client(&sa_client);
    idr_destroy(&query_idr);
}