1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Copyright (C) 2018 - 2019 Intel Corporation
7 #include <net/cfg80211.h>
/*
 * Parse one FTM (Fine Timing Measurement) sub-request for a peer and
 * validate it against the device's advertised pmsr FTM capabilities.
 * Fills @out->ftm from the nested attributes in @ftmreq; on a capability
 * mismatch an extack error message is set and a negative errno is
 * returned (NOTE(review): the error-return statements and closing braces
 * are elided in this excerpt — original line numbering shows gaps).
 */
12 static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
13 struct nlattr *ftmreq,
14 struct cfg80211_pmsr_request_peer *out,
15 struct genl_info *info)
17 const struct cfg80211_pmsr_capabilities *capa = rdev->wiphy.pmsr_capa;
18 struct nlattr *tb[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1];
19 u32 preamble = NL80211_PREAMBLE_DMG; /* only optional in DMG */
/* the peer's channel width must be one the device can do FTM on */
21 /* validate existing data */
22 if (!(rdev->wiphy.pmsr_capa->ftm.bandwidths & BIT(out->chandef.width))) {
23 NL_SET_ERR_MSG(info->extack, "FTM: unsupported bandwidth");
27 /* no validation needed - was already done via nested policy */
28 nla_parse_nested_deprecated(tb, NL80211_PMSR_FTM_REQ_ATTR_MAX, ftmreq,
31 if (tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE])
32 preamble = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]);
34 /* set up values - struct is 0-initialized */
35 out->ftm.requested = true;
/*
 * Preamble is optional only on 60 GHz (DMG) — elsewhere userspace
 * must supply it explicitly.  NOTE(review): the non-60GHz case label
 * is among the elided lines here.
 */
37 switch (out->chandef.chan->band) {
38 case NL80211_BAND_60GHZ:
42 if (!tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]) {
43 NL_SET_ERR_MSG(info->extack,
44 "FTM: must specify preamble");
/* the chosen (or defaulted) preamble must be in the capability bitmap */
49 if (!(capa->ftm.preambles & BIT(preamble))) {
50 NL_SET_ERR_MSG_ATTR(info->extack,
51 tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE],
52 "FTM: invalid preamble");
56 out->ftm.preamble = preamble;
58 out->ftm.burst_period = 0;
59 if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
60 out->ftm.burst_period =
61 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
/* ASAP flag: presence of the attribute selects ASAP mode; both modes
 * are gated on the corresponding capability bit */
63 out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
64 if (out->ftm.asap && !capa->ftm.asap) {
65 NL_SET_ERR_MSG_ATTR(info->extack,
66 tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP],
67 "FTM: ASAP mode not supported");
71 if (!out->ftm.asap && !capa->ftm.non_asap) {
72 NL_SET_ERR_MSG(info->extack,
73 "FTM: non-ASAP mode not supported");
77 out->ftm.num_bursts_exp = 0;
78 if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
79 out->ftm.num_bursts_exp =
80 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
/* max_bursts_exponent < 0 means "no limit advertised" — skip the check */
82 if (capa->ftm.max_bursts_exponent >= 0 &&
83 out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
84 NL_SET_ERR_MSG_ATTR(info->extack,
85 tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP],
86 "FTM: max NUM_BURSTS_EXP must be set lower than the device limit");
/* 15 is the spec value meaning "no preference" for burst duration */
90 out->ftm.burst_duration = 15;
91 if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
92 out->ftm.burst_duration =
93 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
95 out->ftm.ftms_per_burst = 0;
96 if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
97 out->ftm.ftms_per_burst =
98 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST]);
/* 0 means "no preference"; if the device advertises a limit, the
 * requested count must be non-zero and within it */
100 if (capa->ftm.max_ftms_per_burst &&
101 (out->ftm.ftms_per_burst > capa->ftm.max_ftms_per_burst ||
102 out->ftm.ftms_per_burst == 0)) {
103 NL_SET_ERR_MSG_ATTR(info->extack,
104 tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST],
105 "FTM: FTMs per burst must be set lower than the device limit but non-zero");
/* default of 3 FTM-request retries when userspace doesn't specify */
109 out->ftm.ftmr_retries = 3;
110 if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
111 out->ftm.ftmr_retries =
112 nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
/* optional LCI / civic-location requests, each gated on capability */
114 out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
115 if (out->ftm.request_lci && !capa->ftm.request_lci) {
116 NL_SET_ERR_MSG_ATTR(info->extack,
117 tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI],
118 "FTM: LCI request not supported");
121 out->ftm.request_civicloc =
122 !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC];
123 if (out->ftm.request_civicloc && !capa->ftm.request_civicloc) {
124 NL_SET_ERR_MSG_ATTR(info->extack,
125 tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC],
126 "FTM: civic location request not supported");
/*
 * Parse one NL80211_PMSR_ATTR_PEERS entry: peer MAC address, channel
 * definition and the nested per-type measurement request(s), filling
 * @out.  Returns 0 or a negative errno with extack set (error returns
 * and some closing braces are elided in this excerpt).
 */
132 static int pmsr_parse_peer(struct cfg80211_registered_device *rdev,
134 struct cfg80211_pmsr_request_peer *out,
135 struct genl_info *info)
137 struct nlattr *tb[NL80211_PMSR_PEER_ATTR_MAX + 1];
138 struct nlattr *req[NL80211_PMSR_REQ_ATTR_MAX + 1];
142 /* no validation needed - was already done via nested policy */
143 nla_parse_nested_deprecated(tb, NL80211_PMSR_PEER_ATTR_MAX, peer,
/* address, channel and request are all mandatory for a peer entry */
146 if (!tb[NL80211_PMSR_PEER_ATTR_ADDR] ||
147 !tb[NL80211_PMSR_PEER_ATTR_CHAN] ||
148 !tb[NL80211_PMSR_PEER_ATTR_REQ]) {
149 NL_SET_ERR_MSG_ATTR(info->extack, peer,
150 "insufficient peer data");
154 memcpy(out->addr, nla_data(tb[NL80211_PMSR_PEER_ATTR_ADDR]), ETH_ALEN);
/* info->attrs is scratch at this point — clear it, then re-parse the
 * nested CHAN attributes into it so nl80211_parse_chandef() can run */
156 /* reuse info->attrs */
157 memset(info->attrs, 0, sizeof(*info->attrs) * (NL80211_ATTR_MAX + 1));
158 /* need to validate here, we don't want to have validation recursion */
159 err = nla_parse_nested_deprecated(info->attrs, NL80211_ATTR_MAX,
160 tb[NL80211_PMSR_PEER_ATTR_CHAN],
161 nl80211_policy, info->extack);
165 err = nl80211_parse_chandef(rdev, info, &out->chandef);
169 /* no validation needed - was already done via nested policy */
170 nla_parse_nested_deprecated(req, NL80211_PMSR_REQ_ATTR_MAX,
171 tb[NL80211_PMSR_PEER_ATTR_REQ], NULL,
174 if (!req[NL80211_PMSR_REQ_ATTR_DATA]) {
175 NL_SET_ERR_MSG_ATTR(info->extack,
176 tb[NL80211_PMSR_PEER_ATTR_REQ],
177 "missing request type/data");
/* AP-TSF reporting is opt-in and must be supported by the device */
181 if (req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF])
182 out->report_ap_tsf = true;
184 if (out->report_ap_tsf && !rdev->wiphy.pmsr_capa->report_ap_tsf) {
185 NL_SET_ERR_MSG_ATTR(info->extack,
186 req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF],
187 "reporting AP TSF is not supported");
/* dispatch each nested measurement type; only FTM is handled here,
 * anything else is rejected with an extack message */
191 nla_for_each_nested(treq, req[NL80211_PMSR_REQ_ATTR_DATA], rem) {
192 switch (nla_type(treq)) {
193 case NL80211_PMSR_TYPE_FTM:
194 err = pmsr_parse_ftm(rdev, treq, out, info);
197 NL_SET_ERR_MSG_ATTR(info->extack, treq,
198 "unsupported measurement type");
/*
 * NL80211_CMD_PEER_MEASUREMENT_START handler: count and parse the peer
 * list, allocate the request (flexible peers[] array via struct_size),
 * apply timeout / MAC-randomization options, hand the request to the
 * driver and, on success, queue it on wdev->pmsr_list and return the
 * cookie via extack.  NOTE(review): error paths, the counting loop body
 * and the kfree-on-failure cleanup are elided in this excerpt.
 */
209 int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
211 struct nlattr *reqattr = info->attrs[NL80211_ATTR_PEER_MEASUREMENTS];
212 struct cfg80211_registered_device *rdev = info->user_ptr[0];
213 struct wireless_dev *wdev = info->user_ptr[1];
214 struct cfg80211_pmsr_request *req;
215 struct nlattr *peers, *peer;
216 int count, rem, err, idx;
/* device must advertise pmsr capabilities at all */
218 if (!rdev->wiphy.pmsr_capa)
224 peers = nla_find(nla_data(reqattr), nla_len(reqattr),
225 NL80211_PMSR_ATTR_PEERS);
/* first pass: count peers and enforce the device's max_peers limit */
230 nla_for_each_nested(peer, peers, rem) {
233 if (count > rdev->wiphy.pmsr_capa->max_peers) {
234 NL_SET_ERR_MSG_ATTR(info->extack, peer,
235 "Too many peers used");
240 req = kzalloc(struct_size(req, peers, count), GFP_KERNEL);
244 if (info->attrs[NL80211_ATTR_TIMEOUT])
245 req->timeout = nla_get_u32(info->attrs[NL80211_ATTR_TIMEOUT]);
/* NL80211_ATTR_MAC present => randomized MAC requested; otherwise the
 * wdev's own address with a broadcast (all-ones) mask is used */
247 if (info->attrs[NL80211_ATTR_MAC]) {
248 if (!rdev->wiphy.pmsr_capa->randomize_mac_addr) {
249 NL_SET_ERR_MSG_ATTR(info->extack,
250 info->attrs[NL80211_ATTR_MAC],
251 "device cannot randomize MAC address");
256 err = nl80211_parse_random_mac(info->attrs, req->mac_addr,
261 memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN);
262 eth_broadcast_addr(req->mac_addr_mask);
/* second pass: actually parse each peer into req->peers[] */
266 nla_for_each_nested(peer, peers, rem) {
267 /* NB: this reuses info->attrs, but we no longer need it */
268 err = pmsr_parse_peer(rdev, peer, &req->peers[idx], info);
274 req->n_peers = count;
275 req->cookie = cfg80211_assign_cookie(rdev);
276 req->nl_portid = info->snd_portid;
278 err = rdev_start_pmsr(rdev, wdev, req);
282 list_add_tail(&req->list, &wdev->pmsr_list);
/* return the cookie to userspace so it can match later events */
284 nl_set_extack_cookie_u64(info->extack, req->cookie);
/*
 * Driver API: notify userspace that a measurement request has finished.
 * Sends NL80211_CMD_PEER_MEASUREMENT_COMPLETE (unicast to the
 * requester's portid) carrying wiphy/wdev ids and the request cookie,
 * then unlinks the request from wdev->pmsr_list under pmsr_lock.
 * NOTE(review): nlmsg/hdr NULL checks, free labels and the closing
 * brace are elided in this excerpt.
 */
291 void cfg80211_pmsr_complete(struct wireless_dev *wdev,
292 struct cfg80211_pmsr_request *req,
295 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
299 trace_cfg80211_pmsr_complete(wdev->wiphy, wdev, req->cookie);
301 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
305 hdr = nl80211hdr_put(msg, 0, 0, 0,
306 NL80211_CMD_PEER_MEASUREMENT_COMPLETE);
310 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
311 nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
315 if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
319 genlmsg_end(msg, hdr);
320 genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
/* drop the request from the pending list; _bh lock matches report path */
325 spin_lock_bh(&wdev->pmsr_lock);
326 list_del(&req->list);
327 spin_unlock_bh(&wdev->pmsr_lock);
330 EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
/*
 * Emit the FTM-specific result attributes for one measurement result
 * into @msg.  On failure status only the failure reason (and possibly
 * a busy-retry time) are sent; otherwise the full set of burst/RTT/
 * distance attributes is emitted via the local PUT* helper macros.
 * Returns 0 on success; NOTE(review): the "goto error" bodies of the
 * macros and the error label/return are elided in this excerpt.
 */
332 static int nl80211_pmsr_send_ftm_res(struct sk_buff *msg,
333 struct cfg80211_pmsr_result *res)
/* failed measurements carry only a reason, plus retry-after for busy peers */
335 if (res->status == NL80211_PMSR_STATUS_FAILURE) {
336 if (nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON,
337 res->ftm.failure_reason))
340 if (res->ftm.failure_reason ==
341 NL80211_PMSR_FTM_FAILURE_PEER_BUSY &&
342 res->ftm.busy_retry_time &&
343 nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME,
344 res->ftm.busy_retry_time))
/* PUT: unconditional attribute; PUTOPT: gated on the matching
 * res->ftm.<val>_valid flag; *_U64 variants use the padded 64-bit put */
350 #define PUT(tp, attr, val) \
352 if (nla_put_##tp(msg, \
353 NL80211_PMSR_FTM_RESP_ATTR_##attr, \
358 #define PUTOPT(tp, attr, val) \
360 if (res->ftm.val##_valid) \
361 PUT(tp, attr, val); \
364 #define PUT_U64(attr, val) \
366 if (nla_put_u64_64bit(msg, \
367 NL80211_PMSR_FTM_RESP_ATTR_##attr,\
369 NL80211_PMSR_FTM_RESP_ATTR_PAD)) \
373 #define PUTOPT_U64(attr, val) \
375 if (res->ftm.val##_valid) \
376 PUT_U64(attr, val); \
/* negative burst_index means "not applicable" and is simply omitted */
379 if (res->ftm.burst_index >= 0)
380 PUT(u32, BURST_INDEX, burst_index);
381 PUTOPT(u32, NUM_FTMR_ATTEMPTS, num_ftmr_attempts);
382 PUTOPT(u32, NUM_FTMR_SUCCESSES, num_ftmr_successes);
383 PUT(u8, NUM_BURSTS_EXP, num_bursts_exp);
384 PUT(u8, BURST_DURATION, burst_duration);
385 PUT(u8, FTMS_PER_BURST, ftms_per_burst);
386 PUTOPT(s32, RSSI_AVG, rssi_avg);
387 PUTOPT(s32, RSSI_SPREAD, rssi_spread);
388 if (res->ftm.tx_rate_valid &&
389 !nl80211_put_sta_rate(msg, &res->ftm.tx_rate,
390 NL80211_PMSR_FTM_RESP_ATTR_TX_RATE))
392 if (res->ftm.rx_rate_valid &&
393 !nl80211_put_sta_rate(msg, &res->ftm.rx_rate,
394 NL80211_PMSR_FTM_RESP_ATTR_RX_RATE))
396 PUTOPT_U64(RTT_AVG, rtt_avg);
397 PUTOPT_U64(RTT_VARIANCE, rtt_variance);
398 PUTOPT_U64(RTT_SPREAD, rtt_spread);
399 PUTOPT_U64(DIST_AVG, dist_avg);
400 PUTOPT_U64(DIST_VARIANCE, dist_variance);
401 PUTOPT_U64(DIST_SPREAD, dist_spread);
/* LCI / civic location are variable-length blobs, sent only if present */
402 if (res->ftm.lci && res->ftm.lci_len &&
403 nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_LCI,
404 res->ftm.lci_len, res->ftm.lci))
406 if (res->ftm.civicloc && res->ftm.civicloc_len &&
407 nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC,
408 res->ftm.civicloc_len, res->ftm.civicloc))
/*
 * Build the nested attribute structure for one result:
 *   PEER_MEASUREMENTS > PEERS > peer(1) > { ADDR, RESP > { STATUS,
 *   HOST_TIME, [AP_TSF], [FINAL], DATA > typedata } }
 * delegating the type-specific payload to nl80211_pmsr_send_ftm_res()
 * for FTM.  Returns 0 on success; NOTE(review): the nest-start NULL
 * checks, "goto error" statements and error label are elided in this
 * excerpt.
 */
420 static int nl80211_pmsr_send_result(struct sk_buff *msg,
421 struct cfg80211_pmsr_result *res)
423 struct nlattr *pmsr, *peers, *peer, *resp, *data, *typedata;
425 pmsr = nla_nest_start_noflag(msg, NL80211_ATTR_PEER_MEASUREMENTS);
429 peers = nla_nest_start_noflag(msg, NL80211_PMSR_ATTR_PEERS);
/* a single peer per message, so the nested index is simply 1 */
433 peer = nla_nest_start_noflag(msg, 1);
437 if (nla_put(msg, NL80211_PMSR_PEER_ATTR_ADDR, ETH_ALEN, res->addr))
440 resp = nla_nest_start_noflag(msg, NL80211_PMSR_PEER_ATTR_RESP);
444 if (nla_put_u32(msg, NL80211_PMSR_RESP_ATTR_STATUS, res->status) ||
445 nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_HOST_TIME,
446 res->host_time, NL80211_PMSR_RESP_ATTR_PAD))
449 if (res->ap_tsf_valid &&
450 nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_AP_TSF,
451 res->ap_tsf, NL80211_PMSR_RESP_ATTR_PAD))
/* FINAL flag marks the last result for this peer */
454 if (res->final && nla_put_flag(msg, NL80211_PMSR_RESP_ATTR_FINAL))
457 data = nla_nest_start_noflag(msg, NL80211_PMSR_RESP_ATTR_DATA);
/* typedata nest is keyed by the measurement type itself */
461 typedata = nla_nest_start_noflag(msg, res->type);
466 case NL80211_PMSR_TYPE_FTM:
467 if (nl80211_pmsr_send_ftm_res(msg, res))
/* close all nests in reverse order of opening */
474 nla_nest_end(msg, typedata);
475 nla_nest_end(msg, data);
476 nla_nest_end(msg, resp);
477 nla_nest_end(msg, peer);
478 nla_nest_end(msg, peers);
479 nla_nest_end(msg, pmsr);
/*
 * Driver API: deliver one measurement result to the requesting
 * userspace process as NL80211_CMD_PEER_MEASUREMENT_RESULT.  Builds
 * the message (ids + cookie + nested result) and unicasts it to
 * req->nl_portid.  NOTE(review): allocation NULL checks, the free/error
 * labels and closing brace are elided in this excerpt.
 */
486 void cfg80211_pmsr_report(struct wireless_dev *wdev,
487 struct cfg80211_pmsr_request *req,
488 struct cfg80211_pmsr_result *result,
491 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
496 trace_cfg80211_pmsr_report(wdev->wiphy, wdev, req->cookie,
500 * Currently, only variable items are LCI and civic location,
501 * both of which are reasonably short so we don't need to
502 * worry about them here for the allocation.
504 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
508 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PEER_MEASUREMENT_RESULT);
512 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
513 nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
517 if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
/* an oversized result is a driver bug — log it rate-limited */
521 err = nl80211_pmsr_send_result(msg, result);
523 pr_err_ratelimited("peer measurement result: message didn't fit!");
527 genlmsg_end(msg, hdr);
528 genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
533 EXPORT_SYMBOL_GPL(cfg80211_pmsr_report);
/*
 * Abort and free pending measurement requests.  Requests are first
 * moved to a private list under pmsr_lock (a _bh spinlock, so the
 * driver abort callback must not be called under it), then aborted and
 * freed outside the lock.  Caller must hold wdev->mtx
 * (lockdep-asserted).  NOTE(review): the selection condition inside the
 * first loop and the kfree in the second are elided in this excerpt.
 */
535 static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev)
537 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
538 struct cfg80211_pmsr_request *req, *tmp;
539 LIST_HEAD(free_list);
541 lockdep_assert_held(&wdev->mtx);
543 spin_lock_bh(&wdev->pmsr_lock);
544 list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) {
547 list_move_tail(&req->list, &free_list);
549 spin_unlock_bh(&wdev->pmsr_lock);
/* now safe to call into the driver for each collected request */
551 list_for_each_entry_safe(req, tmp, &free_list, list) {
552 rdev_abort_pmsr(rdev, wdev, req);
558 void cfg80211_pmsr_free_wk(struct work_struct *work)
560 struct wireless_dev *wdev = container_of(work, struct wireless_dev,
564 cfg80211_pmsr_process_abort(wdev);
/*
 * Called when the wdev goes down: mark pending requests (loop body
 * elided in this excerpt) under pmsr_lock, then synchronously abort
 * them all; afterwards the pending list must be empty.
 */
568 void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
570 struct cfg80211_pmsr_request *req;
573 spin_lock_bh(&wdev->pmsr_lock);
574 list_for_each_entry(req, &wdev->pmsr_list, list) {
578 spin_unlock_bh(&wdev->pmsr_lock);
581 cfg80211_pmsr_process_abort(wdev);
/* every request should have been aborted and removed by now */
583 WARN_ON(!list_empty(&wdev->pmsr_list));
/*
 * Netlink-socket-release hook: when the socket identified by @portid
 * closes, find its requests on wdev->pmsr_list (matched by nl_portid)
 * and defer cleanup to the free worker — we're under a _bh spinlock
 * here, so the driver abort can't be called directly.  NOTE(review):
 * the per-request marking before schedule_work is elided in this
 * excerpt.
 */
586 void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid)
588 struct cfg80211_pmsr_request *req;
590 spin_lock_bh(&wdev->pmsr_lock);
591 list_for_each_entry(req, &wdev->pmsr_list, list) {
592 if (req->nl_portid == portid) {
594 schedule_work(&wdev->pmsr_free_wk);
597 spin_unlock_bh(&wdev->pmsr_lock);
600 #endif /* __PMSR_H */