linux.git: drivers/net/wireless/realtek/rtw88/fw.c (4b41bf531998816d301fef7af2dd500d71303eb3)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include "main.h"
#include "coex.h"
#include "fw.h"
#include "tx.h"
#include "reg.h"
#include "sec.h"
#include "debug.h"
#include "util.h"

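/* Handle extended C2H commands (C2H_HALMAC); the first payload byte
 * carries the sub-command id.
 */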
static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
                                      struct sk_buff *skb)
{
        struct rtw_c2h_cmd *c2h;
        u8 sub_cmd_id;

        c2h = get_c2h_from_skb(skb);
        sub_cmd_id = c2h->payload[0];

        switch (sub_cmd_id) {
        case C2H_CCX_RPT:
                rtw_tx_report_handle(rtwdev, skb);
                break;
        default:
                break;
        }
}

struct rtw_fw_iter_ra_data {
        struct rtw_dev *rtwdev;
        u8 *payload;
};

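/* Station iterator for RA report C2H events: if the report's mac_id
 * matches this station, translate the firmware rate (legacy/HT/VHT,
 * SGI, bandwidth) into the station's ra_report and cached bitrate.
 */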
static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta)
{
        struct rtw_fw_iter_ra_data *ra_data = data;
        struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
        u8 mac_id, rate, sgi, bw;
        u8 mcs, nss;
        u32 bit_rate;

        mac_id = GET_RA_REPORT_MACID(ra_data->payload);
        if (si->mac_id != mac_id)
                return;

        si->ra_report.txrate.flags = 0;

        rate = GET_RA_REPORT_RATE(ra_data->payload);
        sgi = GET_RA_REPORT_SGI(ra_data->payload);
        bw = GET_RA_REPORT_BW(ra_data->payload);

        if (rate < DESC_RATEMCS0) {
                si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate);
                goto legacy;
        }

        rtw_desc_to_mcsrate(rate, &mcs, &nss);
        if (rate >= DESC_RATEVHT1SS_MCS0)
                si->ra_report.txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
        else if (rate >= DESC_RATEMCS0)
                si->ra_report.txrate.flags |= RATE_INFO_FLAGS_MCS;

        if (rate >= DESC_RATEMCS0) {
                si->ra_report.txrate.mcs = mcs;
                si->ra_report.txrate.nss = nss;
        }

        if (sgi)
                si->ra_report.txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

        if (bw == RTW_CHANNEL_WIDTH_80)
                si->ra_report.txrate.bw = RATE_INFO_BW_80;
        else if (bw == RTW_CHANNEL_WIDTH_40)
                si->ra_report.txrate.bw = RATE_INFO_BW_40;
        else
                si->ra_report.txrate.bw = RATE_INFO_BW_20;

legacy:
        bit_rate = cfg80211_calculate_bitrate(&si->ra_report.txrate);

        si->ra_report.desc_rate = rate;
        si->ra_report.bit_rate = bit_rate;
}

static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
                                    u8 length)
{
        struct rtw_fw_iter_ra_data ra_data;

        if (WARN(length < 7, "invalid ra report c2h length\n"))
                return;

        ra_data.rtwdev = rtwdev;
        ra_data.payload = payload;
        rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
}

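/* Handle a queued C2H command in process context, under rtwdev->mutex. */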
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
        struct rtw_c2h_cmd *c2h;
        u32 pkt_offset;
        u8 len;

        pkt_offset = *((u32 *)skb->cb);
        c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
        len = skb->len - pkt_offset - 2;

        mutex_lock(&rtwdev->mutex);

        switch (c2h->id) {
        case C2H_BT_INFO:
                rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
                break;
        case C2H_WLAN_INFO:
                rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
                break;
        case C2H_HALMAC:
                rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
                break;
        case C2H_RA_RPT:
                rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
                break;
        default:
                break;
        }

        mutex_unlock(&rtwdev->mutex);
}

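/* IRQ-safe C2H entry point: BT MP info responses are handled right away,
 * everything else is queued and deferred to the c2h work.
 */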
void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
                               struct sk_buff *skb)
{
        struct rtw_c2h_cmd *c2h;
        u8 len;

        c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
        len = skb->len - pkt_offset - 2;
        *((u32 *)skb->cb) = pkt_offset;

        rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
                c2h->id, c2h->seq, len);

        switch (c2h->id) {
        case C2H_BT_MP_INFO:
                rtw_coex_info_response(rtwdev, skb);
                break;
        default:
                /* pass offset for further operation */
                *((u32 *)skb->cb) = pkt_offset;
                skb_queue_tail(&rtwdev->c2h_queue, skb);
                ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
                break;
        }
}
EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe);

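/* Write an 8-byte H2C command into the current HMEBOX mailbox pair,
 * polling REG_HMETFR until the firmware has freed the box (or the
 * retry budget runs out), then advance to the next box.
 */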
static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
                                    u8 *h2c)
{
        u8 box;
        u8 box_state;
        u32 box_reg, box_ex_reg;
        u32 h2c_wait;
        int idx;

        rtw_dbg(rtwdev, RTW_DBG_FW,
                "send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n",
                h2c[3], h2c[2], h2c[1], h2c[0],
                h2c[7], h2c[6], h2c[5], h2c[4]);

        spin_lock(&rtwdev->h2c.lock);

        box = rtwdev->h2c.last_box_num;
        switch (box) {
        case 0:
                box_reg = REG_HMEBOX0;
                box_ex_reg = REG_HMEBOX0_EX;
                break;
        case 1:
                box_reg = REG_HMEBOX1;
                box_ex_reg = REG_HMEBOX1_EX;
                break;
        case 2:
                box_reg = REG_HMEBOX2;
                box_ex_reg = REG_HMEBOX2_EX;
                break;
        case 3:
                box_reg = REG_HMEBOX3;
                box_ex_reg = REG_HMEBOX3_EX;
                break;
        default:
                WARN(1, "invalid h2c mail box number\n");
                goto out;
        }

        h2c_wait = 20;
        do {
                box_state = rtw_read8(rtwdev, REG_HMETFR);
        } while ((box_state >> box) & 0x1 && --h2c_wait > 0);

        if (!h2c_wait) {
                rtw_err(rtwdev, "failed to send h2c command\n");
                goto out;
        }

        for (idx = 0; idx < 4; idx++)
                rtw_write8(rtwdev, box_reg + idx, h2c[idx]);
        for (idx = 0; idx < 4; idx++)
                rtw_write8(rtwdev, box_ex_reg + idx, h2c[idx + 4]);

        if (++rtwdev->h2c.last_box_num >= 4)
                rtwdev->h2c.last_box_num = 0;

out:
        spin_unlock(&rtwdev->h2c.lock);
}

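/* Send a full H2C packet (offloaded command) through the HCI data path,
 * tagged with the driver's running H2C sequence number.
 */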
static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt)
{
        int ret;

        spin_lock(&rtwdev->h2c.lock);

        FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, rtwdev->h2c.seq);
        ret = rtw_hci_write_data_h2c(rtwdev, h2c_pkt, H2C_PKT_SIZE);
        if (ret)
                rtw_err(rtwdev, "failed to send h2c packet\n");
        rtwdev->h2c.seq++;

        spin_unlock(&rtwdev->h2c.lock);
}

void
rtw_fw_send_general_info(struct rtw_dev *rtwdev)
{
        struct rtw_fifo_conf *fifo = &rtwdev->fifo;
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
        u16 total_size = H2C_PKT_HDR_SIZE + 4;

        rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);

        SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);

        GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt,
                                        fifo->rsvd_fw_txbuf_addr -
                                        fifo->rsvd_boundary);

        rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void
rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
{
        struct rtw_hal *hal = &rtwdev->hal;
        struct rtw_efuse *efuse = &rtwdev->efuse;
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
        u16 total_size = H2C_PKT_HDR_SIZE + 8;
        u8 fw_rf_type = 0;

        if (hal->rf_type == RF_1T1R)
                fw_rf_type = FW_RF_1T1R;
        else if (hal->rf_type == RF_2T2R)
                fw_rf_type = FW_RF_2T2R;

        rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_PHYDM_INFO);

        SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
        PHYDM_INFO_SET_REF_TYPE(h2c_pkt, efuse->rfe_option);
        PHYDM_INFO_SET_RF_TYPE(h2c_pkt, fw_rf_type);
        PHYDM_INFO_SET_CUT_VER(h2c_pkt, hal->cut_version);
        PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, hal->antenna_tx);
        PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, hal->antenna_rx);

        rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
        u16 total_size = H2C_PKT_HDR_SIZE + 1;

        rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_IQK);
        SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
        IQK_SET_CLEAR(h2c_pkt, para->clear);
        IQK_SET_SEGMENT_IQK(h2c_pkt, para->segment_iqk);

        rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_query_bt_info(struct rtw_dev *rtwdev)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO);

        SET_QUERY_BT_INFO(h2c_pkt, true);

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_CH_INFO);

        SET_WL_CH_INFO_LINK(h2c_pkt, link);
        SET_WL_CH_INFO_CHNL(h2c_pkt, ch);
        SET_WL_CH_INFO_BW(h2c_pkt, bw);

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
                             struct rtw_coex_info_req *req)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_MP_INFO);

        SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq);
        SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code);
        SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1);
        SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2);
        SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3);

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
        u8 index = 0 - bt_pwr_dec_lvl;

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_FORCE_BT_TX_POWER);

        SET_BT_TX_POWER_INDEX(h2c_pkt, index);

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_IGNORE_WLAN_ACTION);

        SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, enable);

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
                           u8 para1, u8 para2, u8 para3, u8 para4, u8 para5)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_COEX_TDMA_TYPE);

        SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, para1);
        SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, para2);
        SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, para3);
        SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, para4);
        SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, para5);

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BT_WIFI_CONTROL);

        SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, op_code);

        SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, *data);
        SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, *(data + 1));
        SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, *(data + 2));
        SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, *(data + 3));
        SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, *(data + 4));

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
        u8 rssi = ewma_rssi_read(&si->avg_rssi);
        bool stbc_en = si->stbc_en ? true : false;

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSSI_MONITOR);

        SET_RSSI_INFO_MACID(h2c_pkt, si->mac_id);
        SET_RSSI_INFO_RSSI(h2c_pkt, rssi);
        SET_RSSI_INFO_STBC(h2c_pkt, stbc_en);

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

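/* Push the station's rate adaptation parameters (rate id, bandwidth,
 * SGI/LDPC/VHT capabilities and the 32-bit rate mask) to the firmware.
 */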
void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
        bool no_update = si->updated;
        bool disable_pt = true;

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);

        SET_RA_INFO_MACID(h2c_pkt, si->mac_id);
        SET_RA_INFO_RATE_ID(h2c_pkt, si->rate_id);
        SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
        SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
        SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
        SET_RA_INFO_LDPC(h2c_pkt, si->ldpc_en);
        SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update);
        SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
        SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
        SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff));
        SET_RA_INFO_RA_MASK1(h2c_pkt, (si->ra_mask & 0xff00) >> 8);
        SET_RA_INFO_RA_MASK2(h2c_pkt, (si->ra_mask & 0xff0000) >> 16);
        SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24);

        si->init_ra_lv = 0;
        si->updated = true;

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_MEDIA_STATUS_RPT);
        MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, connect);
        MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, mac_id);

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

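/* Program the firmware LPS power mode from the current lps_conf. */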
void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
{
        struct rtw_lps_conf *conf = &rtwdev->lps_conf;
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SET_PWR_MODE);

        SET_PWR_MODE_SET_MODE(h2c_pkt, conf->mode);
        SET_PWR_MODE_SET_RLBM(h2c_pkt, conf->rlbm);
        SET_PWR_MODE_SET_SMART_PS(h2c_pkt, conf->smart_ps);
        SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, conf->awake_interval);
        SET_PWR_MODE_SET_PORT_ID(h2c_pkt, conf->port_id);
        SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, conf->state);

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

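/* Return the reserved page index recorded for a packet type, or 0 if the
 * type has not been added to the reserved page list.
 */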
static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
                                     enum rtw_rsvd_packet_type type)
{
        struct rtw_rsvd_page *rsvd_pkt;
        u8 location = 0;

        list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
                if (type == rsvd_pkt->type)
                        location = rsvd_pkt->page;
        }

        return location;
}

void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
{
        struct rtw_lps_conf *conf = &rtwdev->lps_conf;
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
        u8 loc_pg, loc_dpk;

        loc_pg = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_INFO);
        loc_dpk = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_DPK);

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_LPS_PG_INFO);

        LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
        LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
        LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

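/* Tell the firmware where the offloaded frames (probe response, PS-Poll,
 * null and QoS null) sit within the reserved page area.
 */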
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
{
        u8 h2c_pkt[H2C_PKT_SIZE] = {0};
        u8 location = 0;

        SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSVD_PAGE);

        location = rtw_get_rsvd_page_location(rtwdev, RSVD_PROBE_RESP);
        *(h2c_pkt + 1) = location;
        rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PROBE_RESP loc: %d\n", location);

        location = rtw_get_rsvd_page_location(rtwdev, RSVD_PS_POLL);
        *(h2c_pkt + 2) = location;
        rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PS_POLL loc: %d\n", location);

        location = rtw_get_rsvd_page_location(rtwdev, RSVD_NULL);
        *(h2c_pkt + 3) = location;
        rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_NULL loc: %d\n", location);

        location = rtw_get_rsvd_page_location(rtwdev, RSVD_QOS_NULL);
        *(h2c_pkt + 4) = location;
        rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_QOS_NULL loc: %d\n", location);

        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

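/* Get a beacon for the reserved page download; interfaces that do not
 * beacon (e.g. station mode) get a minimal 1-byte skb as a placeholder.
 */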
static struct sk_buff *
rtw_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
        struct sk_buff *skb_new;

        if (vif->type != NL80211_IFTYPE_AP &&
            vif->type != NL80211_IFTYPE_ADHOC &&
            !ieee80211_vif_is_mesh(vif)) {
                skb_new = alloc_skb(1, GFP_KERNEL);
                if (!skb_new)
                        return NULL;
                skb_put(skb_new, 1);
        } else {
                skb_new = ieee80211_beacon_get(hw, vif);
        }

        return skb_new;
}

static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
{
        struct rtw_dev *rtwdev = hw->priv;
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
        struct rtw_lps_pg_dpk_hdr *dpk_hdr;
        struct sk_buff *skb;
        u32 size;

        size = chip->tx_pkt_desc_sz + sizeof(*dpk_hdr);
        skb = alloc_skb(size, GFP_KERNEL);
        if (!skb)
                return NULL;

        skb_reserve(skb, chip->tx_pkt_desc_sz);
        dpk_hdr = skb_put_zero(skb, sizeof(*dpk_hdr));
        dpk_hdr->dpk_ch = dpk_info->dpk_ch;
        dpk_hdr->dpk_path_ok = dpk_info->dpk_path_ok[0];
        memcpy(dpk_hdr->dpk_txagc, dpk_info->dpk_txagc, 2);
        memcpy(dpk_hdr->dpk_gs, dpk_info->dpk_gs, 4);
        memcpy(dpk_hdr->coef, dpk_info->coef, 160);

        return skb;
}

static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
                                           struct ieee80211_vif *vif)
{
        struct rtw_dev *rtwdev = hw->priv;
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_lps_conf *conf = &rtwdev->lps_conf;
        struct rtw_lps_pg_info_hdr *pg_info_hdr;
        struct sk_buff *skb;
        u32 size;

        size = chip->tx_pkt_desc_sz + sizeof(*pg_info_hdr);
        skb = alloc_skb(size, GFP_KERNEL);
        if (!skb)
                return NULL;

        skb_reserve(skb, chip->tx_pkt_desc_sz);
        pg_info_hdr = skb_put_zero(skb, sizeof(*pg_info_hdr));
        pg_info_hdr->tx_bu_page_count = rtwdev->fifo.rsvd_drv_pg_num;
        pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
        pg_info_hdr->sec_cam_count =
                rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);

        conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;

        return skb;
}

static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
                                             struct ieee80211_vif *vif,
                                             enum rtw_rsvd_packet_type type)
{
        struct sk_buff *skb_new;

        switch (type) {
        case RSVD_BEACON:
                skb_new = rtw_beacon_get(hw, vif);
                break;
        case RSVD_PS_POLL:
                skb_new = ieee80211_pspoll_get(hw, vif);
                break;
        case RSVD_PROBE_RESP:
                skb_new = ieee80211_proberesp_get(hw, vif);
                break;
        case RSVD_NULL:
                skb_new = ieee80211_nullfunc_get(hw, vif, false);
                break;
        case RSVD_QOS_NULL:
                skb_new = ieee80211_nullfunc_get(hw, vif, true);
                break;
        case RSVD_LPS_PG_DPK:
                skb_new = rtw_lps_pg_dpk_get(hw);
                break;
        case RSVD_LPS_PG_INFO:
                skb_new = rtw_lps_pg_info_get(hw, vif);
                break;
        default:
                return NULL;
        }

        if (!skb_new)
                return NULL;

        return skb_new;
}

static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
        struct rtw_tx_pkt_info pkt_info;
        struct rtw_chip_info *chip = rtwdev->chip;
        u8 *pkt_desc;

        memset(&pkt_info, 0, sizeof(pkt_info));
        rtw_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb);
        pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
        memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
        rtw_tx_fill_tx_desc(&pkt_info, skb);
}

static inline u8 rtw_len_to_page(unsigned int len, u8 page_size)
{
        return DIV_ROUND_UP(len, page_size);
}

static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
                                      u8 page_margin, u32 page, u8 *buf,
                                      struct rtw_rsvd_page *rsvd_pkt)
{
        struct sk_buff *skb = rsvd_pkt->skb;

        if (rsvd_pkt->add_txdesc)
                rtw_fill_rsvd_page_desc(rtwdev, skb);

        if (page >= 1)
                memcpy(buf + page_margin + page_size * (page - 1),
                       skb->data, skb->len);
        else
                memcpy(buf, skb->data, skb->len);
}

void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
                       bool txdesc)
{
        struct rtw_rsvd_page *rsvd_pkt;

        lockdep_assert_held(&rtwdev->mutex);

        list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
                if (rsvd_pkt->type == type)
                        return;
        }

        rsvd_pkt = kmalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
        if (!rsvd_pkt)
                return;

        rsvd_pkt->type = type;
        rsvd_pkt->add_txdesc = txdesc;
        list_add_tail(&rsvd_pkt->list, &rtwdev->rsvd_page_list);
}

void rtw_reset_rsvd_page(struct rtw_dev *rtwdev)
{
        struct rtw_rsvd_page *rsvd_pkt, *tmp;

        lockdep_assert_held(&rtwdev->mutex);

        list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, list) {
                if (rsvd_pkt->type == RSVD_BEACON)
                        continue;
                list_del(&rsvd_pkt->list);
                kfree(rsvd_pkt);
        }
}

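/* Download a buffer to the reserved page area: point the beacon queue
 * head at pg_addr, back up and adjust the CR/TXQ control bits so the
 * write is accepted, send the data over HCI, poll BIT_BCN_VALID_V1,
 * then restore the saved register values.
 */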
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
                                u8 *buf, u32 size)
{
        u8 bckp[2];
        u8 val;
        u16 rsvd_pg_head;
        int ret;

        lockdep_assert_held(&rtwdev->mutex);

        if (!size)
                return -EINVAL;

        pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
        rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr | BIT_BCN_VALID_V1);

        val = rtw_read8(rtwdev, REG_CR + 1);
        bckp[0] = val;
        val |= BIT_ENSWBCN >> 8;
        rtw_write8(rtwdev, REG_CR + 1, val);

        val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
        bckp[1] = val;
        val &= ~(BIT_EN_BCNQ_DL >> 16);
        rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);

        ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
        if (ret) {
                rtw_err(rtwdev, "failed to write data to rsvd page\n");
                goto restore;
        }

        if (!check_hw_ready(rtwdev, REG_FIFOPAGE_CTRL_2, BIT_BCN_VALID_V1, 1)) {
                rtw_err(rtwdev, "error beacon valid\n");
                ret = -EBUSY;
        }

restore:
        rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
        rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
                    rsvd_pg_head | BIT_BCN_VALID_V1);
        rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
        rtw_write8(rtwdev, REG_CR + 1, bckp[0]);

        return ret;
}

static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
        u32 pg_size;
        u32 pg_num = 0;
        u16 pg_addr = 0;

        pg_size = rtwdev->chip->page_size;
        pg_num = size / pg_size + ((size & (pg_size - 1)) ? 1 : 0);
        if (pg_num > rtwdev->fifo.rsvd_drv_pg_num)
                return -ENOMEM;

        pg_addr = rtwdev->fifo.rsvd_drv_addr;

        return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
}

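/* Build a single buffer containing every reserved page packet. Each
 * packet's starting page is recorded in rsvd_pkt->page; the first packet
 * is copied at offset 0 and later packets start at page boundaries
 * shifted by page_margin (page_size - tx_pkt_desc_sz).
 */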
static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
                               struct ieee80211_vif *vif, u32 *size)
{
        struct ieee80211_hw *hw = rtwdev->hw;
        struct rtw_chip_info *chip = rtwdev->chip;
        struct sk_buff *iter;
        struct rtw_rsvd_page *rsvd_pkt;
        u32 page = 0;
        u8 total_page = 0;
        u8 page_size, page_margin, tx_desc_sz;
        u8 *buf;

        page_size = chip->page_size;
        tx_desc_sz = chip->tx_pkt_desc_sz;
        page_margin = page_size - tx_desc_sz;

        list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
                iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt->type);
                if (!iter) {
                        rtw_err(rtwdev, "fail to build rsvd packet\n");
                        goto release_skb;
                }
                rsvd_pkt->skb = iter;
                rsvd_pkt->page = total_page;
                if (rsvd_pkt->add_txdesc)
                        total_page += rtw_len_to_page(iter->len + tx_desc_sz,
                                                      page_size);
                else
                        total_page += rtw_len_to_page(iter->len, page_size);
        }

        if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
                rtw_err(rtwdev, "rsvd page over size: %d\n", total_page);
                goto release_skb;
        }

        *size = (total_page - 1) * page_size + page_margin;
        buf = kzalloc(*size, GFP_KERNEL);
        if (!buf)
                goto release_skb;

        list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
                rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
                                          page, buf, rsvd_pkt);
                page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
        }
        list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
                kfree_skb(rsvd_pkt->skb);

        return buf;

release_skb:
        list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
                kfree_skb(rsvd_pkt->skb);

        return NULL;
}

static int
rtw_download_beacon(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
{
        struct ieee80211_hw *hw = rtwdev->hw;
        struct sk_buff *skb;
        int ret = 0;

        skb = rtw_beacon_get(hw, vif);
        if (!skb) {
                rtw_err(rtwdev, "failed to get beacon skb\n");
                ret = -ENOMEM;
                goto out;
        }

        ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
        if (ret)
                rtw_err(rtwdev, "failed to download drv rsvd page\n");

        dev_kfree_skb(skb);

out:
        return ret;
}

int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
{
        u8 *buf;
        u32 size;
        int ret;

        buf = rtw_build_rsvd_page(rtwdev, vif, &size);
        if (!buf) {
                rtw_err(rtwdev, "failed to build rsvd page pkt\n");
                return -ENOMEM;
        }

        ret = rtw_download_drv_rsvd_page(rtwdev, buf, size);
        if (ret) {
                rtw_err(rtwdev, "failed to download drv rsvd page\n");
                goto free;
        }

        ret = rtw_download_beacon(rtwdev, vif);
        if (ret) {
                rtw_err(rtwdev, "failed to download beacon\n");
                goto free;
        }

free:
        kfree(buf);

        return ret;
}

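/* Read 'size' bytes of the reserved page area back through the packet
 * buffer debug port, one 32-bit word at a time, into 'buf'.
 */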
int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
                           u32 offset, u32 size, u32 *buf)
{
        struct rtw_fifo_conf *fifo = &rtwdev->fifo;
        u32 residue, i;
        u16 start_pg;
        u16 idx = 0;
        u16 ctl;
        u8 rcr;

        if (size & 0x3) {
                rtw_warn(rtwdev, "should be 4-byte aligned\n");
                return -EINVAL;
        }

        offset += fifo->rsvd_boundary << TX_PAGE_SIZE_SHIFT;
        residue = offset & (FIFO_PAGE_SIZE - 1);
        start_pg = offset >> FIFO_PAGE_SIZE_SHIFT;
        start_pg += RSVD_PAGE_START_ADDR;

        rcr = rtw_read8(rtwdev, REG_RCR + 2);
        ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;

        /* disable rx clock gate */
        rtw_write8(rtwdev, REG_RCR, rcr | BIT(3));

        do {
                rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);

                for (i = FIFO_DUMP_ADDR + residue;
                     i < FIFO_DUMP_ADDR + FIFO_PAGE_SIZE; i += 4) {
                        buf[idx++] = rtw_read32(rtwdev, i);
                        size -= 4;
                        if (size == 0)
                                goto out;
                }

                residue = 0;
                start_pg++;
        } while (size);

out:
        rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
        rtw_write8(rtwdev, REG_RCR + 2, rcr);
        return 0;
}