1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/* Copyright(c) 2020 Realtek Corporation
3 */
4
5#include <linux/pci.h>
6
7#include "mac.h"
8#include "pci.h"
9#include "reg.h"
10#include "ser.h"
11
12static bool rtw89_pci_disable_clkreq;
13static bool rtw89_pci_disable_aspm_l1;
14static bool rtw89_pci_disable_l1ss;
15module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
16module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
17module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
18MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
19MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
20MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");
21
22static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev,
23 u32 *phy_offset)
24{
25 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
26 struct pci_dev *pdev = rtwpci->pdev;
27 u32 val;
28 int ret;
29
	ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
31 if (ret)
32 return ret;
33
	val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK);
35 if (val == RTW89_PCIE_GEN1_SPEED) {
36 *phy_offset = R_RAC_DIRECT_OFFSET_G1;
37 } else if (val == RTW89_PCIE_GEN2_SPEED) {
38 *phy_offset = R_RAC_DIRECT_OFFSET_G2;
39 } else {
40 rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val);
41 return -EFAULT;
42 }
43
44 return 0;
45}
46
47static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev)
48{
49 u32 val;
50 int ret;
51
52 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM);
53
54 ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
55 1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
56 rtwdev, R_AX_PCIE_INIT_CFG1);
57
58 return ret;
59}
60
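/* Derive how many descriptors the hardware has consumed since the last poll.
 * For TX rings the distance is measured from the software read pointer (rp)
 * to the hardware index; for RX rings it is measured from the write pointer
 * (wp) instead.  For example, with len=256, rp=250 and cur_rp=4 the index has
 * wrapped, giving 256 - (250 - 4) = 10 consumed descriptors.
 */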
61static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
62 struct rtw89_pci_dma_ring *bd_ring,
63 u32 cur_idx, bool tx)
64{
65 const struct rtw89_pci_info *info = rtwdev->pci_info;
66 u32 cnt, cur_rp, wp, rp, len;
67
68 rp = bd_ring->rp;
69 wp = bd_ring->wp;
70 len = bd_ring->len;
71
72 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
73 if (tx) {
74 cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
75 } else {
76 if (info->rx_ring_eq_is_full)
77 wp += 1;
78
79 cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
80 }
81
82 bd_ring->rp = cur_rp;
83
84 return cnt;
85}
86
87static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
88 struct rtw89_pci_tx_ring *tx_ring)
89{
90 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
91 u32 addr_idx = bd_ring->addr.idx;
92 u32 cnt, idx;
93
	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);
96
97 return cnt;
98}
99
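/* Move completed H2C skbs from h2c_queue to h2c_release_queue, then unmap
 * and free them.  Unless release_all is set, the last RTW89_PCI_MULTITAG
 * entries are retained so the most recently completed commands are not freed
 * immediately; they are reclaimed on a later call or when release_all is set.
 */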
100static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
101 struct rtw89_pci *rtwpci,
102 u32 cnt, bool release_all)
103{
104 struct rtw89_pci_tx_data *tx_data;
105 struct sk_buff *skb;
106 u32 qlen;
107
108 while (cnt--) {
		skb = skb_dequeue(&rtwpci->h2c_queue);
110 if (!skb) {
111 rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
112 return;
113 }
		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
115 }
116
	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
118 if (!release_all)
119 qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;
120
121 while (qlen--) {
		skb = skb_dequeue(&rtwpci->h2c_release_queue);
123 if (!skb) {
124 rtw89_err(rtwdev, "failed to release fwcmd\n");
125 return;
126 }
127 tx_data = RTW89_PCI_TX_SKB_CB(skb);
128 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
129 DMA_TO_DEVICE);
130 dev_kfree_skb_any(skb);
131 }
132}
133
134static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
135 struct rtw89_pci *rtwpci)
136{
137 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[RTW89_TXCH_CH12];
138 u32 cnt;
139
140 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
141 if (!cnt)
142 return;
	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
144}
145
146static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
147 struct rtw89_pci_rx_ring *rx_ring)
148{
149 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
150 u32 addr_idx = bd_ring->addr.idx;
151 u32 cnt, idx;
152
	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);
155
156 return cnt;
157}
158
159static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
160 struct sk_buff *skb)
161{
162 struct rtw89_pci_rx_info *rx_info;
163 dma_addr_t dma;
164
165 rx_info = RTW89_PCI_RX_SKB_CB(skb);
166 dma = rx_info->dma;
	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
169}
170
171static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
172 struct sk_buff *skb)
173{
174 struct rtw89_pci_rx_info *rx_info;
175 dma_addr_t dma;
176
177 rx_info = RTW89_PCI_RX_SKB_CB(skb);
178 dma = rx_info->dma;
	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
181}
182
183static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
184 struct sk_buff *skb)
185{
186 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
187 struct rtw89_pci_rxbd_info *rxbd_info;
188 __le32 info;
189
190 rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
191 info = rxbd_info->dword;
192
	rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS);
	rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS);
	rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE);
	rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG);
197}
198
199static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
200 struct rtw89_pci_rx_ring *rx_ring,
201 struct sk_buff *skb)
202{
203 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
204 const struct rtw89_pci_info *info = rtwdev->pci_info;
205 u32 target_rx_tag;
206
207 if (!info->check_rx_tag)
208 return 0;
209
210 /* valid range is 1 ~ 0x1FFF */
211 if (rx_ring->target_rx_tag == 0)
212 target_rx_tag = 1;
213 else
214 target_rx_tag = rx_ring->target_rx_tag;
215
216 if (rx_info->tag != target_rx_tag) {
		rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n",
			    rx_info->tag, target_rx_tag);
219 return -EAGAIN;
220 }
221
222 return 0;
223}
224
225static
226int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
227 struct rtw89_pci_rx_ring *rx_ring,
228 struct sk_buff *skb)
229{
230 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
231 int rx_tag_retry = 1000;
232 int ret;
233
234 do {
235 rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
236 rtw89_pci_rxbd_info_update(rtwdev, skb);
237
238 ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb);
239 if (ret != -EAGAIN)
240 break;
241 } while (rx_tag_retry--);
242
243 /* update target rx_tag for next RX */
244 rx_ring->target_rx_tag = rx_info->tag + 1;
245
246 return ret;
247}
248
249static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable)
250{
251 const struct rtw89_pci_info *info = rtwdev->pci_info;
252 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
253 const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;
254
255 if (enable) {
		rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
	} else {
		rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
263 }
264}
265
266static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable)
267{
268 const struct rtw89_pci_info *info = rtwdev->pci_info;
269 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
270
271 if (enable)
		rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
	else
		rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
275}
276
277static bool
278rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
279 struct sk_buff *new,
280 const struct sk_buff *skb, u32 offset,
281 const struct rtw89_pci_rx_info *rx_info,
282 const struct rtw89_rx_desc_info *desc_info)
283{
284 u32 copy_len = rx_info->len - offset;
285
286 if (unlikely(skb_tailroom(new) < copy_len)) {
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
			       skb->data, rx_info->len);
292 /* length of a single segment skb is desc_info->pkt_size */
293 if (fs && ls) {
294 copy_len = desc_info->pkt_size;
295 } else {
296 rtw89_info(rtwdev, "drop rx data due to invalid length\n");
297 return false;
298 }
299 }
300
	skb_put_data(new, skb->data + offset, copy_len);
302
303 return true;
304}
305
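/* Return the buffer index to process next.  On chips where the RX ring is
 * considered full when the hardware index equals the host index
 * (rx_ring_eq_is_full), the host keeps wp one slot behind, so the next
 * buffer to inspect is wp + 1 (with wrap-around); otherwise it is wp itself.
 */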
306static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev,
307 struct rtw89_pci_dma_ring *bd_ring)
308{
309 const struct rtw89_pci_info *info = rtwdev->pci_info;
310 u32 wp = bd_ring->wp;
311
312 if (!info->rx_ring_eq_is_full)
313 return wp;
314
315 if (++wp >= bd_ring->len)
316 wp = 0;
317
318 return wp;
319}
320
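/* Deliver the data held by one RX buffer descriptor.  A received frame may
 * span several RXBDs: the segment flagged FS carries the RX descriptor and
 * causes a destination skb (diliver_skb) to be allocated, intermediate
 * segments are appended to it, and the segment flagged LS hands the completed
 * skb to rtw89_core_rx().  Returns the number of RXBDs consumed (currently 1).
 */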
321static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
322 struct rtw89_pci_rx_ring *rx_ring)
323{
324 struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
325 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
326 const struct rtw89_pci_info *info = rtwdev->pci_info;
327 struct sk_buff *new = rx_ring->diliver_skb;
328 struct rtw89_pci_rx_info *rx_info;
329 struct sk_buff *skb;
330 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
331 u32 skb_idx;
332 u32 offset;
333 u32 cnt = 1;
334 bool fs, ls;
335 int ret;
336
337 skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
338 skb = rx_ring->buf[skb_idx];
339
340 ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
341 if (ret) {
342 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
343 bd_ring->wp, ret);
344 goto err_sync_device;
345 }
346
347 rx_info = RTW89_PCI_RX_SKB_CB(skb);
348 fs = info->no_rxbd_fs ? !new : rx_info->fs;
349 ls = rx_info->ls;
350
351 if (unlikely(!fs || !ls))
		rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
			    "unexpected fs/ls=%d/%d tag=%u len=%u new->len=%u\n",
			    fs, ls, rx_info->tag, rx_info->len, new ? new->len : 0);
355
356 if (fs) {
357 if (new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
				    "skb should not be ready before first segment start\n");
360 goto err_sync_device;
361 }
362 if (desc_info->ready) {
363 rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
364 goto err_sync_device;
365 }
366
		rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);
368
		new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
370 if (!new)
371 goto err_sync_device;
372
373 rx_ring->diliver_skb = new;
374
375 /* first segment has RX desc */
376 offset = desc_info->offset + desc_info->rxd_len;
377 } else {
378 offset = sizeof(struct rtw89_pci_rxbd_info);
379 if (!new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
381 goto err_sync_device;
382 }
383 }
384 if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
385 goto err_sync_device;
386 rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
388
389 if (!desc_info->ready) {
390 rtw89_warn(rtwdev, "no rx desc information\n");
391 goto err_free_resource;
392 }
393 if (ls) {
		rtw89_core_rx(rtwdev, desc_info, new);
395 rx_ring->diliver_skb = NULL;
396 desc_info->ready = false;
397 }
398
399 return cnt;
400
401err_sync_device:
402 rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
	if (new)
		dev_kfree_skb_any(new);
407 rx_ring->diliver_skb = NULL;
408 desc_info->ready = false;
409
410 return cnt;
411}
412
413static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
414 struct rtw89_pci_rx_ring *rx_ring,
415 u32 cnt)
416{
417 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
418 u32 rx_cnt;
419
420 while (cnt && rtwdev->napi_budget_countdown > 0) {
421 rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
422 if (!rx_cnt) {
423 rtw89_err(rtwdev, "failed to deliver RXBD skb\n");
424
425 /* skip the rest RXBD bufs */
426 rtw89_pci_rxbd_increase(rx_ring, cnt);
427 break;
428 }
429
430 cnt -= rx_cnt;
431 }
432
	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
434}
435
436static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
437 struct rtw89_pci *rtwpci, int budget)
438{
439 struct rtw89_pci_rx_ring *rx_ring;
440 int countdown = rtwdev->napi_budget_countdown;
441 u32 cnt;
442
443 rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RXQ];
444
445 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
446 if (!cnt)
447 return 0;
448
449 cnt = min_t(u32, budget, cnt);
450
451 rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);
452
	/* In case of flushing pending SKBs, the countdown may be exceeded. */
454 if (rtwdev->napi_budget_countdown <= 0)
455 return budget;
456
457 return budget - countdown;
458}
459
460static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
461 struct rtw89_pci_tx_ring *tx_ring,
462 struct sk_buff *skb, u8 tx_status)
463{
464 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
465 struct ieee80211_tx_info *info;
466
467 if (rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status))
468 return;
469
470 info = IEEE80211_SKB_CB(skb);
471 ieee80211_tx_info_clear_status(info);
472
473 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
474 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
475 if (tx_status == RTW89_TX_DONE) {
476 info->flags |= IEEE80211_TX_STAT_ACK;
477 tx_ring->tx_acked++;
478 } else {
479 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
			rtw89_debug(rtwdev, RTW89_DBG_FW,
				    "failed to TX of status %x\n", tx_status);
482 switch (tx_status) {
483 case RTW89_TX_RETRY_LIMIT:
484 tx_ring->tx_retry_lmt++;
485 break;
486 case RTW89_TX_LIFE_TIME:
487 tx_ring->tx_life_time++;
488 break;
489 case RTW89_TX_MACID_DROP:
490 tx_ring->tx_mac_id_drop++;
491 break;
492 default:
493 rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
494 break;
495 }
496 }
497
	ieee80211_tx_status_ni(rtwdev->hw, skb);
499}
500
501static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
502{
503 struct rtw89_pci_tx_wd *txwd;
504 u32 cnt;
505
506 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
507 while (cnt--) {
508 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
509 if (!txwd) {
510 rtw89_warn(rtwdev, "No busy txwd pages available\n");
511 break;
512 }
513
		list_del_init(&txwd->list);
515
516 /* this skb has been freed by RPP */
		if (skb_queue_len(&txwd->queue) == 0)
518 rtw89_pci_enqueue_txwd(tx_ring, txwd);
519 }
520}
521
522static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
523 struct rtw89_pci_tx_ring *tx_ring)
524{
525 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
526 struct rtw89_pci_tx_wd *txwd;
527 int i;
528
529 for (i = 0; i < wd_ring->page_num; i++) {
530 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
531 if (!txwd)
532 break;
533
		list_del_init(&txwd->list);
535 }
536}
537
538static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
539 struct rtw89_pci_tx_ring *tx_ring,
540 struct rtw89_pci_tx_wd *txwd, u16 seq,
541 u8 tx_status)
542{
543 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
544 struct rtw89_pci_tx_data *tx_data;
545 struct sk_buff *skb, *tmp;
546 u8 txch = tx_ring->txch;
547
	if (!list_empty(&txwd->list)) {
549 rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
		/* In low power mode, an RPP can be received before the TX BD
		 * is updated. In normal mode this should not happen, so give
		 * it a warning.
		 */
		if (!rtwpci->low_power && !list_empty(&txwd->list))
554 rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
555 txch, seq);
556 }
557
558 skb_queue_walk_safe(&txwd->queue, skb, tmp) {
		skb_unlink(skb, &txwd->queue);
560
561 tx_data = RTW89_PCI_TX_SKB_CB(skb);
562 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
563 DMA_TO_DEVICE);
564
565 rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
566 }
567
	if (list_empty(&txwd->list))
569 rtw89_pci_enqueue_txwd(tx_ring, txwd);
570}
571
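/* RPP entries are TX completion reports the device places in the RPQ.  Each
 * entry identifies the TXWD sequence number, the queue selection, the DMA
 * channel and the TX status used to complete the corresponding skbs.
 */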
572void rtw89_pci_parse_rpp(struct rtw89_dev *rtwdev, void *_rpp,
573 struct rtw89_pci_rpp_info *rpp_info)
574{
575 const struct rtw89_pci_rpp_fmt *rpp = _rpp;
576
	rpp_info->seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
	rpp_info->qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
	rpp_info->tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
	rpp_info->txch = rtw89_chip_get_ch_dma(rtwdev, rpp_info->qsel);
581}
582EXPORT_SYMBOL(rtw89_pci_parse_rpp);
583
584void rtw89_pci_parse_rpp_v1(struct rtw89_dev *rtwdev, void *_rpp,
585 struct rtw89_pci_rpp_info *rpp_info)
586{
587 const struct rtw89_pci_rpp_fmt_v1 *rpp = _rpp;
588
	rpp_info->seq = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_PCIE_SEQ_V1_MASK);
	rpp_info->qsel = le32_get_bits(rpp->w1, RTW89_PCI_RPP_W1_QSEL_V1_MASK);
	rpp_info->tx_status = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_TX_STATUS_V1_MASK);
	rpp_info->txch = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_DMA_CH_MASK);
593}
594EXPORT_SYMBOL(rtw89_pci_parse_rpp_v1);
595
596static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, void *rpp)
597{
598 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
599 const struct rtw89_pci_info *info = rtwdev->pci_info;
600 struct rtw89_pci_rpp_info rpp_info = {};
601 struct rtw89_pci_tx_wd_ring *wd_ring;
602 struct rtw89_pci_tx_ring *tx_ring;
603 struct rtw89_pci_tx_wd *txwd;
604
605 info->parse_rpp(rtwdev, rpp, &rpp_info);
606
607 if (rpp_info.txch == RTW89_TXCH_CH12) {
		rtw89_warn(rtwdev, "should be no fwcmd release report\n");
609 return;
610 }
611
612 tx_ring = &rtwpci->tx.rings[rpp_info.txch];
613 wd_ring = &tx_ring->wd_ring;
614 txwd = &wd_ring->pages[rpp_info.seq];
615
	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, rpp_info.seq,
				   rpp_info.tx_status);
618}
619
620static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
621 struct rtw89_pci_tx_ring *tx_ring)
622{
623 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
624 struct rtw89_pci_tx_wd *txwd;
625 int i;
626
627 for (i = 0; i < wd_ring->page_num; i++) {
628 txwd = &wd_ring->pages[i];
629
		if (!list_empty(&txwd->list))
631 continue;
632
		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
634 }
635}
636
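/* One RPQ buffer holds a batch of fixed-size RPP entries placed after the RX
 * descriptor.  Walk them one by one and release the TX resources they refer
 * to; the whole buffer is expected to be a single segment (FS and LS both
 * set).
 */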
637static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
638 struct rtw89_pci_rx_ring *rx_ring,
639 u32 max_cnt)
640{
641 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
642 const struct rtw89_pci_info *info = rtwdev->pci_info;
643 struct rtw89_rx_desc_info desc_info = {};
644 struct rtw89_pci_rx_info *rx_info;
645 struct sk_buff *skb;
646 void *rpp;
647 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
648 u32 rpp_size = info->rpp_fmt_size;
649 u32 cnt = 0;
650 u32 skb_idx;
651 u32 offset;
652 int ret;
653
654 skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
655 skb = rx_ring->buf[skb_idx];
656
657 ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
658 if (ret) {
659 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
660 bd_ring->wp, ret);
661 goto err_sync_device;
662 }
663
664 rx_info = RTW89_PCI_RX_SKB_CB(skb);
665 if (!rx_info->fs || !rx_info->ls) {
666 rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n");
667 return cnt;
668 }
669
	rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);
671
672 /* first segment has RX desc */
673 offset = desc_info.offset + desc_info.rxd_len;
674 for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
675 rpp = skb->data + offset;
676 rtw89_pci_release_rpp(rtwdev, rpp);
677 }
678
679 rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
681 cnt++;
682
683 return cnt;
684
685err_sync_device:
686 rtw89_pci_sync_skb_for_device(rtwdev, skb);
687 return 0;
688}
689
690static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
691 struct rtw89_pci_rx_ring *rx_ring,
692 u32 cnt)
693{
694 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
695 u32 release_cnt;
696
697 while (cnt) {
		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
699 if (!release_cnt) {
700 rtw89_err(rtwdev, "failed to release TX skbs\n");
701
702 /* skip the rest RXBD bufs */
703 rtw89_pci_rxbd_increase(rx_ring, cnt);
704 break;
705 }
706
707 cnt -= release_cnt;
708 }
709
	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
711}
712
713static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
714 struct rtw89_pci *rtwpci, int budget)
715{
716 struct rtw89_pci_rx_ring *rx_ring;
717 u32 cnt;
718 int work_done;
719
720 rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ];
721
	spin_lock_bh(&rtwpci->trx_lock);
723
724 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
725 if (cnt == 0)
726 goto out_unlock;
727
728 rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
729
730out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);
732
733 /* always release all RPQ */
734 work_done = min_t(int, cnt, budget);
735 rtwdev->napi_budget_countdown -= work_done;
736
737 return work_done;
738}
739
740static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
741 struct rtw89_pci *rtwpci)
742{
743 struct rtw89_pci_rx_ring *rx_ring;
744 struct rtw89_pci_dma_ring *bd_ring;
745 u32 reg_idx;
746 u16 hw_idx, hw_idx_next, host_idx;
747 int i;
748
749 for (i = 0; i < RTW89_RXCH_NUM; i++) {
750 rx_ring = &rtwpci->rx.rings[i];
751 bd_ring = &rx_ring->bd_ring;
752
		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
754 hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
755 host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
756 hw_idx_next = (hw_idx + 1) % bd_ring->len;
757
758 if (hw_idx_next == host_idx)
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);
760
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
			    i, reg_idx, bd_ring->len);
764 }
765}
766
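/* Snapshot the interrupt status registers, mask them with the currently
 * enabled interrupt bits, and write the result back to acknowledge what was
 * recognized.  Later generations (v1/v2/v3) add an indirect ISR layer that
 * indicates which of the underlying status registers needs to be read.
 */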
767void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
768 struct rtw89_pci *rtwpci,
769 struct rtw89_pci_isrs *isrs)
770{
771 isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
772 isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
773 isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];
774
	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
778}
779EXPORT_SYMBOL(rtw89_pci_recognize_intrs);
780
781void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
782 struct rtw89_pci *rtwpci,
783 struct rtw89_pci_isrs *isrs)
784{
785 isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
786 isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
787 rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
788 isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
789 rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
790 isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
791 rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;
792
793 if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
799}
800EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);
801
802void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
803 struct rtw89_pci *rtwpci,
804 struct rtw89_pci_isrs *isrs)
805{
806 isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
807 isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
808 rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
809 isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
810 rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
811 isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];
812
813 if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
820}
821EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2);
822
823void rtw89_pci_recognize_intrs_v3(struct rtw89_dev *rtwdev,
824 struct rtw89_pci *rtwpci,
825 struct rtw89_pci_isrs *isrs)
826{
827 isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
828 isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
829 rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
830 isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];
831
832 /* isrs[0] is not used, so borrow to store RDU status to share common
833 * flow in rtw89_pci_interrupt_threadfn().
834 */
835 isrs->isrs[0] = isrs->isrs[1] & (B_BE_PCIE_RDU_CH1_INT |
836 B_BE_PCIE_RDU_CH0_INT);
837
838 if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
843}
844EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v3);
845
846void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
847{
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
851}
852EXPORT_SYMBOL(rtw89_pci_enable_intr);
853
854void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
855{
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
859}
860EXPORT_SYMBOL(rtw89_pci_disable_intr);
861
862void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
863{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
868}
869EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);
870
871void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
872{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
874}
875EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);
876
877void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
878{
	rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
883}
884EXPORT_SYMBOL(rtw89_pci_enable_intr_v2);
885
886void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
887{
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
890}
891EXPORT_SYMBOL(rtw89_pci_disable_intr_v2);
892
893void rtw89_pci_enable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
894{
	rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
898}
899EXPORT_SYMBOL(rtw89_pci_enable_intr_v3);
900
901void rtw89_pci_disable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
902{
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
905}
906EXPORT_SYMBOL(rtw89_pci_disable_intr_v3);
907
908static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
909{
910 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
911 unsigned long flags;
912
913 spin_lock_irqsave(&rtwpci->irq_lock, flags);
914 rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
918}
919
920static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
921{
922 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
923 unsigned long flags;
924
925 spin_lock_irqsave(&rtwpci->irq_lock, flags);
926 rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
930}
931
932static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
933{
934 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
935 int budget = NAPI_POLL_WEIGHT;
936
	/* Prevent the RXQ from getting stuck by running out of budget. */
938 rtwdev->napi_budget_countdown = budget;
939
940 rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
941 rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
942}
943
944static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
945{
946 struct rtw89_dev *rtwdev = dev;
947 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
948 const struct rtw89_pci_info *info = rtwdev->pci_info;
949 const struct rtw89_pci_isr_def *isr_def = info->isr_def;
950 struct rtw89_pci_isrs isrs;
951 unsigned long flags;
952
953 spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
956
957 if (unlikely(isrs.isrs[0] & isr_def->isr_rdu))
958 rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);
959
960 if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_halt_c2h))
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
962
963 if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_wdt_timeout))
		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
965
966 if (unlikely(rtwpci->under_recovery))
967 goto enable_intr;
968
969 if (unlikely(rtwpci->low_power)) {
970 rtw89_pci_low_power_interrupt_handler(rtwdev);
971 goto enable_intr;
972 }
973
974 if (likely(rtwpci->running)) {
975 local_bh_disable();
		napi_schedule(&rtwdev->napi);
977 local_bh_enable();
978 }
979
980 return IRQ_HANDLED;
981
982enable_intr:
983 spin_lock_irqsave(&rtwpci->irq_lock, flags);
984 if (likely(rtwpci->running))
985 rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
987 return IRQ_HANDLED;
988}
989
990static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
991{
992 struct rtw89_dev *rtwdev = dev;
993 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
994 unsigned long flags;
995 irqreturn_t irqret = IRQ_WAKE_THREAD;
996
997 spin_lock_irqsave(&rtwpci->irq_lock, flags);
998
	/* An interrupt event already in flight can still fire even after
	 * pci_stop() has turned off the IMR.
	 */
1002 if (unlikely(!rtwpci->running)) {
1003 irqret = IRQ_HANDLED;
1004 goto exit;
1005 }
1006
1007 rtw89_chip_disable_intr(rtwdev, rtwpci);
1008exit:
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1010
1011 return irqret;
1012}
1013
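/* The DEF_*CHADDRS* macros below build per-channel register address sets
 * (BD number/config, index, BD RAM control and descriptor base address).
 * The different TYPE variants reflect which of these registers exist on a
 * given chip generation; a value of 0 means the register is absent.
 */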
1014#define DEF_TXCHADDRS_TYPE3(gen, ch_idx, txch, v...) \
1015 [RTW89_TXCH_##ch_idx] = { \
1016 .num = R_##gen##_##txch##_TXBD_CFG, \
1017 .idx = R_##gen##_##txch##_TXBD_IDX ##v, \
1018 .bdram = 0, \
1019 .desa_l = 0, \
1020 .desa_h = 0, \
1021 }
1022
1023#define DEF_TXCHADDRS_TYPE3_GRP_BASE(gen, ch_idx, txch, grp, v...) \
1024 [RTW89_TXCH_##ch_idx] = { \
1025 .num = R_##gen##_##txch##_TXBD_CFG, \
1026 .idx = R_##gen##_##txch##_TXBD_IDX ##v, \
1027 .bdram = 0, \
1028 .desa_l = R_##gen##_##grp##_TXBD_DESA_L, \
1029 .desa_h = R_##gen##_##grp##_TXBD_DESA_H, \
1030 }
1031
1032#define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \
1033 [RTW89_TXCH_##ch_idx] = { \
1034 .num = R_##gen##_##txch##_TXBD_NUM ##v, \
1035 .idx = R_##gen##_##txch##_TXBD_IDX ##v, \
1036 .bdram = 0, \
1037 .desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \
1038 .desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \
1039 }
1040
1041#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
1042 [RTW89_TXCH_##txch] = { \
1043 .num = R_AX_##txch##_TXBD_NUM ##v, \
1044 .idx = R_AX_##txch##_TXBD_IDX ##v, \
1045 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
1046 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
1047 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
1048 }
1049
1050#define DEF_TXCHADDRS(info, txch, v...) \
1051 [RTW89_TXCH_##txch] = { \
1052 .num = R_AX_##txch##_TXBD_NUM, \
1053 .idx = R_AX_##txch##_TXBD_IDX, \
1054 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
1055 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
1056 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
1057 }
1058
1059#define DEF_RXCHADDRS_TYPE3(gen, ch_idx, rxch, v...) \
1060 [RTW89_RXCH_##ch_idx] = { \
1061 .num = R_##gen##_RX_##rxch##_RXBD_CONFIG, \
1062 .idx = R_##gen##_##ch_idx##0_RXBD_IDX ##v, \
1063 .desa_l = 0, \
1064 .desa_h = 0, \
1065 }
1066
1067#define DEF_RXCHADDRS_TYPE3_GRP_BASE(gen, ch_idx, rxch, grp, v...) \
1068 [RTW89_RXCH_##ch_idx] = { \
1069 .num = R_##gen##_RX_##rxch##_RXBD_CONFIG, \
1070 .idx = R_##gen##_##ch_idx##0_RXBD_IDX ##v, \
1071 .desa_l = R_##gen##_##grp##_RXBD_DESA_L, \
1072 .desa_h = R_##gen##_##grp##_RXBD_DESA_H, \
1073 }
1074
1075#define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \
1076 [RTW89_RXCH_##ch_idx] = { \
1077 .num = R_##gen##_##rxch##_RXBD_NUM ##v, \
1078 .idx = R_##gen##_##rxch##_RXBD_IDX ##v, \
1079 .desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \
1080 .desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \
1081 }
1082
1083const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
1084 .tx = {
1085 DEF_TXCHADDRS(info, ACH0),
1086 DEF_TXCHADDRS(info, ACH1),
1087 DEF_TXCHADDRS(info, ACH2),
1088 DEF_TXCHADDRS(info, ACH3),
1089 DEF_TXCHADDRS(info, ACH4),
1090 DEF_TXCHADDRS(info, ACH5),
1091 DEF_TXCHADDRS(info, ACH6),
1092 DEF_TXCHADDRS(info, ACH7),
1093 DEF_TXCHADDRS(info, CH8),
1094 DEF_TXCHADDRS(info, CH9),
1095 DEF_TXCHADDRS_TYPE1(info, CH10),
1096 DEF_TXCHADDRS_TYPE1(info, CH11),
1097 DEF_TXCHADDRS(info, CH12),
1098 },
1099 .rx = {
1100 DEF_RXCHADDRS(AX, RXQ, RXQ),
1101 DEF_RXCHADDRS(AX, RPQ, RPQ),
1102 },
1103};
1104EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);
1105
1106const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
1107 .tx = {
1108 DEF_TXCHADDRS(info, ACH0, _V1),
1109 DEF_TXCHADDRS(info, ACH1, _V1),
1110 DEF_TXCHADDRS(info, ACH2, _V1),
1111 DEF_TXCHADDRS(info, ACH3, _V1),
1112 DEF_TXCHADDRS(info, ACH4, _V1),
1113 DEF_TXCHADDRS(info, ACH5, _V1),
1114 DEF_TXCHADDRS(info, ACH6, _V1),
1115 DEF_TXCHADDRS(info, ACH7, _V1),
1116 DEF_TXCHADDRS(info, CH8, _V1),
1117 DEF_TXCHADDRS(info, CH9, _V1),
1118 DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
1119 DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
1120 DEF_TXCHADDRS(info, CH12, _V1),
1121 },
1122 .rx = {
1123 DEF_RXCHADDRS(AX, RXQ, RXQ, _V1),
1124 DEF_RXCHADDRS(AX, RPQ, RPQ, _V1),
1125 },
1126};
1127EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);
1128
1129const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = {
1130 .tx = {
1131 DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1),
1132 DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1),
1133 DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1),
1134 DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1),
1135 DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1),
1136 DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1),
1137 DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1),
1138 DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1),
1139 DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1),
1140 DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1),
1141 DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1),
1142 DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1),
1143 DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1),
1144 },
1145 .rx = {
1146 DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1),
1147 DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1),
1148 },
1149};
1150EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be);
1151
1152const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be_v1 = {
1153 .tx = {
1154 DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, ACH0, CH0, ACQ, _V1),
1155 /* no CH1 */
1156 DEF_TXCHADDRS_TYPE3(BE, ACH2, CH2, _V1),
1157 /* no CH3 */
1158 DEF_TXCHADDRS_TYPE3(BE, ACH4, CH4, _V1),
1159 /* no CH5 */
1160 DEF_TXCHADDRS_TYPE3(BE, ACH6, CH6, _V1),
1161 /* no CH7 */
1162 DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, CH8, CH8, NACQ, _V1),
1163 /* no CH9 */
1164 DEF_TXCHADDRS_TYPE3(BE, CH10, CH10, _V1),
1165 /* no CH11 */
1166 DEF_TXCHADDRS_TYPE3(BE, CH12, CH12, _V1),
1167 },
1168 .rx = {
1169 DEF_RXCHADDRS_TYPE3_GRP_BASE(BE, RXQ, CH0, HOST0, _V1),
1170 DEF_RXCHADDRS_TYPE3(BE, RPQ, CH1, _V1),
1171 },
1172};
1173EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be_v1);
1174
1175#undef DEF_TXCHADDRS_TYPE3
1176#undef DEF_TXCHADDRS_TYPE3_GRP_BASE
1177#undef DEF_TXCHADDRS_TYPE2
1178#undef DEF_TXCHADDRS_TYPE1
1179#undef DEF_TXCHADDRS
1180#undef DEF_RXCHADDRS_TYPE3
1181#undef DEF_RXCHADDRS_TYPE3_GRP_BASE
1182#undef DEF_RXCHADDRS
1183
1184static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
1185 enum rtw89_tx_channel txch,
1186 const struct rtw89_pci_ch_dma_addr **addr)
1187{
1188 const struct rtw89_pci_info *info = rtwdev->pci_info;
1189
1190 if (txch >= RTW89_TXCH_NUM)
1191 return -EINVAL;
1192
1193 *addr = &info->dma_addr_set->tx[txch];
1194
1195 return 0;
1196}
1197
1198static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
1199 enum rtw89_rx_channel rxch,
1200 const struct rtw89_pci_ch_dma_addr **addr)
1201{
1202 const struct rtw89_pci_info *info = rtwdev->pci_info;
1203
1204 if (rxch >= RTW89_RXCH_NUM)
1205 return -EINVAL;
1206
1207 *addr = &info->dma_addr_set->rx[rxch];
1208
1209 return 0;
1210}
1211
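/* Number of free TX buffer descriptors, keeping one slot unused so that
 * wp == rp always means "empty" rather than "full".  For example, with
 * len=256, wp=10 and rp=5, there are 256 - (10 - 5) - 1 = 250 free
 * descriptors.
 */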
1212static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
1213{
1214 struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;
1215
	/* one descriptor is reserved to tell a full ring from an empty one */
1217 if (bd_ring->rp > bd_ring->wp)
1218 return bd_ring->rp - bd_ring->wp - 1;
1219
1220 return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
1221}
1222
1223static
1224u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
1225{
1226 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1227 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[RTW89_TXCH_CH12];
1228 u32 cnt;
1229
	spin_lock_bh(&rtwpci->trx_lock);
	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);
1234
1235 return cnt;
1236}
1237
1238static
1239u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
1240 u8 txch)
1241{
1242 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1243 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
1244 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
1245 u32 cnt;
1246
	spin_lock_bh(&rtwpci->trx_lock);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (txch != RTW89_TXCH_CH12)
		cnt = min(cnt, wd_ring->curr_num);
	spin_unlock_bh(&rtwpci->trx_lock);
1252
1253 return cnt;
1254}
1255
1256static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
1257 u8 txch)
1258{
1259 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1260 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
1261 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
1262 const struct rtw89_chip_info *chip = rtwdev->chip;
1263 u32 bd_cnt, wd_cnt, min_cnt = 0;
1264 struct rtw89_pci_rx_ring *rx_ring;
1265 enum rtw89_debug_mask debug_mask;
1266 u32 cnt;
1267
1268 rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ];
1269
	spin_lock_bh(&rtwpci->trx_lock);
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1272 wd_cnt = wd_ring->curr_num;
1273
1274 if (wd_cnt == 0 || bd_cnt == 0) {
1275 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
1276 if (cnt)
1277 rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
1278 else if (wd_cnt == 0)
1279 goto out_unlock;
1280
		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1282 if (bd_cnt == 0)
1283 rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
1284 }
1285
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1287 wd_cnt = wd_ring->curr_num;
1288 min_cnt = min(bd_cnt, wd_cnt);
1289 if (min_cnt == 0) {
		/* This message can show up frequently in low power mode or
		 * under high traffic on chips with a small FIFO. It is known,
		 * normal behavior, so print it with the RTW89_DBG_TXRX mask
		 * in those situations.
		 */
		if (rtwpci->low_power || chip->small_fifo_size)
			debug_mask = RTW89_DBG_TXRX;
		else
			debug_mask = RTW89_DBG_UNEXP;

		rtw89_debug(rtwdev, debug_mask,
			    "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
			    wd_cnt, bd_cnt);
1302 }
1303
1304out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);
1306
1307 return min_cnt;
1308}
1309
1310static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
1311 u8 txch)
1312{
1313 if (rtwdev->hci.paused)
1314 return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);
1315
1316 if (txch == RTW89_TXCH_CH12)
1317 return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);
1318
1319 return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
1320}
1321
1322static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
1323{
1324 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1325 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1326 u32 host_idx, addr;
1327
	spin_lock_bh(&rtwpci->trx_lock);

	addr = bd_ring->addr.idx;
	host_idx = bd_ring->wp;
	rtw89_write16(rtwdev, addr, host_idx);

	spin_unlock_bh(&rtwpci->trx_lock);
1335}
1336
1337static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
1338 int n_txbd)
1339{
1340 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1341 u32 host_idx, len;
1342
1343 len = bd_ring->len;
1344 host_idx = bd_ring->wp + n_txbd;
1345 host_idx = host_idx < len ? host_idx : host_idx - len;
1346
1347 bd_ring->wp = host_idx;
1348}
1349
1350static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
1351{
1352 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1353 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
1354
1355 if (rtwdev->hci.paused) {
		set_bit(txch, rtwpci->kick_map);
1357 return;
1358 }
1359
1360 __rtw89_pci_tx_kick_off(rtwdev, tx_ring);
1361}
1362
1363static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
1364{
1365 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1366 struct rtw89_pci_tx_ring *tx_ring;
1367 int txch;
1368
1369 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (!test_and_clear_bit(txch, rtwpci->kick_map))
1371 continue;
1372
1373 tx_ring = &rtwpci->tx.rings[txch];
1374 __rtw89_pci_tx_kick_off(rtwdev, tx_ring);
1375 }
1376}
1377
1378static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
1379{
1380 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1381 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
1382 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1383 u32 cur_idx, cur_rp;
1384 u8 i;
1385
	/* Because the time taken by the I/O is somewhat dynamic, it's hard to
	 * define a reasonable fixed total timeout for the read_poll_timeout*
	 * helpers. Instead, bound the number of polls and simply use a for
	 * loop with udelay here.
	 */
1391 for (i = 0; i < 60; i++) {
		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
1393 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
1394 if (cur_rp == bd_ring->wp)
1395 return;
1396
		udelay(1);
1398 }
1399
1400 if (!drop)
1401 rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch);
1402}
1403
1404static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
1405 bool drop)
1406{
1407 const struct rtw89_pci_info *info = rtwdev->pci_info;
1408 u8 i;
1409
1410 for (i = 0; i < RTW89_TXCH_NUM; i++) {
1411 /* It may be unnecessary to flush FWCMD queue. */
1412 if (i == RTW89_TXCH_CH12)
1413 continue;
1414 if (info->tx_dma_ch_mask & BIT(i))
1415 continue;
1416
1417 if (txchs & BIT(i))
1418 __pci_flush_txch(rtwdev, txch: i, drop);
1419 }
1420}
1421
1422static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
1423 bool drop)
1424{
1425 __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
1426}
1427
1428u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
1429 void *txaddr_info_addr, u32 total_len,
1430 dma_addr_t dma, u8 *add_info_nr)
1431{
1432 struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
1433 __le16 option;
1434
1435 txaddr_info->length = cpu_to_le16(total_len);
1436 option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1));
1437 option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK);
1438 txaddr_info->option = option;
1439 txaddr_info->dma = cpu_to_le32(dma);
1440
1441 *add_info_nr = 1;
1442
1443 return sizeof(*txaddr_info);
1444}
1445EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);
1446
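/* The v1 address info splits one DMA-mapped buffer into up to
 * RTW89_TXADDR_INFO_NR_V1 entries, each limited to
 * TXADDR_INFO_LENTHG_V1_MAX bytes, and marks the last entry with the LS bit.
 * Returns the total size of the entries written.
 */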
1447u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
1448 void *txaddr_info_addr, u32 total_len,
1449 dma_addr_t dma, u8 *add_info_nr)
1450{
1451 struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
1452 u32 remain = total_len;
1453 u32 len;
1454 u16 length_option;
1455 int n;
1456
1457 for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
1458 len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
1459 TXADDR_INFO_LENTHG_V1_MAX : remain;
1460 remain -= len;
1461
1462 length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
1463 FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
1464 FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
1465 length_option |= u16_encode_bits(upper_32_bits(dma),
1466 B_PCIADDR_HIGH_SEL_V1_MASK);
1467 txaddr_info->length_opt = cpu_to_le16(length_option);
1468 txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
1469 txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));
1470
1471 dma += len;
1472 txaddr_info++;
1473 }
1474
1475 WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
1476 remain, total_len);
1477
1478 *add_info_nr = n;
1479
1480 return n * sizeof(*txaddr_info);
1481}
1482EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
1483
1484static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
1485 struct rtw89_pci_tx_ring *tx_ring,
1486 struct rtw89_pci_tx_wd *txwd,
1487 struct rtw89_core_tx_request *tx_req)
1488{
1489 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1490 const struct rtw89_chip_info *chip = rtwdev->chip;
1491 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1492 struct rtw89_pci_tx_wp_info *txwp_info;
1493 void *txaddr_info_addr;
1494 struct pci_dev *pdev = rtwpci->pdev;
1495 struct sk_buff *skb = tx_req->skb;
1496 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
1497 bool en_wd_info = desc_info->en_wd_info;
1498 u32 txwd_len;
1499 u32 txwp_len;
1500 u32 txaddr_info_len;
1501 dma_addr_t dma;
1502 int ret;
1503
1504 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
1506 rtw89_err(rtwdev, "failed to map skb dma data\n");
1507 ret = -EBUSY;
1508 goto err;
1509 }
1510
1511 tx_data->dma = dma;
1512
1513 txwp_len = sizeof(*txwp_info);
1514 txwd_len = chip->txwd_body_size;
1515 txwd_len += en_wd_info ? chip->txwd_info_size : 0;
1516
1517 txwp_info = txwd->vaddr + txwd_len;
1518 txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
1519 txwp_info->seq1 = 0;
1520 txwp_info->seq2 = 0;
1521 txwp_info->seq3 = 0;
1522
1523 tx_ring->tx_cnt++;
1524 txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
1525 txaddr_info_len =
		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
					    dma, &desc_info->addr_info_nr);
1528
1529 txwd->len = txwd_len + txwp_len + txaddr_info_len;
1530
	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
1532
	skb_queue_tail(&txwd->queue, skb);
1534
1535 return 0;
1536
1537err:
1538 return ret;
1539}
1540
1541static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
1542 struct rtw89_pci_tx_ring *tx_ring,
1543 struct rtw89_pci_tx_bd_32 *txbd,
1544 struct rtw89_core_tx_request *tx_req)
1545{
1546 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1547 const struct rtw89_chip_info *chip = rtwdev->chip;
1548 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1549 void *txdesc;
1550 int txdesc_size = chip->h2c_desc_size;
1551 struct pci_dev *pdev = rtwpci->pdev;
1552 struct sk_buff *skb = tx_req->skb;
1553 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
1554 dma_addr_t dma;
1555 __le16 opt;
1556
	txdesc = skb_push(skb, txdesc_size);
1558 memset(txdesc, 0, txdesc_size);
1559 rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);
1560
1561 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
1563 rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
1564 return -EBUSY;
1565 }
1566
1567 tx_data->dma = dma;
1568 opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
1569 opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI);
1570 txbd->opt = opt;
1571 txbd->length = cpu_to_le16(skb->len);
1572 txbd->dma = cpu_to_le32(tx_data->dma);
	skb_queue_tail(&rtwpci->h2c_queue, skb);
1574
	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1576
1577 return 0;
1578}
1579
1580static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
1581 struct rtw89_pci_tx_ring *tx_ring,
1582 struct rtw89_pci_tx_bd_32 *txbd,
1583 struct rtw89_core_tx_request *tx_req)
1584{
1585 struct rtw89_pci_tx_wd *txwd;
1586 __le16 opt;
1587 int ret;
1588
1589 /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
1590 * buffer with WD BODY only. So here we don't need to check the free
1591 * pages of the wd ring.
1592 */
1593 if (tx_ring->txch == RTW89_TXCH_CH12)
1594 return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);
1595
1596 txwd = rtw89_pci_dequeue_txwd(tx_ring);
1597 if (!txwd) {
1598 rtw89_err(rtwdev, "no available TXWD\n");
1599 ret = -ENOSPC;
1600 goto err;
1601 }
1602
1603 ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
1604 if (ret) {
1605 rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
1606 goto err_enqueue_wd;
1607 }
1608
	list_add_tail(&txwd->list, &tx_ring->busy_pages);
1610
1611 opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
1612 opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI);
1613 txbd->opt = opt;
1614 txbd->length = cpu_to_le16(txwd->len);
1615 txbd->dma = cpu_to_le32(txwd->paddr);
1616
	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1618
1619 return 0;
1620
1621err_enqueue_wd:
1622 rtw89_pci_enqueue_txwd(tx_ring, txwd);
1623err:
1624 return ret;
1625}
1626
1627static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
1628 u8 txch)
1629{
1630 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1631 struct rtw89_pci_tx_ring *tx_ring;
1632 struct rtw89_pci_tx_bd_32 *txbd;
1633 u32 n_avail_txbd;
1634 int ret = 0;
1635
1636 /* check the tx type and dma channel for fw cmd queue */
1637 if ((txch == RTW89_TXCH_CH12 ||
1638 tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
1639 (txch != RTW89_TXCH_CH12 ||
1640 tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
1641 rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
1642 return -EINVAL;
1643 }
1644
1645 tx_ring = &rtwpci->tx.rings[txch];
	spin_lock_bh(&rtwpci->trx_lock);
1647
	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
1649 if (n_avail_txbd == 0) {
1650 rtw89_err(rtwdev, "no available TXBD\n");
1651 ret = -ENOSPC;
1652 goto err_unlock;
1653 }
1654
1655 txbd = rtw89_pci_get_next_txbd(tx_ring);
1656 ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
1657 if (ret) {
1658 rtw89_err(rtwdev, "failed to submit TXBD\n");
1659 goto err_unlock;
1660 }
1661
	spin_unlock_bh(&rtwpci->trx_lock);
1663 return 0;
1664
1665err_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);
1667 return ret;
1668}
1669
1670static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
1671{
1672 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1673 int ret;
1674
	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
1676 if (ret) {
1677 rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
1678 return ret;
1679 }
1680
1681 return 0;
1682}
1683
1684const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
1685 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
1686 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
1687 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
1688 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
1689 [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
1690 [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
1691 [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
1692 [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
1693 [RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1},
1694 [RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1},
1695 [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
1696 [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
1697 [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
1698};
1699EXPORT_SYMBOL(rtw89_bd_ram_table_dual);
1700
1701const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
1702 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
1703 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
1704 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
1705 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
1706 [RTW89_TXCH_CH8] = {.start_idx = 20, .max_num = 4, .min_num = 1},
1707 [RTW89_TXCH_CH9] = {.start_idx = 24, .max_num = 4, .min_num = 1},
1708 [RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
1709};
1710EXPORT_SYMBOL(rtw89_bd_ram_table_single);
1711
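/* Program the 16 write-pointer selection entries, one byte each, packed four
 * to a 32-bit register: entry i simply selects value i.  Chips without a
 * wp_sel_addr skip this step.
 */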
1712static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev)
1713{
1714 const struct rtw89_pci_info *info = rtwdev->pci_info;
1715 u32 addr = info->wp_sel_addr;
1716 u32 val;
1717 int i;
1718
1719 if (!info->wp_sel_addr)
1720 return;
1721
1722 for (i = 0; i < 16; i += 4) {
		val = u32_encode_bits(i + 0, MASKBYTE0) |
		      u32_encode_bits(i + 1, MASKBYTE1) |
		      u32_encode_bits(i + 2, MASKBYTE2) |
		      u32_encode_bits(i + 3, MASKBYTE3);
		rtw89_write32(rtwdev, addr + i, val);
1728 }
1729}
1730
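/* Encode a BD ring configuration for BE-generation chips: the descriptor
 * count is expressed as a power-of-two selector and the offset from the
 * group base address in 512-byte units.  For example, bd_num=256 gives
 * num_sel = ilog2(256) - 5 = 3, and dma_offset=1024 gives offset_sel = 2.
 */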
1731static u16 rtw89_pci_enc_bd_cfg(struct rtw89_dev *rtwdev, u16 bd_num,
1732 u32 dma_offset)
1733{
1734 u16 dma_offset_sel;
1735 u16 num_sel;
1736
1737 /* B_BE_TX_NUM_SEL_MASK, B_BE_RX_NUM_SEL_MASK:
1738 * 0 -> 0
1739 * 1 -> 64 = 2^6
1740 * 2 -> 128 = 2^7
1741 * ...
1742 * 7 -> 4096 = 2^12
1743 */
1744 num_sel = ilog2(bd_num) - 5;
1745
1746 if (hweight16(bd_num) != 1)
1747 rtw89_warn(rtwdev, "bd_num %u is not power of 2\n", bd_num);
1748
1749 /* B_BE_TX_START_OFFSET_MASK, B_BE_RX_START_OFFSET_MASK:
1750 * 0 -> 0 = 0 * 2^9
1751 * 1 -> 512 = 1 * 2^9
1752 * 2 -> 1024 = 2 * 2^9
1753 * 3 -> 1536 = 3 * 2^9
1754 * ...
1755 * 255 -> 130560 = 255 * 2^9
1756 */
1757 dma_offset_sel = dma_offset >> 9;
1758
1759 if (dma_offset % 512)
1760 rtw89_warn(rtwdev, "offset %u is not multiple of 512\n", dma_offset);
1761
	return u16_encode_bits(num_sel, B_BE_TX_NUM_SEL_MASK) |
	       u16_encode_bits(dma_offset_sel, B_BE_TX_START_OFFSET_MASK);
1764}
1765
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	const struct rtw89_pci_bd_ram *bd_ram;
	dma_addr_t group_dma_base = 0;
	u16 num_or_offset;
	u32 addr_desa_l;
	u32 addr_bdram;
	u32 addr_num;
	u32 addr_idx;
	u32 val32;
	int i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		tx_ring = &rtwpci->tx.rings[i];
		bd_ring = &tx_ring->bd_ring;
		bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL;
		addr_num = bd_ring->addr.num;
		addr_bdram = bd_ring->addr.bdram;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;

		if (info->group_bd_addr) {
			if (addr_desa_l)
				group_dma_base = bd_ring->dma;

			num_or_offset =
				rtw89_pci_enc_bd_cfg(rtwdev, bd_ring->len,
						     bd_ring->dma - group_dma_base);
		} else {
			num_or_offset = bd_ring->len;
		}
		rtw89_write16(rtwdev, addr_num, num_or_offset);

		if (addr_bdram && bd_ram) {
			val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
				FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
				FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);

			rtw89_write32(rtwdev, addr_bdram, val32);
		}
		if (addr_desa_l) {
			rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
			rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
		}
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx.rings[i];
		bd_ring = &rx_ring->bd_ring;
		addr_num = bd_ring->addr.num;
		addr_idx = bd_ring->addr.idx;
		addr_desa_l = bd_ring->addr.desa_l;
		if (info->rx_ring_eq_is_full)
			bd_ring->wp = bd_ring->len - 1;
		else
			bd_ring->wp = 0;
		bd_ring->rp = 0;
		rx_ring->diliver_skb = NULL;
		rx_ring->diliver_desc.ready = false;
		rx_ring->target_rx_tag = 0;

		if (info->group_bd_addr) {
			if (addr_desa_l)
				group_dma_base = bd_ring->dma;

			num_or_offset =
				rtw89_pci_enc_bd_cfg(rtwdev, bd_ring->len,
						     bd_ring->dma - group_dma_base);
		} else {
			num_or_offset = bd_ring->len;
		}
		rtw89_write16(rtwdev, addr_num, num_or_offset);

		if (addr_desa_l) {
			rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
			rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
		}

		if (info->rx_ring_eq_is_full)
			rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
	}

	rtw89_pci_init_wp_16sel(rtwdev);
}
1860
static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
				      struct rtw89_pci_tx_ring *tx_ring)
{
	rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
	rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
}
1867
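/* HCI reset callback: reprogram the rings, then drop every frame still
 * queued on the TX side. CH12 carries firmware commands and keeps its own
 * h2c queue, so it is flushed through rtw89_pci_release_fwcmd() instead of
 * the generic TX ring release.
 */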
void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	int txch;

	rtw89_pci_reset_trx_rings(rtwdev);

	spin_lock_bh(&rtwpci->trx_lock);
	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (info->tx_dma_ch_mask & BIT(txch))
			continue;
		if (txch == RTW89_TXCH_CH12) {
			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
						skb_queue_len(&rtwpci->h2c_queue), true);
			continue;
		}
		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx.rings[txch]);
	}
	spin_unlock_bh(&rtwpci->trx_lock);
}
1889
1890static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
1891{
1892 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1893 unsigned long flags;
1894
1895 spin_lock_irqsave(&rtwpci->irq_lock, flags);
1896 rtwpci->running = true;
1897 rtw89_chip_enable_intr(rtwdev, rtwpci);
1898 spin_unlock_irqrestore(lock: &rtwpci->irq_lock, flags);
1899}
1900
1901static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
1902{
1903 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1904 unsigned long flags;
1905
1906 spin_lock_irqsave(&rtwpci->irq_lock, flags);
1907 rtwpci->running = false;
1908 rtw89_chip_disable_intr(rtwdev, rtwpci);
1909 spin_unlock_irqrestore(lock: &rtwpci->irq_lock, flags);
1910}
1911
1912static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
1913{
1914 rtw89_core_napi_start(rtwdev);
1915 rtw89_pci_enable_intr_lock(rtwdev);
1916
1917 return 0;
1918}
1919
1920static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
1921{
1922 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1923 struct pci_dev *pdev = rtwpci->pdev;
1924
1925 rtw89_pci_disable_intr_lock(rtwdev);
1926 synchronize_irq(irq: pdev->irq);
1927 rtw89_core_napi_stop(rtwdev);
1928}
1929
1930static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
1931{
1932 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1933 struct pci_dev *pdev = rtwpci->pdev;
1934
1935 if (pause) {
1936 rtw89_pci_disable_intr_lock(rtwdev);
1937 synchronize_irq(irq: pdev->irq);
1938 if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
1939 napi_synchronize(n: &rtwdev->napi);
1940 } else {
1941 rtw89_pci_enable_intr_lock(rtwdev);
1942 rtw89_pci_tx_kick_off_pending(rtwdev);
1943 }
1944}
1945
1946static
1947void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
1948{
1949 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1950 const struct rtw89_pci_info *info = rtwdev->pci_info;
1951 const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
1952 const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
1953 struct rtw89_pci_tx_ring *tx_ring;
1954 struct rtw89_pci_rx_ring *rx_ring;
1955 int i;
1956
1957 if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
1958 return;
1959
1960 for (i = 0; i < RTW89_TXCH_NUM; i++) {
1961 tx_ring = &rtwpci->tx.rings[i];
1962 tx_ring->bd_ring.addr.idx = low_power ?
1963 bd_idx_addr->tx_bd_addrs[i] :
1964 dma_addr_set->tx[i].idx;
1965 }
1966
1967 for (i = 0; i < RTW89_RXCH_NUM; i++) {
1968 rx_ring = &rtwpci->rx.rings[i];
1969 rx_ring->bd_ring.addr.idx = low_power ?
1970 bd_idx_addr->rx_bd_addrs[i] :
1971 dma_addr_set->rx[i].idx;
1972 }
1973}
1974
1975static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
1976{
1977 enum rtw89_pci_intr_mask_cfg cfg;
1978
1979 WARN(!rtwdev->hci.paused, "HCI isn't paused\n");
1980
1981 cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
1982 rtw89_chip_config_intr_mask(rtwdev, cfg);
1983 rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
1984}
1985
1986static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
1987
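/* CMAC registers read back as RTW89_R32_DEAD while the CMAC clock domain is
 * gated. Retry the read a bounded number of times, forcing the CMAC clocks
 * on via R_AX_CK_EN between attempts, and give up with a warning once
 * MAC_REG_POOL_COUNT retries have failed.
 */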
static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 val = readl(rtwpci->mmap + addr);
	int count;

	for (count = 0; ; count++) {
		if (val != RTW89_R32_DEAD)
			return val;
		if (count >= MAC_REG_POOL_COUNT) {
			rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
			return RTW89_R32_DEAD;
		}
		rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
		val = readl(rtwpci->mmap + addr);
	}

	return val;
}
2007
2008static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
2009{
2010 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2011 u32 addr32, val32, shift;
2012
2013 if (!ACCESS_CMAC(addr))
2014 return readb(addr: rtwpci->mmap + addr);
2015
2016 addr32 = addr & ~0x3;
2017 shift = (addr & 0x3) * 8;
2018 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr: addr32);
2019 return val32 >> shift;
2020}
2021
2022static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
2023{
2024 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2025 u32 addr32, val32, shift;
2026
2027 if (!ACCESS_CMAC(addr))
2028 return readw(addr: rtwpci->mmap + addr);
2029
2030 addr32 = addr & ~0x3;
2031 shift = (addr & 0x3) * 8;
2032 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr: addr32);
2033 return val32 >> shift;
2034}
2035
2036static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
2037{
2038 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2039
2040 if (!ACCESS_CMAC(addr))
2041 return readl(addr: rtwpci->mmap + addr);
2042
2043 return rtw89_pci_ops_read32_cmac(rtwdev, addr);
2044}
2045
2046static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
2047{
2048 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2049
2050 writeb(val: data, addr: rtwpci->mmap + addr);
2051}
2052
2053static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
2054{
2055 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2056
2057 writew(val: data, addr: rtwpci->mmap + addr);
2058}
2059
2060static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
2061{
2062 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2063
2064 writel(val: data, addr: rtwpci->mmap + addr);
2065}
2066
2067static u32 rtw89_pci_ops_read32_pci_cfg(struct rtw89_dev *rtwdev, u32 addr)
2068{
2069 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2070 struct pci_dev *pdev = rtwpci->pdev;
2071 u32 value;
2072 int ret;
2073
2074 ret = pci_read_config_dword(dev: pdev, where: addr, val: &value);
2075 if (ret)
2076 return RTW89_R32_EA;
2077
2078 return value;
2079}
2080
2081static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
2082{
2083 const struct rtw89_pci_info *info = rtwdev->pci_info;
2084
2085 if (enable)
2086 rtw89_write32_set(rtwdev, addr: info->init_cfg_reg,
2087 bit: info->rxhci_en_bit | info->txhci_en_bit);
2088 else
2089 rtw89_write32_clr(rtwdev, addr: info->init_cfg_reg,
2090 bit: info->rxhci_en_bit | info->txhci_en_bit);
2091}
2092
2093static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
2094{
2095 const struct rtw89_pci_info *info = rtwdev->pci_info;
2096 const struct rtw89_reg_def *reg = &info->dma_io_stop;
2097
2098 if (enable)
2099 rtw89_write32_clr(rtwdev, addr: reg->addr, bit: reg->mask);
2100 else
2101 rtw89_write32_set(rtwdev, addr: reg->addr, bit: reg->mask);
2102}
2103
2104void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
2105{
2106 rtw89_pci_ctrl_dma_io(rtwdev, enable);
2107 rtw89_pci_ctrl_dma_trx(rtwdev, enable);
2108}
2109
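/* MDIO access to the PCIe PHY: the low five bits of the address select the
 * register, while the page is chosen from the link speed and from whether
 * the address is below 0x20 (page 0) or not (page 1). After kicking the
 * read or write flag, poll until the hardware clears it.
 */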
static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
{
	u16 val;

	rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);

	val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
	switch (speed) {
	case PCIE_PHY_GEN1:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	case PCIE_PHY_GEN2:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	default:
		rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
		return -EINVAL;
	}
	rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
	rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);

	return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
				 false, rtwdev, R_AX_MDIO_CFG);
}
2140
2141static int
2142rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
2143{
2144 int ret;
2145
2146 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
2147 if (ret) {
2148 rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
2149 return ret;
2150 }
2151 *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);
2152
2153 return 0;
2154}
2155
2156static int
2157rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
2158{
2159 int ret;
2160
2161 rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
2162 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
2163 if (ret) {
2164 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
2165 return ret;
2166 }
2167
2168 return 0;
2169}
2170
2171static int
2172rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
2173{
2174 u32 shift;
2175 int ret;
2176 u16 val;
2177
2178 ret = rtw89_read16_mdio(rtwdev, addr, speed, val: &val);
2179 if (ret)
2180 return ret;
2181
2182 shift = __ffs(mask);
2183 val &= ~mask;
2184 val |= ((data << shift) & mask);
2185
2186 ret = rtw89_write16_mdio(rtwdev, addr, data: val, speed);
2187 if (ret)
2188 return ret;
2189
2190 return 0;
2191}
2192
2193static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
2194{
2195 int ret;
2196 u16 val;
2197
2198 ret = rtw89_read16_mdio(rtwdev, addr, speed, val: &val);
2199 if (ret)
2200 return ret;
2201 ret = rtw89_write16_mdio(rtwdev, addr, data: val | mask, speed);
2202 if (ret)
2203 return ret;
2204
2205 return 0;
2206}
2207
2208static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
2209{
2210 int ret;
2211 u16 val;
2212
2213 ret = rtw89_read16_mdio(rtwdev, addr, speed, val: &val);
2214 if (ret)
2215 return ret;
2216 ret = rtw89_write16_mdio(rtwdev, addr, data: val & ~mask, speed);
2217 if (ret)
2218 return ret;
2219
2220 return 0;
2221}
2222
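/* DBI access path: the chip can reach its own PCI configuration space
 * through the R_AX_DBI_* registers. These helpers are the fallback used by
 * rtw89_pci_write_config_byte()/rtw89_pci_read_config_byte() further down
 * when the host-side pci_{read,write}_config_byte() call fails.
 */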
static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
{
	u16 addr_2lsb = addr & B_AX_DBI_2LSB;
	u16 write_addr;
	u8 flag;
	int ret;

	write_addr = addr & B_AX_DBI_ADDR_MSK;
	write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK);
	rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data);
	rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
	rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);

	ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
				       10 * RTW89_PCI_WR_RETRY_CNT, false,
				       rtwdev, R_AX_DBI_FLAG + 2);
	if (ret)
		rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n",
			  addr);

	return ret;
}

static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
{
	u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
	u8 flag;
	int ret;

	rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
	rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);

	ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
				       10 * RTW89_PCI_WR_RETRY_CNT, false,
				       rtwdev, R_AX_DBI_FLAG + 2);
	if (ret) {
		rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n",
			  addr);
		return ret;
	}

	read_addr = R_AX_DBI_RDATA + (addr & 3);
	*value = rtw89_read8(rtwdev, read_addr);

	return 0;
}
2269
2270static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
2271 u8 data)
2272{
2273 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2274 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2275 struct pci_dev *pdev = rtwpci->pdev;
2276 int ret;
2277
2278 ret = pci_write_config_byte(dev: pdev, where: addr, val: data);
2279 if (!ret)
2280 return 0;
2281
2282 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
2283 ret = rtw89_dbi_write8(rtwdev, addr, data);
2284
2285 return ret;
2286}
2287
2288static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
2289 u8 *value)
2290{
2291 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2292 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2293 struct pci_dev *pdev = rtwpci->pdev;
2294 int ret;
2295
2296 ret = pci_read_config_byte(dev: pdev, where: addr, val: value);
2297 if (!ret)
2298 return 0;
2299
2300 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
2301 ret = rtw89_dbi_read8(rtwdev, addr, value);
2302
2303 return ret;
2304}
2305
2306static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
2307 u8 bit)
2308{
2309 u8 value;
2310 int ret;
2311
2312 ret = rtw89_pci_read_config_byte(rtwdev, addr, value: &value);
2313 if (ret)
2314 return ret;
2315
2316 value |= bit;
2317 ret = rtw89_pci_write_config_byte(rtwdev, addr, data: value);
2318
2319 return ret;
2320}
2321
2322static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
2323 u8 bit)
2324{
2325 u8 value;
2326 int ret;
2327
2328 ret = rtw89_pci_read_config_byte(rtwdev, addr, value: &value);
2329 if (ret)
2330 return ret;
2331
2332 value &= ~bit;
2333 ret = rtw89_pci_write_config_byte(rtwdev, addr, data: value);
2334
2335 return ret;
2336}
2337
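/* Sample the reference-clock calibration counter: pulse B_AX_CLK_CALIB_EN
 * off and on, wait 300us for the counter to run, then read the 12-bit
 * result. All-zero or all-ones readings are treated as a failed sample.
 */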
static int
__get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
{
	u16 val, tar;
	int ret;

	/* Enable counter */
	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;

	fsleep(300);

	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;

	tar = tar & 0x0FFF;
	if (tar == 0 || tar == 0x0FFF) {
		rtw89_err(rtwdev, "[ERR]Get target failed.\n");
		return -EINVAL;
	}

	*target = tar;

	return 0;
}
2377
2378static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
2379{
2380 int ret;
2381
2382 if (!rtw89_is_rtl885xb(rtwdev))
2383 return 0;
2384
2385 ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
2386 PCIE_AUTOK_4, speed: PCIE_PHY_GEN1);
2387 return ret;
2388}
2389
2390static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
2391{
2392 enum rtw89_pcie_phy phy_rate;
2393 u16 val16, mgn_set, div_set, tar;
2394 u8 val8, bdr_ori;
2395 bool l1_flag = false;
2396 int ret = 0;
2397
2398 if (!rtw89_is_rtl885xb(rtwdev))
2399 return 0;
2400
2401 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, value: &val8);
2402 if (ret) {
2403 rtw89_err(rtwdev, "[ERR]pci config read %X\n",
2404 RTW89_PCIE_PHY_RATE);
2405 return ret;
2406 }
2407
2408 if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
2409 phy_rate = PCIE_PHY_GEN1;
2410 } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
2411 phy_rate = PCIE_PHY_GEN2;
2412 } else {
2413 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8);
2414 return -EOPNOTSUPP;
2415 }
2416 /* Disable L1BD */
2417 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, value: &bdr_ori);
2418 if (ret) {
2419 rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
2420 return ret;
2421 }
2422
2423 if (bdr_ori & RTW89_PCIE_BIT_L1) {
2424 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2425 data: bdr_ori & ~RTW89_PCIE_BIT_L1);
2426 if (ret) {
2427 rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2428 RTW89_PCIE_L1_CTRL);
2429 return ret;
2430 }
2431 l1_flag = true;
2432 }
2433
2434 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, speed: phy_rate, val: &val16);
2435 if (ret) {
2436 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2437 goto end;
2438 }
2439
2440 if (val16 & B_AX_CALIB_EN) {
2441 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
2442 data: val16 & ~B_AX_CALIB_EN, speed: phy_rate);
2443 if (ret) {
2444 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2445 goto end;
2446 }
2447 }
2448
2449 if (!autook_en)
2450 goto end;
2451 /* Set div */
2452 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, speed: phy_rate);
2453 if (ret) {
2454 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2455 goto end;
2456 }
2457
2458 /* Obtain div and margin */
2459 ret = __get_target(rtwdev, target: &tar, phy_rate);
2460 if (ret) {
2461 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
2462 goto end;
2463 }
2464
2465 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;
2466
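	/* Scale the margin into the 4-bit field: for example a margin of 80
	 * falls in the >= 64 bucket, so div_set becomes 3 and mgn_set becomes
	 * 80 >> 3 = 10 (0xA); a margin of 128 or more saturates at 0xF.
	 */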
2467 if (mgn_set >= 128) {
2468 div_set = 0x0003;
2469 mgn_set = 0x000F;
2470 } else if (mgn_set >= 64) {
2471 div_set = 0x0003;
2472 mgn_set >>= 3;
2473 } else if (mgn_set >= 32) {
2474 div_set = 0x0002;
2475 mgn_set >>= 2;
2476 } else if (mgn_set >= 16) {
2477 div_set = 0x0001;
2478 mgn_set >>= 1;
2479 } else if (mgn_set == 0) {
2480 rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar);
2481 goto end;
2482 } else {
2483 div_set = 0x0000;
2484 }
2485
2486 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, speed: phy_rate, val: &val16);
2487 if (ret) {
2488 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2489 goto end;
2490 }
2491
2492 val16 |= u16_encode_bits(v: div_set, B_AX_DIV);
2493
2494 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, data: val16, speed: phy_rate);
2495 if (ret) {
2496 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2497 goto end;
2498 }
2499
2500 ret = __get_target(rtwdev, target: &tar, phy_rate);
2501 if (ret) {
2502 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
2503 goto end;
2504 }
2505
2506 rtw89_debug(rtwdev, mask: RTW89_DBG_HCI, fmt: "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
2507 tar, div_set, mgn_set);
2508 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
2509 data: (tar & 0x0FFF) | (mgn_set << 12), speed: phy_rate);
2510 if (ret) {
2511 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
2512 goto end;
2513 }
2514
2515 /* Enable function */
2516 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, speed: phy_rate);
2517 if (ret) {
2518 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2519 goto end;
2520 }
2521
2522 /* CLK delay = 0 */
2523 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
2524 data: PCIE_CLKDLY_HW_0);
2525
2526end:
2527 /* Set L1BD to ori */
2528 if (l1_flag) {
2529 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2530 data: bdr_ori);
2531 if (ret) {
2532 rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2533 RTW89_PCIE_L1_CTRL);
2534 return ret;
2535 }
2536 }
2537
2538 return ret;
2539}
2540
2541static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
2542{
2543 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2544 int ret;
2545
2546 if (chip_id == RTL8852A) {
2547 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2548 speed: PCIE_PHY_GEN1);
2549 if (ret)
2550 return ret;
2551 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2552 speed: PCIE_PHY_GEN2);
2553 if (ret)
2554 return ret;
2555 } else if (chip_id == RTL8852C) {
2556 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2,
2557 B_AX_DEGLITCH);
2558 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2,
2559 B_AX_DEGLITCH);
2560 }
2561
2562 return 0;
2563}
2564
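/* 8852C only: if the OOBS selector is not already set for both PCIe gens,
 * temporarily mask ASPM, read the calibrated OOBS level and RX offset from
 * the PHY bank of the current link speed, and copy them into both the Gen1
 * and Gen2 banks before restoring the original ASPM setting.
 */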
2565static void rtw89_pci_disable_eq_ax(struct rtw89_dev *rtwdev)
2566{
2567 u16 g1_oobs, g2_oobs;
2568 u32 backup_aspm;
2569 u32 phy_offset;
2570 u16 offset_cal;
2571 u16 oobs_val;
2572 int ret;
2573 u8 gen;
2574
2575 if (rtwdev->chip->chip_id != RTL8852C)
2576 return;
2577
2578 g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 +
2579 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
2580 g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 +
2581 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
2582 if (g1_oobs && g2_oobs)
2583 return;
2584
2585 backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1);
2586 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
2587
2588 ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, phy_offset: &phy_offset);
2589 if (ret)
2590 goto out;
2591
2592 rtw89_write16_set(rtwdev, addr: phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN);
2593 rtw89_write16(rtwdev, addr: phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL);
2594 rtw89_write16_set(rtwdev, addr: phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL);
2595
2596 oobs_val = rtw89_read16_mask(rtwdev, addr: phy_offset + RAC_ANA1F * RAC_MULT,
2597 OOBS_LEVEL_MASK);
2598
2599 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT,
2600 OOBS_SEN_MASK, data: oobs_val);
2601 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT,
2602 BAC_OOBS_SEL);
2603
2604 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT,
2605 OOBS_SEN_MASK, data: oobs_val);
2606 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT,
2607 BAC_OOBS_SEL);
2608
2609 /* offset K */
2610 for (gen = 1; gen <= 2; gen++) {
2611 phy_offset = gen == 1 ? R_RAC_DIRECT_OFFSET_G1 :
2612 R_RAC_DIRECT_OFFSET_G2;
2613
2614 rtw89_write16_clr(rtwdev, addr: phy_offset + RAC_ANA19 * RAC_MULT,
2615 B_PCIE_BIT_RD_SEL);
2616 }
2617
2618 offset_cal = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 +
2619 RAC_ANA1F * RAC_MULT, OFFSET_CAL_MASK);
2620
2621 for (gen = 1; gen <= 2; gen++) {
2622 phy_offset = gen == 1 ? R_RAC_DIRECT_OFFSET_G1 :
2623 R_RAC_DIRECT_OFFSET_G2;
2624
2625 rtw89_write16_mask(rtwdev, addr: phy_offset + RAC_ANA0B * RAC_MULT,
2626 MANUAL_LVL_MASK, data: offset_cal);
2627 rtw89_write16_clr(rtwdev, addr: phy_offset + RAC_ANA0D * RAC_MULT,
2628 OFFSET_CAL_MODE);
2629 }
2630
2631out:
2632 rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, data: backup_aspm);
2633}
2634
2635static void rtw89_pci_ber(struct rtw89_dev *rtwdev)
2636{
2637 u32 phy_offset;
2638
2639 if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks))
2640 return;
2641
2642 phy_offset = R_RAC_DIRECT_OFFSET_G1;
2643 rtw89_write16(rtwdev, addr: phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL);
2644 rtw89_write16(rtwdev, addr: phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
2645
2646 phy_offset = R_RAC_DIRECT_OFFSET_G2;
2647 rtw89_write16(rtwdev, addr: phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL);
2648 rtw89_write16(rtwdev, addr: phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
2649}
2650
2651static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
2652{
2653 if (rtwdev->chip->chip_id != RTL8852A)
2654 return;
2655
2656 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE);
2657}
2658
2659static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
2660{
2661 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2662
2663 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev))
2664 return;
2665
2666 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
2667}
2668
2669static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
2670{
2671 int ret;
2672
2673 if (rtwdev->chip->chip_id != RTL8852A)
2674 return 0;
2675
2676 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2677 speed: PCIE_PHY_GEN1);
2678 if (ret)
2679 return ret;
2680
2681 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2682 speed: PCIE_PHY_GEN2);
2683 if (ret)
2684 return ret;
2685
2686 return 0;
2687}
2688
2689static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
2690{
2691 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2692
2693 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev))
2694 return;
2695
2696 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
2697}
2698
2699static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
2700{
2701 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2702
2703 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
2704 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
2705 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2706 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2707 B_AX_PCIE_DIS_WLSUS_AFT_PDN);
2708 } else if (rtwdev->chip->chip_id == RTL8852C) {
2709 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2710 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2711 }
2712}
2713
2714static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
2715{
2716 if (!rtw89_is_rtl885xb(rtwdev))
2717 return 0;
2718
2719 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
2720 PCIE_DPHY_DLY_25US, speed: PCIE_PHY_GEN1);
2721}
2722
2723static void rtw89_pci_power_wake_ax(struct rtw89_dev *rtwdev, bool pwr_up)
2724{
2725 if (pwr_up)
2726 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2727 else
2728 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2729}
2730
2731static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev)
2732{
2733 if (rtwdev->chip->chip_id != RTL8852C)
2734 return;
2735
2736 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2737 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2738}
2739
2740static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev)
2741{
2742 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2743 return;
2744
2745 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT);
2746}
2747
2748static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev)
2749{
2750 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2751 return;
2752
2753 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2,
2754 B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2755 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3);
2756 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2,
2757 B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2758}
2759
2760static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev)
2761{
2762 if (rtwdev->chip->chip_id != RTL8852C)
2763 return;
2764
2765 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1);
2766}
2767
2768static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev)
2769{
2770 if (rtwdev->chip->chip_id != RTL8852C)
2771 return;
2772
2773 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN);
2774}
2775
2776static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
2777{
2778 if (rtwdev->chip->chip_id == RTL8852C)
2779 return;
2780
2781 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL,
2782 B_AX_SIC_EN_FORCE_CLKREQ);
2783}
2784
2785static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev)
2786{
2787 const struct rtw89_pci_info *info = rtwdev->pci_info;
2788 u32 lbc;
2789
2790 if (rtwdev->chip->chip_id == RTL8852C)
2791 return;
2792
2793 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
2794 if (info->lbc_en == MAC_AX_PCIE_ENABLE) {
2795 lbc = u32_replace_bits(old: lbc, val: info->lbc_tmr, B_AX_LBC_TIMER);
2796 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
2797 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, data: lbc);
2798 } else {
2799 lbc &= ~B_AX_LBC_EN;
2800 }
2801 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, bit: lbc);
2802}
2803
2804static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev)
2805{
2806 const struct rtw89_pci_info *info = rtwdev->pci_info;
2807 u32 val32;
2808
2809 if (rtwdev->chip->chip_id != RTL8852C)
2810 return;
2811
2812 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
2813 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK,
2814 info->io_rcy_tmr);
2815 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, data: val32);
2816 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, data: val32);
2817 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, data: val32);
2818
2819 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2820 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2821 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2822 } else {
2823 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2824 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2825 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2826 }
2827
2828 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1);
2829}
2830
2831static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
2832{
2833 if (rtwdev->chip->chip_id == RTL8852C)
2834 return;
2835
2836 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL,
2837 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG);
2838
2839 rtw89_write32_mask(rtwdev, R_AX_PCIE_EXP_CTRL,
2840 B_AX_EN_STUCK_DBG | B_AX_ASFF_FULL_NO_STK,
2841 B_AX_EN_STUCK_DBG);
2842
2843 if (rtwdev->chip->chip_id == RTL8852A)
2844 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL,
2845 B_AX_EN_CHKDSC_NO_RX_STUCK);
2846}
2847
2848static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev)
2849{
2850 if (rtwdev->chip->chip_id == RTL8852C)
2851 return;
2852
2853 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
2854 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
2855}
2856
2857static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev)
2858{
2859 const struct rtw89_pci_info *info = rtwdev->pci_info;
2860 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2861 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
2862 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX |
2863 B_AX_CLR_CH12_IDX;
2864 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg;
2865 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg;
2866
2867 if (chip_id == RTL8852A || chip_id == RTL8852C)
2868 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
2869 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX;
2870 /* clear DMA indexes */
2871 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, bit: val);
2872 if (chip_id == RTL8852A || chip_id == RTL8852C)
2873 rtw89_write32_set(rtwdev, addr: txbd_rwptr_clr2,
2874 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
2875 rtw89_write32_set(rtwdev, addr: rxbd_rwptr_clr,
2876 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
2877}
2878
2879static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev)
2880{
2881 const struct rtw89_pci_info *info = rtwdev->pci_info;
2882 u32 dma_busy1 = info->dma_busy1.addr;
2883 u32 dma_busy2 = info->dma_busy2_reg;
2884 u32 check, dma_busy;
2885 int ret;
2886
2887 check = info->dma_busy1.mask;
2888
2889 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2890 10, 100, false, rtwdev, dma_busy1);
2891 if (ret)
2892 return ret;
2893
2894 if (!dma_busy2)
2895 return 0;
2896
2897 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY;
2898
2899 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2900 10, 100, false, rtwdev, dma_busy2);
2901 if (ret)
2902 return ret;
2903
2904 return 0;
2905}
2906
2907static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev)
2908{
2909 const struct rtw89_pci_info *info = rtwdev->pci_info;
2910 u32 dma_busy3 = info->dma_busy3_reg;
2911 u32 check, dma_busy;
2912 int ret;
2913
2914 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY;
2915
2916 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2917 10, 100, false, rtwdev, dma_busy3);
2918 if (ret)
2919 return ret;
2920
2921 return 0;
2922}
2923
2924static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
2925{
2926 int ret;
2927
2928 ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev);
2929 if (ret) {
2930 rtw89_err(rtwdev, "txdma ch busy\n");
2931 return ret;
2932 }
2933
2934 ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev);
2935 if (ret) {
2936 rtw89_err(rtwdev, "rxdma ch busy\n");
2937 return ret;
2938 }
2939
2940 return 0;
2941}
2942
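/* Apply the per-chip DMA mode described by rtw89_pci_info: BD truncation
 * mode, packet vs. separated RX BD layout, TX/RX burst sizes, tag mode,
 * multi-tag depth and the idle/active WD DMA intervals. Note that
 * 8852A/885xB and 8852C use different register sets for several of these
 * knobs.
 */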
2943static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
2944{
2945 const struct rtw89_pci_info *info = rtwdev->pci_info;
2946 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode;
2947 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode;
2948 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode;
2949 enum mac_ax_tag_mode tag_mode = info->tag_mode;
2950 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl;
2951 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl;
2952 enum mac_ax_tx_burst tx_burst = info->tx_burst;
2953 enum mac_ax_rx_burst rx_burst = info->rx_burst;
2954 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2955 u8 cv = rtwdev->hal.cv;
2956 u32 val32;
2957
2958 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2959 if (chip_id == RTL8852A && cv == CHIP_CBV)
2960 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2961 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2962 if (chip_id == RTL8852A || chip_id == RTL8852B)
2963 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2964 }
2965
2966 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) {
2967 if (chip_id == RTL8852A && cv == CHIP_CBV)
2968 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2969 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) {
2970 if (chip_id == RTL8852A || chip_id == RTL8852B)
2971 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2972 }
2973
2974 if (rxbd_mode == MAC_AX_RXBD_PKT) {
2975 rtw89_write32_clr(rtwdev, addr: info->init_cfg_reg, bit: info->rxbd_mode_bit);
2976 } else if (rxbd_mode == MAC_AX_RXBD_SEP) {
2977 rtw89_write32_set(rtwdev, addr: info->init_cfg_reg, bit: info->rxbd_mode_bit);
2978
2979 if (chip_id == RTL8852A || chip_id == RTL8852B)
2980 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2,
2981 B_AX_PCIE_RX_APPLEN_MASK, data: 0);
2982 }
2983
2984 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
2985 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, data: tx_burst);
2986 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, data: rx_burst);
2987 } else if (chip_id == RTL8852C) {
2988 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, data: tx_burst);
2989 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, data: rx_burst);
2990 }
2991
2992 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
2993 if (tag_mode == MAC_AX_TAG_SGL) {
2994 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
2995 ~B_AX_LATENCY_CONTROL;
2996 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, data: val32);
2997 } else if (tag_mode == MAC_AX_TAG_MULTI) {
2998 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) |
2999 B_AX_LATENCY_CONTROL;
3000 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, data: val32);
3001 }
3002 }
3003
3004 rtw89_write32_mask(rtwdev, addr: info->exp_ctrl_reg, mask: info->max_tag_num_mask,
3005 data: info->multi_tag_num);
3006
3007 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
3008 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
3009 data: wd_dma_idle_intvl);
3010 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
3011 data: wd_dma_act_intvl);
3012 } else if (chip_id == RTL8852C) {
3013 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK,
3014 data: wd_dma_idle_intvl);
3015 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK,
3016 data: wd_dma_act_intvl);
3017 }
3018
3019 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
3020 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
3021 B_AX_HOST_ADDR_INFO_8B_SEL);
3022 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
3023 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
3024 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
3025 B_AX_HOST_ADDR_INFO_8B_SEL);
3026 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
3027 }
3028
3029 return 0;
3030}
3031
3032static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
3033{
3034 const struct rtw89_pci_info *info = rtwdev->pci_info;
3035
3036 rtw89_pci_power_wake(rtwdev, pwr_up: false);
3037
3038 if (rtwdev->chip->chip_id == RTL8852A) {
3039 /* ltr sw trigger */
3040 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
3041 }
3042 info->ltr_set(rtwdev, false);
3043 rtw89_pci_ctrl_dma_all(rtwdev, enable: false);
3044 rtw89_pci_clr_idx_all(rtwdev);
3045
3046 return 0;
3047}
3048
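/* AX-generation pre-init: apply the chip quirks above, calibrate the
 * reference clock, then quiesce DMA, reset all BD rings and indexes, and
 * leave only the firmware-command TX channel enabled so the firmware can be
 * downloaded before the remaining channels are switched on in post-init.
 */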
3049static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
3050{
3051 const struct rtw89_pci_info *info = rtwdev->pci_info;
3052 int ret;
3053
3054 rtw89_pci_ber(rtwdev);
3055 rtw89_pci_rxdma_prefth(rtwdev);
3056 rtw89_pci_l1off_pwroff(rtwdev);
3057 rtw89_pci_deglitch_setting(rtwdev);
3058 ret = rtw89_pci_l2_rxen_lat(rtwdev);
3059 if (ret) {
3060 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret);
3061 return ret;
3062 }
3063
3064 rtw89_pci_aphy_pwrcut(rtwdev);
3065 rtw89_pci_hci_ldo(rtwdev);
3066 rtw89_pci_dphy_delay(rtwdev);
3067
3068 ret = rtw89_pci_autok_x(rtwdev);
3069 if (ret) {
3070 rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret);
3071 return ret;
3072 }
3073
3074 ret = rtw89_pci_auto_refclk_cal(rtwdev, autook_en: false);
3075 if (ret) {
3076 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
3077 return ret;
3078 }
3079
3080 rtw89_pci_power_wake_ax(rtwdev, pwr_up: true);
3081 rtw89_pci_autoload_hang(rtwdev);
3082 rtw89_pci_l12_vmain(rtwdev);
3083 rtw89_pci_gen2_force_ib(rtwdev);
3084 rtw89_pci_l1_ent_lat(rtwdev);
3085 rtw89_pci_wd_exit_l1(rtwdev);
3086 rtw89_pci_set_sic(rtwdev);
3087 rtw89_pci_set_lbc(rtwdev);
3088 rtw89_pci_set_io_rcy(rtwdev);
3089 rtw89_pci_set_dbg(rtwdev);
3090 rtw89_pci_set_keep_reg(rtwdev);
3091
3092 rtw89_write32_set(rtwdev, addr: info->dma_stop1.addr, B_AX_STOP_WPDMA);
3093
3094 /* stop DMA activities */
3095 rtw89_pci_ctrl_dma_all(rtwdev, enable: false);
3096
3097 ret = rtw89_pci_poll_dma_all_idle(rtwdev);
3098 if (ret) {
3099 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
3100 return ret;
3101 }
3102
3103 rtw89_pci_clr_idx_all(rtwdev);
3104 rtw89_pci_mode_op(rtwdev);
3105
3106 /* fill TRX BD indexes */
3107 rtw89_pci_ops_reset(rtwdev);
3108
3109 ret = rtw89_pci_rst_bdram_ax(rtwdev);
3110 if (ret) {
3111 rtw89_warn(rtwdev, "reset bdram busy\n");
3112 return ret;
3113 }
3114
3115 /* disable all channels except to FW CMD channel to download firmware */
3116 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, enable: false);
3117 rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, enable: true);
3118
3119 /* start DMA activities */
3120 rtw89_pci_ctrl_dma_all(rtwdev, enable: true);
3121
3122 return 0;
3123}
3124
3125static int rtw89_pci_ops_mac_pre_deinit_ax(struct rtw89_dev *rtwdev)
3126{
3127 rtw89_pci_power_wake_ax(rtwdev, pwr_up: false);
3128
3129 return 0;
3130}
3131
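/* Program hardware-managed LTR: sanity-check that the LTR registers do not
 * read back the error value, then enable HW LTR with a 500us space index, a
 * 3.2ms idle timer, RX thresholds of 0x28 for both rings and fixed
 * idle/active latency words.
 */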
int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
{
	u32 val;

	if (!en)
		return 0;

	val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
	if (rtw89_pci_ltr_is_err_reg_val(val))
		return -EINVAL;
	val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
	if (rtw89_pci_ltr_is_err_reg_val(val))
		return -EINVAL;
	val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY);
	if (rtw89_pci_ltr_is_err_reg_val(val))
		return -EINVAL;
	val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY);
	if (rtw89_pci_ltr_is_err_reg_val(val))
		return -EINVAL;

	rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN |
						   B_AX_LTR_WD_NOEMP_CHK);
	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
			   PCI_LTR_SPC_500US);
	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
			   PCI_LTR_IDLE_TIMER_3_2MS);
	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
	rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003);
	rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);

	return 0;
}
EXPORT_SYMBOL(rtw89_pci_ltr_set);
3166
3167int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en)
3168{
3169 u32 dec_ctrl;
3170 u32 val32;
3171
3172 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
3173 if (rtw89_pci_ltr_is_err_reg_val(val: val32))
3174 return -EINVAL;
3175 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
3176 if (rtw89_pci_ltr_is_err_reg_val(val: val32))
3177 return -EINVAL;
3178 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL);
3179 if (rtw89_pci_ltr_is_err_reg_val(val: dec_ctrl))
3180 return -EINVAL;
3181 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3);
3182 if (rtw89_pci_ltr_is_err_reg_val(val: val32))
3183 return -EINVAL;
3184 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0);
3185 if (rtw89_pci_ltr_is_err_reg_val(val: val32))
3186 return -EINVAL;
3187
3188 if (!en) {
3189 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN);
3190 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) |
3191 B_AX_LTR_REQ_DRV;
3192 } else {
3193 dec_ctrl |= B_AX_LTR_HW_DEC_EN;
3194 }
3195
3196 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK;
3197 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US);
3198
3199 if (en)
3200 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0,
3201 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN);
3202 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
3203 PCI_LTR_IDLE_TIMER_3_2MS);
3204 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, data: 0x28);
3205 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, data: 0x28);
3206 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, data: dec_ctrl);
3207 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, data: 0x90039003);
3208 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, data: 0x880b880b);
3209
3210 return 0;
3211}
3212EXPORT_SYMBOL(rtw89_pci_ltr_set_v1);
3213
3214static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev)
3215{
3216 const struct rtw89_pci_info *info = rtwdev->pci_info;
3217 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3218 int ret;
3219
3220 ret = info->ltr_set(rtwdev, true);
3221 if (ret) {
3222 rtw89_err(rtwdev, "pci ltr set fail\n");
3223 return ret;
3224 }
3225 if (chip_id == RTL8852A) {
3226 /* ltr sw trigger */
3227 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
3228 }
3229 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
3230 /* ADDR info 8-byte mode */
3231 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
3232 B_AX_HOST_ADDR_INFO_8B_SEL);
3233 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
3234 }
3235
3236 /* enable DMA for all queues */
3237 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, enable: true);
3238
3239 /* Release PCI IO */
3240 rtw89_write32_clr(rtwdev, addr: info->dma_stop1.addr,
3241 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
3242
3243 return 0;
3244}
3245
3246static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev,
3247 struct pci_dev *pdev)
3248{
3249 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3250 int ret;
3251
3252 ret = pci_enable_device(dev: pdev);
3253 if (ret) {
3254 rtw89_err(rtwdev, "failed to enable pci device\n");
3255 return ret;
3256 }
3257
3258 pci_set_master(dev: pdev);
3259 pci_set_drvdata(pdev, data: rtwdev->hw);
3260
3261 rtwpci->pdev = pdev;
3262
3263 return 0;
3264}
3265
3266static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
3267 struct pci_dev *pdev)
3268{
3269 pci_disable_device(dev: pdev);
3270}
3271
3272static bool rtw89_pci_chip_is_manual_dac(struct rtw89_dev *rtwdev)
3273{
3274 const struct rtw89_chip_info *chip = rtwdev->chip;
3275
3276 switch (chip->chip_id) {
3277 case RTL8852A:
3278 case RTL8852B:
3279 case RTL8851B:
3280 case RTL8852BT:
3281 return true;
3282 default:
3283 return false;
3284 }
3285}
3286
3287static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev)
3288{
3289 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3290 struct pci_dev *bridge = pci_upstream_bridge(dev: rtwpci->pdev);
3291
3292 if (!rtw89_pci_chip_is_manual_dac(rtwdev))
3293 return true;
3294
3295 if (!bridge)
3296 return false;
3297
3298 switch (bridge->vendor) {
3299 case PCI_VENDOR_ID_INTEL:
3300 return true;
3301 case PCI_VENDOR_ID_ASMEDIA:
3302 if (bridge->device == 0x2806)
3303 return true;
3304 break;
3305 }
3306
3307 return false;
3308}
3309
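/* For chips where 36-bit DMA addressing must be switched on by hand, set
 * RTW89_PCIE_BIT_EN_64BITS in the device's L1 control config byte. Only the
 * plain PCI config accessors are used here, deliberately avoiding the DBI
 * fallback path.
 */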
static int rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev, bool force)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	int ret;
	u8 val;

	if (!rtwpci->enable_dac && !force)
		return 0;

	if (!rtw89_pci_chip_is_manual_dac(rtwdev))
		return 0;

	/* Configure DAC only via PCI config API, not DBI interfaces */
	ret = pci_read_config_byte(pdev, RTW89_PCIE_L1_CTRL, &val);
	if (ret)
		return ret;

	val |= RTW89_PCIE_BIT_EN_64BITS;
	return pci_write_config_byte(pdev, RTW89_PCIE_L1_CTRL, val);
}
3331
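/* BAR2 holds the register window. A 36-bit DMA mask is attempted first when
 * the upstream bridge is known to cope with DAC; if the device-side enable
 * then fails, the mask is dropped back to 32 bits before mapping the BAR.
 */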
3332static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
3333 struct pci_dev *pdev)
3334{
3335 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3336 unsigned long resource_len;
3337 u8 bar_id = 2;
3338 int ret;
3339
3340 ret = pci_request_regions(pdev, KBUILD_MODNAME);
3341 if (ret) {
3342 rtw89_err(rtwdev, "failed to request pci regions\n");
3343 goto err;
3344 }
3345
3346 if (!rtw89_pci_is_dac_compatible_bridge(rtwdev))
3347 goto try_dac_done;
3348
3349 ret = dma_set_mask_and_coherent(dev: &pdev->dev, DMA_BIT_MASK(36));
3350 if (!ret) {
3351 ret = rtw89_pci_cfg_dac(rtwdev, force: true);
3352 if (!ret) {
3353 rtwpci->enable_dac = true;
3354 goto try_dac_done;
3355 }
3356
3357 ret = dma_set_mask_and_coherent(dev: &pdev->dev, DMA_BIT_MASK(32));
3358 if (ret) {
3359 rtw89_err(rtwdev,
3360 "failed to set dma and consistent mask to 32/36-bit\n");
3361 goto err_release_regions;
3362 }
3363 }
3364try_dac_done:
3365
3366 resource_len = pci_resource_len(pdev, bar_id);
3367 rtwpci->mmap = pci_iomap(dev: pdev, bar: bar_id, max: resource_len);
3368 if (!rtwpci->mmap) {
3369 rtw89_err(rtwdev, "failed to map pci io\n");
3370 ret = -EIO;
3371 goto err_release_regions;
3372 }
3373
3374 return 0;
3375
3376err_release_regions:
3377 pci_release_regions(pdev);
3378err:
3379 return ret;
3380}
3381
3382static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev,
3383 struct pci_dev *pdev)
3384{
3385 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3386
3387 if (rtwpci->mmap) {
3388 pci_iounmap(dev: pdev, rtwpci->mmap);
3389 pci_release_regions(pdev);
3390 }
3391}
3392
3393static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev,
3394 struct pci_dev *pdev,
3395 struct rtw89_pci_tx_ring *tx_ring)
3396{
3397 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
3398 u8 *head = wd_ring->head;
3399 dma_addr_t dma = wd_ring->dma;
3400 u32 page_size = wd_ring->page_size;
3401 u32 page_num = wd_ring->page_num;
3402 u32 ring_sz = page_size * page_num;
3403
3404 dma_free_coherent(dev: &pdev->dev, size: ring_sz, cpu_addr: head, dma_handle: dma);
3405 wd_ring->head = NULL;
3406}
3407
3408static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
3409 struct pci_dev *pdev,
3410 struct rtw89_pci_tx_ring *tx_ring)
3411{
3412 tx_ring->bd_ring.head = NULL;
3413}
3414
3415static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
3416 struct pci_dev *pdev)
3417{
3418 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3419 struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool;
3420 const struct rtw89_pci_info *info = rtwdev->pci_info;
3421 struct rtw89_pci_tx_ring *tx_ring;
3422 int i;
3423
3424 for (i = 0; i < RTW89_TXCH_NUM; i++) {
3425 if (info->tx_dma_ch_mask & BIT(i))
3426 continue;
3427 tx_ring = &rtwpci->tx.rings[i];
3428 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
3429 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
3430 }
3431
3432 dma_free_coherent(dev: &pdev->dev, size: bd_pool->size, cpu_addr: bd_pool->head, dma_handle: bd_pool->dma);
3433}
3434
3435static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
3436 struct pci_dev *pdev,
3437 struct rtw89_pci_rx_ring *rx_ring)
3438{
3439 struct rtw89_pci_rx_info *rx_info;
3440 struct sk_buff *skb;
3441 dma_addr_t dma;
3442 u32 buf_sz;
3443 int i;
3444
3445 buf_sz = rx_ring->buf_sz;
3446 for (i = 0; i < rx_ring->bd_ring.len; i++) {
3447 skb = rx_ring->buf[i];
3448 if (!skb)
3449 continue;
3450
3451 rx_info = RTW89_PCI_RX_SKB_CB(skb);
3452 dma = rx_info->dma;
3453 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
3454 dev_kfree_skb(skb);
3455 rx_ring->buf[i] = NULL;
3456 }
3457
3458 rx_ring->bd_ring.head = NULL;
3459}
3460
3461static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
3462 struct pci_dev *pdev)
3463{
3464 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3465 struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool;
3466 struct rtw89_pci_rx_ring *rx_ring;
3467 int i;
3468
3469 for (i = 0; i < RTW89_RXCH_NUM; i++) {
3470 rx_ring = &rtwpci->rx.rings[i];
3471 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
3472 }
3473
3474 dma_free_coherent(dev: &pdev->dev, size: bd_pool->size, cpu_addr: bd_pool->head, dma_handle: bd_pool->dma);
3475}
3476
3477static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
3478 struct pci_dev *pdev)
3479{
3480 rtw89_pci_free_rx_rings(rtwdev, pdev);
3481 rtw89_pci_free_tx_rings(rtwdev, pdev);
3482}
3483
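/* Fill one RX buffer descriptor: map the skb for DMA, store the low 32 bits
 * of the address in rx_bd->dma and the high bits in the opt field
 * (RTW89_PCI_RXBD_OPT_DMA_HI), and remember the mapping in the skb's control
 * block for later unmapping.
 */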
3484static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev,
3485 struct rtw89_pci_rx_ring *rx_ring,
3486 struct sk_buff *skb, int buf_sz, u32 idx)
3487{
3488 struct rtw89_pci_rx_info *rx_info;
3489 struct rtw89_pci_rx_bd_32 *rx_bd;
3490 dma_addr_t dma;
3491
3492 if (!skb)
3493 return -EINVAL;
3494
3495 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
3496 if (dma_mapping_error(dev: &pdev->dev, dma_addr: dma))
3497 return -EBUSY;
3498
3499 rx_info = RTW89_PCI_RX_SKB_CB(skb);
3500 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx);
3501
3502 memset(rx_bd, 0, sizeof(*rx_bd));
3503 rx_bd->buf_size = cpu_to_le16(buf_sz);
3504 rx_bd->dma = cpu_to_le32(dma);
3505 rx_bd->opt = le16_encode_bits(upper_32_bits(dma), RTW89_PCI_RXBD_OPT_DMA_HI);
3506 rx_info->dma = dma;
3507
3508 return 0;
3509}
3510
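/* Each TX channel except the FW command channel gets one coherent block of
 * RTW89_PCI_TXWD_NUM_MAX fixed-size WD pages; every page is threaded onto
 * the ring's free list and handed out per transmitted frame.
 */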
3511static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev,
3512 struct pci_dev *pdev,
3513 struct rtw89_pci_tx_ring *tx_ring,
3514 enum rtw89_tx_channel txch)
3515{
3516 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
3517 struct rtw89_pci_tx_wd *txwd;
3518 dma_addr_t dma;
3519 dma_addr_t cur_paddr;
3520 u8 *head;
3521 u8 *cur_vaddr;
3522 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE;
3523 u32 page_num = RTW89_PCI_TXWD_NUM_MAX;
3524 u32 ring_sz = page_size * page_num;
3525 u32 page_offset;
3526 int i;
3527
3528 /* FWCMD queue doesn't use txwd as pages */
3529 if (txch == RTW89_TXCH_CH12)
3530 return 0;
3531
3532 head = dma_alloc_coherent(dev: &pdev->dev, size: ring_sz, dma_handle: &dma, GFP_KERNEL);
3533 if (!head)
3534 return -ENOMEM;
3535
3536 INIT_LIST_HEAD(list: &wd_ring->free_pages);
3537 wd_ring->head = head;
3538 wd_ring->dma = dma;
3539 wd_ring->page_size = page_size;
3540 wd_ring->page_num = page_num;
3541
3542 page_offset = 0;
3543 for (i = 0; i < page_num; i++) {
3544 txwd = &wd_ring->pages[i];
3545 cur_paddr = dma + page_offset;
3546 cur_vaddr = head + page_offset;
3547
3548 skb_queue_head_init(list: &txwd->queue);
3549 INIT_LIST_HEAD(list: &txwd->list);
3550 txwd->paddr = cur_paddr;
3551 txwd->vaddr = cur_vaddr;
3552 txwd->len = page_size;
3553 txwd->seq = i;
3554 rtw89_pci_enqueue_txwd(tx_ring, txwd);
3555
3556 page_offset += page_size;
3557 }
3558
3559 return 0;
3560}
3561
3562static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
3563 struct pci_dev *pdev,
3564 struct rtw89_pci_tx_ring *tx_ring,
3565 u32 desc_size, u32 len,
3566 enum rtw89_tx_channel txch,
3567 void *head, dma_addr_t dma)
3568{
3569 const struct rtw89_pci_ch_dma_addr *txch_addr;
3570 int ret;
3571
3572 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
3573 if (ret) {
3574 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch);
3575 goto err;
3576 }
3577
3578 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, addr: &txch_addr);
3579 if (ret) {
3580 rtw89_err(rtwdev, "failed to get address of txch %d", txch);
3581 goto err_free_wd_ring;
3582 }
3583
3584 INIT_LIST_HEAD(list: &tx_ring->busy_pages);
3585 tx_ring->bd_ring.head = head;
3586 tx_ring->bd_ring.dma = dma;
3587 tx_ring->bd_ring.len = len;
3588 tx_ring->bd_ring.desc_size = desc_size;
3589 tx_ring->bd_ring.addr = *txch_addr;
3590 tx_ring->bd_ring.wp = 0;
3591 tx_ring->bd_ring.rp = 0;
3592 tx_ring->txch = txch;
3593
3594 return 0;
3595
3596err_free_wd_ring:
3597 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
3598err:
3599 return ret;
3600}
3601
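/* All TX BD rings share one coherent allocation: the pool is sized as
 * ring_sz times the number of enabled channels and then sliced, advancing
 * both the CPU and DMA cursors by ring_sz per channel.
 */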
3602static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
3603 struct pci_dev *pdev)
3604{
3605 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3606 struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool;
3607 const struct rtw89_pci_info *info = rtwdev->pci_info;
3608 struct rtw89_pci_tx_ring *tx_ring;
3609 u32 i, tx_allocated;
3610 dma_addr_t dma;
3611 u32 desc_size;
3612 u32 ring_sz;
3613 u32 pool_sz;
3614 u32 ch_num;
3615 void *head;
3616 u32 len;
3617 int ret;
3618
3619 BUILD_BUG_ON(RTW89_PCI_TXBD_NUM_MAX % 16);
3620
3621 desc_size = sizeof(struct rtw89_pci_tx_bd_32);
3622 len = RTW89_PCI_TXBD_NUM_MAX;
3623 ch_num = RTW89_TXCH_NUM - hweight32(info->tx_dma_ch_mask);
3624 ring_sz = desc_size * len;
3625 pool_sz = ring_sz * ch_num;
3626
3627 head = dma_alloc_coherent(dev: &pdev->dev, size: pool_sz, dma_handle: &dma, GFP_KERNEL);
3628 if (!head)
3629 return -ENOMEM;
3630
3631 bd_pool->head = head;
3632 bd_pool->dma = dma;
3633 bd_pool->size = pool_sz;
3634
3635 for (i = 0; i < RTW89_TXCH_NUM; i++) {
3636 if (info->tx_dma_ch_mask & BIT(i))
3637 continue;
3638 tx_ring = &rtwpci->tx.rings[i];
3639 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
3640 desc_size, len, txch: i, head, dma);
3641 if (ret) {
3642 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
3643 goto err_free;
3644 }
3645
3646 head += ring_sz;
3647 dma += ring_sz;
3648 }
3649
3650 return 0;
3651
3652err_free:
3653 tx_allocated = i;
3654 for (i = 0; i < tx_allocated; i++) {
3655 tx_ring = &rtwpci->tx.rings[i];
3656 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
3657 }
3658
3659 dma_free_coherent(dev: &pdev->dev, size: bd_pool->size, cpu_addr: bd_pool->head, dma_handle: bd_pool->dma);
3660
3661 return ret;
3662}
3663
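/* Set up a single RX BD ring: look up the channel's register addresses,
 * then allocate and DMA-map a buf_sz skb for every descriptor slot,
 * unwinding the mappings already made if an allocation fails.
 */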
3664static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
3665 struct pci_dev *pdev,
3666 struct rtw89_pci_rx_ring *rx_ring,
3667 u32 desc_size, u32 len, u32 rxch,
3668 void *head, dma_addr_t dma)
3669{
3670 const struct rtw89_pci_info *info = rtwdev->pci_info;
3671 const struct rtw89_pci_ch_dma_addr *rxch_addr;
3672 struct sk_buff *skb;
3673 int buf_sz = RTW89_PCI_RX_BUF_SIZE;
3674 int i, allocated;
3675 int ret;
3676
3677 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, addr: &rxch_addr);
3678 if (ret) {
3679 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch);
3680 return ret;
3681 }
3682
3683 rx_ring->bd_ring.head = head;
3684 rx_ring->bd_ring.dma = dma;
3685 rx_ring->bd_ring.len = len;
3686 rx_ring->bd_ring.desc_size = desc_size;
3687 rx_ring->bd_ring.addr = *rxch_addr;
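/* If the hardware treats "read index == write index" as a full RX ring
 * rather than an empty one, keep the initial write pointer one slot back
 * so the two states stay distinguishable.
 */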
3688 if (info->rx_ring_eq_is_full)
3689 rx_ring->bd_ring.wp = len - 1;
3690 else
3691 rx_ring->bd_ring.wp = 0;
3692 rx_ring->bd_ring.rp = 0;
3693 rx_ring->buf_sz = buf_sz;
3694 rx_ring->diliver_skb = NULL;
3695 rx_ring->diliver_desc.ready = false;
3696 rx_ring->target_rx_tag = 0;
3697
3698 for (i = 0; i < len; i++) {
3699 skb = dev_alloc_skb(length: buf_sz);
3700 if (!skb) {
3701 ret = -ENOMEM;
3702 goto err_free;
3703 }
3704
3705 memset(skb->data, 0, buf_sz);
3706 rx_ring->buf[i] = skb;
3707 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb,
3708 buf_sz, idx: i);
3709 if (ret) {
3710 rtw89_err(rtwdev, "failed to init rx buf %d\n", i);
3711 dev_kfree_skb_any(skb);
3712 rx_ring->buf[i] = NULL;
3713 goto err_free;
3714 }
3715 }
3716
3717 return 0;
3718
3719err_free:
3720 allocated = i;
3721 for (i = 0; i < allocated; i++) {
3722 skb = rx_ring->buf[i];
3723 if (!skb)
3724 continue;
3725 dma = *((dma_addr_t *)skb->cb);
3726 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
3727 dev_kfree_skb(skb);
3728 rx_ring->buf[i] = NULL;
3729 }
3730
3731 rx_ring->bd_ring.head = NULL;
3732
3733 return ret;
3734}
3735
3736static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
3737 struct pci_dev *pdev)
3738{
3739 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3740 struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool;
3741 struct rtw89_pci_rx_ring *rx_ring;
3742 int i, rx_allocated;
3743 dma_addr_t dma;
3744 u32 desc_size;
3745 u32 ring_sz;
3746 u32 pool_sz;
3747 void *head;
3748 u32 len;
3749 int ret;
3750
3751 desc_size = sizeof(struct rtw89_pci_rx_bd_32);
3752 len = RTW89_PCI_RXBD_NUM_MAX;
3753 ring_sz = desc_size * len;
3754 pool_sz = ring_sz * RTW89_RXCH_NUM;
3755
3756 head = dma_alloc_coherent(dev: &pdev->dev, size: pool_sz, dma_handle: &dma, GFP_KERNEL);
3757 if (!head)
3758 return -ENOMEM;
3759
3760 bd_pool->head = head;
3761 bd_pool->dma = dma;
3762 bd_pool->size = pool_sz;
3763
3764 for (i = 0; i < RTW89_RXCH_NUM; i++) {
3765 rx_ring = &rtwpci->rx.rings[i];
3766
3767 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
3768 desc_size, len, rxch: i,
3769 head, dma);
3770 if (ret) {
3771 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
3772 goto err_free;
3773 }
3774
3775 head += ring_sz;
3776 dma += ring_sz;
3777 }
3778
3779 return 0;
3780
3781err_free:
3782 rx_allocated = i;
3783 for (i = 0; i < rx_allocated; i++) {
3784 rx_ring = &rtwpci->rx.rings[i];
3785 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
3786 }
3787
3788 dma_free_coherent(dev: &pdev->dev, size: bd_pool->size, cpu_addr: bd_pool->head, dma_handle: bd_pool->dma);
3789
3790 return ret;
3791}
3792
3793static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev,
3794 struct pci_dev *pdev)
3795{
3796 int ret;
3797
3798 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev);
3799 if (ret) {
3800 rtw89_err(rtwdev, "failed to alloc dma tx rings\n");
3801 goto err;
3802 }
3803
3804 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev);
3805 if (ret) {
3806 rtw89_err(rtwdev, "failed to alloc dma rx rings\n");
3807 goto err_free_tx_rings;
3808 }
3809
3810 return 0;
3811
3812err_free_tx_rings:
3813 rtw89_pci_free_tx_rings(rtwdev, pdev);
3814err:
3815 return ret;
3816}
3817
3818static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev,
3819 struct rtw89_pci *rtwpci)
3820{
3821 skb_queue_head_init(list: &rtwpci->h2c_queue);
3822 skb_queue_head_init(list: &rtwpci->h2c_release_queue);
3823}
3824
3825static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev,
3826 struct pci_dev *pdev)
3827{
3828 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3829 int ret;
3830
3831 ret = rtw89_pci_setup_mapping(rtwdev, pdev);
3832 if (ret) {
3833 rtw89_err(rtwdev, "failed to setup pci mapping\n");
3834 goto err;
3835 }
3836
3837 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev);
3838 if (ret) {
3839 rtw89_err(rtwdev, "failed to alloc pci trx rings\n");
3840 goto err_pci_unmap;
3841 }
3842
3843 rtw89_pci_h2c_init(rtwdev, rtwpci);
3844
3845 spin_lock_init(&rtwpci->irq_lock);
3846 spin_lock_init(&rtwpci->trx_lock);
3847
3848 return 0;
3849
3850err_pci_unmap:
3851 rtw89_pci_clear_mapping(rtwdev, pdev);
3852err:
3853 return ret;
3854}
3855
3856static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
3857 struct pci_dev *pdev)
3858{
3859 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3860
3861 rtw89_pci_free_trx_rings(rtwdev, pdev);
3862 rtw89_pci_clear_mapping(rtwdev, pdev);
3863 rtw89_pci_release_fwcmd(rtwdev, rtwpci,
3864 cnt: skb_queue_len(list_: &rtwpci->h2c_queue), release_all: true);
3865}
3866
3867void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
3868{
3869 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3870 const struct rtw89_chip_info *chip = rtwdev->chip;
3871 u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN;
3872
3873 if (chip->chip_id == RTL8851B)
3874 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND;
3875
3876 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3877
3878 if (rtwpci->under_recovery) {
3879 rtwpci->intrs[0] = hs0isr_ind_int_en;
3880 rtwpci->intrs[1] = 0;
3881 } else {
3882 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
3883 B_AX_RXDMA_INT_EN |
3884 B_AX_RXP1DMA_INT_EN |
3885 B_AX_RPQDMA_INT_EN |
3886 B_AX_RXDMA_STUCK_INT_EN |
3887 B_AX_RDU_INT_EN |
3888 B_AX_RPQBD_FULL_INT_EN |
3889 hs0isr_ind_int_en;
3890
3891 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
3892 }
3893}
3894EXPORT_SYMBOL(rtw89_pci_config_intr_mask);
3895
3896static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev)
3897{
3898 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3899
3900 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN;
3901 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3902 rtwpci->intrs[0] = 0;
3903 rtwpci->intrs[1] = 0;
3904}
3905
3906static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev)
3907{
3908 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3909
3910 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN |
3911 B_AX_HS1ISR_IND_INT_EN |
3912 B_AX_HS0ISR_IND_INT_EN;
3913 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3914 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
3915 B_AX_RXDMA_INT_EN |
3916 B_AX_RXP1DMA_INT_EN |
3917 B_AX_RPQDMA_INT_EN |
3918 B_AX_RXDMA_STUCK_INT_EN |
3919 B_AX_RDU_INT_EN |
3920 B_AX_RPQBD_FULL_INT_EN;
3921 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
3922}
3923
3924static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev)
3925{
3926 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3927
3928 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN |
3929 B_AX_HS0ISR_IND_INT_EN;
3930 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3931 rtwpci->intrs[0] = 0;
3932 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
3933}
3934
3935void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev)
3936{
3937 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3938
3939 if (rtwpci->under_recovery)
3940 rtw89_pci_recovery_intr_mask_v1(rtwdev);
3941 else if (rtwpci->low_power)
3942 rtw89_pci_low_power_intr_mask_v1(rtwdev);
3943 else
3944 rtw89_pci_default_intr_mask_v1(rtwdev);
3945}
3946EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1);
3947
3948static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev)
3949{
3950 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3951
3952 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
3953 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3954 rtwpci->intrs[0] = 0;
3955 rtwpci->intrs[1] = 0;
3956}
3957
3958static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev)
3959{
3960 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3961
3962 rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 |
3963 B_BE_HS0_IND_INT_EN0;
3964 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3965 rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 |
3966 B_BE_RDU_CH0_INT_IMR_V1;
3967 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
3968 B_BE_PCIE_RX_RPQ0_IMR0_V1;
3969}
3970
3971static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev)
3972{
3973 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3974
3975 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 |
3976 B_BE_HS1_IND_INT_EN0;
3977 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3978 rtwpci->intrs[0] = 0;
3979 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
3980 B_BE_PCIE_RX_RPQ0_IMR0_V1;
3981}
3982
3983void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev)
3984{
3985 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3986
3987 if (rtwpci->under_recovery)
3988 rtw89_pci_recovery_intr_mask_v2(rtwdev);
3989 else if (rtwpci->low_power)
3990 rtw89_pci_low_power_intr_mask_v2(rtwdev);
3991 else
3992 rtw89_pci_default_intr_mask_v2(rtwdev);
3993}
3994EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2);
3995
3996static void rtw89_pci_recovery_intr_mask_v3(struct rtw89_dev *rtwdev)
3997{
3998 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3999
4000 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
4001 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
4002 rtwpci->intrs[0] = 0;
4003 rtwpci->intrs[1] = 0;
4004}
4005
4006static void rtw89_pci_default_intr_mask_v3(struct rtw89_dev *rtwdev)
4007{
4008 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4009
4010 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
4011 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
4012 rtwpci->intrs[0] = 0;
4013 rtwpci->intrs[1] = B_BE_PCIE_RDU_CH1_IMR |
4014 B_BE_PCIE_RDU_CH0_IMR |
4015 B_BE_PCIE_RX_RX0P2_IMR0_V1 |
4016 B_BE_PCIE_RX_RPQ0_IMR0_V1;
4017}
4018
4019void rtw89_pci_config_intr_mask_v3(struct rtw89_dev *rtwdev)
4020{
4021 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4022
4023 if (rtwpci->under_recovery)
4024 rtw89_pci_recovery_intr_mask_v3(rtwdev);
4025 else
4026 rtw89_pci_default_intr_mask_v3(rtwdev);
4027}
4028EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v3);
4029
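/* Request a single MSI vector, falling back to legacy INTx, hook up the
 * threaded interrupt handler, and program the reset-time interrupt mask.
 */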
4030static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
4031 struct pci_dev *pdev)
4032{
4033 unsigned long flags = 0;
4034 int ret;
4035
4036 flags |= PCI_IRQ_INTX | PCI_IRQ_MSI;
4037 ret = pci_alloc_irq_vectors(dev: pdev, min_vecs: 1, max_vecs: 1, flags);
4038 if (ret < 0) {
4039 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret);
4040 goto err;
4041 }
4042
4043 ret = devm_request_threaded_irq(dev: rtwdev->dev, irq: pdev->irq,
4044 handler: rtw89_pci_interrupt_handler,
4045 thread_fn: rtw89_pci_interrupt_threadfn,
4046 IRQF_SHARED, KBUILD_MODNAME, dev_id: rtwdev);
4047 if (ret) {
4048 rtw89_err(rtwdev, "failed to request threaded irq\n");
4049 goto err_free_vector;
4050 }
4051
4052 rtw89_chip_config_intr_mask(rtwdev, cfg: RTW89_PCI_INTR_MASK_RESET);
4053
4054 return 0;
4055
4056err_free_vector:
4057 pci_free_irq_vectors(dev: pdev);
4058err:
4059 return ret;
4060}
4061
4062static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
4063 struct pci_dev *pdev)
4064{
4065 devm_free_irq(dev: rtwdev->dev, irq: pdev->irq, dev_id: rtwdev);
4066 pci_free_irq_vectors(dev: pdev);
4067}
4068
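/* Convert a Gray-coded value to plain binary by XOR-ing it with
 * progressively right-shifted copies of itself.
 */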
4069static u16 gray_code_to_bin(u16 gray_code)
4070{
4071 u16 binary = gray_code;
4072
4073 while (gray_code) {
4074 gray_code >>= 1;
4075 binary ^= gray_code;
4076 }
4077
4078 return binary;
4079}
4080
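/* RTL8852C only: on Gen2 links, read the Gray-coded equalizer "filter out"
 * value from the PCIe PHY, convert it to binary and program it into
 * RAC_ANA24 before setting the PHY power-save bit (Gen1 links only get the
 * power-save bit). Nothing is done when ASPM control is already set to L1.
 */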
4081static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
4082{
4083 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4084 struct pci_dev *pdev = rtwpci->pdev;
4085 u16 val16, filter_out_val;
4086 u32 val, phy_offset;
4087 int ret;
4088
4089 if (rtwdev->chip->chip_id != RTL8852C)
4090 return 0;
4091
4092 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
4093 if (val == B_AX_ASPM_CTRL_L1)
4094 return 0;
4095
4096 ret = pci_read_config_dword(dev: pdev, RTW89_PCIE_L1_STS_V1, val: &val);
4097 if (ret)
4098 return ret;
4099
4100 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val);
4101 if (val == RTW89_PCIE_GEN1_SPEED) {
4102 phy_offset = R_RAC_DIRECT_OFFSET_G1;
4103 } else if (val == RTW89_PCIE_GEN2_SPEED) {
4104 phy_offset = R_RAC_DIRECT_OFFSET_G2;
4105 val16 = rtw89_read16(rtwdev, addr: phy_offset + RAC_ANA10 * RAC_MULT);
4106 rtw89_write16_set(rtwdev, addr: phy_offset + RAC_ANA10 * RAC_MULT,
4107 bit: val16 | B_PCIE_BIT_PINOUT_DIS);
4108 rtw89_write16_set(rtwdev, addr: phy_offset + RAC_ANA19 * RAC_MULT,
4109 bit: val16 & ~B_PCIE_BIT_RD_SEL);
4110
4111 val16 = rtw89_read16_mask(rtwdev,
4112 addr: phy_offset + RAC_ANA1F * RAC_MULT,
4113 FILTER_OUT_EQ_MASK);
4114 val16 = gray_code_to_bin(gray_code: val16);
4115 filter_out_val = rtw89_read16(rtwdev, addr: phy_offset + RAC_ANA24 *
4116 RAC_MULT);
4117 filter_out_val &= ~REG_FILTER_OUT_MASK;
4118 filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);
4119
4120 rtw89_write16(rtwdev, addr: phy_offset + RAC_ANA24 * RAC_MULT,
4121 data: filter_out_val);
4122 rtw89_write16_set(rtwdev, addr: phy_offset + RAC_ANA0A * RAC_MULT,
4123 B_BAC_EQ_SEL);
4124 rtw89_write16_set(rtwdev,
4125 R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
4126 B_PCIE_BIT_PSAVE);
4127 } else {
4128 return -EOPNOTSUPP;
4129 }
4130 rtw89_write16_set(rtwdev, addr: phy_offset + RAC_ANA0C * RAC_MULT,
4131 B_PCIE_BIT_PSAVE);
4132
4133 return 0;
4134}
4135
4136static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
4137{
4138 const struct rtw89_pci_info *info = rtwdev->pci_info;
4139 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4140
4141 if (rtw89_pci_disable_clkreq)
4142 return;
4143
4144 gen_def->clkreq_set(rtwdev, enable);
4145}
4146
4147static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable)
4148{
4149 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4150 int ret;
4151
4152 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
4153 data: PCIE_CLKDLY_HW_30US);
4154 if (ret)
4155 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
4156
4157 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4158 if (enable)
4159 ret = rtw89_pci_config_byte_set(rtwdev,
4160 RTW89_PCIE_L1_CTRL,
4161 RTW89_PCIE_BIT_CLK);
4162 else
4163 ret = rtw89_pci_config_byte_clr(rtwdev,
4164 RTW89_PCIE_L1_CTRL,
4165 RTW89_PCIE_BIT_CLK);
4166 if (ret)
4167 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
4168 enable ? "set" : "unset", ret);
4169 } else if (chip_id == RTL8852C) {
4170 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL,
4171 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL);
4172 if (enable)
4173 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL,
4174 B_AX_CLK_REQ_N);
4175 else
4176 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL,
4177 B_AX_CLK_REQ_N);
4178 }
4179}
4180
4181static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
4182{
4183 const struct rtw89_pci_info *info = rtwdev->pci_info;
4184 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4185
4186 if (rtw89_pci_disable_aspm_l1)
4187 return;
4188
4189 gen_def->aspm_set(rtwdev, enable);
4190}
4191
4192static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable)
4193{
4194 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4195 u8 value = 0;
4196 int ret;
4197
4198 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value: &value);
4199 if (ret)
4200 rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
4201
4202 u8p_replace_bits(p: &value, val: PCIE_L1DLY_16US, RTW89_L1DLY_MASK);
4203 u8p_replace_bits(p: &value, val: PCIE_L0SDLY_4US, RTW89_L0DLY_MASK);
4204
4205 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, data: value);
4206 if (ret)
4207 rtw89_warn(rtwdev, "failed to write ASPM Delay\n");
4208
4209 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4210 if (enable)
4211 ret = rtw89_pci_config_byte_set(rtwdev,
4212 RTW89_PCIE_L1_CTRL,
4213 RTW89_PCIE_BIT_L1);
4214 else
4215 ret = rtw89_pci_config_byte_clr(rtwdev,
4216 RTW89_PCIE_L1_CTRL,
4217 RTW89_PCIE_BIT_L1);
4218 } else if (chip_id == RTL8852C) {
4219 if (enable)
4220 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
4221 B_AX_ASPM_CTRL_L1);
4222 else
4223 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
4224 B_AX_ASPM_CTRL_L1);
4225 }
4226 if (ret)
4227 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d",
4228 enable ? "set" : "unset", ret);
4229}
4230
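/* Recompute RX interrupt mitigation: it is enabled only when no scan is in
 * progress and TX or RX traffic is at a high level, using the BE-generation
 * enable bits or, on AX chips, a counter threshold of half the RX BD ring
 * plus a timer match of 2048 us expressed in 64 us units.
 */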
4231static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
4232{
4233 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
4234 const struct rtw89_pci_info *info = rtwdev->pci_info;
4235 struct rtw89_traffic_stats *stats = &rtwdev->stats;
4236 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
4237 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
4238 u32 val = 0;
4239
4240 if (rtwdev->scanning ||
4241 (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH))
4242 goto out;
4243
4244 if (chip_gen == RTW89_CHIP_BE)
4245 val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN;
4246 else
4247 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
4248 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
4249 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
4250 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);
4251
4252out:
4253 rtw89_write32(rtwdev, addr: info->mit_addr, data: val);
4254}
4255
4256static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
4257{
4258 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4259 struct pci_dev *pdev = rtwpci->pdev;
4260 u16 link_ctrl;
4261 int ret;
4262
4263 /* The standard PCIe configuration space provides a link control
4264 * register, but by Realtek's design the driver must also check
4265 * whether the host supports CLKREQ/ASPM before enabling the HW
4266 * module.
4267 *
4268 * These features are implemented by two associated HW modules:
4269 * one accesses the PCIe configuration space to follow the host
4270 * settings, and the other carries out the CLKREQ/ASPM mechanisms.
4271 * The latter is disabled by default, because the host may not
4272 * support them, and wrong settings (e.g. CLKREQ# not bi-directional)
4273 * could lead to losing the device if the HW misbehaves on the link.
4274 *
4275 * Hence the driver first checks that the PCIe configuration space
4276 * is synced and enabled, and only then turns on the module that
4277 * actually performs the mechanism.
4278 */
4279 ret = pcie_capability_read_word(dev: pdev, PCI_EXP_LNKCTL, val: &link_ctrl);
4280 if (ret) {
4281 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
4282 return;
4283 }
4284
4285 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
4286 rtw89_pci_clkreq_set(rtwdev, enable: true);
4287
4288 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
4289 rtw89_pci_aspm_set(rtwdev, enable: true);
4290}
4291
4292static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
4293{
4294 const struct rtw89_pci_info *info = rtwdev->pci_info;
4295 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4296
4297 if (rtw89_pci_disable_l1ss)
4298 return;
4299
4300 gen_def->l1ss_set(rtwdev, enable);
4301}
4302
4303static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable)
4304{
4305 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4306 int ret;
4307
4308 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4309 if (enable)
4310 ret = rtw89_pci_config_byte_set(rtwdev,
4311 RTW89_PCIE_TIMER_CTRL,
4312 RTW89_PCIE_BIT_L1SUB);
4313 else
4314 ret = rtw89_pci_config_byte_clr(rtwdev,
4315 RTW89_PCIE_TIMER_CTRL,
4316 RTW89_PCIE_BIT_L1SUB);
4317 if (ret)
4318 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
4319 enable ? "set" : "unset", ret);
4320 } else if (chip_id == RTL8852C) {
4321 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1,
4322 RTW89_PCIE_BIT_ASPM_L11 |
4323 RTW89_PCIE_BIT_PCI_L11);
4324 if (ret)
4325 rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret);
4326 if (enable)
4327 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
4328 B_AX_L1SUB_DISABLE);
4329 else
4330 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
4331 B_AX_L1SUB_DISABLE);
4332 }
4333}
4334
4335static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
4336{
4337 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4338 struct pci_dev *pdev = rtwpci->pdev;
4339 u32 l1ss_cap_ptr, l1ss_ctrl;
4340
4341 if (rtw89_pci_disable_l1ss)
4342 return;
4343
4344 l1ss_cap_ptr = pci_find_ext_capability(dev: pdev, PCI_EXT_CAP_ID_L1SS);
4345 if (!l1ss_cap_ptr)
4346 return;
4347
4348 pci_read_config_dword(dev: pdev, where: l1ss_cap_ptr + PCI_L1SS_CTL1, val: &l1ss_ctrl);
4349
4350 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK)
4351 rtw89_pci_l1ss_set(rtwdev, enable: true);
4352}
4353
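/* Set the "completion timeout disable" bit in Device Control 2; presumably
 * this avoids completion-timeout errors when device accesses are slow to
 * complete.
 */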
4354static void rtw89_pci_cpl_timeout_cfg(struct rtw89_dev *rtwdev)
4355{
4356 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4357 struct pci_dev *pdev = rtwpci->pdev;
4358
4359 pcie_capability_set_word(dev: pdev, PCI_EXP_DEVCTL2,
4360 PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
4361}
4362
4363static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev)
4364{
4365 int ret = 0;
4366 u32 sts;
4367 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;
4368
4369 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0,
4370 10, 1000, false, rtwdev,
4371 R_AX_PCIE_DMA_BUSY1);
4372 if (ret) {
4373 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n",
4374 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1));
4375 return -EINVAL;
4376 }
4377 return ret;
4378}
4379
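/* First half of level-1 recovery on AX chips: stop all PCIe DMA and poll
 * for the IO engines to go idle; if they remain busy, disable the stuck
 * HCI DMA direction(s) reported in the debug error flags, re-enable, and
 * poll once more.
 */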
4380static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev)
4381{
4382 u32 val;
4383 int ret;
4384
4385 if (rtwdev->chip->chip_id == RTL8852C)
4386 return 0;
4387
4388 rtw89_pci_ctrl_dma_all(rtwdev, enable: false);
4389 ret = rtw89_pci_poll_io_idle_ax(rtwdev);
4390 if (ret) {
4391 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
4392 rtw89_debug(rtwdev, mask: RTW89_DBG_HCI,
4393 fmt: "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
4394 R_AX_DBG_ERR_FLAG, val);
4395 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0)
4396 rtw89_mac_ctrl_hci_dma_tx(rtwdev, enable: false);
4397 if (val & B_AX_RX_STUCK)
4398 rtw89_mac_ctrl_hci_dma_rx(rtwdev, enable: false);
4399 rtw89_mac_ctrl_hci_dma_trx(rtwdev, enable: true);
4400 ret = rtw89_pci_poll_io_idle_ax(rtwdev);
4401 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
4402 rtw89_debug(rtwdev, mask: RTW89_DBG_HCI,
4403 fmt: "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
4404 R_AX_DBG_ERR_FLAG, val);
4405 }
4406
4407 return ret;
4408}
4409
4410static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev)
4411{
4412 int ret;
4413
4414 if (rtwdev->chip->chip_id == RTL8852C)
4415 return 0;
4416
4417 rtw89_mac_ctrl_hci_dma_trx(rtwdev, enable: false);
4418 rtw89_mac_ctrl_hci_dma_trx(rtwdev, enable: true);
4419 rtw89_pci_clr_idx_all(rtwdev);
4420
4421 ret = rtw89_pci_rst_bdram_ax(rtwdev);
4422 if (ret)
4423 return ret;
4424
4425 rtw89_pci_ctrl_dma_all(rtwdev, enable: true);
4426 return 0;
4427}
4428
4429static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
4430 enum rtw89_lv1_rcvy_step step)
4431{
4432 const struct rtw89_pci_info *info = rtwdev->pci_info;
4433 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4434 int ret;
4435
4436 switch (step) {
4437 case RTW89_LV1_RCVY_STEP_1:
4438 ret = gen_def->lv1rst_stop_dma(rtwdev);
4439 if (ret)
4440 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n");
4441
4442 break;
4443
4444 case RTW89_LV1_RCVY_STEP_2:
4445 ret = gen_def->lv1rst_start_dma(rtwdev);
4446 if (ret)
4447 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n");
4448 break;
4449
4450 default:
4451 return -EINVAL;
4452 }
4453
4454 return ret;
4455}
4456
4457static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
4458{
4459 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
4460 return;
4461
4462 if (rtwdev->chip->chip_id == RTL8852C) {
4463 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
4464 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1));
4465 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
4466 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1));
4467 } else {
4468 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n",
4469 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
4470 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
4471 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
4472 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
4473 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
4474 }
4475}
4476
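/* NAPI poll: clear the RPQ interrupt status and drain the RPQ first, then
 * do the same for the RX queues; interrupts are re-enabled only once the
 * poll finishes under budget and the interface is still running.
 */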
4477static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
4478{
4479 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
4480 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4481 const struct rtw89_pci_info *info = rtwdev->pci_info;
4482 const struct rtw89_pci_isr_def *isr_def = info->isr_def;
4483 unsigned long flags;
4484 int work_done;
4485
4486 rtwdev->napi_budget_countdown = budget;
4487
4488 rtw89_write32(rtwdev, addr: isr_def->isr_clear_rpq.addr, data: isr_def->isr_clear_rpq.data);
4489 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget: rtwdev->napi_budget_countdown);
4490 if (work_done == budget)
4491 return budget;
4492
4493 rtw89_write32(rtwdev, addr: isr_def->isr_clear_rxq.addr, data: isr_def->isr_clear_rxq.data);
4494 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget: rtwdev->napi_budget_countdown);
4495 if (work_done < budget && napi_complete_done(n: napi, work_done)) {
4496 spin_lock_irqsave(&rtwpci->irq_lock, flags);
4497 if (likely(rtwpci->running))
4498 rtw89_chip_enable_intr(rtwdev, rtwpci);
4499 spin_unlock_irqrestore(lock: &rtwpci->irq_lock, flags);
4500 }
4501
4502 return work_done;
4503}
4504
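/* Walk the chip's subsystem-ID quirk table (terminated by a zeroed
 * vendor/device entry) and, on an exact SVID/SSID match, OR the listed
 * quirk bits into rtwdev->quirks and adopt the entry's customer ID.
 */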
4505static
4506void rtw89_check_pci_ssid_quirks(struct rtw89_dev *rtwdev,
4507 struct pci_dev *pdev,
4508 const struct rtw89_pci_ssid_quirk *ssid_quirks)
4509{
4510 int i;
4511
4512 if (!ssid_quirks)
4513 return;
4514
4515 for (i = 0; i < 200; i++, ssid_quirks++) {
4516 if (ssid_quirks->vendor == 0 && ssid_quirks->device == 0)
4517 break;
4518
4519 if (ssid_quirks->vendor != pdev->vendor ||
4520 ssid_quirks->device != pdev->device ||
4521 ssid_quirks->subsystem_vendor != pdev->subsystem_vendor ||
4522 ssid_quirks->subsystem_device != pdev->subsystem_device)
4523 continue;
4524
4525 bitmap_or(dst: rtwdev->quirks, src1: rtwdev->quirks, src2: &ssid_quirks->bitmap,
4526 nbits: NUM_OF_RTW89_QUIRKS);
4527 rtwdev->custid = ssid_quirks->custid;
4528 break;
4529 }
4530
4531 rtw89_debug(rtwdev, mask: RTW89_DBG_HCI, fmt: "quirks=%*ph custid=%d\n",
4532 (int)sizeof(rtwdev->quirks), rtwdev->quirks, rtwdev->custid);
4533}
4534
4535static int __maybe_unused rtw89_pci_suspend(struct device *dev)
4536{
4537 struct ieee80211_hw *hw = dev_get_drvdata(dev);
4538 struct rtw89_dev *rtwdev = hw->priv;
4539 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4540
4541 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4542 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
4543 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4544 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4545 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
4546 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
4547 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
4548 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
4549 } else {
4550 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4551 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
4552 }
4553
4554 return 0;
4555}
4556
4557static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
4558{
4559 if (rtwdev->chip->chip_id == RTL8852C)
4560 return;
4561
4562 /* The hardware needs this register written twice for the setting to take effect */
4563 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
4564 RTW89_PCIE_BIT_CFG_RST_MSTATE);
4565 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
4566 RTW89_PCIE_BIT_CFG_RST_MSTATE);
4567}
4568
4569void rtw89_pci_basic_cfg(struct rtw89_dev *rtwdev, bool resume)
4570{
4571 if (resume)
4572 rtw89_pci_cfg_dac(rtwdev, force: false);
4573
4574 rtw89_pci_disable_eq(rtwdev);
4575 rtw89_pci_filter_out(rtwdev);
4576 rtw89_pci_cpl_timeout_cfg(rtwdev);
4577 rtw89_pci_link_cfg(rtwdev);
4578 rtw89_pci_l1ss_cfg(rtwdev);
4579}
4580
4581static int __maybe_unused rtw89_pci_resume(struct device *dev)
4582{
4583 struct ieee80211_hw *hw = dev_get_drvdata(dev);
4584 struct rtw89_dev *rtwdev = hw->priv;
4585 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4586
4587 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4588 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
4589 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4590 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4591 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
4592 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
4593 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
4594 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
4595 } else {
4596 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4597 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
4598 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4599 B_AX_SEL_REQ_ENTR_L1);
4600 }
4601 rtw89_pci_l2_hci_ldo(rtwdev);
4602
4603 rtw89_pci_basic_cfg(rtwdev, resume: true);
4604
4605 return 0;
4606}
4607
4608SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
4609EXPORT_SYMBOL(rtw89_pm_ops);
4610
4611static pci_ers_result_t rtw89_pci_io_error_detected(struct pci_dev *pdev,
4612 pci_channel_state_t state)
4613{
4614 struct net_device *netdev = pci_get_drvdata(pdev);
4615
4616 netif_device_detach(dev: netdev);
4617
4618 return PCI_ERS_RESULT_NEED_RESET;
4619}
4620
4621static pci_ers_result_t rtw89_pci_io_slot_reset(struct pci_dev *pdev)
4622{
4623 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
4624 struct rtw89_dev *rtwdev = hw->priv;
4625
4626 rtw89_ser_notify(rtwdev, err: MAC_AX_ERR_ASSERTION);
4627
4628 return PCI_ERS_RESULT_RECOVERED;
4629}
4630
4631static void rtw89_pci_io_resume(struct pci_dev *pdev)
4632{
4633 struct net_device *netdev = pci_get_drvdata(pdev);
4634
4635 /* ack any pending wake events, disable PME */
4636 pci_enable_wake(dev: pdev, PCI_D0, enable: 0);
4637
4638 netif_device_attach(dev: netdev);
4639}
4640
4641const struct pci_error_handlers rtw89_pci_err_handler = {
4642 .error_detected = rtw89_pci_io_error_detected,
4643 .slot_reset = rtw89_pci_io_slot_reset,
4644 .resume = rtw89_pci_io_resume,
4645};
4646EXPORT_SYMBOL(rtw89_pci_err_handler);
4647
4648const struct rtw89_pci_isr_def rtw89_pci_isr_ax = {
4649 .isr_rdu = B_AX_RDU_INT,
4650 .isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
4651 .isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN,
4652 .isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT},
4653 .isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT |
4654 B_AX_RDU_INT},
4655};
4656EXPORT_SYMBOL(rtw89_pci_isr_ax);
4657
4658const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
4659 .mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
4660 .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax,
4661 .mac_post_init = rtw89_pci_ops_mac_post_init_ax,
4662
4663 .clr_idx_all = rtw89_pci_clr_idx_all_ax,
4664 .rst_bdram = rtw89_pci_rst_bdram_ax,
4665
4666 .lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax,
4667 .lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax,
4668
4669 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax,
4670 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax,
4671 .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax,
4672
4673 .aspm_set = rtw89_pci_aspm_set_ax,
4674 .clkreq_set = rtw89_pci_clkreq_set_ax,
4675 .l1ss_set = rtw89_pci_l1ss_set_ax,
4676
4677 .disable_eq = rtw89_pci_disable_eq_ax,
4678 .power_wake = rtw89_pci_power_wake_ax,
4679};
4680EXPORT_SYMBOL(rtw89_pci_gen_ax);
4681
4682static const struct rtw89_hci_ops rtw89_pci_ops = {
4683 .tx_write = rtw89_pci_ops_tx_write,
4684 .tx_kick_off = rtw89_pci_ops_tx_kick_off,
4685 .flush_queues = rtw89_pci_ops_flush_queues,
4686 .reset = rtw89_pci_ops_reset,
4687 .start = rtw89_pci_ops_start,
4688 .stop = rtw89_pci_ops_stop,
4689 .pause = rtw89_pci_ops_pause,
4690 .switch_mode = rtw89_pci_ops_switch_mode,
4691 .recalc_int_mit = rtw89_pci_recalc_int_mit,
4692
4693 .read8 = rtw89_pci_ops_read8,
4694 .read16 = rtw89_pci_ops_read16,
4695 .read32 = rtw89_pci_ops_read32,
4696 .write8 = rtw89_pci_ops_write8,
4697 .write16 = rtw89_pci_ops_write16,
4698 .write32 = rtw89_pci_ops_write32,
4699
4700 .read32_pci_cfg = rtw89_pci_ops_read32_pci_cfg,
4701
4702 .mac_pre_init = rtw89_pci_ops_mac_pre_init,
4703 .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit,
4704 .mac_post_init = rtw89_pci_ops_mac_post_init,
4705 .deinit = rtw89_pci_ops_deinit,
4706
4707 .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
4708 .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery,
4709 .dump_err_status = rtw89_pci_ops_dump_err_status,
4710 .napi_poll = rtw89_pci_napi_poll,
4711
4712 .recovery_start = rtw89_pci_ops_recovery_start,
4713 .recovery_complete = rtw89_pci_ops_recovery_complete,
4714
4715 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch,
4716 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch,
4717 .ctrl_trxhci = rtw89_pci_ctrl_dma_trx,
4718 .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle,
4719
4720 .clr_idx_all = rtw89_pci_clr_idx_all,
4721 .clear = rtw89_pci_clear_resource,
4722 .disable_intr = rtw89_pci_disable_intr_lock,
4723 .enable_intr = rtw89_pci_enable_intr_lock,
4724 .rst_bdram = rtw89_pci_reset_bdram,
4725};
4726
4727int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4728{
4729 struct rtw89_dev *rtwdev;
4730 const struct rtw89_driver_info *info;
4731 const struct rtw89_pci_info *pci_info;
4732 int ret;
4733
4734 info = (const struct rtw89_driver_info *)id->driver_data;
4735
4736 rtwdev = rtw89_alloc_ieee80211_hw(device: &pdev->dev,
4737 bus_data_size: sizeof(struct rtw89_pci),
4738 chip: info->chip, variant: info->variant);
4739 if (!rtwdev) {
4740 dev_err(&pdev->dev, "failed to allocate hw\n");
4741 return -ENOMEM;
4742 }
4743
4744 pci_info = info->bus.pci;
4745
4746 rtwdev->pci_info = info->bus.pci;
4747 rtwdev->hci.ops = &rtw89_pci_ops;
4748 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
4749 rtwdev->hci.dle_type = RTW89_HCI_DLE_TYPE_PCIE;
4750 rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
4751 rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;
4752
4753 rtw89_check_quirks(rtwdev, quirks: info->quirks);
4754 rtw89_check_pci_ssid_quirks(rtwdev, pdev, ssid_quirks: pci_info->ssid_quirks);
4755
4756 SET_IEEE80211_DEV(hw: rtwdev->hw, dev: &pdev->dev);
4757
4758 ret = rtw89_core_init(rtwdev);
4759 if (ret) {
4760 rtw89_err(rtwdev, "failed to initialise core\n");
4761 goto err_release_hw;
4762 }
4763
4764 ret = rtw89_pci_claim_device(rtwdev, pdev);
4765 if (ret) {
4766 rtw89_err(rtwdev, "failed to claim pci device\n");
4767 goto err_core_deinit;
4768 }
4769
4770 ret = rtw89_pci_setup_resource(rtwdev, pdev);
4771 if (ret) {
4772 rtw89_err(rtwdev, "failed to setup pci resource\n");
4773 goto err_declaim_pci;
4774 }
4775
4776 ret = rtw89_chip_info_setup(rtwdev);
4777 if (ret) {
4778 rtw89_err(rtwdev, "failed to setup chip information\n");
4779 goto err_clear_resource;
4780 }
4781
4782 rtw89_pci_basic_cfg(rtwdev, resume: false);
4783
4784 ret = rtw89_core_napi_init(rtwdev);
4785 if (ret) {
4786 rtw89_err(rtwdev, "failed to init napi\n");
4787 goto err_clear_resource;
4788 }
4789
4790 ret = rtw89_pci_request_irq(rtwdev, pdev);
4791 if (ret) {
4792 rtw89_err(rtwdev, "failed to request pci irq\n");
4793 goto err_deinit_napi;
4794 }
4795
4796 ret = rtw89_core_register(rtwdev);
4797 if (ret) {
4798 rtw89_err(rtwdev, "failed to register core\n");
4799 goto err_free_irq;
4800 }
4801
4802 set_bit(nr: RTW89_FLAG_PROBE_DONE, addr: rtwdev->flags);
4803
4804 return 0;
4805
4806err_free_irq:
4807 rtw89_pci_free_irq(rtwdev, pdev);
4808err_deinit_napi:
4809 rtw89_core_napi_deinit(rtwdev);
4810err_clear_resource:
4811 rtw89_pci_clear_resource(rtwdev, pdev);
4812err_declaim_pci:
4813 rtw89_pci_declaim_device(rtwdev, pdev);
4814err_core_deinit:
4815 rtw89_core_deinit(rtwdev);
4816err_release_hw:
4817 rtw89_free_ieee80211_hw(rtwdev);
4818
4819 return ret;
4820}
4821EXPORT_SYMBOL(rtw89_pci_probe);
4822
4823void rtw89_pci_remove(struct pci_dev *pdev)
4824{
4825 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
4826 struct rtw89_dev *rtwdev;
4827
4828 rtwdev = hw->priv;
4829
4830 rtw89_pci_free_irq(rtwdev, pdev);
4831 rtw89_core_napi_deinit(rtwdev);
4832 rtw89_core_unregister(rtwdev);
4833 rtw89_pci_clear_resource(rtwdev, pdev);
4834 rtw89_pci_declaim_device(rtwdev, pdev);
4835 rtw89_core_deinit(rtwdev);
4836 rtw89_free_ieee80211_hw(rtwdev);
4837}
4838EXPORT_SYMBOL(rtw89_pci_remove);
4839
4840MODULE_AUTHOR("Realtek Corporation");
4841MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver");
4842MODULE_LICENSE("Dual BSD/GPL");
4843
